Compare commits


26 Commits
v3.1 ... v3.1.2

SHA1 Message Date
378539459a release v3.1.2 2022-06-14 11:43:30 +02:00
e4356cb516 Added -F (--force) to allow 1:1 replication. 2022-02-23 18:36:03 +01:00
cab2f98bb8 Better strip path handling and collision checking. Now also supports stripping so much it ends up on a pool-target.
Fixes #102, #117
2022-02-23 17:47:50 +01:00
07cb7cfad4 version bump 2021-12-19 18:23:09 +01:00
7b4a986f13 fix #103 2021-12-19 18:16:54 +01:00
be2474bb1c error doc 2021-11-02 20:18:57 +01:00
81e7cd940c forgot to remove debugging print 2021-10-04 00:59:50 +02:00
0b4448798e out of range for python 2 2021-10-04 00:34:07 +02:00
b1689f5066 added --...-format options. closes #87 2021-10-04 00:14:40 +02:00
dcb9cdac44 Merge branch 'master' of github.com:psy0rz/zfs_autobackup 2021-10-03 21:50:08 +02:00
9dc280abad Merge pull request #98 from sbidoul/relative-imports
Use relative imports
2021-09-21 16:44:47 +02:00
6b8c683315 Merge branch 'relative-imports' of https://github.com/sbidoul/zfs_autobackup 2021-09-21 14:31:50 +02:00
66e123849b preparations for #87 2021-09-21 14:30:59 +02:00
7325e1e351 ignore coveralls submission errors 2021-09-21 14:21:32 +02:00
9f4ea51622 ignore coveralls submission errors 2021-09-21 14:14:58 +02:00
8c1058a808 Use relative imports 2021-09-21 14:05:33 +02:00
d9e759a3eb fix 2021-09-20 21:14:26 +02:00
46457b3aca added some common short options and changes to fix #88 2021-09-20 15:11:23 +02:00
59f7ccc352 default compression is now zstd-fast, fixes #97 2021-09-20 14:54:38 +02:00
578fb1be4b renamed --ignore-replicated to --exclude-unchanged. tidied up and removed separate filter_replicated() step. #93, #95, #96 2021-09-20 14:52:20 +02:00
f9b16c050b Merge pull request #96 from xrobau/really-ignore-replicated
Fix #93, Fix #95 Re-Document --exclude-received
2021-09-16 12:14:01 +02:00
2ba6fe5235 Fix #93, Fix #95 Re-Document --exclude-received
If another zfs_autobackup session is running, there will be changes
on zvols that are replicated. This means that it is possible for
a pair of servers running replication between themselves to start
backing up the backups.  Turning on --exclude-received when backups
are running between an a <--> b pair of servers means that the vols
that are received will never be accidentally selected.
2021-09-16 09:38:26 +10:00
8e2c91735a Merge remote-tracking branch 'origin/master' 2021-08-24 11:14:24 +02:00
d57e3922a0 added note, #91 2021-08-24 11:14:13 +02:00
4b25dd76f1 Merge pull request #90 from bagbag/master
Support for zstd-adapt
2021-08-19 15:45:17 +02:00
2843781aa6 Support for zstd-adapt 2021-08-18 23:08:00 +02:00
19 changed files with 426 additions and 272 deletions

View File

@ -27,7 +27,7 @@ jobs:
- name: Coveralls
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
run: coveralls --service=github
run: coveralls --service=github || true
ubuntu18:
runs-on: ubuntu-18.04
@ -49,7 +49,7 @@ jobs:
- name: Coveralls
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
run: coveralls --service=github
run: coveralls --service=github || true
ubuntu18_python2:
runs-on: ubuntu-18.04
@ -73,4 +73,4 @@ jobs:
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
COVERALLS_REPO_TOKEN: ${{ secrets.GITHUB_TOKEN }}
run: coveralls --service=github
run: coveralls --service=github || true

View File

@ -279,6 +279,8 @@ root@ws1:~# zfs-autobackup test --verbose
This also allows you to make several snapshots during the day, but only back up the data at night when the server is not busy.
**Note**: In this mode it doesn't take a specified target-schedule into account when thinning; it only knows a snapshot is the common snapshot by looking at the holds. So make sure your source-schedule keeps the snapshots you still want to transfer at a later point.
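As a rough sketch of that day/night split, the same runs can be driven from Python via the entry point the test suite in this diff uses; the backup name `test` and target `test_target1` are just the placeholder names used throughout these tests:

```
from zfs_autobackup.ZfsAutobackup import ZfsAutobackup

# Daytime runs: no target path given, so zfs-autobackup only creates source
# snapshots (snapshot-tool mode) and leaves the transfer for later.
ZfsAutobackup("test --verbose".split(" ")).run()

# Night run: with a target path it transfers the accumulated snapshots and
# then thins source and target according to the schedules.
ZfsAutobackup("test test_target1 --verbose".split(" ")).run()
```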
## Thinning out obsolete snapshots
The thinner is the mechanism that destroys old snapshots on the source and target.
@ -396,7 +398,14 @@ Note 1: The --encrypt option will rely on inheriting encryption parameters from
Note 2: Decide what you want at an early stage: If you change the --encrypt or --decrypt parameter after the initial sync you might get weird and wonderful errors. (nothing dangerous)
I'll add some tips as the issues start to come in on github. :)
**Some common errors while using zfs encryption:**
```
cannot receive incremental stream: kernel modules must be upgraded to receive this stream.
```
This happens if you forget to use --encrypt while the target datasets are already encrypted. (Very strange error message indeed)
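A hedged sketch of the invocation that avoids this error, mirroring the encryption tests elsewhere in this diff (dataset names are the test placeholders):

```
from zfs_autobackup.ZfsAutobackup import ZfsAutobackup

# When the target dataset is itself encrypted (here test_target1/encryptedtarget),
# pass --encrypt so the received data inherits the target's encryption parameters
# instead of hitting the "kernel modules must be upgraded" error above.
ZfsAutobackup("test test_target1/encryptedtarget --verbose --encrypt".split(" ")).run()
```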
## Transfer buffering, compression and rate limiting.
@ -619,9 +628,9 @@ optional arguments:
multiple times)
--recv-pipe COMMAND pipe zfs recv input through COMMAND (can be used
multiple times)
--compress TYPE Use compression during transfer, zstd-fast
recommended. (xz, pigz-slow, zstd-slow, zstd-fast,
lzo, gzip, pigz-fast, lz4)
--compress TYPE Use compression during transfer, defaults to zstd-adapt
if TYPE is not specified. (gzip, pigz-fast, pigz-slow,
zstd-fast, zstd-slow, zstd-adapt, xz, lzo, lz4)
--rate DATARATE Limit data transfer rate (e.g. 128K. requires
mbuffer.)
--buffer SIZE Add zfs send and recv buffers to smooth out IO bursts.
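For context, a minimal sketch combining these transfer options via the Python entry point the tests use; the values are illustrative, and --rate and --buffer both require mbuffer on the hosts involved:

```
from zfs_autobackup.ZfsAutobackup import ZfsAutobackup

# Compress the stream with zstd-fast, add 128M send/recv buffers and cap the
# transfer rate at 1M/s. Any of the listed compressor TYPEs can be substituted.
ZfsAutobackup(
    "test test_target1 --compress=zstd-fast --buffer=128M --rate=1M --verbose".split(" ")
).run()
```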

View File

@ -13,10 +13,10 @@ class TestZfsNode(unittest2.TestCase):
def test_destroymissing(self):
#initial backup
with patch('time.strftime', return_value="10101111000000"): #1000 years in past
with patch('time.strftime', return_value="test-19101111000000"): #1000 years in past
self.assertFalse(ZfsAutobackup("test test_target1 --no-progress --verbose --no-holds".split(" ")).run())
with patch('time.strftime', return_value="20101111000000"): #far in past
with patch('time.strftime', return_value="test-20101111000000"): #far in past
self.assertFalse(ZfsAutobackup("test test_target1 --no-progress --verbose --no-holds --allow-empty".split(" ")).run())
@ -40,7 +40,7 @@ class TestZfsNode(unittest2.TestCase):
print(buf.getvalue())
#should have done the snapshot cleanup for destroy missing:
self.assertIn("fs1@test-10101111000000: Destroying", buf.getvalue())
self.assertIn("fs1@test-19101111000000: Destroying", buf.getvalue())
self.assertIn("fs1: Destroy missing: Still has children here.", buf.getvalue())
@ -130,6 +130,6 @@ test_target1/test_source1
test_target1/test_source2
test_target1/test_source2/fs2
test_target1/test_source2/fs2/sub
test_target1/test_source2/fs2/sub@test-10101111000000
test_target1/test_source2/fs2/sub@test-19101111000000
test_target1/test_source2/fs2/sub@test-20101111000000
""")

View File

@ -48,11 +48,11 @@ class TestZfsEncryption(unittest2.TestCase):
self.prepare_encrypted_dataset("11111111", "test_source1/fs1/encryptedsourcekeyless", unload_key=True) # raw mode shouldn't need a key
self.prepare_encrypted_dataset("22222222", "test_target1/encryptedtarget")
with patch('time.strftime', return_value="20101111000000"):
with patch('time.strftime', return_value="test-20101111000000"):
self.assertFalse(ZfsAutobackup("test test_target1 --verbose --no-progress --allow-empty --exclude-received".split(" ")).run())
self.assertFalse(ZfsAutobackup("test test_target1/encryptedtarget --verbose --no-progress --no-snapshot --exclude-received".split(" ")).run())
with patch('time.strftime', return_value="20101111000001"):
with patch('time.strftime', return_value="test-20101111000001"):
self.assertFalse(ZfsAutobackup("test test_target1 --verbose --no-progress --allow-empty --exclude-received".split(" ")).run())
self.assertFalse(ZfsAutobackup("test test_target1/encryptedtarget --verbose --no-progress --no-snapshot --exclude-received".split(" ")).run())
@ -85,11 +85,11 @@ test_target1/test_source2/fs2/sub encryption
self.prepare_encrypted_dataset("11111111", "test_source1/fs1/encryptedsource")
self.prepare_encrypted_dataset("22222222", "test_target1/encryptedtarget")
with patch('time.strftime', return_value="20101111000000"):
with patch('time.strftime', return_value="test-20101111000000"):
self.assertFalse(ZfsAutobackup("test test_target1 --verbose --no-progress --decrypt --allow-empty --exclude-received".split(" ")).run())
self.assertFalse(ZfsAutobackup("test test_target1/encryptedtarget --verbose --no-progress --decrypt --no-snapshot --exclude-received".split(" ")).run())
with patch('time.strftime', return_value="20101111000001"):
with patch('time.strftime', return_value="test-20101111000001"):
self.assertFalse(ZfsAutobackup("test test_target1 --verbose --no-progress --decrypt --allow-empty --exclude-received".split(" ")).run())
self.assertFalse(ZfsAutobackup("test test_target1/encryptedtarget --verbose --no-progress --decrypt --no-snapshot --exclude-received".split(" ")).run())
@ -120,11 +120,11 @@ test_target1/test_source2/fs2/sub encryptionroot -
self.prepare_encrypted_dataset("11111111", "test_source1/fs1/encryptedsource")
self.prepare_encrypted_dataset("22222222", "test_target1/encryptedtarget")
with patch('time.strftime', return_value="20101111000000"):
with patch('time.strftime', return_value="test-20101111000000"):
self.assertFalse(ZfsAutobackup("test test_target1 --verbose --no-progress --encrypt --debug --allow-empty --exclude-received".split(" ")).run())
self.assertFalse(ZfsAutobackup("test test_target1/encryptedtarget --verbose --no-progress --encrypt --debug --no-snapshot --exclude-received".split(" ")).run())
with patch('time.strftime', return_value="20101111000001"):
with patch('time.strftime', return_value="test-20101111000001"):
self.assertFalse(ZfsAutobackup("test test_target1 --verbose --no-progress --encrypt --debug --allow-empty --exclude-received".split(" ")).run())
self.assertFalse(ZfsAutobackup("test test_target1/encryptedtarget --verbose --no-progress --encrypt --debug --no-snapshot --exclude-received".split(" ")).run())
@ -155,14 +155,14 @@ test_target1/test_source2/fs2/sub encryptionroot -
self.prepare_encrypted_dataset("11111111", "test_source1/fs1/encryptedsource")
self.prepare_encrypted_dataset("22222222", "test_target1/encryptedtarget")
with patch('time.strftime', return_value="20101111000000"):
with patch('time.strftime', return_value="test-20101111000000"):
self.assertFalse(ZfsAutobackup(
"test test_target1 --verbose --no-progress --decrypt --encrypt --debug --allow-empty --exclude-received".split(" ")).run())
self.assertFalse(ZfsAutobackup(
"test test_target1/encryptedtarget --verbose --no-progress --decrypt --encrypt --debug --no-snapshot --exclude-received".split(
" ")).run())
with patch('time.strftime', return_value="20101111000001"):
with patch('time.strftime', return_value="test-20101111000001"):
self.assertFalse(ZfsAutobackup(
"test test_target1 --verbose --no-progress --decrypt --encrypt --debug --allow-empty --exclude-received".split(" ")).run())
self.assertFalse(ZfsAutobackup(

View File

@ -32,7 +32,7 @@ class TestExternalFailures(unittest2.TestCase):
def test_initial_resume(self):
# initial backup, leaves resume token
with patch('time.strftime', return_value="20101111000000"):
with patch('time.strftime', return_value="test-20101111000000"):
self.generate_resume()
# --test should resume and succeed
@ -81,11 +81,11 @@ test_target1/test_source2/fs2/sub@test-20101111000000
def test_incremental_resume(self):
# initial backup
with patch('time.strftime', return_value="20101111000000"):
with patch('time.strftime', return_value="test-20101111000000"):
self.assertFalse(ZfsAutobackup("test test_target1 --no-progress --verbose --allow-empty".split(" ")).run())
# incremental backup leaves resume token
with patch('time.strftime', return_value="20101111000001"):
with patch('time.strftime', return_value="test-20101111000001"):
self.generate_resume()
# --test should resume and succeed
@ -138,7 +138,7 @@ test_target1/test_source2/fs2/sub@test-20101111000000
self.skipTest("Resume not supported in this ZFS userspace version")
# initial backup, leaves resume token
with patch('time.strftime', return_value="20101111000000"):
with patch('time.strftime', return_value="test-20101111000000"):
self.generate_resume()
# remove corresponding source snapshot, so it becomes invalid
@ -148,11 +148,11 @@ test_target1/test_source2/fs2/sub@test-20101111000000
shelltest("zfs destroy test_target1/test_source1/fs1/sub; true")
# --test try again, should abort old resume
with patch('time.strftime', return_value="20101111000001"):
with patch('time.strftime', return_value="test-20101111000001"):
self.assertFalse(ZfsAutobackup("test test_target1 --no-progress --verbose --test".split(" ")).run())
# try again, should abort old resume
with patch('time.strftime', return_value="20101111000001"):
with patch('time.strftime', return_value="test-20101111000001"):
self.assertFalse(ZfsAutobackup("test test_target1 --no-progress --verbose".split(" ")).run())
r = shelltest("zfs list -H -o name -r -t all test_target1")
@ -176,22 +176,22 @@ test_target1/test_source2/fs2/sub@test-20101111000000
self.skipTest("Resume not supported in this ZFS userspace version")
# initial backup
with patch('time.strftime', return_value="20101111000000"):
with patch('time.strftime', return_value="test-20101111000000"):
self.assertFalse(ZfsAutobackup("test test_target1 --no-progress --verbose --allow-empty".split(" ")).run())
# incremental backup, leaves resume token
with patch('time.strftime', return_value="20101111000001"):
with patch('time.strftime', return_value="test-20101111000001"):
self.generate_resume()
# remove corresponding source snapshot, so it becomes invalid
shelltest("zfs destroy test_source1/fs1@test-20101111000001")
# --test try again, should abort old resume
with patch('time.strftime', return_value="20101111000002"):
with patch('time.strftime', return_value="test-20101111000002"):
self.assertFalse(ZfsAutobackup("test test_target1 --no-progress --verbose --test".split(" ")).run())
# try again, should abort old resume
with patch('time.strftime', return_value="20101111000002"):
with patch('time.strftime', return_value="test-20101111000002"):
self.assertFalse(ZfsAutobackup("test test_target1 --no-progress --verbose".split(" ")).run())
r = shelltest("zfs list -H -o name -r -t all test_target1")
@ -215,17 +215,17 @@ test_target1/test_source2/fs2/sub@test-20101111000000
if "0.6.5" in ZFS_USERSPACE:
self.skipTest("Resume not supported in this ZFS userspace version")
with patch('time.strftime', return_value="20101111000000"):
with patch('time.strftime', return_value="test-20101111000000"):
self.assertFalse(ZfsAutobackup("test test_target1 --no-progress --verbose".split(" ")).run())
# generate resume
with patch('time.strftime', return_value="20101111000001"):
with patch('time.strftime', return_value="test-20101111000001"):
self.generate_resume()
with OutputIO() as buf:
with redirect_stdout(buf):
# incremental, doesn't want previous anymore
with patch('time.strftime', return_value="20101111000002"):
with patch('time.strftime', return_value="test-20101111000002"):
self.assertFalse(ZfsAutobackup(
"test test_target1 --no-progress --verbose --keep-target=0 --allow-empty".split(" ")).run())
@ -253,11 +253,11 @@ test_target1/test_source2/fs2/sub@test-20101111000002
if "0.6.5" in ZFS_USERSPACE:
self.skipTest("Resume not supported in this ZFS userspace version")
with patch('time.strftime', return_value="20101111000000"):
with patch('time.strftime', return_value="test-20101111000000"):
self.assertFalse(ZfsAutobackup("test test_target1 --no-progress --verbose".split(" ")).run())
# generate resume
with patch('time.strftime', return_value="20101111000001"):
with patch('time.strftime', return_value="test-20101111000001"):
self.generate_resume()
shelltest("zfs destroy test_source1/fs1@test-20101111000001")
@ -265,7 +265,7 @@ test_target1/test_source2/fs2/sub@test-20101111000002
with OutputIO() as buf:
with redirect_stdout(buf):
# incremental, doesn't want previous anymore
with patch('time.strftime', return_value="20101111000002"):
with patch('time.strftime', return_value="test-20101111000002"):
self.assertFalse(ZfsAutobackup(
"test test_target1 --no-progress --verbose --no-snapshot".split(
" ")).run())
@ -277,14 +277,14 @@ test_target1/test_source2/fs2/sub@test-20101111000002
def test_missing_common(self):
with patch('time.strftime', return_value="20101111000000"):
with patch('time.strftime', return_value="test-20101111000000"):
self.assertFalse(ZfsAutobackup("test test_target1 --no-progress --verbose --allow-empty".split(" ")).run())
# remove common snapshot and leave nothing
shelltest("zfs release zfs_autobackup:test test_source1/fs1@test-20101111000000")
shelltest("zfs destroy test_source1/fs1@test-20101111000000")
with patch('time.strftime', return_value="20101111000001"):
with patch('time.strftime', return_value="test-20101111000001"):
self.assertTrue(ZfsAutobackup("test test_target1 --no-progress --verbose --allow-empty".split(" ")).run())
#UPDATE: of course the one thing that wasn't tested had a bug :( (in ExecuteNode.run()).
@ -295,7 +295,7 @@ test_target1/test_source2/fs2/sub@test-20101111000002
# #recreate target pool without any features
# # shelltest("zfs set compress=on test_source1; zpool destroy test_target1; zpool create test_target1 -o feature@project_quota=disabled /dev/ram2")
#
# with patch('time.strftime', return_value="20101111000000"):
# with patch('time.strftime', return_value="test-20101111000000"):
# self.assertFalse(ZfsAutobackup("test test_target1 --verbose --allow-empty --no-progress".split(" ")).run())
#
# r = shelltest("zfs list -H -o name -r -t all test_target1")

View File

@ -11,17 +11,17 @@ class TestZfsNode(unittest2.TestCase):
def test_keepsource0target10queuedsend(self):
"""Test if thinner doesnt destroy too much early on if there are no common snapshots YET. Issue #84"""
with patch('time.strftime', return_value="20101111000000"):
with patch('time.strftime', return_value="test-20101111000000"):
self.assertFalse(ZfsAutobackup(
"test test_target1 --no-progress --verbose --keep-source=0 --keep-target=10 --allow-empty --no-send".split(
" ")).run())
with patch('time.strftime', return_value="20101111000001"):
with patch('time.strftime', return_value="test-20101111000001"):
self.assertFalse(ZfsAutobackup(
"test test_target1 --no-progress --verbose --keep-source=0 --keep-target=10 --allow-empty --no-send".split(
" ")).run())
with patch('time.strftime', return_value="20101111000002"):
with patch('time.strftime', return_value="test-20101111000002"):
self.assertFalse(ZfsAutobackup(
"test test_target1 --no-progress --verbose --keep-source=0 --keep-target=10 --allow-empty".split(
" ")).run())
@ -56,3 +56,50 @@ test_target1/test_source2/fs2/sub@test-20101111000000
test_target1/test_source2/fs2/sub@test-20101111000001
test_target1/test_source2/fs2/sub@test-20101111000002
""")
def test_excludepaths(self):
"""Test issue #103"""
shelltest("zfs create test_target1/target_shouldnotbeexcluded")
shelltest("zfs set autobackup:test=true test_target1/target_shouldnotbeexcluded")
shelltest("zfs create test_target1/target")
with patch('time.strftime', return_value="test-20101111000000"):
self.assertFalse(ZfsAutobackup(
"test test_target1/target --no-progress --verbose --allow-empty".split(
" ")).run())
r = shelltest("zfs list -H -o name -r -t all " + TEST_POOLS)
self.assertMultiLineEqual(r, """
test_source1
test_source1/fs1
test_source1/fs1@test-20101111000000
test_source1/fs1/sub
test_source1/fs1/sub@test-20101111000000
test_source2
test_source2/fs2
test_source2/fs2/sub
test_source2/fs2/sub@test-20101111000000
test_source2/fs3
test_source2/fs3/sub
test_target1
test_target1/target
test_target1/target/test_source1
test_target1/target/test_source1/fs1
test_target1/target/test_source1/fs1@test-20101111000000
test_target1/target/test_source1/fs1/sub
test_target1/target/test_source1/fs1/sub@test-20101111000000
test_target1/target/test_source2
test_target1/target/test_source2/fs2
test_target1/target/test_source2/fs2/sub
test_target1/target/test_source2/fs2/sub@test-20101111000000
test_target1/target/test_target1
test_target1/target/test_target1/target_shouldnotbeexcluded
test_target1/target/test_target1/target_shouldnotbeexcluded@test-20101111000000
test_target1/target_shouldnotbeexcluded
test_target1/target_shouldnotbeexcluded@test-20101111000000
""")

View File

@ -33,7 +33,7 @@ class TestZfsScaling(unittest2.TestCase):
run_counter=0
with patch.object(ExecuteNode,'run', run_count) as p:
with patch('time.strftime', return_value="20101112000000"):
with patch('time.strftime', return_value="test-20101112000000"):
self.assertFalse(ZfsAutobackup("test test_target1 --no-progress --verbose --keep-source=10000 --keep-target=10000 --no-holds --allow-empty".split(" ")).run())
@ -46,7 +46,7 @@ class TestZfsScaling(unittest2.TestCase):
run_counter=0
with patch.object(ExecuteNode,'run', run_count) as p:
with patch('time.strftime', return_value="20101112000001"):
with patch('time.strftime', return_value="test-20101112000001"):
self.assertFalse(ZfsAutobackup("test test_target1 --no-progress --verbose --keep-source=10000 --keep-target=10000 --no-holds --allow-empty".split(" ")).run())
@ -73,7 +73,7 @@ class TestZfsScaling(unittest2.TestCase):
run_counter=0
with patch.object(ExecuteNode,'run', run_count) as p:
with patch('time.strftime', return_value="20101112000000"):
with patch('time.strftime', return_value="test-20101112000000"):
self.assertFalse(ZfsAutobackup("test test_target1 --no-progress --verbose --no-holds --allow-empty".split(" ")).run())
@ -87,7 +87,7 @@ class TestZfsScaling(unittest2.TestCase):
run_counter=0
with patch.object(ExecuteNode,'run', run_count) as p:
with patch('time.strftime', return_value="20101112000001"):
with patch('time.strftime', return_value="test-20101112000001"):
self.assertFalse(ZfsAutobackup("test test_target1 --no-progress --verbose --no-holds --allow-empty".split(" ")).run())

View File

@ -16,25 +16,25 @@ class TestSendRecvPipes(unittest2.TestCase):
with self.subTest("local local pipe"):
with patch('time.strftime', return_value="20101111000000"):
with patch('time.strftime', return_value="test-20101111000000"):
self.assertFalse(ZfsAutobackup(["test", "test_target1", "--exclude-received", "--no-holds", "--no-progress", "--send-pipe=dd bs=1M", "--recv-pipe=dd bs=2M"]).run())
shelltest("zfs destroy -r test_target1/test_source1/fs1/sub")
with self.subTest("remote local pipe"):
with patch('time.strftime', return_value="20101111000000"):
with patch('time.strftime', return_value="test-20101111000000"):
self.assertFalse(ZfsAutobackup(["test", "test_target1", "--exclude-received", "--no-holds", "--no-progress", "--ssh-source=localhost", "--send-pipe=dd bs=1M", "--recv-pipe=dd bs=2M"]).run())
shelltest("zfs destroy -r test_target1/test_source1/fs1/sub")
with self.subTest("local remote pipe"):
with patch('time.strftime', return_value="20101111000000"):
with patch('time.strftime', return_value="test-20101111000000"):
self.assertFalse(ZfsAutobackup(["test", "test_target1", "--exclude-received", "--no-holds", "--no-progress", "--ssh-target=localhost", "--send-pipe=dd bs=1M", "--recv-pipe=dd bs=2M"]).run())
shelltest("zfs destroy -r test_target1/test_source1/fs1/sub")
with self.subTest("remote remote pipe"):
with patch('time.strftime', return_value="20101111000000"):
with patch('time.strftime', return_value="test-20101111000000"):
self.assertFalse(ZfsAutobackup(["test", "test_target1", "--exclude-received", "--no-holds", "--no-progress", "--ssh-source=localhost", "--ssh-target=localhost", "--send-pipe=dd bs=1M", "--recv-pipe=dd bs=2M"]).run())
def test_compress(self):
@ -43,7 +43,7 @@ class TestSendRecvPipes(unittest2.TestCase):
for compress in zfs_autobackup.compressors.COMPRESS_CMDS.keys():
with self.subTest("compress "+compress):
with patch('time.strftime', return_value="20101111000000"):
with patch('time.strftime', return_value="test-20101111000000"):
self.assertFalse(ZfsAutobackup(["test", "test_target1", "--exclude-received", "--no-holds", "--no-progress", "--compress="+compress]).run())
shelltest("zfs destroy -r test_target1/test_source1/fs1/sub")
@ -53,25 +53,25 @@ class TestSendRecvPipes(unittest2.TestCase):
with self.subTest("local local pipe"):
with patch('time.strftime', return_value="20101111000000"):
with patch('time.strftime', return_value="test-20101111000000"):
self.assertFalse(ZfsAutobackup(["test", "test_target1", "--exclude-received", "--no-holds", "--no-progress", "--buffer=1M" ]).run())
shelltest("zfs destroy -r test_target1/test_source1/fs1/sub")
with self.subTest("remote local pipe"):
with patch('time.strftime', return_value="20101111000000"):
with patch('time.strftime', return_value="test-20101111000000"):
self.assertFalse(ZfsAutobackup(["test", "test_target1", "--exclude-received", "--no-holds", "--no-progress", "--ssh-source=localhost", "--buffer=1M"]).run())
shelltest("zfs destroy -r test_target1/test_source1/fs1/sub")
with self.subTest("local remote pipe"):
with patch('time.strftime', return_value="20101111000000"):
with patch('time.strftime', return_value="test-20101111000000"):
self.assertFalse(ZfsAutobackup(["test", "test_target1", "--exclude-received", "--no-holds", "--no-progress", "--ssh-target=localhost", "--buffer=1M"]).run())
shelltest("zfs destroy -r test_target1/test_source1/fs1/sub")
with self.subTest("remote remote pipe"):
with patch('time.strftime', return_value="20101111000000"):
with patch('time.strftime', return_value="test-20101111000000"):
self.assertFalse(ZfsAutobackup(["test", "test_target1", "--exclude-received", "--no-holds", "--no-progress", "--ssh-source=localhost", "--ssh-target=localhost", "--buffer=1M"]).run())
def test_rate(self):
@ -79,7 +79,7 @@ class TestSendRecvPipes(unittest2.TestCase):
start=time.time()
with patch('time.strftime', return_value="20101111000000"):
with patch('time.strftime', return_value="test-20101111000000"):
self.assertFalse(ZfsAutobackup(["test", "test_target1", "--exclude-received", "--no-holds", "--no-progress", "--rate=50k" ]).run())
#not a great way of verifying but it works.

View File

@ -33,7 +33,7 @@ class TestZfsAutobackup(unittest2.TestCase):
def test_snapshotmode(self):
"""test snapshot tool mode"""
with patch('time.strftime', return_value="20101111000000"):
with patch('time.strftime', return_value="test-20101111000000"):
self.assertFalse(ZfsAutobackup("test --no-progress --verbose".split(" ")).run())
r=shelltest("zfs list -H -o name -r -t all "+TEST_POOLS)
@ -57,7 +57,7 @@ test_target1
with self.subTest("no datasets selected"):
with OutputIO() as buf:
with redirect_stderr(buf):
with patch('time.strftime', return_value="20101111000000"):
with patch('time.strftime', return_value="test-20101111000000"):
self.assertTrue(ZfsAutobackup("nonexisting test_target1 --verbose --debug --no-progress".split(" ")).run())
print(buf.getvalue())
@ -67,7 +67,7 @@ test_target1
with self.subTest("defaults with full verbose and debug"):
with patch('time.strftime', return_value="20101111000000"):
with patch('time.strftime', return_value="test-20101111000000"):
self.assertFalse(ZfsAutobackup("test test_target1 --verbose --debug --no-progress".split(" ")).run())
r=shelltest("zfs list -H -o name -r -t all "+TEST_POOLS)
@ -96,7 +96,7 @@ test_target1/test_source2/fs2/sub@test-20101111000000
""")
with self.subTest("bare defaults, allow empty"):
with patch('time.strftime', return_value="20101111000001"):
with patch('time.strftime', return_value="test-20101111000001"):
self.assertFalse(ZfsAutobackup("test test_target1 --allow-empty --no-progress".split(" ")).run())
@ -167,14 +167,14 @@ test_target1/test_source2/fs2/sub@test-20101111000001 userrefs 1 -
#make sure time handling is correct. try to make snapshots a year apart and verify that only snapshots mostly 1y old are kept
with self.subTest("test time checking"):
with patch('time.strftime', return_value="20111111000000"):
with patch('time.strftime', return_value="test-20111111000000"):
self.assertFalse(ZfsAutobackup("test test_target1 --allow-empty --verbose --no-progress".split(" ")).run())
time_str="20111112000000" #month in the "future"
future_timestamp=time_secs=time.mktime(time.strptime(time_str,"%Y%m%d%H%M%S"))
with patch('time.time', return_value=future_timestamp):
with patch('time.strftime', return_value="20111111000001"):
with patch('time.strftime', return_value="test-20111111000001"):
self.assertFalse(ZfsAutobackup("test test_target1 --allow-empty --verbose --keep-source 1y1y --keep-target 1d1y --no-progress".split(" ")).run())
@ -214,7 +214,7 @@ test_target1/test_source2/fs2/sub@test-20111111000001
r=shelltest("zfs snapshot test_source1/fs1@othersimple")
r=shelltest("zfs snapshot test_source1/fs1@otherdate-20001111000000")
with patch('time.strftime', return_value="20101111000000"):
with patch('time.strftime', return_value="test-20101111000000"):
self.assertFalse(ZfsAutobackup("test test_target1 --no-progress --verbose".split(" ")).run())
r=shelltest("zfs list -H -o name -r -t all "+TEST_POOLS)
@ -249,7 +249,7 @@ test_target1/test_source2/fs2/sub@test-20101111000000
r=shelltest("zfs snapshot test_source1/fs1@othersimple")
r=shelltest("zfs snapshot test_source1/fs1@otherdate-20001111000000")
with patch('time.strftime', return_value="20101111000000"):
with patch('time.strftime', return_value="test-20101111000000"):
self.assertFalse(ZfsAutobackup("test test_target1 --no-progress --verbose --other-snapshots".split(" ")).run())
r=shelltest("zfs list -H -o name -r -t all "+TEST_POOLS)
@ -284,7 +284,7 @@ test_target1/test_source2/fs2/sub@test-20101111000000
def test_nosnapshot(self):
with patch('time.strftime', return_value="20101111000000"):
with patch('time.strftime', return_value="test-20101111000000"):
self.assertFalse(ZfsAutobackup("test test_target1 --verbose --no-snapshot --no-progress".split(" ")).run())
r=shelltest("zfs list -H -o name -r -t all "+TEST_POOLS)
@ -308,7 +308,7 @@ test_target1/test_source2/fs2
def test_nosend(self):
with patch('time.strftime', return_value="20101111000000"):
with patch('time.strftime', return_value="test-20101111000000"):
self.assertFalse(ZfsAutobackup("test test_target1 --verbose --no-send --no-progress".split(" ")).run())
r=shelltest("zfs list -H -o name -r -t all "+TEST_POOLS)
@ -331,7 +331,7 @@ test_target1
def test_ignorereplicated(self):
r=shelltest("zfs snapshot test_source1/fs1@otherreplication")
with patch('time.strftime', return_value="20101111000000"):
with patch('time.strftime', return_value="test-20101111000000"):
self.assertFalse(ZfsAutobackup("test test_target1 --no-progress --verbose --ignore-replicated".split(" ")).run())
r=shelltest("zfs list -H -o name -r -t all "+TEST_POOLS)
@ -360,7 +360,7 @@ test_target1/test_source2/fs2/sub@test-20101111000000
def test_noholds(self):
with patch('time.strftime', return_value="20101111000000"):
with patch('time.strftime', return_value="test-20101111000000"):
self.assertFalse(ZfsAutobackup("test test_target1 --verbose --no-holds --no-progress".split(" ")).run())
r=shelltest("zfs get -r userrefs test_source1 test_source2 test_target1")
@ -392,7 +392,7 @@ test_target1/test_source2/fs2/sub@test-20101111000000 userrefs 0 -
def test_strippath(self):
with patch('time.strftime', return_value="20101111000000"):
with patch('time.strftime', return_value="test-20101111000000"):
self.assertFalse(ZfsAutobackup("test test_target1 --verbose --strip-path=1 --no-progress".split(" ")).run())
r=shelltest("zfs list -H -o name -r -t all "+TEST_POOLS)
@ -418,6 +418,13 @@ test_target1/fs2/sub
test_target1/fs2/sub@test-20101111000000
""")
def test_strippath_collision(self):
with self.assertRaisesRegexp(Exception,"collision"):
ZfsAutobackup("test test_target1 --verbose --strip-path=2 --no-progress --debug".split(" ")).run()
def test_strippath_toomuch(self):
with self.assertRaisesRegexp(Exception,"too much"):
ZfsAutobackup("test test_target1 --verbose --strip-path=3 --no-progress --debug".split(" ")).run()
def test_clearrefres(self):
@ -428,7 +435,7 @@ test_target1/fs2/sub@test-20101111000000
r=shelltest("zfs set refreservation=1M test_source1/fs1")
with patch('time.strftime', return_value="20101111000000"):
with patch('time.strftime', return_value="test-20101111000000"):
self.assertFalse(ZfsAutobackup("test test_target1 --no-progress --verbose --clear-refreservation".split(" ")).run())
r=shelltest("zfs get refreservation -r test_source1 test_source2 test_target1")
@ -466,7 +473,7 @@ test_target1/test_source2/fs2/sub@test-20101111000000 refreservation -
self.skipTest("This zfs-userspace version doesnt support -o")
with patch('time.strftime', return_value="20101111000000"):
with patch('time.strftime', return_value="test-20101111000000"):
self.assertFalse(ZfsAutobackup("test test_target1 --no-progress --verbose --clear-mountpoint --debug".split(" ")).run())
r=shelltest("zfs get canmount -r test_source1 test_source2 test_target1")
@ -499,18 +506,18 @@ test_target1/test_source2/fs2/sub@test-20101111000000 canmount - -
def test_rollback(self):
#initial backup
with patch('time.strftime', return_value="20101111000000"):
with patch('time.strftime', return_value="test-20101111000000"):
self.assertFalse(ZfsAutobackup("test test_target1 --no-progress --verbose".split(" ")).run())
#make change
r=shelltest("zfs mount test_target1/test_source1/fs1")
r=shelltest("touch /test_target1/test_source1/fs1/change.txt")
with patch('time.strftime', return_value="20101111000001"):
with patch('time.strftime', return_value="test-20101111000001"):
#should fail (busy)
self.assertTrue(ZfsAutobackup("test test_target1 --no-progress --verbose --allow-empty".split(" ")).run())
with patch('time.strftime', return_value="20101111000002"):
with patch('time.strftime', return_value="test-20101111000002"):
#rollback, should succeed
self.assertFalse(ZfsAutobackup("test test_target1 --no-progress --verbose --allow-empty --rollback".split(" ")).run())
@ -518,14 +525,14 @@ test_target1/test_source2/fs2/sub@test-20101111000000 canmount - -
def test_destroyincompat(self):
#initial backup
with patch('time.strftime', return_value="20101111000000"):
with patch('time.strftime', return_value="test-20101111000000"):
self.assertFalse(ZfsAutobackup("test test_target1 --no-progress --verbose".split(" ")).run())
#add multiple compatible snapshots (written is still 0)
r=shelltest("zfs snapshot test_target1/test_source1/fs1@compatible1")
r=shelltest("zfs snapshot test_target1/test_source1/fs1@compatible2")
with patch('time.strftime', return_value="20101111000001"):
with patch('time.strftime', return_value="test-20101111000001"):
#should be ok, is compatible
self.assertFalse(ZfsAutobackup("test test_target1 --no-progress --verbose --allow-empty".split(" ")).run())
@ -535,19 +542,19 @@ test_target1/test_source2/fs2/sub@test-20101111000000 canmount - -
r=shelltest("zfs snapshot test_target1/test_source1/fs1@incompatible1")
with patch('time.strftime', return_value="20101111000002"):
with patch('time.strftime', return_value="test-20101111000002"):
#--test should fail, now incompatible
self.assertTrue(ZfsAutobackup("test test_target1 --no-progress --verbose --allow-empty --test".split(" ")).run())
with patch('time.strftime', return_value="20101111000002"):
with patch('time.strftime', return_value="test-20101111000002"):
#should fail, now incompatible
self.assertTrue(ZfsAutobackup("test test_target1 --no-progress --verbose --allow-empty".split(" ")).run())
with patch('time.strftime', return_value="20101111000003"):
with patch('time.strftime', return_value="test-20101111000003"):
#--test should succeed by destroying incompatibles
self.assertFalse(ZfsAutobackup("test test_target1 --no-progress --verbose --allow-empty --destroy-incompatible --test".split(" ")).run())
with patch('time.strftime', return_value="20101111000003"):
with patch('time.strftime', return_value="test-20101111000003"):
#should succeed by destroying incompatibles
self.assertFalse(ZfsAutobackup("test test_target1 --no-progress --verbose --allow-empty --destroy-incompatible".split(" ")).run())
@ -585,13 +592,13 @@ test_target1/test_source2/fs2/sub@test-20101111000003
#test all ssh directions
with patch('time.strftime', return_value="20101111000000"):
with patch('time.strftime', return_value="test-20101111000000"):
self.assertFalse(ZfsAutobackup("test test_target1 --no-progress --verbose --allow-empty --ssh-source localhost --exclude-received".split(" ")).run())
with patch('time.strftime', return_value="20101111000001"):
with patch('time.strftime', return_value="test-20101111000001"):
self.assertFalse(ZfsAutobackup("test test_target1 --no-progress --verbose --allow-empty --ssh-target localhost --exclude-received".split(" ")).run())
with patch('time.strftime', return_value="20101111000002"):
with patch('time.strftime', return_value="test-20101111000002"):
self.assertFalse(ZfsAutobackup("test test_target1 --no-progress --verbose --allow-empty --ssh-source localhost --ssh-target localhost".split(" ")).run())
@ -636,7 +643,7 @@ test_target1/test_source2/fs2/sub@test-20101111000002
def test_minchange(self):
#initial
with patch('time.strftime', return_value="20101111000000"):
with patch('time.strftime', return_value="test-20101111000000"):
self.assertFalse(ZfsAutobackup("test test_target1 --no-progress --verbose --min-change 100000".split(" ")).run())
#make small change, use umount to reflect the changes immediately
@ -646,7 +653,7 @@ test_target1/test_source2/fs2/sub@test-20101111000002
#too small change, takes no snapshots
with patch('time.strftime', return_value="20101111000001"):
with patch('time.strftime', return_value="test-20101111000001"):
self.assertFalse(ZfsAutobackup("test test_target1 --no-progress --verbose --min-change 100000".split(" ")).run())
#make big change
@ -654,7 +661,7 @@ test_target1/test_source2/fs2/sub@test-20101111000002
r=shelltest("zfs umount test_source1/fs1; zfs mount test_source1/fs1")
#bigger change, should take snapshot
with patch('time.strftime', return_value="20101111000002"):
with patch('time.strftime', return_value="test-20101111000002"):
self.assertFalse(ZfsAutobackup("test test_target1 --no-progress --verbose --min-change 100000".split(" ")).run())
r=shelltest("zfs list -H -o name -r -t all "+TEST_POOLS)
@ -687,7 +694,7 @@ test_target1/test_source2/fs2/sub@test-20101111000000
def test_test(self):
#initial
with patch('time.strftime', return_value="20101111000000"):
with patch('time.strftime', return_value="test-20101111000000"):
self.assertFalse(ZfsAutobackup("test test_target1 --no-progress --verbose --test".split(" ")).run())
r=shelltest("zfs list -H -o name -r -t all "+TEST_POOLS)
@ -704,12 +711,12 @@ test_target1
""")
#actually make initial backup
with patch('time.strftime', return_value="20101111000001"):
with patch('time.strftime', return_value="test-20101111000001"):
self.assertFalse(ZfsAutobackup("test test_target1 --no-progress --verbose".split(" ")).run())
#test incremental
with patch('time.strftime', return_value="20101111000002"):
with patch('time.strftime', return_value="test-20101111000002"):
self.assertFalse(ZfsAutobackup("test test_target1 --no-progress --allow-empty --verbose --test".split(" ")).run())
r=shelltest("zfs list -H -o name -r -t all "+TEST_POOLS)
@ -745,7 +752,7 @@ test_target1/test_source2/fs2/sub@test-20101111000001
shelltest("zfs create test_target1/test_source1")
shelltest("zfs send test_source1/fs1@migrate1| zfs recv test_target1/test_source1/fs1")
with patch('time.strftime', return_value="20101111000000"):
with patch('time.strftime', return_value="test-20101111000000"):
self.assertFalse(ZfsAutobackup("test test_target1 --no-progress --verbose".split(" ")).run())
r=shelltest("zfs list -H -o name -r -t all "+TEST_POOLS)
@ -778,15 +785,15 @@ test_target1/test_source2/fs2/sub@test-20101111000000
def test_keep0(self):
"""test if keep-source=0 and keep-target=0 dont delete common snapshot and break backup"""
with patch('time.strftime', return_value="20101111000000"):
with patch('time.strftime', return_value="test-20101111000000"):
self.assertFalse(ZfsAutobackup("test test_target1 --no-progress --verbose --keep-source=0 --keep-target=0".split(" ")).run())
#make snapshot, shouldn't delete 0
with patch('time.strftime', return_value="20101111000001"):
with patch('time.strftime', return_value="test-20101111000001"):
self.assertFalse(ZfsAutobackup("test --no-progress --verbose --keep-source=0 --keep-target=0 --allow-empty".split(" ")).run())
#make snapshot 2, shouldn't delete 0 since it has holds, but will delete 1 since it has no holds
with patch('time.strftime', return_value="20101111000002"):
with patch('time.strftime', return_value="test-20101111000002"):
self.assertFalse(ZfsAutobackup("test --no-progress --verbose --keep-source=0 --keep-target=0 --allow-empty".split(" ")).run())
r = shelltest("zfs list -H -o name -r -t all " + TEST_POOLS)
@ -818,7 +825,7 @@ test_target1/test_source2/fs2/sub@test-20101111000000
""")
#make another backup but with no-holds. we should naturally end up with only number 3
with patch('time.strftime', return_value="20101111000003"):
with patch('time.strftime', return_value="test-20101111000003"):
self.assertFalse(ZfsAutobackup("test test_target1 --no-progress --verbose --keep-source=0 --keep-target=0 --no-holds --allow-empty".split(" ")).run())
r = shelltest("zfs list -H -o name -r -t all " + TEST_POOLS)
@ -848,7 +855,7 @@ test_target1/test_source2/fs2/sub@test-20101111000003
# run with snapshot-only for 4, since we used no-holds, it will delete 3 on the source, breaking the backup
with patch('time.strftime', return_value="20101111000004"):
with patch('time.strftime', return_value="test-20101111000004"):
self.assertFalse(ZfsAutobackup("test --no-progress --verbose --keep-source=0 --keep-target=0 --allow-empty".split(" ")).run())
r = shelltest("zfs list -H -o name -r -t all " + TEST_POOLS)
@ -883,7 +890,7 @@ test_target1/test_source2/fs2/sub@test-20101111000003
r = shelltest("zfs snapshot test_source1@test")
l=LogConsole(show_verbose=True, show_debug=False, color=False)
n=ZfsNode("test",l)
n=ZfsNode(snapshot_time_format="bla", hold_name="bla", logger=l)
d=ZfsDataset(n,"test_source1@test")
sp=d.send_pipe([], prev_snapshot=None, resume_token=None, show_progress=True, raw=False, send_pipes=[], send_properties=True, write_embedded=True, zfs_compressed=True)

View File

@ -10,10 +10,10 @@ class TestZfsAutobackup31(unittest2.TestCase):
def test_no_thinning(self):
with patch('time.strftime', return_value="20101111000000"):
with patch('time.strftime', return_value="test-20101111000000"):
self.assertFalse(ZfsAutobackup("test test_target1 --no-progress --verbose --allow-empty".split(" ")).run())
with patch('time.strftime', return_value="20101111000001"):
with patch('time.strftime', return_value="test-20101111000001"):
self.assertFalse(ZfsAutobackup("test test_target1 --no-progress --verbose --allow-empty --keep-target=0 --keep-source=0 --no-thinning".split(" ")).run())
r=shelltest("zfs list -H -o name -r -t all "+TEST_POOLS)
@ -54,10 +54,10 @@ test_target1/test_source2/fs2/sub@test-20101111000001
shelltest("zfs create test_target1/a")
shelltest("zfs create test_target1/b")
with patch('time.strftime', return_value="20101111000000"):
with patch('time.strftime', return_value="test-20101111000000"):
self.assertFalse(ZfsAutobackup("test test_target1/a --no-progress --verbose --debug".split(" ")).run())
with patch('time.strftime', return_value="20101111000001"):
with patch('time.strftime', return_value="test-20101111000001"):
self.assertFalse(ZfsAutobackup("test test_target1/b --no-progress --verbose".split(" ")).run())
r=shelltest("zfs list -H -o name -r -t snapshot test_target1")
@ -75,7 +75,7 @@ test_target1/b/test_target1/a/test_source1/fs1/sub@test-20101111000000
def test_zfs_compressed(self):
with patch('time.strftime', return_value="20101111000000"):
with patch('time.strftime', return_value="test-20101111000000"):
self.assertFalse(
ZfsAutobackup("test test_target1 --no-progress --verbose --debug --zfs-compressed".split(" ")).run())

View File

@ -12,60 +12,60 @@ class TestZfsNode(unittest2.TestCase):
def test_consistent_snapshot(self):
logger = LogStub()
description = "[Source]"
node = ZfsNode("test", logger, description=description)
node = ZfsNode(snapshot_time_format="test-%Y%m%d%H%M%S", hold_name="zfs_autobackup:test", logger=logger, description=description)
with self.subTest("first snapshot"):
node.consistent_snapshot(node.selected_datasets(exclude_paths=[], exclude_received=False), "test-1", 100000)
node.consistent_snapshot(node.selected_datasets(property_name="autobackup:test",exclude_paths=[], exclude_received=False, exclude_unchanged=False, min_change=200000), "test-20101111000001", 100000)
r = shelltest("zfs list -H -o name -r -t all " + TEST_POOLS)
self.assertEqual(r, """
test_source1
test_source1/fs1
test_source1/fs1@test-1
test_source1/fs1@test-20101111000001
test_source1/fs1/sub
test_source1/fs1/sub@test-1
test_source1/fs1/sub@test-20101111000001
test_source2
test_source2/fs2
test_source2/fs2/sub
test_source2/fs2/sub@test-1
test_source2/fs2/sub@test-20101111000001
test_source2/fs3
test_source2/fs3/sub
test_target1
""")
with self.subTest("second snapshot, no changes, no snapshot"):
node.consistent_snapshot(node.selected_datasets(exclude_paths=[], exclude_received=False), "test-2", 1)
node.consistent_snapshot(node.selected_datasets(property_name="autobackup:test",exclude_paths=[], exclude_received=False, exclude_unchanged=False, min_change=200000), "test-20101111000002", 1)
r = shelltest("zfs list -H -o name -r -t all " + TEST_POOLS)
self.assertEqual(r, """
test_source1
test_source1/fs1
test_source1/fs1@test-1
test_source1/fs1@test-20101111000001
test_source1/fs1/sub
test_source1/fs1/sub@test-1
test_source1/fs1/sub@test-20101111000001
test_source2
test_source2/fs2
test_source2/fs2/sub
test_source2/fs2/sub@test-1
test_source2/fs2/sub@test-20101111000001
test_source2/fs3
test_source2/fs3/sub
test_target1
""")
with self.subTest("second snapshot, no changes, empty snapshot"):
node.consistent_snapshot(node.selected_datasets(exclude_paths=[], exclude_received=False), "test-2", 0)
node.consistent_snapshot(node.selected_datasets(property_name="autobackup:test", exclude_paths=[], exclude_received=False, exclude_unchanged=False, min_change=200000), "test-20101111000002", 0)
r = shelltest("zfs list -H -o name -r -t all " + TEST_POOLS)
self.assertEqual(r, """
test_source1
test_source1/fs1
test_source1/fs1@test-1
test_source1/fs1@test-2
test_source1/fs1@test-20101111000001
test_source1/fs1@test-20101111000002
test_source1/fs1/sub
test_source1/fs1/sub@test-1
test_source1/fs1/sub@test-2
test_source1/fs1/sub@test-20101111000001
test_source1/fs1/sub@test-20101111000002
test_source2
test_source2/fs2
test_source2/fs2/sub
test_source2/fs2/sub@test-1
test_source2/fs2/sub@test-2
test_source2/fs2/sub@test-20101111000001
test_source2/fs2/sub@test-20101111000002
test_source2/fs3
test_source2/fs3/sub
test_target1
@ -74,12 +74,12 @@ test_target1
def test_consistent_snapshot_prepostcmds(self):
logger = LogStub()
description = "[Source]"
node = ZfsNode("test", logger, description=description, debug_output=True)
node = ZfsNode(snapshot_time_format="test", hold_name="test", logger=logger, description=description, debug_output=True)
with self.subTest("Test if all cmds are executed correctly (no failures)"):
with OutputIO() as buf:
with redirect_stdout(buf):
node.consistent_snapshot(node.selected_datasets(exclude_paths=[], exclude_received=False), "test-1",
node.consistent_snapshot(node.selected_datasets(property_name="autobackup:test", exclude_paths=[], exclude_received=False, exclude_unchanged=False, min_change=1), "test-1",
0,
pre_snapshot_cmds=["echo pre1", "echo pre2"],
post_snapshot_cmds=["echo post1 >&2", "echo post2 >&2"]
@ -95,7 +95,7 @@ test_target1
with OutputIO() as buf:
with redirect_stdout(buf):
with self.assertRaises(ExecuteError):
node.consistent_snapshot(node.selected_datasets(exclude_paths=[], exclude_received=False), "test-1",
node.consistent_snapshot(node.selected_datasets(property_name="autobackup:test", exclude_paths=[], exclude_received=False, exclude_unchanged=False, min_change=1), "test-1",
0,
pre_snapshot_cmds=["echo pre1", "false", "echo pre2"],
post_snapshot_cmds=["echo post1", "false", "echo post2"]
@ -112,7 +112,7 @@ test_target1
with redirect_stdout(buf):
with self.assertRaises(ExecuteError):
#same snapshot name as before so it fails
node.consistent_snapshot(node.selected_datasets(exclude_paths=[], exclude_received=False), "test-1",
node.consistent_snapshot(node.selected_datasets(property_name="autobackup:test", exclude_paths=[], exclude_received=False, exclude_unchanged=False, min_change=1), "test-1",
0,
pre_snapshot_cmds=["echo pre1", "echo pre2"],
post_snapshot_cmds=["echo post1", "echo post2"]
@ -126,10 +126,19 @@ test_target1
def test_getselected(self):
# should be excluded by property
shelltest("zfs create test_source1/fs1/subexcluded")
shelltest("zfs set autobackup:test=false test_source1/fs1/subexcluded")
# should be excluded by being unchanged
shelltest("zfs create test_source1/fs1/unchanged")
shelltest("zfs snapshot test_source1/fs1/unchanged@somesnapshot")
logger = LogStub()
description = "[Source]"
node = ZfsNode("test", logger, description=description)
s = pformat(node.selected_datasets(exclude_paths=[], exclude_received=False))
node = ZfsNode(snapshot_time_format="test-%Y%m%d%H%M%S", hold_name="zfs_autobackup:test", logger=logger, description=description)
s = pformat(node.selected_datasets(property_name="autobackup:test", exclude_paths=[], exclude_received=False, exclude_unchanged=True, min_change=1))
print(s)
# basics
@ -137,16 +146,11 @@ test_target1
(local): test_source1/fs1/sub,
(local): test_source2/fs2/sub]""")
# caching, so expect same result after changing it
subprocess.check_call("zfs set autobackup:test=true test_source2/fs3", shell=True)
self.assertEqual(s, """[(local): test_source1/fs1,
(local): test_source1/fs1/sub,
(local): test_source2/fs2/sub]""")
def test_validcommand(self):
logger = LogStub()
description = "[Source]"
node = ZfsNode("test", logger, description=description)
node = ZfsNode(snapshot_time_format="test-%Y%m%d%H%M%S", hold_name="zfs_autobackup:test", logger=logger, description=description)
with self.subTest("test invalid option"):
self.assertFalse(node.valid_command(["zfs", "send", "--invalid-option", "nonexisting"]))
@ -156,7 +160,7 @@ test_target1
def test_supportedsendoptions(self):
logger = LogStub()
description = "[Source]"
node = ZfsNode("test", logger, description=description)
node = ZfsNode(snapshot_time_format="test-%Y%m%d%H%M%S", hold_name="zfs_autobackup:test", logger=logger, description=description)
# -D probably always supported
self.assertGreater(len(node.supported_send_options), 0)
@ -164,7 +168,7 @@ test_target1
logger = LogStub()
description = "[Source]"
# NOTE: this could hang via ssh if we don't close filehandles properly. (which was a previous bug)
node = ZfsNode("test", logger, description=description, ssh_to='localhost')
node = ZfsNode(snapshot_time_format="test-%Y%m%d%H%M%S", hold_name="zfs_autobackup:test", logger=logger, description=description, ssh_to='localhost')
self.assertIsInstance(node.supported_recv_options, list)

View File

@ -1,8 +1,8 @@
import os
import select
import subprocess
from zfs_autobackup.CmdPipe import CmdPipe, CmdItem
from zfs_autobackup.LogStub import LogStub
from .CmdPipe import CmdPipe, CmdItem
from .LogStub import LogStub
try:
from shlex import quote as cmd_quote

View File

@ -1,6 +1,6 @@
import time
from zfs_autobackup.ThinnerRule import ThinnerRule
from .ThinnerRule import ThinnerRule
class Thinner:

View File

@ -2,21 +2,19 @@ import argparse
import sys
import time
from zfs_autobackup import compressors
from zfs_autobackup.ExecuteNode import ExecuteNode
from zfs_autobackup.Thinner import Thinner
from zfs_autobackup.ZfsDataset import ZfsDataset
from zfs_autobackup.LogConsole import LogConsole
from zfs_autobackup.ZfsNode import ZfsNode
from zfs_autobackup.ThinnerRule import ThinnerRule
from . import compressors
from .ExecuteNode import ExecuteNode
from .Thinner import Thinner
from .ZfsDataset import ZfsDataset
from .LogConsole import LogConsole
from .ZfsNode import ZfsNode
from .ThinnerRule import ThinnerRule
import os.path
class ZfsAutobackup:
"""main class"""
VERSION = "3.1"
VERSION = "3.1.2"
HEADER = "zfs-autobackup v{} - (c)2021 E.H.Eefting (edwin@datux.nl)".format(VERSION)
def __init__(self, argv, print_arguments=True):
@ -38,10 +36,10 @@ class ZfsAutobackup:
parser.add_argument('--keep-target', metavar='SCHEDULE', type=str, default="10,1d1w,1w1m,1m1y",
help='Thinning schedule for old target snapshots. Default: %(default)s')
parser.add_argument('backup_name', metavar='backup-name',
parser.add_argument('backup_name', metavar='BACKUP-NAME', default=None, nargs='?',
help='Name of the backup (you should set the zfs property "autobackup:backup-name" to '
'true on filesystems you want to backup')
parser.add_argument('target_path', metavar='target-path', default=None, nargs='?',
parser.add_argument('target_path', metavar='TARGET-PATH', default=None, nargs='?',
help='Target ZFS filesystem (optional: if not specified, zfs-autobackup will only operate '
'as snapshot-tool on source)')
@ -63,15 +61,16 @@ class ZfsAutobackup:
'default)s)')
parser.add_argument('--allow-empty', action='store_true',
help='If nothing has changed, still create empty snapshots. (same as --min-change=0)')
parser.add_argument('--ignore-replicated', action='store_true',
help='Ignore datasets that seem to be replicated some other way. (No changes since '
'lastest snapshot. Useful for proxmox HA replication)')
parser.add_argument('--ignore-replicated', action='store_true', help=argparse.SUPPRESS)
parser.add_argument('--exclude-unchanged', action='store_true',
help='Exclude datasets that have no changes since any last snapshot. (Useful in combination with proxmox HA replication)')
parser.add_argument('--exclude-received', action='store_true',
help='Exclude datasets that have the origin of their autobackup: property as "received". '
'This can avoid recursive replication between two backup partners.')
parser.add_argument('--strip-path', metavar='N', default=0, type=int,
help='Number of directories to strip from target path (use 1 when cloning zones between 2 '
'SmartOS machines)')
# parser.add_argument('--buffer', default="", help='Use mbuffer with specified size to speedup zfs transfer.
# (e.g. --buffer 1G) Will also show nice progress output.')
parser.add_argument('--clear-refreservation', action='store_true',
help='Filter "refreservation" property. (recommended, saves space. same as '
@ -88,6 +87,8 @@ class ZfsAutobackup:
parser.add_argument('--rollback', action='store_true',
help='Rollback changes to the latest target snapshot before starting. (normally you can '
'prevent changes by setting the readonly property on the target_path to on)')
parser.add_argument('--force', '-F', action='store_true',
help='Use zfs -F option to force overwrite/rollback. (Useful with --strip-path=1, but use with care)')
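A hedged sketch of the 1:1 replication case the --force commit mentions; the backup name and pool name here are purely illustrative:

```
from zfs_autobackup.ZfsAutobackup import ZfsAutobackup

# --strip-path=1 drops the source pool name so every dataset keeps the same
# relative path on the target pool, and --force uses the zfs -F option so an
# existing (possibly modified) target can be rolled back and overwritten.
ZfsAutobackup("offsite backuppool --strip-path=1 --force --verbose".split(" ")).run()
```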
parser.add_argument('--destroy-incompatible', action='store_true',
help='Destroy incompatible snapshots on target. Use with care! (implies --rollback)')
parser.add_argument('--destroy-missing', metavar="SCHEDULE", type=str, default=None,
@ -106,11 +107,11 @@ class ZfsAutobackup:
parser.add_argument('--zfs-compressed', action='store_true',
help='Transfer blocks that already have zfs-compression as-is.')
parser.add_argument('--test', action='store_true',
help='dont change anything, just show what would be done (still does all read-only '
parser.add_argument('--test','--dry-run', '-n', action='store_true',
help='Dry run, dont change anything, just show what would be done (still does all read-only '
'operations)')
parser.add_argument('--verbose', action='store_true', help='verbose output')
parser.add_argument('--debug', action='store_true',
parser.add_argument('--verbose','-v', action='store_true', help='verbose output')
parser.add_argument('--debug','-d', action='store_true',
help='Show zfs commands that are executed, stops after an exception.')
parser.add_argument('--debug-output', action='store_true',
help='Show zfs commands and their output/exit codes. (noisy)')
@ -121,25 +122,40 @@ class ZfsAutobackup:
parser.add_argument('--resume', action='store_true', help=argparse.SUPPRESS)
parser.add_argument('--raw', action='store_true', help=argparse.SUPPRESS)
parser.add_argument('--exclude-received', action='store_true',
help=argparse.SUPPRESS) # probably never needed anymore
#these things all do stuff by piping zfs send/recv IO
# these things all do stuff by piping zfs send/recv IO
parser.add_argument('--send-pipe', metavar="COMMAND", default=[], action='append',
help='pipe zfs send output through COMMAND (can be used multiple times)')
parser.add_argument('--recv-pipe', metavar="COMMAND", default=[], action='append',
help='pipe zfs recv input through COMMAND (can be used multiple times)')
parser.add_argument('--compress', metavar='TYPE', default=None, choices=compressors.choices(), help='Use compression during transfer, zstd-fast recommended. ({})'.format(", ".join(compressors.choices())))
parser.add_argument('--rate', metavar='DATARATE', default=None, help='Limit data transfer rate (e.g. 128K. requires mbuffer.)')
parser.add_argument('--buffer', metavar='SIZE', default=None, help='Add zfs send and recv buffers to smooth out IO bursts. (e.g. 128M. requires mbuffer)')
parser.add_argument('--compress', metavar='TYPE', default=None, nargs='?', const='zstd-fast',
choices=compressors.choices(),
help='Use compression during transfer, defaults to zstd-fast if TYPE is not specified. ({})'.format(
", ".join(compressors.choices())))
parser.add_argument('--rate', metavar='DATARATE', default=None,
help='Limit data transfer rate (e.g. 128K. requires mbuffer.)')
parser.add_argument('--buffer', metavar='SIZE', default=None,
help='Add zfs send and recv buffers to smooth out IO bursts. (e.g. 128M. requires mbuffer)')
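# Illustrative sketch of how nargs='?' with const behaves for --compress here:
#   --compress        -> args.compress == 'zstd-fast'   (flag given without TYPE)
#   --compress xz     -> args.compress == 'xz'          (explicit TYPE from compressors.choices())
#   flag omitted      -> args.compress is None          (no compression pipes are added)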
parser.add_argument('--snapshot-format', metavar='FORMAT', default="{}-%Y%m%d%H%M%S",
help='Snapshot naming format. Default: %(default)s')
parser.add_argument('--property-format', metavar='FORMAT', default="autobackup:{}",
help='Select property naming format. Default: %(default)s')
parser.add_argument('--hold-format', metavar='FORMAT', default="zfs_autobackup:{}",
help='Hold naming format. Default: %(default)s')
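# Illustration with a hypothetical backup name "offsite1": the formats are expanded
# further down via str.format(), and the snapshot format additionally goes through
# time.strftime():
#   "autobackup:{}"      -> "autobackup:offsite1"        (selection property)
#   "{}-%Y%m%d%H%M%S"    -> "offsite1-%Y%m%d%H%M%S"      -> e.g. "offsite1-20220614114330"
#   "zfs_autobackup:{}"  -> "zfs_autobackup:offsite1"    (hold name)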
parser.add_argument('--version', action='store_true',
help='Show version.')
# note: args is the only global variable we use, since it's a global readonly setting anyway
args = parser.parse_args(argv)
self.args = args
if args.version:
print(self.HEADER)
sys.exit(255)
# auto enable progress?
if sys.stderr.isatty() and not args.no_progress:
args.progress = True
@ -159,6 +175,11 @@ class ZfsAutobackup:
self.log = LogConsole(show_debug=self.args.debug, show_verbose=self.args.verbose, color=sys.stdout.isatty())
self.verbose(self.HEADER)
if args.backup_name==None:
parser.print_usage()
self.log.error("Please specify BACKUP-NAME")
sys.exit(255)
if args.resume:
self.warning("The --resume option isn't needed anymore (its autodetected now)")
@ -176,6 +197,10 @@ class ZfsAutobackup:
if args.compress and args.zfs_compressed:
self.warning("Using --compress with --zfs-compressed, might be inefficient.")
if args.ignore_replicated:
self.warning("--ignore-replicated has been renamed, using --exclude-unchanged")
args.exclude_unchanged = True
def verbose(self, txt):
self.log.verbose(txt)
@ -288,12 +313,12 @@ class ZfsAutobackup:
def get_send_pipes(self, logger):
"""determine the zfs send pipe"""
ret=[]
ret = []
# IO buffer
if self.args.buffer:
logger("zfs send buffer : {}".format(self.args.buffer))
ret.extend([ ExecuteNode.PIPE, "mbuffer", "-q", "-s128k", "-m"+self.args.buffer ])
ret.extend([ExecuteNode.PIPE, "mbuffer", "-q", "-s128k", "-m" + self.args.buffer])
# custom pipes
for send_pipe in self.args.send_pipe:
@ -302,27 +327,26 @@ class ZfsAutobackup:
logger("zfs send custom pipe : {}".format(send_pipe))
# compression
if self.args.compress!=None:
if self.args.compress != None:
ret.append(ExecuteNode.PIPE)
cmd=compressors.compress_cmd(self.args.compress)
cmd = compressors.compress_cmd(self.args.compress)
ret.extend(cmd)
logger("zfs send compression : {}".format(" ".join(cmd)))
# transfer rate
if self.args.rate:
logger("zfs send transfer rate : {}".format(self.args.rate))
ret.extend([ ExecuteNode.PIPE, "mbuffer", "-q", "-s128k", "-m16M", "-R"+self.args.rate ])
ret.extend([ExecuteNode.PIPE, "mbuffer", "-q", "-s128k", "-m16M", "-R" + self.args.rate])
return ret
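# Sketch of the send-side pipe list this returns, assuming --buffer 128M, --compress zstd-fast
# and --rate 1M with no custom --send-pipe (ExecuteNode.PIPE marks where the command is split
# into a pipeline; the compressor command comes from compressors.COMPRESS_CMDS):
#   [PIPE, "mbuffer", "-q", "-s128k", "-m128M",
#    PIPE, <compress_cmd for zstd-fast>,
#    PIPE, "mbuffer", "-q", "-s128k", "-m16M", "-R1M"]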
def get_recv_pipes(self, logger):
ret=[]
ret = []
# decompression
if self.args.compress!=None:
cmd=compressors.decompress_cmd(self.args.compress)
if self.args.compress != None:
cmd = compressors.decompress_cmd(self.args.compress)
ret.extend(cmd)
ret.append(ExecuteNode.PIPE)
logger("zfs recv decompression : {}".format(" ".join(cmd)))
@ -335,13 +359,36 @@ class ZfsAutobackup:
# IO buffer
if self.args.buffer:
#only add second buffer if its usefull. (e.g. non local transfer or other pipes active)
if self.args.ssh_source!=None or self.args.ssh_target!=None or self.args.recv_pipe or self.args.send_pipe or self.args.compress!=None:
# only add a second buffer if it's useful (e.g. non-local transfer or other pipes active)
if self.args.ssh_source != None or self.args.ssh_target != None or self.args.recv_pipe or self.args.send_pipe or self.args.compress != None:
logger("zfs recv buffer : {}".format(self.args.buffer))
ret.extend(["mbuffer", "-q", "-s128k", "-m"+self.args.buffer, ExecuteNode.PIPE ])
ret.extend(["mbuffer", "-q", "-s128k", "-m" + self.args.buffer, ExecuteNode.PIPE])
return ret
def make_target_name(self, source_dataset):
"""make target_name from a source_dataset"""
stripped=source_dataset.lstrip_path(self.args.strip_path)
if stripped!="":
return self.args.target_path + "/" + stripped
else:
return self.args.target_path
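# Example with illustrative names: given target_path "backup/host1", source dataset
# "rpool/data/vm-100-disk-0" maps to "backup/host1/data/vm-100-disk-0" with --strip-path=1
# and to "backup/host1/vm-100-disk-0" with --strip-path=2; stripping all three components
# leaves an empty string, so the pool-target "backup/host1" itself is returned.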
def check_target_names(self, source_node, source_datasets, target_node):
"""check all target names for collesions etc due to strip-options"""
self.debug("Checking target names:")
target_datasets={}
for source_dataset in source_datasets:
target_name = self.make_target_name(source_dataset)
source_dataset.debug("-> {}".format(target_name))
if target_name in target_datasets:
raise Exception("Target collision: Target path {} encountered twice, due to: {} and {}".format(target_name, source_dataset, target_datasets[target_name]))
target_datasets[target_name]=source_dataset
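# Example of the collision this now rejects (hypothetical datasets): with --strip-path=2,
# "rpool/data/web" and "tank/data/web" both strip to "web" and would map to the same
# target path, so the run aborts instead of the second dataset replicating over the first.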
# NOTE: this method also uses self.args. args that need extra processing are passed as function parameters:
def sync_datasets(self, source_node, source_datasets, target_node):
"""Sync datasets, or thin-only on both sides
@ -350,8 +397,8 @@ class ZfsAutobackup:
:type source_node: ZfsNode
"""
send_pipes=self.get_send_pipes(source_node.verbose)
recv_pipes=self.get_recv_pipes(target_node.verbose)
send_pipes = self.get_send_pipes(source_node.verbose)
recv_pipes = self.get_recv_pipes(target_node.verbose)
fail_count = 0
count = 0
@ -365,13 +412,14 @@ class ZfsAutobackup:
try:
# determine corresponding target_dataset
target_name = self.args.target_path + "/" + source_dataset.lstrip_path(self.args.strip_path)
target_name = self.make_target_name(source_dataset)
target_dataset = ZfsDataset(target_node, target_name)
target_datasets.append(target_dataset)
# ensure parents exists
# TODO: this isn't perfect yet, in some cases it can create parents when it shouldn't.
if not self.args.no_send \
and target_dataset.parent \
and target_dataset.parent not in target_datasets \
and not target_dataset.parent.exists:
target_dataset.parent.create_filesystem(parents=True)
@ -391,7 +439,8 @@ class ZfsAutobackup:
no_send=self.args.no_send,
destroy_incompatible=self.args.destroy_incompatible,
send_pipes=send_pipes, recv_pipes=recv_pipes,
decrypt=self.args.decrypt, encrypt=self.args.encrypt, zfs_compressed=self.args.zfs_compressed )
decrypt=self.args.decrypt, encrypt=self.args.encrypt,
zfs_compressed=self.args.zfs_compressed, force=self.args.force)
except Exception as e:
fail_count = fail_count + 1
source_dataset.error("FAILED: " + str(e))
@ -417,20 +466,6 @@ class ZfsAutobackup:
for source_dataset in source_datasets:
source_dataset.thin(skip_holds=True)
def filter_replicated(self, datasets):
if not self.args.ignore_replicated:
return datasets
else:
self.set_title("Filtering already replicated filesystems")
ret = []
for dataset in datasets:
if dataset.is_changed(self.args.min_change):
ret.append(dataset)
else:
dataset.verbose("Ignoring, already replicated")
return ret
def filter_properties_list(self):
if self.args.filter_properties:
@ -462,6 +497,19 @@ class ZfsAutobackup:
if self.args.test:
self.warning("TEST MODE - SIMULATING WITHOUT MAKING ANY CHANGES")
#format all the names
property_name = self.args.property_format.format(self.args.backup_name)
snapshot_time_format = self.args.snapshot_format.format(self.args.backup_name)
hold_name = self.args.hold_format.format(self.args.backup_name)
self.verbose("")
self.verbose("Selecting dataset property : {}".format(property_name))
self.verbose("Snapshot format : {}".format(snapshot_time_format))
if not self.args.no_holds:
self.verbose("Hold name : {}".format(hold_name))
################ create source zfsNode
self.set_title("Source settings")
@ -470,20 +518,20 @@ class ZfsAutobackup:
source_thinner = None
else:
source_thinner = Thinner(self.args.keep_source)
source_node = ZfsNode(self.args.backup_name, self, ssh_config=self.args.ssh_config,
source_node = ZfsNode(snapshot_time_format=snapshot_time_format, hold_name=hold_name, logger=self, ssh_config=self.args.ssh_config,
ssh_to=self.args.ssh_source, readonly=self.args.test,
debug_output=self.args.debug_output, description=description, thinner=source_thinner)
source_node.verbose(
"Selects all datasets that have property 'autobackup:{}=true' (or children of datasets that have "
"'autobackup:{}=child')".format(
self.args.backup_name, self.args.backup_name))
################# select source datasets
self.set_title("Selecting")
#Note: Before version v3.1-beta5, we always used exclude_received. This was a problem if you wanto to replicate an existing backup to another host and use the same backupname/snapshots.
# Note: Before version v3.1-beta5, we always used exclude_received. This was a problem if you wanted to
# replicate an existing backup to another host and use the same backupname/snapshots. However, exclude_received
# may still need to be used to explicitly exclude a backup with the 'received' source to avoid accidental
# recursive replication of a zvol that is currently being received in another session (as it will have changes).
exclude_paths = []
exclude_received=self.args.exclude_received
exclude_received = self.args.exclude_received
if self.args.ssh_source == self.args.ssh_target:
if self.args.target_path:
# target and source are the same, make sure to exclude target_path
@ -491,25 +539,24 @@ class ZfsAutobackup:
exclude_paths.append(self.args.target_path)
else:
self.warning("Source and target are on the same host, excluding received datasets from selection.")
exclude_received=True
exclude_received = True
selected_source_datasets = source_node.selected_datasets(exclude_received=exclude_received,
exclude_paths=exclude_paths)
if not selected_source_datasets:
source_datasets = source_node.selected_datasets(property_name=property_name,exclude_received=exclude_received,
exclude_paths=exclude_paths,
exclude_unchanged=self.args.exclude_unchanged,
min_change=self.args.min_change)
if not source_datasets:
self.error(
"No source filesystems selected, please do a 'zfs set autobackup:{0}=true' on the source datasets "
"you want to select.".format(
self.args.backup_name))
return 255
# filter out already replicated stuff?
source_datasets = self.filter_replicated(selected_source_datasets)
################# snapshotting
if not self.args.no_snapshot:
self.set_title("Snapshotting")
source_node.consistent_snapshot(source_datasets, source_node.new_snapshotname(),
snapshot_name=time.strftime(snapshot_time_format)
source_node.consistent_snapshot(source_datasets, snapshot_name,
min_changed_bytes=self.args.min_change,
pre_snapshot_cmds=self.args.pre_snapshot_cmd,
post_snapshot_cmds=self.args.post_snapshot_cmd)
@ -524,7 +571,7 @@ class ZfsAutobackup:
target_thinner = None
else:
target_thinner = Thinner(self.args.keep_target)
target_node = ZfsNode(self.args.backup_name, self, ssh_config=self.args.ssh_config,
target_node = ZfsNode(snapshot_time_format=snapshot_time_format, hold_name=hold_name, logger=self, ssh_config=self.args.ssh_config,
ssh_to=self.args.ssh_target,
readonly=self.args.test, debug_output=self.args.debug_output,
description="[Target]",
@ -539,6 +586,9 @@ class ZfsAutobackup:
raise (Exception(
"Target path '{}' does not exist. Please create this dataset first.".format(target_dataset)))
# check for collisions due to strip-path
self.check_target_names(source_node, source_datasets, target_node)
# do the actual sync
# NOTE: even with no_send, no_thinning and no_snapshot it does something useful, because it checks the common snapshots and shows incompatible snapshots
fail_count = self.sync_datasets(

View File

@ -1,8 +1,8 @@
import re
import time
from zfs_autobackup.CachedProperty import CachedProperty
from zfs_autobackup.ExecuteNode import ExecuteError
from .CachedProperty import CachedProperty
from .ExecuteNode import ExecuteError
class ZfsDataset:
@ -79,7 +79,11 @@ class ZfsDataset:
Args:
:type count: int
"""
return "/".join(self.split_path()[count:])
components=self.split_path()
if count>len(components):
raise Exception("Trying to strip too much from path ({} items from {})".format(count, self.name))
return "/".join(components[count:])
def rstrip_path(self, count):
"""return name with last count components stripped
@ -112,7 +116,7 @@ class ZfsDataset:
"""true if this dataset is a snapshot"""
return self.name.find("@") != -1
def is_selected(self, value, source, inherited, exclude_received, exclude_paths):
def is_selected(self, value, source, inherited, exclude_received, exclude_paths, exclude_unchanged, min_change):
"""determine if dataset should be selected for backup (called from
ZfsNode)
@ -122,6 +126,12 @@ class ZfsDataset:
:type source: str
:type inherited: bool
:type exclude_received: bool
:type exclude_unchanged: bool
:type min_change: int
:param value: Value of the zfs property ("false"/"true"/"child"/"-")
:param source: Source of the zfs property ("local"/"received", "-")
:param inherited: True if the value/source was inherited from a higher dataset.
"""
# sanity checks
@ -135,28 +145,41 @@ class ZfsDataset:
raise (Exception(
"{} autobackup-property has illegal value: '{}'".format(self.name, value)))
# non specified, ignore
if value == "-":
return False
# only select children of this dataset, ignore the dataset itself
if value == "child" and not inherited:
return False
# manually excluded by property
if value == "false":
self.verbose("Excluded")
return False
# from here on the dataset is selected by property, now do additional exclusion checks
# our path starts with one of the excluded paths?
for exclude_path in exclude_paths:
if self.name.startswith(exclude_path):
# if self.name.startswith(exclude_path):
if (self.name + "/").startswith(exclude_path + "/"):
# too noisy for verbose
self.debug("Excluded (in exclude list)")
self.debug("Excluded (path in exclude list)")
return False
# now determine if its actually selected
if value == "false":
self.verbose("Excluded (disabled)")
if source == "received":
if exclude_received:
self.verbose("Excluded (dataset already received)")
return False
if exclude_unchanged and not self.is_changed(min_change):
self.verbose("Excluded (unchanged since last snapshot)")
return False
elif value == "true" or (value == "child" and inherited):
if source == "local":
self.verbose("Selected")
return True
elif source == "received":
if exclude_received:
self.verbose("Excluded (dataset already received)")
return False
else:
self.verbose("Selected")
return True
self.verbose("Selected")
return True
@CachedProperty
def parent(self):
@ -169,7 +192,11 @@ class ZfsDataset:
if self.is_snapshot:
return ZfsDataset(self.zfs_node, self.filesystem_name)
else:
return ZfsDataset(self.zfs_node, self.rstrip_path(1))
stripped=self.rstrip_path(1)
if stripped:
return ZfsDataset(self.zfs_node, stripped)
else:
return None
# NOTE: unused for now
# def find_prev_snapshot(self, snapshot, also_other_snapshots=False):
@ -296,21 +323,20 @@ class ZfsDataset:
if min_changed_bytes == 0:
return True
if int(self.properties['written']) < min_changed_bytes:
return False
else:
return True
def is_ours(self):
"""return true if this snapshot is created by this backup_name"""
if re.match("^" + self.zfs_node.backup_name + "-[0-9]*$", self.snapshot_name):
return True
else:
"""return true if this snapshot name has format"""
try:
test = self.timestamp
except ValueError as e:
return False
@property
def _hold_name(self):
return "zfs_autobackup:" + self.zfs_node.backup_name
return True
@property
def holds(self):
@ -322,30 +348,26 @@ class ZfsDataset:
def is_hold(self):
"""did we hold this snapshot?"""
return self._hold_name in self.holds
return self.zfs_node.hold_name in self.holds
def hold(self):
"""hold dataset"""
self.debug("holding")
self.zfs_node.run(["zfs", "hold", self._hold_name, self.name], valid_exitcodes=[0, 1])
self.zfs_node.run(["zfs", "hold", self.zfs_node.hold_name, self.name], valid_exitcodes=[0, 1])
def release(self):
"""release dataset"""
if self.zfs_node.readonly or self.is_hold():
self.debug("releasing")
self.zfs_node.run(["zfs", "release", self._hold_name, self.name], valid_exitcodes=[0, 1])
self.zfs_node.run(["zfs", "release", self.zfs_node.hold_name, self.name], valid_exitcodes=[0, 1])
@property
def timestamp(self):
"""get timestamp from snapshot name. Only works for our own snapshots
with the correct format.
"""
time_str = re.findall("^.*-([0-9]*)$", self.snapshot_name)[0]
if len(time_str) != 14:
raise (Exception("Snapshot has invalid timestamp in name: {}".format(self.snapshot_name)))
# new format:
time_secs = time.mktime(time.strptime(time_str, "%Y%m%d%H%M%S"))
time_secs = time.mktime(time.strptime(self.snapshot_name, self.zfs_node.snapshot_time_format))
return time_secs
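# Sketch with illustrative values (snapshot_time_format is the --snapshot-format string with
# the backup name already substituted):
#   time.mktime(time.strptime("offsite1-20220614114330", "offsite1-%Y%m%d%H%M%S"))
# yields the snapshot time as epoch seconds; names that don't match the format raise
# ValueError, which is what is_ours() above relies on.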
def from_names(self, names):
@ -563,7 +585,7 @@ class ZfsDataset:
return output_pipe
def recv_pipe(self, pipe, features, recv_pipes, filter_properties=None, set_properties=None, ignore_exit_code=False):
def recv_pipe(self, pipe, features, recv_pipes, filter_properties=None, set_properties=None, ignore_exit_code=False, force=False):
"""starts a zfs recv for this snapshot and uses pipe as input
note: you can use it both on a snapshot or filesystem object. The
@ -604,6 +626,9 @@ class ZfsDataset:
# verbose output
cmd.append("-v")
if force:
cmd.append("-F")
if 'extensible_dataset' in features and "-s" in self.zfs_node.supported_recv_options:
# support resuming
self.debug("Enabled resume support")
@ -634,7 +659,7 @@ class ZfsDataset:
def transfer_snapshot(self, target_snapshot, features, prev_snapshot, show_progress,
filter_properties, set_properties, ignore_recv_exit_code, resume_token,
raw, send_properties, write_embedded, send_pipes, recv_pipes, zfs_compressed):
raw, send_properties, write_embedded, send_pipes, recv_pipes, zfs_compressed, force):
"""transfer this snapshot to target_snapshot. specify prev_snapshot for
incremental transfer
@ -675,7 +700,7 @@ class ZfsDataset:
pipe = self.send_pipe(features=features, show_progress=show_progress, prev_snapshot=prev_snapshot,
resume_token=resume_token, raw=raw, send_properties=send_properties, write_embedded=write_embedded, send_pipes=send_pipes, zfs_compressed=zfs_compressed)
target_snapshot.recv_pipe(pipe, features=features, filter_properties=filter_properties,
set_properties=set_properties, ignore_exit_code=ignore_recv_exit_code, recv_pipes=recv_pipes)
set_properties=set_properties, ignore_exit_code=ignore_recv_exit_code, recv_pipes=recv_pipes, force=force)
def abort_resume(self):
"""abort current resume state"""
@ -972,7 +997,7 @@ class ZfsDataset:
def sync_snapshots(self, target_dataset, features, show_progress, filter_properties, set_properties,
ignore_recv_exit_code, holds, rollback, decrypt, encrypt, also_other_snapshots,
no_send, destroy_incompatible, send_pipes, recv_pipes, zfs_compressed):
no_send, destroy_incompatible, send_pipes, recv_pipes, zfs_compressed, force):
"""sync this dataset's snapshots to target_dataset, while also thinning
out old snapshots along the way.
@ -993,6 +1018,8 @@ class ZfsDataset:
:type destroy_incompatible: bool
"""
self.verbose("sending to {}".format(target_dataset))
(common_snapshot, start_snapshot, source_obsoletes, target_obsoletes, target_keeps,
incompatible_target_snapshots) = \
self._plan_sync(target_dataset=target_dataset, also_other_snapshots=also_other_snapshots)
@ -1055,7 +1082,9 @@ class ZfsDataset:
filter_properties=active_filter_properties,
set_properties=active_set_properties,
ignore_recv_exit_code=ignore_recv_exit_code,
resume_token=resume_token, write_embedded=write_embedded, raw=raw, send_properties=send_properties, send_pipes=send_pipes, recv_pipes=recv_pipes, zfs_compressed=zfs_compressed)
resume_token=resume_token, write_embedded=write_embedded, raw=raw,
send_properties=send_properties, send_pipes=send_pipes,
recv_pipes=recv_pipes, zfs_compressed=zfs_compressed, force=force)
resume_token = None

View File

@ -6,20 +6,24 @@ import subprocess
import sys
import time
from zfs_autobackup.ExecuteNode import ExecuteNode
from zfs_autobackup.Thinner import Thinner
from zfs_autobackup.CachedProperty import CachedProperty
from zfs_autobackup.ZfsPool import ZfsPool
from zfs_autobackup.ZfsDataset import ZfsDataset
from zfs_autobackup.ExecuteNode import ExecuteError
from .ExecuteNode import ExecuteNode
from .Thinner import Thinner
from .CachedProperty import CachedProperty
from .ZfsPool import ZfsPool
from .ZfsDataset import ZfsDataset
from .ExecuteNode import ExecuteError
class ZfsNode(ExecuteNode):
"""a node that contains zfs datasets. implements global (systemwide/pool wide) zfs commands"""
def __init__(self, backup_name, logger, ssh_config=None, ssh_to=None, readonly=False, description="",
def __init__(self, snapshot_time_format, hold_name, logger, ssh_config=None, ssh_to=None, readonly=False,
description="",
debug_output=False, thinner=None):
self.backup_name = backup_name
self.snapshot_time_format = snapshot_time_format
self.hold_name = hold_name
self.description = description
self.logger = logger
@ -54,7 +58,7 @@ class ZfsNode(ExecuteNode):
if self.__thinner is not None:
return self.__thinner.thin(objects, keep_objects)
else:
return ( keep_objects, [] )
return (keep_objects, [])
@CachedProperty
def supported_send_options(self):
@ -129,8 +133,9 @@ class ZfsNode(ExecuteNode):
bytes_left = self._progress_total_bytes - bytes_
minutes_left = int((bytes_left / (bytes_ / (time.time() - self._progress_start_time))) / 60)
self.logger.progress("Transfer {}% {}MB/s (total {}MB, {} minutes left)".format(percentage, speed, int(
self._progress_total_bytes / (1024 * 1024)), minutes_left))
self.logger.progress(
"Transfer {}% {}MB/s (total {}MB, {} minutes left)".format(percentage, speed, int(
self._progress_total_bytes / (1024 * 1024)), minutes_left))
return
@ -158,11 +163,8 @@ class ZfsNode(ExecuteNode):
def debug(self, txt):
self.logger.debug("{} {}".format(self.description, txt))
def new_snapshotname(self):
"""determine uniq new snapshotname"""
return self.backup_name + "-" + time.strftime("%Y%m%d%H%M%S")
def consistent_snapshot(self, datasets, snapshot_name, min_changed_bytes, pre_snapshot_cmds=[], post_snapshot_cmds=[]):
def consistent_snapshot(self, datasets, snapshot_name, min_changed_bytes, pre_snapshot_cmds=[],
post_snapshot_cmds=[]):
"""create a consistent (atomic) snapshot of specified datasets, per pool.
"""
@ -214,9 +216,7 @@ class ZfsNode(ExecuteNode):
except Exception as e:
pass
def selected_datasets(self, exclude_received, exclude_paths):
def selected_datasets(self, property_name, exclude_received, exclude_paths, exclude_unchanged, min_change):
"""determine filesystems that should be backed up by looking at the special autobackup-property, systemwide
returns: list of ZfsDataset
@ -227,7 +227,7 @@ class ZfsNode(ExecuteNode):
# get all source filesystems that have the backup property
lines = self.run(tab_split=True, readonly=True, cmd=[
"zfs", "get", "-t", "volume,filesystem", "-o", "name,value,source", "-H",
"autobackup:" + self.backup_name
property_name
])
# The returnlist of selected ZfsDataset's:
@ -251,7 +251,9 @@ class ZfsNode(ExecuteNode):
source = raw_source
# determine it
if dataset.is_selected(value=value, source=source, inherited=inherited, exclude_received=exclude_received, exclude_paths=exclude_paths):
if dataset.is_selected(value=value, source=source, inherited=inherited, exclude_received=exclude_received,
exclude_paths=exclude_paths, exclude_unchanged=exclude_unchanged,
min_change=min_change):
selected_filesystems.append(dataset)
return selected_filesystems

View File

@ -1,4 +1,4 @@
from zfs_autobackup.CachedProperty import CachedProperty
from .CachedProperty import CachedProperty
class ZfsPool():

View File

@ -3,7 +3,7 @@
def cli():
import sys
from zfs_autobackup.ZfsAutobackup import ZfsAutobackup
from .ZfsAutobackup import ZfsAutobackup
zfs_autobackup = ZfsAutobackup(sys.argv[1:], False)
sys.exit(zfs_autobackup.run())

View File

@ -35,6 +35,12 @@ COMPRESS_CMDS = {
'dcmd': 'zstdmt',
'dargs': [ '-dc' ],
},
'zstd-adapt': {
'cmd': 'zstdmt',
'args': [ '--adapt' ],
'dcmd': 'zstdmt',
'dargs': [ '-dc' ],
},
'xz': {
'cmd': 'xz',
'args': [],