Compare commits: v3.2-beta1...v3.2.1 (28 commits)

Commit SHA1s:
62f078eaec, fd1e7d5b33, 03ff730a70, 2c5d3c50e1, ee1d17b6ff, 0ff989691f,
088710fd39, c12d63470f, 4df9e52a97, 3155702c47, a77fc9afe7, 7533d9bcc2,
bc57ee8d08, be53c454da, 8a6a62ce9c, 428e6edc13, 23fbafab42, cdd151d45f,
ab43689a0f, 535e21863b, a078be3e9e, 00b230792a, 8b600c9e9c, 60840a4213,
7f91473188, e106e7f1da, d531e9fdaf, a322eb96ae
.github/ISSUE_TEMPLATE/issue.md (new file, +11 lines)

@@ -0,0 +1,11 @@
+---
+name: Issue
+about: 'Use this if you have issues or feature requests'
+
+title: ''
+labels: ''
+assignees: ''
+
+---
+
+(Please add the commandline that you use to the issue. Also at least add the output of --verbose. Sometimes it helps if you add the output of --debug-output instead, but it's huge, so use an attachment for that.)
.github/dependabot.yml (new file, +11 lines)

@@ -0,0 +1,11 @@
+# To get started with Dependabot version updates, you'll need to specify which
+# package ecosystems to update and where the package manifests are located.
+# Please see the documentation for all configuration options:
+# https://docs.github.com/github/administering-a-repository/configuration-options-for-dependency-updates
+
+version: 2
+updates:
+  - package-ecosystem: "python" # See documentation for possible values
+    directory: "/" # Location of package manifests
+    schedule:
+      interval: "weekly"
.github/workflows/codeql-analysis.yml (11 changed lines)

@@ -17,8 +17,8 @@ on:
   pull_request:
     # The branches below must be a subset of the branches above
     branches: [ master ]
-  schedule:
-    - cron: '26 23 * * 3'
+  # schedule:
+  #   - cron: '26 23 * * 3'
 
 jobs:
   analyze:
@@ -40,9 +40,10 @@ jobs:
     - name: Checkout repository
       uses: actions/checkout@v2
+
 
     # Initializes the CodeQL tools for scanning.
     - name: Initialize CodeQL
-      uses: github/codeql-action/init@v1
+      uses: github/codeql-action/init@v2
       with:
         languages: ${{ matrix.language }}
         # If you wish to specify custom queries, you can do so here or in a config file.
@@ -53,7 +54,7 @@ jobs:
     # Autobuild attempts to build any compiled languages (C/C++, C#, or Java).
     # If this step fails, then you should remove it and run the build manually (see below)
     - name: Autobuild
-      uses: github/codeql-action/autobuild@v1
+      uses: github/codeql-action/autobuild@v2
 
     # ℹ️ Command-line programs to run using the OS shell.
     # 📚 https://git.io/JvXDl
@@ -67,4 +68,4 @@ jobs:
     #   make release
 
     - name: Perform CodeQL Analysis
-      uses: github/codeql-action/analyze@v1
+      uses: github/codeql-action/analyze@v2
.github/workflows/regression.yml (42 changed lines)

@@ -6,15 +6,12 @@ on: ["push", "pull_request"]
 
 
 jobs:
 
-  ubuntu20:
-    runs-on: ubuntu-20.04
+  ubuntu22:
+    runs-on: ubuntu-22.04
 
     steps:
     - name: Checkout
-      uses: actions/checkout@v2.3.4
-
-
+      uses: actions/checkout@v3.5.0
 
     - name: Prepare
       run: sudo apt update && sudo apt install zfsutils-linux lzop pigz zstd gzip xz-utils lz4 mbuffer && sudo -H pip3 install coverage unittest2 mock==3.0.5 coveralls
@@ -29,17 +26,15 @@ jobs:
         GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
       run: coveralls --service=github || true
 
-  ubuntu18:
-    runs-on: ubuntu-18.04
+  ubuntu20:
+    runs-on: ubuntu-20.04
 
     steps:
     - name: Checkout
-      uses: actions/checkout@v2.3.4
-
+      uses: actions/checkout@v3.5.0
 
     - name: Prepare
-      run: sudo apt update && sudo apt install zfsutils-linux python3-setuptools lzop pigz zstd gzip xz-utils liblz4-tool mbuffer && sudo -H pip3 install coverage unittest2 mock==3.0.5 coveralls
+      run: sudo apt update && sudo apt install zfsutils-linux lzop pigz zstd gzip xz-utils lz4 mbuffer && sudo -H pip3 install coverage unittest2 mock==3.0.5 coveralls
 
 
     - name: Regression test
@@ -51,26 +46,3 @@ jobs:
         GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
       run: coveralls --service=github || true
-
-  ubuntu18_python2:
-    runs-on: ubuntu-18.04
-
-    steps:
-    - name: Checkout
-      uses: actions/checkout@v2.3.4
-
-    - name: Set up Python 2.x
-      uses: actions/setup-python@v2
-      with:
-        python-version: '2.x'
-
-    - name: Prepare
-      run: sudo apt update && sudo apt install zfsutils-linux python-setuptools lzop pigz zstd gzip xz-utils liblz4-tool mbuffer && sudo -H pip install coverage unittest2 mock==3.0.5 coveralls colorama
-
-    - name: Regression test
-      run: sudo -E ./tests/run_tests
-
-    - name: Coveralls
-      env:
-        GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
-        COVERALLS_REPO_TOKEN: ${{ secrets.GITHUB_TOKEN }}
-      run: coveralls --service=github || true
README.md

@@ -14,14 +14,14 @@ You can select what to backup by setting a custom `ZFS property`. This makes it
 
 Other settings are just specified on the commandline: Simply setup and test your zfs-autobackup command and fix all the issues you might encounter. When you're done you can just copy/paste your command to a cron or script.
 
-Since it's using ZFS commands, you can see what it's actually doing by specifying `--debug`. This also helps a lot if you run into some strange problem or error. You can just copy-paste the command that fails and play around with it on the commandline. (something I missed in other tools)
+Since it's using ZFS commands, you can see what it's actually doing by specifying `--debug`. This also helps a lot if you run into some strange problem or errors. You can just copy-paste the command that fails and play around with it on the commandline. (something I missed in other tools)
 
 An important feature that's missing from other tools is a reliable `--test` option: This allows you to see what zfs-autobackup will do and tune your parameters. It will do everything, except make changes to your system.
 
 ## Features
 
 * Works across operating systems: Tested with **Linux**, **FreeBSD/FreeNAS** and **SmartOS**.
-* Low learning curve: no complex daemons or services, no additional software or networking needed. (Only read this page)
+* Low learning curve: no complex daemons or services, no additional software or networking needed.
 * Plays nicely with existing replication systems. (Like Proxmox HA)
 * Automatically selects filesystems to backup by looking at a simple ZFS property.
 * Creates consistent snapshots. (takes all snapshots at once, atomicly.)
@@ -31,6 +31,7 @@ An important feature that's missing from other tools is a reliable `--test` option
 * "pull" remote data from a server via SSH and backup it locally.
+* "pull+push": Zero trust between source and target.
 * Can be scheduled via simple cronjob or run directly from commandline.
 * Also supports complex backup geometries.
 * ZFS encryption support: Can decrypt / encrypt or even re-encrypt datasets during transfer.
 * Supports sending with compression. (Using pigz, zstd etc)
 * IO buffering to speed up transfer.
@@ -45,6 +46,7 @@ An important feature that's missing from other tools is a reliable `--test` option
 * Easy migration from other zfs backup systems to zfs-autobackup.
+* Gracefully handles datasets that no longer exist on source.
 * Complete and clean logging.
 * All code is regression tested against actual ZFS environments.
 * Easy installation:
   * Just install zfs-autobackup via pip.
   * Only needs to be installed on one side.
@@ -60,3 +62,4 @@ Please look at our wiki to [Get started](https://github.com/psy0rz/zfs_autobackup/wiki)
 
 This project was sponsored by:
 
 * JetBrains (Provided me with a license for their whole professional product line, https://www.jetbrains.com/pycharm/ )
+* [DatuX](https://www.datux.nl)
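The property-driven selection described in the README excerpt above can be sketched in a few shell commands. This is a minimal illustration, not part of the diff: the pool and target names (rpool/data, tank/backups) are hypothetical, while the autobackup:test property and the CLI shape match what the regression tests in this changeset use.

    # mark a dataset tree for the backup group named "test"
    zfs set autobackup:test=true rpool/data

    # dry run first: the --test option shows what would happen without changing anything
    zfs-autobackup --test --verbose test tank/backups

    # real run, e.g. from cron
    zfs-autobackup --verbose test tank/backups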
scripts/enctest (new executable file, +33 lines)

@@ -0,0 +1,33 @@
+#!/bin/bash
+
+#NOTE: usually the speed is the same, but the cpu usage is much higher for ccm
+
+set -e
+
+D=/enctest123
+DS=rpool$D
+
+echo sdflsakjfklsjfsda > key.txt
+
+dd if=/dev/urandom of=dump.bin bs=1M count=10000
+
+#readcache
+cat dump.bin > /dev/null
+
+zfs destroy $DS || true
+
+zfs create $DS
+
+echo Unencrypted:
+sync
+time ( cp dump.bin $D/dump.bin; sync )
+
+
+for E in aes-128-ccm aes-192-ccm aes-256-ccm aes-128-gcm aes-192-gcm aes-256-gcm; do
+  zfs destroy $DS
+  zfs create -o encryption=$E -o keylocation=file://`pwd`/key.txt -o keyformat=passphrase $DS
+  echo $E
+  sync
+  time ( cp dump.bin $D/dump.bin; sync )
+done
@@ -2,6 +2,15 @@
 # To run tests as non-root, use this hack:
 # chmod 4755 /usr/sbin/zpool /usr/sbin/zfs
 
+import sys
+
+#dirty hack for this error:
+#AttributeError: module 'collections' has no attribute 'MutableMapping'
+
+if sys.version_info.major == 3 and sys.version_info.minor >= 10:
+    import collections
+    setattr(collections, "MutableMapping", collections.abc.MutableMapping)
+
 import subprocess
 import random
 
@@ -117,7 +117,7 @@ class TestZfsNode(unittest2.TestCase):
 
         print(buf.getvalue())
         #on second run it sees the dangling ex-parent but doesnt know what to do with it (since it has no own snapshot)
-        self.assertIn("test_source1: Destroy missing: has no snapshots made by us.", buf.getvalue())
+        self.assertIn("test_source1: Destroy missing: has no snapshots made by us", buf.getvalue())
 
@@ -2,87 +2,146 @@ import zfs_autobackup.compressors
 from basetest import *
 import time
 
 
 class TestSendRecvPipes(unittest2.TestCase):
     """test input/output pipes for zfs send and recv"""
 
     def setUp(self):
         prepare_zpools()
-        self.longMessage=True
+        self.longMessage = True
 
     def test_send_basics(self):
         """send basics (remote/local send pipe)"""
 
         with self.subTest("local local pipe"):
             with patch('time.strftime', return_value="test-20101111000000"):
-                self.assertFalse(ZfsAutobackup(["test", "test_target1", "--exclude-received", "--no-holds", "--no-progress", "--send-pipe=dd bs=1M", "--recv-pipe=dd bs=2M"]).run())
+                self.assertFalse(ZfsAutobackup(
+                    ["test", "test_target1", "--allow-empty", "--exclude-received", "--no-holds", "--no-progress",
+                     "--send-pipe=dd bs=1M", "--recv-pipe=dd bs=2M"]).run())
 
         shelltest("zfs destroy -r test_target1/test_source1/fs1/sub")
 
         with self.subTest("remote local pipe"):
-            with patch('time.strftime', return_value="test-20101111000000"):
-                self.assertFalse(ZfsAutobackup(["test", "test_target1", "--exclude-received", "--no-holds", "--no-progress", "--ssh-source=localhost", "--send-pipe=dd bs=1M", "--recv-pipe=dd bs=2M"]).run())
+            with patch('time.strftime', return_value="test-20101111000001"):
+                self.assertFalse(ZfsAutobackup(
+                    ["test", "test_target1", "--allow-empty", "--exclude-received", "--no-holds", "--no-progress",
+                     "--ssh-source=localhost", "--send-pipe=dd bs=1M", "--recv-pipe=dd bs=2M"]).run())
 
         shelltest("zfs destroy -r test_target1/test_source1/fs1/sub")
 
         with self.subTest("local remote pipe"):
-            with patch('time.strftime', return_value="test-20101111000000"):
-                self.assertFalse(ZfsAutobackup(["test", "test_target1", "--exclude-received", "--no-holds", "--no-progress", "--ssh-target=localhost", "--send-pipe=dd bs=1M", "--recv-pipe=dd bs=2M"]).run())
+            with patch('time.strftime', return_value="test-20101111000002"):
+                self.assertFalse(ZfsAutobackup(
+                    ["test", "test_target1", "--allow-empty", "--exclude-received", "--no-holds", "--no-progress",
+                     "--ssh-target=localhost", "--send-pipe=dd bs=1M", "--recv-pipe=dd bs=2M"]).run())
 
         shelltest("zfs destroy -r test_target1/test_source1/fs1/sub")
 
         with self.subTest("remote remote pipe"):
-            with patch('time.strftime', return_value="test-20101111000000"):
-                self.assertFalse(ZfsAutobackup(["test", "test_target1", "--exclude-received", "--no-holds", "--no-progress", "--ssh-source=localhost", "--ssh-target=localhost", "--send-pipe=dd bs=1M", "--recv-pipe=dd bs=2M"]).run())
+            with patch('time.strftime', return_value="test-20101111000003"):
+                self.assertFalse(ZfsAutobackup(
+                    ["test", "test_target1", "--allow-empty", "--exclude-received", "--no-holds", "--no-progress",
+                     "--ssh-source=localhost", "--ssh-target=localhost", "--send-pipe=dd bs=1M",
+                     "--recv-pipe=dd bs=2M"]).run())
+
+        r = shelltest("zfs list -H -o name -r -t all test_target1")
+        self.assertMultiLineEqual(r, """
+test_target1
+test_target1/test_source1
+test_target1/test_source1/fs1
+test_target1/test_source1/fs1@test-20101111000000
+test_target1/test_source1/fs1@test-20101111000001
+test_target1/test_source1/fs1@test-20101111000002
+test_target1/test_source1/fs1@test-20101111000003
+test_target1/test_source1/fs1/sub
+test_target1/test_source1/fs1/sub@test-20101111000000
+test_target1/test_source1/fs1/sub@test-20101111000001
+test_target1/test_source1/fs1/sub@test-20101111000002
+test_target1/test_source1/fs1/sub@test-20101111000003
+test_target1/test_source2
+test_target1/test_source2/fs2
+test_target1/test_source2/fs2/sub
+test_target1/test_source2/fs2/sub@test-20101111000000
+test_target1/test_source2/fs2/sub@test-20101111000001
+test_target1/test_source2/fs2/sub@test-20101111000002
+test_target1/test_source2/fs2/sub@test-20101111000003
+""")
 
     def test_compress(self):
         """send basics (remote/local send pipe)"""
 
         for compress in zfs_autobackup.compressors.COMPRESS_CMDS.keys():
 
-            with self.subTest("compress "+compress):
+            with self.subTest("compress " + compress):
                 with patch('time.strftime', return_value="test-20101111000000"):
-                    self.assertFalse(ZfsAutobackup(["test", "test_target1", "--exclude-received", "--no-holds", "--no-progress", "--compress="+compress]).run())
+                    self.assertFalse(ZfsAutobackup(
+                        ["test", "test_target1", "--exclude-received", "--no-holds", "--no-progress", "--verbose",
+                         "--compress=" + compress]).run())
 
                 shelltest("zfs destroy -r test_target1/test_source1/fs1/sub")
 
     def test_buffer(self):
         """test different buffer configurations"""
 
         with self.subTest("local local pipe"):
             with patch('time.strftime', return_value="test-20101111000000"):
-                self.assertFalse(ZfsAutobackup(["test", "test_target1", "--exclude-received", "--no-holds", "--no-progress", "--buffer=1M" ]).run())
+                self.assertFalse(ZfsAutobackup(
+                    ["test", "test_target1", "--allow-empty", "--exclude-received", "--no-holds", "--no-progress",
+                     "--buffer=1M"]).run())
 
         shelltest("zfs destroy -r test_target1/test_source1/fs1/sub")
 
         with self.subTest("remote local pipe"):
-            with patch('time.strftime', return_value="test-20101111000000"):
-                self.assertFalse(ZfsAutobackup(["test", "test_target1", "--exclude-received", "--no-holds", "--no-progress", "--ssh-source=localhost", "--buffer=1M"]).run())
+            with patch('time.strftime', return_value="test-20101111000001"):
+                self.assertFalse(ZfsAutobackup(
+                    ["test", "test_target1", "--allow-empty", "--verbose", "--exclude-received", "--no-holds",
+                     "--no-progress", "--ssh-source=localhost", "--buffer=1M"]).run())
 
         shelltest("zfs destroy -r test_target1/test_source1/fs1/sub")
 
         with self.subTest("local remote pipe"):
-            with patch('time.strftime', return_value="test-20101111000000"):
-                self.assertFalse(ZfsAutobackup(["test", "test_target1", "--exclude-received", "--no-holds", "--no-progress", "--ssh-target=localhost", "--buffer=1M"]).run())
+            with patch('time.strftime', return_value="test-20101111000002"):
+                self.assertFalse(ZfsAutobackup(
+                    ["test", "test_target1", "--allow-empty", "--exclude-received", "--no-holds", "--no-progress",
+                     "--ssh-target=localhost", "--buffer=1M"]).run())
 
         shelltest("zfs destroy -r test_target1/test_source1/fs1/sub")
 
         with self.subTest("remote remote pipe"):
-            with patch('time.strftime', return_value="test-20101111000000"):
-                self.assertFalse(ZfsAutobackup(["test", "test_target1", "--exclude-received", "--no-holds", "--no-progress", "--ssh-source=localhost", "--ssh-target=localhost", "--buffer=1M"]).run())
+            with patch('time.strftime', return_value="test-20101111000003"):
+                self.assertFalse(ZfsAutobackup(
+                    ["test", "test_target1", "--allow-empty", "--exclude-received", "--no-holds", "--no-progress",
+                     "--ssh-source=localhost", "--ssh-target=localhost", "--buffer=1M"]).run())
+
+        r = shelltest("zfs list -H -o name -r -t all test_target1")
+        self.assertMultiLineEqual(r, """
+test_target1
+test_target1/test_source1
+test_target1/test_source1/fs1
+test_target1/test_source1/fs1@test-20101111000000
+test_target1/test_source1/fs1@test-20101111000001
+test_target1/test_source1/fs1@test-20101111000002
+test_target1/test_source1/fs1@test-20101111000003
+test_target1/test_source1/fs1/sub
+test_target1/test_source1/fs1/sub@test-20101111000000
+test_target1/test_source1/fs1/sub@test-20101111000001
+test_target1/test_source1/fs1/sub@test-20101111000002
+test_target1/test_source1/fs1/sub@test-20101111000003
+test_target1/test_source2
+test_target1/test_source2/fs2
+test_target1/test_source2/fs2/sub
+test_target1/test_source2/fs2/sub@test-20101111000000
+test_target1/test_source2/fs2/sub@test-20101111000001
+test_target1/test_source2/fs2/sub@test-20101111000002
+test_target1/test_source2/fs2/sub@test-20101111000003
+""")
 
     def test_rate(self):
         """test rate limit"""
 
-        start=time.time()
+        start = time.time()
         with patch('time.strftime', return_value="test-20101111000000"):
-            self.assertFalse(ZfsAutobackup(["test", "test_target1", "--exclude-received", "--no-holds", "--no-progress", "--rate=50k" ]).run())
-
-        #not a great way of verifying but it works.
-        self.assertGreater(time.time()-start, 5)
+            self.assertFalse(ZfsAutobackup(
+                ["test", "test_target1", "--exclude-received", "--no-holds", "--no-progress", "--rate=50k"]).run())
 
+        # not a great way of verifying but it works.
+        self.assertGreater(time.time() - start, 5)
@@ -95,3 +95,25 @@ test_target1/fs1@test-20101111000000
 test_target1/fs1/sub@test-20101111000000
 test_target1/fs2/sub@test-20101111000000
 """)
+
+    def test_exclude_unchanged(self):
+
+        shelltest("zfs snapshot -r test_source1@somesnapshot")
+
+        with patch('time.strftime', return_value="test-20101111000000"):
+            self.assertFalse(
+                ZfsAutobackup(
+                    "test test_target1 --verbose --allow-empty --exclude-unchanged=1".split(" ")).run())
+
+        #everything should be excluded, but should not return an error (see #190)
+        with patch('time.strftime', return_value="test-20101111000001"):
+            self.assertFalse(
+                ZfsAutobackup(
+                    "test test_target1 --verbose --allow-empty --exclude-unchanged=1".split(" ")).run())
+
+        r = shelltest("zfs list -H -o name -r -t snapshot test_target1")
+        self.assertMultiLineEqual(r, """
+test_target1/test_source2/fs2/sub@test-20101111000000
+""")
@@ -15,7 +15,9 @@ class TestZfsNode(unittest2.TestCase):
         node = ZfsNode(utc=False, snapshot_time_format="test-%Y%m%d%H%M%S", hold_name="zfs_autobackup:test", logger=logger, description=description)
 
         with self.subTest("first snapshot"):
-            node.consistent_snapshot(node.selected_datasets(property_name="autobackup:test",exclude_paths=[], exclude_received=False, exclude_unchanged=False, min_change=200000), "test-20101111000001", 100000)
+            (selected_datasets, excluded_datasets)=node.selected_datasets(property_name="autobackup:test", exclude_paths=[], exclude_received=False,
+                                                                          exclude_unchanged=0)
+            node.consistent_snapshot(selected_datasets, "test-20101111000001", 100000)
             r = shelltest("zfs list -H -o name -r -t all " + TEST_POOLS)
             self.assertEqual(r, """
 test_source1
@@ -33,7 +35,9 @@ test_target1
 """)
 
         with self.subTest("second snapshot, no changes, no snapshot"):
-            node.consistent_snapshot(node.selected_datasets(property_name="autobackup:test",exclude_paths=[], exclude_received=False, exclude_unchanged=False, min_change=200000), "test-20101111000002", 1)
+            (selected_datasets, excluded_datasets)=node.selected_datasets(property_name="autobackup:test", exclude_paths=[], exclude_received=False,
+                                                                          exclude_unchanged=0)
+            node.consistent_snapshot(selected_datasets, "test-20101111000002", 1)
             r = shelltest("zfs list -H -o name -r -t all " + TEST_POOLS)
             self.assertEqual(r, """
 test_source1
@@ -51,7 +55,8 @@ test_target1
 """)
 
         with self.subTest("second snapshot, no changes, empty snapshot"):
-            node.consistent_snapshot(node.selected_datasets(property_name="autobackup:test", exclude_paths=[], exclude_received=False, exclude_unchanged=False, min_change=200000), "test-20101111000002", 0)
+            (selected_datasets, excluded_datasets) =node.selected_datasets(property_name="autobackup:test", exclude_paths=[], exclude_received=False, exclude_unchanged=0)
+            node.consistent_snapshot(selected_datasets, "test-20101111000002", 0)
             r = shelltest("zfs list -H -o name -r -t all " + TEST_POOLS)
             self.assertEqual(r, """
 test_source1
@@ -79,7 +84,8 @@ test_target1
         with self.subTest("Test if all cmds are executed correctly (no failures)"):
             with OutputIO() as buf:
                 with redirect_stdout(buf):
-                    node.consistent_snapshot(node.selected_datasets(property_name="autobackup:test", exclude_paths=[], exclude_received=False, exclude_unchanged=False, min_change=1), "test-1",
+                    (selected_datasets, excluded_datasets) =node.selected_datasets(property_name="autobackup:test", exclude_paths=[], exclude_received=False, exclude_unchanged=0)
+                    node.consistent_snapshot(selected_datasets, "test-1",
                                              0,
                                              pre_snapshot_cmds=["echo pre1", "echo pre2"],
                                              post_snapshot_cmds=["echo post1 >&2", "echo post2 >&2"]
@@ -95,7 +101,8 @@ test_target1
         with OutputIO() as buf:
             with redirect_stdout(buf):
                 with self.assertRaises(ExecuteError):
-                    node.consistent_snapshot(node.selected_datasets(property_name="autobackup:test", exclude_paths=[], exclude_received=False, exclude_unchanged=False, min_change=1), "test-1",
+                    (selected_datasets, excluded_datasets) =node.selected_datasets(property_name="autobackup:test", exclude_paths=[], exclude_received=False, exclude_unchanged=0)
+                    node.consistent_snapshot(selected_datasets, "test-1",
                                              0,
                                              pre_snapshot_cmds=["echo pre1", "false", "echo pre2"],
                                              post_snapshot_cmds=["echo post1", "false", "echo post2"]
@@ -112,7 +119,8 @@ test_target1
             with redirect_stdout(buf):
                 with self.assertRaises(ExecuteError):
                     #same snapshot name as before so it fails
-                    node.consistent_snapshot(node.selected_datasets(property_name="autobackup:test", exclude_paths=[], exclude_received=False, exclude_unchanged=False, min_change=1), "test-1",
+                    (selected_datasets, excluded_datasets) =node.selected_datasets(property_name="autobackup:test", exclude_paths=[], exclude_received=False, exclude_unchanged=0)
+                    node.consistent_snapshot(selected_datasets, "test-1",
                                              0,
                                              pre_snapshot_cmds=["echo pre1", "echo pre2"],
                                              post_snapshot_cmds=["echo post1", "echo post2"]
@@ -158,7 +166,9 @@ test_target1
         logger = LogStub()
         description = "[Source]"
         node = ZfsNode(utc=False, snapshot_time_format="test-%Y%m%d%H%M%S", hold_name="zfs_autobackup:test", logger=logger, description=description)
-        s = pformat(node.selected_datasets(property_name="autobackup:test", exclude_paths=[], exclude_received=False, exclude_unchanged=True, min_change=1))
+        (selected_datasets, excluded_datasets)=node.selected_datasets(property_name="autobackup:test", exclude_paths=[], exclude_received=False,
+                                                                      exclude_unchanged=1)
+        s = pformat(selected_datasets)
         print(s)
 
         # basics
@@ -10,7 +10,7 @@ class CliBase(object):
     Overridden in subclasses that add stuff for the specific programs."""
 
     # also used by setup.py
-    VERSION = "3.2-beta1"
+    VERSION = "3.2"
     HEADER = "{} v{} - (c)2022 E.H.Eefting (edwin@datux.nl)".format(os.path.basename(sys.argv[0]), VERSION)
 
     def __init__(self, argv, print_arguments=True):
@@ -98,8 +98,8 @@ class ZfsAuto(CliBase):
 
         group=parser.add_argument_group("Selection options")
         group.add_argument('--ignore-replicated', action='store_true', help=argparse.SUPPRESS)
-        group.add_argument('--exclude-unchanged', action='store_true',
-                           help='Exclude datasets that have no changes since any last snapshot. (Useful in combination with proxmox HA replication)')
+        group.add_argument('--exclude-unchanged', metavar='BYTES', default=0, type=int,
+                           help='Exclude datasets that have less than BYTES data changed since any last snapshot. (Use with proxmox HA replication)')
         group.add_argument('--exclude-received', action='store_true',
                            help='Exclude datasets that have the origin of their autobackup: property as "received". '
                                 'This can avoid recursive replication between two backup partners.')
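This hunk turns --exclude-unchanged from a boolean flag into an integer byte threshold. A hedged before/after sketch of the commandline usage (the backup-group name offsite1 and target path tank/backups are hypothetical):

    # 3.2-beta1 and earlier: plain flag
    zfs-autobackup offsite1 tank/backups --exclude-unchanged

    # 3.2 and later: exclude datasets with less than BYTES changed since the last snapshot
    zfs-autobackup offsite1 tank/backups --exclude-unchanged=200000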
@@ -37,11 +37,11 @@ class ZfsAutobackup(ZfsAuto):
             args.rollback = True
 
         if args.resume:
-            self.warning("The --resume option isn't needed anymore (its autodetected now)")
+            self.warning("The --resume option isn't needed anymore (it's autodetected now)")
 
         if args.raw:
             self.warning(
-                "The --raw option isn't needed anymore (its autodetected now). Also see --encrypt and --decrypt.")
+                "The --raw option isn't needed anymore (it's autodetected now). Also see --encrypt and --decrypt.")
 
         if args.compress and args.ssh_source is None and args.ssh_target is None:
             self.warning("Using compression, but transfer is local.")
@@ -67,7 +67,7 @@ class ZfsAutobackup(ZfsAuto):
                            help='Only create snapshot if enough bytes are changed. (default %('
                                 'default)s)')
         group.add_argument('--allow-empty', action='store_true',
-                           help='If nothing has changed, still create empty snapshots. (Faster. Same as --min-change=0)')
+                           help='If nothing has changed, still create empty snapshots. (Same as --min-change=0)')
         group.add_argument('--other-snapshots', action='store_true',
                            help='Send over other snapshots as well, not just the ones created by this tool.')
         group.add_argument('--set-snapshot-properties', metavar='PROPERTY=VALUE,...', type=str,
@@ -76,11 +76,11 @@ class ZfsAutobackup(ZfsAuto):
 
         group = parser.add_argument_group("Transfer options")
         group.add_argument('--no-send', action='store_true',
-                           help='Don\'t transfer snapshots (useful for cleanups, or if you want a serperate send-cronjob)')
+                           help='Don\'t transfer snapshots (useful for cleanups, or if you want a separate send-cronjob)')
         group.add_argument('--no-holds', action='store_true',
                            help='Don\'t hold snapshots. (Faster. Allows you to destroy common snapshot.)')
         group.add_argument('--clear-refreservation', action='store_true',
-                           help='Filter "refreservation" property. (recommended, safes space. same as '
+                           help='Filter "refreservation" property. (recommended, saves space. same as '
                                 '--filter-properties refreservation)')
         group.add_argument('--clear-mountpoint', action='store_true',
                            help='Set property canmount=noauto for new datasets. (recommended, prevents mount '
@@ -95,7 +95,7 @@ class ZfsAutobackup(ZfsAuto):
                            help='Rollback changes to the latest target snapshot before starting. (normally you can '
                                 'prevent changes by setting the readonly property on the target_path to on)')
         group.add_argument('--force', '-F', action='store_true',
-                           help='Use zfs -F option to force overwrite/rollback. (Usefull with --strip-path=1, but use with care)')
+                           help='Use zfs -F option to force overwrite/rollback. (Useful with --strip-path=1, but use with care)')
         group.add_argument('--destroy-incompatible', action='store_true',
                            help='Destroy incompatible snapshots on target. Use with care! (implies --rollback)')
         group.add_argument('--ignore-transfer-errors', action='store_true',
@@ -110,13 +110,13 @@ class ZfsAutobackup(ZfsAuto):
         group.add_argument('--zfs-compressed', action='store_true',
                            help='Transfer blocks that already have zfs-compression as-is.')
 
-        group = parser.add_argument_group("ZFS send/recv pipes")
+        group = parser.add_argument_group("Data transfer options")
         group.add_argument('--compress', metavar='TYPE', default=None, nargs='?', const='zstd-fast',
                            choices=compressors.choices(),
                            help='Use compression during transfer, defaults to zstd-fast if TYPE is not specified. ({})'.format(
                                ", ".join(compressors.choices())))
         group.add_argument('--rate', metavar='DATARATE', default=None,
-                           help='Limit data transfer rate (e.g. 128K. requires mbuffer.)')
+                           help='Limit data transfer rate in Bytes/sec (e.g. 128K. requires mbuffer.)')
         group.add_argument('--buffer', metavar='SIZE', default=None,
                            help='Add zfs send and recv buffers to smooth out IO bursts. (e.g. 128M. requires mbuffer)')
         group.add_argument('--send-pipe', metavar="COMMAND", default=[], action='append',
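The renamed "Data transfer options" group bundles compression, rate limiting and buffering. A hedged combined example (the host and dataset names are hypothetical; per the help text above, --compress without a TYPE defaults to zstd-fast, and --rate/--buffer need mbuffer installed):

    # compress with the default zstd-fast, cap the rate at 128K/s, add 128M buffers
    zfs-autobackup offsite1 tank/backups --ssh-target=backupserver --compress --rate=128K --buffer=128M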
@@ -189,7 +189,7 @@ class ZfsAutobackup(ZfsAuto):
                     dataset.debug("Destroy missing: ignoring")
                 else:
                     dataset.verbose(
-                        "Destroy missing: has no snapshots made by us. (please destroy manually)")
+                        "Destroy missing: has no snapshots made by us (please destroy manually).")
             else:
                 # past the deadline?
                 deadline_ttl = ThinnerRule("0s" + self.args.destroy_missing).ttl
@@ -443,12 +443,11 @@ class ZfsAutobackup(ZfsAuto):
 
         ################# select source datasets
         self.set_title("Selecting")
-        source_datasets = source_node.selected_datasets(property_name=self.property_name,
-                                                        exclude_received=self.args.exclude_received,
-                                                        exclude_paths=self.exclude_paths,
-                                                        exclude_unchanged=self.args.exclude_unchanged,
-                                                        min_change=self.args.min_change)
-        if not source_datasets:
+        ( source_datasets, excluded_datasets) = source_node.selected_datasets(property_name=self.property_name,
+                                                                              exclude_received=self.args.exclude_received,
+                                                                              exclude_paths=self.exclude_paths,
+                                                                              exclude_unchanged=self.args.exclude_unchanged)
+        if not source_datasets and not excluded_datasets:
             self.print_error_sources()
             return 255
 
@@ -239,12 +239,11 @@ class ZfsAutoverify(ZfsAuto):
 
         ################# select source datasets
         self.set_title("Selecting")
-        source_datasets = source_node.selected_datasets(property_name=self.property_name,
-                                                        exclude_received=self.args.exclude_received,
-                                                        exclude_paths=self.exclude_paths,
-                                                        exclude_unchanged=self.args.exclude_unchanged,
-                                                        min_change=0)
-        if not source_datasets:
+        ( source_datasets, excluded_datasets) = source_node.selected_datasets(property_name=self.property_name,
+                                                                              exclude_received=self.args.exclude_received,
+                                                                              exclude_paths=self.exclude_paths,
+                                                                              exclude_unchanged=self.args.exclude_unchanged)
+        if not source_datasets and not excluded_datasets:
             self.print_error_sources()
             return 255
 
@@ -307,6 +306,7 @@ class ZfsAutoverify(ZfsAuto):
 def cli():
     import sys
 
+    raise(Exception("This program is incomplete, dont use it yet."))
     signal(SIGPIPE, sigpipe_handler)
     failed = ZfsAutoverify(sys.argv[1:], False).run()
     sys.exit(min(failed,255))
@@ -106,7 +106,7 @@ class ZfsCheck(CliBase):
 
             time.sleep(1)
 
-        raise (Exception("Timeout while waiting for /dev entry to appear. (looking in: {})".format(locations)))
+        raise (Exception("Timeout while waiting for /dev entry to appear. (looking in: {}). Hint: did you forget to load the encryption key?".format(locations)))
 
     def cleanup_zfs_volume(self, snapshot):
         """destroys temporary volume snapshot"""
 
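The added hint points at the usual cause of this timeout: the /dev entry of an encrypted zvol never appears while the dataset's key is not loaded. A minimal sketch of the fix, assuming a hypothetical dataset name:

    # load the key(s) for the encrypted tree, then retry the check
    zfs load-key -r rpool/encrypted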
@@ -118,7 +118,7 @@ class ZfsDataset:
         """true if this dataset is a snapshot"""
         return self.name.find("@") != -1
 
-    def is_selected(self, value, source, inherited, exclude_received, exclude_paths, exclude_unchanged, min_change):
+    def is_selected(self, value, source, inherited, exclude_received, exclude_paths, exclude_unchanged):
         """determine if dataset should be selected for backup (called from
         ZfsNode)
 
@@ -128,12 +128,15 @@ class ZfsDataset:
         :type source: str
         :type inherited: bool
         :type exclude_received: bool
-        :type exclude_unchanged: bool
-        :type min_change: bool
+        :type exclude_unchanged: int
 
         :param value: Value of the zfs property ("false"/"true"/"child"/parent/"-")
         :param source: Source of the zfs property ("local"/"received", "-")
         :param inherited: True of the value/source was inherited from a higher dataset.
+
+        Returns: True : Selected
+                 False: Excluded
+                 None : No property found
         """
 
         # sanity checks
@@ -149,7 +152,7 @@ class ZfsDataset:
 
         # non specified, ignore
         if value == "-":
-            return False
+            return None
 
         # only select childs of this dataset, ignore
         if value == "child" and not inherited:
@@ -179,8 +182,8 @@ class ZfsDataset:
             self.verbose("Excluded (dataset already received)")
             return False
 
-        if exclude_unchanged and not self.is_changed(min_change):
-            self.verbose("Excluded (unchanged since last snapshot)")
+        if not self.is_changed(exclude_unchanged):
+            self.verbose("Excluded (by --exclude-unchanged)")
             return False
 
         self.verbose("Selected")
@@ -575,13 +578,14 @@ class ZfsDataset:
 
         # all kind of performance options:
         if 'large_blocks' in features and "-L" in self.zfs_node.supported_send_options:
-            cmd.append("--large-block")  # large block support (only if recordsize>128k which is seldomly used)
+            # large block support (only if recordsize>128k which is seldomly used)
+            cmd.append("-L")  # --large-block
 
         if write_embedded and 'embedded_data' in features and "-e" in self.zfs_node.supported_send_options:
-            cmd.append("--embed")  # WRITE_EMBEDDED, more compact stream
+            cmd.append("-e")  # --embed; WRITE_EMBEDDED, more compact stream
 
         if zfs_compressed and "-c" in self.zfs_node.supported_send_options:
-            cmd.append("--compressed")  # use compressed WRITE records
+            cmd.append("-c")  # --compressed; use compressed WRITE records
 
         # raw? (send over encrypted data in its original encrypted form without decrypting)
         if raw:
@@ -589,8 +593,8 @@ class ZfsDataset:
 
         # progress output
         if show_progress:
-            cmd.append("--verbose")
-            cmd.append("--parsable")
+            cmd.append("-v")  # --verbose
+            cmd.append("-P")  # --parsable
 
         # resume a previous send? (don't need more parameters in that case)
         if resume_token:
@@ -599,7 +603,7 @@ class ZfsDataset:
         else:
             # send properties
             if send_properties:
-                cmd.append("--props")
+                cmd.append("-p")  # --props
 
             # incremental?
             if prev_snapshot:
@@ -235,10 +235,10 @@ class ZfsNode(ExecuteNode):
         except Exception as e:
             pass
 
-    def selected_datasets(self, property_name, exclude_received, exclude_paths, exclude_unchanged, min_change):
+    def selected_datasets(self, property_name, exclude_received, exclude_paths, exclude_unchanged):
         """determine filesystems that should be backed up by looking at the special autobackup-property, systemwide
 
-        returns: list of ZfsDataset
+        returns: ( list of selected ZfsDataset, list of excluded ZfsDataset)
         """
 
         self.debug("Getting selected datasets")
@@ -249,8 +249,10 @@ class ZfsNode(ExecuteNode):
             property_name
         ])
 
+
         # The returnlist of selected ZfsDataset's:
         selected_filesystems = []
+        excluded_filesystems = []
 
         # list of sources, used to resolve inherited sources
         sources = {}
@@ -270,9 +272,14 @@ class ZfsNode(ExecuteNode):
                 source = raw_source
 
             # determine it
-            if dataset.is_selected(value=value, source=source, inherited=inherited, exclude_received=exclude_received,
-                                   exclude_paths=exclude_paths, exclude_unchanged=exclude_unchanged,
-                                   min_change=min_change):
-                selected_filesystems.append(dataset)
+            selected=dataset.is_selected(value=value, source=source, inherited=inherited, exclude_received=exclude_received,
+                                         exclude_paths=exclude_paths, exclude_unchanged=exclude_unchanged)
 
-        return selected_filesystems
+            if selected==True:
+                selected_filesystems.append(dataset)
+            elif selected==False:
+                excluded_filesystems.append(dataset)
+            #returns None when no property is set.
+
+        return ( selected_filesystems, excluded_filesystems)