Compare commits
165 Commits
v3.0.1-bet
...
v3.1.1-rc1
| Author | SHA1 | Date | |
|---|---|---|---|
| 81e7cd940c | |||
| 0b4448798e | |||
| b1689f5066 | |||
| dcb9cdac44 | |||
| 9dc280abad | |||
| 6b8c683315 | |||
| 66e123849b | |||
| 7325e1e351 | |||
| 9f4ea51622 | |||
| 8c1058a808 | |||
| d9e759a3eb | |||
| 46457b3aca | |||
| 59f7ccc352 | |||
| 578fb1be4b | |||
| f9b16c050b | |||
| 2ba6fe5235 | |||
| 8e2c91735a | |||
| d57e3922a0 | |||
| 4b25dd76f1 | |||
| 2843781aa6 | |||
| ce987328d9 | |||
| 9a902f0f38 | |||
| ee2c074539 | |||
| 77f1c16414 | |||
| c5363a1538 | |||
| 119225ba5b | |||
| 84437ee1d0 | |||
| 1286bfafd0 | |||
| 9fc2703638 | |||
| 01dc65af96 | |||
| 082153e0ce | |||
| 77f5474447 | |||
| 55ff14f1d8 | |||
| 2acd26b304 | |||
| ec9459c1d2 | |||
| 233fd83ded | |||
| 37c24e092c | |||
| b2bf11382c | |||
| 19b918044e | |||
| 67d9240e7b | |||
| 1a5e4a9cdd | |||
| 31f8c359ff | |||
| b50b7b7563 | |||
| 37f91e1e08 | |||
| a2f3aee5b1 | |||
| 75d0a3cc7e | |||
| 98c55e2aa8 | |||
| d478e22111 | |||
| 3a4953fbc5 | |||
| 8d4e041a9c | |||
| 8725d56bc9 | |||
| ab0bfdbf4e | |||
| ea9012e476 | |||
| 97e3c110b3 | |||
| 9264e8de6d | |||
| 830ccf1bd4 | |||
| a389e4c81c | |||
| 36a66fbafc | |||
| b70c9986c7 | |||
| 664ea32c96 | |||
| 30f30babea | |||
| 5e04aabf37 | |||
| 59d53e9664 | |||
| 171f0ac5ad | |||
| 0ce3bf1297 | |||
| c682665888 | |||
| 086cfe570b | |||
| 521d1078bd | |||
| 8ea178af1f | |||
| 3e39e1553e | |||
| f0cc2bca2a | |||
| 59b0c23a20 | |||
| 401a3f73cc | |||
| 8ec5ed2f4f | |||
| 8318b2f9bf | |||
| 72b97ab2e8 | |||
| a16a038f0e | |||
| fc0da9d380 | |||
| 31be12c0bf | |||
| 176f04b302 | |||
| 7696d8c16d | |||
| 190a73ec10 | |||
| 2bf015e127 | |||
| 671eda7386 | |||
| 3d4b26cec3 | |||
| c0ea311e18 | |||
| b7b2723b2e | |||
| ec1d3ff93e | |||
| 352d5e6094 | |||
| 488ff6f551 | |||
| f52b8bbf58 | |||
| e47d461999 | |||
| a920744b1e | |||
| 63f423a201 | |||
| db6523f3c0 | |||
| 6b172dce2d | |||
| 85d493469d | |||
| bef3be4955 | |||
| f9719ba87e | |||
| 4b97f789df | |||
| ed7cd41ad7 | |||
| 62e19d97c2 | |||
| 594a2664c4 | |||
| d8fbc96be6 | |||
| 61bb590112 | |||
| 86ea5e49f4 | |||
| 01642365c7 | |||
| 4910b1dfb5 | |||
| 966df73d2f | |||
| 69ed827c0d | |||
| e79f6ac157 | |||
| 59efd070a1 | |||
| 80c1bdad1c | |||
| cf72de7c28 | |||
| 686bb48bda | |||
| 6a48b8a2a9 | |||
| 477b66c342 | |||
| a4155f970e | |||
| 0c9d14bf32 | |||
| 1f5955ccec | |||
| 1b94a849db | |||
| 98c40c6df5 | |||
| b479ab9c98 | |||
| a0fb205e75 | |||
| d3ce222921 | |||
| 36e134eb75 | |||
| 628cd75941 | |||
| 1da14c5c3b | |||
| c83d0fcff2 | |||
| 573af341b8 | |||
| a64168bee2 | |||
| c678ae5f9a | |||
| e95967db53 | |||
| 29e6c056d1 | |||
| deadbe9383 | |||
| 5cbec2e06f | |||
| 66d284f183 | |||
| ae64fd6e99 | |||
| 305bd3008d | |||
| 17fec7d1ee | |||
| f5b0a4f272 | |||
| 06c8416771 | |||
| 4f9b7b6cef | |||
| 0214584e4c | |||
| b6627eb389 | |||
| 48f1f6ec5d | |||
| e33e47c10c | |||
| 01f0078ccf | |||
| 9fad773bfb | |||
| 7493a0bc55 | |||
| 0649f42d66 | |||
| 6fefadf884 | |||
| ce05e1ba4c | |||
| 35584149ff | |||
| 427f74d2f0 | |||
| 9b2c321fe7 | |||
| d02a6df0f3 | |||
| 7fb5a7db92 | |||
| 64e53952fe | |||
| b7ef6c9528 | |||
| b7b3e785ce | |||
| 50070bc70f | |||
| 0fb0faccae | |||
| ab77b91d4e | |||
| bbe7a112fd |
24
.github/workflows/python-publish.yml
vendored
24
.github/workflows/python-publish.yml
vendored
@ -14,19 +14,33 @@ jobs:
|
|||||||
|
|
||||||
steps:
|
steps:
|
||||||
- uses: actions/checkout@v2
|
- uses: actions/checkout@v2
|
||||||
- name: Set up Python
|
|
||||||
|
- name: Set up Python 3.x
|
||||||
uses: actions/setup-python@v2
|
uses: actions/setup-python@v2
|
||||||
with:
|
with:
|
||||||
python-version: '3.x'
|
python-version: '3.x'
|
||||||
- name: Install dependencies
|
|
||||||
|
- name: Set up Python 2.x
|
||||||
|
uses: actions/setup-python@v2
|
||||||
|
with:
|
||||||
|
python-version: '2.x'
|
||||||
|
|
||||||
|
- name: Install dependencies 3.x
|
||||||
run: |
|
run: |
|
||||||
python -m pip install --upgrade pip
|
python -m pip install --upgrade pip
|
||||||
pip install setuptools wheel twine
|
pip3 install setuptools wheel twine
|
||||||
|
|
||||||
|
- name: Install dependencies 2.x
|
||||||
|
run: |
|
||||||
|
python2 -m pip install --upgrade pip
|
||||||
|
pip2 install setuptools wheel twine
|
||||||
|
|
||||||
- name: Build and publish
|
- name: Build and publish
|
||||||
env:
|
env:
|
||||||
TWINE_USERNAME: ${{ secrets.TWINE_USERNAME }}
|
TWINE_USERNAME: ${{ secrets.TWINE_USERNAME }}
|
||||||
TWINE_PASSWORD: ${{ secrets.TWINE_PASSWORD }}
|
TWINE_PASSWORD: ${{ secrets.TWINE_PASSWORD }}
|
||||||
run: |
|
run: |
|
||||||
python setup.py sdist bdist_wheel
|
python3 setup.py sdist bdist_wheel
|
||||||
python3 -m twine check dist/*
|
python2 setup.py sdist bdist_wheel
|
||||||
|
twine check dist/*
|
||||||
twine upload dist/*
|
twine upload dist/*
|
||||||
|
|||||||
31
.github/workflows/regression.yml
vendored
31
.github/workflows/regression.yml
vendored
@ -17,17 +17,17 @@ jobs:
|
|||||||
|
|
||||||
|
|
||||||
- name: Prepare
|
- name: Prepare
|
||||||
run: lsmod && sudo apt update && sudo apt install zfsutils-linux && sudo -H pip3 install coverage unittest2 mock==3.0.5 coveralls
|
run: sudo apt update && sudo apt install zfsutils-linux lzop pigz zstd gzip xz-utils lz4 mbuffer && sudo -H pip3 install coverage unittest2 mock==3.0.5 coveralls
|
||||||
|
|
||||||
|
|
||||||
- name: Regression test
|
- name: Regression test
|
||||||
run: sudo -E ./run_tests
|
run: sudo -E ./tests/run_tests
|
||||||
|
|
||||||
|
|
||||||
- name: Coveralls
|
- name: Coveralls
|
||||||
env:
|
env:
|
||||||
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
|
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
|
||||||
run: coveralls --service=github
|
run: coveralls --service=github || true
|
||||||
|
|
||||||
ubuntu18:
|
ubuntu18:
|
||||||
runs-on: ubuntu-18.04
|
runs-on: ubuntu-18.04
|
||||||
@ -39,21 +39,38 @@ jobs:
|
|||||||
|
|
||||||
|
|
||||||
- name: Prepare
|
- name: Prepare
|
||||||
run: lsmod && sudo apt update && sudo apt install zfsutils-linux python3-setuptools && sudo -H pip3 install coverage unittest2 mock==3.0.5 coveralls
|
run: sudo apt update && sudo apt install zfsutils-linux python3-setuptools lzop pigz zstd gzip xz-utils liblz4-tool mbuffer && sudo -H pip3 install coverage unittest2 mock==3.0.5 coveralls
|
||||||
|
|
||||||
|
|
||||||
- name: Regression test
|
- name: Regression test
|
||||||
run: sudo -E ./run_tests
|
run: sudo -E ./tests/run_tests
|
||||||
|
|
||||||
|
|
||||||
- name: Coveralls
|
- name: Coveralls
|
||||||
env:
|
env:
|
||||||
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
|
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
|
||||||
run: coveralls --service=github
|
run: coveralls --service=github || true
|
||||||
|
|
||||||
|
|
||||||
|
ubuntu18_python2:
|
||||||
|
runs-on: ubuntu-18.04
|
||||||
|
|
||||||
|
steps:
|
||||||
|
- name: Checkout
|
||||||
|
uses: actions/checkout@v2.3.4
|
||||||
|
|
||||||
|
- name: Set up Python 2.x
|
||||||
|
uses: actions/setup-python@v2
|
||||||
|
with:
|
||||||
|
python-version: '2.x'
|
||||||
|
|
||||||
|
- name: Prepare
|
||||||
|
run: sudo apt update && sudo apt install zfsutils-linux python-setuptools lzop pigz zstd gzip xz-utils liblz4-tool mbuffer && sudo -H pip install coverage unittest2 mock==3.0.5 coveralls colorama
|
||||||
|
|
||||||
|
- name: Regression test
|
||||||
|
run: sudo -E ./tests/run_tests
|
||||||
|
|
||||||
|
- name: Coveralls
|
||||||
|
env:
|
||||||
|
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
|
||||||
|
COVERALLS_REPO_TOKEN: ${{ secrets.GITHUB_TOKEN }}
|
||||||
|
run: coveralls --service=github || true
|
||||||
|
|||||||
3
.gitignore
vendored
3
.gitignore
vendored
@ -9,3 +9,6 @@ __pycache__
|
|||||||
.coverage
|
.coverage
|
||||||
*.pyc
|
*.pyc
|
||||||
python2.env
|
python2.env
|
||||||
|
venv
|
||||||
|
.idea
|
||||||
|
password.sh
|
||||||
|
|||||||
32
.travis.yml
32
.travis.yml
@ -1,32 +0,0 @@
|
|||||||
#MOVING TO GITHUB ACTIONS
|
|
||||||
|
|
||||||
# jobs:
|
|
||||||
# include:
|
|
||||||
# - os: linux
|
|
||||||
# dist: xenial
|
|
||||||
# language: python
|
|
||||||
# python: 2.7
|
|
||||||
# - os: linux
|
|
||||||
# dist: xenial
|
|
||||||
# language: python
|
|
||||||
# python: 3.6
|
|
||||||
# - os: linux
|
|
||||||
# dist: bionic
|
|
||||||
# language: python
|
|
||||||
# python: 2.7
|
|
||||||
# - os: linux
|
|
||||||
# dist: bionic
|
|
||||||
# language: python
|
|
||||||
# python: 3.6
|
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
# before_install:
|
|
||||||
# - sudo apt-get update
|
|
||||||
# - sudo apt-get install zfsutils-linux
|
|
||||||
|
|
||||||
# script:
|
|
||||||
# # - sudo -E ./ngrok.sh
|
|
||||||
# - sudo -E ./run_tests
|
|
||||||
# # - sudo -E pip --version
|
|
||||||
301
README.md
301
README.md
@ -1,62 +1,42 @@
|
|||||||
|
|
||||||
# ZFS autobackup
|
# ZFS autobackup
|
||||||
|
|
||||||
[](https://coveralls.io/github/psy0rz/zfs_autobackup) [](https://github.com/psy0rz/zfs_autobackup/actions?query=workflow%3A%22Regression+tests%22)
|
[](https://github.com/psy0rz/zfs_autobackup/actions?query=workflow%3A%22Regression+tests%22) [](https://coveralls.io/github/psy0rz/zfs_autobackup) [](https://pypi.org/project/zfs-autobackup/)
|
||||||
|
|
||||||
## New in v3
|
|
||||||
|
|
||||||
* Complete rewrite, cleaner object oriented code.
|
|
||||||
* Python 3 and 2 support.
|
|
||||||
* Automated regression against real ZFS environment.
|
|
||||||
* Installable via [pip](https://pypi.org/project/zfs-autobackup/).
|
|
||||||
* Backwards compatible with your current backups and parameters.
|
|
||||||
* Progressive thinning (via a destroy schedule. default schedule should be fine for most people)
|
|
||||||
* Cleaner output, with optional color support (pip install colorama).
|
|
||||||
* Clear distinction between local and remote output.
|
|
||||||
* Summary at the beginning, displaying what will happen and the current thinning-schedule.
|
|
||||||
* More efficient destroying/skipping snapshots on the fly. (no more space issues if your backup is way behind)
|
|
||||||
* Progress indicator (--progress)
|
|
||||||
* Better property management (--set-properties and --filter-properties)
|
|
||||||
* Better resume handling, automatically abort invalid resumes.
|
|
||||||
* More robust error handling.
|
|
||||||
* Prepared for future enhancements.
|
|
||||||
* Supports raw backups for encryption.
|
|
||||||
* Custom SSH client config.
|
|
||||||
|
|
||||||
|
|
||||||
## Introduction
|
## Introduction
|
||||||
|
|
||||||
This is a tool I wrote to make replicating ZFS datasets easy and reliable.
|
ZFS-autobackup tries to be the most reliable and easiest to use tool, while having all the features.
|
||||||
|
|
||||||
You can either use it as a **backup** tool, **replication** tool or **snapshot** tool.
|
You can either use it as a **backup** tool, **replication** tool or **snapshot** tool.
|
||||||
|
|
||||||
You can select what to backup by setting a custom `ZFS property`. This allows you to set and forget: Configure it so it backups your entire pool, and you never have to worry about backupping again. Even new datasets you create later will be backupped.
|
You can select what to backup by setting a custom `ZFS property`. This makes it easy to add/remove specific datasets, or just backup your whole pool.
|
||||||
|
|
||||||
Other settings are just specified on the commandline. This also makes it easier to setup and test zfs-autobackup and helps you fix all the issues you might encounter. When you're done you can just copy/paste your command to a cron or script.
|
Other settings are just specified on the commandline: Simply setup and test your zfs-autobackup command and fix all the issues you might encounter. When you're done you can just copy/paste your command to a cron or script.
|
||||||
|
|
||||||
Since its using ZFS commands, you can see what its actually doing by specifying `--debug`. This also helps a lot if you run into some strange problem or error. You can just copy-paste the command that fails and play around with it on the commandline. (also something I missed in other tools)
|
Since its using ZFS commands, you can see what it's actually doing by specifying `--debug`. This also helps a lot if you run into some strange problem or error. You can just copy-paste the command that fails and play around with it on the commandline. (something I missed in other tools)
|
||||||
|
|
||||||
An important feature thats missing from other tools is a reliable `--test` option: This allows you to see what zfs-autobackup will do and tune your parameters. It will do everything, except make changes to your zfs datasets.
|
An important feature thats missing from other tools is a reliable `--test` option: This allows you to see what zfs-autobackup will do and tune your parameters. It will do everything, except make changes to your system.
|
||||||
|
|
||||||
Another nice thing is progress reporting: Its very useful with HUGE datasets, when you want to know how many hours/days it will take.
|
|
||||||
|
|
||||||
zfs-autobackup tries to be the easiest to use backup tool for zfs.
|
|
||||||
|
|
||||||
## Features
|
## Features
|
||||||
|
|
||||||
* Works across operating systems: Tested with **Linux**, **FreeBSD/FreeNAS** and **SmartOS**.
|
* Works across operating systems: Tested with **Linux**, **FreeBSD/FreeNAS** and **SmartOS**.
|
||||||
* Works in combination with existing replication systems. (Like Proxmox HA)
|
* Low learning curve: no complex daemons or services, no additional software or networking needed. (Only read this page)
|
||||||
|
* Plays nicely with existing replication systems. (Like Proxmox HA)
|
||||||
* Automatically selects filesystems to backup by looking at a simple ZFS property. (recursive)
|
* Automatically selects filesystems to backup by looking at a simple ZFS property. (recursive)
|
||||||
* Creates consistent snapshots. (takes all snapshots at once, atomic.)
|
* Creates consistent snapshots. (takes all snapshots at once, atomicly.)
|
||||||
* Multiple backups modes:
|
* Multiple backups modes:
|
||||||
* Backup local data on the same server.
|
* Backup local data on the same server.
|
||||||
* "push" local data to a backup-server via SSH.
|
* "push" local data to a backup-server via SSH.
|
||||||
* "pull" remote data from a server via SSH and backup it locally.
|
* "pull" remote data from a server via SSH and backup it locally.
|
||||||
* Or even pull data from a server while pushing the backup to another server.
|
* Or even pull data from a server while pushing the backup to another server. (Zero trust between source and target server)
|
||||||
* Can be scheduled via a simple cronjob or run directly from commandline.
|
* Can be scheduled via a simple cronjob or run directly from commandline.
|
||||||
* Supports resuming of interrupted transfers. (via the zfs extensible_dataset feature)
|
* Supports resuming of interrupted transfers.
|
||||||
* Backups and snapshots can be named to prevent conflicts. (multiple backups from and to the same datasets are no problem)
|
* ZFS encryption support: Can decrypt / encrypt or even re-encrypt datasets during transfer.
|
||||||
* Always creates a new snapshot before starting.
|
* Supports sending with compression. (Using pigz, zstd etc)
|
||||||
|
* IO buffering to speed up transfer.
|
||||||
|
* Bandwidth rate limiting.
|
||||||
|
* Multiple backups from and to the same datasets are no problem.
|
||||||
|
* Creates the snapshot before doing anything else. (assuring you at least have a snapshot if all else fails)
|
||||||
* Checks everything but tries continue on non-fatal errors when possible. (Reports error-count when done)
|
* Checks everything but tries continue on non-fatal errors when possible. (Reports error-count when done)
|
||||||
* Ability to manually 'finish' failed backups to see whats going on.
|
* Ability to manually 'finish' failed backups to see whats going on.
|
||||||
* Easy to debug and has a test-mode. Actual unix commands are printed.
|
* Easy to debug and has a test-mode. Actual unix commands are printed.
|
||||||
@ -64,14 +44,18 @@ zfs-autobackup tries to be the easiest to use backup tool for zfs.
|
|||||||
* Uses zfs-holds on important snapshots so they cant be accidentally destroyed.
|
* Uses zfs-holds on important snapshots so they cant be accidentally destroyed.
|
||||||
* Automatic resuming of failed transfers.
|
* Automatic resuming of failed transfers.
|
||||||
* Can continue from existing common snapshots. (e.g. easy migration)
|
* Can continue from existing common snapshots. (e.g. easy migration)
|
||||||
* Gracefully handles destroyed datasets on source.
|
* Gracefully handles datasets that no longer exist on source.
|
||||||
|
* Support for ZFS sending/receiving through custom pipes.
|
||||||
* Easy installation:
|
* Easy installation:
|
||||||
* Just install zfs-autobackup via pip, or download it manually.
|
* Just install zfs-autobackup via pip.
|
||||||
* Written in python and uses zfs-commands, no 3rd party dependency's or libraries.
|
* Only needs to be installed on one side.
|
||||||
|
* Written in python and uses zfs-commands, no special 3rd party dependency's or compiled libraries needed.
|
||||||
* No separate config files or properties. Just one zfs-autobackup command you can copy/paste in your backup script.
|
* No separate config files or properties. Just one zfs-autobackup command you can copy/paste in your backup script.
|
||||||
|
|
||||||
## Installation
|
## Installation
|
||||||
|
|
||||||
|
You only need to install zfs-autobackup on the side that initiates the backup. The other side doesnt need any extra configration.
|
||||||
|
|
||||||
### Using pip
|
### Using pip
|
||||||
|
|
||||||
The recommended way on most servers is to use [pip](https://pypi.org/project/zfs-autobackup/):
|
The recommended way on most servers is to use [pip](https://pypi.org/project/zfs-autobackup/):
|
||||||
@ -82,6 +66,8 @@ The recommended way on most servers is to use [pip](https://pypi.org/project/zfs
|
|||||||
|
|
||||||
This can also be used to upgrade zfs-autobackup to the newest stable version.
|
This can also be used to upgrade zfs-autobackup to the newest stable version.
|
||||||
|
|
||||||
|
To install the latest beta version add the `--pre` option.
|
||||||
|
|
||||||
### Using easy_install
|
### Using easy_install
|
||||||
|
|
||||||
On older servers you might have to use easy_install
|
On older servers you might have to use easy_install
|
||||||
@ -90,14 +76,6 @@ On older servers you might have to use easy_install
|
|||||||
[root@server ~]# easy_install zfs-autobackup
|
[root@server ~]# easy_install zfs-autobackup
|
||||||
```
|
```
|
||||||
|
|
||||||
### Direct download
|
|
||||||
|
|
||||||
Its also possible to just download <https://raw.githubusercontent.com/psy0rz/zfs_autobackup/v3.0/bin/zfs-autobackup> and run it directly.
|
|
||||||
|
|
||||||
The only requirement that is sometimes missing is the `argparse` python module. Optionally you can install `colorama` for colors.
|
|
||||||
|
|
||||||
It should work with python 2.7 and higher.
|
|
||||||
|
|
||||||
## Example
|
## Example
|
||||||
|
|
||||||
In this example we're going to backup a machine called `server1` to a machine called `backup`.
|
In this example we're going to backup a machine called `server1` to a machine called `backup`.
|
||||||
@ -178,6 +156,8 @@ rpool/swap autobackup:offsite1 true
|
|||||||
...
|
...
|
||||||
```
|
```
|
||||||
|
|
||||||
|
ZFS properties are ```inherited``` by child datasets. Since we've set the property on the highest dataset, we're essentially backupping the whole pool.
|
||||||
|
|
||||||
Because we don't want to backup everything, we can exclude certain filesystem by setting the property to false:
|
Because we don't want to backup everything, we can exclude certain filesystem by setting the property to false:
|
||||||
|
|
||||||
```console
|
```console
|
||||||
@ -193,6 +173,13 @@ rpool/swap autobackup:offsite1 false
|
|||||||
...
|
...
|
||||||
```
|
```
|
||||||
|
|
||||||
|
The autobackup-property can have 3 values:
|
||||||
|
* ```true```: Backup the dataset and all its children
|
||||||
|
* ```false```: Dont backup the dataset and all its children. (used to exclude certain datasets)
|
||||||
|
* ```child```: Only backup the children off the dataset, not the dataset itself.
|
||||||
|
|
||||||
|
Only use the zfs-command to set these properties, not the zpool command.
|
||||||
|
|
||||||
### Running zfs-autobackup
|
### Running zfs-autobackup
|
||||||
|
|
||||||
Run the script on the backup server and pull the data from the server specified by --ssh-source.
|
Run the script on the backup server and pull the data from the server specified by --ssh-source.
|
||||||
@ -292,6 +279,8 @@ root@ws1:~# zfs-autobackup test --verbose
|
|||||||
|
|
||||||
This also allows you to make several snapshots during the day, but only backup the data at night when the server is not busy.
|
This also allows you to make several snapshots during the day, but only backup the data at night when the server is not busy.
|
||||||
|
|
||||||
|
**Note**: In this mode it doesnt take a specified target-schedule into account when thinning, it only knows a snapshot is the common snapshot by looking at the holds. So make sure your source-schedule keeps the snapshots you still want to transfer at a later point.
|
||||||
|
|
||||||
## Thinning out obsolete snapshots
|
## Thinning out obsolete snapshots
|
||||||
|
|
||||||
The thinner is the thing that destroys old snapshots on the source and target.
|
The thinner is the thing that destroys old snapshots on the source and target.
|
||||||
@ -388,6 +377,70 @@ zfs-autobackup will re-evaluate this on every run: As soon as a snapshot doesn't
|
|||||||
|
|
||||||
Snapshots on the source that still have to be send to the target wont be destroyed off course. (If the target still wants them, according to the target schedule)
|
Snapshots on the source that still have to be send to the target wont be destroyed off course. (If the target still wants them, according to the target schedule)
|
||||||
|
|
||||||
|
## How zfs-autobackup handles encryption
|
||||||
|
|
||||||
|
In normal operation datasets are transferred unaltered:
|
||||||
|
|
||||||
|
* Source datasets that are encrypted will be send over as such and stay encrypted at the target side. (In ZFS this is called raw-mode) You dont need keys at the target side if you dont want to access the data.
|
||||||
|
* Source datasets that are plain will stay that way on the target. (Even if the specified target-path IS encrypted.)
|
||||||
|
|
||||||
|
Basically you dont have to do anything or worry about anything.
|
||||||
|
|
||||||
|
### Decrypting/encrypting
|
||||||
|
|
||||||
|
Things get different if you want to change the encryption-state of a dataset during transfer:
|
||||||
|
|
||||||
|
* If you want to decrypt encrypted datasets before sending them, you should use the `--decrypt` option. Datasets will then be stored plain at the target.
|
||||||
|
* If you want to encrypt plain datasets when they are received, you should use the `--encrypt` option. Datasets will then be stored encrypted at the target. (Datasets that are already encrypted will still be sent over unaltered in raw-mode.)
|
||||||
|
* If you also want re-encrypt encrypted datasets with the target-side encryption you can use both options.
|
||||||
|
|
||||||
|
Note 1: The --encrypt option will rely on inheriting encryption parameters from the parent datasets on the target side. You are responsible for setting those up and loading the keys. So --encrypt is no guarantee for encryption: If you dont set it up, it cant encrypt.
|
||||||
|
|
||||||
|
Note 2: Decide what you want at an early stage: If you change the --encrypt or --decrypt parameter after the inital sync you might get weird and wonderfull errors. (nothing dangerous)
|
||||||
|
|
||||||
|
I'll add some tips when the issues start to get in on github. :)
|
||||||
|
|
||||||
|
## Transfer buffering, compression and rate limiting.
|
||||||
|
|
||||||
|
If you're transferring over a slow link it might be useful to use `--compress=zstd-fast`. This will compress the data before sending, so it uses less bandwidth. An alternative to this is to use --zfs-compressed: This will transfer blocks that already have compression intact. (--compress will usually compress much better but uses much more resources. --zfs-compressed uses the least resources, but can be a disadvantage if you want to use a different compression method on the target.)
|
||||||
|
|
||||||
|
You can also limit the datarate by using the `--rate` option.
|
||||||
|
|
||||||
|
The `--buffer` option might also help since it acts as an IO buffer: zfs send can vary wildly between completely idle and huge bursts of data. When zfs send is idle, the buffer will continue transferring data over the slow link.
|
||||||
|
|
||||||
|
It's also possible to add custom send or receive pipes with `--send-pipe` and `--recv-pipe`.
|
||||||
|
|
||||||
|
These options all work together and the buffer on the receiving side is only added if appropriate. When all options are active:
|
||||||
|
|
||||||
|
#### On the sending side:
|
||||||
|
|
||||||
|
zfs send -> send buffer -> custom send pipes -> compression -> transfer rate limiter
|
||||||
|
|
||||||
|
#### On the receiving side:
|
||||||
|
decompression -> custom recv pipes -> buffer -> zfs recv
|
||||||
|
|
||||||
|
## Running custom commands before and after snapshotting
|
||||||
|
|
||||||
|
You can run commands before and after the snapshot to freeze databases to make the on for example to make the on-disk data consistent before snapshotting.
|
||||||
|
|
||||||
|
The commands will be executed on the source side. Use the `--pre-snapshot-cmd` and `--post-snapshot-cmd` options for this.
|
||||||
|
|
||||||
|
For example:
|
||||||
|
|
||||||
|
```sh
|
||||||
|
zfs-autobackup \
|
||||||
|
--pre-snapshot-cmd 'daemon -f jexec mysqljail1 mysql -s -e "set autocommit=0;flush logs;flush tables with read lock;\\! echo \$\$ > /tmp/mysql_lock.pid && sleep 60"' \
|
||||||
|
--pre-snapshot-cmd 'daemon -f jexec mysqljail2 mysql -s -e "set autocommit=0;flush logs;flush tables with read lock;\\! echo \$\$ > /tmp/mysql_lock.pid && sleep 60"' \
|
||||||
|
--post-snapshot-cmd 'pkill -F /jails/mysqljail1/tmp/mysql_lock.pid' \
|
||||||
|
--post-snapshot-cmd 'pkill -F /jails/mysqljail2/tmp/mysql_lock.pid' \
|
||||||
|
backupfs1
|
||||||
|
```
|
||||||
|
|
||||||
|
Failure handling during pre/post commands:
|
||||||
|
|
||||||
|
* If a pre-command fails, zfs-autobackup will exit with an error. (after executing the post-commands)
|
||||||
|
* All post-commands are always executed. Even if the pre-commands or actual snapshot have failed. This way you can be sure that stuff is always cleanedup and unfreezed.
|
||||||
|
|
||||||
## Tips
|
## Tips
|
||||||
|
|
||||||
* Use ```--debug``` if something goes wrong and you want to see the commands that are executed. This will also stop at the first error.
|
* Use ```--debug``` if something goes wrong and you want to see the commands that are executed. This will also stop at the first error.
|
||||||
@ -396,7 +449,35 @@ Snapshots on the source that still have to be send to the target wont be destroy
|
|||||||
* Use ```--clear-refreservation``` to save space on your backup server.
|
* Use ```--clear-refreservation``` to save space on your backup server.
|
||||||
* Use ```--clear-mountpoint``` to prevent the target server from mounting the backupped filesystem in the wrong place during a reboot.
|
* Use ```--clear-mountpoint``` to prevent the target server from mounting the backupped filesystem in the wrong place during a reboot.
|
||||||
|
|
||||||
### Speeding up SSH
|
### Performance tips
|
||||||
|
|
||||||
|
If you have a large number of datasets its important to keep the following tips in mind.
|
||||||
|
|
||||||
|
Also it might help to use the --buffer option to add IO buffering during the data transfer. This might speed up things since it smooths out sudden IO bursts that are frequent during a zfs send or recv.
|
||||||
|
|
||||||
|
#### Some statistics
|
||||||
|
|
||||||
|
To get some idea of how fast zfs-autobackup is, I did some test on my laptop, with a SKHynix_HFS512GD9TNI-L2B0B disk. I'm using zfs 2.0.2.
|
||||||
|
|
||||||
|
I created 100 empty datasets and measured the total runtime of zfs-autobackup. I used all the performance tips below. (--no-holds, --allow-empty, ssh ControlMaster)
|
||||||
|
|
||||||
|
* without ssh: 15 seconds. (>6 datasets/s)
|
||||||
|
* either ssh-target or ssh-source=localhost: 20 seconds (5 datasets/s)
|
||||||
|
* both ssh-target and ssh-source=localhost: 24 seconds (4 datasets/s)
|
||||||
|
|
||||||
|
To be bold I created 2500 datasets, but that also was no problem. So it seems it should be possible to use zfs-autobackup with thousands of datasets.
|
||||||
|
|
||||||
|
If you need more performance let me know.
|
||||||
|
|
||||||
|
NOTE: There is actually a performance regression in ZFS version 2: https://github.com/openzfs/zfs/issues/11560 Use --no-progress as workaround.
|
||||||
|
|
||||||
|
#### Less work
|
||||||
|
|
||||||
|
You can make zfs-autobackup generate less work by using --no-holds and --allow-empty.
|
||||||
|
|
||||||
|
This saves a lot of extra zfs-commands per dataset.
|
||||||
|
|
||||||
|
#### Speeding up SSH
|
||||||
|
|
||||||
You can make your ssh connections persistent and greatly speed up zfs-autobackup:
|
You can make your ssh connections persistent and greatly speed up zfs-autobackup:
|
||||||
|
|
||||||
@ -431,25 +512,25 @@ Look in man ssh_config for many more options.
|
|||||||
|
|
||||||
## Usage
|
## Usage
|
||||||
|
|
||||||
Here you find all the options:
|
|
||||||
|
|
||||||
```console
|
```console
|
||||||
[root@server ~]# zfs-autobackup --help
|
usage: zfs-autobackup [-h] [--ssh-config CONFIG-FILE] [--ssh-source USER@HOST]
|
||||||
usage: zfs-autobackup [-h] [--ssh-config SSH_CONFIG] [--ssh-source SSH_SOURCE]
|
[--ssh-target USER@HOST] [--keep-source SCHEDULE]
|
||||||
[--ssh-target SSH_TARGET] [--keep-source KEEP_SOURCE]
|
[--keep-target SCHEDULE] [--pre-snapshot-cmd COMMAND]
|
||||||
[--keep-target KEEP_TARGET] [--other-snapshots]
|
[--post-snapshot-cmd COMMAND] [--other-snapshots]
|
||||||
[--no-snapshot] [--no-send] [--min-change MIN_CHANGE]
|
[--no-snapshot] [--no-send] [--no-thinning] [--no-holds]
|
||||||
[--allow-empty] [--ignore-replicated] [--no-holds]
|
[--min-change BYTES] [--allow-empty] [--ignore-replicated]
|
||||||
[--strip-path STRIP_PATH] [--clear-refreservation]
|
[--strip-path N] [--clear-refreservation]
|
||||||
[--clear-mountpoint]
|
[--clear-mountpoint] [--filter-properties PROPERTY,...]
|
||||||
[--filter-properties FILTER_PROPERTIES]
|
[--set-properties PROPERTY=VALUE,...] [--rollback]
|
||||||
[--set-properties SET_PROPERTIES] [--rollback]
|
[--destroy-incompatible] [--destroy-missing SCHEDULE]
|
||||||
[--destroy-incompatible] [--ignore-transfer-errors]
|
[--ignore-transfer-errors] [--decrypt] [--encrypt]
|
||||||
[--raw] [--test] [--verbose] [--debug] [--debug-output]
|
[--zfs-compressed] [--test] [--verbose] [--debug]
|
||||||
[--progress]
|
[--debug-output] [--progress] [--send-pipe COMMAND]
|
||||||
|
[--recv-pipe COMMAND] [--compress TYPE] [--rate DATARATE]
|
||||||
|
[--buffer SIZE]
|
||||||
backup-name [target-path]
|
backup-name [target-path]
|
||||||
|
|
||||||
zfs-autobackup v3.0-rc12 - Copyright 2020 E.H.Eefting (edwin@datux.nl)
|
zfs-autobackup v3.1 - (c)2021 E.H.Eefting (edwin@datux.nl)
|
||||||
|
|
||||||
positional arguments:
|
positional arguments:
|
||||||
backup-name Name of the backup (you should set the zfs property
|
backup-name Name of the backup (you should set the zfs property
|
||||||
@ -461,38 +542,41 @@ positional arguments:
|
|||||||
|
|
||||||
optional arguments:
|
optional arguments:
|
||||||
-h, --help show this help message and exit
|
-h, --help show this help message and exit
|
||||||
--ssh-config SSH_CONFIG
|
--ssh-config CONFIG-FILE
|
||||||
Custom ssh client config
|
Custom ssh client config
|
||||||
--ssh-source SSH_SOURCE
|
--ssh-source USER@HOST
|
||||||
Source host to get backup from. (user@hostname)
|
Source host to get backup from.
|
||||||
Default None.
|
--ssh-target USER@HOST
|
||||||
--ssh-target SSH_TARGET
|
Target host to push backup to.
|
||||||
Target host to push backup to. (user@hostname) Default
|
--keep-source SCHEDULE
|
||||||
None.
|
|
||||||
--keep-source KEEP_SOURCE
|
|
||||||
Thinning schedule for old source snapshots. Default:
|
Thinning schedule for old source snapshots. Default:
|
||||||
10,1d1w,1w1m,1m1y
|
10,1d1w,1w1m,1m1y
|
||||||
--keep-target KEEP_TARGET
|
--keep-target SCHEDULE
|
||||||
Thinning schedule for old target snapshots. Default:
|
Thinning schedule for old target snapshots. Default:
|
||||||
10,1d1w,1w1m,1m1y
|
10,1d1w,1w1m,1m1y
|
||||||
|
--pre-snapshot-cmd COMMAND
|
||||||
|
Run COMMAND before snapshotting (can be used multiple
|
||||||
|
times.
|
||||||
|
--post-snapshot-cmd COMMAND
|
||||||
|
Run COMMAND after snapshotting (can be used multiple
|
||||||
|
times.
|
||||||
--other-snapshots Send over other snapshots as well, not just the ones
|
--other-snapshots Send over other snapshots as well, not just the ones
|
||||||
created by this tool.
|
created by this tool.
|
||||||
--no-snapshot Don't create new snapshots (useful for finishing
|
--no-snapshot Don't create new snapshots (useful for finishing
|
||||||
uncompleted backups, or cleanups)
|
uncompleted backups, or cleanups)
|
||||||
--no-send Don't send snapshots (useful for cleanups, or if you
|
--no-send Don't send snapshots (useful for cleanups, or if you
|
||||||
want a serperate send-cronjob)
|
want a serperate send-cronjob)
|
||||||
--min-change MIN_CHANGE
|
--no-thinning Do not destroy any snapshots.
|
||||||
Number of bytes written after which we consider a
|
--no-holds Don't hold snapshots. (Faster. Allows you to destroy
|
||||||
|
common snapshot.)
|
||||||
|
--min-change BYTES Number of bytes written after which we consider a
|
||||||
dataset changed (default 1)
|
dataset changed (default 1)
|
||||||
--allow-empty If nothing has changed, still create empty snapshots.
|
--allow-empty If nothing has changed, still create empty snapshots.
|
||||||
(same as --min-change=0)
|
(same as --min-change=0)
|
||||||
--ignore-replicated Ignore datasets that seem to be replicated some other
|
--ignore-replicated Ignore datasets that seem to be replicated some other
|
||||||
way. (No changes since lastest snapshot. Useful for
|
way. (No changes since lastest snapshot. Useful for
|
||||||
proxmox HA replication)
|
proxmox HA replication)
|
||||||
--no-holds Don't lock snapshots on the source. (Useful to allow
|
--strip-path N Number of directories to strip from target path (use 1
|
||||||
proxmox HA replication to switches nodes)
|
|
||||||
--strip-path STRIP_PATH
|
|
||||||
Number of directories to strip from target path (use 1
|
|
||||||
when cloning zones between 2 SmartOS machines)
|
when cloning zones between 2 SmartOS machines)
|
||||||
--clear-refreservation
|
--clear-refreservation
|
||||||
Filter "refreservation" property. (recommended, safes
|
Filter "refreservation" property. (recommended, safes
|
||||||
@ -500,11 +584,11 @@ optional arguments:
|
|||||||
--clear-mountpoint Set property canmount=noauto for new datasets.
|
--clear-mountpoint Set property canmount=noauto for new datasets.
|
||||||
(recommended, prevents mount conflicts. same as --set-
|
(recommended, prevents mount conflicts. same as --set-
|
||||||
properties canmount=noauto)
|
properties canmount=noauto)
|
||||||
--filter-properties FILTER_PROPERTIES
|
--filter-properties PROPERTY,...
|
||||||
List of properties to "filter" when receiving
|
List of properties to "filter" when receiving
|
||||||
filesystems. (you can still restore them with zfs
|
filesystems. (you can still restore them with zfs
|
||||||
inherit -S)
|
inherit -S)
|
||||||
--set-properties SET_PROPERTIES
|
--set-properties PROPERTY=VALUE,...
|
||||||
List of propererties to override when receiving
|
List of propererties to override when receiving
|
||||||
filesystems. (you can still restore them with zfs
|
filesystems. (you can still restore them with zfs
|
||||||
inherit -S)
|
inherit -S)
|
||||||
@ -514,22 +598,39 @@ optional arguments:
|
|||||||
--destroy-incompatible
|
--destroy-incompatible
|
||||||
Destroy incompatible snapshots on target. Use with
|
Destroy incompatible snapshots on target. Use with
|
||||||
care! (implies --rollback)
|
care! (implies --rollback)
|
||||||
|
--destroy-missing SCHEDULE
|
||||||
|
Destroy datasets on target that are missing on the
|
||||||
|
source. Specify the time since the last snapshot, e.g:
|
||||||
|
--destroy-missing 30d
|
||||||
--ignore-transfer-errors
|
--ignore-transfer-errors
|
||||||
Ignore transfer errors (still checks if received
|
Ignore transfer errors (still checks if received
|
||||||
filesystem exists. useful for acltype errors)
|
filesystem exists. useful for acltype errors)
|
||||||
--raw For encrypted datasets, send data exactly as it exists
|
--decrypt Decrypt data before sending it over.
|
||||||
on disk.
|
--encrypt Encrypt data after receiving it.
|
||||||
|
--zfs-compressed Transfer blocks that already have zfs-compression as-
|
||||||
|
is.
|
||||||
--test dont change anything, just show what would be done
|
--test dont change anything, just show what would be done
|
||||||
(still does all read-only operations)
|
(still does all read-only operations)
|
||||||
--verbose verbose output
|
--verbose verbose output
|
||||||
--debug Show zfs commands that are executed, stops after an
|
--debug Show zfs commands that are executed, stops after an
|
||||||
exception.
|
exception.
|
||||||
--debug-output Show zfs commands and their output/exit codes. (noisy)
|
--debug-output Show zfs commands and their output/exit codes. (noisy)
|
||||||
--progress show zfs progress output (to stderr). Enabled by
|
--progress show zfs progress output. Enabled automaticly on ttys.
|
||||||
default on ttys.
|
(use --no-progress to disable)
|
||||||
|
--send-pipe COMMAND pipe zfs send output through COMMAND (can be used
|
||||||
|
multiple times)
|
||||||
|
--recv-pipe COMMAND pipe zfs recv input through COMMAND (can be used
|
||||||
|
multiple times)
|
||||||
|
--compress TYPE Use compression during transfer, defaults to zstd-adapt
|
||||||
|
if TYPE is not specified. (gzip, pigz-fast, pigz-slow,
|
||||||
|
zstd-fast, zstd-slow, zstd-adapt, xz, lzo, lz4)
|
||||||
|
--rate DATARATE Limit data transfer rate (e.g. 128K. requires
|
||||||
|
mbuffer.)
|
||||||
|
--buffer SIZE Add zfs send and recv buffers to smooth out IO bursts.
|
||||||
|
(e.g. 128M. requires mbuffer)
|
||||||
|
|
||||||
|
Full manual at: https://github.com/psy0rz/zfs_autobackup
|
||||||
|
|
||||||
When a filesystem fails, zfs_backup will continue and report the number of
|
|
||||||
failures at that end. Also the exit code will indicate the number of failures.
|
|
||||||
```
|
```
|
||||||
|
|
||||||
## Troubleshooting
|
## Troubleshooting
|
||||||
@ -542,11 +643,23 @@ You forgot to setup automatic login via SSH keys, look in the example how to do
|
|||||||
|
|
||||||
This usually means you've created a new snapshot on the target side during a backup. If you restart zfs-autobackup, it will automaticly abort the invalid partially received snapshot and start over.
|
This usually means you've created a new snapshot on the target side during a backup. If you restart zfs-autobackup, it will automaticly abort the invalid partially received snapshot and start over.
|
||||||
|
|
||||||
|
### It says 'cannot receive incremental stream: destination has been modified since most recent snapshot'
|
||||||
|
|
||||||
|
This means files have been modified on the target side somehow.
|
||||||
|
|
||||||
|
You can use --rollback to automaticly rollback such changes. Also try destroying the target dataset and using --clear-mountpoint on the next run. This way it wont get mounted.
|
||||||
|
|
||||||
### It says 'internal error: Invalid argument'
|
### It says 'internal error: Invalid argument'
|
||||||
|
|
||||||
In some cases (Linux -> FreeBSD) this means certain properties are not fully supported on the target system.
|
In some cases (Linux -> FreeBSD) this means certain properties are not fully supported on the target system.
|
||||||
|
|
||||||
Try using something like: --filter-properties xattr
|
Try using something like: --filter-properties xattr or --ignore-transfer-errors.
|
||||||
|
|
||||||
|
### zfs receive fails, but snapshot seems to be received successful.
|
||||||
|
|
||||||
|
This happens if you transfer between different Operating systems/zfs versions or feature sets.
|
||||||
|
|
||||||
|
Try using the --ignore-transfer-errors option. This will ignore the error. It will still check if the snapshot is actually received correctly.
|
||||||
|
|
||||||
## Restore example
|
## Restore example
|
||||||
|
|
||||||
@ -643,21 +756,17 @@ for HOST in $HOSTS; do
|
|||||||
ssh $HOST "zfs set autobackup:data_$NAME=child rpool/data"
|
ssh $HOST "zfs set autobackup:data_$NAME=child rpool/data"
|
||||||
|
|
||||||
#backup data filesystems to a common directory
|
#backup data filesystems to a common directory
|
||||||
zfs-autobackup --keep-source=1d1w,1w1m --ssh-source $HOST data_$NAME $TARGET/data --clear-mountpoint --clear-refreservation --ignore-transfer-errors --strip-path 2 --verbose --ignore-replicated --min-change 200000 --no-holds $@
|
zfs-autobackup --keep-source=1d1w,1w1m --ssh-source $HOST data_$NAME $TARGET/data --clear-mountpoint --clear-refreservation --ignore-transfer-errors --strip-path 2 --verbose --ignore-replicated --min-change 300000 --no-holds $@
|
||||||
|
|
||||||
zabbix-job-status backup_$HOST""_data_$NAME daily $? >/dev/null 2>/dev/null
|
zabbix-job-status backup_$HOST""_data_$NAME daily $? >/dev/null 2>/dev/null
|
||||||
|
|
||||||
done
|
done
|
||||||
```
|
```
|
||||||
|
|
||||||
This script will also send the backup status to Zabbix. (if you've installed my zabbix-job-status script)
|
This script will also send the backup status to Zabbix. (if you've installed my zabbix-job-status script https://github.com/psy0rz/stuff/tree/master/zabbix-jobs)
|
||||||
|
|
||||||
# Sponsor list
|
# Sponsor list
|
||||||
|
|
||||||
This project was sponsorred by:
|
This project was sponsorred by:
|
||||||
|
|
||||||
* (None so far)
|
* JetBrains (Provided me with a license for their whole professional product line, https://www.jetbrains.com/pycharm/ )
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
|||||||
1886
bin/zfs-autobackup
1886
bin/zfs-autobackup
File diff suppressed because it is too large
Load Diff
@ -1 +0,0 @@
|
|||||||
zfs-autobackup
|
|
||||||
17
ngrok.sh
17
ngrok.sh
@ -1,17 +0,0 @@
|
|||||||
#!/bin/bash
|
|
||||||
if ! [ -e ngrok ]; then
|
|
||||||
wget -O ngrok.zip https://bin.equinox.io/c/4VmDzA7iaHb/ngrok-stable-linux-amd64.zip
|
|
||||||
unzip ngrok.zip
|
|
||||||
fi
|
|
||||||
{
|
|
||||||
mkfifo pipe
|
|
||||||
echo "Executing nc"
|
|
||||||
nc -k -l -v 8888 <pipe | ( while true; do bash >pipe 2>&1; echo "restarting" ;sleep 1; done )
|
|
||||||
killall -SIGINT ngrok && echo "ngrok terminated"
|
|
||||||
} &
|
|
||||||
{
|
|
||||||
echo "Executing ngrok"
|
|
||||||
./ngrok authtoken $NGROK_TOKEN
|
|
||||||
./ngrok tcp 8888 --log=stdout
|
|
||||||
} &
|
|
||||||
wait
|
|
||||||
@ -1,6 +1,6 @@
|
|||||||
colorama
|
colorama
|
||||||
argparse
|
argparse
|
||||||
coverage==4.5.4
|
coverage
|
||||||
python-coveralls
|
python-coveralls
|
||||||
unittest2
|
unittest2
|
||||||
mock
|
mock
|
||||||
|
|||||||
@ -14,4 +14,3 @@ source tokentest
|
|||||||
|
|
||||||
python3 -m twine check dist/*
|
python3 -m twine check dist/*
|
||||||
python3 -m twine upload --repository-url https://test.pypi.org/legacy/ dist/* --verbose
|
python3 -m twine upload --repository-url https://test.pypi.org/legacy/ dist/* --verbose
|
||||||
|
|
||||||
14
setup.py
14
setup.py
@ -1,15 +1,13 @@
|
|||||||
import setuptools
|
import setuptools
|
||||||
import bin.zfs_autobackup
|
from zfs_autobackup.ZfsAutobackup import ZfsAutobackup
|
||||||
import os
|
import os
|
||||||
|
|
||||||
os.system("git tag -m ' ' -a v{}".format(bin.zfs_autobackup.VERSION))
|
|
||||||
|
|
||||||
with open("README.md", "r") as fh:
|
with open("README.md", "r") as fh:
|
||||||
long_description = fh.read()
|
long_description = fh.read()
|
||||||
|
|
||||||
setuptools.setup(
|
setuptools.setup(
|
||||||
name="zfs_autobackup",
|
name="zfs_autobackup",
|
||||||
version=bin.zfs_autobackup.VERSION,
|
version=ZfsAutobackup.VERSION,
|
||||||
author="Edwin Eefting",
|
author="Edwin Eefting",
|
||||||
author_email="edwin@datux.nl",
|
author_email="edwin@datux.nl",
|
||||||
description="ZFS autobackup is used to periodicly backup ZFS filesystems to other locations. It tries to be the most friendly to use and easy to debug ZFS backup tool.",
|
description="ZFS autobackup is used to periodicly backup ZFS filesystems to other locations. It tries to be the most friendly to use and easy to debug ZFS backup tool.",
|
||||||
@ -17,8 +15,14 @@ setuptools.setup(
|
|||||||
long_description_content_type="text/markdown",
|
long_description_content_type="text/markdown",
|
||||||
|
|
||||||
url="https://github.com/psy0rz/zfs_autobackup",
|
url="https://github.com/psy0rz/zfs_autobackup",
|
||||||
scripts=["bin/zfs-autobackup"],
|
entry_points={
|
||||||
|
'console_scripts':
|
||||||
|
[
|
||||||
|
'zfs-autobackup = zfs_autobackup:cli',
|
||||||
|
]
|
||||||
|
},
|
||||||
packages=setuptools.find_packages(),
|
packages=setuptools.find_packages(),
|
||||||
|
|
||||||
classifiers=[
|
classifiers=[
|
||||||
"Programming Language :: Python :: 2",
|
"Programming Language :: Python :: 2",
|
||||||
"Programming Language :: Python :: 3",
|
"Programming Language :: Python :: 3",
|
||||||
|
|||||||
@ -1,275 +0,0 @@
|
|||||||
|
|
||||||
from basetest import *
|
|
||||||
|
|
||||||
|
|
||||||
class TestZfsNode(unittest2.TestCase):
|
|
||||||
|
|
||||||
def setUp(self):
|
|
||||||
prepare_zpools()
|
|
||||||
self.longMessage=True
|
|
||||||
|
|
||||||
# generate a resumable state
|
|
||||||
#NOTE: this generates two resumable test_target1/test_source1/fs1 and test_target1/test_source1/fs1/sub
|
|
||||||
def generate_resume(self):
|
|
||||||
|
|
||||||
r=shelltest("zfs set compress=off test_source1 test_target1")
|
|
||||||
|
|
||||||
#big change on source
|
|
||||||
r=shelltest("dd if=/dev/zero of=/test_source1/fs1/data bs=250M count=1")
|
|
||||||
|
|
||||||
#waste space on target
|
|
||||||
r=shelltest("dd if=/dev/zero of=/test_target1/waste bs=250M count=1")
|
|
||||||
|
|
||||||
#should fail and leave resume token (if supported)
|
|
||||||
self.assertTrue(ZfsAutobackup("test test_target1 --verbose".split(" ")).run())
|
|
||||||
|
|
||||||
#free up space
|
|
||||||
r=shelltest("rm /test_target1/waste")
|
|
||||||
#sync
|
|
||||||
r=shelltest("zfs umount test_target1")
|
|
||||||
r=shelltest("zfs mount test_target1")
|
|
||||||
|
|
||||||
|
|
||||||
#resume initial backup
|
|
||||||
def test_initial_resume(self):
|
|
||||||
|
|
||||||
#inital backup, leaves resume token
|
|
||||||
with patch('time.strftime', return_value="20101111000000"):
|
|
||||||
self.generate_resume()
|
|
||||||
|
|
||||||
#--test should resume and succeed
|
|
||||||
with OutputIO() as buf:
|
|
||||||
with redirect_stdout(buf):
|
|
||||||
self.assertFalse(ZfsAutobackup("test test_target1 --verbose --test".split(" ")).run())
|
|
||||||
|
|
||||||
print(buf.getvalue())
|
|
||||||
|
|
||||||
#did we really resume?
|
|
||||||
if "0.6.5" in ZFS_USERSPACE:
|
|
||||||
#abort this late, for beter coverage
|
|
||||||
self.skipTest("Resume not supported in this ZFS userspace version")
|
|
||||||
else:
|
|
||||||
self.assertIn(": resuming", buf.getvalue())
|
|
||||||
|
|
||||||
|
|
||||||
#should resume and succeed
|
|
||||||
with OutputIO() as buf:
|
|
||||||
with redirect_stdout(buf):
|
|
||||||
self.assertFalse(ZfsAutobackup("test test_target1 --verbose".split(" ")).run())
|
|
||||||
|
|
||||||
print(buf.getvalue())
|
|
||||||
|
|
||||||
#did we really resume?
|
|
||||||
if "0.6.5" in ZFS_USERSPACE:
|
|
||||||
#abort this late, for beter coverage
|
|
||||||
self.skipTest("Resume not supported in this ZFS userspace version")
|
|
||||||
else:
|
|
||||||
self.assertIn(": resuming", buf.getvalue())
|
|
||||||
|
|
||||||
r=shelltest("zfs list -H -o name -r -t all test_target1")
|
|
||||||
self.assertMultiLineEqual(r,"""
|
|
||||||
test_target1
|
|
||||||
test_target1/test_source1
|
|
||||||
test_target1/test_source1/fs1
|
|
||||||
test_target1/test_source1/fs1@test-20101111000000
|
|
||||||
test_target1/test_source1/fs1/sub
|
|
||||||
test_target1/test_source1/fs1/sub@test-20101111000000
|
|
||||||
test_target1/test_source2
|
|
||||||
test_target1/test_source2/fs2
|
|
||||||
test_target1/test_source2/fs2/sub
|
|
||||||
test_target1/test_source2/fs2/sub@test-20101111000000
|
|
||||||
""")
|
|
||||||
|
|
||||||
|
|
||||||
#resume incremental backup
|
|
||||||
def test_incremental_resume(self):
|
|
||||||
|
|
||||||
#initial backup
|
|
||||||
with patch('time.strftime', return_value="20101111000000"):
|
|
||||||
self.assertFalse(ZfsAutobackup("test test_target1 --verbose --allow-empty".split(" ")).run())
|
|
||||||
|
|
||||||
#incremental backup leaves resume token
|
|
||||||
with patch('time.strftime', return_value="20101111000001"):
|
|
||||||
self.generate_resume()
|
|
||||||
|
|
||||||
#--test should resume and succeed
|
|
||||||
with OutputIO() as buf:
|
|
||||||
with redirect_stdout(buf):
|
|
||||||
self.assertFalse(ZfsAutobackup("test test_target1 --verbose --test".split(" ")).run())
|
|
||||||
|
|
||||||
print(buf.getvalue())
|
|
||||||
|
|
||||||
#did we really resume?
|
|
||||||
if "0.6.5" in ZFS_USERSPACE:
|
|
||||||
#abort this late, for beter coverage
|
|
||||||
self.skipTest("Resume not supported in this ZFS userspace version")
|
|
||||||
else:
|
|
||||||
self.assertIn(": resuming", buf.getvalue())
|
|
||||||
|
|
||||||
#should resume and succeed
|
|
||||||
with OutputIO() as buf:
|
|
||||||
with redirect_stdout(buf):
|
|
||||||
self.assertFalse(ZfsAutobackup("test test_target1 --verbose".split(" ")).run())
|
|
||||||
|
|
||||||
print(buf.getvalue())
|
|
||||||
|
|
||||||
#did we really resume?
|
|
||||||
if "0.6.5" in ZFS_USERSPACE:
|
|
||||||
#abort this late, for beter coverage
|
|
||||||
self.skipTest("Resume not supported in this ZFS userspace version")
|
|
||||||
else:
|
|
||||||
self.assertIn(": resuming", buf.getvalue())
|
|
||||||
|
|
||||||
r=shelltest("zfs list -H -o name -r -t all test_target1")
|
|
||||||
self.assertMultiLineEqual(r,"""
|
|
||||||
test_target1
|
|
||||||
test_target1/test_source1
|
|
||||||
test_target1/test_source1/fs1
|
|
||||||
test_target1/test_source1/fs1@test-20101111000000
|
|
||||||
test_target1/test_source1/fs1@test-20101111000001
|
|
||||||
test_target1/test_source1/fs1/sub
|
|
||||||
test_target1/test_source1/fs1/sub@test-20101111000000
|
|
||||||
test_target1/test_source2
|
|
||||||
test_target1/test_source2/fs2
|
|
||||||
test_target1/test_source2/fs2/sub
|
|
||||||
test_target1/test_source2/fs2/sub@test-20101111000000
|
|
||||||
""")
|
|
||||||
|
|
||||||
|
|
||||||
# generate an invalid resume token, and verify if its aborted automaticly
|
|
||||||
def test_initial_resumeabort(self):
|
|
||||||
|
|
||||||
if "0.6.5" in ZFS_USERSPACE:
|
|
||||||
self.skipTest("Resume not supported in this ZFS userspace version")
|
|
||||||
|
|
||||||
#inital backup, leaves resume token
|
|
||||||
with patch('time.strftime', return_value="20101111000000"):
|
|
||||||
self.generate_resume()
|
|
||||||
|
|
||||||
#remove corresponding source snapshot, so it becomes invalid
|
|
||||||
shelltest("zfs destroy test_source1/fs1@test-20101111000000")
|
|
||||||
|
|
||||||
#NOTE: it can only abort the initial dataset if it has no subs
|
|
||||||
shelltest("zfs destroy test_target1/test_source1/fs1/sub; true")
|
|
||||||
|
|
||||||
#--test try again, should abort old resume
|
|
||||||
with patch('time.strftime', return_value="20101111000001"):
|
|
||||||
self.assertFalse(ZfsAutobackup("test test_target1 --verbose --test".split(" ")).run())
|
|
||||||
|
|
||||||
#try again, should abort old resume
|
|
||||||
with patch('time.strftime', return_value="20101111000001"):
|
|
||||||
self.assertFalse(ZfsAutobackup("test test_target1 --verbose".split(" ")).run())
|
|
||||||
|
|
||||||
r=shelltest("zfs list -H -o name -r -t all test_target1")
|
|
||||||
self.assertMultiLineEqual(r,"""
|
|
||||||
test_target1
|
|
||||||
test_target1/test_source1
|
|
||||||
test_target1/test_source1/fs1
|
|
||||||
test_target1/test_source1/fs1@test-20101111000001
|
|
||||||
test_target1/test_source1/fs1/sub
|
|
||||||
test_target1/test_source1/fs1/sub@test-20101111000000
|
|
||||||
test_target1/test_source2
|
|
||||||
test_target1/test_source2/fs2
|
|
||||||
test_target1/test_source2/fs2/sub
|
|
||||||
test_target1/test_source2/fs2/sub@test-20101111000000
|
|
||||||
""")
|
|
||||||
|
|
||||||
|
|
||||||
# generate an invalid resume token, and verify if its aborted automaticly
|
|
||||||
def test_incremental_resumeabort(self):
|
|
||||||
|
|
||||||
if "0.6.5" in ZFS_USERSPACE:
|
|
||||||
self.skipTest("Resume not supported in this ZFS userspace version")
|
|
||||||
|
|
||||||
#initial backup
|
|
||||||
with patch('time.strftime', return_value="20101111000000"):
|
|
||||||
self.assertFalse(ZfsAutobackup("test test_target1 --verbose --allow-empty".split(" ")).run())
|
|
||||||
|
|
||||||
#icremental backup, leaves resume token
|
|
||||||
with patch('time.strftime', return_value="20101111000001"):
|
|
||||||
self.generate_resume()
|
|
||||||
|
|
||||||
#remove corresponding source snapshot, so it becomes invalid
|
|
||||||
shelltest("zfs destroy test_source1/fs1@test-20101111000001")
|
|
||||||
|
|
||||||
#--test try again, should abort old resume
|
|
||||||
with patch('time.strftime', return_value="20101111000002"):
|
|
||||||
self.assertFalse(ZfsAutobackup("test test_target1 --verbose --test".split(" ")).run())
|
|
||||||
|
|
||||||
#try again, should abort old resume
|
|
||||||
with patch('time.strftime', return_value="20101111000002"):
|
|
||||||
self.assertFalse(ZfsAutobackup("test test_target1 --verbose".split(" ")).run())
|
|
||||||
|
|
||||||
r=shelltest("zfs list -H -o name -r -t all test_target1")
|
|
||||||
self.assertMultiLineEqual(r,"""
|
|
||||||
test_target1
|
|
||||||
test_target1/test_source1
|
|
||||||
test_target1/test_source1/fs1
|
|
||||||
test_target1/test_source1/fs1@test-20101111000000
|
|
||||||
test_target1/test_source1/fs1@test-20101111000002
|
|
||||||
test_target1/test_source1/fs1/sub
|
|
||||||
test_target1/test_source1/fs1/sub@test-20101111000000
|
|
||||||
test_target1/test_source2
|
|
||||||
test_target1/test_source2/fs2
|
|
||||||
test_target1/test_source2/fs2/sub
|
|
||||||
test_target1/test_source2/fs2/sub@test-20101111000000
|
|
||||||
""")
|
|
||||||
|
|
||||||
|
|
||||||
#create a resume situation, where the other side doesnt want the snapshot anymore ( should abort resume )
|
|
||||||
def test_abort_unwanted_resume(self):
|
|
||||||
|
|
||||||
if "0.6.5" in ZFS_USERSPACE:
|
|
||||||
self.skipTest("Resume not supported in this ZFS userspace version")
|
|
||||||
|
|
||||||
with patch('time.strftime', return_value="20101111000000"):
|
|
||||||
self.assertFalse(ZfsAutobackup("test test_target1 --verbose".split(" ")).run())
|
|
||||||
|
|
||||||
#generate resume
|
|
||||||
with patch('time.strftime', return_value="20101111000001"):
|
|
||||||
self.generate_resume()
|
|
||||||
|
|
||||||
with OutputIO() as buf:
|
|
||||||
with redirect_stdout(buf):
|
|
||||||
#incremental, doesnt want previous anymore
|
|
||||||
with patch('time.strftime', return_value="20101111000002"):
|
|
||||||
self.assertFalse(ZfsAutobackup("test test_target1 --verbose --keep-target=0 --debug --allow-empty".split(" ")).run())
|
|
||||||
|
|
||||||
print(buf.getvalue())
|
|
||||||
|
|
||||||
self.assertIn(": aborting resume, since", buf.getvalue())
|
|
||||||
|
|
||||||
r=shelltest("zfs list -H -o name -r -t all test_target1")
|
|
||||||
self.assertMultiLineEqual(r,"""
|
|
||||||
test_target1
|
|
||||||
test_target1/test_source1
|
|
||||||
test_target1/test_source1/fs1
|
|
||||||
test_target1/test_source1/fs1@test-20101111000002
|
|
||||||
test_target1/test_source1/fs1/sub
|
|
||||||
test_target1/test_source1/fs1/sub@test-20101111000002
|
|
||||||
test_target1/test_source2
|
|
||||||
test_target1/test_source2/fs2
|
|
||||||
test_target1/test_source2/fs2/sub
|
|
||||||
test_target1/test_source2/fs2/sub@test-20101111000002
|
|
||||||
""")
|
|
||||||
|
|
||||||
|
|
||||||
def test_missing_common(self):
|
|
||||||
|
|
||||||
with patch('time.strftime', return_value="20101111000000"):
|
|
||||||
self.assertFalse(ZfsAutobackup("test test_target1 --verbose --allow-empty".split(" ")).run())
|
|
||||||
|
|
||||||
#remove common snapshot and leave nothing
|
|
||||||
shelltest("zfs release zfs_autobackup:test test_source1/fs1@test-20101111000000")
|
|
||||||
shelltest("zfs destroy test_source1/fs1@test-20101111000000")
|
|
||||||
|
|
||||||
with patch('time.strftime', return_value="20101111000001"):
|
|
||||||
self.assertTrue(ZfsAutobackup("test test_target1 --verbose --allow-empty".split(" ")).run())
|
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
############# TODO:
|
|
||||||
def test_ignoretransfererrors(self):
|
|
||||||
|
|
||||||
self.skipTest("todo: create some kind of situation where zfs recv exits with an error but transfer is still ok (happens in practice with acltype)")
|
|
||||||
@ -1,20 +0,0 @@
|
|||||||
|
|
||||||
from basetest import *
|
|
||||||
|
|
||||||
|
|
||||||
class TestZfsNode(unittest2.TestCase):
|
|
||||||
|
|
||||||
def setUp(self):
|
|
||||||
prepare_zpools()
|
|
||||||
self.longMessage=True
|
|
||||||
|
|
||||||
# #resume initial backup
|
|
||||||
# def test_keepsource0(self):
|
|
||||||
|
|
||||||
# #somehow only specifying --allow-empty --keep-source 0 failed:
|
|
||||||
# with patch('time.strftime', return_value="20101111000000"):
|
|
||||||
# self.assertFalse(ZfsAutobackup("test test_target1 --verbose --allow-empty --keep-source 0".split(" ")).run())
|
|
||||||
|
|
||||||
# with patch('time.strftime', return_value="20101111000001"):
|
|
||||||
# self.assertFalse(ZfsAutobackup("test test_target1 --verbose --allow-empty --keep-source 0".split(" ")).run())
|
|
||||||
|
|
||||||
123
test_zfsnode.py
123
test_zfsnode.py
@ -1,123 +0,0 @@
|
|||||||
from basetest import *
|
|
||||||
|
|
||||||
|
|
||||||
class TestZfsNode(unittest2.TestCase):
|
|
||||||
|
|
||||||
def setUp(self):
|
|
||||||
prepare_zpools()
|
|
||||||
# return super().setUp()
|
|
||||||
|
|
||||||
|
|
||||||
def test_consistent_snapshot(self):
|
|
||||||
logger=Logger()
|
|
||||||
description="[Source]"
|
|
||||||
node=ZfsNode("test", logger, description=description)
|
|
||||||
|
|
||||||
with self.subTest("first snapshot"):
|
|
||||||
node.consistent_snapshot(node.selected_datasets, "test-1",100000)
|
|
||||||
r=shelltest("zfs list -H -o name -r -t all "+TEST_POOLS)
|
|
||||||
self.assertEqual(r,"""
|
|
||||||
test_source1
|
|
||||||
test_source1/fs1
|
|
||||||
test_source1/fs1@test-1
|
|
||||||
test_source1/fs1/sub
|
|
||||||
test_source1/fs1/sub@test-1
|
|
||||||
test_source2
|
|
||||||
test_source2/fs2
|
|
||||||
test_source2/fs2/sub
|
|
||||||
test_source2/fs2/sub@test-1
|
|
||||||
test_source2/fs3
|
|
||||||
test_source2/fs3/sub
|
|
||||||
test_target1
|
|
||||||
""")
|
|
||||||
|
|
||||||
|
|
||||||
with self.subTest("second snapshot, no changes, no snapshot"):
|
|
||||||
node.consistent_snapshot(node.selected_datasets, "test-2",1)
|
|
||||||
r=shelltest("zfs list -H -o name -r -t all "+TEST_POOLS)
|
|
||||||
self.assertEqual(r,"""
|
|
||||||
test_source1
|
|
||||||
test_source1/fs1
|
|
||||||
test_source1/fs1@test-1
|
|
||||||
test_source1/fs1/sub
|
|
||||||
test_source1/fs1/sub@test-1
|
|
||||||
test_source2
|
|
||||||
test_source2/fs2
|
|
||||||
test_source2/fs2/sub
|
|
||||||
test_source2/fs2/sub@test-1
|
|
||||||
test_source2/fs3
|
|
||||||
test_source2/fs3/sub
|
|
||||||
test_target1
|
|
||||||
""")
|
|
||||||
|
|
||||||
with self.subTest("second snapshot, no changes, empty snapshot"):
|
|
||||||
node.consistent_snapshot(node.selected_datasets, "test-2",0)
|
|
||||||
r=shelltest("zfs list -H -o name -r -t all "+TEST_POOLS)
|
|
||||||
self.assertEqual(r,"""
|
|
||||||
test_source1
|
|
||||||
test_source1/fs1
|
|
||||||
test_source1/fs1@test-1
|
|
||||||
test_source1/fs1@test-2
|
|
||||||
test_source1/fs1/sub
|
|
||||||
test_source1/fs1/sub@test-1
|
|
||||||
test_source1/fs1/sub@test-2
|
|
||||||
test_source2
|
|
||||||
test_source2/fs2
|
|
||||||
test_source2/fs2/sub
|
|
||||||
test_source2/fs2/sub@test-1
|
|
||||||
test_source2/fs2/sub@test-2
|
|
||||||
test_source2/fs3
|
|
||||||
test_source2/fs3/sub
|
|
||||||
test_target1
|
|
||||||
""")
|
|
||||||
|
|
||||||
|
|
||||||
def test_getselected(self):
|
|
||||||
logger=Logger()
|
|
||||||
description="[Source]"
|
|
||||||
node=ZfsNode("test", logger, description=description)
|
|
||||||
s=pformat(node.selected_datasets)
|
|
||||||
print(s)
|
|
||||||
|
|
||||||
#basics
|
|
||||||
self.assertEqual (s, """[(local): test_source1/fs1,
|
|
||||||
(local): test_source1/fs1/sub,
|
|
||||||
(local): test_source2/fs2/sub]""")
|
|
||||||
|
|
||||||
#caching, so expect same result after changing it
|
|
||||||
subprocess.check_call("zfs set autobackup:test=true test_source2/fs3", shell=True)
|
|
||||||
self.assertEqual (s, """[(local): test_source1/fs1,
|
|
||||||
(local): test_source1/fs1/sub,
|
|
||||||
(local): test_source2/fs2/sub]""")
|
|
||||||
|
|
||||||
|
|
||||||
def test_validcommand(self):
|
|
||||||
logger=Logger()
|
|
||||||
description="[Source]"
|
|
||||||
node=ZfsNode("test", logger, description=description)
|
|
||||||
|
|
||||||
|
|
||||||
with self.subTest("test invalid option"):
|
|
||||||
self.assertFalse(node.valid_command(["zfs", "send", "--invalid-option", "nonexisting"]))
|
|
||||||
with self.subTest("test valid option"):
|
|
||||||
self.assertTrue(node.valid_command(["zfs", "send", "-v", "nonexisting"]))
|
|
||||||
|
|
||||||
def test_supportedsendoptions(self):
|
|
||||||
logger=Logger()
|
|
||||||
description="[Source]"
|
|
||||||
node=ZfsNode("test", logger, description=description)
|
|
||||||
# -D propably always supported
|
|
||||||
self.assertGreater(len(node.supported_send_options),0)
|
|
||||||
|
|
||||||
|
|
||||||
def test_supportedrecvoptions(self):
|
|
||||||
logger=Logger()
|
|
||||||
description="[Source]"
|
|
||||||
#NOTE: this couldnt hang via ssh if we dont close filehandles properly. (which was a previous bug)
|
|
||||||
node=ZfsNode("test", logger, description=description, ssh_to='localhost')
|
|
||||||
self.assertIsInstance(node.supported_recv_options, list)
|
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
if __name__ == '__main__':
|
|
||||||
unittest.main()
|
|
||||||
6
tests/autoruntests
Executable file
6
tests/autoruntests
Executable file
@ -0,0 +1,6 @@
|
|||||||
|
#!/bin/bash
|
||||||
|
|
||||||
|
#NOTE: run from top directory
|
||||||
|
|
||||||
|
find tests/*.py zfs_autobackup/*.py| entr -r ./tests/run_tests $@
|
||||||
|
|
||||||
@ -1,4 +1,6 @@
|
|||||||
|
|
||||||
|
# To run tests as non-root, use this hack:
|
||||||
|
# chmod 4755 /usr/sbin/zpool /usr/sbin/zfs
|
||||||
|
|
||||||
import subprocess
|
import subprocess
|
||||||
import random
|
import random
|
||||||
@ -8,7 +10,7 @@ import unittest2
|
|||||||
import subprocess
|
import subprocess
|
||||||
import time
|
import time
|
||||||
from pprint import *
|
from pprint import *
|
||||||
from bin.zfs_autobackup import *
|
from zfs_autobackup.ZfsAutobackup import *
|
||||||
from mock import *
|
from mock import *
|
||||||
import contextlib
|
import contextlib
|
||||||
import sys
|
import sys
|
||||||
@ -58,7 +60,8 @@ def redirect_stderr(target):
|
|||||||
|
|
||||||
def shelltest(cmd):
|
def shelltest(cmd):
|
||||||
"""execute and print result as nice copypastable string for unit tests (adds extra newlines on top/bottom)"""
|
"""execute and print result as nice copypastable string for unit tests (adds extra newlines on top/bottom)"""
|
||||||
ret=(subprocess.check_output(cmd , shell=True).decode('utf-8'))
|
|
||||||
|
ret=(subprocess.check_output("SUDO_ASKPASS=./password.sh sudo -A "+cmd , shell=True).decode('utf-8'))
|
||||||
print("######### result of: {}".format(cmd))
|
print("######### result of: {}".format(cmd))
|
||||||
print(ret)
|
print(ret)
|
||||||
print("#########")
|
print("#########")
|
||||||
@ -1,13 +1,15 @@
|
|||||||
#!/bin/bash
|
#!/bin/bash
|
||||||
|
|
||||||
|
SCRIPTDIR=`dirname $0`
|
||||||
|
|
||||||
|
#cd $SCRIPTDIR || exit 1
|
||||||
|
|
||||||
|
|
||||||
if [ "$USER" != "root" ]; then
|
if [ "$USER" != "root" ]; then
|
||||||
echo "Need root to do proper zfs testing"
|
echo "Need root to do proper zfs testing"
|
||||||
exit 1
|
exit 1
|
||||||
fi
|
fi
|
||||||
|
|
||||||
#reactivate python environment, if any (usefull in Travis)
|
|
||||||
[ "$VIRTUAL_ENV" ] && source $VIRTUAL_ENV/bin/activate
|
|
||||||
|
|
||||||
# test needs ssh access to localhost for testing
|
# test needs ssh access to localhost for testing
|
||||||
if ! [ -e /root/.ssh/id_rsa ]; then
|
if ! [ -e /root/.ssh/id_rsa ]; then
|
||||||
@ -16,16 +18,11 @@ if ! [ -e /root/.ssh/id_rsa ]; then
|
|||||||
ssh -oStrictHostKeyChecking=no localhost true || exit 1
|
ssh -oStrictHostKeyChecking=no localhost true || exit 1
|
||||||
fi
|
fi
|
||||||
|
|
||||||
coverage run --source bin.zfs_autobackup -m unittest discover -vv $@
|
|
||||||
|
coverage run --branch --source zfs_autobackup -m unittest discover -vvvvf $SCRIPTDIR $@ 2>&1
|
||||||
EXIT=$?
|
EXIT=$?
|
||||||
|
|
||||||
echo
|
echo
|
||||||
coverage report
|
coverage report
|
||||||
|
|
||||||
#this does automatic travis CI/https://coveralls.io/ intergration:
|
|
||||||
# if which coveralls > /dev/null; then
|
|
||||||
# echo "Submitting to coveralls.io:"
|
|
||||||
# coveralls
|
|
||||||
# fi
|
|
||||||
|
|
||||||
exit $EXIT
|
exit $EXIT
|
||||||
123
tests/test_cmdpipe.py
Normal file
123
tests/test_cmdpipe.py
Normal file
@ -0,0 +1,123 @@
|
|||||||
|
from basetest import *
|
||||||
|
from zfs_autobackup.CmdPipe import CmdPipe,CmdItem
|
||||||
|
|
||||||
|
|
||||||
|
class TestCmdPipe(unittest2.TestCase):
|
||||||
|
|
||||||
|
def test_single(self):
|
||||||
|
"""single process stdout and stderr"""
|
||||||
|
p=CmdPipe(readonly=False, inp=None)
|
||||||
|
err=[]
|
||||||
|
out=[]
|
||||||
|
p.add(CmdItem(["ls", "-d", "/", "/", "/nonexistent"], stderr_handler=lambda line: err.append(line), exit_handler=lambda exit_code: self.assertEqual(exit_code,2)))
|
||||||
|
executed=p.execute(stdout_handler=lambda line: out.append(line))
|
||||||
|
|
||||||
|
self.assertEqual(err, ["ls: cannot access '/nonexistent': No such file or directory"])
|
||||||
|
self.assertEqual(out, ["/","/"])
|
||||||
|
self.assertIsNone(executed)
|
||||||
|
|
||||||
|
def test_input(self):
|
||||||
|
"""test stdinput"""
|
||||||
|
p=CmdPipe(readonly=False, inp="test")
|
||||||
|
err=[]
|
||||||
|
out=[]
|
||||||
|
p.add(CmdItem(["cat"], stderr_handler=lambda line: err.append(line), exit_handler=lambda exit_code: self.assertEqual(exit_code,0)))
|
||||||
|
executed=p.execute(stdout_handler=lambda line: out.append(line))
|
||||||
|
|
||||||
|
self.assertEqual(err, [])
|
||||||
|
self.assertEqual(out, ["test"])
|
||||||
|
self.assertIsNone(executed)
|
||||||
|
|
||||||
|
def test_pipe(self):
|
||||||
|
"""test piped"""
|
||||||
|
p=CmdPipe(readonly=False)
|
||||||
|
err1=[]
|
||||||
|
err2=[]
|
||||||
|
err3=[]
|
||||||
|
out=[]
|
||||||
|
p.add(CmdItem(["echo", "test"], stderr_handler=lambda line: err1.append(line), exit_handler=lambda exit_code: self.assertEqual(exit_code,0)))
|
||||||
|
p.add(CmdItem(["tr", "e", "E"], stderr_handler=lambda line: err2.append(line), exit_handler=lambda exit_code: self.assertEqual(exit_code,0)))
|
||||||
|
p.add(CmdItem(["tr", "t", "T"], stderr_handler=lambda line: err3.append(line), exit_handler=lambda exit_code: self.assertEqual(exit_code,0)))
|
||||||
|
executed=p.execute(stdout_handler=lambda line: out.append(line))
|
||||||
|
|
||||||
|
self.assertEqual(err1, [])
|
||||||
|
self.assertEqual(err2, [])
|
||||||
|
self.assertEqual(err3, [])
|
||||||
|
self.assertEqual(out, ["TEsT"])
|
||||||
|
self.assertIsNone(executed)
|
||||||
|
|
||||||
|
#test str representation as well
|
||||||
|
self.assertEqual(str(p), "(echo test) | (tr e E) | (tr t T)")
|
||||||
|
|
||||||
|
def test_pipeerrors(self):
|
||||||
|
"""test piped stderrs """
|
||||||
|
p=CmdPipe(readonly=False)
|
||||||
|
err1=[]
|
||||||
|
err2=[]
|
||||||
|
err3=[]
|
||||||
|
out=[]
|
||||||
|
p.add(CmdItem(["ls", "/nonexistent1"], stderr_handler=lambda line: err1.append(line), exit_handler=lambda exit_code: self.assertEqual(exit_code,2)))
|
||||||
|
p.add(CmdItem(["ls", "/nonexistent2"], stderr_handler=lambda line: err2.append(line), exit_handler=lambda exit_code: self.assertEqual(exit_code,2)))
|
||||||
|
p.add(CmdItem(["ls", "/nonexistent3"], stderr_handler=lambda line: err3.append(line), exit_handler=lambda exit_code: self.assertEqual(exit_code,2)))
|
||||||
|
executed=p.execute(stdout_handler=lambda line: out.append(line))
|
||||||
|
|
||||||
|
self.assertEqual(err1, ["ls: cannot access '/nonexistent1': No such file or directory"])
|
||||||
|
self.assertEqual(err2, ["ls: cannot access '/nonexistent2': No such file or directory"])
|
||||||
|
self.assertEqual(err3, ["ls: cannot access '/nonexistent3': No such file or directory"])
|
||||||
|
self.assertEqual(out, [])
|
||||||
|
self.assertIsNone(executed)
|
||||||
|
|
||||||
|
def test_exitcode(self):
|
||||||
|
"""test piped exitcodes """
|
||||||
|
p=CmdPipe(readonly=False)
|
||||||
|
err1=[]
|
||||||
|
err2=[]
|
||||||
|
err3=[]
|
||||||
|
out=[]
|
||||||
|
p.add(CmdItem(["bash", "-c", "exit 1"], stderr_handler=lambda line: err1.append(line), exit_handler=lambda exit_code: self.assertEqual(exit_code,1)))
|
||||||
|
p.add(CmdItem(["bash", "-c", "exit 2"], stderr_handler=lambda line: err2.append(line), exit_handler=lambda exit_code: self.assertEqual(exit_code,2)))
|
||||||
|
p.add(CmdItem(["bash", "-c", "exit 3"], stderr_handler=lambda line: err3.append(line), exit_handler=lambda exit_code: self.assertEqual(exit_code,3)))
|
||||||
|
executed=p.execute(stdout_handler=lambda line: out.append(line))
|
||||||
|
|
||||||
|
self.assertEqual(err1, [])
|
||||||
|
self.assertEqual(err2, [])
|
||||||
|
self.assertEqual(err3, [])
|
||||||
|
self.assertEqual(out, [])
|
||||||
|
self.assertIsNone(executed)
|
||||||
|
|
||||||
|
def test_readonly_execute(self):
|
||||||
|
"""everything readonly, just should execute"""
|
||||||
|
|
||||||
|
p=CmdPipe(readonly=True)
|
||||||
|
err1=[]
|
||||||
|
err2=[]
|
||||||
|
out=[]
|
||||||
|
|
||||||
|
def true_exit(exit_code):
|
||||||
|
return True
|
||||||
|
|
||||||
|
p.add(CmdItem(["echo", "test1"], stderr_handler=lambda line: err1.append(line), exit_handler=true_exit, readonly=True))
|
||||||
|
p.add(CmdItem(["echo", "test2"], stderr_handler=lambda line: err2.append(line), exit_handler=true_exit, readonly=True))
|
||||||
|
executed=p.execute(stdout_handler=lambda line: out.append(line))
|
||||||
|
|
||||||
|
self.assertEqual(err1, [])
|
||||||
|
self.assertEqual(err2, [])
|
||||||
|
self.assertEqual(out, ["test2"])
|
||||||
|
self.assertTrue(executed)
|
||||||
|
|
||||||
|
def test_readonly_skip(self):
|
||||||
|
"""one command not readonly, skip"""
|
||||||
|
|
||||||
|
p=CmdPipe(readonly=True)
|
||||||
|
err1=[]
|
||||||
|
err2=[]
|
||||||
|
out=[]
|
||||||
|
p.add(CmdItem(["echo", "test1"], stderr_handler=lambda line: err1.append(line), readonly=False))
|
||||||
|
p.add(CmdItem(["echo", "test2"], stderr_handler=lambda line: err2.append(line), readonly=True))
|
||||||
|
executed=p.execute(stdout_handler=lambda line: out.append(line))
|
||||||
|
|
||||||
|
self.assertEqual(err1, [])
|
||||||
|
self.assertEqual(err2, [])
|
||||||
|
self.assertEqual(out, [])
|
||||||
|
self.assertTrue(executed)
|
||||||
|
|
||||||
@ -13,17 +13,17 @@ class TestZfsNode(unittest2.TestCase):
|
|||||||
def test_destroymissing(self):
|
def test_destroymissing(self):
|
||||||
|
|
||||||
#initial backup
|
#initial backup
|
||||||
with patch('time.strftime', return_value="10101111000000"): #1000 years in past
|
with patch('time.strftime', return_value="test-19101111000000"): #1000 years in past
|
||||||
self.assertFalse(ZfsAutobackup("test test_target1 --verbose --no-holds".split(" ")).run())
|
self.assertFalse(ZfsAutobackup("test test_target1 --no-progress --verbose --no-holds".split(" ")).run())
|
||||||
|
|
||||||
with patch('time.strftime', return_value="20101111000000"): #far in past
|
with patch('time.strftime', return_value="test-20101111000000"): #far in past
|
||||||
self.assertFalse(ZfsAutobackup("test test_target1 --verbose --no-holds --allow-empty".split(" ")).run())
|
self.assertFalse(ZfsAutobackup("test test_target1 --no-progress --verbose --no-holds --allow-empty".split(" ")).run())
|
||||||
|
|
||||||
|
|
||||||
with self.subTest("Should do nothing yet"):
|
with self.subTest("Should do nothing yet"):
|
||||||
with OutputIO() as buf:
|
with OutputIO() as buf:
|
||||||
with redirect_stdout(buf):
|
with redirect_stdout(buf):
|
||||||
self.assertFalse(ZfsAutobackup("test test_target1 --verbose --no-snapshot --destroy-missing 0s".split(" ")).run())
|
self.assertFalse(ZfsAutobackup("test test_target1 --no-progress --verbose --no-snapshot --destroy-missing 0s".split(" ")).run())
|
||||||
|
|
||||||
print(buf.getvalue())
|
print(buf.getvalue())
|
||||||
self.assertNotIn(": Destroy missing", buf.getvalue())
|
self.assertNotIn(": Destroy missing", buf.getvalue())
|
||||||
@ -36,11 +36,11 @@ class TestZfsNode(unittest2.TestCase):
|
|||||||
|
|
||||||
with OutputIO() as buf:
|
with OutputIO() as buf:
|
||||||
with redirect_stdout(buf), redirect_stderr(buf):
|
with redirect_stdout(buf), redirect_stderr(buf):
|
||||||
self.assertFalse(ZfsAutobackup("test test_target1 --verbose --no-snapshot --destroy-missing 0s".split(" ")).run())
|
self.assertFalse(ZfsAutobackup("test test_target1 --no-progress --verbose --no-snapshot --destroy-missing 0s".split(" ")).run())
|
||||||
|
|
||||||
print(buf.getvalue())
|
print(buf.getvalue())
|
||||||
#should have done the snapshot cleanup for destoy missing:
|
#should have done the snapshot cleanup for destoy missing:
|
||||||
self.assertIn("fs1@test-10101111000000: Destroying", buf.getvalue())
|
self.assertIn("fs1@test-19101111000000: Destroying", buf.getvalue())
|
||||||
|
|
||||||
self.assertIn("fs1: Destroy missing: Still has children here.", buf.getvalue())
|
self.assertIn("fs1: Destroy missing: Still has children here.", buf.getvalue())
|
||||||
|
|
||||||
@ -54,7 +54,7 @@ class TestZfsNode(unittest2.TestCase):
|
|||||||
with OutputIO() as buf:
|
with OutputIO() as buf:
|
||||||
with redirect_stdout(buf):
|
with redirect_stdout(buf):
|
||||||
#100y: lastest should not be old enough, while second to latest snapshot IS old enough:
|
#100y: lastest should not be old enough, while second to latest snapshot IS old enough:
|
||||||
self.assertFalse(ZfsAutobackup("test test_target1 --verbose --no-snapshot --destroy-missing 100y".split(" ")).run())
|
self.assertFalse(ZfsAutobackup("test test_target1 --no-progress --verbose --no-snapshot --destroy-missing 100y".split(" ")).run())
|
||||||
|
|
||||||
print(buf.getvalue())
|
print(buf.getvalue())
|
||||||
self.assertIn(": Waiting for deadline", buf.getvalue())
|
self.assertIn(": Waiting for deadline", buf.getvalue())
|
||||||
@ -62,7 +62,7 @@ class TestZfsNode(unittest2.TestCase):
|
|||||||
#past deadline, destroy
|
#past deadline, destroy
|
||||||
with OutputIO() as buf:
|
with OutputIO() as buf:
|
||||||
with redirect_stdout(buf):
|
with redirect_stdout(buf):
|
||||||
self.assertFalse(ZfsAutobackup("test test_target1 --verbose --no-snapshot --destroy-missing 1y".split(" ")).run())
|
self.assertFalse(ZfsAutobackup("test test_target1 --no-progress --verbose --no-snapshot --destroy-missing 1y".split(" ")).run())
|
||||||
|
|
||||||
print(buf.getvalue())
|
print(buf.getvalue())
|
||||||
self.assertIn("sub: Destroying", buf.getvalue())
|
self.assertIn("sub: Destroying", buf.getvalue())
|
||||||
@ -75,7 +75,7 @@ class TestZfsNode(unittest2.TestCase):
|
|||||||
|
|
||||||
with OutputIO() as buf:
|
with OutputIO() as buf:
|
||||||
with redirect_stdout(buf):
|
with redirect_stdout(buf):
|
||||||
self.assertFalse(ZfsAutobackup("test test_target1 --verbose --no-snapshot --destroy-missing 0s".split(" ")).run())
|
self.assertFalse(ZfsAutobackup("test test_target1 --no-progress --verbose --no-snapshot --destroy-missing 0s".split(" ")).run())
|
||||||
|
|
||||||
print(buf.getvalue())
|
print(buf.getvalue())
|
||||||
|
|
||||||
@ -90,13 +90,13 @@ class TestZfsNode(unittest2.TestCase):
|
|||||||
|
|
||||||
with OutputIO() as buf:
|
with OutputIO() as buf:
|
||||||
with redirect_stdout(buf), redirect_stderr(buf):
|
with redirect_stdout(buf), redirect_stderr(buf):
|
||||||
self.assertFalse(ZfsAutobackup("test test_target1 --verbose --no-snapshot --destroy-missing 0s".split(" ")).run())
|
self.assertFalse(ZfsAutobackup("test test_target1 --no-progress --verbose --no-snapshot --destroy-missing 0s".split(" ")).run())
|
||||||
|
|
||||||
print(buf.getvalue())
|
print(buf.getvalue())
|
||||||
#now tries to destroy our own last snapshot (before the final destroy of the dataset)
|
#now tries to destroy our own last snapshot (before the final destroy of the dataset)
|
||||||
self.assertIn("fs1@test-20101111000000: Destroying", buf.getvalue())
|
self.assertIn("fs1@test-20101111000000: Destroying", buf.getvalue())
|
||||||
#but cant finish because still in use:
|
#but cant finish because still in use:
|
||||||
self.assertIn("fs1: Error during destoy missing", buf.getvalue())
|
self.assertIn("fs1: Error during --destroy-missing", buf.getvalue())
|
||||||
|
|
||||||
shelltest("zfs destroy test_target1/clone1")
|
shelltest("zfs destroy test_target1/clone1")
|
||||||
|
|
||||||
@ -105,7 +105,7 @@ class TestZfsNode(unittest2.TestCase):
|
|||||||
|
|
||||||
with OutputIO() as buf:
|
with OutputIO() as buf:
|
||||||
with redirect_stdout(buf), redirect_stderr(buf):
|
with redirect_stdout(buf), redirect_stderr(buf):
|
||||||
self.assertFalse(ZfsAutobackup("test test_target1 --verbose --no-snapshot --destroy-missing 0s".split(" ")).run())
|
self.assertFalse(ZfsAutobackup("test test_target1 --no-progress --verbose --no-snapshot --destroy-missing 0s".split(" ")).run())
|
||||||
|
|
||||||
print(buf.getvalue())
|
print(buf.getvalue())
|
||||||
#should have done the snapshot cleanup for destoy missing:
|
#should have done the snapshot cleanup for destoy missing:
|
||||||
@ -113,7 +113,7 @@ class TestZfsNode(unittest2.TestCase):
|
|||||||
|
|
||||||
with OutputIO() as buf:
|
with OutputIO() as buf:
|
||||||
with redirect_stdout(buf), redirect_stderr(buf):
|
with redirect_stdout(buf), redirect_stderr(buf):
|
||||||
self.assertFalse(ZfsAutobackup("test test_target1 --verbose --no-snapshot --destroy-missing 0s".split(" ")).run())
|
self.assertFalse(ZfsAutobackup("test test_target1 --no-progress --verbose --no-snapshot --destroy-missing 0s".split(" ")).run())
|
||||||
|
|
||||||
print(buf.getvalue())
|
print(buf.getvalue())
|
||||||
#on second run it sees the dangling ex-parent but doesnt know what to do with it (since it has no own snapshot)
|
#on second run it sees the dangling ex-parent but doesnt know what to do with it (since it has no own snapshot)
|
||||||
@ -130,6 +130,6 @@ test_target1/test_source1
|
|||||||
test_target1/test_source2
|
test_target1/test_source2
|
||||||
test_target1/test_source2/fs2
|
test_target1/test_source2/fs2
|
||||||
test_target1/test_source2/fs2/sub
|
test_target1/test_source2/fs2/sub
|
||||||
test_target1/test_source2/fs2/sub@test-10101111000000
|
test_target1/test_source2/fs2/sub@test-19101111000000
|
||||||
test_target1/test_source2/fs2/sub@test-20101111000000
|
test_target1/test_source2/fs2/sub@test-20101111000000
|
||||||
""")
|
""")
|
||||||
192
tests/test_encryption.py
Normal file
192
tests/test_encryption.py
Normal file
@ -0,0 +1,192 @@
|
|||||||
|
from zfs_autobackup.CmdPipe import CmdPipe
|
||||||
|
from basetest import *
|
||||||
|
import time
|
||||||
|
|
||||||
|
# We have to do a LOT to properly test encryption/decryption/raw transfers
|
||||||
|
#
|
||||||
|
# For every scenario we need at least:
|
||||||
|
# - plain source dataset
|
||||||
|
# - encrypted source dataset
|
||||||
|
# - plain target path
|
||||||
|
# - encrypted target path
|
||||||
|
# - do a full transfer
|
||||||
|
# - do a incremental transfer
|
||||||
|
|
||||||
|
# Scenarios:
|
||||||
|
# - Raw transfer
|
||||||
|
# - Decryption transfer (--decrypt)
|
||||||
|
# - Encryption transfer (--encrypt)
|
||||||
|
# - Re-encryption transfer (--decrypt --encrypt)
|
||||||
|
|
||||||
|
class TestZfsEncryption(unittest2.TestCase):
|
||||||
|
|
||||||
|
|
||||||
|
def setUp(self):
|
||||||
|
prepare_zpools()
|
||||||
|
|
||||||
|
try:
|
||||||
|
shelltest("zfs get encryption test_source1")
|
||||||
|
except:
|
||||||
|
self.skipTest("Encryption not supported on this ZFS version.")
|
||||||
|
|
||||||
|
def prepare_encrypted_dataset(self, key, path, unload_key=False):
|
||||||
|
|
||||||
|
# create encrypted source dataset
|
||||||
|
shelltest("echo {} > /tmp/zfstest.key".format(key))
|
||||||
|
shelltest("zfs create -o keylocation=file:///tmp/zfstest.key -o keyformat=passphrase -o encryption=on {}".format(path))
|
||||||
|
|
||||||
|
if unload_key:
|
||||||
|
shelltest("zfs unmount {}".format(path))
|
||||||
|
shelltest("zfs unload-key {}".format(path))
|
||||||
|
|
||||||
|
# r=shelltest("dd if=/dev/zero of=/test_source1/fs1/enc1/data.txt bs=200000 count=1")
|
||||||
|
|
||||||
|
def test_raw(self):
|
||||||
|
"""send encrypted data unaltered (standard operation)"""
|
||||||
|
|
||||||
|
self.prepare_encrypted_dataset("11111111", "test_source1/fs1/encryptedsource")
|
||||||
|
self.prepare_encrypted_dataset("11111111", "test_source1/fs1/encryptedsourcekeyless", unload_key=True) # raw mode shouldn't need a key
|
||||||
|
self.prepare_encrypted_dataset("22222222", "test_target1/encryptedtarget")
|
||||||
|
|
||||||
|
with patch('time.strftime', return_value="test-20101111000000"):
|
||||||
|
self.assertFalse(ZfsAutobackup("test test_target1 --verbose --no-progress --allow-empty --exclude-received".split(" ")).run())
|
||||||
|
self.assertFalse(ZfsAutobackup("test test_target1/encryptedtarget --verbose --no-progress --no-snapshot --exclude-received".split(" ")).run())
|
||||||
|
|
||||||
|
with patch('time.strftime', return_value="test-20101111000001"):
|
||||||
|
self.assertFalse(ZfsAutobackup("test test_target1 --verbose --no-progress --allow-empty --exclude-received".split(" ")).run())
|
||||||
|
self.assertFalse(ZfsAutobackup("test test_target1/encryptedtarget --verbose --no-progress --no-snapshot --exclude-received".split(" ")).run())
|
||||||
|
|
||||||
|
r = shelltest("zfs get -r -t filesystem encryptionroot test_target1")
|
||||||
|
self.assertMultiLineEqual(r,"""
|
||||||
|
NAME PROPERTY VALUE SOURCE
|
||||||
|
test_target1 encryptionroot - -
|
||||||
|
test_target1/encryptedtarget encryptionroot test_target1/encryptedtarget -
|
||||||
|
test_target1/encryptedtarget/test_source1 encryptionroot test_target1/encryptedtarget -
|
||||||
|
test_target1/encryptedtarget/test_source1/fs1 encryptionroot - -
|
||||||
|
test_target1/encryptedtarget/test_source1/fs1/encryptedsource encryptionroot test_target1/encryptedtarget/test_source1/fs1/encryptedsource -
|
||||||
|
test_target1/encryptedtarget/test_source1/fs1/encryptedsourcekeyless encryptionroot test_target1/encryptedtarget/test_source1/fs1/encryptedsourcekeyless -
|
||||||
|
test_target1/encryptedtarget/test_source1/fs1/sub encryptionroot - -
|
||||||
|
test_target1/encryptedtarget/test_source2 encryptionroot test_target1/encryptedtarget -
|
||||||
|
test_target1/encryptedtarget/test_source2/fs2 encryptionroot test_target1/encryptedtarget -
|
||||||
|
test_target1/encryptedtarget/test_source2/fs2/sub encryptionroot - -
|
||||||
|
test_target1/test_source1 encryptionroot - -
|
||||||
|
test_target1/test_source1/fs1 encryptionroot - -
|
||||||
|
test_target1/test_source1/fs1/encryptedsource encryptionroot test_target1/test_source1/fs1/encryptedsource -
|
||||||
|
test_target1/test_source1/fs1/encryptedsourcekeyless encryptionroot test_target1/test_source1/fs1/encryptedsourcekeyless -
|
||||||
|
test_target1/test_source1/fs1/sub encryptionroot - -
|
||||||
|
test_target1/test_source2 encryptionroot - -
|
||||||
|
test_target1/test_source2/fs2 encryptionroot - -
|
||||||
|
test_target1/test_source2/fs2/sub encryptionroot - -
|
||||||
|
""")
|
||||||
|
|
||||||
|
def test_decrypt(self):
|
||||||
|
"""decrypt data and store unencrypted (--decrypt)"""
|
||||||
|
|
||||||
|
self.prepare_encrypted_dataset("11111111", "test_source1/fs1/encryptedsource")
|
||||||
|
self.prepare_encrypted_dataset("22222222", "test_target1/encryptedtarget")
|
||||||
|
|
||||||
|
with patch('time.strftime', return_value="test-20101111000000"):
|
||||||
|
self.assertFalse(ZfsAutobackup("test test_target1 --verbose --no-progress --decrypt --allow-empty --exclude-received".split(" ")).run())
|
||||||
|
self.assertFalse(ZfsAutobackup("test test_target1/encryptedtarget --verbose --no-progress --decrypt --no-snapshot --exclude-received".split(" ")).run())
|
||||||
|
|
||||||
|
with patch('time.strftime', return_value="test-20101111000001"):
|
||||||
|
self.assertFalse(ZfsAutobackup("test test_target1 --verbose --no-progress --decrypt --allow-empty --exclude-received".split(" ")).run())
|
||||||
|
self.assertFalse(ZfsAutobackup("test test_target1/encryptedtarget --verbose --no-progress --decrypt --no-snapshot --exclude-received".split(" ")).run())
|
||||||
|
|
||||||
|
r = shelltest("zfs get -r -t filesystem encryptionroot test_target1")
|
||||||
|
self.assertEqual(r, """
|
||||||
|
NAME PROPERTY VALUE SOURCE
|
||||||
|
test_target1 encryptionroot - -
|
||||||
|
test_target1/encryptedtarget encryptionroot test_target1/encryptedtarget -
|
||||||
|
test_target1/encryptedtarget/test_source1 encryptionroot test_target1/encryptedtarget -
|
||||||
|
test_target1/encryptedtarget/test_source1/fs1 encryptionroot - -
|
||||||
|
test_target1/encryptedtarget/test_source1/fs1/encryptedsource encryptionroot - -
|
||||||
|
test_target1/encryptedtarget/test_source1/fs1/sub encryptionroot - -
|
||||||
|
test_target1/encryptedtarget/test_source2 encryptionroot test_target1/encryptedtarget -
|
||||||
|
test_target1/encryptedtarget/test_source2/fs2 encryptionroot test_target1/encryptedtarget -
|
||||||
|
test_target1/encryptedtarget/test_source2/fs2/sub encryptionroot - -
|
||||||
|
test_target1/test_source1 encryptionroot - -
|
||||||
|
test_target1/test_source1/fs1 encryptionroot - -
|
||||||
|
test_target1/test_source1/fs1/encryptedsource encryptionroot - -
|
||||||
|
test_target1/test_source1/fs1/sub encryptionroot - -
|
||||||
|
test_target1/test_source2 encryptionroot - -
|
||||||
|
test_target1/test_source2/fs2 encryptionroot - -
|
||||||
|
test_target1/test_source2/fs2/sub encryptionroot - -
|
||||||
|
""")
|
||||||
|
|
||||||
|
def test_encrypt(self):
|
||||||
|
"""send normal data set and store encrypted on the other side (--encrypt) issue #60 """
|
||||||
|
|
||||||
|
self.prepare_encrypted_dataset("11111111", "test_source1/fs1/encryptedsource")
|
||||||
|
self.prepare_encrypted_dataset("22222222", "test_target1/encryptedtarget")
|
||||||
|
|
||||||
|
with patch('time.strftime', return_value="test-20101111000000"):
|
||||||
|
self.assertFalse(ZfsAutobackup("test test_target1 --verbose --no-progress --encrypt --debug --allow-empty --exclude-received".split(" ")).run())
|
||||||
|
self.assertFalse(ZfsAutobackup("test test_target1/encryptedtarget --verbose --no-progress --encrypt --debug --no-snapshot --exclude-received".split(" ")).run())
|
||||||
|
|
||||||
|
with patch('time.strftime', return_value="test-20101111000001"):
|
||||||
|
self.assertFalse(ZfsAutobackup("test test_target1 --verbose --no-progress --encrypt --debug --allow-empty --exclude-received".split(" ")).run())
|
||||||
|
self.assertFalse(ZfsAutobackup("test test_target1/encryptedtarget --verbose --no-progress --encrypt --debug --no-snapshot --exclude-received".split(" ")).run())
|
||||||
|
|
||||||
|
r = shelltest("zfs get -r -t filesystem encryptionroot test_target1")
|
||||||
|
self.assertEqual(r, """
|
||||||
|
NAME PROPERTY VALUE SOURCE
|
||||||
|
test_target1 encryptionroot - -
|
||||||
|
test_target1/encryptedtarget encryptionroot test_target1/encryptedtarget -
|
||||||
|
test_target1/encryptedtarget/test_source1 encryptionroot test_target1/encryptedtarget -
|
||||||
|
test_target1/encryptedtarget/test_source1/fs1 encryptionroot test_target1/encryptedtarget -
|
||||||
|
test_target1/encryptedtarget/test_source1/fs1/encryptedsource encryptionroot test_target1/encryptedtarget/test_source1/fs1/encryptedsource -
|
||||||
|
test_target1/encryptedtarget/test_source1/fs1/sub encryptionroot test_target1/encryptedtarget -
|
||||||
|
test_target1/encryptedtarget/test_source2 encryptionroot test_target1/encryptedtarget -
|
||||||
|
test_target1/encryptedtarget/test_source2/fs2 encryptionroot test_target1/encryptedtarget -
|
||||||
|
test_target1/encryptedtarget/test_source2/fs2/sub encryptionroot test_target1/encryptedtarget -
|
||||||
|
test_target1/test_source1 encryptionroot - -
|
||||||
|
test_target1/test_source1/fs1 encryptionroot - -
|
||||||
|
test_target1/test_source1/fs1/encryptedsource encryptionroot test_target1/test_source1/fs1/encryptedsource -
|
||||||
|
test_target1/test_source1/fs1/sub encryptionroot - -
|
||||||
|
test_target1/test_source2 encryptionroot - -
|
||||||
|
test_target1/test_source2/fs2 encryptionroot - -
|
||||||
|
test_target1/test_source2/fs2/sub encryptionroot - -
|
||||||
|
""")
|
||||||
|
|
||||||
|
def test_reencrypt(self):
|
||||||
|
"""reencrypt data (--decrypt --encrypt) """
|
||||||
|
|
||||||
|
self.prepare_encrypted_dataset("11111111", "test_source1/fs1/encryptedsource")
|
||||||
|
self.prepare_encrypted_dataset("22222222", "test_target1/encryptedtarget")
|
||||||
|
|
||||||
|
with patch('time.strftime', return_value="test-20101111000000"):
|
||||||
|
self.assertFalse(ZfsAutobackup(
|
||||||
|
"test test_target1 --verbose --no-progress --decrypt --encrypt --debug --allow-empty --exclude-received".split(" ")).run())
|
||||||
|
self.assertFalse(ZfsAutobackup(
|
||||||
|
"test test_target1/encryptedtarget --verbose --no-progress --decrypt --encrypt --debug --no-snapshot --exclude-received".split(
|
||||||
|
" ")).run())
|
||||||
|
|
||||||
|
with patch('time.strftime', return_value="test-20101111000001"):
|
||||||
|
self.assertFalse(ZfsAutobackup(
|
||||||
|
"test test_target1 --verbose --no-progress --decrypt --encrypt --debug --allow-empty --exclude-received".split(" ")).run())
|
||||||
|
self.assertFalse(ZfsAutobackup(
|
||||||
|
"test test_target1/encryptedtarget --verbose --no-progress --decrypt --encrypt --debug --no-snapshot --exclude-received".split(
|
||||||
|
" ")).run())
|
||||||
|
|
||||||
|
r = shelltest("zfs get -r -t filesystem encryptionroot test_target1")
|
||||||
|
self.assertEqual(r, """
|
||||||
|
NAME PROPERTY VALUE SOURCE
|
||||||
|
test_target1 encryptionroot - -
|
||||||
|
test_target1/encryptedtarget encryptionroot test_target1/encryptedtarget -
|
||||||
|
test_target1/encryptedtarget/test_source1 encryptionroot test_target1/encryptedtarget -
|
||||||
|
test_target1/encryptedtarget/test_source1/fs1 encryptionroot test_target1/encryptedtarget -
|
||||||
|
test_target1/encryptedtarget/test_source1/fs1/encryptedsource encryptionroot test_target1/encryptedtarget -
|
||||||
|
test_target1/encryptedtarget/test_source1/fs1/sub encryptionroot test_target1/encryptedtarget -
|
||||||
|
test_target1/encryptedtarget/test_source2 encryptionroot test_target1/encryptedtarget -
|
||||||
|
test_target1/encryptedtarget/test_source2/fs2 encryptionroot test_target1/encryptedtarget -
|
||||||
|
test_target1/encryptedtarget/test_source2/fs2/sub encryptionroot test_target1/encryptedtarget -
|
||||||
|
test_target1/test_source1 encryptionroot - -
|
||||||
|
test_target1/test_source1/fs1 encryptionroot - -
|
||||||
|
test_target1/test_source1/fs1/encryptedsource encryptionroot - -
|
||||||
|
test_target1/test_source1/fs1/sub encryptionroot - -
|
||||||
|
test_target1/test_source2 encryptionroot - -
|
||||||
|
test_target1/test_source2/fs2 encryptionroot - -
|
||||||
|
test_target1/test_source2/fs2/sub encryptionroot - -
|
||||||
|
""")
|
||||||
|
|
||||||
@ -1,5 +1,5 @@
|
|||||||
from basetest import *
|
from basetest import *
|
||||||
|
from zfs_autobackup.ExecuteNode import *
|
||||||
|
|
||||||
print("THIS TEST REQUIRES SSH TO LOCALHOST")
|
print("THIS TEST REQUIRES SSH TO LOCALHOST")
|
||||||
|
|
||||||
@ -15,7 +15,7 @@ class TestExecuteNode(unittest2.TestCase):
|
|||||||
self.assertEqual(node.run(["echo","test"]), ["test"])
|
self.assertEqual(node.run(["echo","test"]), ["test"])
|
||||||
|
|
||||||
with self.subTest("error exit code"):
|
with self.subTest("error exit code"):
|
||||||
with self.assertRaises(subprocess.CalledProcessError):
|
with self.assertRaises(ExecuteError):
|
||||||
node.run(["false"])
|
node.run(["false"])
|
||||||
|
|
||||||
#
|
#
|
||||||
@ -26,9 +26,9 @@ class TestExecuteNode(unittest2.TestCase):
|
|||||||
with self.subTest("multiline tabsplit"):
|
with self.subTest("multiline tabsplit"):
|
||||||
self.assertEqual(node.run(["echo","l1c1\tl1c2\nl2c1\tl2c2"], tab_split=True), [['l1c1', 'l1c2'], ['l2c1', 'l2c2']])
|
self.assertEqual(node.run(["echo","l1c1\tl1c2\nl2c1\tl2c2"], tab_split=True), [['l1c1', 'l1c2'], ['l2c1', 'l2c2']])
|
||||||
|
|
||||||
#escaping test (shouldnt be a problem locally, single quotes can be a problem remote via ssh)
|
#escaping test
|
||||||
with self.subTest("escape test"):
|
with self.subTest("escape test"):
|
||||||
s="><`'\"@&$()$bla\\/.*!#test _+-={}[]|"
|
s="><`'\"@&$()$bla\\/.* !#test _+-={}[]|${bla} $bla"
|
||||||
self.assertEqual(node.run(["echo",s]), [s])
|
self.assertEqual(node.run(["echo",s]), [s])
|
||||||
|
|
||||||
#return std err as well, trigger stderr by listing something non existing
|
#return std err as well, trigger stderr by listing something non existing
|
||||||
@ -51,6 +51,15 @@ class TestExecuteNode(unittest2.TestCase):
|
|||||||
with self.subTest("stdin process with inp=None (shouldn't hang)"):
|
with self.subTest("stdin process with inp=None (shouldn't hang)"):
|
||||||
self.assertEqual(node.run(["cat"]), [])
|
self.assertEqual(node.run(["cat"]), [])
|
||||||
|
|
||||||
|
# let the system do the piping with an unescaped |:
|
||||||
|
with self.subTest("system piping test"):
|
||||||
|
|
||||||
|
#first make sure the actual | character is still properly escaped:
|
||||||
|
self.assertEqual(node.run(["echo","|"]), ["|"])
|
||||||
|
|
||||||
|
#now pipe
|
||||||
|
self.assertEqual(node.run(["echo", "abc", node.PIPE, "tr", "a", "A" ]), ["Abc"])
|
||||||
|
|
||||||
def test_basics_local(self):
|
def test_basics_local(self):
|
||||||
node=ExecuteNode(debug_output=True)
|
node=ExecuteNode(debug_output=True)
|
||||||
self.basics(node)
|
self.basics(node)
|
||||||
@ -64,7 +73,7 @@ class TestExecuteNode(unittest2.TestCase):
|
|||||||
def test_readonly(self):
|
def test_readonly(self):
|
||||||
node=ExecuteNode(debug_output=True, readonly=True)
|
node=ExecuteNode(debug_output=True, readonly=True)
|
||||||
|
|
||||||
self.assertEqual(node.run(["echo","test"], readonly=False), None)
|
self.assertEqual(node.run(["echo","test"], readonly=False), [])
|
||||||
self.assertEqual(node.run(["echo","test"], readonly=True), ["test"])
|
self.assertEqual(node.run(["echo","test"], readonly=True), ["test"])
|
||||||
|
|
||||||
|
|
||||||
@ -81,29 +90,33 @@ class TestExecuteNode(unittest2.TestCase):
|
|||||||
nodeb.run(["true"], inp=output)
|
nodeb.run(["true"], inp=output)
|
||||||
|
|
||||||
with self.subTest("error on pipe input side"):
|
with self.subTest("error on pipe input side"):
|
||||||
with self.assertRaises(subprocess.CalledProcessError):
|
with self.assertRaises(ExecuteError):
|
||||||
output=nodea.run(["false"], pipe=True)
|
output=nodea.run(["false"], pipe=True)
|
||||||
nodeb.run(["true"], inp=output)
|
nodeb.run(["true"], inp=output)
|
||||||
|
|
||||||
|
with self.subTest("error on both sides, ignore exit codes"):
|
||||||
|
output=nodea.run(["false"], pipe=True, valid_exitcodes=[])
|
||||||
|
nodeb.run(["false"], inp=output, valid_exitcodes=[])
|
||||||
|
|
||||||
with self.subTest("error on pipe output side "):
|
with self.subTest("error on pipe output side "):
|
||||||
with self.assertRaises(subprocess.CalledProcessError):
|
with self.assertRaises(ExecuteError):
|
||||||
output=nodea.run(["true"], pipe=True)
|
output=nodea.run(["true"], pipe=True)
|
||||||
nodeb.run(["false"], inp=output)
|
nodeb.run(["false"], inp=output)
|
||||||
|
|
||||||
with self.subTest("error on both sides of pipe"):
|
with self.subTest("error on both sides of pipe"):
|
||||||
with self.assertRaises(subprocess.CalledProcessError):
|
with self.assertRaises(ExecuteError):
|
||||||
output=nodea.run(["false"], pipe=True)
|
output=nodea.run(["false"], pipe=True)
|
||||||
nodeb.run(["false"], inp=output)
|
nodeb.run(["false"], inp=output)
|
||||||
|
|
||||||
with self.subTest("check stderr on pipe output side"):
|
with self.subTest("check stderr on pipe output side"):
|
||||||
output=nodea.run(["true"], pipe=True)
|
output=nodea.run(["true"], pipe=True, valid_exitcodes=[0])
|
||||||
(stdout, stderr)=nodeb.run(["ls", "nonexistingfile"], inp=output, return_stderr=True, valid_exitcodes=[0,2])
|
(stdout, stderr)=nodeb.run(["ls", "nonexistingfile"], inp=output, return_stderr=True, valid_exitcodes=[2])
|
||||||
self.assertEqual(stdout,[])
|
self.assertEqual(stdout,[])
|
||||||
self.assertRegex(stderr[0], "nonexistingfile" )
|
self.assertRegex(stderr[0], "nonexistingfile" )
|
||||||
|
|
||||||
with self.subTest("check stderr on pipe input side (should be only printed)"):
|
with self.subTest("check stderr on pipe input side (should be only printed)"):
|
||||||
output=nodea.run(["ls", "nonexistingfile"], pipe=True)
|
output=nodea.run(["ls", "nonexistingfile"], pipe=True, valid_exitcodes=[2])
|
||||||
(stdout, stderr)=nodeb.run(["true"], inp=output, return_stderr=True, valid_exitcodes=[0,2])
|
(stdout, stderr)=nodeb.run(["true"], inp=output, return_stderr=True, valid_exitcodes=[0])
|
||||||
self.assertEqual(stdout,[])
|
self.assertEqual(stdout,[])
|
||||||
self.assertEqual(stderr,[])
|
self.assertEqual(stderr,[])
|
||||||
|
|
||||||
314
tests/test_externalfailures.py
Normal file
314
tests/test_externalfailures.py
Normal file
@ -0,0 +1,314 @@
|
|||||||
|
from basetest import *
|
||||||
|
|
||||||
|
|
||||||
|
class TestExternalFailures(unittest2.TestCase):
|
||||||
|
|
||||||
|
def setUp(self):
|
||||||
|
prepare_zpools()
|
||||||
|
self.longMessage = True
|
||||||
|
|
||||||
|
# generate a resumable state
|
||||||
|
# NOTE: this generates two resumable test_target1/test_source1/fs1 and test_target1/test_source1/fs1/sub
|
||||||
|
def generate_resume(self):
|
||||||
|
|
||||||
|
r = shelltest("zfs set compress=off test_source1 test_target1")
|
||||||
|
|
||||||
|
# big change on source
|
||||||
|
r = shelltest("dd if=/dev/zero of=/test_source1/fs1/data bs=250M count=1")
|
||||||
|
|
||||||
|
# waste space on target
|
||||||
|
r = shelltest("dd if=/dev/zero of=/test_target1/waste bs=250M count=1")
|
||||||
|
|
||||||
|
# should fail and leave resume token (if supported)
|
||||||
|
self.assertTrue(ZfsAutobackup("test test_target1 --no-progress --verbose".split(" ")).run())
|
||||||
|
|
||||||
|
# free up space
|
||||||
|
r = shelltest("rm /test_target1/waste")
|
||||||
|
# sync
|
||||||
|
r = shelltest("zfs umount test_target1")
|
||||||
|
r = shelltest("zfs mount test_target1")
|
||||||
|
|
||||||
|
# resume initial backup
|
||||||
|
def test_initial_resume(self):
|
||||||
|
|
||||||
|
# inital backup, leaves resume token
|
||||||
|
with patch('time.strftime', return_value="test-20101111000000"):
|
||||||
|
self.generate_resume()
|
||||||
|
|
||||||
|
# --test should resume and succeed
|
||||||
|
with OutputIO() as buf:
|
||||||
|
with redirect_stdout(buf):
|
||||||
|
self.assertFalse(ZfsAutobackup("test test_target1 --no-progress --verbose --test".split(" ")).run())
|
||||||
|
|
||||||
|
print(buf.getvalue())
|
||||||
|
|
||||||
|
# did we really resume?
|
||||||
|
if "0.6.5" in ZFS_USERSPACE:
|
||||||
|
# abort this late, for beter coverage
|
||||||
|
self.skipTest("Resume not supported in this ZFS userspace version")
|
||||||
|
else:
|
||||||
|
self.assertIn(": resuming", buf.getvalue())
|
||||||
|
|
||||||
|
# should resume and succeed
|
||||||
|
with OutputIO() as buf:
|
||||||
|
with redirect_stdout(buf):
|
||||||
|
self.assertFalse(ZfsAutobackup("test test_target1 --no-progress --verbose".split(" ")).run())
|
||||||
|
|
||||||
|
print(buf.getvalue())
|
||||||
|
|
||||||
|
# did we really resume?
|
||||||
|
if "0.6.5" in ZFS_USERSPACE:
|
||||||
|
# abort this late, for beter coverage
|
||||||
|
self.skipTest("Resume not supported in this ZFS userspace version")
|
||||||
|
else:
|
||||||
|
self.assertIn(": resuming", buf.getvalue())
|
||||||
|
|
||||||
|
r = shelltest("zfs list -H -o name -r -t all test_target1")
|
||||||
|
self.assertMultiLineEqual(r, """
|
||||||
|
test_target1
|
||||||
|
test_target1/test_source1
|
||||||
|
test_target1/test_source1/fs1
|
||||||
|
test_target1/test_source1/fs1@test-20101111000000
|
||||||
|
test_target1/test_source1/fs1/sub
|
||||||
|
test_target1/test_source1/fs1/sub@test-20101111000000
|
||||||
|
test_target1/test_source2
|
||||||
|
test_target1/test_source2/fs2
|
||||||
|
test_target1/test_source2/fs2/sub
|
||||||
|
test_target1/test_source2/fs2/sub@test-20101111000000
|
||||||
|
""")
|
||||||
|
|
||||||
|
# resume incremental backup
|
||||||
|
def test_incremental_resume(self):
|
||||||
|
|
||||||
|
# initial backup
|
||||||
|
with patch('time.strftime', return_value="test-20101111000000"):
|
||||||
|
self.assertFalse(ZfsAutobackup("test test_target1 --no-progress --verbose --allow-empty".split(" ")).run())
|
||||||
|
|
||||||
|
# incremental backup leaves resume token
|
||||||
|
with patch('time.strftime', return_value="test-20101111000001"):
|
||||||
|
self.generate_resume()
|
||||||
|
|
||||||
|
# --test should resume and succeed
|
||||||
|
with OutputIO() as buf:
|
||||||
|
with redirect_stdout(buf):
|
||||||
|
self.assertFalse(ZfsAutobackup("test test_target1 --no-progress --verbose --test".split(" ")).run())
|
||||||
|
|
||||||
|
print(buf.getvalue())
|
||||||
|
|
||||||
|
# did we really resume?
|
||||||
|
if "0.6.5" in ZFS_USERSPACE:
|
||||||
|
# abort this late, for beter coverage
|
||||||
|
self.skipTest("Resume not supported in this ZFS userspace version")
|
||||||
|
else:
|
||||||
|
self.assertIn(": resuming", buf.getvalue())
|
||||||
|
|
||||||
|
# should resume and succeed
|
||||||
|
with OutputIO() as buf:
|
||||||
|
with redirect_stdout(buf):
|
||||||
|
self.assertFalse(ZfsAutobackup("test test_target1 --no-progress --verbose".split(" ")).run())
|
||||||
|
|
||||||
|
print(buf.getvalue())
|
||||||
|
|
||||||
|
# did we really resume?
|
||||||
|
if "0.6.5" in ZFS_USERSPACE:
|
||||||
|
# abort this late, for beter coverage
|
||||||
|
self.skipTest("Resume not supported in this ZFS userspace version")
|
||||||
|
else:
|
||||||
|
self.assertIn(": resuming", buf.getvalue())
|
||||||
|
|
||||||
|
r = shelltest("zfs list -H -o name -r -t all test_target1")
|
||||||
|
self.assertMultiLineEqual(r, """
|
||||||
|
test_target1
|
||||||
|
test_target1/test_source1
|
||||||
|
test_target1/test_source1/fs1
|
||||||
|
test_target1/test_source1/fs1@test-20101111000000
|
||||||
|
test_target1/test_source1/fs1@test-20101111000001
|
||||||
|
test_target1/test_source1/fs1/sub
|
||||||
|
test_target1/test_source1/fs1/sub@test-20101111000000
|
||||||
|
test_target1/test_source2
|
||||||
|
test_target1/test_source2/fs2
|
||||||
|
test_target1/test_source2/fs2/sub
|
||||||
|
test_target1/test_source2/fs2/sub@test-20101111000000
|
||||||
|
""")
|
||||||
|
|
||||||
|
# generate an invalid resume token, and verify if its aborted automaticly
|
||||||
|
def test_initial_resumeabort(self):
|
||||||
|
|
||||||
|
if "0.6.5" in ZFS_USERSPACE:
|
||||||
|
self.skipTest("Resume not supported in this ZFS userspace version")
|
||||||
|
|
||||||
|
# inital backup, leaves resume token
|
||||||
|
with patch('time.strftime', return_value="test-20101111000000"):
|
||||||
|
self.generate_resume()
|
||||||
|
|
||||||
|
# remove corresponding source snapshot, so it becomes invalid
|
||||||
|
shelltest("zfs destroy test_source1/fs1@test-20101111000000")
|
||||||
|
|
||||||
|
# NOTE: it can only abort the initial dataset if it has no subs
|
||||||
|
shelltest("zfs destroy test_target1/test_source1/fs1/sub; true")
|
||||||
|
|
||||||
|
# --test try again, should abort old resume
|
||||||
|
with patch('time.strftime', return_value="test-20101111000001"):
|
||||||
|
self.assertFalse(ZfsAutobackup("test test_target1 --no-progress --verbose --test".split(" ")).run())
|
||||||
|
|
||||||
|
# try again, should abort old resume
|
||||||
|
with patch('time.strftime', return_value="test-20101111000001"):
|
||||||
|
self.assertFalse(ZfsAutobackup("test test_target1 --no-progress --verbose".split(" ")).run())
|
||||||
|
|
||||||
|
r = shelltest("zfs list -H -o name -r -t all test_target1")
|
||||||
|
self.assertMultiLineEqual(r, """
|
||||||
|
test_target1
|
||||||
|
test_target1/test_source1
|
||||||
|
test_target1/test_source1/fs1
|
||||||
|
test_target1/test_source1/fs1@test-20101111000001
|
||||||
|
test_target1/test_source1/fs1/sub
|
||||||
|
test_target1/test_source1/fs1/sub@test-20101111000000
|
||||||
|
test_target1/test_source2
|
||||||
|
test_target1/test_source2/fs2
|
||||||
|
test_target1/test_source2/fs2/sub
|
||||||
|
test_target1/test_source2/fs2/sub@test-20101111000000
|
||||||
|
""")
|
||||||
|
|
||||||
|
# generate an invalid resume token, and verify if its aborted automaticly
|
||||||
|
def test_incremental_resumeabort(self):
|
||||||
|
|
||||||
|
if "0.6.5" in ZFS_USERSPACE:
|
||||||
|
self.skipTest("Resume not supported in this ZFS userspace version")
|
||||||
|
|
||||||
|
# initial backup
|
||||||
|
with patch('time.strftime', return_value="test-20101111000000"):
|
||||||
|
self.assertFalse(ZfsAutobackup("test test_target1 --no-progress --verbose --allow-empty".split(" ")).run())
|
||||||
|
|
||||||
|
# icremental backup, leaves resume token
|
||||||
|
with patch('time.strftime', return_value="test-20101111000001"):
|
||||||
|
self.generate_resume()
|
||||||
|
|
||||||
|
# remove corresponding source snapshot, so it becomes invalid
|
||||||
|
shelltest("zfs destroy test_source1/fs1@test-20101111000001")
|
||||||
|
|
||||||
|
# --test try again, should abort old resume
|
||||||
|
with patch('time.strftime', return_value="test-20101111000002"):
|
||||||
|
self.assertFalse(ZfsAutobackup("test test_target1 --no-progress --verbose --test".split(" ")).run())
|
||||||
|
|
||||||
|
# try again, should abort old resume
|
||||||
|
with patch('time.strftime', return_value="test-20101111000002"):
|
||||||
|
self.assertFalse(ZfsAutobackup("test test_target1 --no-progress --verbose".split(" ")).run())
|
||||||
|
|
||||||
|
r = shelltest("zfs list -H -o name -r -t all test_target1")
|
||||||
|
self.assertMultiLineEqual(r, """
|
||||||
|
test_target1
|
||||||
|
test_target1/test_source1
|
||||||
|
test_target1/test_source1/fs1
|
||||||
|
test_target1/test_source1/fs1@test-20101111000000
|
||||||
|
test_target1/test_source1/fs1@test-20101111000002
|
||||||
|
test_target1/test_source1/fs1/sub
|
||||||
|
test_target1/test_source1/fs1/sub@test-20101111000000
|
||||||
|
test_target1/test_source2
|
||||||
|
test_target1/test_source2/fs2
|
||||||
|
test_target1/test_source2/fs2/sub
|
||||||
|
test_target1/test_source2/fs2/sub@test-20101111000000
|
||||||
|
""")
|
||||||
|
|
||||||
|
# create a resume situation, where the other side doesnt want the snapshot anymore ( should abort resume )
|
||||||
|
def test_abort_unwanted_resume(self):
|
||||||
|
|
||||||
|
if "0.6.5" in ZFS_USERSPACE:
|
||||||
|
self.skipTest("Resume not supported in this ZFS userspace version")
|
||||||
|
|
||||||
|
with patch('time.strftime', return_value="test-20101111000000"):
|
||||||
|
self.assertFalse(ZfsAutobackup("test test_target1 --no-progress --verbose".split(" ")).run())
|
||||||
|
|
||||||
|
# generate resume
|
||||||
|
with patch('time.strftime', return_value="test-20101111000001"):
|
||||||
|
self.generate_resume()
|
||||||
|
|
||||||
|
with OutputIO() as buf:
|
||||||
|
with redirect_stdout(buf):
|
||||||
|
# incremental, doesnt want previous anymore
|
||||||
|
with patch('time.strftime', return_value="test-20101111000002"):
|
||||||
|
self.assertFalse(ZfsAutobackup(
|
||||||
|
"test test_target1 --no-progress --verbose --keep-target=0 --allow-empty".split(" ")).run())
|
||||||
|
|
||||||
|
print(buf.getvalue())
|
||||||
|
|
||||||
|
self.assertIn("Aborting resume, we dont want that snapshot anymore.", buf.getvalue())
|
||||||
|
|
||||||
|
r = shelltest("zfs list -H -o name -r -t all test_target1")
|
||||||
|
self.assertMultiLineEqual(r, """
|
||||||
|
test_target1
|
||||||
|
test_target1/test_source1
|
||||||
|
test_target1/test_source1/fs1
|
||||||
|
test_target1/test_source1/fs1@test-20101111000002
|
||||||
|
test_target1/test_source1/fs1/sub
|
||||||
|
test_target1/test_source1/fs1/sub@test-20101111000002
|
||||||
|
test_target1/test_source2
|
||||||
|
test_target1/test_source2/fs2
|
||||||
|
test_target1/test_source2/fs2/sub
|
||||||
|
test_target1/test_source2/fs2/sub@test-20101111000002
|
||||||
|
""")
|
||||||
|
|
||||||
|
# test with empty snapshot list (this was a bug)
|
||||||
|
def test_abort_resume_emptysnapshotlist(self):
|
||||||
|
|
||||||
|
if "0.6.5" in ZFS_USERSPACE:
|
||||||
|
self.skipTest("Resume not supported in this ZFS userspace version")
|
||||||
|
|
||||||
|
with patch('time.strftime', return_value="test-20101111000000"):
|
||||||
|
self.assertFalse(ZfsAutobackup("test test_target1 --no-progress --verbose".split(" ")).run())
|
||||||
|
|
||||||
|
# generate resume
|
||||||
|
with patch('time.strftime', return_value="test-20101111000001"):
|
||||||
|
self.generate_resume()
|
||||||
|
|
||||||
|
shelltest("zfs destroy test_source1/fs1@test-20101111000001")
|
||||||
|
|
||||||
|
with OutputIO() as buf:
|
||||||
|
with redirect_stdout(buf):
|
||||||
|
# incremental, doesnt want previous anymore
|
||||||
|
with patch('time.strftime', return_value="test-20101111000002"):
|
||||||
|
self.assertFalse(ZfsAutobackup(
|
||||||
|
"test test_target1 --no-progress --verbose --no-snapshot".split(
|
||||||
|
" ")).run())
|
||||||
|
|
||||||
|
print(buf.getvalue())
|
||||||
|
|
||||||
|
self.assertIn("Aborting resume, its obsolete", buf.getvalue())
|
||||||
|
|
||||||
|
|
||||||
|
def test_missing_common(self):
|
||||||
|
|
||||||
|
with patch('time.strftime', return_value="test-20101111000000"):
|
||||||
|
self.assertFalse(ZfsAutobackup("test test_target1 --no-progress --verbose --allow-empty".split(" ")).run())
|
||||||
|
|
||||||
|
# remove common snapshot and leave nothing
|
||||||
|
shelltest("zfs release zfs_autobackup:test test_source1/fs1@test-20101111000000")
|
||||||
|
shelltest("zfs destroy test_source1/fs1@test-20101111000000")
|
||||||
|
|
||||||
|
with patch('time.strftime', return_value="test-20101111000001"):
|
||||||
|
self.assertTrue(ZfsAutobackup("test test_target1 --no-progress --verbose --allow-empty".split(" ")).run())
|
||||||
|
|
||||||
|
#UPDATE: offcourse the one thing that wasn't tested had a bug :( (in ExecuteNode.run()).
|
||||||
|
def test_ignoretransfererrors(self):
|
||||||
|
|
||||||
|
self.skipTest("Not sure how to implement a test for this without some serious hacking and patching.")
|
||||||
|
|
||||||
|
# #recreate target pool without any features
|
||||||
|
# # shelltest("zfs set compress=on test_source1; zpool destroy test_target1; zpool create test_target1 -o feature@project_quota=disabled /dev/ram2")
|
||||||
|
#
|
||||||
|
# with patch('time.strftime', return_value="test-20101111000000"):
|
||||||
|
# self.assertFalse(ZfsAutobackup("test test_target1 --verbose --allow-empty --no-progress".split(" ")).run())
|
||||||
|
#
|
||||||
|
# r = shelltest("zfs list -H -o name -r -t all test_target1")
|
||||||
|
#
|
||||||
|
# self.assertMultiLineEqual(r, """
|
||||||
|
# test_target1
|
||||||
|
# test_target1/test_source1
|
||||||
|
# test_target1/test_source1/fs1
|
||||||
|
# test_target1/test_source1/fs1@test-20101111000002
|
||||||
|
# test_target1/test_source1/fs1/sub
|
||||||
|
# test_target1/test_source1/fs1/sub@test-20101111000002
|
||||||
|
# test_target1/test_source2
|
||||||
|
# test_target1/test_source2/fs2
|
||||||
|
# test_target1/test_source2/fs2/sub
|
||||||
|
# test_target1/test_source2/fs2/sub@test-20101111000002
|
||||||
|
# """)
|
||||||
52
tests/test_log.py
Normal file
52
tests/test_log.py
Normal file
@ -0,0 +1,52 @@
|
|||||||
|
import zfs_autobackup.LogConsole
|
||||||
|
from basetest import *
|
||||||
|
|
||||||
|
|
||||||
|
class TestLog(unittest2.TestCase):
|
||||||
|
|
||||||
|
def test_colored(self):
|
||||||
|
"""test with color output"""
|
||||||
|
with OutputIO() as buf:
|
||||||
|
with redirect_stdout(buf):
|
||||||
|
l=LogConsole(show_verbose=False, show_debug=False, color=True)
|
||||||
|
l.verbose("verbose")
|
||||||
|
l.debug("debug")
|
||||||
|
|
||||||
|
with redirect_stdout(buf):
|
||||||
|
l=LogConsole(show_verbose=True, show_debug=True, color=True)
|
||||||
|
l.verbose("verbose")
|
||||||
|
l.debug("debug")
|
||||||
|
|
||||||
|
with redirect_stderr(buf):
|
||||||
|
l=LogConsole(show_verbose=False, show_debug=False, color=True)
|
||||||
|
l.error("error")
|
||||||
|
|
||||||
|
print(list(buf.getvalue()))
|
||||||
|
self.assertEqual(list(buf.getvalue()), ['\x1b', '[', '2', '2', 'm', ' ', ' ', 'v', 'e', 'r', 'b', 'o', 's', 'e', '\x1b', '[', '0', 'm', '\n', '\x1b', '[', '3', '2', 'm', '#', ' ', 'd', 'e', 'b', 'u', 'g', '\x1b', '[', '0', 'm', '\n', '\x1b', '[', '3', '1', 'm', '\x1b', '[', '1', 'm', '!', ' ', 'e', 'r', 'r', 'o', 'r', '\x1b', '[', '0', 'm', '\n'])
|
||||||
|
|
||||||
|
def test_nocolor(self):
|
||||||
|
"""test without color output"""
|
||||||
|
|
||||||
|
with OutputIO() as buf:
|
||||||
|
with redirect_stdout(buf):
|
||||||
|
l=LogConsole(show_verbose=False, show_debug=False, color=False)
|
||||||
|
l.verbose("verbose")
|
||||||
|
l.debug("debug")
|
||||||
|
|
||||||
|
with redirect_stdout(buf):
|
||||||
|
l=LogConsole(show_verbose=True, show_debug=True, color=False)
|
||||||
|
l.verbose("verbose")
|
||||||
|
l.debug("debug")
|
||||||
|
|
||||||
|
with redirect_stderr(buf):
|
||||||
|
l=LogConsole(show_verbose=False, show_debug=False, color=False)
|
||||||
|
l.error("error")
|
||||||
|
|
||||||
|
print(list(buf.getvalue()))
|
||||||
|
self.assertEqual(list(buf.getvalue()), [' ', ' ', 'v', 'e', 'r', 'b', 'o', 's', 'e', '\n', '#', ' ', 'd', 'e', 'b', 'u', 'g', '\n', '!', ' ', 'e', 'r', 'r', 'o', 'r', '\n'])
|
||||||
|
|
||||||
|
|
||||||
|
zfs_autobackup.LogConsole.colorama=False
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
58
tests/test_regressions.py
Normal file
58
tests/test_regressions.py
Normal file
@ -0,0 +1,58 @@
|
|||||||
|
|
||||||
|
from basetest import *
|
||||||
|
|
||||||
|
|
||||||
|
class TestZfsNode(unittest2.TestCase):
|
||||||
|
|
||||||
|
def setUp(self):
|
||||||
|
prepare_zpools()
|
||||||
|
self.longMessage=True
|
||||||
|
|
||||||
|
def test_keepsource0target10queuedsend(self):
|
||||||
|
"""Test if thinner doesnt destroy too much early on if there are no common snapshots YET. Issue #84"""
|
||||||
|
|
||||||
|
with patch('time.strftime', return_value="test-20101111000000"):
|
||||||
|
self.assertFalse(ZfsAutobackup(
|
||||||
|
"test test_target1 --no-progress --verbose --keep-source=0 --keep-target=10 --allow-empty --no-send".split(
|
||||||
|
" ")).run())
|
||||||
|
|
||||||
|
with patch('time.strftime', return_value="test-20101111000001"):
|
||||||
|
self.assertFalse(ZfsAutobackup(
|
||||||
|
"test test_target1 --no-progress --verbose --keep-source=0 --keep-target=10 --allow-empty --no-send".split(
|
||||||
|
" ")).run())
|
||||||
|
|
||||||
|
with patch('time.strftime', return_value="test-20101111000002"):
|
||||||
|
self.assertFalse(ZfsAutobackup(
|
||||||
|
"test test_target1 --no-progress --verbose --keep-source=0 --keep-target=10 --allow-empty".split(
|
||||||
|
" ")).run())
|
||||||
|
|
||||||
|
r = shelltest("zfs list -H -o name -r -t all " + TEST_POOLS)
|
||||||
|
self.assertMultiLineEqual(r, """
|
||||||
|
test_source1
|
||||||
|
test_source1/fs1
|
||||||
|
test_source1/fs1@test-20101111000002
|
||||||
|
test_source1/fs1/sub
|
||||||
|
test_source1/fs1/sub@test-20101111000002
|
||||||
|
test_source2
|
||||||
|
test_source2/fs2
|
||||||
|
test_source2/fs2/sub
|
||||||
|
test_source2/fs2/sub@test-20101111000002
|
||||||
|
test_source2/fs3
|
||||||
|
test_source2/fs3/sub
|
||||||
|
test_target1
|
||||||
|
test_target1/test_source1
|
||||||
|
test_target1/test_source1/fs1
|
||||||
|
test_target1/test_source1/fs1@test-20101111000000
|
||||||
|
test_target1/test_source1/fs1@test-20101111000001
|
||||||
|
test_target1/test_source1/fs1@test-20101111000002
|
||||||
|
test_target1/test_source1/fs1/sub
|
||||||
|
test_target1/test_source1/fs1/sub@test-20101111000000
|
||||||
|
test_target1/test_source1/fs1/sub@test-20101111000001
|
||||||
|
test_target1/test_source1/fs1/sub@test-20101111000002
|
||||||
|
test_target1/test_source2
|
||||||
|
test_target1/test_source2/fs2
|
||||||
|
test_target1/test_source2/fs2/sub
|
||||||
|
test_target1/test_source2/fs2/sub@test-20101111000000
|
||||||
|
test_target1/test_source2/fs2/sub@test-20101111000001
|
||||||
|
test_target1/test_source2/fs2/sub@test-20101111000002
|
||||||
|
""")
|
||||||
97
tests/test_scaling.py
Normal file
97
tests/test_scaling.py
Normal file
@ -0,0 +1,97 @@
|
|||||||
|
from basetest import *
|
||||||
|
|
||||||
|
from zfs_autobackup.ExecuteNode import ExecuteNode
|
||||||
|
|
||||||
|
run_orig=ExecuteNode.run
|
||||||
|
run_counter=0
|
||||||
|
|
||||||
|
def run_count(*args, **kwargs):
|
||||||
|
global run_counter
|
||||||
|
run_counter=run_counter+1
|
||||||
|
return (run_orig(*args, **kwargs))
|
||||||
|
|
||||||
|
class TestZfsScaling(unittest2.TestCase):
|
||||||
|
|
||||||
|
def setUp(self):
|
||||||
|
prepare_zpools()
|
||||||
|
self.longMessage = True
|
||||||
|
|
||||||
|
def test_manysnapshots(self):
|
||||||
|
"""count the number of commands when there are many snapshots."""
|
||||||
|
|
||||||
|
snapshot_count=100
|
||||||
|
|
||||||
|
print("Creating many snapshots...")
|
||||||
|
s=""
|
||||||
|
for i in range(1970,1970+snapshot_count):
|
||||||
|
s=s+"zfs snapshot test_source1/fs1@test-{:04}1111000000;".format(i)
|
||||||
|
|
||||||
|
shelltest(s)
|
||||||
|
|
||||||
|
global run_counter
|
||||||
|
|
||||||
|
run_counter=0
|
||||||
|
with patch.object(ExecuteNode,'run', run_count) as p:
|
||||||
|
|
||||||
|
with patch('time.strftime', return_value="test-20101112000000"):
|
||||||
|
self.assertFalse(ZfsAutobackup("test test_target1 --no-progress --verbose --keep-source=10000 --keep-target=10000 --no-holds --allow-empty".split(" ")).run())
|
||||||
|
|
||||||
|
|
||||||
|
#this triggers if you make a change with an impact of more than O(snapshot_count/2)
|
||||||
|
expected_runs=343
|
||||||
|
print("ACTUAL RUNS: {}".format(run_counter))
|
||||||
|
self.assertLess(abs(run_counter-expected_runs), snapshot_count/2)
|
||||||
|
|
||||||
|
|
||||||
|
run_counter=0
|
||||||
|
with patch.object(ExecuteNode,'run', run_count) as p:
|
||||||
|
|
||||||
|
with patch('time.strftime', return_value="test-20101112000001"):
|
||||||
|
self.assertFalse(ZfsAutobackup("test test_target1 --no-progress --verbose --keep-source=10000 --keep-target=10000 --no-holds --allow-empty".split(" ")).run())
|
||||||
|
|
||||||
|
|
||||||
|
#this triggers if you make a change with a performance impact of more than O(snapshot_count/2)
|
||||||
|
expected_runs=47
|
||||||
|
print("ACTUAL RUNS: {}".format(run_counter))
|
||||||
|
self.assertLess(abs(run_counter-expected_runs), snapshot_count/2)
|
||||||
|
|
||||||
|
def test_manydatasets(self):
|
||||||
|
"""count the number of commands when when there are many datasets"""
|
||||||
|
|
||||||
|
dataset_count=100
|
||||||
|
|
||||||
|
print("Creating many datasets...")
|
||||||
|
s=""
|
||||||
|
for i in range(0,dataset_count):
|
||||||
|
s=s+"zfs create test_source1/fs1/{};".format(i)
|
||||||
|
|
||||||
|
shelltest(s)
|
||||||
|
|
||||||
|
global run_counter
|
||||||
|
|
||||||
|
#first run
|
||||||
|
run_counter=0
|
||||||
|
with patch.object(ExecuteNode,'run', run_count) as p:
|
||||||
|
|
||||||
|
with patch('time.strftime', return_value="test-20101112000000"):
|
||||||
|
self.assertFalse(ZfsAutobackup("test test_target1 --no-progress --verbose --no-holds --allow-empty".split(" ")).run())
|
||||||
|
|
||||||
|
|
||||||
|
#this triggers if you make a change with an impact of more than O(snapshot_count/2)
|
||||||
|
expected_runs=743
|
||||||
|
print("ACTUAL RUNS: {}".format(run_counter))
|
||||||
|
self.assertLess(abs(run_counter-expected_runs), dataset_count/2)
|
||||||
|
|
||||||
|
|
||||||
|
#second run, should have higher number of expected_runs
|
||||||
|
run_counter=0
|
||||||
|
with patch.object(ExecuteNode,'run', run_count) as p:
|
||||||
|
|
||||||
|
with patch('time.strftime', return_value="test-20101112000001"):
|
||||||
|
self.assertFalse(ZfsAutobackup("test test_target1 --no-progress --verbose --no-holds --allow-empty".split(" ")).run())
|
||||||
|
|
||||||
|
|
||||||
|
#this triggers if you make a change with a performance impact of more than O(snapshot_count/2)
|
||||||
|
expected_runs=947
|
||||||
|
print("ACTUAL RUNS: {}".format(run_counter))
|
||||||
|
self.assertLess(abs(run_counter-expected_runs), dataset_count/2)
|
||||||
88
tests/test_sendrecvpipes.py
Normal file
88
tests/test_sendrecvpipes.py
Normal file
@ -0,0 +1,88 @@
|
|||||||
|
import zfs_autobackup.compressors
|
||||||
|
from basetest import *
|
||||||
|
import time
|
||||||
|
|
||||||
|
class TestSendRecvPipes(unittest2.TestCase):
|
||||||
|
"""test input/output pipes for zfs send and recv"""
|
||||||
|
|
||||||
|
def setUp(self):
|
||||||
|
prepare_zpools()
|
||||||
|
self.longMessage=True
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
def test_send_basics(self):
|
||||||
|
"""send basics (remote/local send pipe)"""
|
||||||
|
|
||||||
|
|
||||||
|
with self.subTest("local local pipe"):
|
||||||
|
with patch('time.strftime', return_value="test-20101111000000"):
|
||||||
|
self.assertFalse(ZfsAutobackup(["test", "test_target1", "--exclude-received", "--no-holds", "--no-progress", "--send-pipe=dd bs=1M", "--recv-pipe=dd bs=2M"]).run())
|
||||||
|
|
||||||
|
shelltest("zfs destroy -r test_target1/test_source1/fs1/sub")
|
||||||
|
|
||||||
|
with self.subTest("remote local pipe"):
|
||||||
|
with patch('time.strftime', return_value="test-20101111000000"):
|
||||||
|
self.assertFalse(ZfsAutobackup(["test", "test_target1", "--exclude-received", "--no-holds", "--no-progress", "--ssh-source=localhost", "--send-pipe=dd bs=1M", "--recv-pipe=dd bs=2M"]).run())
|
||||||
|
|
||||||
|
shelltest("zfs destroy -r test_target1/test_source1/fs1/sub")
|
||||||
|
|
||||||
|
with self.subTest("local remote pipe"):
|
||||||
|
with patch('time.strftime', return_value="test-20101111000000"):
|
||||||
|
self.assertFalse(ZfsAutobackup(["test", "test_target1", "--exclude-received", "--no-holds", "--no-progress", "--ssh-target=localhost", "--send-pipe=dd bs=1M", "--recv-pipe=dd bs=2M"]).run())
|
||||||
|
|
||||||
|
shelltest("zfs destroy -r test_target1/test_source1/fs1/sub")
|
||||||
|
|
||||||
|
with self.subTest("remote remote pipe"):
|
||||||
|
with patch('time.strftime', return_value="test-20101111000000"):
|
||||||
|
self.assertFalse(ZfsAutobackup(["test", "test_target1", "--exclude-received", "--no-holds", "--no-progress", "--ssh-source=localhost", "--ssh-target=localhost", "--send-pipe=dd bs=1M", "--recv-pipe=dd bs=2M"]).run())
|
||||||
|
|
||||||
|
def test_compress(self):
|
||||||
|
"""send basics (remote/local send pipe)"""
|
||||||
|
|
||||||
|
for compress in zfs_autobackup.compressors.COMPRESS_CMDS.keys():
|
||||||
|
|
||||||
|
with self.subTest("compress "+compress):
|
||||||
|
with patch('time.strftime', return_value="test-20101111000000"):
|
||||||
|
self.assertFalse(ZfsAutobackup(["test", "test_target1", "--exclude-received", "--no-holds", "--no-progress", "--compress="+compress]).run())
|
||||||
|
|
||||||
|
shelltest("zfs destroy -r test_target1/test_source1/fs1/sub")
|
||||||
|
|
||||||
|
def test_buffer(self):
|
||||||
|
"""test different buffer configurations"""
|
||||||
|
|
||||||
|
|
||||||
|
with self.subTest("local local pipe"):
|
||||||
|
with patch('time.strftime', return_value="test-20101111000000"):
|
||||||
|
self.assertFalse(ZfsAutobackup(["test", "test_target1", "--exclude-received", "--no-holds", "--no-progress", "--buffer=1M" ]).run())
|
||||||
|
|
||||||
|
shelltest("zfs destroy -r test_target1/test_source1/fs1/sub")
|
||||||
|
|
||||||
|
with self.subTest("remote local pipe"):
|
||||||
|
with patch('time.strftime', return_value="test-20101111000000"):
|
||||||
|
self.assertFalse(ZfsAutobackup(["test", "test_target1", "--exclude-received", "--no-holds", "--no-progress", "--ssh-source=localhost", "--buffer=1M"]).run())
|
||||||
|
|
||||||
|
shelltest("zfs destroy -r test_target1/test_source1/fs1/sub")
|
||||||
|
|
||||||
|
with self.subTest("local remote pipe"):
|
||||||
|
with patch('time.strftime', return_value="test-20101111000000"):
|
||||||
|
self.assertFalse(ZfsAutobackup(["test", "test_target1", "--exclude-received", "--no-holds", "--no-progress", "--ssh-target=localhost", "--buffer=1M"]).run())
|
||||||
|
|
||||||
|
shelltest("zfs destroy -r test_target1/test_source1/fs1/sub")
|
||||||
|
|
||||||
|
with self.subTest("remote remote pipe"):
|
||||||
|
with patch('time.strftime', return_value="test-20101111000000"):
|
||||||
|
self.assertFalse(ZfsAutobackup(["test", "test_target1", "--exclude-received", "--no-holds", "--no-progress", "--ssh-source=localhost", "--ssh-target=localhost", "--buffer=1M"]).run())
|
||||||
|
|
||||||
|
def test_rate(self):
|
||||||
|
"""test rate limit"""
|
||||||
|
|
||||||
|
|
||||||
|
start=time.time()
|
||||||
|
with patch('time.strftime', return_value="test-20101111000000"):
|
||||||
|
self.assertFalse(ZfsAutobackup(["test", "test_target1", "--exclude-received", "--no-holds", "--no-progress", "--rate=50k" ]).run())
|
||||||
|
|
||||||
|
#not a great way of verifying but it works.
|
||||||
|
self.assertGreater(time.time()-start, 5)
|
||||||
|
|
||||||
|
|
||||||
@ -1,6 +1,8 @@
|
|||||||
from basetest import *
|
from basetest import *
|
||||||
import pprint
|
import pprint
|
||||||
|
|
||||||
|
from zfs_autobackup.Thinner import Thinner
|
||||||
|
|
||||||
# randint is different in python 2 vs 3
|
# randint is different in python 2 vs 3
|
||||||
randint_compat = lambda lo, hi: lo + int(random.random() * (hi + 1 - lo))
|
randint_compat = lambda lo, hi: lo + int(random.random() * (hi + 1 - lo))
|
||||||
|
|
||||||
@ -21,6 +23,23 @@ class TestThinner(unittest2.TestCase):
|
|||||||
|
|
||||||
# return super().setUp()
|
# return super().setUp()
|
||||||
|
|
||||||
|
def test_exceptions(self):
|
||||||
|
with self.assertRaisesRegexp(Exception, "^Invalid period"):
|
||||||
|
ThinnerRule("12X12m")
|
||||||
|
|
||||||
|
with self.assertRaisesRegexp(Exception, "^Invalid ttl"):
|
||||||
|
ThinnerRule("12d12X")
|
||||||
|
|
||||||
|
with self.assertRaisesRegexp(Exception, "^Period cant be"):
|
||||||
|
ThinnerRule("12d1d")
|
||||||
|
|
||||||
|
with self.assertRaisesRegexp(Exception, "^Invalid schedule"):
|
||||||
|
ThinnerRule("XXX")
|
||||||
|
|
||||||
|
with self.assertRaisesRegexp(Exception, "^Number of"):
|
||||||
|
Thinner("-1")
|
||||||
|
|
||||||
|
|
||||||
def test_incremental(self):
|
def test_incremental(self):
|
||||||
ok=['2023-01-03 10:53:16',
|
ok=['2023-01-03 10:53:16',
|
||||||
'2024-01-02 15:43:29',
|
'2024-01-02 15:43:29',
|
||||||
@ -136,5 +155,5 @@ class TestThinner(unittest2.TestCase):
|
|||||||
self.assertEqual(result, ok)
|
self.assertEqual(result, ok)
|
||||||
|
|
||||||
|
|
||||||
if __name__ == '__main__':
|
# if __name__ == '__main__':
|
||||||
unittest.main()
|
# unittest.main()
|
||||||
@ -1,8 +1,9 @@
|
|||||||
|
from zfs_autobackup.CmdPipe import CmdPipe
|
||||||
|
|
||||||
from basetest import *
|
from basetest import *
|
||||||
import time
|
import time
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
class TestZfsAutobackup(unittest2.TestCase):
|
class TestZfsAutobackup(unittest2.TestCase):
|
||||||
|
|
||||||
def setUp(self):
|
def setUp(self):
|
||||||
@ -11,63 +12,53 @@ class TestZfsAutobackup(unittest2.TestCase):
|
|||||||
|
|
||||||
def test_invalidpars(self):
|
def test_invalidpars(self):
|
||||||
|
|
||||||
self.assertEqual(ZfsAutobackup("test test_target1 --keep-source -1".split(" ")).run(), 255)
|
self.assertEqual(ZfsAutobackup("test test_target1 --no-progress --keep-source -1".split(" ")).run(), 255)
|
||||||
|
|
||||||
|
with OutputIO() as buf:
|
||||||
|
with redirect_stdout(buf):
|
||||||
|
self.assertEqual(ZfsAutobackup("test test_target1 --no-progress --resume --verbose --no-snapshot".split(" ")).run(), 0)
|
||||||
|
|
||||||
|
print(buf.getvalue())
|
||||||
|
self.assertIn("The --resume", buf.getvalue())
|
||||||
|
|
||||||
|
with OutputIO() as buf:
|
||||||
|
with redirect_stderr(buf):
|
||||||
|
self.assertEqual(ZfsAutobackup("test test_target_nonexisting --no-progress".split(" ")).run(), 255)
|
||||||
|
|
||||||
|
print(buf.getvalue())
|
||||||
|
# correct message?
|
||||||
|
self.assertIn("Please create this dataset", buf.getvalue())
|
||||||
|
|
||||||
|
|
||||||
def test_snapshotmode(self):
|
def test_snapshotmode(self):
|
||||||
"""test snapshot tool mode"""
|
"""test snapshot tool mode"""
|
||||||
|
|
||||||
with patch('time.strftime', return_value="20101111000000"):
|
with patch('time.strftime', return_value="test-20101111000000"):
|
||||||
self.assertFalse(ZfsAutobackup("test test_target1 --verbose".split(" ")).run())
|
self.assertFalse(ZfsAutobackup("test --no-progress --verbose".split(" ")).run())
|
||||||
|
|
||||||
with patch('time.strftime', return_value="20101111000001"):
|
|
||||||
self.assertFalse(ZfsAutobackup("test test_target1 --allow-empty --verbose".split(" ")).run())
|
|
||||||
|
|
||||||
with patch('time.strftime', return_value="20101111000002"):
|
|
||||||
self.assertFalse(ZfsAutobackup("test --verbose --allow-empty --keep-source 0".split(" ")).run())
|
|
||||||
|
|
||||||
#on source: only has 1 and 2 (1 was hold)
|
|
||||||
#on target: has 0 and 1
|
|
||||||
#XXX:
|
|
||||||
r=shelltest("zfs list -H -o name -r -t all "+TEST_POOLS)
|
r=shelltest("zfs list -H -o name -r -t all "+TEST_POOLS)
|
||||||
self.assertMultiLineEqual(r,"""
|
self.assertMultiLineEqual(r,"""
|
||||||
test_source1
|
test_source1
|
||||||
test_source1/fs1
|
test_source1/fs1
|
||||||
test_source1/fs1@test-20101111000001
|
test_source1/fs1@test-20101111000000
|
||||||
test_source1/fs1@test-20101111000002
|
|
||||||
test_source1/fs1/sub
|
test_source1/fs1/sub
|
||||||
test_source1/fs1/sub@test-20101111000001
|
test_source1/fs1/sub@test-20101111000000
|
||||||
test_source1/fs1/sub@test-20101111000002
|
|
||||||
test_source2
|
test_source2
|
||||||
test_source2/fs2
|
test_source2/fs2
|
||||||
test_source2/fs2/sub
|
test_source2/fs2/sub
|
||||||
test_source2/fs2/sub@test-20101111000001
|
test_source2/fs2/sub@test-20101111000000
|
||||||
test_source2/fs2/sub@test-20101111000002
|
|
||||||
test_source2/fs3
|
test_source2/fs3
|
||||||
test_source2/fs3/sub
|
test_source2/fs3/sub
|
||||||
test_target1
|
test_target1
|
||||||
test_target1/test_source1
|
|
||||||
test_target1/test_source1/fs1
|
|
||||||
test_target1/test_source1/fs1@test-20101111000000
|
|
||||||
test_target1/test_source1/fs1@test-20101111000001
|
|
||||||
test_target1/test_source1/fs1/sub
|
|
||||||
test_target1/test_source1/fs1/sub@test-20101111000000
|
|
||||||
test_target1/test_source1/fs1/sub@test-20101111000001
|
|
||||||
test_target1/test_source2
|
|
||||||
test_target1/test_source2/fs2
|
|
||||||
test_target1/test_source2/fs2/sub
|
|
||||||
test_target1/test_source2/fs2/sub@test-20101111000000
|
|
||||||
test_target1/test_source2/fs2/sub@test-20101111000001
|
|
||||||
""")
|
""")
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
def test_defaults(self):
|
def test_defaults(self):
|
||||||
|
|
||||||
with self.subTest("no datasets selected"):
|
with self.subTest("no datasets selected"):
|
||||||
with OutputIO() as buf:
|
with OutputIO() as buf:
|
||||||
with redirect_stderr(buf):
|
with redirect_stderr(buf):
|
||||||
with patch('time.strftime', return_value="20101111000000"):
|
with patch('time.strftime', return_value="test-20101111000000"):
|
||||||
self.assertTrue(ZfsAutobackup("nonexisting test_target1 --verbose --debug".split(" ")).run())
|
self.assertTrue(ZfsAutobackup("nonexisting test_target1 --verbose --debug --no-progress".split(" ")).run())
|
||||||
|
|
||||||
print(buf.getvalue())
|
print(buf.getvalue())
|
||||||
#correct message?
|
#correct message?
|
||||||
@ -76,8 +67,8 @@ test_target1/test_source2/fs2/sub@test-20101111000001
|
|||||||
|
|
||||||
with self.subTest("defaults with full verbose and debug"):
|
with self.subTest("defaults with full verbose and debug"):
|
||||||
|
|
||||||
with patch('time.strftime', return_value="20101111000000"):
|
with patch('time.strftime', return_value="test-20101111000000"):
|
||||||
self.assertFalse(ZfsAutobackup("test test_target1 --verbose --debug".split(" ")).run())
|
self.assertFalse(ZfsAutobackup("test test_target1 --verbose --debug --no-progress".split(" ")).run())
|
||||||
|
|
||||||
r=shelltest("zfs list -H -o name -r -t all "+TEST_POOLS)
|
r=shelltest("zfs list -H -o name -r -t all "+TEST_POOLS)
|
||||||
self.assertMultiLineEqual(r,"""
|
self.assertMultiLineEqual(r,"""
|
||||||
@ -105,8 +96,8 @@ test_target1/test_source2/fs2/sub@test-20101111000000
|
|||||||
""")
|
""")
|
||||||
|
|
||||||
with self.subTest("bare defaults, allow empty"):
|
with self.subTest("bare defaults, allow empty"):
|
||||||
with patch('time.strftime', return_value="20101111000001"):
|
with patch('time.strftime', return_value="test-20101111000001"):
|
||||||
self.assertFalse(ZfsAutobackup("test test_target1 --allow-empty".split(" ")).run())
|
self.assertFalse(ZfsAutobackup("test test_target1 --allow-empty --no-progress".split(" ")).run())
|
||||||
|
|
||||||
|
|
||||||
r=shelltest("zfs list -H -o name -r -t all "+TEST_POOLS)
|
r=shelltest("zfs list -H -o name -r -t all "+TEST_POOLS)
|
||||||
@ -176,15 +167,15 @@ test_target1/test_source2/fs2/sub@test-20101111000001 userrefs 1 -
|
|||||||
|
|
||||||
#make sure time handling is correctly. try to make snapshots a year appart and verify that only snapshots mostly 1y old are kept
|
#make sure time handling is correctly. try to make snapshots a year appart and verify that only snapshots mostly 1y old are kept
|
||||||
with self.subTest("test time checking"):
|
with self.subTest("test time checking"):
|
||||||
with patch('time.strftime', return_value="20111111000000"):
|
with patch('time.strftime', return_value="test-20111111000000"):
|
||||||
self.assertFalse(ZfsAutobackup("test test_target1 --allow-empty --verbose".split(" ")).run())
|
self.assertFalse(ZfsAutobackup("test test_target1 --allow-empty --verbose --no-progress".split(" ")).run())
|
||||||
|
|
||||||
|
|
||||||
time_str="20111112000000" #month in the "future"
|
time_str="20111112000000" #month in the "future"
|
||||||
future_timestamp=time_secs=time.mktime(time.strptime(time_str,"%Y%m%d%H%M%S"))
|
future_timestamp=time_secs=time.mktime(time.strptime(time_str,"%Y%m%d%H%M%S"))
|
||||||
with patch('time.time', return_value=future_timestamp):
|
with patch('time.time', return_value=future_timestamp):
|
||||||
with patch('time.strftime', return_value="20111111000001"):
|
with patch('time.strftime', return_value="test-20111111000001"):
|
||||||
self.assertFalse(ZfsAutobackup("test test_target1 --allow-empty --verbose --keep-source 1y1y --keep-target 1d1y".split(" ")).run())
|
self.assertFalse(ZfsAutobackup("test test_target1 --allow-empty --verbose --keep-source 1y1y --keep-target 1d1y --no-progress".split(" ")).run())
|
||||||
|
|
||||||
|
|
||||||
r=shelltest("zfs list -H -o name -r -t all "+TEST_POOLS)
|
r=shelltest("zfs list -H -o name -r -t all "+TEST_POOLS)
|
||||||
@ -218,14 +209,13 @@ test_target1/test_source2/fs2/sub@test-20111111000000
|
|||||||
test_target1/test_source2/fs2/sub@test-20111111000001
|
test_target1/test_source2/fs2/sub@test-20111111000001
|
||||||
""")
|
""")
|
||||||
|
|
||||||
|
|
||||||
def test_ignore_othersnaphots(self):
|
def test_ignore_othersnaphots(self):
|
||||||
|
|
||||||
r=shelltest("zfs snapshot test_source1/fs1@othersimple")
|
r=shelltest("zfs snapshot test_source1/fs1@othersimple")
|
||||||
r=shelltest("zfs snapshot test_source1/fs1@otherdate-20001111000000")
|
r=shelltest("zfs snapshot test_source1/fs1@otherdate-20001111000000")
|
||||||
|
|
||||||
with patch('time.strftime', return_value="20101111000000"):
|
with patch('time.strftime', return_value="test-20101111000000"):
|
||||||
self.assertFalse(ZfsAutobackup("test test_target1 --verbose".split(" ")).run())
|
self.assertFalse(ZfsAutobackup("test test_target1 --no-progress --verbose".split(" ")).run())
|
||||||
|
|
||||||
r=shelltest("zfs list -H -o name -r -t all "+TEST_POOLS)
|
r=shelltest("zfs list -H -o name -r -t all "+TEST_POOLS)
|
||||||
self.assertMultiLineEqual(r,"""
|
self.assertMultiLineEqual(r,"""
|
||||||
@ -259,8 +249,8 @@ test_target1/test_source2/fs2/sub@test-20101111000000
|
|||||||
r=shelltest("zfs snapshot test_source1/fs1@othersimple")
|
r=shelltest("zfs snapshot test_source1/fs1@othersimple")
|
||||||
r=shelltest("zfs snapshot test_source1/fs1@otherdate-20001111000000")
|
r=shelltest("zfs snapshot test_source1/fs1@otherdate-20001111000000")
|
||||||
|
|
||||||
with patch('time.strftime', return_value="20101111000000"):
|
with patch('time.strftime', return_value="test-20101111000000"):
|
||||||
self.assertFalse(ZfsAutobackup("test test_target1 --verbose --other-snapshots".split(" ")).run())
|
self.assertFalse(ZfsAutobackup("test test_target1 --no-progress --verbose --other-snapshots".split(" ")).run())
|
||||||
|
|
||||||
r=shelltest("zfs list -H -o name -r -t all "+TEST_POOLS)
|
r=shelltest("zfs list -H -o name -r -t all "+TEST_POOLS)
|
||||||
self.assertMultiLineEqual(r,"""
|
self.assertMultiLineEqual(r,"""
|
||||||
@ -294,8 +284,8 @@ test_target1/test_source2/fs2/sub@test-20101111000000
|
|||||||
|
|
||||||
def test_nosnapshot(self):
|
def test_nosnapshot(self):
|
||||||
|
|
||||||
with patch('time.strftime', return_value="20101111000000"):
|
with patch('time.strftime', return_value="test-20101111000000"):
|
||||||
self.assertFalse(ZfsAutobackup("test test_target1 --verbose --no-snapshot".split(" ")).run())
|
self.assertFalse(ZfsAutobackup("test test_target1 --verbose --no-snapshot --no-progress".split(" ")).run())
|
||||||
|
|
||||||
r=shelltest("zfs list -H -o name -r -t all "+TEST_POOLS)
|
r=shelltest("zfs list -H -o name -r -t all "+TEST_POOLS)
|
||||||
#(only parents are created )
|
#(only parents are created )
|
||||||
@ -318,12 +308,10 @@ test_target1/test_source2/fs2
|
|||||||
|
|
||||||
def test_nosend(self):
|
def test_nosend(self):
|
||||||
|
|
||||||
with patch('time.strftime', return_value="20101111000000"):
|
with patch('time.strftime', return_value="test-20101111000000"):
|
||||||
self.assertFalse(ZfsAutobackup("test test_target1 --verbose --no-send".split(" ")).run())
|
self.assertFalse(ZfsAutobackup("test test_target1 --verbose --no-send --no-progress".split(" ")).run())
|
||||||
|
|
||||||
r=shelltest("zfs list -H -o name -r -t all "+TEST_POOLS)
|
r=shelltest("zfs list -H -o name -r -t all "+TEST_POOLS)
|
||||||
#(only parents are created )
|
|
||||||
#TODO: it probably shouldn't create these
|
|
||||||
self.assertMultiLineEqual(r,"""
|
self.assertMultiLineEqual(r,"""
|
||||||
test_source1
|
test_source1
|
||||||
test_source1/fs1
|
test_source1/fs1
|
||||||
@ -343,12 +331,10 @@ test_target1
|
|||||||
def test_ignorereplicated(self):
|
def test_ignorereplicated(self):
|
||||||
r=shelltest("zfs snapshot test_source1/fs1@otherreplication")
|
r=shelltest("zfs snapshot test_source1/fs1@otherreplication")
|
||||||
|
|
||||||
with patch('time.strftime', return_value="20101111000000"):
|
with patch('time.strftime', return_value="test-20101111000000"):
|
||||||
self.assertFalse(ZfsAutobackup("test test_target1 --verbose --ignore-replicated".split(" ")).run())
|
self.assertFalse(ZfsAutobackup("test test_target1 --no-progress --verbose --ignore-replicated".split(" ")).run())
|
||||||
|
|
||||||
r=shelltest("zfs list -H -o name -r -t all "+TEST_POOLS)
|
r=shelltest("zfs list -H -o name -r -t all "+TEST_POOLS)
|
||||||
#(only parents are created )
|
|
||||||
#TODO: it probably shouldn't create these
|
|
||||||
self.assertMultiLineEqual(r,"""
|
self.assertMultiLineEqual(r,"""
|
||||||
test_source1
|
test_source1
|
||||||
test_source1/fs1
|
test_source1/fs1
|
||||||
@ -374,8 +360,8 @@ test_target1/test_source2/fs2/sub@test-20101111000000
|
|||||||
|
|
||||||
def test_noholds(self):
|
def test_noholds(self):
|
||||||
|
|
||||||
with patch('time.strftime', return_value="20101111000000"):
|
with patch('time.strftime', return_value="test-20101111000000"):
|
||||||
self.assertFalse(ZfsAutobackup("test test_target1 --verbose --no-holds".split(" ")).run())
|
self.assertFalse(ZfsAutobackup("test test_target1 --verbose --no-holds --no-progress".split(" ")).run())
|
||||||
|
|
||||||
r=shelltest("zfs get -r userrefs test_source1 test_source2 test_target1")
|
r=shelltest("zfs get -r userrefs test_source1 test_source2 test_target1")
|
||||||
self.assertMultiLineEqual(r,"""
|
self.assertMultiLineEqual(r,"""
|
||||||
@ -394,20 +380,20 @@ test_source2/fs3/sub userrefs - -
|
|||||||
test_target1 userrefs - -
|
test_target1 userrefs - -
|
||||||
test_target1/test_source1 userrefs - -
|
test_target1/test_source1 userrefs - -
|
||||||
test_target1/test_source1/fs1 userrefs - -
|
test_target1/test_source1/fs1 userrefs - -
|
||||||
test_target1/test_source1/fs1@test-20101111000000 userrefs 1 -
|
test_target1/test_source1/fs1@test-20101111000000 userrefs 0 -
|
||||||
test_target1/test_source1/fs1/sub userrefs - -
|
test_target1/test_source1/fs1/sub userrefs - -
|
||||||
test_target1/test_source1/fs1/sub@test-20101111000000 userrefs 1 -
|
test_target1/test_source1/fs1/sub@test-20101111000000 userrefs 0 -
|
||||||
test_target1/test_source2 userrefs - -
|
test_target1/test_source2 userrefs - -
|
||||||
test_target1/test_source2/fs2 userrefs - -
|
test_target1/test_source2/fs2 userrefs - -
|
||||||
test_target1/test_source2/fs2/sub userrefs - -
|
test_target1/test_source2/fs2/sub userrefs - -
|
||||||
test_target1/test_source2/fs2/sub@test-20101111000000 userrefs 1 -
|
test_target1/test_source2/fs2/sub@test-20101111000000 userrefs 0 -
|
||||||
""")
|
""")
|
||||||
|
|
||||||
|
|
||||||
def test_strippath(self):
|
def test_strippath(self):
|
||||||
|
|
||||||
with patch('time.strftime', return_value="20101111000000"):
|
with patch('time.strftime', return_value="test-20101111000000"):
|
||||||
self.assertFalse(ZfsAutobackup("test test_target1 --verbose --strip-path=1".split(" ")).run())
|
self.assertFalse(ZfsAutobackup("test test_target1 --verbose --strip-path=1 --no-progress".split(" ")).run())
|
||||||
|
|
||||||
r=shelltest("zfs list -H -o name -r -t all "+TEST_POOLS)
|
r=shelltest("zfs list -H -o name -r -t all "+TEST_POOLS)
|
||||||
self.assertMultiLineEqual(r,"""
|
self.assertMultiLineEqual(r,"""
|
||||||
@ -442,8 +428,8 @@ test_target1/fs2/sub@test-20101111000000
|
|||||||
|
|
||||||
r=shelltest("zfs set refreservation=1M test_source1/fs1")
|
r=shelltest("zfs set refreservation=1M test_source1/fs1")
|
||||||
|
|
||||||
with patch('time.strftime', return_value="20101111000000"):
|
with patch('time.strftime', return_value="test-20101111000000"):
|
||||||
self.assertFalse(ZfsAutobackup("test test_target1 --verbose --clear-refreservation".split(" ")).run())
|
self.assertFalse(ZfsAutobackup("test test_target1 --no-progress --verbose --clear-refreservation".split(" ")).run())
|
||||||
|
|
||||||
r=shelltest("zfs get refreservation -r test_source1 test_source2 test_target1")
|
r=shelltest("zfs get refreservation -r test_source1 test_source2 test_target1")
|
||||||
self.assertMultiLineEqual(r,"""
|
self.assertMultiLineEqual(r,"""
|
||||||
@ -480,8 +466,8 @@ test_target1/test_source2/fs2/sub@test-20101111000000 refreservation -
|
|||||||
self.skipTest("This zfs-userspace version doesnt support -o")
|
self.skipTest("This zfs-userspace version doesnt support -o")
|
||||||
|
|
||||||
|
|
||||||
with patch('time.strftime', return_value="20101111000000"):
|
with patch('time.strftime', return_value="test-20101111000000"):
|
||||||
self.assertFalse(ZfsAutobackup("test test_target1 --verbose --clear-mountpoint --debug".split(" ")).run())
|
self.assertFalse(ZfsAutobackup("test test_target1 --no-progress --verbose --clear-mountpoint --debug".split(" ")).run())
|
||||||
|
|
||||||
r=shelltest("zfs get canmount -r test_source1 test_source2 test_target1")
|
r=shelltest("zfs get canmount -r test_source1 test_source2 test_target1")
|
||||||
self.assertMultiLineEqual(r,"""
|
self.assertMultiLineEqual(r,"""
|
||||||
@ -513,35 +499,35 @@ test_target1/test_source2/fs2/sub@test-20101111000000 canmount - -
|
|||||||
def test_rollback(self):
|
def test_rollback(self):
|
||||||
|
|
||||||
#initial backup
|
#initial backup
|
||||||
with patch('time.strftime', return_value="20101111000000"):
|
with patch('time.strftime', return_value="test-20101111000000"):
|
||||||
self.assertFalse(ZfsAutobackup("test test_target1 --verbose".split(" ")).run())
|
self.assertFalse(ZfsAutobackup("test test_target1 --no-progress --verbose".split(" ")).run())
|
||||||
|
|
||||||
#make change
|
#make change
|
||||||
r=shelltest("zfs mount test_target1/test_source1/fs1")
|
r=shelltest("zfs mount test_target1/test_source1/fs1")
|
||||||
r=shelltest("touch /test_target1/test_source1/fs1/change.txt")
|
r=shelltest("touch /test_target1/test_source1/fs1/change.txt")
|
||||||
|
|
||||||
with patch('time.strftime', return_value="20101111000001"):
|
with patch('time.strftime', return_value="test-20101111000001"):
|
||||||
#should fail (busy)
|
#should fail (busy)
|
||||||
self.assertTrue(ZfsAutobackup("test test_target1 --verbose --allow-empty".split(" ")).run())
|
self.assertTrue(ZfsAutobackup("test test_target1 --no-progress --verbose --allow-empty".split(" ")).run())
|
||||||
|
|
||||||
with patch('time.strftime', return_value="20101111000002"):
|
with patch('time.strftime', return_value="test-20101111000002"):
|
||||||
#rollback, should succeed
|
#rollback, should succeed
|
||||||
self.assertFalse(ZfsAutobackup("test test_target1 --verbose --allow-empty --rollback".split(" ")).run())
|
self.assertFalse(ZfsAutobackup("test test_target1 --no-progress --verbose --allow-empty --rollback".split(" ")).run())
|
||||||
|
|
||||||
|
|
||||||
def test_destroyincompat(self):
|
def test_destroyincompat(self):
|
||||||
|
|
||||||
#initial backup
|
#initial backup
|
||||||
with patch('time.strftime', return_value="20101111000000"):
|
with patch('time.strftime', return_value="test-20101111000000"):
|
||||||
self.assertFalse(ZfsAutobackup("test test_target1 --verbose".split(" ")).run())
|
self.assertFalse(ZfsAutobackup("test test_target1 --no-progress --verbose".split(" ")).run())
|
||||||
|
|
||||||
#add multiple compatible snapshot (written is still 0)
|
#add multiple compatible snapshot (written is still 0)
|
||||||
r=shelltest("zfs snapshot test_target1/test_source1/fs1@compatible1")
|
r=shelltest("zfs snapshot test_target1/test_source1/fs1@compatible1")
|
||||||
r=shelltest("zfs snapshot test_target1/test_source1/fs1@compatible2")
|
r=shelltest("zfs snapshot test_target1/test_source1/fs1@compatible2")
|
||||||
|
|
||||||
with patch('time.strftime', return_value="20101111000001"):
|
with patch('time.strftime', return_value="test-20101111000001"):
|
||||||
#should be ok, is compatible
|
#should be ok, is compatible
|
||||||
self.assertFalse(ZfsAutobackup("test test_target1 --verbose --allow-empty".split(" ")).run())
|
self.assertFalse(ZfsAutobackup("test test_target1 --no-progress --verbose --allow-empty".split(" ")).run())
|
||||||
|
|
||||||
#add incompatible snapshot by changing and snapshotting
|
#add incompatible snapshot by changing and snapshotting
|
||||||
r=shelltest("zfs mount test_target1/test_source1/fs1")
|
r=shelltest("zfs mount test_target1/test_source1/fs1")
|
||||||
@ -549,110 +535,64 @@ test_target1/test_source2/fs2/sub@test-20101111000000 canmount - -
|
|||||||
r=shelltest("zfs snapshot test_target1/test_source1/fs1@incompatible1")
|
r=shelltest("zfs snapshot test_target1/test_source1/fs1@incompatible1")
|
||||||
|
|
||||||
|
|
||||||
with patch('time.strftime', return_value="20101111000002"):
|
with patch('time.strftime', return_value="test-20101111000002"):
|
||||||
#--test should fail, now incompatible
|
#--test should fail, now incompatible
|
||||||
self.assertTrue(ZfsAutobackup("test test_target1 --verbose --allow-empty --test".split(" ")).run())
|
self.assertTrue(ZfsAutobackup("test test_target1 --no-progress --verbose --allow-empty --test".split(" ")).run())
|
||||||
|
|
||||||
with patch('time.strftime', return_value="20101111000002"):
|
with patch('time.strftime', return_value="test-20101111000002"):
|
||||||
#should fail, now incompatible
|
#should fail, now incompatible
|
||||||
self.assertTrue(ZfsAutobackup("test test_target1 --verbose --allow-empty".split(" ")).run())
|
self.assertTrue(ZfsAutobackup("test test_target1 --no-progress --verbose --allow-empty".split(" ")).run())
|
||||||
|
|
||||||
with patch('time.strftime', return_value="20101111000003"):
|
with patch('time.strftime', return_value="test-20101111000003"):
|
||||||
#--test should succeed by destroying incompatibles
|
#--test should succeed by destroying incompatibles
|
||||||
self.assertFalse(ZfsAutobackup("test test_target1 --verbose --allow-empty --destroy-incompatible --test".split(" ")).run())
|
self.assertFalse(ZfsAutobackup("test test_target1 --no-progress --verbose --allow-empty --destroy-incompatible --test".split(" ")).run())
|
||||||
|
|
||||||
with patch('time.strftime', return_value="20101111000003"):
|
with patch('time.strftime', return_value="test-20101111000003"):
|
||||||
#should succeed by destroying incompatibles
|
#should succeed by destroying incompatibles
|
||||||
self.assertFalse(ZfsAutobackup("test test_target1 --verbose --allow-empty --destroy-incompatible".split(" ")).run())
|
self.assertFalse(ZfsAutobackup("test test_target1 --no-progress --verbose --allow-empty --destroy-incompatible".split(" ")).run())
|
||||||
|
|
||||||
|
r = shelltest("zfs list -H -o name -r -t all test_target1")
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
def test_keepsourcetarget(self):
|
|
||||||
|
|
||||||
with patch('time.strftime', return_value="20101111000000"):
|
|
||||||
self.assertFalse(ZfsAutobackup("test test_target1 --verbose --allow-empty".split(" ")).run())
|
|
||||||
|
|
||||||
with patch('time.strftime', return_value="20101111000001"):
|
|
||||||
self.assertFalse(ZfsAutobackup("test test_target1 --verbose --allow-empty".split(" ")).run())
|
|
||||||
|
|
||||||
#should still have all snapshots
|
|
||||||
r=shelltest("zfs list -H -o name -r -t all "+TEST_POOLS)
|
|
||||||
self.assertMultiLineEqual(r, """
|
self.assertMultiLineEqual(r, """
|
||||||
test_source1
|
|
||||||
test_source1/fs1
|
|
||||||
test_source1/fs1@test-20101111000000
|
|
||||||
test_source1/fs1@test-20101111000001
|
|
||||||
test_source1/fs1/sub
|
|
||||||
test_source1/fs1/sub@test-20101111000000
|
|
||||||
test_source1/fs1/sub@test-20101111000001
|
|
||||||
test_source2
|
|
||||||
test_source2/fs2
|
|
||||||
test_source2/fs2/sub
|
|
||||||
test_source2/fs2/sub@test-20101111000000
|
|
||||||
test_source2/fs2/sub@test-20101111000001
|
|
||||||
test_source2/fs3
|
|
||||||
test_source2/fs3/sub
|
|
||||||
test_target1
|
test_target1
|
||||||
test_target1/test_source1
|
test_target1/test_source1
|
||||||
test_target1/test_source1/fs1
|
test_target1/test_source1/fs1
|
||||||
test_target1/test_source1/fs1@test-20101111000000
|
test_target1/test_source1/fs1@test-20101111000000
|
||||||
|
test_target1/test_source1/fs1@compatible1
|
||||||
|
test_target1/test_source1/fs1@compatible2
|
||||||
test_target1/test_source1/fs1@test-20101111000001
|
test_target1/test_source1/fs1@test-20101111000001
|
||||||
|
test_target1/test_source1/fs1@test-20101111000002
|
||||||
|
test_target1/test_source1/fs1@test-20101111000003
|
||||||
test_target1/test_source1/fs1/sub
|
test_target1/test_source1/fs1/sub
|
||||||
test_target1/test_source1/fs1/sub@test-20101111000000
|
test_target1/test_source1/fs1/sub@test-20101111000000
|
||||||
test_target1/test_source1/fs1/sub@test-20101111000001
|
test_target1/test_source1/fs1/sub@test-20101111000001
|
||||||
|
test_target1/test_source1/fs1/sub@test-20101111000002
|
||||||
|
test_target1/test_source1/fs1/sub@test-20101111000003
|
||||||
test_target1/test_source2
|
test_target1/test_source2
|
||||||
test_target1/test_source2/fs2
|
test_target1/test_source2/fs2
|
||||||
test_target1/test_source2/fs2/sub
|
test_target1/test_source2/fs2/sub
|
||||||
test_target1/test_source2/fs2/sub@test-20101111000000
|
test_target1/test_source2/fs2/sub@test-20101111000000
|
||||||
test_target1/test_source2/fs2/sub@test-20101111000001
|
test_target1/test_source2/fs2/sub@test-20101111000001
|
||||||
""")
|
|
||||||
|
|
||||||
|
|
||||||
#run again with keep=0
|
|
||||||
with patch('time.strftime', return_value="20101111000002"):
|
|
||||||
self.assertFalse(ZfsAutobackup("test test_target1 --verbose --allow-empty --keep-source=0 --keep-target=0".split(" ")).run())
|
|
||||||
|
|
||||||
#should only have last snapshots
|
|
||||||
r=shelltest("zfs list -H -o name -r -t all "+TEST_POOLS)
|
|
||||||
self.assertMultiLineEqual(r,"""
|
|
||||||
test_source1
|
|
||||||
test_source1/fs1
|
|
||||||
test_source1/fs1@test-20101111000002
|
|
||||||
test_source1/fs1/sub
|
|
||||||
test_source1/fs1/sub@test-20101111000002
|
|
||||||
test_source2
|
|
||||||
test_source2/fs2
|
|
||||||
test_source2/fs2/sub
|
|
||||||
test_source2/fs2/sub@test-20101111000002
|
|
||||||
test_source2/fs3
|
|
||||||
test_source2/fs3/sub
|
|
||||||
test_target1
|
|
||||||
test_target1/test_source1
|
|
||||||
test_target1/test_source1/fs1
|
|
||||||
test_target1/test_source1/fs1@test-20101111000002
|
|
||||||
test_target1/test_source1/fs1/sub
|
|
||||||
test_target1/test_source1/fs1/sub@test-20101111000002
|
|
||||||
test_target1/test_source2
|
|
||||||
test_target1/test_source2/fs2
|
|
||||||
test_target1/test_source2/fs2/sub
|
|
||||||
test_target1/test_source2/fs2/sub@test-20101111000002
|
test_target1/test_source2/fs2/sub@test-20101111000002
|
||||||
|
test_target1/test_source2/fs2/sub@test-20101111000003
|
||||||
""")
|
""")
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
def test_ssh(self):
|
def test_ssh(self):
|
||||||
|
|
||||||
#test all ssh directions
|
#test all ssh directions
|
||||||
|
|
||||||
with patch('time.strftime', return_value="20101111000000"):
|
with patch('time.strftime', return_value="test-20101111000000"):
|
||||||
self.assertFalse(ZfsAutobackup("test test_target1 --verbose --allow-empty --ssh-source localhost".split(" ")).run())
|
self.assertFalse(ZfsAutobackup("test test_target1 --no-progress --verbose --allow-empty --ssh-source localhost --exclude-received".split(" ")).run())
|
||||||
|
|
||||||
with patch('time.strftime', return_value="20101111000001"):
|
with patch('time.strftime', return_value="test-20101111000001"):
|
||||||
self.assertFalse(ZfsAutobackup("test test_target1 --verbose --allow-empty --ssh-target localhost".split(" ")).run())
|
self.assertFalse(ZfsAutobackup("test test_target1 --no-progress --verbose --allow-empty --ssh-target localhost --exclude-received".split(" ")).run())
|
||||||
|
|
||||||
with patch('time.strftime', return_value="20101111000002"):
|
with patch('time.strftime', return_value="test-20101111000002"):
|
||||||
self.assertFalse(ZfsAutobackup("test test_target1 --verbose --allow-empty --ssh-source localhost --ssh-target localhost".split(" ")).run())
|
self.assertFalse(ZfsAutobackup("test test_target1 --no-progress --verbose --allow-empty --ssh-source localhost --ssh-target localhost".split(" ")).run())
|
||||||
|
|
||||||
|
|
||||||
r=shelltest("zfs list -H -o name -r -t all "+TEST_POOLS)
|
r=shelltest("zfs list -H -o name -r -t all "+TEST_POOLS)
|
||||||
@ -696,8 +636,8 @@ test_target1/test_source2/fs2/sub@test-20101111000002
|
|||||||
def test_minchange(self):
|
def test_minchange(self):
|
||||||
|
|
||||||
#initial
|
#initial
|
||||||
with patch('time.strftime', return_value="20101111000000"):
|
with patch('time.strftime', return_value="test-20101111000000"):
|
||||||
self.assertFalse(ZfsAutobackup("test test_target1 --verbose --min-change 100000".split(" ")).run())
|
self.assertFalse(ZfsAutobackup("test test_target1 --no-progress --verbose --min-change 100000".split(" ")).run())
|
||||||
|
|
||||||
#make small change, use umount to reflect the changes immediately
|
#make small change, use umount to reflect the changes immediately
|
||||||
r=shelltest("zfs set compress=off test_source1")
|
r=shelltest("zfs set compress=off test_source1")
|
||||||
@ -706,16 +646,16 @@ test_target1/test_source2/fs2/sub@test-20101111000002
|
|||||||
|
|
||||||
|
|
||||||
#too small change, takes no snapshots
|
#too small change, takes no snapshots
|
||||||
with patch('time.strftime', return_value="20101111000001"):
|
with patch('time.strftime', return_value="test-20101111000001"):
|
||||||
self.assertFalse(ZfsAutobackup("test test_target1 --verbose --min-change 100000".split(" ")).run())
|
self.assertFalse(ZfsAutobackup("test test_target1 --no-progress --verbose --min-change 100000".split(" ")).run())
|
||||||
|
|
||||||
#make big change
|
#make big change
|
||||||
r=shelltest("dd if=/dev/zero of=/test_source1/fs1/change.txt bs=200000 count=1")
|
r=shelltest("dd if=/dev/zero of=/test_source1/fs1/change.txt bs=200000 count=1")
|
||||||
r=shelltest("zfs umount test_source1/fs1; zfs mount test_source1/fs1")
|
r=shelltest("zfs umount test_source1/fs1; zfs mount test_source1/fs1")
|
||||||
|
|
||||||
#bigger change, should take snapshot
|
#bigger change, should take snapshot
|
||||||
with patch('time.strftime', return_value="20101111000002"):
|
with patch('time.strftime', return_value="test-20101111000002"):
|
||||||
self.assertFalse(ZfsAutobackup("test test_target1 --verbose --min-change 100000".split(" ")).run())
|
self.assertFalse(ZfsAutobackup("test test_target1 --no-progress --verbose --min-change 100000".split(" ")).run())
|
||||||
|
|
||||||
r=shelltest("zfs list -H -o name -r -t all "+TEST_POOLS)
|
r=shelltest("zfs list -H -o name -r -t all "+TEST_POOLS)
|
||||||
self.assertMultiLineEqual(r,"""
|
self.assertMultiLineEqual(r,"""
|
||||||
@ -747,8 +687,8 @@ test_target1/test_source2/fs2/sub@test-20101111000000
|
|||||||
def test_test(self):
|
def test_test(self):
|
||||||
|
|
||||||
#initial
|
#initial
|
||||||
with patch('time.strftime', return_value="20101111000000"):
|
with patch('time.strftime', return_value="test-20101111000000"):
|
||||||
self.assertFalse(ZfsAutobackup("test test_target1 --verbose --test".split(" ")).run())
|
self.assertFalse(ZfsAutobackup("test test_target1 --no-progress --verbose --test".split(" ")).run())
|
||||||
|
|
||||||
r=shelltest("zfs list -H -o name -r -t all "+TEST_POOLS)
|
r=shelltest("zfs list -H -o name -r -t all "+TEST_POOLS)
|
||||||
self.assertMultiLineEqual(r,"""
|
self.assertMultiLineEqual(r,"""
|
||||||
@ -764,13 +704,13 @@ test_target1
|
|||||||
""")
|
""")
|
||||||
|
|
||||||
#actual make initial backup
|
#actual make initial backup
|
||||||
with patch('time.strftime', return_value="20101111000001"):
|
with patch('time.strftime', return_value="test-20101111000001"):
|
||||||
self.assertFalse(ZfsAutobackup("test test_target1 --verbose".split(" ")).run())
|
self.assertFalse(ZfsAutobackup("test test_target1 --no-progress --verbose".split(" ")).run())
|
||||||
|
|
||||||
|
|
||||||
#test incremental
|
#test incremental
|
||||||
with patch('time.strftime', return_value="20101111000000"):
|
with patch('time.strftime', return_value="test-20101111000002"):
|
||||||
self.assertFalse(ZfsAutobackup("test test_target1 --verbose --test".split(" ")).run())
|
self.assertFalse(ZfsAutobackup("test test_target1 --no-progress --allow-empty --verbose --test".split(" ")).run())
|
||||||
|
|
||||||
r=shelltest("zfs list -H -o name -r -t all "+TEST_POOLS)
|
r=shelltest("zfs list -H -o name -r -t all "+TEST_POOLS)
|
||||||
self.assertMultiLineEqual(r,"""
|
self.assertMultiLineEqual(r,"""
|
||||||
@ -805,8 +745,8 @@ test_target1/test_source2/fs2/sub@test-20101111000001
|
|||||||
shelltest("zfs create test_target1/test_source1")
|
shelltest("zfs create test_target1/test_source1")
|
||||||
shelltest("zfs send test_source1/fs1@migrate1| zfs recv test_target1/test_source1/fs1")
|
shelltest("zfs send test_source1/fs1@migrate1| zfs recv test_target1/test_source1/fs1")
|
||||||
|
|
||||||
with patch('time.strftime', return_value="20101111000000"):
|
with patch('time.strftime', return_value="test-20101111000000"):
|
||||||
self.assertFalse(ZfsAutobackup("test test_target1 --verbose".split(" ")).run())
|
self.assertFalse(ZfsAutobackup("test test_target1 --no-progress --verbose".split(" ")).run())
|
||||||
|
|
||||||
r=shelltest("zfs list -H -o name -r -t all "+TEST_POOLS)
|
r=shelltest("zfs list -H -o name -r -t all "+TEST_POOLS)
|
||||||
self.assertMultiLineEqual(r,"""
|
self.assertMultiLineEqual(r,"""
|
||||||
@ -835,13 +775,127 @@ test_target1/test_source2/fs2/sub
|
|||||||
test_target1/test_source2/fs2/sub@test-20101111000000
|
test_target1/test_source2/fs2/sub@test-20101111000000
|
||||||
""")
|
""")
|
||||||
|
|
||||||
|
def test_keep0(self):
|
||||||
|
"""test if keep-source=0 and keep-target=0 dont delete common snapshot and break backup"""
|
||||||
|
|
||||||
###########################
|
with patch('time.strftime', return_value="test-20101111000000"):
|
||||||
# TODO:
|
self.assertFalse(ZfsAutobackup("test test_target1 --no-progress --verbose --keep-source=0 --keep-target=0".split(" ")).run())
|
||||||
|
|
||||||
def test_raw(self):
|
#make snapshot, shouldnt delete 0
|
||||||
|
with patch('time.strftime', return_value="test-20101111000001"):
|
||||||
|
self.assertFalse(ZfsAutobackup("test --no-progress --verbose --keep-source=0 --keep-target=0 --allow-empty".split(" ")).run())
|
||||||
|
|
||||||
self.skipTest("todo: later when travis supports zfs 0.8")
|
#make snapshot 2, shouldnt delete 0 since it has holds, but will delete 1 since it has no holds
|
||||||
|
with patch('time.strftime', return_value="test-20101111000002"):
|
||||||
|
self.assertFalse(ZfsAutobackup("test --no-progress --verbose --keep-source=0 --keep-target=0 --allow-empty".split(" ")).run())
|
||||||
|
|
||||||
|
r = shelltest("zfs list -H -o name -r -t all " + TEST_POOLS)
|
||||||
|
self.assertMultiLineEqual(r, """
|
||||||
|
test_source1
|
||||||
|
test_source1/fs1
|
||||||
|
test_source1/fs1@test-20101111000000
|
||||||
|
test_source1/fs1@test-20101111000002
|
||||||
|
test_source1/fs1/sub
|
||||||
|
test_source1/fs1/sub@test-20101111000000
|
||||||
|
test_source1/fs1/sub@test-20101111000002
|
||||||
|
test_source2
|
||||||
|
test_source2/fs2
|
||||||
|
test_source2/fs2/sub
|
||||||
|
test_source2/fs2/sub@test-20101111000000
|
||||||
|
test_source2/fs2/sub@test-20101111000002
|
||||||
|
test_source2/fs3
|
||||||
|
test_source2/fs3/sub
|
||||||
|
test_target1
|
||||||
|
test_target1/test_source1
|
||||||
|
test_target1/test_source1/fs1
|
||||||
|
test_target1/test_source1/fs1@test-20101111000000
|
||||||
|
test_target1/test_source1/fs1/sub
|
||||||
|
test_target1/test_source1/fs1/sub@test-20101111000000
|
||||||
|
test_target1/test_source2
|
||||||
|
test_target1/test_source2/fs2
|
||||||
|
test_target1/test_source2/fs2/sub
|
||||||
|
test_target1/test_source2/fs2/sub@test-20101111000000
|
||||||
|
""")
|
||||||
|
|
||||||
|
#make another backup but with no-holds. we should naturally endup with only number 3
|
||||||
|
with patch('time.strftime', return_value="test-20101111000003"):
|
||||||
|
self.assertFalse(ZfsAutobackup("test test_target1 --no-progress --verbose --keep-source=0 --keep-target=0 --no-holds --allow-empty".split(" ")).run())
|
||||||
|
|
||||||
|
r = shelltest("zfs list -H -o name -r -t all " + TEST_POOLS)
|
||||||
|
self.assertMultiLineEqual(r, """
|
||||||
|
test_source1
|
||||||
|
test_source1/fs1
|
||||||
|
test_source1/fs1@test-20101111000003
|
||||||
|
test_source1/fs1/sub
|
||||||
|
test_source1/fs1/sub@test-20101111000003
|
||||||
|
test_source2
|
||||||
|
test_source2/fs2
|
||||||
|
test_source2/fs2/sub
|
||||||
|
test_source2/fs2/sub@test-20101111000003
|
||||||
|
test_source2/fs3
|
||||||
|
test_source2/fs3/sub
|
||||||
|
test_target1
|
||||||
|
test_target1/test_source1
|
||||||
|
test_target1/test_source1/fs1
|
||||||
|
test_target1/test_source1/fs1@test-20101111000003
|
||||||
|
test_target1/test_source1/fs1/sub
|
||||||
|
test_target1/test_source1/fs1/sub@test-20101111000003
|
||||||
|
test_target1/test_source2
|
||||||
|
test_target1/test_source2/fs2
|
||||||
|
test_target1/test_source2/fs2/sub
|
||||||
|
test_target1/test_source2/fs2/sub@test-20101111000003
|
||||||
|
""")
|
||||||
|
|
||||||
|
|
||||||
|
# run with snapshot-only for 4, since we used no-holds, it will delete 3 on the source, breaking the backup
|
||||||
|
with patch('time.strftime', return_value="test-20101111000004"):
|
||||||
|
self.assertFalse(ZfsAutobackup("test --no-progress --verbose --keep-source=0 --keep-target=0 --allow-empty".split(" ")).run())
|
||||||
|
|
||||||
|
r = shelltest("zfs list -H -o name -r -t all " + TEST_POOLS)
|
||||||
|
self.assertMultiLineEqual(r, """
|
||||||
|
test_source1
|
||||||
|
test_source1/fs1
|
||||||
|
test_source1/fs1@test-20101111000004
|
||||||
|
test_source1/fs1/sub
|
||||||
|
test_source1/fs1/sub@test-20101111000004
|
||||||
|
test_source2
|
||||||
|
test_source2/fs2
|
||||||
|
test_source2/fs2/sub
|
||||||
|
test_source2/fs2/sub@test-20101111000004
|
||||||
|
test_source2/fs3
|
||||||
|
test_source2/fs3/sub
|
||||||
|
test_target1
|
||||||
|
test_target1/test_source1
|
||||||
|
test_target1/test_source1/fs1
|
||||||
|
test_target1/test_source1/fs1@test-20101111000003
|
||||||
|
test_target1/test_source1/fs1/sub
|
||||||
|
test_target1/test_source1/fs1/sub@test-20101111000003
|
||||||
|
test_target1/test_source2
|
||||||
|
test_target1/test_source2/fs2
|
||||||
|
test_target1/test_source2/fs2/sub
|
||||||
|
test_target1/test_source2/fs2/sub@test-20101111000003
|
||||||
|
""")
|
||||||
|
|
||||||
|
|
||||||
|
def test_progress(self):
|
||||||
|
|
||||||
|
r=shelltest("dd if=/dev/zero of=/test_source1/data.txt bs=200000 count=1")
|
||||||
|
r = shelltest("zfs snapshot test_source1@test")
|
||||||
|
|
||||||
|
l=LogConsole(show_verbose=True, show_debug=False, color=False)
|
||||||
|
n=ZfsNode(snapshot_time_format="bla", hold_name="bla", logger=l)
|
||||||
|
d=ZfsDataset(n,"test_source1@test")
|
||||||
|
|
||||||
|
sp=d.send_pipe([], prev_snapshot=None, resume_token=None, show_progress=True, raw=False, send_pipes=[], send_properties=True, write_embedded=True, zfs_compressed=True)
|
||||||
|
|
||||||
|
|
||||||
|
with OutputIO() as buf:
|
||||||
|
with redirect_stderr(buf):
|
||||||
|
try:
|
||||||
|
n.run(["sleep", "2"], inp=sp)
|
||||||
|
except:
|
||||||
|
pass
|
||||||
|
|
||||||
|
print(buf.getvalue())
|
||||||
|
# correct message?
|
||||||
|
self.assertRegex(buf.getvalue(),".*>>> .*minutes left.*")
|
||||||
81
tests/test_zfsautobackup31.py
Normal file
81
tests/test_zfsautobackup31.py
Normal file
@ -0,0 +1,81 @@
|
|||||||
|
from basetest import *
|
||||||
|
import time
|
||||||
|
|
||||||
|
class TestZfsAutobackup31(unittest2.TestCase):
|
||||||
|
"""various new 3.1 features"""
|
||||||
|
|
||||||
|
def setUp(self):
|
||||||
|
prepare_zpools()
|
||||||
|
self.longMessage=True
|
||||||
|
|
||||||
|
def test_no_thinning(self):
|
||||||
|
|
||||||
|
with patch('time.strftime', return_value="test-20101111000000"):
|
||||||
|
self.assertFalse(ZfsAutobackup("test test_target1 --no-progress --verbose --allow-empty".split(" ")).run())
|
||||||
|
|
||||||
|
with patch('time.strftime', return_value="test-20101111000001"):
|
||||||
|
self.assertFalse(ZfsAutobackup("test test_target1 --no-progress --verbose --allow-empty --keep-target=0 --keep-source=0 --no-thinning".split(" ")).run())
|
||||||
|
|
||||||
|
r=shelltest("zfs list -H -o name -r -t all "+TEST_POOLS)
|
||||||
|
self.assertMultiLineEqual(r,"""
|
||||||
|
test_source1
|
||||||
|
test_source1/fs1
|
||||||
|
test_source1/fs1@test-20101111000000
|
||||||
|
test_source1/fs1@test-20101111000001
|
||||||
|
test_source1/fs1/sub
|
||||||
|
test_source1/fs1/sub@test-20101111000000
|
||||||
|
test_source1/fs1/sub@test-20101111000001
|
||||||
|
test_source2
|
||||||
|
test_source2/fs2
|
||||||
|
test_source2/fs2/sub
|
||||||
|
test_source2/fs2/sub@test-20101111000000
|
||||||
|
test_source2/fs2/sub@test-20101111000001
|
||||||
|
test_source2/fs3
|
||||||
|
test_source2/fs3/sub
|
||||||
|
test_target1
|
||||||
|
test_target1/test_source1
|
||||||
|
test_target1/test_source1/fs1
|
||||||
|
test_target1/test_source1/fs1@test-20101111000000
|
||||||
|
test_target1/test_source1/fs1@test-20101111000001
|
||||||
|
test_target1/test_source1/fs1/sub
|
||||||
|
test_target1/test_source1/fs1/sub@test-20101111000000
|
||||||
|
test_target1/test_source1/fs1/sub@test-20101111000001
|
||||||
|
test_target1/test_source2
|
||||||
|
test_target1/test_source2/fs2
|
||||||
|
test_target1/test_source2/fs2/sub
|
||||||
|
test_target1/test_source2/fs2/sub@test-20101111000000
|
||||||
|
test_target1/test_source2/fs2/sub@test-20101111000001
|
||||||
|
""")
|
||||||
|
|
||||||
|
|
||||||
|
def test_re_replication(self):
|
||||||
|
"""test re-replication of something thats already a backup (new in v3.1-beta5)"""
|
||||||
|
|
||||||
|
shelltest("zfs create test_target1/a")
|
||||||
|
shelltest("zfs create test_target1/b")
|
||||||
|
|
||||||
|
with patch('time.strftime', return_value="test-20101111000000"):
|
||||||
|
self.assertFalse(ZfsAutobackup("test test_target1/a --no-progress --verbose --debug".split(" ")).run())
|
||||||
|
|
||||||
|
with patch('time.strftime', return_value="test-20101111000001"):
|
||||||
|
self.assertFalse(ZfsAutobackup("test test_target1/b --no-progress --verbose".split(" ")).run())
|
||||||
|
|
||||||
|
r=shelltest("zfs list -H -o name -r -t snapshot test_target1")
|
||||||
|
#NOTE: it wont backup test_target1/a/test_source2/fs2/sub to test_target1/b since it doesnt have the zfs_autobackup property anymore.
|
||||||
|
self.assertMultiLineEqual(r,"""
|
||||||
|
test_target1/a/test_source1/fs1@test-20101111000000
|
||||||
|
test_target1/a/test_source1/fs1/sub@test-20101111000000
|
||||||
|
test_target1/a/test_source2/fs2/sub@test-20101111000000
|
||||||
|
test_target1/b/test_source1/fs1@test-20101111000000
|
||||||
|
test_target1/b/test_source1/fs1/sub@test-20101111000000
|
||||||
|
test_target1/b/test_source2/fs2/sub@test-20101111000000
|
||||||
|
test_target1/b/test_target1/a/test_source1/fs1@test-20101111000000
|
||||||
|
test_target1/b/test_target1/a/test_source1/fs1/sub@test-20101111000000
|
||||||
|
""")
|
||||||
|
|
||||||
|
def test_zfs_compressed(self):
|
||||||
|
|
||||||
|
with patch('time.strftime', return_value="test-20101111000000"):
|
||||||
|
self.assertFalse(
|
||||||
|
ZfsAutobackup("test test_target1 --no-progress --verbose --debug --zfs-compressed".split(" ")).run())
|
||||||
|
|
||||||
176
tests/test_zfsnode.py
Normal file
176
tests/test_zfsnode.py
Normal file
@ -0,0 +1,176 @@
|
|||||||
|
from basetest import *
|
||||||
|
from zfs_autobackup.LogStub import LogStub
|
||||||
|
from zfs_autobackup.ExecuteNode import ExecuteError
|
||||||
|
|
||||||
|
|
||||||
|
class TestZfsNode(unittest2.TestCase):
|
||||||
|
|
||||||
|
def setUp(self):
|
||||||
|
prepare_zpools()
|
||||||
|
# return super().setUp()
|
||||||
|
|
||||||
|
def test_consistent_snapshot(self):
|
||||||
|
logger = LogStub()
|
||||||
|
description = "[Source]"
|
||||||
|
node = ZfsNode(snapshot_time_format="test-%Y%m%d%H%M%S", hold_name="zfs_autobackup:test", logger=logger, description=description)
|
||||||
|
|
||||||
|
with self.subTest("first snapshot"):
|
||||||
|
node.consistent_snapshot(node.selected_datasets(property_name="autobackup:test",exclude_paths=[], exclude_received=False, exclude_unchanged=False, min_change=200000), "test-20101111000001", 100000)
|
||||||
|
r = shelltest("zfs list -H -o name -r -t all " + TEST_POOLS)
|
||||||
|
self.assertEqual(r, """
|
||||||
|
test_source1
|
||||||
|
test_source1/fs1
|
||||||
|
test_source1/fs1@test-20101111000001
|
||||||
|
test_source1/fs1/sub
|
||||||
|
test_source1/fs1/sub@test-20101111000001
|
||||||
|
test_source2
|
||||||
|
test_source2/fs2
|
||||||
|
test_source2/fs2/sub
|
||||||
|
test_source2/fs2/sub@test-20101111000001
|
||||||
|
test_source2/fs3
|
||||||
|
test_source2/fs3/sub
|
||||||
|
test_target1
|
||||||
|
""")
|
||||||
|
|
||||||
|
with self.subTest("second snapshot, no changes, no snapshot"):
|
||||||
|
node.consistent_snapshot(node.selected_datasets(property_name="autobackup:test",exclude_paths=[], exclude_received=False, exclude_unchanged=False, min_change=200000), "test-20101111000002", 1)
|
||||||
|
r = shelltest("zfs list -H -o name -r -t all " + TEST_POOLS)
|
||||||
|
self.assertEqual(r, """
|
||||||
|
test_source1
|
||||||
|
test_source1/fs1
|
||||||
|
test_source1/fs1@test-20101111000001
|
||||||
|
test_source1/fs1/sub
|
||||||
|
test_source1/fs1/sub@test-20101111000001
|
||||||
|
test_source2
|
||||||
|
test_source2/fs2
|
||||||
|
test_source2/fs2/sub
|
||||||
|
test_source2/fs2/sub@test-20101111000001
|
||||||
|
test_source2/fs3
|
||||||
|
test_source2/fs3/sub
|
||||||
|
test_target1
|
||||||
|
""")
|
||||||
|
|
||||||
|
with self.subTest("second snapshot, no changes, empty snapshot"):
|
||||||
|
node.consistent_snapshot(node.selected_datasets(property_name="autobackup:test", exclude_paths=[], exclude_received=False, exclude_unchanged=False, min_change=200000), "test-20101111000002", 0)
|
||||||
|
r = shelltest("zfs list -H -o name -r -t all " + TEST_POOLS)
|
||||||
|
self.assertEqual(r, """
|
||||||
|
test_source1
|
||||||
|
test_source1/fs1
|
||||||
|
test_source1/fs1@test-20101111000001
|
||||||
|
test_source1/fs1@test-20101111000002
|
||||||
|
test_source1/fs1/sub
|
||||||
|
test_source1/fs1/sub@test-20101111000001
|
||||||
|
test_source1/fs1/sub@test-20101111000002
|
||||||
|
test_source2
|
||||||
|
test_source2/fs2
|
||||||
|
test_source2/fs2/sub
|
||||||
|
test_source2/fs2/sub@test-20101111000001
|
||||||
|
test_source2/fs2/sub@test-20101111000002
|
||||||
|
test_source2/fs3
|
||||||
|
test_source2/fs3/sub
|
||||||
|
test_target1
|
||||||
|
""")
|
||||||
|
|
||||||
|
def test_consistent_snapshot_prepostcmds(self):
|
||||||
|
logger = LogStub()
|
||||||
|
description = "[Source]"
|
||||||
|
node = ZfsNode(snapshot_time_format="test", hold_name="test", logger=logger, description=description, debug_output=True)
|
||||||
|
|
||||||
|
with self.subTest("Test if all cmds are executed correctly (no failures)"):
|
||||||
|
with OutputIO() as buf:
|
||||||
|
with redirect_stdout(buf):
|
||||||
|
node.consistent_snapshot(node.selected_datasets(property_name="autobackup:test", exclude_paths=[], exclude_received=False, exclude_unchanged=False, min_change=1), "test-1",
|
||||||
|
0,
|
||||||
|
pre_snapshot_cmds=["echo pre1", "echo pre2"],
|
||||||
|
post_snapshot_cmds=["echo post1 >&2", "echo post2 >&2"]
|
||||||
|
)
|
||||||
|
|
||||||
|
self.assertIn("STDOUT > pre1", buf.getvalue())
|
||||||
|
self.assertIn("STDOUT > pre2", buf.getvalue())
|
||||||
|
self.assertIn("STDOUT > post1", buf.getvalue())
|
||||||
|
self.assertIn("STDOUT > post2", buf.getvalue())
|
||||||
|
|
||||||
|
|
||||||
|
with self.subTest("Failure in the middle, only pre1 and both post1 and post2 should be executed, no snapshot should be attempted"):
|
||||||
|
with OutputIO() as buf:
|
||||||
|
with redirect_stdout(buf):
|
||||||
|
with self.assertRaises(ExecuteError):
|
||||||
|
node.consistent_snapshot(node.selected_datasets(property_name="autobackup:test", exclude_paths=[], exclude_received=False, exclude_unchanged=False, min_change=1), "test-1",
|
||||||
|
0,
|
||||||
|
pre_snapshot_cmds=["echo pre1", "false", "echo pre2"],
|
||||||
|
post_snapshot_cmds=["echo post1", "false", "echo post2"]
|
||||||
|
)
|
||||||
|
|
||||||
|
print(buf.getvalue())
|
||||||
|
self.assertIn("STDOUT > pre1", buf.getvalue())
|
||||||
|
self.assertNotIn("STDOUT > pre2", buf.getvalue())
|
||||||
|
self.assertIn("STDOUT > post1", buf.getvalue())
|
||||||
|
self.assertIn("STDOUT > post2", buf.getvalue())
|
||||||
|
|
||||||
|
with self.subTest("Snapshot fails"):
|
||||||
|
with OutputIO() as buf:
|
||||||
|
with redirect_stdout(buf):
|
||||||
|
with self.assertRaises(ExecuteError):
|
||||||
|
#same snapshot name as before so it fails
|
||||||
|
node.consistent_snapshot(node.selected_datasets(property_name="autobackup:test", exclude_paths=[], exclude_received=False, exclude_unchanged=False, min_change=1), "test-1",
|
||||||
|
0,
|
||||||
|
pre_snapshot_cmds=["echo pre1", "echo pre2"],
|
||||||
|
post_snapshot_cmds=["echo post1", "echo post2"]
|
||||||
|
)
|
||||||
|
|
||||||
|
print(buf.getvalue())
|
||||||
|
self.assertIn("STDOUT > pre1", buf.getvalue())
|
||||||
|
self.assertIn("STDOUT > pre2", buf.getvalue())
|
||||||
|
self.assertIn("STDOUT > post1", buf.getvalue())
|
||||||
|
self.assertIn("STDOUT > post2", buf.getvalue())
|
||||||
|
|
||||||
|
|
||||||
|
def test_getselected(self):
|
||||||
|
|
||||||
|
# should be excluded by property
|
||||||
|
shelltest("zfs create test_source1/fs1/subexcluded")
|
||||||
|
shelltest("zfs set autobackup:test=false test_source1/fs1/subexcluded")
|
||||||
|
|
||||||
|
# should be excluded by being unchanged
|
||||||
|
shelltest("zfs create test_source1/fs1/unchanged")
|
||||||
|
shelltest("zfs snapshot test_source1/fs1/unchanged@somesnapshot")
|
||||||
|
|
||||||
|
logger = LogStub()
|
||||||
|
description = "[Source]"
|
||||||
|
node = ZfsNode(snapshot_time_format="test-%Y%m%d%H%M%S", hold_name="zfs_autobackup:test", logger=logger, description=description)
|
||||||
|
s = pformat(node.selected_datasets(property_name="autobackup:test", exclude_paths=[], exclude_received=False, exclude_unchanged=True, min_change=1))
|
||||||
|
print(s)
|
||||||
|
|
||||||
|
# basics
|
||||||
|
self.assertEqual(s, """[(local): test_source1/fs1,
|
||||||
|
(local): test_source1/fs1/sub,
|
||||||
|
(local): test_source2/fs2/sub]""")
|
||||||
|
|
||||||
|
|
||||||
|
def test_validcommand(self):
|
||||||
|
logger = LogStub()
|
||||||
|
description = "[Source]"
|
||||||
|
node = ZfsNode(snapshot_time_format="test-%Y%m%d%H%M%S", hold_name="zfs_autobackup:test", logger=logger, description=description)
|
||||||
|
|
||||||
|
with self.subTest("test invalid option"):
|
||||||
|
self.assertFalse(node.valid_command(["zfs", "send", "--invalid-option", "nonexisting"]))
|
||||||
|
with self.subTest("test valid option"):
|
||||||
|
self.assertTrue(node.valid_command(["zfs", "send", "-v", "nonexisting"]))
|
||||||
|
|
||||||
|
def test_supportedsendoptions(self):
|
||||||
|
logger = LogStub()
|
||||||
|
description = "[Source]"
|
||||||
|
node = ZfsNode(snapshot_time_format="test-%Y%m%d%H%M%S", hold_name="zfs_autobackup:test", logger=logger, description=description)
|
||||||
|
# -D propably always supported
|
||||||
|
self.assertGreater(len(node.supported_send_options), 0)
|
||||||
|
|
||||||
|
def test_supportedrecvoptions(self):
|
||||||
|
logger = LogStub()
|
||||||
|
description = "[Source]"
|
||||||
|
# NOTE: this could hang via ssh if we dont close filehandles properly. (which was a previous bug)
|
||||||
|
node = ZfsNode(snapshot_time_format="test-%Y%m%d%H%M%S", hold_name="zfs_autobackup:test", logger=logger, description=description, ssh_to='localhost')
|
||||||
|
self.assertIsInstance(node.supported_recv_options, list)
|
||||||
|
|
||||||
|
|
||||||
|
if __name__ == '__main__':
|
||||||
|
unittest.main()
|
||||||
39
zfs_autobackup/CachedProperty.py
Normal file
39
zfs_autobackup/CachedProperty.py
Normal file
@ -0,0 +1,39 @@
|
|||||||
|
# NOTE: this should inherit from (object) to function correctly with python 2.7
|
||||||
|
class CachedProperty(object):
|
||||||
|
""" A property that is only computed once per instance and
|
||||||
|
then stores the result in _cached_properties of the object.
|
||||||
|
|
||||||
|
Source: https://github.com/bottlepy/bottle/commit/fa7733e075da0d790d809aa3d2f53071897e6f76
|
||||||
|
"""
|
||||||
|
|
||||||
|
def __init__(self, func):
|
||||||
|
self.__doc__ = getattr(func, '__doc__')
|
||||||
|
self.func = func
|
||||||
|
|
||||||
|
def __get__(self, obj, cls):
|
||||||
|
if obj is None:
|
||||||
|
return self
|
||||||
|
|
||||||
|
propname = self.func.__name__
|
||||||
|
|
||||||
|
if not hasattr(obj, '_cached_properties'):
|
||||||
|
obj._cached_properties = {}
|
||||||
|
|
||||||
|
if propname not in obj._cached_properties:
|
||||||
|
obj._cached_properties[propname] = self.func(obj)
|
||||||
|
# value = obj.__dict__[propname] = self.func(obj)
|
||||||
|
|
||||||
|
return obj._cached_properties[propname]
|
||||||
|
|
||||||
|
@staticmethod
|
||||||
|
def clear(obj):
|
||||||
|
"""clears cache of obj"""
|
||||||
|
if hasattr(obj, '_cached_properties'):
|
||||||
|
obj._cached_properties = {}
|
||||||
|
|
||||||
|
@staticmethod
|
||||||
|
def is_cached(obj, propname):
|
||||||
|
if hasattr(obj, '_cached_properties') and propname in obj._cached_properties:
|
||||||
|
return True
|
||||||
|
else:
|
||||||
|
return False
|
||||||
160
zfs_autobackup/CmdPipe.py
Normal file
160
zfs_autobackup/CmdPipe.py
Normal file
@ -0,0 +1,160 @@
|
|||||||
|
import subprocess
|
||||||
|
import os
|
||||||
|
import select
|
||||||
|
|
||||||
|
try:
|
||||||
|
from shlex import quote as cmd_quote
|
||||||
|
except ImportError:
|
||||||
|
from pipes import quote as cmd_quote
|
||||||
|
|
||||||
|
|
||||||
|
class CmdItem:
|
||||||
|
"""one command item, to be added to a CmdPipe"""
|
||||||
|
|
||||||
|
def __init__(self, cmd, readonly=False, stderr_handler=None, exit_handler=None, shell=False):
|
||||||
|
"""create item. caller has to make sure cmd is properly escaped when using shell.
|
||||||
|
:type cmd: list of str
|
||||||
|
"""
|
||||||
|
|
||||||
|
self.cmd = cmd
|
||||||
|
self.readonly = readonly
|
||||||
|
self.stderr_handler = stderr_handler
|
||||||
|
self.exit_handler = exit_handler
|
||||||
|
self.shell = shell
|
||||||
|
self.process = None
|
||||||
|
|
||||||
|
def __str__(self):
|
||||||
|
"""return copy-pastable version of command."""
|
||||||
|
if self.shell:
|
||||||
|
# its already copy pastable for a shell:
|
||||||
|
return " ".join(self.cmd)
|
||||||
|
else:
|
||||||
|
# make it copy-pastable, will make a mess of quotes sometimes, but is correct
|
||||||
|
return " ".join(map(cmd_quote, self.cmd))
|
||||||
|
|
||||||
|
def create(self, stdin):
|
||||||
|
"""actually create the subprocess (called by CmdPipe)"""
|
||||||
|
|
||||||
|
# make sure the command gets all the data in utf8 format:
|
||||||
|
# (this is necessary if LC_ALL=en_US.utf8 is not set in the environment)
|
||||||
|
encoded_cmd = []
|
||||||
|
for arg in self.cmd:
|
||||||
|
encoded_cmd.append(arg.encode('utf-8'))
|
||||||
|
|
||||||
|
self.process = subprocess.Popen(encoded_cmd, env=os.environ, stdout=subprocess.PIPE, stdin=stdin,
|
||||||
|
stderr=subprocess.PIPE, shell=self.shell)
|
||||||
|
|
||||||
|
|
||||||
|
class CmdPipe:
|
||||||
|
"""a pipe of one or more commands. also takes care of utf-8 encoding/decoding and line based parsing"""
|
||||||
|
|
||||||
|
def __init__(self, readonly=False, inp=None):
|
||||||
|
"""
|
||||||
|
:param inp: input string for stdin
|
||||||
|
:param readonly: Only execute if entire pipe consist of readonly commands
|
||||||
|
"""
|
||||||
|
# list of commands + error handlers to execute
|
||||||
|
self.items = []
|
||||||
|
|
||||||
|
self.inp = inp
|
||||||
|
self.readonly = readonly
|
||||||
|
self._should_execute = True
|
||||||
|
|
||||||
|
def add(self, cmd_item):
|
||||||
|
"""adds a CmdItem to pipe.
|
||||||
|
:type cmd_item: CmdItem
|
||||||
|
"""
|
||||||
|
|
||||||
|
self.items.append(cmd_item)
|
||||||
|
|
||||||
|
if not cmd_item.readonly and self.readonly:
|
||||||
|
self._should_execute = False
|
||||||
|
|
||||||
|
def __str__(self):
|
||||||
|
"""transform whole pipe into oneliner for debugging and testing. this should generate a copy-pastable string for in a console """
|
||||||
|
|
||||||
|
ret = ""
|
||||||
|
for item in self.items:
|
||||||
|
if ret:
|
||||||
|
ret = ret + " | "
|
||||||
|
ret = ret + "({})".format(item) # this will do proper escaping to make it copypastable
|
||||||
|
|
||||||
|
return ret
|
||||||
|
|
||||||
|
def should_execute(self):
|
||||||
|
return self._should_execute
|
||||||
|
|
||||||
|
def execute(self, stdout_handler):
|
||||||
|
"""run the pipe. returns True all exit handlers returned true"""
|
||||||
|
|
||||||
|
if not self._should_execute:
|
||||||
|
return True
|
||||||
|
|
||||||
|
# first process should have actual user input as stdin:
|
||||||
|
selectors = []
|
||||||
|
|
||||||
|
# create processes
|
||||||
|
last_stdout = None
|
||||||
|
stdin = subprocess.PIPE
|
||||||
|
for item in self.items:
|
||||||
|
|
||||||
|
item.create(stdin)
|
||||||
|
selectors.append(item.process.stderr)
|
||||||
|
|
||||||
|
if last_stdout is None:
|
||||||
|
# we're the first process in the pipe, do we have some input?
|
||||||
|
if self.inp is not None:
|
||||||
|
# TODO: make streaming to support big inputs?
|
||||||
|
item.process.stdin.write(self.inp.encode('utf-8'))
|
||||||
|
item.process.stdin.close()
|
||||||
|
else:
|
||||||
|
# last stdout was piped to this stdin already, so close it because we dont need it anymore
|
||||||
|
last_stdout.close()
|
||||||
|
|
||||||
|
last_stdout = item.process.stdout
|
||||||
|
stdin = last_stdout
|
||||||
|
|
||||||
|
# monitor last stdout as well
|
||||||
|
selectors.append(last_stdout)
|
||||||
|
|
||||||
|
while True:
|
||||||
|
# wait for output on one of the stderrs or last_stdout
|
||||||
|
(read_ready, write_ready, ex_ready) = select.select(selectors, [], [])
|
||||||
|
eof_count = 0
|
||||||
|
done_count = 0
|
||||||
|
|
||||||
|
# read line and call appropriate handlers
|
||||||
|
if last_stdout in read_ready:
|
||||||
|
line = last_stdout.readline().decode('utf-8').rstrip()
|
||||||
|
if line != "":
|
||||||
|
stdout_handler(line)
|
||||||
|
else:
|
||||||
|
eof_count = eof_count + 1
|
||||||
|
|
||||||
|
for item in self.items:
|
||||||
|
if item.process.stderr in read_ready:
|
||||||
|
line = item.process.stderr.readline().decode('utf-8').rstrip()
|
||||||
|
if line != "":
|
||||||
|
item.stderr_handler(line)
|
||||||
|
else:
|
||||||
|
eof_count = eof_count + 1
|
||||||
|
|
||||||
|
if item.process.poll() is not None:
|
||||||
|
done_count = done_count + 1
|
||||||
|
|
||||||
|
# all filehandles are eof and all processes are done (poll() is not None)
|
||||||
|
if eof_count == len(selectors) and done_count == len(self.items):
|
||||||
|
break
|
||||||
|
|
||||||
|
# close filehandles
|
||||||
|
last_stdout.close()
|
||||||
|
for item in self.items:
|
||||||
|
item.process.stderr.close()
|
||||||
|
|
||||||
|
# call exit handlers
|
||||||
|
success = True
|
||||||
|
for item in self.items:
|
||||||
|
if item.exit_handler is not None:
|
||||||
|
success=item.exit_handler(item.process.returncode) and success
|
||||||
|
|
||||||
|
return success
|
||||||
161
zfs_autobackup/ExecuteNode.py
Normal file
161
zfs_autobackup/ExecuteNode.py
Normal file
@ -0,0 +1,161 @@
|
|||||||
|
import os
|
||||||
|
import select
|
||||||
|
import subprocess
|
||||||
|
from .CmdPipe import CmdPipe, CmdItem
|
||||||
|
from .LogStub import LogStub
|
||||||
|
|
||||||
|
try:
|
||||||
|
from shlex import quote as cmd_quote
|
||||||
|
except ImportError:
|
||||||
|
from pipes import quote as cmd_quote
|
||||||
|
|
||||||
|
|
||||||
|
class ExecuteError(Exception):
|
||||||
|
pass
|
||||||
|
|
||||||
|
|
||||||
|
class ExecuteNode(LogStub):
|
||||||
|
"""an endpoint to execute local or remote commands via ssh"""
|
||||||
|
|
||||||
|
PIPE=1
|
||||||
|
|
||||||
|
def __init__(self, ssh_config=None, ssh_to=None, readonly=False, debug_output=False):
|
||||||
|
"""ssh_config: custom ssh config
|
||||||
|
ssh_to: server you want to ssh to. none means local
|
||||||
|
readonly: only execute commands that don't make any changes (useful for testing-runs)
|
||||||
|
debug_output: show output and exit codes of commands in debugging output.
|
||||||
|
"""
|
||||||
|
|
||||||
|
self.ssh_config = ssh_config
|
||||||
|
self.ssh_to = ssh_to
|
||||||
|
self.readonly = readonly
|
||||||
|
self.debug_output = debug_output
|
||||||
|
|
||||||
|
def __repr__(self):
|
||||||
|
if self.ssh_to is None:
|
||||||
|
return "(local)"
|
||||||
|
else:
|
||||||
|
return self.ssh_to
|
||||||
|
|
||||||
|
def _parse_stdout(self, line):
|
||||||
|
"""parse stdout. can be overridden in subclass"""
|
||||||
|
if self.debug_output:
|
||||||
|
self.debug("STDOUT > " + line.rstrip())
|
||||||
|
|
||||||
|
def _parse_stderr(self, line, hide_errors):
|
||||||
|
"""parse stderr. can be overridden in subclass"""
|
||||||
|
if hide_errors:
|
||||||
|
self.debug("STDERR > " + line.rstrip())
|
||||||
|
else:
|
||||||
|
self.error("STDERR > " + line.rstrip())
|
||||||
|
|
||||||
|
def _quote(self, cmd):
|
||||||
|
"""return quoted version of command. if it has value PIPE it will add an actual | """
|
||||||
|
if cmd==self.PIPE:
|
||||||
|
return('|')
|
||||||
|
else:
|
||||||
|
return(cmd_quote(cmd))
|
||||||
|
|
||||||
|
def _shell_cmd(self, cmd):
|
||||||
|
"""prefix specified ssh shell to command and escape shell characters"""
|
||||||
|
|
||||||
|
ret=[]
|
||||||
|
|
||||||
|
#add remote shell
|
||||||
|
if not self.is_local():
|
||||||
|
ret=["ssh"]
|
||||||
|
|
||||||
|
if self.ssh_config is not None:
|
||||||
|
ret.extend(["-F", self.ssh_config])
|
||||||
|
|
||||||
|
ret.append(self.ssh_to)
|
||||||
|
|
||||||
|
ret.append(" ".join(map(self._quote, cmd)))
|
||||||
|
|
||||||
|
return ret
|
||||||
|
|
||||||
|
def is_local(self):
|
||||||
|
return self.ssh_to is None
|
||||||
|
|
||||||
|
def run(self, cmd, inp=None, tab_split=False, valid_exitcodes=None, readonly=False, hide_errors=False,
        return_stderr=False, pipe=False):
    """run a command on the node , checks output and parses/handle output and returns it

    Either uses a local shell (sh -c) or remote shell (ssh) to execute the command. Therefore the command can have stuff like actual pipes in it, if you dont want to use pipe=True to pipe stuff.

    :param cmd: the actual command, should be a list, where the first item is the command
        and the rest are parameters. use ExecuteNode.PIPE to add an unescaped |
        (if you want to use system piping instead of python piping)
    :param pipe: return CmdPipe instead of executing it.
    :param inp: Can be None, a string or a CmdPipe that was previously returned.
    :param tab_split: split tabbed files in output into a list
    :param valid_exitcodes: list of valid exit codes for this command (checks exit code of both sides of a pipe)
        Use [] to accept all exit codes. Default [0]
    :param readonly: make this True if the command doesn't make any changes and is safe to execute in testmode
    :param hide_errors: don't show stderr output as error, instead show it as debugging output (use to hide expected errors)
    :param return_stderr: return both stdout and stderr as a tuple. (normally only returns stdout)

    :raises ExecuteError: when any command in the pipe exits with an invalid code.
    """

    # create new pipe? (a CmdPipe passed as inp means we append to an existing one)
    if not isinstance(inp, CmdPipe):
        cmd_pipe = CmdPipe(self.readonly, inp)
    else:
        # add stuff to existing pipe
        cmd_pipe = inp

    # stderr parser: collects lines (optionally tab-split) and forwards them
    # to _parse_stderr for logging
    error_lines = []

    def stderr_handler(line):
        if tab_split:
            error_lines.append(line.rstrip().split('\t'))
        else:
            error_lines.append(line.rstrip())
        self._parse_stderr(line, hide_errors)

    # exit code handler
    if valid_exitcodes is None:
        valid_exitcodes = [0]

    def exit_handler(exit_code):
        if self.debug_output:
            self.debug("EXIT > {}".format(exit_code))

        # an empty valid_exitcodes list accepts every exit code
        # NOTE: closes over cmd_item, which is assigned below before the pipe executes
        if (valid_exitcodes != []) and (exit_code not in valid_exitcodes):
            self.error("Command \"{}\" returned exit code {} (valid codes: {})".format(cmd_item, exit_code, valid_exitcodes))
            return False

        return True

    # add shell command and handlers to pipe
    cmd_item=CmdItem(cmd=self._shell_cmd(cmd), readonly=readonly, stderr_handler=stderr_handler, exit_handler=exit_handler, shell=self.is_local())
    cmd_pipe.add(cmd_item)

    # return pipe instead of executing?
    if pipe:
        return cmd_pipe

    # stdout parser: mirrors stderr_handler but feeds _parse_stdout
    output_lines = []

    def stdout_handler(line):
        if tab_split:
            output_lines.append(line.rstrip().split('\t'))
        else:
            output_lines.append(line.rstrip())
        self._parse_stdout(line)

    # in readonly mode non-readonly pipes are only logged, not executed
    if cmd_pipe.should_execute():
        self.debug("CMD > {}".format(cmd_pipe))
    else:
        self.debug("CMDSKIP> {}".format(cmd_pipe))

    # execute and calls handlers in CmdPipe
    if not cmd_pipe.execute(stdout_handler=stdout_handler):
        raise(ExecuteError("Last command returned error"))

    if return_stderr:
        return output_lines, error_lines
    else:
        return output_lines
|
||||||
66
zfs_autobackup/LogConsole.py
Normal file
66
zfs_autobackup/LogConsole.py
Normal file
@ -0,0 +1,66 @@
|
|||||||
|
# python 2 compatibility
|
||||||
|
from __future__ import print_function
|
||||||
|
|
||||||
|
import sys
|
||||||
|
|
||||||
|
class LogConsole:
    """Log-class that outputs to console, adding colors if needed"""

    def __init__(self, show_debug, show_verbose, color):
        """Create a console logger.

        show_debug: print debug() messages
        show_verbose: print verbose() messages
        color: try to colorize output (silently disabled when colorama is not installed)
        """
        self.last_log = ""
        self.show_debug = show_debug
        self.show_verbose = show_verbose

        # try to use color, failback if colorama not available
        self.colorama = False
        if color:
            try:
                # declare the global BEFORE binding it: "import x" followed by
                # "global x" in the same scope is a SyntaxError on python 3
                global colorama
                import colorama
                self.colorama = True
            except ImportError:
                pass

    def error(self, txt):
        """Print an error to stderr (bright red when color is enabled)."""
        if self.colorama:
            print(colorama.Fore.RED + colorama.Style.BRIGHT + "! " + txt + colorama.Style.RESET_ALL, file=sys.stderr)
        else:
            print("! " + txt, file=sys.stderr)
        sys.stderr.flush()

    def warning(self, txt):
        """Print a warning to stdout (bright yellow when color is enabled)."""
        if self.colorama:
            print(colorama.Fore.YELLOW + colorama.Style.BRIGHT + " NOTE: " + txt + colorama.Style.RESET_ALL)
        else:
            print(" NOTE: " + txt)
        sys.stdout.flush()

    def verbose(self, txt):
        """Print a verbose message to stdout, only when show_verbose is set."""
        if self.show_verbose:
            if self.colorama:
                print(colorama.Style.NORMAL + " " + txt + colorama.Style.RESET_ALL)
            else:
                print(" " + txt)
            sys.stdout.flush()

    def debug(self, txt):
        """Print a debug message to stdout, only when show_debug is set."""
        if self.show_debug:
            if self.colorama:
                print(colorama.Fore.GREEN + "# " + txt + colorama.Style.RESET_ALL)
            else:
                print("# " + txt)
            sys.stdout.flush()

    def progress(self, txt):
        """print progress output to stderr (stays on same line)"""
        self.clear_progress()
        print(">>> {}\r".format(txt), end='', file=sys.stderr)
        sys.stderr.flush()

    def clear_progress(self):
        """Erase the current progress line on stderr.

        FIX: the previous version did an unconditional 'import colorama' here,
        which crashed with ImportError whenever colorama (an optional
        dependency, see __init__) was not installed. We now emit the ANSI
        "erase entire line" sequence directly - it is exactly what
        colorama.ansi.clear_line() produces.
        """
        print("\x1b[2K", end='', file=sys.stderr)
        sys.stderr.flush()
|
||||||
18
zfs_autobackup/LogStub.py
Normal file
18
zfs_autobackup/LogStub.py
Normal file
@ -0,0 +1,18 @@
|
|||||||
|
#Used for baseclasses that dont implement their own logging (Like ExecuteNode)
|
||||||
|
#Usually logging is implemented in subclasses (Like ZfsNode thats a subclass of ExecuteNode), but for regression testing its nice to have these stubs.
|
||||||
|
|
||||||
|
class LogStub:
    """Just a stub, usually overriden in subclasses."""
    # Used by base classes that don't implement their own logging (like
    # ExecuteNode); handy for regression testing.

    @staticmethod
    def _emit(prefix, txt):
        # every stub level just prints a fixed prefix plus the text
        print(prefix + txt)

    def debug(self, txt):
        self._emit("DEBUG : ", txt)

    def verbose(self, txt):
        self._emit("VERBOSE: ", txt)

    def warning(self, txt):
        self._emit("WARNING: ", txt)

    def error(self, txt):
        self._emit("ERROR : ", txt)
|
||||||
99
zfs_autobackup/Thinner.py
Normal file
99
zfs_autobackup/Thinner.py
Normal file
@ -0,0 +1,99 @@
|
|||||||
|
import time
|
||||||
|
|
||||||
|
from .ThinnerRule import ThinnerRule
|
||||||
|
|
||||||
|
|
||||||
|
class Thinner:
    """progressive thinner (universal, used for cleaning up snapshots)"""

    def __init__(self, schedule_str=""):
        """
        Args:
            schedule_str: comma seperated list of ThinnerRules. A plain number specifies how many snapshots to always keep.
        """

        self.rules = []
        self.always_keep = 0

        if schedule_str == "":
            return

        for part in schedule_str.split(","):
            # a bare (possibly negative) number sets the always-keep count
            if part.lstrip('-').isdigit():
                self.always_keep = int(part)
                if self.always_keep < 0:
                    raise (Exception("Number of snapshots to keep cant be negative: {}".format(self.always_keep)))
            else:
                self.rules.append(ThinnerRule(part))

    def human_rules(self):
        """get list of human readable rules"""
        descriptions = []
        if self.always_keep:
            plural = "s" if self.always_keep != 1 else ""
            descriptions.append("Keep the last {} snapshot{}.".format(self.always_keep, plural))
        descriptions.extend(rule.human_str for rule in self.rules)

        return descriptions

    def thin(self, objects, keep_objects=None, now=None):
        """thin list of objects with current schedule rules. objects: list of
        objects to thin. every object should have timestamp attribute.

        return( keeps, removes )

        Args:
            objects: list of objects to check (should have a timestamp attribute)
            keep_objects: objects to always keep (if they also are in the in the normal objects list)
            now: if specified, use this time as current time
        """

        keep_objects = keep_objects or []

        # always keep a number of the last objets?
        if self.always_keep:
            # all of them within the always-keep window?
            if len(objects) <= self.always_keep:
                return objects, []

            # determine which ones
            always_keep_objects = objects[-self.always_keep:]
        else:
            always_keep_objects = []

        # one dict of used time-blocks per rule period
        time_blocks = {rule.period: {} for rule in self.rules}

        if not now:
            now = int(time.time())

        keeps = []
        removes = []

        # traverse objects
        for candidate in objects:
            # important they are ints!
            timestamp = int(candidate.timestamp)
            age = int(now) - timestamp

            # look if there is ANY timeblock that wants to keep this object,
            # per period-size, if not too old yet
            wanted = False
            for rule in self.rules:
                if age <= rule.ttl:
                    block_nr = int(timestamp / rule.period)
                    if block_nr not in time_blocks[rule.period]:
                        time_blocks[rule.period][block_nr] = True
                        wanted = True

            # keep it according to schedule, or because it is in one of the keep lists
            if wanted or candidate in keep_objects or candidate in always_keep_objects:
                keeps.append(candidate)
            else:
                removes.append(candidate)

        return keeps, removes
|
||||||
71
zfs_autobackup/ThinnerRule.py
Normal file
71
zfs_autobackup/ThinnerRule.py
Normal file
@ -0,0 +1,71 @@
|
|||||||
|
import re
|
||||||
|
|
||||||
|
|
||||||
|
class ThinnerRule:
    """a thinning schedule rule for Thinner"""

    # seconds per supported time unit
    TIME_NAMES = {
        'y': 3600 * 24 * 365.25,
        'm': 3600 * 24 * 30,
        'w': 3600 * 24 * 7,
        'd': 3600 * 24,
        'h': 3600,
        'min': 60,
        's': 1,
    }

    # human readable (singular) name per unit
    TIME_DESC = {
        'y': 'year',
        'm': 'month',
        'w': 'week',
        'd': 'day',
        'h': 'hour',
        'min': 'minute',
        's': 'second',
    }

    def __init__(self, rule_str):
        """parse scheduling string
        example:
            daily snapshot, remove after a week:     1d1w
            weekly snapshot, remove after a month:   1w1m
            monthly snapshot, remove after 6 months: 1m6m
            yearly snapshot, remove after 2 year:    1y2y
            keep all snapshots, remove after a day:  1s1d
            keep nothing:                            1s1s
        """

        rule_str = rule_str.lower()
        # amount/unit pairs for period and ttl
        matches = re.findall("([0-9]*)([a-z]*)([0-9]*)([a-z]*)", rule_str)[0]

        if '' in matches:
            raise (Exception("Invalid schedule string: '{}'".format(rule_str)))

        period_str, period_unit, ttl_str, ttl_unit = matches
        period_amount = int(period_str)
        ttl_amount = int(ttl_str)

        if period_unit not in self.TIME_NAMES:
            raise (Exception("Invalid period string in schedule: '{}'".format(rule_str)))

        if ttl_unit not in self.TIME_NAMES:
            raise (Exception("Invalid ttl string in schedule: '{}'".format(rule_str)))

        self.period = period_amount * self.TIME_NAMES[period_unit]
        self.ttl = ttl_amount * self.TIME_NAMES[ttl_unit]

        if self.period > self.ttl:
            raise (Exception("Period cant be longer than ttl in schedule: '{}'".format(rule_str)))

        self.rule_str = rule_str

        period_plural = "s" if period_amount != 1 else ""
        ttl_plural = "s" if ttl_amount != 1 else ""
        self.human_str = "Keep every {} {}{}, delete after {} {}{}.".format(
            period_amount, self.TIME_DESC[period_unit], period_plural, ttl_amount,
            self.TIME_DESC[ttl_unit], ttl_plural)

    def __str__(self):
        """get schedule as a schedule string"""

        return self.rule_str
|
||||||
601
zfs_autobackup/ZfsAutobackup.py
Normal file
601
zfs_autobackup/ZfsAutobackup.py
Normal file
@ -0,0 +1,601 @@
|
|||||||
|
import argparse
|
||||||
|
import sys
|
||||||
|
import time
|
||||||
|
|
||||||
|
from . import compressors
|
||||||
|
from .ExecuteNode import ExecuteNode
|
||||||
|
from .Thinner import Thinner
|
||||||
|
from .ZfsDataset import ZfsDataset
|
||||||
|
from .LogConsole import LogConsole
|
||||||
|
from .ZfsNode import ZfsNode
|
||||||
|
from .ThinnerRule import ThinnerRule
|
||||||
|
|
||||||
|
|
||||||
|
class ZfsAutobackup:
|
||||||
|
"""main class"""
|
||||||
|
|
||||||
|
VERSION = "3.1.1-rc1"
|
||||||
|
HEADER = "zfs-autobackup v{} - (c)2021 E.H.Eefting (edwin@datux.nl)".format(VERSION)
|
||||||
|
|
||||||
|
def __init__(self, argv, print_arguments=True):
    """Parse the command line in argv, normalise/validate the options and set
    up console logging.

    argv: command line arguments (without the program name)
    print_arguments: echo the raw arguments first (helps with investigating
        failed regression tests)

    Exits the process with code 255 on --version, a missing BACKUP-NAME or an
    absolute TARGET-PATH.
    """

    # helps with investigating failed regression tests:
    if print_arguments:
        print("ARGUMENTS: " + " ".join(argv))

    parser = argparse.ArgumentParser(
        description=self.HEADER,
        epilog='Full manual at: https://github.com/psy0rz/zfs_autobackup')

    # connection options
    parser.add_argument('--ssh-config', metavar='CONFIG-FILE', default=None, help='Custom ssh client config')
    parser.add_argument('--ssh-source', metavar='USER@HOST', default=None,
                        help='Source host to get backup from.')
    parser.add_argument('--ssh-target', metavar='USER@HOST', default=None,
                        help='Target host to push backup to.')

    # thinning schedules (see Thinner/ThinnerRule for the SCHEDULE syntax)
    parser.add_argument('--keep-source', metavar='SCHEDULE', type=str, default="10,1d1w,1w1m,1m1y",
                        help='Thinning schedule for old source snapshots. Default: %(default)s')
    parser.add_argument('--keep-target', metavar='SCHEDULE', type=str, default="10,1d1w,1w1m,1m1y",
                        help='Thinning schedule for old target snapshots. Default: %(default)s')

    # positionals: both optional, validated further below
    parser.add_argument('backup_name', metavar='BACKUP-NAME', default=None, nargs='?',
                        help='Name of the backup (you should set the zfs property "autobackup:backup-name" to '
                             'true on filesystems you want to backup')
    parser.add_argument('target_path', metavar='TARGET-PATH', default=None, nargs='?',
                        help='Target ZFS filesystem (optional: if not specified, zfs-autobackup will only operate '
                             'as snapshot-tool on source)')

    # snapshot creation and sending
    parser.add_argument('--pre-snapshot-cmd', metavar="COMMAND", default=[], action='append',
                        help='Run COMMAND before snapshotting (can be used multiple times.')
    parser.add_argument('--post-snapshot-cmd', metavar="COMMAND", default=[], action='append',
                        help='Run COMMAND after snapshotting (can be used multiple times.')
    parser.add_argument('--other-snapshots', action='store_true',
                        help='Send over other snapshots as well, not just the ones created by this tool.')
    parser.add_argument('--no-snapshot', action='store_true',
                        help='Don\'t create new snapshots (useful for finishing uncompleted backups, or cleanups)')
    parser.add_argument('--no-send', action='store_true',
                        help='Don\'t send snapshots (useful for cleanups, or if you want a serperate send-cronjob)')
    parser.add_argument('--no-thinning', action='store_true', help="Do not destroy any snapshots.")
    parser.add_argument('--no-holds', action='store_true',
                        help='Don\'t hold snapshots. (Faster. Allows you to destroy common snapshot.)')
    parser.add_argument('--min-change', metavar='BYTES', type=int, default=1,
                        help='Number of bytes written after which we consider a dataset changed (default %('
                             'default)s)')
    parser.add_argument('--allow-empty', action='store_true',
                        help='If nothing has changed, still create empty snapshots. (same as --min-change=0)')

    # dataset selection (--ignore-replicated is a hidden deprecated alias, handled below)
    parser.add_argument('--ignore-replicated', action='store_true', help=argparse.SUPPRESS)
    parser.add_argument('--exclude-unchanged', action='store_true',
                        help='Exclude datasets that have no changes since any last snapshot. (Useful in combination with proxmox HA replication)')
    parser.add_argument('--exclude-received', action='store_true',
                        help='Exclude datasets that have the origin of their autobackup: property as "received". '
                             'This can avoid recursive replication between two backup partners.')
    parser.add_argument('--strip-path', metavar='N', default=0, type=int,
                        help='Number of directories to strip from target path (use 1 when cloning zones between 2 '
                             'SmartOS machines)')

    # receive-side property handling
    parser.add_argument('--clear-refreservation', action='store_true',
                        help='Filter "refreservation" property. (recommended, safes space. same as '
                             '--filter-properties refreservation)')
    parser.add_argument('--clear-mountpoint', action='store_true',
                        help='Set property canmount=noauto for new datasets. (recommended, prevents mount '
                             'conflicts. same as --set-properties canmount=noauto)')
    parser.add_argument('--filter-properties', metavar='PROPERTY,...', type=str,
                        help='List of properties to "filter" when receiving filesystems. (you can still restore '
                             'them with zfs inherit -S)')
    parser.add_argument('--set-properties', metavar='PROPERTY=VALUE,...', type=str,
                        help='List of propererties to override when receiving filesystems. (you can still restore '
                             'them with zfs inherit -S)')
    parser.add_argument('--rollback', action='store_true',
                        help='Rollback changes to the latest target snapshot before starting. (normally you can '
                             'prevent changes by setting the readonly property on the target_path to on)')
    parser.add_argument('--destroy-incompatible', action='store_true',
                        help='Destroy incompatible snapshots on target. Use with care! (implies --rollback)')
    parser.add_argument('--destroy-missing', metavar="SCHEDULE", type=str, default=None,
                        help='Destroy datasets on target that are missing on the source. Specify the time since '
                             'the last snapshot, e.g: --destroy-missing 30d')
    parser.add_argument('--ignore-transfer-errors', action='store_true',
                        help='Ignore transfer errors (still checks if received filesystem exists. useful for '
                             'acltype errors)')

    # encryption handling
    parser.add_argument('--decrypt', action='store_true',
                        help='Decrypt data before sending it over.')

    parser.add_argument('--encrypt', action='store_true',
                        help='Encrypt data after receiving it.')

    parser.add_argument('--zfs-compressed', action='store_true',
                        help='Transfer blocks that already have zfs-compression as-is.')

    # output/debugging
    parser.add_argument('--test','--dry-run', '-n', action='store_true',
                        help='Dry run, dont change anything, just show what would be done (still does all read-only '
                             'operations)')
    parser.add_argument('--verbose','-v', action='store_true', help='verbose output')
    parser.add_argument('--debug','-d', action='store_true',
                        help='Show zfs commands that are executed, stops after an exception.')
    parser.add_argument('--debug-output', action='store_true',
                        help='Show zfs commands and their output/exit codes. (noisy)')
    parser.add_argument('--progress', action='store_true',
                        help='show zfs progress output. Enabled automaticly on ttys. (use --no-progress to disable)')
    parser.add_argument('--no-progress', action='store_true',
                        help=argparse.SUPPRESS)  # needed to workaround a zfs recv -v bug

    # obsolete options, kept hidden so old command lines keep working
    parser.add_argument('--resume', action='store_true', help=argparse.SUPPRESS)
    parser.add_argument('--raw', action='store_true', help=argparse.SUPPRESS)

    # these things all do stuff by piping zfs send/recv IO
    parser.add_argument('--send-pipe', metavar="COMMAND", default=[], action='append',
                        help='pipe zfs send output through COMMAND (can be used multiple times)')
    parser.add_argument('--recv-pipe', metavar="COMMAND", default=[], action='append',
                        help='pipe zfs recv input through COMMAND (can be used multiple times)')
    parser.add_argument('--compress', metavar='TYPE', default=None, nargs='?', const='zstd-fast',
                        choices=compressors.choices(),
                        help='Use compression during transfer, defaults to zstd-fast if TYPE is not specified. ({})'.format(
                            ", ".join(compressors.choices())))
    parser.add_argument('--rate', metavar='DATARATE', default=None,
                        help='Limit data transfer rate (e.g. 128K. requires mbuffer.)')
    parser.add_argument('--buffer', metavar='SIZE', default=None,
                        help='Add zfs send and recv buffers to smooth out IO bursts. (e.g. 128M. requires mbuffer)')

    # naming formats ({} is replaced with the backup name)
    parser.add_argument('--snapshot-format', metavar='FORMAT', default="{}-%Y%m%d%H%M%S",
                        help='Snapshot naming format. Default: %(default)s')
    parser.add_argument('--property-format', metavar='FORMAT', default="autobackup:{}",
                        help='Select property naming format. Default: %(default)s')
    parser.add_argument('--hold-format', metavar='FORMAT', default="zfs_autobackup:{}",
                        help='Hold naming format. Default: %(default)s')

    parser.add_argument('--version', action='store_true',
                        help='Show version.')

    # note args is the only global variable we use, since its a global readonly setting anyway
    args = parser.parse_args(argv)

    self.args = args

    if args.version:
        print(self.HEADER)
        sys.exit(255)

    # auto enable progress?
    if sys.stderr.isatty() and not args.no_progress:
        args.progress = True

    # --debug-output implies --debug
    if args.debug_output:
        args.debug = True

    # dry-run implies verbose so the user sees what would have happened
    if self.args.test:
        self.args.verbose = True

    if args.allow_empty:
        args.min_change = 0

    if args.destroy_incompatible:
        args.rollback = True

    # logging must be up before any of the warnings/errors below
    self.log = LogConsole(show_debug=self.args.debug, show_verbose=self.args.verbose, color=sys.stdout.isatty())
    self.verbose(self.HEADER)

    if args.backup_name==None:
        parser.print_usage()
        self.log.error("Please specify BACKUP-NAME")
        sys.exit(255)

    if args.resume:
        self.warning("The --resume option isn't needed anymore (its autodetected now)")

    if args.raw:
        self.warning(
            "The --raw option isn't needed anymore (its autodetected now). Also see --encrypt and --decrypt.")

    if args.target_path is not None and args.target_path[0] == "/":
        self.log.error("Target should not start with a /")
        sys.exit(255)

    if args.compress and args.ssh_source is None and args.ssh_target is None:
        self.warning("Using compression, but transfer is local.")

    if args.compress and args.zfs_compressed:
        self.warning("Using --compress with --zfs-compressed, might be inefficient.")

    # --ignore-replicated was renamed to --exclude-unchanged
    if args.ignore_replicated:
        self.warning("--ignore-replicated has been renamed, using --exclude-unchanged")
        args.exclude_unchanged = True
|
||||||
|
|
||||||
|
def verbose(self, txt):
    """Log *txt* at verbose level (delegates to the console logger)."""
    self.log.verbose(txt)
|
||||||
|
|
||||||
|
def warning(self, txt):
    """Log *txt* as a warning (delegates to the console logger)."""
    self.log.warning(txt)
|
||||||
|
|
||||||
|
def error(self, txt):
    """Log *txt* as an error (delegates to the console logger)."""
    self.log.error(txt)
|
||||||
|
|
||||||
|
def debug(self, txt):
    """Log *txt* at debug level (delegates to the console logger)."""
    self.log.debug(txt)
|
||||||
|
|
||||||
|
def set_title(self, title):
    """Print a section header (blank line plus '#### title') via the verbose log."""
    for line in ("", "#### " + title):
        self.log.verbose(line)
|
||||||
|
|
||||||
|
def progress(self, txt):
    """Show *txt* on the progress line (delegates to the console logger)."""
    self.log.progress(txt)
|
||||||
|
|
||||||
|
def clear_progress(self):
    """Erase the progress line (delegates to the console logger)."""
    self.log.clear_progress()
|
||||||
|
|
||||||
|
# NOTE: this method also uses self.args. args that need extra processing are passed as function parameters:
|
||||||
|
def thin_missing_targets(self, target_dataset, used_target_datasets):
    """thin target datasets that are missing on the source."""

    self.debug("Thinning obsolete datasets")

    obsolete = [candidate for candidate in target_dataset.recursive_datasets
                if candidate not in used_target_datasets]
    total = len(obsolete)

    for index, dataset in enumerate(obsolete, start=1):

        if self.args.progress:
            self.progress("Analysing missing {}/{}".format(index, total))

        try:
            dataset.debug("Missing on source, thinning")
            dataset.thin()

        except Exception as e:
            # keep going: one failing dataset shouldn't stop the rest
            dataset.error("Error during thinning of missing datasets ({})".format(str(e)))

    if self.args.progress:
        self.clear_progress()
|
||||||
|
|
||||||
|
# NOTE: this method also uses self.args. args that need extra processing are passed as function parameters:
|
||||||
|
def destroy_missing_targets(self, target_dataset, used_target_datasets):
    """destroy target datasets that are missing on the source and that meet the requirements

    target_dataset: the target root dataset to scan
    used_target_datasets: datasets that are still in use (will never be destroyed)

    A dataset is only destroyed when it has snapshots made by us, the
    --destroy-missing deadline (time since our last snapshot) has passed, it
    has no foreign snapshots and no child datasets. Errors are logged per
    dataset and don't abort the loop.
    """

    self.debug("Destroying obsolete datasets")

    missing_datasets = [dataset for dataset in target_dataset.recursive_datasets if
                        dataset not in used_target_datasets]

    count = 0
    for dataset in missing_datasets:

        count = count + 1
        if self.args.progress:
            self.progress("Analysing destroy missing {}/{}".format(count, len(missing_datasets)))

        try:
            # cant do anything without our own snapshots
            if not dataset.our_snapshots:
                if dataset.datasets:
                    # its not a leaf, just ignore
                    dataset.debug("Destroy missing: ignoring")
                else:
                    dataset.verbose(
                        "Destroy missing: has no snapshots made by us. (please destroy manually)")
            else:
                # past the deadline? (prefixing "0s" makes the ttl-only rule parseable)
                deadline_ttl = ThinnerRule("0s" + self.args.destroy_missing).ttl
                now = int(time.time())
                if dataset.our_snapshots[-1].timestamp + deadline_ttl > now:
                    dataset.verbose("Destroy missing: Waiting for deadline.")
                else:

                    dataset.debug("Destroy missing: Removing our snapshots.")

                    # remove all our snaphots, except last, to safe space in case we fail later on
                    for snapshot in dataset.our_snapshots[:-1]:
                        snapshot.destroy(fail_exception=True)

                    # does it have other snapshots? (not made by us)
                    has_others = False
                    for snapshot in dataset.snapshots:
                        if not snapshot.is_ours():
                            has_others = True
                            break

                    if has_others:
                        dataset.verbose("Destroy missing: Still in use by other snapshots")
                    else:
                        if dataset.datasets:
                            dataset.verbose("Destroy missing: Still has children here.")
                        else:
                            # safe to remove: destroy our last snapshot and the dataset itself
                            dataset.verbose("Destroy missing.")
                            dataset.our_snapshots[-1].destroy(fail_exception=True)
                            dataset.destroy(fail_exception=True)

        except Exception as e:
            dataset.error("Error during --destroy-missing: {}".format(str(e)))

    if self.args.progress:
        self.clear_progress()
|
||||||
|
|
||||||
|
def get_send_pipes(self, logger):
    """determine the zfs send pipe

    Builds a flat list of command elements with ExecuteNode.PIPE separators:
    IO buffer, custom --send-pipe commands, compression and rate limiting,
    in that order.

    logger: callable used to report the chosen pipes (e.g. node.verbose)
    """

    ret = []

    # IO buffer
    if self.args.buffer:
        logger("zfs send buffer : {}".format(self.args.buffer))
        ret.extend([ExecuteNode.PIPE, "mbuffer", "-q", "-s128k", "-m" + self.args.buffer])

    # custom pipes
    for send_pipe in self.args.send_pipe:
        ret.append(ExecuteNode.PIPE)
        ret.extend(send_pipe.split(" "))
        logger("zfs send custom pipe : {}".format(send_pipe))

    # compression (use identity comparison with None per PEP 8; '' would be
    # falsy but compress is either None or a valid choice)
    if self.args.compress is not None:
        ret.append(ExecuteNode.PIPE)
        cmd = compressors.compress_cmd(self.args.compress)
        ret.extend(cmd)
        logger("zfs send compression : {}".format(" ".join(cmd)))

    # transfer rate
    if self.args.rate:
        logger("zfs send transfer rate : {}".format(self.args.rate))
        ret.extend([ExecuteNode.PIPE, "mbuffer", "-q", "-s128k", "-m16M", "-R" + self.args.rate])

    return ret
|
||||||
|
|
||||||
|
def get_recv_pipes(self, logger):
    """determine the zfs recv pipe

    Mirror image of get_send_pipes(): decompression, custom --recv-pipe
    commands and an optional IO buffer, each followed by ExecuteNode.PIPE.

    logger: callable used to report the chosen pipes (e.g. node.verbose)
    """

    ret = []

    # decompression (identity comparison with None per PEP 8)
    if self.args.compress is not None:
        cmd = compressors.decompress_cmd(self.args.compress)
        ret.extend(cmd)
        ret.append(ExecuteNode.PIPE)
        logger("zfs recv decompression : {}".format(" ".join(cmd)))

    # custom pipes
    for recv_pipe in self.args.recv_pipe:
        ret.extend(recv_pipe.split(" "))
        ret.append(ExecuteNode.PIPE)
        logger("zfs recv custom pipe : {}".format(recv_pipe))

    # IO buffer
    if self.args.buffer:
        # only add second buffer if its usefull. (e.g. non local transfer or other pipes active)
        if self.args.ssh_source is not None or self.args.ssh_target is not None or self.args.recv_pipe or self.args.send_pipe or self.args.compress is not None:
            logger("zfs recv buffer : {}".format(self.args.buffer))
            ret.extend(["mbuffer", "-q", "-s128k", "-m" + self.args.buffer, ExecuteNode.PIPE])

    return ret
|
||||||
|
|
||||||
|
# NOTE: this method also uses self.args. args that need extra processing are passed as function parameters:
def sync_datasets(self, source_node, source_datasets, target_node):
    """Sync datasets, or thin-only on both sides.

    For every selected source dataset: determine the matching target dataset
    under target_path, create missing parents, and replicate snapshots.
    Failures of one dataset are counted and do not abort the rest.

    :type target_node: ZfsNode
    :type source_datasets: list of ZfsDataset
    :type source_node: ZfsNode
    :return: number of datasets that failed
    """

    send_pipes = self.get_send_pipes(source_node.verbose)
    recv_pipes = self.get_recv_pipes(target_node.verbose)

    fail_count = 0
    count = 0
    target_datasets = []
    for source_dataset in source_datasets:

        # stats
        if self.args.progress:
            count += 1
            self.progress("Analysing dataset {}/{} ({} failed)".format(count, len(source_datasets), fail_count))

        try:
            # determine corresponding target_dataset
            target_name = self.args.target_path + "/" + source_dataset.lstrip_path(self.args.strip_path)
            target_dataset = ZfsDataset(target_node, target_name)
            target_datasets.append(target_dataset)

            # ensure parents exists
            # TODO: this isnt perfect yet, in some cases it can create parents when it shouldn't.
            if not self.args.no_send \
                    and target_dataset.parent not in target_datasets \
                    and not target_dataset.parent.exists:
                target_dataset.parent.create_filesystem(parents=True)

            # determine common zpool features (cached, so no problem we call it often)
            source_features = source_node.get_zfs_pool(source_dataset.split_path()[0]).features
            target_features = target_node.get_zfs_pool(target_dataset.split_path()[0]).features
            # FIX: `source_features and target_features` was boolean short-circuit
            # over two lists (it simply returned target_features when source_features
            # was non-empty), not the intersection the name implies. Compute the
            # actual set of features supported by BOTH pools.
            common_features = [feature for feature in source_features if feature in target_features]

            # sync the snapshots of this dataset
            source_dataset.sync_snapshots(target_dataset, show_progress=self.args.progress,
                                          features=common_features, filter_properties=self.filter_properties_list(),
                                          set_properties=self.set_properties_list(),
                                          ignore_recv_exit_code=self.args.ignore_transfer_errors,
                                          holds=not self.args.no_holds, rollback=self.args.rollback,
                                          also_other_snapshots=self.args.other_snapshots,
                                          no_send=self.args.no_send,
                                          destroy_incompatible=self.args.destroy_incompatible,
                                          send_pipes=send_pipes, recv_pipes=recv_pipes,
                                          decrypt=self.args.decrypt, encrypt=self.args.encrypt,
                                          zfs_compressed=self.args.zfs_compressed)
        except Exception as e:
            # count, report and continue with the next dataset (unless debugging)
            fail_count += 1
            source_dataset.error("FAILED: " + str(e))
            if self.args.debug:
                raise

    if self.args.progress:
        self.clear_progress()

    # thin/destroy obsolete datasets on the target side
    target_path_dataset = ZfsDataset(target_node, self.args.target_path)
    if not self.args.no_thinning:
        self.thin_missing_targets(target_dataset=target_path_dataset, used_target_datasets=target_datasets)

    if self.args.destroy_missing is not None:
        self.destroy_missing_targets(target_dataset=target_path_dataset, used_target_datasets=target_datasets)

    return fail_count
|
||||||
|
|
||||||
|
def thin_source(self, source_datasets):
    """Apply the thinning schedule to all selected source datasets.

    Used in snapshot-only mode (no target) and skips held snapshots,
    since those may still be needed by a replication target.
    """

    self.set_title("Thinning source")

    for dataset in source_datasets:
        dataset.thin(skip_holds=True)
|
||||||
|
|
||||||
|
def filter_properties_list(self):
    """Return the list of zfs property names to filter out during receive.

    Combines the comma-separated --filter-properties argument with the
    implicit "refreservation" entry added by --clear-refreservation.
    """

    properties = self.args.filter_properties.split(",") if self.args.filter_properties else []

    if self.args.clear_refreservation:
        properties.append("refreservation")

    return properties
|
||||||
|
|
||||||
|
def set_properties_list(self):
    """Return the list of "property=value" strings to set during receive.

    Combines the comma-separated --set-properties argument with the
    implicit "canmount=noauto" entry added by --clear-mountpoint.
    """

    properties = self.args.set_properties.split(",") if self.args.set_properties else []

    if self.args.clear_mountpoint:
        properties.append("canmount=noauto")

    return properties
|
||||||
|
|
||||||
|
def run(self):
    """Main entry point: snapshot, replicate and thin according to self.args.

    Returns the number of failed datasets (0 on full success), or 255 on a
    fatal error (no datasets selected, unhandled exception, or abort).
    """

    try:

        if self.args.test:
            self.warning("TEST MODE - SIMULATING WITHOUT MAKING ANY CHANGES")

        #format all the names
        property_name = self.args.property_format.format(self.args.backup_name)
        snapshot_time_format = self.args.snapshot_format.format(self.args.backup_name)
        hold_name = self.args.hold_format.format(self.args.backup_name)

        self.verbose("")
        self.verbose("Selecting dataset property : {}".format(property_name))
        self.verbose("Snapshot format : {}".format(snapshot_time_format))

        if not self.args.no_holds:
            self.verbose("Hold name : {}".format(hold_name))

        ################ create source zfsNode
        self.set_title("Source settings")

        description = "[Source]"
        # no thinner object means: keep everything, thin nothing
        if self.args.no_thinning:
            source_thinner = None
        else:
            source_thinner = Thinner(self.args.keep_source)
        # readonly=test-mode: the node then only simulates write commands
        source_node = ZfsNode(snapshot_time_format=snapshot_time_format, hold_name=hold_name, logger=self, ssh_config=self.args.ssh_config,
                              ssh_to=self.args.ssh_source, readonly=self.args.test,
                              debug_output=self.args.debug_output, description=description, thinner=source_thinner)

        ################# select source datasets
        self.set_title("Selecting")

        # Note: Before version v3.1-beta5, we always used exclude_received. This was a problem if you wanted to
        # replicate an existing backup to another host and use the same backupname/snapshots. However, exclude_received
        # may still need to be used to explicitly exclude a backup with the 'received' source to avoid accidental
        # recursive replication of a zvol that is currently being received in another session (as it will have changes).
        exclude_paths = []
        exclude_received = self.args.exclude_received
        if self.args.ssh_source == self.args.ssh_target:
            if self.args.target_path:
                # target and source are the same, make sure to exclude target_path
                self.warning("Source and target are on the same host, excluding target-path from selection.")
                exclude_paths.append(self.args.target_path)
            else:
                self.warning("Source and target are on the same host, excluding received datasets from selection.")
                exclude_received = True

        source_datasets = source_node.selected_datasets(property_name=property_name, exclude_received=exclude_received,
                                                        exclude_paths=exclude_paths,
                                                        exclude_unchanged=self.args.exclude_unchanged,
                                                        min_change=self.args.min_change)
        if not source_datasets:
            self.error(
                "No source filesystems selected, please do a 'zfs set autobackup:{0}=true' on the source datasets "
                "you want to select.".format(
                    self.args.backup_name))
            return 255

        ################# snapshotting
        if not self.args.no_snapshot:
            self.set_title("Snapshotting")
            snapshot_name = time.strftime(snapshot_time_format)
            source_node.consistent_snapshot(source_datasets, snapshot_name,
                                            min_changed_bytes=self.args.min_change,
                                            pre_snapshot_cmds=self.args.pre_snapshot_cmd,
                                            post_snapshot_cmds=self.args.post_snapshot_cmd)

        ################# sync
        # if target is specified, we sync the datasets, otherwise we just thin the source. (e.g. snapshot mode)
        if self.args.target_path:

            # create target_node
            self.set_title("Target settings")
            if self.args.no_thinning:
                target_thinner = None
            else:
                target_thinner = Thinner(self.args.keep_target)
            target_node = ZfsNode(snapshot_time_format=snapshot_time_format, hold_name=hold_name, logger=self, ssh_config=self.args.ssh_config,
                                  ssh_to=self.args.ssh_target,
                                  readonly=self.args.test, debug_output=self.args.debug_output,
                                  description="[Target]",
                                  thinner=target_thinner)
            target_node.verbose("Receive datasets under: {}".format(self.args.target_path))

            self.set_title("Synchronising")

            # check if exists, to prevent vague errors
            target_dataset = ZfsDataset(target_node, self.args.target_path)
            if not target_dataset.exists:
                raise (Exception(
                    "Target path '{}' does not exist. Please create this dataset first.".format(target_dataset)))

            # do the actual sync
            # NOTE: even with no_send, no_thinning and no_snapshot it does a usefull thing because it checks if the common snapshots and shows incompatible snapshots
            fail_count = self.sync_datasets(
                source_node=source_node,
                source_datasets=source_datasets,
                target_node=target_node)

        # no target specified, run in snapshot-only mode
        else:
            if not self.args.no_thinning:
                self.thin_source(source_datasets)
            fail_count = 0

        if not fail_count:
            if self.args.test:
                self.set_title("All tests successful.")
            else:
                self.set_title("All operations completed successfully")
                if not self.args.target_path:
                    self.verbose("(No target_path specified, only operated as snapshot tool.)")

        else:
            # 255 means a fatal error that was already reported above
            if fail_count != 255:
                self.error("{} dataset(s) failed!".format(fail_count))

        if self.args.test:
            self.verbose("")
            self.warning("TEST MODE - DID NOT MAKE ANY CHANGES!")

        return fail_count

    except Exception as e:
        self.error("Exception: " + str(e))
        if self.args.debug:
            raise
        return 255
    except KeyboardInterrupt:
        self.error("Aborted")
        return 255
|
||||||
1104
zfs_autobackup/ZfsDataset.py
Normal file
1104
zfs_autobackup/ZfsDataset.py
Normal file
File diff suppressed because it is too large
Load Diff
259
zfs_autobackup/ZfsNode.py
Normal file
259
zfs_autobackup/ZfsNode.py
Normal file
@ -0,0 +1,259 @@
|
|||||||
|
# python 2 compatibility
|
||||||
|
from __future__ import print_function
|
||||||
|
import re
|
||||||
|
import shlex
|
||||||
|
import subprocess
|
||||||
|
import sys
|
||||||
|
import time
|
||||||
|
|
||||||
|
from .ExecuteNode import ExecuteNode
|
||||||
|
from .Thinner import Thinner
|
||||||
|
from .CachedProperty import CachedProperty
|
||||||
|
from .ZfsPool import ZfsPool
|
||||||
|
from .ZfsDataset import ZfsDataset
|
||||||
|
from .ExecuteNode import ExecuteError
|
||||||
|
|
||||||
|
|
||||||
|
class ZfsNode(ExecuteNode):
    """a node that contains zfs datasets. implements global (systemwide/pool wide) zfs commands"""

    def __init__(self, snapshot_time_format, hold_name, logger, ssh_config=None, ssh_to=None, readonly=False,
                 description="",
                 debug_output=False, thinner=None):
        """
        :param snapshot_time_format: time.strftime format used to name new snapshots
        :param hold_name: name used for zfs holds placed by this tool
        :param logger: object providing verbose/error/warning/debug/progress methods
        :param ssh_config: custom ssh config file, or None
        :param ssh_to: ssh host to run commands on, or None for local
        :param readonly: if True, only simulate commands that would make changes
        :param description: prefix shown in log output (e.g. "[Source]")
        :param thinner: Thinner instance, or None to keep everything
        """

        self.snapshot_time_format = snapshot_time_format
        self.hold_name = hold_name

        self.description = description

        self.logger = logger

        if ssh_config:
            self.verbose("Using custom SSH config: {}".format(ssh_config))

        if ssh_to:
            self.verbose("Datasets on: {}".format(ssh_to))
        else:
            self.verbose("Datasets are local")

        # show the effective thinning schedule to the user
        if thinner is not None:
            rules = thinner.human_rules()
            if rules:
                for rule in rules:
                    self.verbose(rule)
            else:
                self.verbose("Keep no old snaphots")

        self.__thinner = thinner

        # list of ZfsPools
        self.__pools = {}

        # progress state for parse_zfs_progress()
        self._progress_total_bytes = 0
        self._progress_start_time = time.time()

        ExecuteNode.__init__(self, ssh_config=ssh_config, ssh_to=ssh_to, readonly=readonly, debug_output=debug_output)

    def thin(self, objects, keep_objects):
        """apply the thinning schedule; returns (keeps, obsoletes).
        Without a thinner everything is kept and nothing is obsolete."""
        if self.__thinner is not None:
            return self.__thinner.thin(objects, keep_objects)
        else:
            return (keep_objects, [])

    @CachedProperty
    def supported_send_options(self):
        """list of supported options, for optimizing sends"""
        # not every zfs implementation supports them all

        ret = []
        for option in ["-L", "-e", "-c"]:
            if self.valid_command(["zfs", "send", option, "zfs_autobackup_option_test"]):
                ret.append(option)
        return ret

    @CachedProperty
    def supported_recv_options(self):
        """list of supported options"""
        # not every zfs implementation supports them all

        ret = []
        for option in ["-s"]:
            if self.valid_command(["zfs", "recv", option, "zfs_autobackup_option_test"]):
                ret.append(option)
        return ret

    def valid_command(self, cmd):
        """test if a specified zfs options are valid exit code. use this to determine support options"""

        # exit codes 0 and 1 mean the option itself was accepted;
        # any other exit (ExecuteError) means the option is unsupported
        try:
            self.run(cmd, hide_errors=True, valid_exitcodes=[0, 1])
        except ExecuteError:
            return False

        return True

    # TODO: also create a get_zfs_dataset() function that stores all the objects in a dict. This should optimize
    # caching a bit and is more consistent.
    def get_zfs_pool(self, name):
        """get a ZfsPool() object from specified name. stores objects internally to enable caching"""

        return self.__pools.setdefault(name, ZfsPool(self, name))

    def reset_progress(self):
        """reset progress output counters"""
        self._progress_total_bytes = 0
        self._progress_start_time = time.time()

    def parse_zfs_progress(self, line, hide_errors, prefix):
        """try to parse progress output of zfs recv -Pv, and don't show it as error to the user """

        # is it progress output?
        progress_fields = line.rstrip().split("\t")

        if (line.find("nvlist version") == 0 or
                line.find("resume token contents") == 0 or
                len(progress_fields) != 1 or
                line.find("skipping ") == 0 or
                re.match("send from .*estimated size is ", line)):

            # always output for debugging offcourse
            self.debug(prefix + line.rstrip())

            # actual useful info
            if len(progress_fields) >= 3:
                # 'full'/'size' lines carry the total size in field 2,
                # 'incremental' lines in field 3
                if progress_fields[0] == 'full' or progress_fields[0] == 'size':
                    self._progress_total_bytes = int(progress_fields[2])
                elif progress_fields[0] == 'incremental':
                    self._progress_total_bytes = int(progress_fields[3])
                elif progress_fields[1].isnumeric():
                    # periodic progress line: field 1 is bytes transferred so far
                    bytes_ = int(progress_fields[1])
                    if self._progress_total_bytes:
                        percentage = min(100, int(bytes_ * 100 / self._progress_total_bytes))
                        speed = int(bytes_ / (time.time() - self._progress_start_time) / (1024 * 1024))
                        bytes_left = self._progress_total_bytes - bytes_
                        minutes_left = int((bytes_left / (bytes_ / (time.time() - self._progress_start_time))) / 60)

                        self.logger.progress(
                            "Transfer {}% {}MB/s (total {}MB, {} minutes left)".format(percentage, speed, int(
                                self._progress_total_bytes / (1024 * 1024)), minutes_left))

            return

        # still do the normal stderr output handling
        if hide_errors:
            self.debug(prefix + line.rstrip())
        else:
            self.error(prefix + line.rstrip())

    # def _parse_stderr_pipe(self, line, hide_errors):
    #     self.parse_zfs_progress(line, hide_errors, "STDERR|> ")

    def _parse_stderr(self, line, hide_errors):
        """ExecuteNode stderr hook: route stderr through the progress parser."""
        self.parse_zfs_progress(line, hide_errors, "STDERR > ")

    def verbose(self, txt):
        # all log wrappers prefix the node description (e.g. "[Source]")
        self.logger.verbose("{} {}".format(self.description, txt))

    def error(self, txt):
        self.logger.error("{} {}".format(self.description, txt))

    def warning(self, txt):
        self.logger.warning("{} {}".format(self.description, txt))

    def debug(self, txt):
        self.logger.debug("{} {}".format(self.description, txt))

    # NOTE(review): mutable default arguments; safe here because the lists are
    # only iterated, never mutated, but consider None-defaults.
    def consistent_snapshot(self, datasets, snapshot_name, min_changed_bytes, pre_snapshot_cmds=[],
                            post_snapshot_cmds=[]):
        """create a consistent (atomic) snapshot of specified datasets, per pool.
        """

        pools = {}

        # collect snapshots that we want to make, per pool
        # self.debug(datasets)
        for dataset in datasets:
            # skip datasets that have not changed enough since their last snapshot
            if not dataset.is_changed_ours(min_changed_bytes):
                dataset.verbose("No changes since {}".format(dataset.our_snapshots[-1].snapshot_name))
                continue

            # force_exist, since we're making it
            snapshot = ZfsDataset(dataset.zfs_node, dataset.name + "@" + snapshot_name, force_exists=True)

            pool = dataset.split_path()[0]
            if pool not in pools:
                pools[pool] = []

            pools[pool].append(snapshot)

            # update cache, but try to prevent an unneeded zfs list
            if self.readonly or CachedProperty.is_cached(dataset, 'snapshots'):
                dataset.snapshots.append(snapshot)  # NOTE: this will trigger zfs list if its not cached

        if not pools:
            self.verbose("No changes anywhere: not creating snapshots.")
            return

        try:
            for cmd in pre_snapshot_cmds:
                self.verbose("Running pre-snapshot-cmd")
                self.run(cmd=shlex.split(cmd), readonly=False)

            # create consistent snapshot per pool
            for (pool_name, snapshots) in pools.items():
                cmd = ["zfs", "snapshot"]

                cmd.extend(map(lambda snapshot_: str(snapshot_), snapshots))

                self.verbose("Creating snapshots {} in pool {}".format(snapshot_name, pool_name))
                self.run(cmd, readonly=False)

        finally:
            # post-snapshot commands are best-effort: always run them, even if
            # snapshotting failed, and never let their failure mask the original error
            for cmd in post_snapshot_cmds:
                self.verbose("Running post-snapshot-cmd")
                try:
                    self.run(cmd=shlex.split(cmd), readonly=False)
                except Exception as e:
                    # NOTE(review): failure is silently swallowed; consider at
                    # least logging a warning here.
                    pass

    def selected_datasets(self, property_name, exclude_received, exclude_paths, exclude_unchanged, min_change):
        """determine filesystems that should be backed up by looking at the special autobackup-property, systemwide

        returns: list of ZfsDataset
        """

        self.debug("Getting selected datasets")

        # get all source filesystems that have the backup property
        lines = self.run(tab_split=True, readonly=True, cmd=[
            "zfs", "get", "-t", "volume,filesystem", "-o", "name,value,source", "-H",
            property_name
        ])

        # The returnlist of selected ZfsDataset's:
        selected_filesystems = []

        # list of sources, used to resolve inherited sources
        sources = {}

        for line in lines:
            (name, value, raw_source) = line
            dataset = ZfsDataset(self, name)

            # "resolve" inherited sources
            # (relies on zfs listing parents before children, so the origin
            # of the inheritance is already in `sources`)
            sources[name] = raw_source
            if raw_source.find("inherited from ") == 0:
                inherited = True
                inherited_from = re.sub("^inherited from ", "", raw_source)
                source = sources[inherited_from]
            else:
                inherited = False
                source = raw_source

            # determine it
            if dataset.is_selected(value=value, source=source, inherited=inherited, exclude_received=exclude_received,
                                   exclude_paths=exclude_paths, exclude_unchanged=exclude_unchanged,
                                   min_change=min_change):
                selected_filesystems.append(dataset)

        return selected_filesystems
|
||||||
63
zfs_autobackup/ZfsPool.py
Normal file
63
zfs_autobackup/ZfsPool.py
Normal file
@ -0,0 +1,63 @@
|
|||||||
|
from .CachedProperty import CachedProperty
|
||||||
|
|
||||||
|
|
||||||
|
class ZfsPool():
    """a zfs pool"""

    def __init__(self, zfs_node, name):
        """
        :param zfs_node: ZfsNode used to execute zpool commands
        :param name: name of the pool
        """

        self.zfs_node = zfs_node
        self.name = name

    def __repr__(self):
        return "{}: {}".format(self.zfs_node, self.name)

    def __str__(self):
        return self.name

    def __eq__(self, obj):
        if not isinstance(obj, ZfsPool):
            return False

        return self.name == obj.name

    # FIX: defining __eq__ implicitly sets __hash__ to None, which made ZfsPool
    # unhashable (unusable in sets or as dict keys). Hash by name, consistent
    # with __eq__.
    def __hash__(self):
        return hash(self.name)

    def verbose(self, txt):
        self.zfs_node.verbose("zpool {}: {}".format(self.name, txt))

    def error(self, txt):
        self.zfs_node.error("zpool {}: {}".format(self.name, txt))

    def debug(self, txt):
        self.zfs_node.debug("zpool {}: {}".format(self.name, txt))

    @CachedProperty
    def properties(self):
        """all zpool properties, as a dict of property name -> value (parseable, -p)"""

        self.debug("Getting zpool properties")

        cmd = [
            "zpool", "get", "-H", "-p", "all", self.name
        ]

        ret = {}

        # -H output: pool-name, property, value, source (tab separated)
        for pair in self.zfs_node.run(tab_split=True, cmd=cmd, readonly=True, valid_exitcodes=[0]):
            ret[pair[1]] = pair[2]

        return ret

    @property
    def features(self):
        """get list of active zpool features"""

        ret = []
        for (key, value) in self.properties.items():
            if key.startswith("feature@"):
                feature = key.split("@")[1]
                if value == 'enabled' or value == 'active':
                    ret.append(feature)

        return ret
|
||||||
9
zfs_autobackup/__init__.py
Executable file
9
zfs_autobackup/__init__.py
Executable file
@ -0,0 +1,9 @@
|
|||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
def cli():
    """Console entry point: run ZfsAutobackup on argv and exit with its fail count."""
    import sys
    from .ZfsAutobackup import ZfsAutobackup

    # run against the command-line arguments and propagate the result
    # (number of failed datasets, or 255 on fatal error) as the exit status
    failed = ZfsAutobackup(sys.argv[1:], False).run()
    sys.exit(failed)
|
||||||
10
zfs_autobackup/__main__.py
Executable file
10
zfs_autobackup/__main__.py
Executable file
@ -0,0 +1,10 @@
|
|||||||
|
# (c)edwin@datux.nl - Released under GPL V3
|
||||||
|
#
|
||||||
|
# Greetings from eth0 2019 :)
|
||||||
|
|
||||||
|
import sys
|
||||||
|
|
||||||
|
# allow running as a script/module: `python -m zfs_autobackup`
if __name__ == "__main__":
    from . import cli
    cli()
|
||||||
|
|
||||||
75
zfs_autobackup/compressors.py
Normal file
75
zfs_autobackup/compressors.py
Normal file
@ -0,0 +1,75 @@
|
|||||||
|
# Adopted from Syncoid :)
|
||||||
|
|
||||||
|
# this software is licensed for use under the Free Software Foundation's GPL v3.0 license, as retrieved
|
||||||
|
# from http://www.gnu.org/licenses/gpl-3.0.html on 2014-11-17. A copy should also be available in this
|
||||||
|
# project's Git repository at https://github.com/jimsalterjrs/sanoid/blob/master/LICENSE.
|
||||||
|
|
||||||
|
# Table of supported transfer compressors. Each entry maps a user-facing name
# to the compress command/args ('cmd'/'args') and the matching decompress
# command/args ('dcmd'/'dargs').
COMPRESS_CMDS = {
    'gzip': {
        'cmd': 'gzip',
        'args': ['-3'],
        'dcmd': 'zcat',
        'dargs': [],
    },
    'pigz-fast': {
        'cmd': 'pigz',
        'args': ['-3'],
        'dcmd': 'pigz',
        'dargs': ['-dc'],
    },
    'pigz-slow': {
        'cmd': 'pigz',
        'args': ['-9'],
        'dcmd': 'pigz',
        'dargs': ['-dc'],
    },
    'zstd-fast': {
        'cmd': 'zstdmt',
        'args': ['-3'],
        'dcmd': 'zstdmt',
        'dargs': ['-dc'],
    },
    'zstd-slow': {
        'cmd': 'zstdmt',
        'args': ['-19'],
        'dcmd': 'zstdmt',
        'dargs': ['-dc'],
    },
    'zstd-adapt': {
        'cmd': 'zstdmt',
        'args': ['--adapt'],
        'dcmd': 'zstdmt',
        'dargs': ['-dc'],
    },
    'xz': {
        'cmd': 'xz',
        'args': [],
        'dcmd': 'xz',
        'dargs': ['-d'],
    },
    'lzo': {
        'cmd': 'lzop',
        'args': [],
        'dcmd': 'lzop',
        'dargs': ['-dfc'],
    },
    'lz4': {
        'cmd': 'lz4',
        'args': [],
        'dcmd': 'lz4',
        'dargs': ['-dc'],
    },
}


def compress_cmd(compressor):
    """Return the argv list that compresses stdin for the given compressor name."""
    entry = COMPRESS_CMDS[compressor]
    return [entry['cmd']] + list(entry['args'])


def decompress_cmd(compressor):
    """Return the argv list that decompresses stdin for the given compressor name."""
    entry = COMPRESS_CMDS[compressor]
    return [entry['dcmd']] + list(entry['dargs'])


def choices():
    """Return the supported compressor names (for argparse choices)."""
    return COMPRESS_CMDS.keys()
|
||||||
Reference in New Issue
Block a user