Compare commits
192 Commits
v3.0-rc7 ... v3.0.1-bet
| SHA1 | Author | Date | |
|---|---|---|---|
| 29e6c056d1 | |||
| deadbe9383 | |||
| 5cbec2e06f | |||
| 66d284f183 | |||
| ae64fd6e99 | |||
| 305bd3008d | |||
| 17fec7d1ee | |||
| f5b0a4f272 | |||
| 06c8416771 | |||
| 4f9b7b6cef | |||
| 0214584e4c | |||
| b6627eb389 | |||
| 48f1f6ec5d | |||
| e33e47c10c | |||
| 01f0078ccf | |||
| 9fad773bfb | |||
| 7493a0bc55 | |||
| 0649f42d66 | |||
| 6fefadf884 | |||
| ce05e1ba4c | |||
| 35584149ff | |||
| 427f74d2f0 | |||
| 9b2c321fe7 | |||
| d02a6df0f3 | |||
| 7fb5a7db92 | |||
| 64e53952fe | |||
| b7ef6c9528 | |||
| b7b3e785ce | |||
| 50070bc70f | |||
| 0fb0faccae | |||
| ab77b91d4e | |||
| bbe7a112fd | |||
| 8a09a49951 | |||
| 8092b08e7f | |||
| 075c96bf47 | |||
| 2cbfa0f38a | |||
| 47c50583c0 | |||
| e40eb71f39 | |||
| fab3bf3b3e | |||
| 1afe2407fa | |||
| 3550100099 | |||
| 9e2a6dba3d | |||
| b31b74aa92 | |||
| 222568ad31 | |||
| 35f739b8dd | |||
| 52f9e0d810 | |||
| 7bbf041a70 | |||
| b6796ded84 | |||
| 930bf6cf50 | |||
| fcc8470758 | |||
| fde4a5ed6a | |||
| 12c45f95c3 | |||
| 10e7e5b95f | |||
| 656b435a7f | |||
| 1c1c6647f1 | |||
| 39514de86a | |||
| 49f6e36749 | |||
| 371de417a4 | |||
| 6965c04dc6 | |||
| 9e645e9237 | |||
| fa372799f5 | |||
| da55436863 | |||
| 4d0db0b5d3 | |||
| 75f5e0ee9f | |||
| d0ab60168b | |||
| b48726185c | |||
| 74da005870 | |||
| 6e0664ad8e | |||
| f508e72f5e | |||
| 4918a2c055 | |||
| e65d1ac860 | |||
| fd7015b77a | |||
| f524845dbb | |||
| 51c15ec618 | |||
| 9fe13a4207 | |||
| 7b8b536d53 | |||
| 122035dfef | |||
| 7b278be0b9 | |||
| cc1a9a3d72 | |||
| eaad31e8b4 | |||
| 470b4aaf55 | |||
| fc3026abdc | |||
| 0b1081e87f | |||
| 8699ec5c69 | |||
| cba6470500 | |||
| d08f7bf3c1 | |||
| d19cb2c842 | |||
| f2b284c407 | |||
| a6cdd4b89e | |||
| 8176326126 | |||
| ad2542e930 | |||
| b926c86a7b | |||
| 915a29b36e | |||
| f363142926 | |||
| 27c598344b | |||
| ce817eb05c | |||
| b97bde3f6d | |||
| fa14dcdce1 | |||
| c34bf22f4e | |||
| 01425e735d | |||
| 56a2f26dfa | |||
| 8729fcac74 | |||
| 6151096dc8 | |||
| 1e8b02db28 | |||
| 6d69c8f2b4 | |||
| fc853622dd | |||
| 37a9f49d8d | |||
| ff33f46cb8 | |||
| e1610b6874 | |||
| 10c45affd7 | |||
| 4d12b8da5f | |||
| 58e098324e | |||
| 1ffd9a15a3 | |||
| e54c275685 | |||
| ee4fade2e4 | |||
| c5f1a87c40 | |||
| 2fe854905d | |||
| 1c86c6f866 | |||
| 8bb9769a8b | |||
| 3ef7c32237 | |||
| c254ad3b82 | |||
| fb7da316f8 | |||
| fedae35221 | |||
| 1cb26d48b6 | |||
| 87e0599130 | |||
| 252086e2e6 | |||
| 4d15f29b5b | |||
| 3bc37d143c | |||
| 4dc4bdbba5 | |||
| d2fe9b9ec7 | |||
| 2143d22ae5 | |||
| 138c913e58 | |||
| 2305fdf033 | |||
| 797fb23baa | |||
| 82c7ac5e53 | |||
| 293ab1d075 | |||
| 50e94baf4e | |||
| 47bd4ed490 | |||
| 0d26420b15 | |||
| 3e243a0916 | |||
| 499ccc6fd0 | |||
| ca294f9dd6 | |||
| 9772fc80cf | |||
| 83905c4614 | |||
| 7a3c309123 | |||
| 022dc1b7fc | |||
| 136289b4d6 | |||
| 5bf49cf19e | |||
| 735938eded | |||
| d83fa2f97f | |||
| b0db6d13cc | |||
| c7762d8163 | |||
| bc17825582 | |||
| b22113aad4 | |||
| 4e1bfd8cba | |||
| 0388026f94 | |||
| b718e282b1 | |||
| b6fb07a436 | |||
| 4f78f0cd22 | |||
| 2fa95f098b | |||
| c864e5ffad | |||
| 6f6a2ceee2 | |||
| 0813a8cef6 | |||
| 55f491915a | |||
| 04971f2f29 | |||
| e1344dd9da | |||
| ea390df6f6 | |||
| 9be1f334cb | |||
| de877362c9 | |||
| 9b1254a6d9 | |||
| c110943f20 | |||
| e94eb11f63 | |||
| 0d498e3f44 | |||
| dd301dc422 | |||
| 9e6d90adfe | |||
| a6b688c976 | |||
| 10f1290ad9 | |||
| b51eefa139 | |||
| 805d7e3536 | |||
| 8f0472e8f5 | |||
| 002aa6a731 | |||
| 8a960389d1 | |||
| c7cd73ae1f | |||
| c8c1d0fd27 | |||
| c090979f3e | |||
| 3a4062c983 | |||
| bcf73c6e5c | |||
| 9cf5ce188a | |||
| a226309ce5 | |||
| 231f41e195 | |||
| 7c1546fb49 | |||
| b1dd2b55f8 |
.github/workflows/python-publish.yml (vendored, new file, 46 lines)
@@ -0,0 +1,46 @@
# This workflow will upload a Python Package using Twine when a release is created
# For more information see: https://help.github.com/en/actions/language-and-framework-guides/using-python-with-github-actions#publishing-to-package-registries

name: Upload Python Package

on:
  release:
    types: [created]

jobs:
  deploy:

    runs-on: ubuntu-latest

    steps:
    - uses: actions/checkout@v2

    - name: Set up Python 3.x
      uses: actions/setup-python@v2
      with:
        python-version: '3.x'

    - name: Set up Python 2.x
      uses: actions/setup-python@v2
      with:
        python-version: '2.x'

    - name: Install dependencies 3.x
      run: |
        python -m pip install --upgrade pip
        pip3 install setuptools wheel twine

    - name: Install dependencies 2.x
      run: |
        python2 -m pip install --upgrade pip
        pip2 install setuptools wheel twine

    - name: Build and publish
      env:
        TWINE_USERNAME: ${{ secrets.TWINE_USERNAME }}
        TWINE_PASSWORD: ${{ secrets.TWINE_PASSWORD }}
      run: |
        python3 setup.py sdist bdist_wheel
        python2 setup.py sdist bdist_wheel
        twine check dist/*
        twine upload dist/*
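For reference, the publish job above boils down to the following manual steps from a repository checkout (Python 3 side only shown; `TWINE_USERNAME`/`TWINE_PASSWORD` are expected in the environment, exactly as the workflow provides them from secrets):

```console
pip3 install setuptools wheel twine
python3 setup.py sdist bdist_wheel
twine check dist/*
twine upload dist/*
```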
.github/workflows/regression.yml (vendored, new file, 76 lines)
@@ -0,0 +1,76 @@
name: Regression tests

on: ["push", "pull_request"]

jobs:

  ubuntu20:
    runs-on: ubuntu-20.04

    steps:
    - name: Checkout
      uses: actions/checkout@v2.3.4

    - name: Prepare
      run: sudo apt update && sudo apt install zfsutils-linux && sudo -H pip3 install coverage unittest2 mock==3.0.5 coveralls

    - name: Regression test
      run: sudo -E ./tests/run_tests

    - name: Coveralls
      env:
        GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
      run: coveralls --service=github

  ubuntu18:
    runs-on: ubuntu-18.04

    steps:
    - name: Checkout
      uses: actions/checkout@v2.3.4

    - name: Prepare
      run: sudo apt update && sudo apt install zfsutils-linux python3-setuptools && sudo -H pip3 install coverage unittest2 mock==3.0.5 coveralls

    - name: Regression test
      run: sudo -E ./tests/run_tests

    - name: Coveralls
      env:
        GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
      run: coveralls --service=github

  ubuntu18_python2:
    runs-on: ubuntu-18.04

    steps:
    - name: Checkout
      uses: actions/checkout@v2.3.4

    - name: Set up Python 2.x
      uses: actions/setup-python@v2
      with:
        python-version: '2.x'

    - name: Prepare
      run: sudo apt update && sudo apt install zfsutils-linux python-setuptools && sudo -H pip install coverage unittest2 mock==3.0.5 coveralls

    - name: Regression test
      run: sudo -E ./tests/run_tests

    - name: Coveralls
      env:
        GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
        COVERALLS_REPO_TOKEN: ${{ secrets.GITHUB_TOKEN }}
      run: coveralls --service=github
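To reproduce the CI run locally on an Ubuntu machine, the Prepare and Regression test steps above come down to the following (root and a disposable machine assumed, since the test suite creates and destroys ZFS pools):

```console
sudo apt update && sudo apt install zfsutils-linux
sudo -H pip3 install coverage unittest2 mock==3.0.5 coveralls
sudo -E ./tests/run_tests
```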
.gitignore (vendored, 6 lines added)
@@ -6,3 +6,9 @@ build/
zfs_autobackup.egg-info
.eggs/
__pycache__
.coverage
*.pyc
python2.env
venv
.idea
OQ
README.md (467 lines changed)

@@ -1,75 +1,65 @@
# ZFS autobackup

## New in v3

* Complete rewrite, cleaner object oriented code.
* Python 3 and 2 support.
* Installable via pip.
* Backwards compatible with your current backups and parameters.
* Progressive thinning (via a destroy schedule. default schedule should be fine for most people)
* Cleaner output, with optional color support (pip install colorama).
* Clear distinction between local and remote output.
* Summary at the beginning, displaying what will happen and the current thinning-schedule.
* More effient destroying/skipping snaphots on the fly. (no more space issues if your backup is way behind)
* Progress indicator (--progress)
* Better property management (--set-properties and --filter-properties)
* Better resume handling, automaticly abort invalid resumes.
* More robust error handling.
* Prepared for future enhanchements.
* Supports raw backups for encryption.
* Custom SSH client config.

[Regression tests](https://github.com/psy0rz/zfs_autobackup/actions?query=workflow%3A%22Regression+tests%22) [Coverage](https://coveralls.io/github/psy0rz/zfs_autobackup) [PyPI](https://pypi.org/project/zfs-autobackup/)

## Introduction

This is a tool I wrote to make replicating ZFS datasets easy and reliable. You can either use it as a backup tool or as a replication tool.
This is a tool I wrote to make replicating ZFS datasets easy and reliable.

You can select what to backup by setting a custom `ZFS property`. This allows you to set and forget: Configure it so it backups your entire pool, and you never have to worry about backupping again. Even new datasets you create later will be backupped.
You can either use it as a **backup** tool, **replication** tool or **snapshot** tool.

Other settings are just specified on the commandline. This also makes it easier to setup and test zfs-autobackup and helps you fix all the issues you might encounter. When you're done you can just copy/paste your command to a cron or script.
You can select what to backup by setting a custom `ZFS property`. This makes it easy to add/remove specific datasets, or just backup your whole pool.

Since its using ZFS commands, you can see what its actually doing by specifying `--debug`. This also helps a lot if you run into some strange problem or error. You can just copy-paste the command that fails and play around with it on the commandline. (also something I missed in other tools)
Other settings are just specified on the commandline: Simply setup and test your zfs-autobackup command and fix all the issues you might encounter. When you're done you can just copy/paste your command to a cron or script.

An imporant feature thats missing from other tools is a reliable `--test` option: This allows you to see what zfs-autobackup will do and tune your parameters. It will do everything, except make changes to your zfs datasets.
Since its using ZFS commands, you can see what its actually doing by specifying `--debug`. This also helps a lot if you run into some strange problem or error. You can just copy-paste the command that fails and play around with it on the commandline. (something I missed in other tools)

Another nice thing is progress reporting with `--progress`. Its very usefull with HUGE datasets, when you want to know how many hours/days it will take.
An important feature thats missing from other tools is a reliable `--test` option: This allows you to see what zfs-autobackup will do and tune your parameters. It will do everything, except make changes to your system.

zfs-autobackup tries to be the easiest to use backup tool for zfs.

## Features

* Works across operating systems: Tested with Linux, FreeBSD/FreeNAS and SmartOS.
* Works in combination with existing replication systems. (Like Proxmox HA)
* Works across operating systems: Tested with **Linux**, **FreeBSD/FreeNAS** and **SmartOS**.
* Low learning curve: no complex daemons or services, no additional software or networking needed. (Only read this page)
* Plays nicely with existing replication systems. (Like Proxmox HA)
* Automatically selects filesystems to backup by looking at a simple ZFS property. (recursive)
* Creates consistent snapshots. (takes all snapshots at once, atomic.)
* Creates consistent snapshots. (takes all snapshots at once, atomicly.)
* Multiple backups modes:
  * Backup local data on the same server.
  * "push" local data to a backup-server via SSH.
  * "pull" remote data from a server via SSH and backup it locally.
  * Or even pull data from a server while pushing the backup to another server.
  * Or even pull data from a server while pushing the backup to another server. (Zero trust between source and target server)
* Can be scheduled via a simple cronjob or run directly from commandline.
* Supports resuming of interrupted transfers. (via the zfs extensible_dataset feature)
* Backups and snapshots can be named to prevent conflicts. (multiple backups from and to the same filesystems are no problem)
* Always creates a new snapshot before starting.
* Supports resuming of interrupted transfers.
* Multiple backups from and to the same datasets are no problem.
* Creates the snapshot before doing anything else. (assuring you at least have a snapshot if all else fails)
* Checks everything but tries continue on non-fatal errors when possible. (Reports error-count when done)
* Ability to 'finish' aborted backups to see what goes wrong.
* Ability to manually 'finish' failed backups to see whats going on.
* Easy to debug and has a test-mode. Actual unix commands are printed.
* Keeps latest X snapshots remote and locally. (default 30, configurable)
* Uses **progressive thinning** for older snapshots.
* Uses zfs-holds on important snapshots so they cant be accidentally destroyed.
* Automatic resuming of failed transfers.
* Can continue from existing common snapshots. (e.g. easy migration)
* Gracefully handles destroyed datasets on source.
* Easy installation:
  * Just install zfs-autobackup via pip, or download it manually.
  * Written in python and uses zfs-commands, no 3rd party dependency's or libraries.
  * Written in python and uses zfs-commands, no 3rd party dependency's or libraries needed.
  * No separate config files or properties. Just one zfs-autobackup command you can copy/paste in your backup script.

## Installation

### Using pip

The recommended way on most servers is to use pip:
The recommended way on most servers is to use [pip](https://pypi.org/project/zfs-autobackup/):

```console
[root@server ~]# pip install zfs-autobackup
[root@server ~]# pip install --upgrade zfs-autobackup
```

This can also be used to upgrade zfs-autobackup to the newest stable version.

### Using easy_install

On older servers you might have to use easy_install

@@ -78,17 +68,9 @@ On older servers you might have to use easy_install
[root@server ~]# easy_install zfs-autobackup
```

### Direct download

Its also possible to just download <https://raw.githubusercontent.com/psy0rz/zfs_autobackup/master/bin/zfs-autobackup> and run it directly.

The only requirement that is sometimes missing is the `argparse` python module. Optionally you can install `colorma` for colors.

It should work with python 2.7 and higher.

## Example

In this example we're going to backup a machine called `pve` to a machine called `backup`.
In this example we're going to backup a machine called `server1` to a machine called `backup`.

### Setup SSH login

@@ -96,7 +78,7 @@ zfs-autobackup needs passwordless login via ssh. This means generating an ssh ke

#### Generate SSH key on `backup`

On the server that runs zfs-autobackup you need to create an SSH key. You only need to do this once.
On the backup-server that runs zfs-autobackup you need to create an SSH key. You only need to do this once.

Use the `ssh-keygen` command and leave the passphrase empty:

@@ -125,14 +107,14 @@ The key's randomart image is:
root@backup:~#
```

#### Copy SSH key to `pve`
#### Copy SSH key to `server1`

Now you need to copy the public part of the key to `pve`
Now you need to copy the public part of the key to `server1`

The `ssh-copy-id` command is a handy tool to automate this. It will just ask for your password.

```console
root@backup:~# ssh-copy-id root@pve.server.com
root@backup:~# ssh-copy-id root@server1.server.com
/usr/bin/ssh-copy-id: INFO: Source of key(s) to be installed: "/root/.ssh/id_rsa.pub"
/usr/bin/ssh-copy-id: INFO: attempting to log in with the new key(s), to filter out any that are already installed
/usr/bin/ssh-copy-id: INFO: 1 key(s) remain to be installed -- if you are prompted now it is to install the new keys
@@ -140,11 +122,12 @@ Password:

Number of key(s) added: 1

Now try logging into the machine, with: "ssh 'root@pve.server.com'"
Now try logging into the machine, with: "ssh 'root@server1.server.com'"
and check to make sure that only the key(s) you wanted were added.

root@backup:~#
```

This allows the backup-server to login to `server1` as root without password.

### Select filesystems to backup

@@ -153,27 +136,27 @@ Its important to choose a unique and consistent backup name. In this case we nam

On the source zfs system set the ```autobackup:offsite1``` zfs property to true:

```console
[root@pve ~]# zfs set autobackup:offsite1=true rpool
[root@pve ~]# zfs get -t filesystem,volume autobackup:offsite1
[root@server1 ~]# zfs set autobackup:offsite1=true rpool
[root@server1 ~]# zfs get -t filesystem,volume autobackup:offsite1
NAME PROPERTY VALUE SOURCE
rpool autobackup:offsite1 true local
rpool/ROOT autobackup:offsite1 true inherited from rpool
rpool/ROOT/pve-1 autobackup:offsite1 true inherited from rpool
rpool/ROOT/server1-1 autobackup:offsite1 true inherited from rpool
rpool/data autobackup:offsite1 true inherited from rpool
rpool/data/vm-100-disk-0 autobackup:offsite1 true inherited from rpool
rpool/swap autobackup:offsite1 true inherited from rpool
...
```

Because we dont want to backup everything, we can exclude certain filesystem by setting the property to false:
Because we don't want to backup everything, we can exclude certain filesystem by setting the property to false:

```console
[root@pve ~]# zfs set autobackup:offsite1=false rpool/swap
[root@pve ~]# zfs get -t filesystem,volume autobackup:offsite1
[root@server1 ~]# zfs set autobackup:offsite1=false rpool/swap
[root@server1 ~]# zfs get -t filesystem,volume autobackup:offsite1
NAME PROPERTY VALUE SOURCE
rpool autobackup:offsite1 true local
rpool/ROOT autobackup:offsite1 true inherited from rpool
rpool/ROOT/pve-1 autobackup:offsite1 true inherited from rpool
rpool/ROOT/server1-1 autobackup:offsite1 true inherited from rpool
rpool/data autobackup:offsite1 true inherited from rpool
rpool/data/vm-100-disk-0 autobackup:offsite1 true inherited from rpool
rpool/swap autobackup:offsite1 false local
@@ -182,30 +165,30 @@ rpool/swap autobackup:offsite1 false

### Running zfs-autobackup

Run the script on the backup server and pull the data from the server specfied by --ssh-source.
Run the script on the backup server and pull the data from the server specified by --ssh-source.

```console
[root@backup ~]# zfs-autobackup --ssh-source pve.server.com offsite1 backup/pve --progress --verbose
[root@backup ~]# zfs-autobackup --ssh-source server1.server.com offsite1 backup/server1 --progress --verbose

#### Settings summary
[Source] Datasets on: pve.server.com
[Source] Datasets on: server1.server.com
[Source] Keep the last 10 snapshots.
[Source] Keep oldest of 1 day, delete after 1 week.
[Source] Keep oldest of 1 week, delete after 1 month.
[Source] Keep oldest of 1 month, delete after 1 year.
[Source] Keep every 1 day, delete after 1 week.
[Source] Keep every 1 week, delete after 1 month.
[Source] Keep every 1 month, delete after 1 year.
[Source] Send all datasets that have 'autobackup:offsite1=true' or 'autobackup:offsite1=child'

[Target] Datasets are local
[Target] Keep the last 10 snapshots.
[Target] Keep oldest of 1 day, delete after 1 week.
[Target] Keep oldest of 1 week, delete after 1 month.
[Target] Keep oldest of 1 month, delete after 1 year.
[Target] Receive datasets under: backup/pve
[Target] Keep every 1 day, delete after 1 week.
[Target] Keep every 1 week, delete after 1 month.
[Target] Keep every 1 month, delete after 1 year.
[Target] Receive datasets under: backup/server1

#### Selecting
[Source] rpool: Selected (direct selection)
[Source] rpool/ROOT: Selected (inherited selection)
[Source] rpool/ROOT/pve-1: Selected (inherited selection)
[Source] rpool/ROOT/server1-1: Selected (inherited selection)
[Source] rpool/data: Selected (inherited selection)
[Source] rpool/data/vm-100-disk-0: Selected (inherited selection)
[Source] rpool/swap: Ignored (disabled)
@@ -217,13 +200,13 @@ Run the script on the backup server and pull the data from the server specfied b
[Source] Creating snapshot offsite1-20200218180123

#### Sending and thinning
[Target] backup/pve/rpool/ROOT/pve-1@offsite1-20200218175435: receiving full
[Target] backup/pve/rpool/ROOT/pve-1@offsite1-20200218175547: receiving incremental
[Target] backup/pve/rpool/ROOT/pve-1@offsite1-20200218175706: receiving incremental
[Target] backup/pve/rpool/ROOT/pve-1@offsite1-20200218180049: receiving incremental
[Target] backup/pve/rpool/ROOT/pve-1@offsite1-20200218180123: receiving incremental
[Target] backup/pve/rpool/data@offsite1-20200218175435: receiving full
[Target] backup/pve/rpool/data/vm-100-disk-0@offsite1-20200218175435: receiving full
[Target] backup/server1/rpool/ROOT/server1-1@offsite1-20200218175435: receiving full
[Target] backup/server1/rpool/ROOT/server1-1@offsite1-20200218175547: receiving incremental
[Target] backup/server1/rpool/ROOT/server1-1@offsite1-20200218175706: receiving incremental
[Target] backup/server1/rpool/ROOT/server1-1@offsite1-20200218180049: receiving incremental
[Target] backup/server1/rpool/ROOT/server1-1@offsite1-20200218180123: receiving incremental
[Target] backup/server1/rpool/data@offsite1-20200218175435: receiving full
[Target] backup/server1/rpool/data/vm-100-disk-0@offsite1-20200218175435: receiving full
...
```

@@ -235,22 +218,181 @@ Its also possible to let a server push its backup to the backup-server. However

Now every time you run the command, zfs-autobackup will create a new snapshot and replicate your data.

Older snapshots will evertually be deleted, depending on the `--keep-source` and `--keep-target` settings. (The defaults are shown above under the 'Settings summary')
Older snapshots will eventually be deleted, depending on the `--keep-source` and `--keep-target` settings. (The defaults are shown above under the 'Settings summary')

Once you've got the correct settings for your situation, you can just store the command in a cronjob.

Or just create a script and run it manually when you need it.
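For example, a nightly pull-backup cronjob on the backup server could look something like this (the schedule and log path are illustrative, not something zfs-autobackup prescribes):

```console
# /etc/cron.d/zfs-autobackup
0 3 * * * root zfs-autobackup --ssh-source server1.server.com offsite1 backup/server1 --verbose >>/var/log/zfs-autobackup.log 2>&1
```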
## Use as snapshot tool

You can use zfs-autobackup to only make snapshots.

Just dont specify the target-path:

```console
root@ws1:~# zfs-autobackup test --verbose
zfs-autobackup v3.0 - Copyright 2020 E.H.Eefting (edwin@datux.nl)

#### Source settings
[Source] Datasets are local
[Source] Keep the last 10 snapshots.
[Source] Keep every 1 day, delete after 1 week.
[Source] Keep every 1 week, delete after 1 month.
[Source] Keep every 1 month, delete after 1 year.
[Source] Selects all datasets that have property 'autobackup:test=true' (or childs of datasets that have 'autobackup:test=child')

#### Selecting
[Source] test_source1/fs1: Selected (direct selection)
[Source] test_source1/fs1/sub: Selected (inherited selection)
[Source] test_source2/fs2: Ignored (only childs)
[Source] test_source2/fs2/sub: Selected (inherited selection)

#### Snapshotting
[Source] Creating snapshots test-20200710125958 in pool test_source1
[Source] Creating snapshots test-20200710125958 in pool test_source2

#### Thinning source
[Source] test_source1/fs1@test-20200710125948: Destroying
[Source] test_source1/fs1/sub@test-20200710125948: Destroying
[Source] test_source2/fs2/sub@test-20200710125948: Destroying

#### All operations completed successfully
(No target_path specified, only operated as snapshot tool.)
```

This also allows you to make several snapshots during the day, but only backup the data at night when the server is not busy.

## Thinning out obsolete snapshots

The thinner is the thing that destroys old snapshots on the source and target.

The thinner operates "stateless": There is nothing in the name or properties of a snapshot that indicates how long it will be kept. Everytime zfs-autobackup runs, it will look at the timestamp of all the existing snapshots. From there it will determine which snapshots are obsolete according to your schedule. The advantage of this stateless system is that you can always change the schedule.

Note that the thinner will ONLY destroy snapshots that are matching the naming pattern of zfs-autobackup. If you use `--other-snapshots`, it wont destroy those snapshots after replicating them to the target.

### Destroying missing datasets

When a dataset has been destroyed or deselected on the source, but still exists on the target we call it a missing dataset. Missing datasets will be still thinned out according to the schedule.

The final snapshot will never be destroyed, unless you specify a **deadline** with the `--destroy-missing` option:

In that case it will look at the last snapshot we took and determine if is older than the deadline you specified. e.g: `--destroy-missing 30d` will start destroying things 30 days after the last snapshot.
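For example, building on the pull-backup example above, a 30 day deadline would be specified like this (the deadline value is just an illustration):

```console
[root@backup ~]# zfs-autobackup --ssh-source server1.server.com offsite1 backup/server1 --destroy-missing 30d --verbose
```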
#### After the deadline

When the deadline is passed, all our snapshots, except the last one will be destroyed. Irregardless of the normal thinning schedule.

The dataset has to have the following properties to be finally really destroyed:

* The dataset has no direct child-filesystems or volumes.
* The only snapshot left is the last one created by zfs-autobackup.
* The remaining snapshot has no clones.

### Thinning schedule

The default thinning schedule is: `10,1d1w,1w1m,1m1y`.

The schedule consists of multiple rules separated by a `,`

A plain number specifies how many snapshots you want to always keep, regardless of time or interval.

The format of the other rules is: `<Interval><TTL>`.

* Interval: The minimum interval between the snapshots. Snapshots with intervals smaller than this will be destroyed.
* TTL: The maximum time to life time of a snapshot, after that they will be destroyed.
* These are the time units you can use for interval and TTL:
  * `y`: Years
  * `m`: Months
  * `d`: Days
  * `h`: Hours
  * `min`: Minutes
  * `s`: Seconds

Since this might sound very complicated, the `--verbose` option will show you what it all means:

```console
[Source] Keep the last 10 snapshots.
[Source] Keep every 1 day, delete after 1 week.
[Source] Keep every 1 week, delete after 1 month.
[Source] Keep every 1 month, delete after 1 year.
```
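Written out as commandline options, that default corresponds to `--keep-source=10,1d1w,1w1m,1m1y --keep-target=10,1d1w,1w1m,1m1y`. As an illustrative (not recommended) variation, the target below additionally keeps one snapshot per year for 5 years:

```console
[root@backup ~]# zfs-autobackup --ssh-source server1.server.com offsite1 backup/server1 \
    --keep-source=10,1d1w,1w1m,1m1y --keep-target=10,1d1w,1w1m,1m1y,1y5y --verbose
```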
A snapshot will only be destroyed if it not needed anymore by ANY of the rules.

You can specify as many rules as you need. The order of the rules doesn't matter.

Keep in mind its up to you to actually run zfs-autobackup often enough: If you want to keep hourly snapshots, you have to make sure you at least run it every hour.

However, its no problem if you run it more or less often than that: The thinner will still keep an optimal set of snapshots to match your schedule as good as possible.

If you want to keep as few snapshots as possible, just specify 0. (`--keep-source=0` for example)

If you want to keep ALL the snapshots, just specify a very high number.

### More details about the Thinner

We will give a practical example of how the thinner operates.

Say we want have 3 thinner rules:

* We want to keep daily snapshots for 7 days.
* We want to keep weekly snapshots for 4 weeks.
* We want to keep monthly snapshots for 12 months.

So far we have taken 4 snapshots at random moments:

![thinner](doc/thinner.png)

For every rule, the thinner will divide the timeline in blocks and assign each snapshot to a block.

A block can only be assigned one snapshot: If multiple snapshots fall into the same block, it only assigns it to the oldest that we want to keep.

The colors show to which block a snapshot belongs:

* Snapshot 1: This snapshot belongs to daily block 1, weekly block 0 and monthly block 0. However the daily block is too old.
* Snapshot 2: Since weekly block 0 and monthly block 0 already have a snapshot, it only belongs to daily block 4.
* Snapshot 3: This snapshot belongs to daily block 8 and weekly block 1.
* Snapshot 4: Since daily block 8 already has a snapshot, this one doesn't belong to anything and can be deleted right away. (it will be keeped for now since its the last snapshot)

zfs-autobackup will re-evaluate this on every run: As soon as a snapshot doesn't belong to any block anymore it will be destroyed.

Snapshots on the source that still have to be send to the target wont be destroyed off course. (If the target still wants them, according to the target schedule)
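To make the "blocks" concrete: for the daily rule you can think of a block number as the snapshot timestamp divided by 86400 seconds. (Counting blocks from the Unix epoch is an assumption made here for illustration; the important point is only that two snapshots whose timestamps give the same result compete for the same block.)

```console
# snapshot taken 2020-02-18 18:01:23 UTC
$ date -u -d "2020-02-18 18:01:23" +%s
1582048883
$ echo $(( 1582048883 / 86400 ))    # daily block number
18310
```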
## Tips

* Use ```--debug``` if something goes wrong and you want to see the commands that are executed. This will also stop at the first error.
* You can split up the snapshotting and sending tasks by creating two cronjobs. Use ```--no-send``` for the snapshotter-cronjob and use ```--no-snapshot``` for the send-cronjob. This is usefull if you only want to send at night or if your send take too long.
* You can split up the snapshotting and sending tasks by creating two cronjobs. Create a separate snapshotter-cronjob by just omitting target-path. (see the example below)
* Set the ```readonly``` property of the target filesystem to ```on```. This prevents changes on the target side. (Normally, if there are changes the next backup will fail and will require a zfs rollback.) Note that readonly means you cant change the CONTENTS of the dataset directly. Its still possible to receive new datasets and manipulate properties etc.
* Use ```--clear-refreservation``` to save space on your backup server.
* Use ```--clear-mountpoint``` to prevent the target server from mounting the backupped filesystem in the wrong place during a reboot.
* Use ```--resume``` to be able to resume aborted backups. (not all zfs versions support this)
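A sketch of such a split setup, reusing the pull example from this page (the hourly/nightly schedule is illustrative):

```console
# cronjob on server1: only snapshot, every hour (no target-path given)
0 * * * * root zfs-autobackup offsite1 --verbose
# cronjob on the backup server: pull and send once a night, without snapshotting
0 3 * * * root zfs-autobackup --ssh-source server1.server.com offsite1 backup/server1 --no-snapshot --verbose
```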
### Speeding up SSH
### Performance tips

If you have a large number of datasets its important to keep the following tips in mind.

#### Some statistics

To get some idea of how fast zfs-autobackup is, I did some test on my laptop, with a SKHynix_HFS512GD9TNI-L2B0B disk. I'm using zfs 2.0.2.

I created 100 empty datasets and measured the total runtime of zfs-autobackup. I used all the performance tips below. (--no-holds, --allow-empty, ssh ControlMaster)

* without ssh: 15 seconds. (>6 datasets/s)
* either ssh-target or ssh-source=localhost: 20 seconds (5 datasets/s)
* both ssh-target and ssh-source=localhost: 24 seconds (4 datasets/s)

To be bold I created 2500 datasets, but that also was no problem. So it seems it should be possible to use zfs-autobackup with thousands of datasets.

If you need more performance let me know.

NOTE: There is actually a performance regression in ZFS version 2: https://github.com/openzfs/zfs/issues/11560 Use --no-progress as workaround.

#### Less work

You can make zfs-autobackup generate less work by using --no-holds and --allow-empty.

This saves a lot of extra zfs-commands per dataset.

#### Speeding up SSH

You can make your ssh connections persistent and greatly speed up zfs-autobackup:
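The actual snippet falls outside this hunk; judging from the ssh client config shown further down this page, it presumably amounts to something like this in the backup user's `~/.ssh/config`:

```
Host *
    ControlPath ~/.ssh/control-master-%r@%h:%p
    ControlMaster auto
    ControlPersist 3600
    Compression yes
```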
@@ -292,23 +434,26 @@ Here you find all the options:

usage: zfs-autobackup [-h] [--ssh-config SSH_CONFIG] [--ssh-source SSH_SOURCE]
                      [--ssh-target SSH_TARGET] [--keep-source KEEP_SOURCE]
                      [--keep-target KEEP_TARGET] [--other-snapshots]
                      [--no-snapshot] [--no-send] [--allow-empty]
                      [--ignore-replicated] [--no-holds] [--resume]
                      [--no-snapshot] [--no-send] [--min-change MIN_CHANGE]
                      [--allow-empty] [--ignore-replicated] [--no-holds]
                      [--strip-path STRIP_PATH] [--clear-refreservation]
                      [--clear-mountpoint]
                      [--filter-properties FILTER_PROPERTIES]
                      [--set-properties SET_PROPERTIES] [--rollback]
                      [--ignore-transfer-errors] [--raw] [--test] [--verbose]
                      [--debug] [--debug-output] [--progress]
                      backup_name target_path
                      [--destroy-incompatible] [--ignore-transfer-errors]
                      [--raw] [--test] [--verbose] [--debug] [--debug-output]
                      [--progress]
                      backup-name [target-path]

zfs-autobackup v3.0-rc6 - Copyright 2020 E.H.Eefting (edwin@datux.nl)
zfs-autobackup v3.0-rc12 - Copyright 2020 E.H.Eefting (edwin@datux.nl)

positional arguments:
  backup_name           Name of the backup (you should set the zfs property
  backup-name           Name of the backup (you should set the zfs property
                        "autobackup:backup-name" to true on filesystems you
                        want to backup
  target_path           Target ZFS filesystem
  target-path           Target ZFS filesystem (optional: if not specified,
                        zfs-autobackup will only operate as snapshot-tool on
                        source)

optional arguments:
  -h, --help            show this help message and exit
@@ -328,25 +473,23 @@ optional arguments:
                        10,1d1w,1w1m,1m1y
  --other-snapshots     Send over other snapshots as well, not just the ones
                        created by this tool.
  --no-snapshot         Dont create new snapshots (usefull for finishing
  --no-snapshot         Don't create new snapshots (useful for finishing
                        uncompleted backups, or cleanups)
  --no-send             Dont send snapshots (usefull for cleanups, or if you
  --no-send             Don't send snapshots (useful for cleanups, or if you
                        want a serperate send-cronjob)
  --min-change MIN_CHANGE
                        Number of bytes written after which we consider a
                        dataset changed (default 1)
  --allow-empty         If nothing has changed, still create empty snapshots.
                        (same as --min-change=0)
  --ignore-replicated   Ignore datasets that seem to be replicated some other
                        way. (No changes since lastest snapshot. Usefull for
                        way. (No changes since lastest snapshot. Useful for
                        proxmox HA replication)
  --no-holds            Dont lock snapshots on the source. (Usefull to allow
  --no-holds            Don't lock snapshots on the source. (Useful to allow
                        proxmox HA replication to switches nodes)
  --resume              Support resuming of interrupted transfers by using the
                        zfs extensible_dataset feature (both zpools should
                        have it enabled) Disadvantage is that you need to use
                        zfs recv -A if another snapshot is created on the
                        target during a receive. Otherwise it will keep
                        failing.
  --strip-path STRIP_PATH
                        Number of directory to strip from path (use 1 when
                        cloning zones between 2 SmartOS machines)
                        Number of directories to strip from target path (use 1
                        when cloning zones between 2 SmartOS machines)
  --clear-refreservation
                        Filter "refreservation" property. (recommended, safes
                        space. same as --filter-properties refreservation)
@@ -354,19 +497,22 @@ optional arguments:
                        (recommended, prevents mount conflicts. same as --set-
                        properties canmount=noauto)
  --filter-properties FILTER_PROPERTIES
                        List of propererties to "filter" when receiving
                        List of properties to "filter" when receiving
                        filesystems. (you can still restore them with zfs
                        inherit -S)
  --set-properties SET_PROPERTIES
                        List of propererties to override when receiving
                        filesystems. (you can still restore them with zfs
                        inherit -S)
  --rollback            Rollback changes on the target before starting a
                        backup. (normally you can prevent changes by setting
  --rollback            Rollback changes to the latest target snapshot before
                        starting. (normally you can prevent changes by setting
                        the readonly property on the target_path to on)
  --destroy-incompatible
                        Destroy incompatible snapshots on target. Use with
                        care! (implies --rollback)
  --ignore-transfer-errors
                        Ignore transfer errors (still checks if received
                        filesystem exists. usefull for acltype errors)
                        filesystem exists. useful for acltype errors)
  --raw                 For encrypted datasets, send data exactly as it exists
                        on disk.
  --test                dont change anything, just show what would be done
@@ -375,7 +521,8 @@ optional arguments:
  --debug               Show zfs commands that are executed, stops after an
                        exception.
  --debug-output        Show zfs commands and their output/exit codes. (noisy)
  --progress            show zfs progress output (to stderr)
  --progress            show zfs progress output (to stderr). Enabled by
                        default on ttys.

When a filesystem fails, zfs_backup will continue and report the number of
failures at that end. Also the exit code will indicate the number of failures.
@@ -389,10 +536,15 @@ You forgot to setup automatic login via SSH keys, look in the example how to do

### It says 'cannot receive incremental stream: invalid backup stream'

This usually means you've created a new snapshot on the target side during a backup:
This usually means you've created a new snapshot on the target side during a backup. If you restart zfs-autobackup, it will automaticly abort the invalid partially received snapshot and start over.

* Solution 1: Restart zfs-autobackup and make sure you dont use --resume. If you did use --resume, be sure to "abort" the recveive on the target side with zfs recv -A.
* Solution 2: Destroy the newly created snapshot and restart zfs-autobackup.

### It says 'cannot receive incremental stream: destination has been modified since most recent snapshot'

This means files have been modified on the target side somehow.

You can use --rollback to automaticly rollback such changes.

Note: This usually happens if the source-side has a non-standard mountpoint for a dataset, and you're using --clear-mountpoint. In this case the target side creates a mountpoint in the parent dataset, causing the change.

### It says 'internal error: Invalid argument'

@@ -420,51 +572,92 @@ Put this command directly after the zfs_backup command in your cronjob:

zabbix-job-status backup_smartos01_fs1 daily $?
```

This will update the zabbix server with the exitcode and will also alert you if the job didnt run for more than 2 days.
This will update the zabbix server with the exit code and will also alert you if the job didn't run for more than 2 days.

## Backuping up a proxmox cluster with HA replication
## Backup a proxmox cluster with HA replication

Due to the nature of proxmox we had to make a few enhancements to zfs-autobackup. This will probably also benefit other systems that use their own replication in combination with zfs-autobackup.

All data under rpool/data can be on multiple nodes of the cluster. The naming of those filesystem is unique over the whole cluster. Because of this we should backup rpool/data of all nodes to the same destination. This way we wont have duplicate backups of the filesystems that are replicated. Because of various options, you can even migrate hosts and zfs-autobackup will be fine. (and it will get the next backup from the new node automaticly)
All data under rpool/data can be on multiple nodes of the cluster. The naming of those filesystem is unique over the whole cluster. Because of this we should backup rpool/data of all nodes to the same destination. This way we wont have duplicate backups of the filesystems that are replicated. Because of various options, you can even migrate hosts and zfs-autobackup will be fine. (and it will get the next backup from the new node automatically)

In the example below we have 3 nodes, named h4, h5 and h6.

The backup will go to a machine named smartos03.
In the example below we have 3 nodes, named pve1, pve2 and pve3.

### Preparing the proxmox nodes

On each node select the filesystems as following:
No preparation is needed, the script will take care of everything. You only need to setup the ssh keys, so that the backup server can access the proxmox server.

```console
root@h4:~# zfs set autobackup:h4_smartos03=true rpool
root@h4:~# zfs set autobackup:h4_smartos03=false rpool/data
root@h4:~# zfs set autobackup:data_smartos03=child rpool/data
TIP: make sure your backup server is firewalled and cannot be reached from any production machine.

### SSH config on backup server

I use ~/.ssh/config to specify how to reach the various hosts.

In this example we are making an offsite copy and use portforwarding to reach the proxmox machines:

```
Host *
    ControlPath ~/.ssh/control-master-%r@%h:%p
    ControlMaster auto
    ControlPersist 3600
    Compression yes

Host pve1
    Hostname some.host.com
    Port 10001

Host pve2
    Hostname some.host.com
    Port 10002

Host pve3
    Hostname some.host.com
    Port 10003
```

* rpool will be backuped the usual way, and is named h4_smartos03. (each node will have a unique name)
* rpool/data will be excluded from the usual backup
* The CHILDREN of rpool/data be selected for a cluster wide backup named data_smartos03. (each node uses the same backup name)
### Backup script

### Preparing the backup server
I use the following backup script on the backup server.

Extra options needed for proxmox with HA:

* --no-holds: To allow proxmox to destroy our snapshots if a VM migrates to another node.
* --ignore-replicated: To ignore the replicated filesystems of proxmox on the receiving proxmox nodes. (e.g: only backup from the node where the VM is active)

I use the following backup script on the backup server:
Adjust the variables HOSTS TARGET and NAME to your needs.

```shell
for H in h4 h5 h6; do
  echo "################################### DATA $H"
  #backup data filesystems to a common place
  ./zfs-autobackup --ssh-source root@$H data_smartos03 zones/backup/zfsbackups/pxe1_data --clear-refreservation --clear-mountpoint --ignore-transfer-errors --strip-path 2 --verbose --resume --ignore-replicated --no-holds $@
  zabbix-job-status backup_$H""_data_smartos03 daily $? >/dev/null 2>/dev/null
#!/bin/bash

HOSTS="pve1 pve2 pve3"
TARGET=rpool/pvebackups
NAME=prox

zfs create -p $TARGET/data &>/dev/null
for HOST in $HOSTS; do

  echo "################################### RPOOL $HOST"

  # enable backup
  ssh $HOST "zfs set autobackup:rpool_$NAME=child rpool/ROOT"

  #backup rpool to specific directory per host
  zfs create -p $TARGET/rpools/$HOST &>/dev/null
  zfs-autobackup --keep-source=1d1w,1w1m --ssh-source $HOST rpool_$NAME $TARGET/rpools/$HOST --clear-mountpoint --clear-refreservation --ignore-transfer-errors --strip-path 2 --verbose --no-holds $@

  zabbix-job-status backup_$HOST""_rpool_$NAME daily $? >/dev/null 2>/dev/null

  echo "################################### DATA $HOST"

  # enable backup
  ssh $HOST "zfs set autobackup:data_$NAME=child rpool/data"

  #backup data filesystems to a common directory
  zfs-autobackup --keep-source=1d1w,1w1m --ssh-source $HOST data_$NAME $TARGET/data --clear-mountpoint --clear-refreservation --ignore-transfer-errors --strip-path 2 --verbose --ignore-replicated --min-change 200000 --no-holds $@

  zabbix-job-status backup_$HOST""_data_$NAME daily $? >/dev/null 2>/dev/null

  echo "################################### RPOOL $H"
  #backup rpool to own place
  ./zfs-autobackup --ssh-source root@$H $H""_smartos03 zones/backup/zfsbackups/$H --verbose --clear-refreservation --clear-mountpoint --resume --ignore-transfer-errors $@
  zabbix-job-status backup_$H""_smartos03 daily $? >/dev/null 2>/dev/null
done
```

This script will also send the backup status to Zabbix. (if you've installed my zabbix-job-status script)

# Sponsor list

This project was sponsorred by:

* (None so far)
bin/zfs-autobackup (1574 lines changed; file diff suppressed because it is too large)

@@ -1 +0,0 @@
zfs-autobackup

doc/thinner.odg (BIN, new file; binary file not shown)

doc/thinner.png (BIN, new file; binary file not shown. After: 22 KiB)
requirements.txt (new file, 6 lines)

@@ -0,0 +1,6 @@
colorama
argparse
coverage==4.5.4
python-coveralls
unittest2
mock
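These look like the development and test dependencies (the README above notes the tool itself only optionally needs `colorama`); assuming that is the intent, a development environment would install them with:

```console
pip install -r requirements.txt
```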
@@ -14,4 +14,3 @@ source tokentest

python3 -m twine check dist/*
python3 -m twine upload --repository-url https://test.pypi.org/legacy/ dist/* --verbose
setup.py (14 lines changed)

@@ -1,15 +1,13 @@
import setuptools
import bin.zfs_autobackup
from zfs_autobackup.ZfsAutobackup import ZfsAutobackup
import os

os.system("git tag -m ' ' -a v{}".format(bin.zfs_autobackup.VERSION))

with open("README.md", "r") as fh:
    long_description = fh.read()

setuptools.setup(
    name="zfs_autobackup",
    version=bin.zfs_autobackup.VERSION,
    version=ZfsAutobackup.VERSION,
    author="Edwin Eefting",
    author_email="edwin@datux.nl",
    description="ZFS autobackup is used to periodicly backup ZFS filesystems to other locations. It tries to be the most friendly to use and easy to debug ZFS backup tool.",
@@ -17,8 +15,14 @@ setup
    long_description_content_type="text/markdown",

    url="https://github.com/psy0rz/zfs_autobackup",
    scripts=["bin/zfs-autobackup"],
    entry_points={
        'console_scripts':
            [
                'zfs-autobackup = zfs_autobackup:cli',
            ]
    },
    packages=setuptools.find_packages(),

    classifiers=[
        "Programming Language :: Python :: 2",
        "Programming Language :: Python :: 3",
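With the switch from `scripts=` to `entry_points`, the `zfs-autobackup` command is now generated by setuptools and resolves to `zfs_autobackup:cli`. Assuming a source checkout, the wiring can be checked with an editable install:

```console
pip install -e .
zfs-autobackup --help
```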
tests/basetest.py (new file, 91 lines)

@@ -0,0 +1,91 @@

import subprocess
import random

#default test stuff
import unittest2
import subprocess
import time
from pprint import *
from zfs_autobackup.ZfsAutobackup import *
from mock import *
import contextlib
import sys
import io

TEST_POOLS="test_source1 test_source2 test_target1"
ZFS_USERSPACE= subprocess.check_output("dpkg-query -W zfsutils-linux |cut -f2", shell=True).decode('utf-8').rstrip()
ZFS_KERNEL= subprocess.check_output("modinfo zfs|grep ^version |sed 's/.* //'", shell=True).decode('utf-8').rstrip()

print("###########################################")
print("#### Unit testing against:")
print("#### Python :"+sys.version.replace("\n", " "))
print("#### ZFS userspace :"+ZFS_USERSPACE)
print("#### ZFS kernel :"+ZFS_KERNEL)
print("#############################################")


# for python2 compatibility
if sys.version_info.major==2:
    OutputIO=io.BytesIO
else:
    OutputIO=io.StringIO


# for python2 compatibility (python 3 has this already)
@contextlib.contextmanager
def redirect_stdout(target):
    original = sys.stdout
    try:
        sys.stdout = target
        yield
    finally:
        sys.stdout = original

# for python2 compatibility (python 3 has this already)
@contextlib.contextmanager
def redirect_stderr(target):
    original = sys.stderr
    try:
        sys.stderr = target
        yield
    finally:
        sys.stderr = original


def shelltest(cmd):
    """execute and print result as nice copypastable string for unit tests (adds extra newlines on top/bottom)"""
    ret=(subprocess.check_output(cmd , shell=True).decode('utf-8'))
    print("######### result of: {}".format(cmd))
    print(ret)
    print("#########")
    ret='\n'+ret
    return(ret)

def prepare_zpools():
    print("Preparing zfs filesystems...")

    #need ram blockdevice
    subprocess.check_call("modprobe brd rd_size=512000", shell=True)

    #remove old stuff
    subprocess.call("zpool destroy test_source1 2>/dev/null", shell=True)
    subprocess.call("zpool destroy test_source2 2>/dev/null", shell=True)
    subprocess.call("zpool destroy test_target1 2>/dev/null", shell=True)

    #create pools
    subprocess.check_call("zpool create test_source1 /dev/ram0", shell=True)
    subprocess.check_call("zpool create test_source2 /dev/ram1", shell=True)
    subprocess.check_call("zpool create test_target1 /dev/ram2", shell=True)

    #create test structure
    subprocess.check_call("zfs create -p test_source1/fs1/sub", shell=True)
    subprocess.check_call("zfs create -p test_source2/fs2/sub", shell=True)
    subprocess.check_call("zfs create -p test_source2/fs3/sub", shell=True)
    subprocess.check_call("zfs set autobackup:test=true test_source1/fs1", shell=True)
    subprocess.check_call("zfs set autobackup:test=child test_source2/fs2", shell=True)

    print("Prepare done")
tests/run_tests (new executable file, 28 lines)

@@ -0,0 +1,28 @@
#!/bin/bash

SCRIPTDIR=`dirname $0`

#cd $SCRIPTDIR || exit 1

if [ "$USER" != "root" ]; then
    echo "Need root to do proper zfs testing"
    exit 1
fi

# test needs ssh access to localhost for testing
if ! [ -e /root/.ssh/id_rsa ]; then
    ssh-keygen -t rsa -f /root/.ssh/id_rsa -P '' || exit 1
    cat /root/.ssh/id_rsa.pub >> /root/.ssh/authorized_keys || exit 1
    ssh -oStrictHostKeyChecking=no localhost true || exit 1
fi

coverage run --source zfs_autobackup -m unittest discover -vvvvf $SCRIPTDIR $@ 2>&1
EXIT=$?

echo
coverage report

exit $EXIT
135
tests/test_destroymissing.py
Normal file
135
tests/test_destroymissing.py
Normal file
@ -0,0 +1,135 @@
|
||||
|
||||
from basetest import *
|
||||
|
||||
|
||||
class TestZfsNode(unittest2.TestCase):
|
||||
|
||||
def setUp(self):
|
||||
prepare_zpools()
|
||||
self.longMessage=True
|
||||
|
||||
|
||||
|
||||
def test_destroymissing(self):
|
||||
|
||||
#initial backup
|
||||
with patch('time.strftime', return_value="10101111000000"): #1000 years in past
|
||||
self.assertFalse(ZfsAutobackup("test test_target1 --verbose --no-holds".split(" ")).run())
|
||||
|
||||
with patch('time.strftime', return_value="20101111000000"): #far in past
|
||||
self.assertFalse(ZfsAutobackup("test test_target1 --verbose --no-holds --allow-empty".split(" ")).run())
|
||||
|
||||
|
||||
with self.subTest("Should do nothing yet"):
|
||||
with OutputIO() as buf:
|
||||
with redirect_stdout(buf):
|
||||
self.assertFalse(ZfsAutobackup("test test_target1 --verbose --no-snapshot --destroy-missing 0s".split(" ")).run())
|
||||
|
||||
print(buf.getvalue())
|
||||
self.assertNotIn(": Destroy missing", buf.getvalue())
|
||||
|
||||
|
||||
with self.subTest("missing dataset of us that still has children"):
|
||||
|
||||
#just deselect it so it counts as 'missing'
|
||||
shelltest("zfs set autobackup:test=child test_source1/fs1")
|
||||
|
||||
with OutputIO() as buf:
|
||||
with redirect_stdout(buf), redirect_stderr(buf):
|
||||
self.assertFalse(ZfsAutobackup("test test_target1 --verbose --no-snapshot --destroy-missing 0s".split(" ")).run())
|
||||
|
||||
print(buf.getvalue())
|
||||
#should have done the snapshot cleanup for destoy missing:
|
||||
self.assertIn("fs1@test-10101111000000: Destroying", buf.getvalue())
|
||||
|
||||
self.assertIn("fs1: Destroy missing: Still has children here.", buf.getvalue())
|
||||
|
||||
shelltest("zfs inherit autobackup:test test_source1/fs1")
|
||||
|
||||
|
||||
with self.subTest("Normal destroyed leaf"):
|
||||
shelltest("zfs destroy -r test_source1/fs1/sub")
|
||||
|
||||
#wait for deadline of last snapshot
|
||||
with OutputIO() as buf:
|
||||
with redirect_stdout(buf):
|
||||
#100y: lastest should not be old enough, while second to latest snapshot IS old enough:
|
||||
self.assertFalse(ZfsAutobackup("test test_target1 --verbose --no-snapshot --destroy-missing 100y".split(" ")).run())
|
||||
|
||||
print(buf.getvalue())
|
||||
self.assertIn(": Waiting for deadline", buf.getvalue())
|
||||
|
||||
#past deadline, destroy
|
||||
with OutputIO() as buf:
|
||||
with redirect_stdout(buf):
|
||||
self.assertFalse(ZfsAutobackup("test test_target1 --verbose --no-snapshot --destroy-missing 1y".split(" ")).run())
|
||||
|
||||
print(buf.getvalue())
|
||||
self.assertIn("sub: Destroying", buf.getvalue())
|
||||
|
||||
|
||||
with self.subTest("Leaf with other snapshot still using it"):
|
||||
shelltest("zfs destroy -r test_source1/fs1")
|
||||
shelltest("zfs snapshot -r test_target1/test_source1/fs1@other1")
|
||||
|
||||
|
||||
with OutputIO() as buf:
|
||||
with redirect_stdout(buf):
|
||||
self.assertFalse(ZfsAutobackup("test test_target1 --verbose --no-snapshot --destroy-missing 0s".split(" ")).run())
|
||||
|
||||
print(buf.getvalue())
|
||||
|
||||
#cant finish because still in use:
|
||||
self.assertIn("fs1: Destroy missing: Still in use", buf.getvalue())
|
||||
|
||||
shelltest("zfs destroy test_target1/test_source1/fs1@other1")
|
||||
|
||||
|
||||
with self.subTest("In use by clone"):
|
||||
shelltest("zfs clone test_target1/test_source1/fs1@test-20101111000000 test_target1/clone1")
|
||||
|
||||
with OutputIO() as buf:
|
||||
with redirect_stdout(buf), redirect_stderr(buf):
|
||||
self.assertFalse(ZfsAutobackup("test test_target1 --verbose --no-snapshot --destroy-missing 0s".split(" ")).run())
|
||||
|
||||
print(buf.getvalue())
|
||||
#now tries to destroy our own last snapshot (before the final destroy of the dataset)
|
||||
self.assertIn("fs1@test-20101111000000: Destroying", buf.getvalue())
|
||||
#but can't finish because it is still in use:
|
||||
self.assertIn("fs1: Error during destoy missing", buf.getvalue())
|
||||
|
||||
shelltest("zfs destroy test_target1/clone1")
|
||||
|
||||
|
||||
with self.subTest("Should leave test_source1 parent"):
|
||||
|
||||
with OutputIO() as buf:
|
||||
with redirect_stdout(buf), redirect_stderr(buf):
|
||||
self.assertFalse(ZfsAutobackup("test test_target1 --verbose --no-snapshot --destroy-missing 0s".split(" ")).run())
|
||||
|
||||
print(buf.getvalue())
|
||||
#should have done the snapshot cleanup for destroy missing:
|
||||
self.assertIn("fs1: Destroying", buf.getvalue())
|
||||
|
||||
with OutputIO() as buf:
|
||||
with redirect_stdout(buf), redirect_stderr(buf):
|
||||
self.assertFalse(ZfsAutobackup("test test_target1 --verbose --no-snapshot --destroy-missing 0s".split(" ")).run())
|
||||
|
||||
print(buf.getvalue())
|
||||
#on the second run it sees the dangling ex-parent but doesn't know what to do with it (since it has no snapshot of its own)
|
||||
self.assertIn("test_source1: Destroy missing: has no snapshots made by us.", buf.getvalue())
#end result
|
||||
r=shelltest("zfs list -H -o name -r -t all test_target1")
|
||||
self.assertMultiLineEqual(r,"""
|
||||
test_target1
|
||||
test_target1/test_source1
|
||||
test_target1/test_source2
|
||||
test_target1/test_source2/fs2
|
||||
test_target1/test_source2/fs2/sub
|
||||
test_target1/test_source2/fs2/sub@test-10101111000000
|
||||
test_target1/test_source2/fs2/sub@test-20101111000000
|
||||
""")
|
||||
135  tests/test_executenode.py  Normal file
@@ -0,0 +1,135 @@
from basetest import *
from zfs_autobackup.ExecuteNode import ExecuteNode

print("THIS TEST REQUIRES SSH TO LOCALHOST")


class TestExecuteNode(unittest2.TestCase):

    # def setUp(self):
    #     return super().setUp()

    def basics(self, node):

        with self.subTest("simple echo"):
            self.assertEqual(node.run(["echo","test"]), ["test"])

        with self.subTest("error exit code"):
            with self.assertRaises(subprocess.CalledProcessError):
                node.run(["false"])

        #
        with self.subTest("multiline without tabsplit"):
            self.assertEqual(node.run(["echo","l1c1\tl1c2\nl2c1\tl2c2"], tab_split=False), ["l1c1\tl1c2", "l2c1\tl2c2"])

        #multiline tabsplit
        with self.subTest("multiline tabsplit"):
            self.assertEqual(node.run(["echo","l1c1\tl1c2\nl2c1\tl2c2"], tab_split=True), [['l1c1', 'l1c2'], ['l2c1', 'l2c2']])

        #escaping test (shouldn't be a problem locally; single quotes can be a problem remotely via ssh)
        with self.subTest("escape test"):
            s="><`'\"@&$()$bla\\/.*!#test _+-={}[]|"
            self.assertEqual(node.run(["echo",s]), [s])

        #return stderr as well, trigger stderr by listing something non-existing
        with self.subTest("stderr return"):
            (stdout, stderr)=node.run(["ls", "nonexistingfile"], return_stderr=True, valid_exitcodes=[2])
            self.assertEqual(stdout,[])
            self.assertRegex(stderr[0],"nonexistingfile")

        #slow command, make sure things don't exit too early
        with self.subTest("early exit test"):
            start_time=time.time()
            self.assertEqual(node.run(["sleep","1"]), [])
            self.assertGreaterEqual(time.time()-start_time,1)

        #input a string and check it via cat
        with self.subTest("stdin input string"):
            self.assertEqual(node.run(["cat"], inp="test"), ["test"])

        #a command that wants input, while we supply none, shouldn't hang forever.
        with self.subTest("stdin process with inp=None (shouldn't hang)"):
            self.assertEqual(node.run(["cat"]), [])

    def test_basics_local(self):
        node=ExecuteNode(debug_output=True)
        self.basics(node)

    def test_basics_remote(self):
        node=ExecuteNode(ssh_to="localhost", debug_output=True)
        self.basics(node)

    ################

    def test_readonly(self):
        node=ExecuteNode(debug_output=True, readonly=True)

        self.assertEqual(node.run(["echo","test"], readonly=False), None)
        self.assertEqual(node.run(["echo","test"], readonly=True), ["test"])

    ################

    def pipe(self, nodea, nodeb):

        with self.subTest("pipe data"):
            output=nodea.run(["dd", "if=/dev/zero", "count=1000"], pipe=True)
            self.assertEqual(nodeb.run(["md5sum"], inp=output), ["816df6f64deba63b029ca19d880ee10a -"])

        with self.subTest("exit code both ends of pipe ok"):
            output=nodea.run(["true"], pipe=True)
            nodeb.run(["true"], inp=output)

        with self.subTest("error on pipe input side"):
            with self.assertRaises(subprocess.CalledProcessError):
                output=nodea.run(["false"], pipe=True)
                nodeb.run(["true"], inp=output)

        with self.subTest("error on pipe output side"):
            with self.assertRaises(subprocess.CalledProcessError):
                output=nodea.run(["true"], pipe=True)
                nodeb.run(["false"], inp=output)

        with self.subTest("error on both sides of pipe"):
            with self.assertRaises(subprocess.CalledProcessError):
                output=nodea.run(["false"], pipe=True)
                nodeb.run(["false"], inp=output)

        with self.subTest("check stderr on pipe output side"):
            output=nodea.run(["true"], pipe=True)
            (stdout, stderr)=nodeb.run(["ls", "nonexistingfile"], inp=output, return_stderr=True, valid_exitcodes=[0,2])
            self.assertEqual(stdout,[])
            self.assertRegex(stderr[0], "nonexistingfile")

        with self.subTest("check stderr on pipe input side (should be only printed)"):
            output=nodea.run(["ls", "nonexistingfile"], pipe=True)
            (stdout, stderr)=nodeb.run(["true"], inp=output, return_stderr=True, valid_exitcodes=[0,2])
            self.assertEqual(stdout,[])
            self.assertEqual(stderr,[])

    def test_pipe_local_local(self):
        nodea=ExecuteNode(debug_output=True)
        nodeb=ExecuteNode(debug_output=True)
        self.pipe(nodea, nodeb)

    def test_pipe_remote_remote(self):
        nodea=ExecuteNode(ssh_to="localhost", debug_output=True)
        nodeb=ExecuteNode(ssh_to="localhost", debug_output=True)
        self.pipe(nodea, nodeb)

    def test_pipe_local_remote(self):
        nodea=ExecuteNode(debug_output=True)
        nodeb=ExecuteNode(ssh_to="localhost", debug_output=True)
        self.pipe(nodea, nodeb)

    def test_pipe_remote_local(self):
        nodea=ExecuteNode(ssh_to="localhost", debug_output=True)
        nodeb=ExecuteNode(debug_output=True)
        self.pipe(nodea, nodeb)


if __name__ == '__main__':
    unittest.main()
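
# Illustrative sketch (not part of the original file): the checksum asserted in the
# "pipe data" subtest corresponds to 1000 blocks of zeroes from dd. Assuming dd's default
# block size of 512 bytes, the same digest can be reproduced without running dd at all:
import hashlib

def _expected_pipe_md5():
    # 1000 blocks of 512 zero bytes, as produced by: dd if=/dev/zero count=1000
    return hashlib.md5(b"\0" * 512 * 1000).hexdigest()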
266  tests/test_externalfailures.py  Normal file
@@ -0,0 +1,266 @@
|
||||
from basetest import *
|
||||
|
||||
|
||||
class TestZfsNode(unittest2.TestCase):
|
||||
|
||||
def setUp(self):
|
||||
prepare_zpools()
|
||||
self.longMessage = True
|
||||
|
||||
# generate a resumable state
|
||||
# NOTE: this generates two resumable test_target1/test_source1/fs1 and test_target1/test_source1/fs1/sub
|
||||
def generate_resume(self):
|
||||
|
||||
r = shelltest("zfs set compress=off test_source1 test_target1")
|
||||
|
||||
# big change on source
|
||||
r = shelltest("dd if=/dev/zero of=/test_source1/fs1/data bs=250M count=1")
|
||||
|
||||
# waste space on target
|
||||
r = shelltest("dd if=/dev/zero of=/test_target1/waste bs=250M count=1")
|
||||
|
||||
# should fail and leave resume token (if supported)
|
||||
self.assertTrue(ZfsAutobackup("test test_target1 --verbose".split(" ")).run())
|
||||
|
||||
# free up space
|
||||
r = shelltest("rm /test_target1/waste")
|
||||
# sync
|
||||
r = shelltest("zfs umount test_target1")
|
||||
r = shelltest("zfs mount test_target1")
|
||||
|
||||
# resume initial backup
|
||||
def test_initial_resume(self):
|
||||
|
||||
# initial backup, leaves resume token
|
||||
with patch('time.strftime', return_value="20101111000000"):
|
||||
self.generate_resume()
|
||||
|
||||
# --test should resume and succeed
|
||||
with OutputIO() as buf:
|
||||
with redirect_stdout(buf):
|
||||
self.assertFalse(ZfsAutobackup("test test_target1 --verbose --test".split(" ")).run())
|
||||
|
||||
print(buf.getvalue())
|
||||
|
||||
# did we really resume?
|
||||
if "0.6.5" in ZFS_USERSPACE:
|
||||
# abort this late, for better coverage
|
||||
self.skipTest("Resume not supported in this ZFS userspace version")
|
||||
else:
|
||||
self.assertIn(": resuming", buf.getvalue())
|
||||
|
||||
# should resume and succeed
|
||||
with OutputIO() as buf:
|
||||
with redirect_stdout(buf):
|
||||
self.assertFalse(ZfsAutobackup("test test_target1 --verbose".split(" ")).run())
|
||||
|
||||
print(buf.getvalue())
|
||||
|
||||
# did we really resume?
|
||||
if "0.6.5" in ZFS_USERSPACE:
|
||||
# abort this late, for better coverage
|
||||
self.skipTest("Resume not supported in this ZFS userspace version")
|
||||
else:
|
||||
self.assertIn(": resuming", buf.getvalue())
|
||||
|
||||
r = shelltest("zfs list -H -o name -r -t all test_target1")
|
||||
self.assertMultiLineEqual(r, """
|
||||
test_target1
|
||||
test_target1/test_source1
|
||||
test_target1/test_source1/fs1
|
||||
test_target1/test_source1/fs1@test-20101111000000
|
||||
test_target1/test_source1/fs1/sub
|
||||
test_target1/test_source1/fs1/sub@test-20101111000000
|
||||
test_target1/test_source2
|
||||
test_target1/test_source2/fs2
|
||||
test_target1/test_source2/fs2/sub
|
||||
test_target1/test_source2/fs2/sub@test-20101111000000
|
||||
""")
|
||||
|
||||
# resume incremental backup
|
||||
def test_incremental_resume(self):
|
||||
|
||||
# initial backup
|
||||
with patch('time.strftime', return_value="20101111000000"):
|
||||
self.assertFalse(ZfsAutobackup("test test_target1 --verbose --allow-empty".split(" ")).run())
|
||||
|
||||
# incremental backup leaves resume token
|
||||
with patch('time.strftime', return_value="20101111000001"):
|
||||
self.generate_resume()
|
||||
|
||||
# --test should resume and succeed
|
||||
with OutputIO() as buf:
|
||||
with redirect_stdout(buf):
|
||||
self.assertFalse(ZfsAutobackup("test test_target1 --verbose --test".split(" ")).run())
|
||||
|
||||
print(buf.getvalue())
|
||||
|
||||
# did we really resume?
|
||||
if "0.6.5" in ZFS_USERSPACE:
|
||||
# abort this late, for better coverage
|
||||
self.skipTest("Resume not supported in this ZFS userspace version")
|
||||
else:
|
||||
self.assertIn(": resuming", buf.getvalue())
|
||||
|
||||
# should resume and succeed
|
||||
with OutputIO() as buf:
|
||||
with redirect_stdout(buf):
|
||||
self.assertFalse(ZfsAutobackup("test test_target1 --verbose".split(" ")).run())
|
||||
|
||||
print(buf.getvalue())
|
||||
|
||||
# did we really resume?
|
||||
if "0.6.5" in ZFS_USERSPACE:
|
||||
# abort this late, for better coverage
|
||||
self.skipTest("Resume not supported in this ZFS userspace version")
|
||||
else:
|
||||
self.assertIn(": resuming", buf.getvalue())
|
||||
|
||||
r = shelltest("zfs list -H -o name -r -t all test_target1")
|
||||
self.assertMultiLineEqual(r, """
|
||||
test_target1
|
||||
test_target1/test_source1
|
||||
test_target1/test_source1/fs1
|
||||
test_target1/test_source1/fs1@test-20101111000000
|
||||
test_target1/test_source1/fs1@test-20101111000001
|
||||
test_target1/test_source1/fs1/sub
|
||||
test_target1/test_source1/fs1/sub@test-20101111000000
|
||||
test_target1/test_source2
|
||||
test_target1/test_source2/fs2
|
||||
test_target1/test_source2/fs2/sub
|
||||
test_target1/test_source2/fs2/sub@test-20101111000000
|
||||
""")
|
||||
|
||||
# generate an invalid resume token, and verify that it is aborted automatically
|
||||
def test_initial_resumeabort(self):
|
||||
|
||||
if "0.6.5" in ZFS_USERSPACE:
|
||||
self.skipTest("Resume not supported in this ZFS userspace version")
|
||||
|
||||
# initial backup, leaves resume token
|
||||
with patch('time.strftime', return_value="20101111000000"):
|
||||
self.generate_resume()
|
||||
|
||||
# remove corresponding source snapshot, so it becomes invalid
|
||||
shelltest("zfs destroy test_source1/fs1@test-20101111000000")
|
||||
|
||||
# NOTE: it can only abort the initial dataset if it has no subs
|
||||
shelltest("zfs destroy test_target1/test_source1/fs1/sub; true")
|
||||
|
||||
# --test try again, should abort old resume
|
||||
with patch('time.strftime', return_value="20101111000001"):
|
||||
self.assertFalse(ZfsAutobackup("test test_target1 --verbose --test".split(" ")).run())
|
||||
|
||||
# try again, should abort old resume
|
||||
with patch('time.strftime', return_value="20101111000001"):
|
||||
self.assertFalse(ZfsAutobackup("test test_target1 --verbose".split(" ")).run())
|
||||
|
||||
r = shelltest("zfs list -H -o name -r -t all test_target1")
|
||||
self.assertMultiLineEqual(r, """
|
||||
test_target1
|
||||
test_target1/test_source1
|
||||
test_target1/test_source1/fs1
|
||||
test_target1/test_source1/fs1@test-20101111000001
|
||||
test_target1/test_source1/fs1/sub
|
||||
test_target1/test_source1/fs1/sub@test-20101111000000
|
||||
test_target1/test_source2
|
||||
test_target1/test_source2/fs2
|
||||
test_target1/test_source2/fs2/sub
|
||||
test_target1/test_source2/fs2/sub@test-20101111000000
|
||||
""")
|
||||
|
||||
# generate an invalid resume token, and verify that it is aborted automatically
|
||||
def test_incremental_resumeabort(self):
|
||||
|
||||
if "0.6.5" in ZFS_USERSPACE:
|
||||
self.skipTest("Resume not supported in this ZFS userspace version")
|
||||
|
||||
# initial backup
|
||||
with patch('time.strftime', return_value="20101111000000"):
|
||||
self.assertFalse(ZfsAutobackup("test test_target1 --verbose --allow-empty".split(" ")).run())
|
||||
|
||||
# incremental backup, leaves resume token
|
||||
with patch('time.strftime', return_value="20101111000001"):
|
||||
self.generate_resume()
|
||||
|
||||
# remove corresponding source snapshot, so it becomes invalid
|
||||
shelltest("zfs destroy test_source1/fs1@test-20101111000001")
|
||||
|
||||
# --test try again, should abort old resume
|
||||
with patch('time.strftime', return_value="20101111000002"):
|
||||
self.assertFalse(ZfsAutobackup("test test_target1 --verbose --test".split(" ")).run())
|
||||
|
||||
# try again, should abort old resume
|
||||
with patch('time.strftime', return_value="20101111000002"):
|
||||
self.assertFalse(ZfsAutobackup("test test_target1 --verbose".split(" ")).run())
|
||||
|
||||
r = shelltest("zfs list -H -o name -r -t all test_target1")
|
||||
self.assertMultiLineEqual(r, """
|
||||
test_target1
|
||||
test_target1/test_source1
|
||||
test_target1/test_source1/fs1
|
||||
test_target1/test_source1/fs1@test-20101111000000
|
||||
test_target1/test_source1/fs1@test-20101111000002
|
||||
test_target1/test_source1/fs1/sub
|
||||
test_target1/test_source1/fs1/sub@test-20101111000000
|
||||
test_target1/test_source2
|
||||
test_target1/test_source2/fs2
|
||||
test_target1/test_source2/fs2/sub
|
||||
test_target1/test_source2/fs2/sub@test-20101111000000
|
||||
""")
|
||||
|
||||
# create a resume situation where the other side doesn't want the snapshot anymore (should abort the resume)
|
||||
def test_abort_unwanted_resume(self):
|
||||
|
||||
if "0.6.5" in ZFS_USERSPACE:
|
||||
self.skipTest("Resume not supported in this ZFS userspace version")
|
||||
|
||||
with patch('time.strftime', return_value="20101111000000"):
|
||||
self.assertFalse(ZfsAutobackup("test test_target1 --verbose".split(" ")).run())
|
||||
|
||||
# generate resume
|
||||
with patch('time.strftime', return_value="20101111000001"):
|
||||
self.generate_resume()
|
||||
|
||||
with OutputIO() as buf:
|
||||
with redirect_stdout(buf):
|
||||
# incremental, doesnt want previous anymore
|
||||
with patch('time.strftime', return_value="20101111000002"):
|
||||
self.assertFalse(ZfsAutobackup(
|
||||
"test test_target1 --verbose --keep-target=0 --debug --allow-empty".split(" ")).run())
|
||||
|
||||
print(buf.getvalue())
|
||||
|
||||
self.assertIn(": aborting resume, since", buf.getvalue())
|
||||
|
||||
r = shelltest("zfs list -H -o name -r -t all test_target1")
|
||||
self.assertMultiLineEqual(r, """
|
||||
test_target1
|
||||
test_target1/test_source1
|
||||
test_target1/test_source1/fs1
|
||||
test_target1/test_source1/fs1@test-20101111000002
|
||||
test_target1/test_source1/fs1/sub
|
||||
test_target1/test_source1/fs1/sub@test-20101111000002
|
||||
test_target1/test_source2
|
||||
test_target1/test_source2/fs2
|
||||
test_target1/test_source2/fs2/sub
|
||||
test_target1/test_source2/fs2/sub@test-20101111000002
|
||||
""")
|
||||
|
||||
def test_missing_common(self):
|
||||
|
||||
with patch('time.strftime', return_value="20101111000000"):
|
||||
self.assertFalse(ZfsAutobackup("test test_target1 --verbose --allow-empty".split(" ")).run())
|
||||
|
||||
# remove common snapshot and leave nothing
|
||||
shelltest("zfs release zfs_autobackup:test test_source1/fs1@test-20101111000000")
|
||||
shelltest("zfs destroy test_source1/fs1@test-20101111000000")
|
||||
|
||||
with patch('time.strftime', return_value="20101111000001"):
|
||||
self.assertTrue(ZfsAutobackup("test test_target1 --verbose --allow-empty".split(" ")).run())
|
||||
|
||||
############# TODO:
|
||||
def test_ignoretransfererrors(self):
|
||||
|
||||
self.skipTest(
|
||||
"todo: create some kind of situation where zfs recv exits with an error but transfer is still ok (happens in practice with acltype)")
|
||||
19  tests/test_regressions.py  Normal file
@@ -0,0 +1,19 @@
from basetest import *


class TestZfsNode(unittest2.TestCase):

    def setUp(self):
        prepare_zpools()
        self.longMessage=True

    # #resume initial backup
    # def test_keepsource0(self):

    #     #somehow only specifying --allow-empty --keep-source 0 failed:
    #     with patch('time.strftime', return_value="20101111000000"):
    #         self.assertFalse(ZfsAutobackup("test test_target1 --verbose --allow-empty --keep-source 0".split(" ")).run())

    #     with patch('time.strftime', return_value="20101111000001"):
    #         self.assertFalse(ZfsAutobackup("test test_target1 --verbose --allow-empty --keep-source 0".split(" ")).run())
95  tests/test_scaling.py  Normal file
@@ -0,0 +1,95 @@
from basetest import *

from zfs_autobackup.ExecuteNode import ExecuteNode

run_orig=ExecuteNode.run
run_counter=0

def run_count(*args, **kwargs):
    # counting wrapper around the real ExecuteNode.run, used to measure how many commands are executed
    global run_counter
    run_counter=run_counter+1
    return (run_orig(*args, **kwargs))

class TestZfsScaling(unittest2.TestCase):

    def setUp(self):
        prepare_zpools()
        self.longMessage = True

    def test_manysnapshots(self):
        """count the number of commands when there are many snapshots."""

        snapshot_count=100

        print("Creating many snapshots...")
        s=""
        for i in range(1970,1970+snapshot_count):
            s=s+"zfs snapshot test_source1/fs1@test-{:04}1111000000;".format(i)

        shelltest(s)

        global run_counter

        run_counter=0
        with patch.object(ExecuteNode,'run', run_count) as p:

            with patch('time.strftime', return_value="20101112000000"):
                self.assertFalse(ZfsAutobackup("test test_target1 --verbose --keep-source=10000 --keep-target=10000 --no-holds --allow-empty".split(" ")).run())

        #this triggers if you make a change with an impact of more than O(snapshot_count/2)
        expected_runs=343
        print("ACTUAL RUNS: {}".format(run_counter))
        self.assertLess(abs(run_counter-expected_runs), snapshot_count/2)

        run_counter=0
        with patch.object(ExecuteNode,'run', run_count) as p:

            with patch('time.strftime', return_value="20101112000001"):
                self.assertFalse(ZfsAutobackup("test test_target1 --verbose --keep-source=10000 --keep-target=10000 --no-holds --allow-empty".split(" ")).run())

        #this triggers if you make a change with a performance impact of more than O(snapshot_count/2)
        expected_runs=47
        print("ACTUAL RUNS: {}".format(run_counter))
        self.assertLess(abs(run_counter-expected_runs), snapshot_count/2)

    def test_manydatasets(self):
        """count the number of commands when there are many datasets"""

        dataset_count=100

        print("Creating many datasets...")
        s=""
        for i in range(0,dataset_count):
            s=s+"zfs create test_source1/fs1/{};".format(i)

        shelltest(s)

        global run_counter

        run_counter=0
        with patch.object(ExecuteNode,'run', run_count) as p:

            with patch('time.strftime', return_value="20101112000000"):
                self.assertFalse(ZfsAutobackup("test test_target1 --verbose --no-holds --allow-empty".split(" ")).run())

        #this triggers if you make a change with an impact of more than O(dataset_count/2)
        expected_runs=743
        print("ACTUAL RUNS: {}".format(run_counter))
        self.assertLess(abs(run_counter-expected_runs), dataset_count/2)

        run_counter=0
        with patch.object(ExecuteNode,'run', run_count) as p:

            with patch('time.strftime', return_value="20101112000001"):
                self.assertFalse(ZfsAutobackup("test test_target1 --verbose --no-holds --allow-empty".split(" ")).run())

        #this triggers if you make a change with a performance impact of more than O(dataset_count/2)
        expected_runs=947
        print("ACTUAL RUNS: {}".format(run_counter))
        self.assertLess(abs(run_counter-expected_runs), dataset_count/2)
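
# Illustrative sketch (not part of the original file): the same counting trick can be used on
# its own to see how many shell commands a single ExecuteNode call issues. The patch swaps
# ExecuteNode.run for the counting wrapper defined above; assumes a local node.
if __name__ == '__main__':
    run_counter = 0
    with patch.object(ExecuteNode, 'run', run_count):
        ExecuteNode(debug_output=True).run(["true"])
    print("commands executed: {}".format(run_counter))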
142  tests/test_thinner.py  Normal file
@@ -0,0 +1,142 @@
|
||||
from basetest import *
|
||||
import pprint
|
||||
|
||||
from zfs_autobackup.Thinner import Thinner
|
||||
|
||||
#randint is different in python 2 vs 3
|
||||
randint_compat = lambda lo, hi: lo + int(random.random() * (hi + 1 - lo))
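# i.e. an inclusive randint(lo, hi) built on random.random(), so the fixed seeds used below
# produce the same pseudo-random sequence on both Python 2 and Python 3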
|
||||
|
||||
|
||||
class Thing:
|
||||
def __init__(self, timestamp):
|
||||
self.timestamp=timestamp
|
||||
|
||||
def __str__(self):
|
||||
# age=now-self.timestamp
|
||||
struct=time.gmtime(self.timestamp)
|
||||
return("{}".format(time.strftime("%Y-%m-%d %H:%M:%S",struct)))
|
||||
|
||||
|
||||
class TestThinner(unittest2.TestCase):
|
||||
|
||||
# def setUp(self):
|
||||
|
||||
# return super().setUp()
|
||||
|
||||
def test_incremental(self):
|
||||
ok=['2023-01-03 10:53:16',
|
||||
'2024-01-02 15:43:29',
|
||||
'2025-01-01 06:15:32',
|
||||
'2026-01-01 02:48:23',
|
||||
'2026-04-07 20:07:36',
|
||||
'2026-05-07 02:30:29',
|
||||
'2026-06-06 01:19:46',
|
||||
'2026-07-06 06:38:09',
|
||||
'2026-08-05 05:08:53',
|
||||
'2026-09-04 03:33:04',
|
||||
'2026-10-04 05:27:09',
|
||||
'2026-11-04 04:01:17',
|
||||
'2026-12-03 13:49:56',
|
||||
'2027-01-01 17:02:00',
|
||||
'2027-01-03 04:26:42',
|
||||
'2027-02-01 14:16:02',
|
||||
'2027-02-12 03:31:02',
|
||||
'2027-02-18 00:33:10',
|
||||
'2027-02-26 21:09:54',
|
||||
'2027-03-02 08:05:18',
|
||||
'2027-03-03 16:46:09',
|
||||
'2027-03-04 06:39:14',
|
||||
'2027-03-06 03:35:41',
|
||||
'2027-03-08 12:24:42',
|
||||
'2027-03-08 20:34:57']
#some arbitrary date
|
||||
now=1589229252
|
||||
#we want deterministic results
|
||||
random.seed(1337)
|
||||
thinner=Thinner("5,10s1min,1d1w,1w1m,1m12m,1y5y")
|
||||
things=[]
|
||||
|
||||
#thin incrementally while adding
|
||||
for i in range(0,5000):
|
||||
|
||||
#increase random amount of time and maybe add a thing
|
||||
now=now+randint_compat(0,3600*24)
|
||||
if random.random()>=0.5:
|
||||
things.append(Thing(now))
|
||||
|
||||
(keeps, removes)=thinner.thin(things, now=now)
|
||||
things=keeps
|
||||
|
||||
|
||||
result=[]
|
||||
for thing in things:
|
||||
result.append(str(thing))
|
||||
|
||||
print("Thinner result incremental:")
|
||||
pprint.pprint(result)
|
||||
|
||||
self.assertEqual(result, ok)
|
||||
|
||||
|
||||
def test_full(self):
|
||||
ok=['2022-03-09 01:56:23',
|
||||
'2023-01-03 10:53:16',
|
||||
'2024-01-02 15:43:29',
|
||||
'2025-01-01 06:15:32',
|
||||
'2026-01-01 02:48:23',
|
||||
'2026-03-14 09:08:04',
|
||||
'2026-04-07 20:07:36',
|
||||
'2026-05-07 02:30:29',
|
||||
'2026-06-06 01:19:46',
|
||||
'2026-07-06 06:38:09',
|
||||
'2026-08-05 05:08:53',
|
||||
'2026-09-04 03:33:04',
|
||||
'2026-10-04 05:27:09',
|
||||
'2026-11-04 04:01:17',
|
||||
'2026-12-03 13:49:56',
|
||||
'2027-01-01 17:02:00',
|
||||
'2027-01-03 04:26:42',
|
||||
'2027-02-01 14:16:02',
|
||||
'2027-02-08 02:41:14',
|
||||
'2027-02-12 03:31:02',
|
||||
'2027-02-18 00:33:10',
|
||||
'2027-02-26 21:09:54',
|
||||
'2027-03-02 08:05:18',
|
||||
'2027-03-03 16:46:09',
|
||||
'2027-03-04 06:39:14',
|
||||
'2027-03-06 03:35:41',
|
||||
'2027-03-08 12:24:42',
|
||||
'2027-03-08 20:34:57']
|
||||
|
||||
#some arbitrary date
|
||||
now=1589229252
|
||||
#we want deterministic results
|
||||
random.seed(1337)
|
||||
thinner=Thinner("5,10s1min,1d1w,1w1m,1m12m,1y5y")
|
||||
things=[]
|
||||
|
||||
for i in range(0,5000):
|
||||
|
||||
#increase random amount of time and maybe add a thing
|
||||
now=now+randint_compat(0,3600*24)
|
||||
if random.random()>=0.5:
|
||||
things.append(Thing(now))
|
||||
|
||||
(things, removes)=thinner.thin(things, now=now)
|
||||
|
||||
result=[]
|
||||
for thing in things:
|
||||
result.append(str(thing))
|
||||
|
||||
print("Thinner result full:")
|
||||
pprint.pprint(result)
|
||||
|
||||
self.assertEqual(result, ok)
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
unittest.main()
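
# Illustrative sketch (not part of the original file): a smaller, deterministic use of the same
# API. The schedule string is assumed to read as "always keep the last 5, then one per 10
# seconds for 1 minute, one per day for 1 week, one per week for 1 month, one per month for
# 12 months, one per year for 5 years"; the helper below is hypothetical and never called.
def _demo_thin():
    demo_now = 1589229252
    # 48 hourly "things", oldest first
    demo_things = [Thing(demo_now - hours * 3600) for hours in range(48, 0, -1)]
    keeps, removes = Thinner("5,1d1w").thin(demo_things, now=demo_now)
    return len(keeps), len(removes)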
|
||||
850  tests/test_zfsautobackup.py  Normal file
@@ -0,0 +1,850 @@
|
||||
from basetest import *
|
||||
import time
|
||||
|
||||
|
||||
|
||||
class TestZfsAutobackup(unittest2.TestCase):
|
||||
|
||||
def setUp(self):
|
||||
prepare_zpools()
|
||||
self.longMessage=True
|
||||
|
||||
def test_invalidpars(self):
|
||||
|
||||
self.assertEqual(ZfsAutobackup("test test_target1 --keep-source -1".split(" ")).run(), 255)
|
||||
|
||||
def test_snapshotmode(self):
|
||||
"""test snapshot tool mode"""
|
||||
|
||||
with patch('time.strftime', return_value="20101111000000"):
|
||||
self.assertFalse(ZfsAutobackup("test --verbose".split(" ")).run())
|
||||
|
||||
r=shelltest("zfs list -H -o name -r -t all "+TEST_POOLS)
|
||||
self.assertMultiLineEqual(r,"""
|
||||
test_source1
|
||||
test_source1/fs1
|
||||
test_source1/fs1@test-20101111000000
|
||||
test_source1/fs1/sub
|
||||
test_source1/fs1/sub@test-20101111000000
|
||||
test_source2
|
||||
test_source2/fs2
|
||||
test_source2/fs2/sub
|
||||
test_source2/fs2/sub@test-20101111000000
|
||||
test_source2/fs3
|
||||
test_source2/fs3/sub
|
||||
test_target1
|
||||
""")
|
||||
|
||||
|
||||
|
||||
def test_defaults(self):
|
||||
|
||||
with self.subTest("no datasets selected"):
|
||||
with OutputIO() as buf:
|
||||
with redirect_stderr(buf):
|
||||
with patch('time.strftime', return_value="20101111000000"):
|
||||
self.assertTrue(ZfsAutobackup("nonexisting test_target1 --verbose --debug".split(" ")).run())
|
||||
|
||||
print(buf.getvalue())
|
||||
#correct message?
|
||||
self.assertIn("No source filesystems selected", buf.getvalue())
|
||||
|
||||
|
||||
with self.subTest("defaults with full verbose and debug"):
|
||||
|
||||
with patch('time.strftime', return_value="20101111000000"):
|
||||
self.assertFalse(ZfsAutobackup("test test_target1 --verbose --debug".split(" ")).run())
|
||||
|
||||
r=shelltest("zfs list -H -o name -r -t all "+TEST_POOLS)
|
||||
self.assertMultiLineEqual(r,"""
|
||||
test_source1
|
||||
test_source1/fs1
|
||||
test_source1/fs1@test-20101111000000
|
||||
test_source1/fs1/sub
|
||||
test_source1/fs1/sub@test-20101111000000
|
||||
test_source2
|
||||
test_source2/fs2
|
||||
test_source2/fs2/sub
|
||||
test_source2/fs2/sub@test-20101111000000
|
||||
test_source2/fs3
|
||||
test_source2/fs3/sub
|
||||
test_target1
|
||||
test_target1/test_source1
|
||||
test_target1/test_source1/fs1
|
||||
test_target1/test_source1/fs1@test-20101111000000
|
||||
test_target1/test_source1/fs1/sub
|
||||
test_target1/test_source1/fs1/sub@test-20101111000000
|
||||
test_target1/test_source2
|
||||
test_target1/test_source2/fs2
|
||||
test_target1/test_source2/fs2/sub
|
||||
test_target1/test_source2/fs2/sub@test-20101111000000
|
||||
""")
|
||||
|
||||
with self.subTest("bare defaults, allow empty"):
|
||||
with patch('time.strftime', return_value="20101111000001"):
|
||||
self.assertFalse(ZfsAutobackup("test test_target1 --allow-empty".split(" ")).run())
|
||||
|
||||
|
||||
r=shelltest("zfs list -H -o name -r -t all "+TEST_POOLS)
|
||||
self.assertMultiLineEqual(r,"""
|
||||
test_source1
|
||||
test_source1/fs1
|
||||
test_source1/fs1@test-20101111000000
|
||||
test_source1/fs1@test-20101111000001
|
||||
test_source1/fs1/sub
|
||||
test_source1/fs1/sub@test-20101111000000
|
||||
test_source1/fs1/sub@test-20101111000001
|
||||
test_source2
|
||||
test_source2/fs2
|
||||
test_source2/fs2/sub
|
||||
test_source2/fs2/sub@test-20101111000000
|
||||
test_source2/fs2/sub@test-20101111000001
|
||||
test_source2/fs3
|
||||
test_source2/fs3/sub
|
||||
test_target1
|
||||
test_target1/test_source1
|
||||
test_target1/test_source1/fs1
|
||||
test_target1/test_source1/fs1@test-20101111000000
|
||||
test_target1/test_source1/fs1@test-20101111000001
|
||||
test_target1/test_source1/fs1/sub
|
||||
test_target1/test_source1/fs1/sub@test-20101111000000
|
||||
test_target1/test_source1/fs1/sub@test-20101111000001
|
||||
test_target1/test_source2
|
||||
test_target1/test_source2/fs2
|
||||
test_target1/test_source2/fs2/sub
|
||||
test_target1/test_source2/fs2/sub@test-20101111000000
|
||||
test_target1/test_source2/fs2/sub@test-20101111000001
|
||||
""")
|
||||
|
||||
with self.subTest("verify holds"):
|
||||
|
||||
r=shelltest("zfs get -r userrefs test_source1 test_source2 test_target1")
|
||||
self.assertMultiLineEqual(r,"""
|
||||
NAME PROPERTY VALUE SOURCE
|
||||
test_source1 userrefs - -
|
||||
test_source1/fs1 userrefs - -
|
||||
test_source1/fs1@test-20101111000000 userrefs 0 -
|
||||
test_source1/fs1@test-20101111000001 userrefs 1 -
|
||||
test_source1/fs1/sub userrefs - -
|
||||
test_source1/fs1/sub@test-20101111000000 userrefs 0 -
|
||||
test_source1/fs1/sub@test-20101111000001 userrefs 1 -
|
||||
test_source2 userrefs - -
|
||||
test_source2/fs2 userrefs - -
|
||||
test_source2/fs2/sub userrefs - -
|
||||
test_source2/fs2/sub@test-20101111000000 userrefs 0 -
|
||||
test_source2/fs2/sub@test-20101111000001 userrefs 1 -
|
||||
test_source2/fs3 userrefs - -
|
||||
test_source2/fs3/sub userrefs - -
|
||||
test_target1 userrefs - -
|
||||
test_target1/test_source1 userrefs - -
|
||||
test_target1/test_source1/fs1 userrefs - -
|
||||
test_target1/test_source1/fs1@test-20101111000000 userrefs 0 -
|
||||
test_target1/test_source1/fs1@test-20101111000001 userrefs 1 -
|
||||
test_target1/test_source1/fs1/sub userrefs - -
|
||||
test_target1/test_source1/fs1/sub@test-20101111000000 userrefs 0 -
|
||||
test_target1/test_source1/fs1/sub@test-20101111000001 userrefs 1 -
|
||||
test_target1/test_source2 userrefs - -
|
||||
test_target1/test_source2/fs2 userrefs - -
|
||||
test_target1/test_source2/fs2/sub userrefs - -
|
||||
test_target1/test_source2/fs2/sub@test-20101111000000 userrefs 0 -
|
||||
test_target1/test_source2/fs2/sub@test-20101111000001 userrefs 1 -
|
||||
""")
|
||||
|
||||
#make sure time handling is correct: make snapshots a year apart and verify that only snapshots roughly 1y old are kept
|
||||
with self.subTest("test time checking"):
|
||||
with patch('time.strftime', return_value="20111111000000"):
|
||||
self.assertFalse(ZfsAutobackup("test test_target1 --allow-empty --verbose".split(" ")).run())
|
||||
|
||||
|
||||
time_str="20111112000000" #month in the "future"
|
||||
future_timestamp=time_secs=time.mktime(time.strptime(time_str,"%Y%m%d%H%M%S"))
|
||||
with patch('time.time', return_value=future_timestamp):
|
||||
with patch('time.strftime', return_value="20111111000001"):
|
||||
self.assertFalse(ZfsAutobackup("test test_target1 --allow-empty --verbose --keep-source 1y1y --keep-target 1d1y".split(" ")).run())
|
||||
|
||||
|
||||
r=shelltest("zfs list -H -o name -r -t all "+TEST_POOLS)
|
||||
self.assertMultiLineEqual(r,"""
|
||||
test_source1
|
||||
test_source1/fs1
|
||||
test_source1/fs1@test-20111111000000
|
||||
test_source1/fs1@test-20111111000001
|
||||
test_source1/fs1/sub
|
||||
test_source1/fs1/sub@test-20111111000000
|
||||
test_source1/fs1/sub@test-20111111000001
|
||||
test_source2
|
||||
test_source2/fs2
|
||||
test_source2/fs2/sub
|
||||
test_source2/fs2/sub@test-20111111000000
|
||||
test_source2/fs2/sub@test-20111111000001
|
||||
test_source2/fs3
|
||||
test_source2/fs3/sub
|
||||
test_target1
|
||||
test_target1/test_source1
|
||||
test_target1/test_source1/fs1
|
||||
test_target1/test_source1/fs1@test-20111111000000
|
||||
test_target1/test_source1/fs1@test-20111111000001
|
||||
test_target1/test_source1/fs1/sub
|
||||
test_target1/test_source1/fs1/sub@test-20111111000000
|
||||
test_target1/test_source1/fs1/sub@test-20111111000001
|
||||
test_target1/test_source2
|
||||
test_target1/test_source2/fs2
|
||||
test_target1/test_source2/fs2/sub
|
||||
test_target1/test_source2/fs2/sub@test-20111111000000
|
||||
test_target1/test_source2/fs2/sub@test-20111111000001
|
||||
""")
|
||||
|
||||
|
||||
def test_ignore_othersnaphots(self):
|
||||
|
||||
r=shelltest("zfs snapshot test_source1/fs1@othersimple")
|
||||
r=shelltest("zfs snapshot test_source1/fs1@otherdate-20001111000000")
|
||||
|
||||
with patch('time.strftime', return_value="20101111000000"):
|
||||
self.assertFalse(ZfsAutobackup("test test_target1 --verbose".split(" ")).run())
|
||||
|
||||
r=shelltest("zfs list -H -o name -r -t all "+TEST_POOLS)
|
||||
self.assertMultiLineEqual(r,"""
|
||||
test_source1
|
||||
test_source1/fs1
|
||||
test_source1/fs1@othersimple
|
||||
test_source1/fs1@otherdate-20001111000000
|
||||
test_source1/fs1@test-20101111000000
|
||||
test_source1/fs1/sub
|
||||
test_source1/fs1/sub@test-20101111000000
|
||||
test_source2
|
||||
test_source2/fs2
|
||||
test_source2/fs2/sub
|
||||
test_source2/fs2/sub@test-20101111000000
|
||||
test_source2/fs3
|
||||
test_source2/fs3/sub
|
||||
test_target1
|
||||
test_target1/test_source1
|
||||
test_target1/test_source1/fs1
|
||||
test_target1/test_source1/fs1@test-20101111000000
|
||||
test_target1/test_source1/fs1/sub
|
||||
test_target1/test_source1/fs1/sub@test-20101111000000
|
||||
test_target1/test_source2
|
||||
test_target1/test_source2/fs2
|
||||
test_target1/test_source2/fs2/sub
|
||||
test_target1/test_source2/fs2/sub@test-20101111000000
|
||||
""")
|
||||
|
||||
def test_othersnaphots(self):
|
||||
|
||||
r=shelltest("zfs snapshot test_source1/fs1@othersimple")
|
||||
r=shelltest("zfs snapshot test_source1/fs1@otherdate-20001111000000")
|
||||
|
||||
with patch('time.strftime', return_value="20101111000000"):
|
||||
self.assertFalse(ZfsAutobackup("test test_target1 --verbose --other-snapshots".split(" ")).run())
|
||||
|
||||
r=shelltest("zfs list -H -o name -r -t all "+TEST_POOLS)
|
||||
self.assertMultiLineEqual(r,"""
|
||||
test_source1
|
||||
test_source1/fs1
|
||||
test_source1/fs1@othersimple
|
||||
test_source1/fs1@otherdate-20001111000000
|
||||
test_source1/fs1@test-20101111000000
|
||||
test_source1/fs1/sub
|
||||
test_source1/fs1/sub@test-20101111000000
|
||||
test_source2
|
||||
test_source2/fs2
|
||||
test_source2/fs2/sub
|
||||
test_source2/fs2/sub@test-20101111000000
|
||||
test_source2/fs3
|
||||
test_source2/fs3/sub
|
||||
test_target1
|
||||
test_target1/test_source1
|
||||
test_target1/test_source1/fs1
|
||||
test_target1/test_source1/fs1@othersimple
|
||||
test_target1/test_source1/fs1@otherdate-20001111000000
|
||||
test_target1/test_source1/fs1@test-20101111000000
|
||||
test_target1/test_source1/fs1/sub
|
||||
test_target1/test_source1/fs1/sub@test-20101111000000
|
||||
test_target1/test_source2
|
||||
test_target1/test_source2/fs2
|
||||
test_target1/test_source2/fs2/sub
|
||||
test_target1/test_source2/fs2/sub@test-20101111000000
|
||||
""")
|
||||
|
||||
|
||||
def test_nosnapshot(self):
|
||||
|
||||
with patch('time.strftime', return_value="20101111000000"):
|
||||
self.assertFalse(ZfsAutobackup("test test_target1 --verbose --no-snapshot".split(" ")).run())
|
||||
|
||||
r=shelltest("zfs list -H -o name -r -t all "+TEST_POOLS)
|
||||
#(only parents are created )
|
||||
#TODO: it probably shouldn't create these
|
||||
self.assertMultiLineEqual(r,"""
|
||||
test_source1
|
||||
test_source1/fs1
|
||||
test_source1/fs1/sub
|
||||
test_source2
|
||||
test_source2/fs2
|
||||
test_source2/fs2/sub
|
||||
test_source2/fs3
|
||||
test_source2/fs3/sub
|
||||
test_target1
|
||||
test_target1/test_source1
|
||||
test_target1/test_source2
|
||||
test_target1/test_source2/fs2
|
||||
""")
|
||||
|
||||
|
||||
def test_nosend(self):
|
||||
|
||||
with patch('time.strftime', return_value="20101111000000"):
|
||||
self.assertFalse(ZfsAutobackup("test test_target1 --verbose --no-send".split(" ")).run())
|
||||
|
||||
r=shelltest("zfs list -H -o name -r -t all "+TEST_POOLS)
|
||||
#(only parents are created )
|
||||
#TODO: it probably shouldn't create these
|
||||
self.assertMultiLineEqual(r,"""
|
||||
test_source1
|
||||
test_source1/fs1
|
||||
test_source1/fs1@test-20101111000000
|
||||
test_source1/fs1/sub
|
||||
test_source1/fs1/sub@test-20101111000000
|
||||
test_source2
|
||||
test_source2/fs2
|
||||
test_source2/fs2/sub
|
||||
test_source2/fs2/sub@test-20101111000000
|
||||
test_source2/fs3
|
||||
test_source2/fs3/sub
|
||||
test_target1
|
||||
""")
|
||||
|
||||
|
||||
def test_ignorereplicated(self):
|
||||
r=shelltest("zfs snapshot test_source1/fs1@otherreplication")
|
||||
|
||||
with patch('time.strftime', return_value="20101111000000"):
|
||||
self.assertFalse(ZfsAutobackup("test test_target1 --verbose --ignore-replicated".split(" ")).run())
|
||||
|
||||
r=shelltest("zfs list -H -o name -r -t all "+TEST_POOLS)
|
||||
#(only parents are created )
|
||||
#TODO: it probably shouldn't create these
|
||||
self.assertMultiLineEqual(r,"""
|
||||
test_source1
|
||||
test_source1/fs1
|
||||
test_source1/fs1@otherreplication
|
||||
test_source1/fs1/sub
|
||||
test_source1/fs1/sub@test-20101111000000
|
||||
test_source2
|
||||
test_source2/fs2
|
||||
test_source2/fs2/sub
|
||||
test_source2/fs2/sub@test-20101111000000
|
||||
test_source2/fs3
|
||||
test_source2/fs3/sub
|
||||
test_target1
|
||||
test_target1/test_source1
|
||||
test_target1/test_source1/fs1
|
||||
test_target1/test_source1/fs1/sub
|
||||
test_target1/test_source1/fs1/sub@test-20101111000000
|
||||
test_target1/test_source2
|
||||
test_target1/test_source2/fs2
|
||||
test_target1/test_source2/fs2/sub
|
||||
test_target1/test_source2/fs2/sub@test-20101111000000
|
||||
""")
|
||||
|
||||
def test_noholds(self):
|
||||
|
||||
with patch('time.strftime', return_value="20101111000000"):
|
||||
self.assertFalse(ZfsAutobackup("test test_target1 --verbose --no-holds".split(" ")).run())
|
||||
|
||||
r=shelltest("zfs get -r userrefs test_source1 test_source2 test_target1")
|
||||
self.assertMultiLineEqual(r,"""
|
||||
NAME PROPERTY VALUE SOURCE
|
||||
test_source1 userrefs - -
|
||||
test_source1/fs1 userrefs - -
|
||||
test_source1/fs1@test-20101111000000 userrefs 0 -
|
||||
test_source1/fs1/sub userrefs - -
|
||||
test_source1/fs1/sub@test-20101111000000 userrefs 0 -
|
||||
test_source2 userrefs - -
|
||||
test_source2/fs2 userrefs - -
|
||||
test_source2/fs2/sub userrefs - -
|
||||
test_source2/fs2/sub@test-20101111000000 userrefs 0 -
|
||||
test_source2/fs3 userrefs - -
|
||||
test_source2/fs3/sub userrefs - -
|
||||
test_target1 userrefs - -
|
||||
test_target1/test_source1 userrefs - -
|
||||
test_target1/test_source1/fs1 userrefs - -
|
||||
test_target1/test_source1/fs1@test-20101111000000 userrefs 0 -
|
||||
test_target1/test_source1/fs1/sub userrefs - -
|
||||
test_target1/test_source1/fs1/sub@test-20101111000000 userrefs 0 -
|
||||
test_target1/test_source2 userrefs - -
|
||||
test_target1/test_source2/fs2 userrefs - -
|
||||
test_target1/test_source2/fs2/sub userrefs - -
|
||||
test_target1/test_source2/fs2/sub@test-20101111000000 userrefs 0 -
|
||||
""")
|
||||
|
||||
|
||||
def test_strippath(self):
|
||||
|
||||
with patch('time.strftime', return_value="20101111000000"):
|
||||
self.assertFalse(ZfsAutobackup("test test_target1 --verbose --strip-path=1".split(" ")).run())
|
||||
|
||||
r=shelltest("zfs list -H -o name -r -t all "+TEST_POOLS)
|
||||
self.assertMultiLineEqual(r,"""
|
||||
test_source1
|
||||
test_source1/fs1
|
||||
test_source1/fs1@test-20101111000000
|
||||
test_source1/fs1/sub
|
||||
test_source1/fs1/sub@test-20101111000000
|
||||
test_source2
|
||||
test_source2/fs2
|
||||
test_source2/fs2/sub
|
||||
test_source2/fs2/sub@test-20101111000000
|
||||
test_source2/fs3
|
||||
test_source2/fs3/sub
|
||||
test_target1
|
||||
test_target1/fs1
|
||||
test_target1/fs1@test-20101111000000
|
||||
test_target1/fs1/sub
|
||||
test_target1/fs1/sub@test-20101111000000
|
||||
test_target1/fs2
|
||||
test_target1/fs2/sub
|
||||
test_target1/fs2/sub@test-20101111000000
|
||||
""")
|
||||
|
||||
|
||||
def test_clearrefres(self):
|
||||
|
||||
#on zfs utils 0.6.x -x isnt supported
|
||||
r=shelltest("zfs recv -x bla test >/dev/null </dev/zero; echo $?")
|
||||
if r=="\n2\n":
|
||||
self.skipTest("This zfs-userspace version doesnt support -x")
|
||||
|
||||
r=shelltest("zfs set refreservation=1M test_source1/fs1")
|
||||
|
||||
with patch('time.strftime', return_value="20101111000000"):
|
||||
self.assertFalse(ZfsAutobackup("test test_target1 --verbose --clear-refreservation".split(" ")).run())
|
||||
|
||||
r=shelltest("zfs get refreservation -r test_source1 test_source2 test_target1")
|
||||
self.assertMultiLineEqual(r,"""
|
||||
NAME PROPERTY VALUE SOURCE
|
||||
test_source1 refreservation none default
|
||||
test_source1/fs1 refreservation 1M local
|
||||
test_source1/fs1@test-20101111000000 refreservation - -
|
||||
test_source1/fs1/sub refreservation none default
|
||||
test_source1/fs1/sub@test-20101111000000 refreservation - -
|
||||
test_source2 refreservation none default
|
||||
test_source2/fs2 refreservation none default
|
||||
test_source2/fs2/sub refreservation none default
|
||||
test_source2/fs2/sub@test-20101111000000 refreservation - -
|
||||
test_source2/fs3 refreservation none default
|
||||
test_source2/fs3/sub refreservation none default
|
||||
test_target1 refreservation none default
|
||||
test_target1/test_source1 refreservation none default
|
||||
test_target1/test_source1/fs1 refreservation none default
|
||||
test_target1/test_source1/fs1@test-20101111000000 refreservation - -
|
||||
test_target1/test_source1/fs1/sub refreservation none default
|
||||
test_target1/test_source1/fs1/sub@test-20101111000000 refreservation - -
|
||||
test_target1/test_source2 refreservation none default
|
||||
test_target1/test_source2/fs2 refreservation none default
|
||||
test_target1/test_source2/fs2/sub refreservation none default
|
||||
test_target1/test_source2/fs2/sub@test-20101111000000 refreservation - -
|
||||
""")
|
||||
|
||||
|
||||
def test_clearmount(self):
|
||||
|
||||
#on zfs utils 0.6.x -o isnt supported
|
||||
r=shelltest("zfs recv -o bla=1 test >/dev/null </dev/zero; echo $?")
|
||||
if r=="\n2\n":
|
||||
self.skipTest("This zfs-userspace version doesnt support -o")
|
||||
|
||||
|
||||
with patch('time.strftime', return_value="20101111000000"):
|
||||
self.assertFalse(ZfsAutobackup("test test_target1 --verbose --clear-mountpoint --debug".split(" ")).run())
|
||||
|
||||
r=shelltest("zfs get canmount -r test_source1 test_source2 test_target1")
|
||||
self.assertMultiLineEqual(r,"""
|
||||
NAME PROPERTY VALUE SOURCE
|
||||
test_source1 canmount on default
|
||||
test_source1/fs1 canmount on default
|
||||
test_source1/fs1@test-20101111000000 canmount - -
|
||||
test_source1/fs1/sub canmount on default
|
||||
test_source1/fs1/sub@test-20101111000000 canmount - -
|
||||
test_source2 canmount on default
|
||||
test_source2/fs2 canmount on default
|
||||
test_source2/fs2/sub canmount on default
|
||||
test_source2/fs2/sub@test-20101111000000 canmount - -
|
||||
test_source2/fs3 canmount on default
|
||||
test_source2/fs3/sub canmount on default
|
||||
test_target1 canmount on default
|
||||
test_target1/test_source1 canmount on default
|
||||
test_target1/test_source1/fs1 canmount noauto local
|
||||
test_target1/test_source1/fs1@test-20101111000000 canmount - -
|
||||
test_target1/test_source1/fs1/sub canmount noauto local
|
||||
test_target1/test_source1/fs1/sub@test-20101111000000 canmount - -
|
||||
test_target1/test_source2 canmount on default
|
||||
test_target1/test_source2/fs2 canmount on default
|
||||
test_target1/test_source2/fs2/sub canmount noauto local
|
||||
test_target1/test_source2/fs2/sub@test-20101111000000 canmount - -
|
||||
""")
|
||||
|
||||
|
||||
def test_rollback(self):
|
||||
|
||||
#initial backup
|
||||
with patch('time.strftime', return_value="20101111000000"):
|
||||
self.assertFalse(ZfsAutobackup("test test_target1 --verbose".split(" ")).run())
|
||||
|
||||
#make change
|
||||
r=shelltest("zfs mount test_target1/test_source1/fs1")
|
||||
r=shelltest("touch /test_target1/test_source1/fs1/change.txt")
|
||||
|
||||
with patch('time.strftime', return_value="20101111000001"):
|
||||
#should fail (busy)
|
||||
self.assertTrue(ZfsAutobackup("test test_target1 --verbose --allow-empty".split(" ")).run())
|
||||
|
||||
with patch('time.strftime', return_value="20101111000002"):
|
||||
#rollback, should succeed
|
||||
self.assertFalse(ZfsAutobackup("test test_target1 --verbose --allow-empty --rollback".split(" ")).run())
|
||||
|
||||
|
||||
def test_destroyincompat(self):
|
||||
|
||||
#initial backup
|
||||
with patch('time.strftime', return_value="20101111000000"):
|
||||
self.assertFalse(ZfsAutobackup("test test_target1 --verbose".split(" ")).run())
|
||||
|
||||
#add multiple compatible snapshots (written is still 0)
|
||||
r=shelltest("zfs snapshot test_target1/test_source1/fs1@compatible1")
|
||||
r=shelltest("zfs snapshot test_target1/test_source1/fs1@compatible2")
|
||||
|
||||
with patch('time.strftime', return_value="20101111000001"):
|
||||
#should be ok, is compatible
|
||||
self.assertFalse(ZfsAutobackup("test test_target1 --verbose --allow-empty".split(" ")).run())
|
||||
|
||||
#add incompatible snapshot by changing and snapshotting
|
||||
r=shelltest("zfs mount test_target1/test_source1/fs1")
|
||||
r=shelltest("touch /test_target1/test_source1/fs1/change.txt")
|
||||
r=shelltest("zfs snapshot test_target1/test_source1/fs1@incompatible1")
|
||||
|
||||
|
||||
with patch('time.strftime', return_value="20101111000002"):
|
||||
#--test should fail, now incompatible
|
||||
self.assertTrue(ZfsAutobackup("test test_target1 --verbose --allow-empty --test".split(" ")).run())
|
||||
|
||||
with patch('time.strftime', return_value="20101111000002"):
|
||||
#should fail, now incompatible
|
||||
self.assertTrue(ZfsAutobackup("test test_target1 --verbose --allow-empty".split(" ")).run())
|
||||
|
||||
with patch('time.strftime', return_value="20101111000003"):
|
||||
#--test should succeed by destroying incompatibles
|
||||
self.assertFalse(ZfsAutobackup("test test_target1 --verbose --allow-empty --destroy-incompatible --test".split(" ")).run())
|
||||
|
||||
with patch('time.strftime', return_value="20101111000003"):
|
||||
#should succeed by destroying incompatibles
|
||||
self.assertFalse(ZfsAutobackup("test test_target1 --verbose --allow-empty --destroy-incompatible".split(" ")).run())
def test_ssh(self):
|
||||
|
||||
#test all ssh directions
|
||||
|
||||
with patch('time.strftime', return_value="20101111000000"):
|
||||
self.assertFalse(ZfsAutobackup("test test_target1 --verbose --allow-empty --ssh-source localhost".split(" ")).run())
|
||||
|
||||
with patch('time.strftime', return_value="20101111000001"):
|
||||
self.assertFalse(ZfsAutobackup("test test_target1 --verbose --allow-empty --ssh-target localhost".split(" ")).run())
|
||||
|
||||
with patch('time.strftime', return_value="20101111000002"):
|
||||
self.assertFalse(ZfsAutobackup("test test_target1 --verbose --allow-empty --ssh-source localhost --ssh-target localhost".split(" ")).run())
|
||||
|
||||
|
||||
r=shelltest("zfs list -H -o name -r -t all "+TEST_POOLS)
|
||||
self.assertMultiLineEqual(r,"""
|
||||
test_source1
|
||||
test_source1/fs1
|
||||
test_source1/fs1@test-20101111000000
|
||||
test_source1/fs1@test-20101111000001
|
||||
test_source1/fs1@test-20101111000002
|
||||
test_source1/fs1/sub
|
||||
test_source1/fs1/sub@test-20101111000000
|
||||
test_source1/fs1/sub@test-20101111000001
|
||||
test_source1/fs1/sub@test-20101111000002
|
||||
test_source2
|
||||
test_source2/fs2
|
||||
test_source2/fs2/sub
|
||||
test_source2/fs2/sub@test-20101111000000
|
||||
test_source2/fs2/sub@test-20101111000001
|
||||
test_source2/fs2/sub@test-20101111000002
|
||||
test_source2/fs3
|
||||
test_source2/fs3/sub
|
||||
test_target1
|
||||
test_target1/test_source1
|
||||
test_target1/test_source1/fs1
|
||||
test_target1/test_source1/fs1@test-20101111000000
|
||||
test_target1/test_source1/fs1@test-20101111000001
|
||||
test_target1/test_source1/fs1@test-20101111000002
|
||||
test_target1/test_source1/fs1/sub
|
||||
test_target1/test_source1/fs1/sub@test-20101111000000
|
||||
test_target1/test_source1/fs1/sub@test-20101111000001
|
||||
test_target1/test_source1/fs1/sub@test-20101111000002
|
||||
test_target1/test_source2
|
||||
test_target1/test_source2/fs2
|
||||
test_target1/test_source2/fs2/sub
|
||||
test_target1/test_source2/fs2/sub@test-20101111000000
|
||||
test_target1/test_source2/fs2/sub@test-20101111000001
|
||||
test_target1/test_source2/fs2/sub@test-20101111000002
|
||||
""")
|
||||
|
||||
|
||||
def test_minchange(self):
|
||||
|
||||
#initial
|
||||
with patch('time.strftime', return_value="20101111000000"):
|
||||
self.assertFalse(ZfsAutobackup("test test_target1 --verbose --min-change 100000".split(" ")).run())
|
||||
|
||||
#make small change, use umount to reflect the changes immediately
|
||||
r=shelltest("zfs set compress=off test_source1")
|
||||
r=shelltest("touch /test_source1/fs1/change.txt")
|
||||
r=shelltest("zfs umount test_source1/fs1; zfs mount test_source1/fs1")
|
||||
|
||||
|
||||
#too small change, takes no snapshots
|
||||
with patch('time.strftime', return_value="20101111000001"):
|
||||
self.assertFalse(ZfsAutobackup("test test_target1 --verbose --min-change 100000".split(" ")).run())
|
||||
|
||||
#make big change
|
||||
r=shelltest("dd if=/dev/zero of=/test_source1/fs1/change.txt bs=200000 count=1")
|
||||
r=shelltest("zfs umount test_source1/fs1; zfs mount test_source1/fs1")
|
||||
|
||||
#bigger change, should take snapshot
|
||||
with patch('time.strftime', return_value="20101111000002"):
|
||||
self.assertFalse(ZfsAutobackup("test test_target1 --verbose --min-change 100000".split(" ")).run())
|
||||
|
||||
r=shelltest("zfs list -H -o name -r -t all "+TEST_POOLS)
|
||||
self.assertMultiLineEqual(r,"""
|
||||
test_source1
|
||||
test_source1/fs1
|
||||
test_source1/fs1@test-20101111000000
|
||||
test_source1/fs1@test-20101111000002
|
||||
test_source1/fs1/sub
|
||||
test_source1/fs1/sub@test-20101111000000
|
||||
test_source2
|
||||
test_source2/fs2
|
||||
test_source2/fs2/sub
|
||||
test_source2/fs2/sub@test-20101111000000
|
||||
test_source2/fs3
|
||||
test_source2/fs3/sub
|
||||
test_target1
|
||||
test_target1/test_source1
|
||||
test_target1/test_source1/fs1
|
||||
test_target1/test_source1/fs1@test-20101111000000
|
||||
test_target1/test_source1/fs1@test-20101111000002
|
||||
test_target1/test_source1/fs1/sub
|
||||
test_target1/test_source1/fs1/sub@test-20101111000000
|
||||
test_target1/test_source2
|
||||
test_target1/test_source2/fs2
|
||||
test_target1/test_source2/fs2/sub
|
||||
test_target1/test_source2/fs2/sub@test-20101111000000
|
||||
""")
|
||||
|
||||
def test_test(self):
|
||||
|
||||
#initial
|
||||
with patch('time.strftime', return_value="20101111000000"):
|
||||
self.assertFalse(ZfsAutobackup("test test_target1 --verbose --test".split(" ")).run())
|
||||
|
||||
r=shelltest("zfs list -H -o name -r -t all "+TEST_POOLS)
|
||||
self.assertMultiLineEqual(r,"""
|
||||
test_source1
|
||||
test_source1/fs1
|
||||
test_source1/fs1/sub
|
||||
test_source2
|
||||
test_source2/fs2
|
||||
test_source2/fs2/sub
|
||||
test_source2/fs3
|
||||
test_source2/fs3/sub
|
||||
test_target1
|
||||
""")
|
||||
|
||||
#actual make initial backup
|
||||
with patch('time.strftime', return_value="20101111000001"):
|
||||
self.assertFalse(ZfsAutobackup("test test_target1 --verbose".split(" ")).run())
|
||||
|
||||
|
||||
#test incremental
|
||||
with patch('time.strftime', return_value="20101111000000"):
|
||||
self.assertFalse(ZfsAutobackup("test test_target1 --verbose --test".split(" ")).run())
|
||||
|
||||
r=shelltest("zfs list -H -o name -r -t all "+TEST_POOLS)
|
||||
self.assertMultiLineEqual(r,"""
|
||||
test_source1
|
||||
test_source1/fs1
|
||||
test_source1/fs1@test-20101111000001
|
||||
test_source1/fs1/sub
|
||||
test_source1/fs1/sub@test-20101111000001
|
||||
test_source2
|
||||
test_source2/fs2
|
||||
test_source2/fs2/sub
|
||||
test_source2/fs2/sub@test-20101111000001
|
||||
test_source2/fs3
|
||||
test_source2/fs3/sub
|
||||
test_target1
|
||||
test_target1/test_source1
|
||||
test_target1/test_source1/fs1
|
||||
test_target1/test_source1/fs1@test-20101111000001
|
||||
test_target1/test_source1/fs1/sub
|
||||
test_target1/test_source1/fs1/sub@test-20101111000001
|
||||
test_target1/test_source2
|
||||
test_target1/test_source2/fs2
|
||||
test_target1/test_source2/fs2/sub
|
||||
test_target1/test_source2/fs2/sub@test-20101111000001
|
||||
""")
|
||||
|
||||
|
||||
def test_migrate(self):
|
||||
"""test migration from other snapshotting systems. zfs-autobackup should be able to continue from any common snapshot, not just its own."""
|
||||
|
||||
shelltest("zfs snapshot test_source1/fs1@migrate1")
|
||||
shelltest("zfs create test_target1/test_source1")
|
||||
shelltest("zfs send test_source1/fs1@migrate1| zfs recv test_target1/test_source1/fs1")
|
||||
|
||||
with patch('time.strftime', return_value="20101111000000"):
|
||||
self.assertFalse(ZfsAutobackup("test test_target1 --verbose".split(" ")).run())
|
||||
|
||||
r=shelltest("zfs list -H -o name -r -t all "+TEST_POOLS)
|
||||
self.assertMultiLineEqual(r,"""
|
||||
test_source1
|
||||
test_source1/fs1
|
||||
test_source1/fs1@migrate1
|
||||
test_source1/fs1@test-20101111000000
|
||||
test_source1/fs1/sub
|
||||
test_source1/fs1/sub@test-20101111000000
|
||||
test_source2
|
||||
test_source2/fs2
|
||||
test_source2/fs2/sub
|
||||
test_source2/fs2/sub@test-20101111000000
|
||||
test_source2/fs3
|
||||
test_source2/fs3/sub
|
||||
test_target1
|
||||
test_target1/test_source1
|
||||
test_target1/test_source1/fs1
|
||||
test_target1/test_source1/fs1@migrate1
|
||||
test_target1/test_source1/fs1@test-20101111000000
|
||||
test_target1/test_source1/fs1/sub
|
||||
test_target1/test_source1/fs1/sub@test-20101111000000
|
||||
test_target1/test_source2
|
||||
test_target1/test_source2/fs2
|
||||
test_target1/test_source2/fs2/sub
|
||||
test_target1/test_source2/fs2/sub@test-20101111000000
|
||||
""")
|
||||
|
||||
def test_keep0(self):
|
||||
"""test if keep-source=0 and keep-target=0 dont delete common snapshot and break backup"""
|
||||
|
||||
with patch('time.strftime', return_value="20101111000000"):
|
||||
self.assertFalse(ZfsAutobackup("test test_target1 --verbose --keep-source=0 --keep-target=0".split(" ")).run())
|
||||
|
||||
#make snapshot, shouldnt delete 0
|
||||
with patch('time.strftime', return_value="20101111000001"):
|
||||
self.assertFalse(ZfsAutobackup("test --verbose --keep-source=0 --keep-target=0 --allow-empty".split(" ")).run())
|
||||
|
||||
#make snapshot 2, shouldnt delete 0 since it has holds, but will delete 1 since it has no holds
|
||||
with patch('time.strftime', return_value="20101111000002"):
|
||||
self.assertFalse(ZfsAutobackup("test --verbose --keep-source=0 --keep-target=0 --allow-empty".split(" ")).run())
|
||||
|
||||
r = shelltest("zfs list -H -o name -r -t all " + TEST_POOLS)
|
||||
self.assertMultiLineEqual(r, """
|
||||
test_source1
|
||||
test_source1/fs1
|
||||
test_source1/fs1@test-20101111000000
|
||||
test_source1/fs1@test-20101111000002
|
||||
test_source1/fs1/sub
|
||||
test_source1/fs1/sub@test-20101111000000
|
||||
test_source1/fs1/sub@test-20101111000002
|
||||
test_source2
|
||||
test_source2/fs2
|
||||
test_source2/fs2/sub
|
||||
test_source2/fs2/sub@test-20101111000000
|
||||
test_source2/fs2/sub@test-20101111000002
|
||||
test_source2/fs3
|
||||
test_source2/fs3/sub
|
||||
test_target1
|
||||
test_target1/test_source1
|
||||
test_target1/test_source1/fs1
|
||||
test_target1/test_source1/fs1@test-20101111000000
|
||||
test_target1/test_source1/fs1/sub
|
||||
test_target1/test_source1/fs1/sub@test-20101111000000
|
||||
test_target1/test_source2
|
||||
test_target1/test_source2/fs2
|
||||
test_target1/test_source2/fs2/sub
|
||||
test_target1/test_source2/fs2/sub@test-20101111000000
|
||||
""")
|
||||
|
||||
# make another backup, but with --no-holds. we should naturally end up with only number 3
|
||||
with patch('time.strftime', return_value="20101111000003"):
|
||||
self.assertFalse(ZfsAutobackup("test test_target1 --verbose --keep-source=0 --keep-target=0 --no-holds --allow-empty".split(" ")).run())
|
||||
|
||||
r = shelltest("zfs list -H -o name -r -t all " + TEST_POOLS)
|
||||
self.assertMultiLineEqual(r, """
|
||||
test_source1
|
||||
test_source1/fs1
|
||||
test_source1/fs1@test-20101111000003
|
||||
test_source1/fs1/sub
|
||||
test_source1/fs1/sub@test-20101111000003
|
||||
test_source2
|
||||
test_source2/fs2
|
||||
test_source2/fs2/sub
|
||||
test_source2/fs2/sub@test-20101111000003
|
||||
test_source2/fs3
|
||||
test_source2/fs3/sub
|
||||
test_target1
|
||||
test_target1/test_source1
|
||||
test_target1/test_source1/fs1
|
||||
test_target1/test_source1/fs1@test-20101111000003
|
||||
test_target1/test_source1/fs1/sub
|
||||
test_target1/test_source1/fs1/sub@test-20101111000003
|
||||
test_target1/test_source2
|
||||
test_target1/test_source2/fs2
|
||||
test_target1/test_source2/fs2/sub
|
||||
test_target1/test_source2/fs2/sub@test-20101111000003
|
||||
""")
|
||||
|
||||
|
||||
# make snapshot 4, since we used no-holds, it will delete 3 on the source, breaking the backup
|
||||
with patch('time.strftime', return_value="20101111000004"):
|
||||
self.assertFalse(ZfsAutobackup("test --verbose --keep-source=0 --keep-target=0 --allow-empty".split(" ")).run())
|
||||
|
||||
r = shelltest("zfs list -H -o name -r -t all " + TEST_POOLS)
|
||||
self.assertMultiLineEqual(r, """
|
||||
test_source1
|
||||
test_source1/fs1
|
||||
test_source1/fs1@test-20101111000004
|
||||
test_source1/fs1/sub
|
||||
test_source1/fs1/sub@test-20101111000004
|
||||
test_source2
|
||||
test_source2/fs2
|
||||
test_source2/fs2/sub
|
||||
test_source2/fs2/sub@test-20101111000004
|
||||
test_source2/fs3
|
||||
test_source2/fs3/sub
|
||||
test_target1
|
||||
test_target1/test_source1
|
||||
test_target1/test_source1/fs1
|
||||
test_target1/test_source1/fs1@test-20101111000003
|
||||
test_target1/test_source1/fs1/sub
|
||||
test_target1/test_source1/fs1/sub@test-20101111000003
|
||||
test_target1/test_source2
|
||||
test_target1/test_source2/fs2
|
||||
test_target1/test_source2/fs2/sub
|
||||
test_target1/test_source2/fs2/sub@test-20101111000003
|
||||
""")
|
||||
|
||||
###########################
|
||||
# TODO:
|
||||
|
||||
def test_raw(self):
|
||||
|
||||
self.skipTest("todo: later when travis supports zfs 0.8")
|
||||
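The hold behaviour that test_keep0 exercises above can also be checked by hand: zfs-autobackup places a hold named "zfs_autobackup:<backup-name>" on the latest common snapshot (see ZfsDataset._hold_name further down in this changeset), and a held snapshot survives even a keep-source=0 schedule. A small sketch of such a manual check, assuming the same test pools and backup name "test" as in the tests; the helper itself is hypothetical and not part of this changeset:

import subprocess

def list_holds(snapshot):
    # `zfs holds -H` gives tab-separated output: dataset, hold tag, creation time
    out = subprocess.check_output(["zfs", "holds", "-H", snapshot]).decode()
    return [line.split("\t")[1] for line in out.splitlines() if line]

# list_holds("test_source1/fs1@test-20101111000000") is expected to contain
# "zfs_autobackup:test" while that snapshot is still needed as a common base.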
tests/test_zfsnode.py (new file, 125 lines added)
@@ -0,0 +1,125 @@
|
||||
from basetest import *
|
||||
from zfs_autobackup.LogStub import LogStub
|
||||
|
||||
|
||||
|
||||
class TestZfsNode(unittest2.TestCase):
|
||||
|
||||
def setUp(self):
|
||||
prepare_zpools()
|
||||
# return super().setUp()
|
||||
|
||||
|
||||
def test_consistent_snapshot(self):
|
||||
logger=LogStub()
|
||||
description="[Source]"
|
||||
node=ZfsNode("test", logger, description=description)
|
||||
|
||||
with self.subTest("first snapshot"):
|
||||
node.consistent_snapshot(node.selected_datasets, "test-1",100000)
|
||||
r=shelltest("zfs list -H -o name -r -t all "+TEST_POOLS)
|
||||
self.assertEqual(r,"""
|
||||
test_source1
|
||||
test_source1/fs1
|
||||
test_source1/fs1@test-1
|
||||
test_source1/fs1/sub
|
||||
test_source1/fs1/sub@test-1
|
||||
test_source2
|
||||
test_source2/fs2
|
||||
test_source2/fs2/sub
|
||||
test_source2/fs2/sub@test-1
|
||||
test_source2/fs3
|
||||
test_source2/fs3/sub
|
||||
test_target1
|
||||
""")
|
||||
|
||||
|
||||
with self.subTest("second snapshot, no changes, no snapshot"):
|
||||
node.consistent_snapshot(node.selected_datasets, "test-2",1)
|
||||
r=shelltest("zfs list -H -o name -r -t all "+TEST_POOLS)
|
||||
self.assertEqual(r,"""
|
||||
test_source1
|
||||
test_source1/fs1
|
||||
test_source1/fs1@test-1
|
||||
test_source1/fs1/sub
|
||||
test_source1/fs1/sub@test-1
|
||||
test_source2
|
||||
test_source2/fs2
|
||||
test_source2/fs2/sub
|
||||
test_source2/fs2/sub@test-1
|
||||
test_source2/fs3
|
||||
test_source2/fs3/sub
|
||||
test_target1
|
||||
""")
|
||||
|
||||
with self.subTest("second snapshot, no changes, empty snapshot"):
|
||||
node.consistent_snapshot(node.selected_datasets, "test-2",0)
|
||||
r=shelltest("zfs list -H -o name -r -t all "+TEST_POOLS)
|
||||
self.assertEqual(r,"""
|
||||
test_source1
|
||||
test_source1/fs1
|
||||
test_source1/fs1@test-1
|
||||
test_source1/fs1@test-2
|
||||
test_source1/fs1/sub
|
||||
test_source1/fs1/sub@test-1
|
||||
test_source1/fs1/sub@test-2
|
||||
test_source2
|
||||
test_source2/fs2
|
||||
test_source2/fs2/sub
|
||||
test_source2/fs2/sub@test-1
|
||||
test_source2/fs2/sub@test-2
|
||||
test_source2/fs3
|
||||
test_source2/fs3/sub
|
||||
test_target1
|
||||
""")
|
||||
|
||||
|
||||
def test_getselected(self):
|
||||
logger=LogStub()
|
||||
description="[Source]"
|
||||
node=ZfsNode("test", logger, description=description)
|
||||
s=pformat(node.selected_datasets)
|
||||
print(s)
|
||||
|
||||
#basics
|
||||
self.assertEqual (s, """[(local): test_source1/fs1,
|
||||
(local): test_source1/fs1/sub,
|
||||
(local): test_source2/fs2/sub]""")
|
||||
|
||||
#caching, so expect same result after changing it
|
||||
subprocess.check_call("zfs set autobackup:test=true test_source2/fs3", shell=True)
|
||||
self.assertEqual (s, """[(local): test_source1/fs1,
|
||||
(local): test_source1/fs1/sub,
|
||||
(local): test_source2/fs2/sub]""")
|
||||
|
||||
|
||||
def test_validcommand(self):
|
||||
logger=LogStub()
|
||||
description="[Source]"
|
||||
node=ZfsNode("test", logger, description=description)
|
||||
|
||||
|
||||
with self.subTest("test invalid option"):
|
||||
self.assertFalse(node.valid_command(["zfs", "send", "--invalid-option", "nonexisting"]))
|
||||
with self.subTest("test valid option"):
|
||||
self.assertTrue(node.valid_command(["zfs", "send", "-v", "nonexisting"]))
|
||||
|
||||
def test_supportedsendoptions(self):
|
||||
logger=LogStub()
|
||||
description="[Source]"
|
||||
node=ZfsNode("test", logger, description=description)
|
||||
# -D is probably always supported
|
||||
self.assertGreater(len(node.supported_send_options),0)
|
||||
|
||||
|
||||
def test_supportedrecvoptions(self):
|
||||
logger=LogStub()
|
||||
description="[Source]"
|
||||
# NOTE: this could hang via ssh if we don't close filehandles properly (which was a previous bug)
|
||||
node=ZfsNode("test", logger, description=description, ssh_to='localhost')
|
||||
self.assertIsInstance(node.supported_recv_options, list)
|
||||
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
unittest.main()
|
||||
zfs_autobackup/CachedProperty.py (new file, 39 lines added)
@@ -0,0 +1,39 @@
# NOTE: this should inherit from (object) to function correctly with python 2.7
class CachedProperty(object):
    """ A property that is only computed once per instance and
    then stores the result in _cached_properties of the object.

    Source: https://github.com/bottlepy/bottle/commit/fa7733e075da0d790d809aa3d2f53071897e6f76
    """

    def __init__(self, func):
        self.__doc__ = getattr(func, '__doc__')
        self.func = func

    def __get__(self, obj, cls):
        if obj is None:
            return self

        propname = self.func.__name__

        if not hasattr(obj, '_cached_properties'):
            obj._cached_properties = {}

        if propname not in obj._cached_properties:
            obj._cached_properties[propname] = self.func(obj)
            # value = obj.__dict__[propname] = self.func(obj)

        return obj._cached_properties[propname]

    @staticmethod
    def clear(obj):
        """clears the cache of obj"""
        if hasattr(obj, '_cached_properties'):
            obj._cached_properties = {}

    @staticmethod
    def is_cached(obj, propname):
        if hasattr(obj, '_cached_properties') and propname in obj._cached_properties:
            return True
        else:
            return False
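A minimal usage sketch of the CachedProperty decorator above (the Pool class here is hypothetical, not part of this changeset): the wrapped method runs once per instance, later reads come from _cached_properties until CachedProperty.clear() invalidates them.

from zfs_autobackup.CachedProperty import CachedProperty

class Pool(object):
    def __init__(self, name):
        self.name = name
        self.lookups = 0

    @CachedProperty
    def features(self):
        # imagine an expensive `zpool get` call here
        self.lookups += 1
        return ["async_destroy", "empty_bpobj"]

p = Pool("test_source1")
p.features
p.features                  # computed only once
assert p.lookups == 1
CachedProperty.clear(p)     # invalidate; the next access recomputes
p.features
assert p.lookups == 2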
zfs_autobackup/ExecuteNode.py (new file, 199 lines added)
@@ -0,0 +1,199 @@
|
||||
import os
|
||||
import select
|
||||
import subprocess
|
||||
|
||||
from zfs_autobackup.LogStub import LogStub
|
||||
|
||||
class ExecuteNode(LogStub):
|
||||
"""an endpoint to execute local or remote commands via ssh"""
|
||||
|
||||
def __init__(self, ssh_config=None, ssh_to=None, readonly=False, debug_output=False):
|
||||
"""ssh_config: custom ssh config
|
||||
ssh_to: server you want to ssh to. none means local
|
||||
readonly: only execute commands that don't make any changes (useful for testing-runs)
|
||||
debug_output: show output and exit codes of commands in debugging output.
|
||||
"""
|
||||
|
||||
self.ssh_config = ssh_config
|
||||
self.ssh_to = ssh_to
|
||||
self.readonly = readonly
|
||||
self.debug_output = debug_output
|
||||
|
||||
def __repr__(self):
|
||||
if self.ssh_to is None:
|
||||
return "(local)"
|
||||
else:
|
||||
return self.ssh_to
|
||||
|
||||
def _parse_stdout(self, line):
|
||||
"""parse stdout. can be overridden in subclass"""
|
||||
if self.debug_output:
|
||||
self.debug("STDOUT > " + line.rstrip())
|
||||
|
||||
def _parse_stderr(self, line, hide_errors):
|
||||
"""parse stderr. can be overridden in subclass"""
|
||||
if hide_errors:
|
||||
self.debug("STDERR > " + line.rstrip())
|
||||
else:
|
||||
self.error("STDERR > " + line.rstrip())
|
||||
|
||||
def _parse_stderr_pipe(self, line, hide_errors):
|
||||
"""parse stderr from pipe input process. can be overridden in subclass"""
|
||||
if hide_errors:
|
||||
self.debug("STDERR|> " + line.rstrip())
|
||||
else:
|
||||
self.error("STDERR|> " + line.rstrip())
|
||||
|
||||
def run(self, cmd, inp=None, tab_split=False, valid_exitcodes=None, readonly=False, hide_errors=False, pipe=False,
|
||||
return_stderr=False):
|
||||
"""run a command on the node cmd: the actual command, should be a list, where the first item is the command
|
||||
and the rest are parameters. input: Can be None, a string or a pipe-handle you got from another run()
|
||||
tab_split: split tabbed files in output into a list valid_exitcodes: list of valid exit codes for this
|
||||
command (checks exit code of both sides of a pipe) readonly: make this True if the command doesn't make any
|
||||
changes and is safe to execute in testmode hide_errors: don't show stderr output as error, instead show it as
|
||||
debugging output (use to hide expected errors) pipe: Instead of executing, return a pipe-handle to be used to
|
||||
input to another run() command. (just like a | in linux) return_stderr: return both stdout and stderr as a
|
||||
tuple. (only returns stderr from this side of the pipe)
|
||||
"""
|
||||
|
||||
if not valid_exitcodes:
|
||||
valid_exitcodes = [0]
|
||||
|
||||
encoded_cmd = []
|
||||
|
||||
# use ssh?
|
||||
if self.ssh_to is not None:
|
||||
encoded_cmd.append("ssh".encode('utf-8'))
|
||||
|
||||
if self.ssh_config is not None:
|
||||
encoded_cmd.extend(["-F".encode('utf-8'), self.ssh_config.encode('utf-8')])
|
||||
|
||||
encoded_cmd.append(self.ssh_to.encode('utf-8'))
|
||||
|
||||
# make sure the command gets all the data in utf8 format:
|
||||
# (this is necessary if LC_ALL=en_US.utf8 is not set in the environment)
|
||||
for arg in cmd:
|
||||
# add single quotes for remote commands to support spaces and other weird stuff (remote commands are
|
||||
# executed in a shell) and escape existing single quotes (bash needs ' to end the quoted string,
|
||||
# then a \' for the actual quote and then another ' to start a new quoted string) (and then python
|
||||
# needs the double \ to get a single \)
|
||||
encoded_cmd.append(("'" + arg.replace("'", "'\\''") + "'").encode('utf-8'))
|
||||
|
||||
else:
|
||||
for arg in cmd:
|
||||
encoded_cmd.append(arg.encode('utf-8'))
|
||||
|
||||
# debug and test stuff
|
||||
debug_txt = ""
|
||||
for c in encoded_cmd:
|
||||
debug_txt = debug_txt + " " + c.decode()
|
||||
|
||||
if pipe:
|
||||
debug_txt = debug_txt + " |"
|
||||
|
||||
if self.readonly and not readonly:
|
||||
self.debug("SKIP > " + debug_txt)
|
||||
else:
|
||||
if pipe:
|
||||
self.debug("PIPE > " + debug_txt)
|
||||
else:
|
||||
self.debug("RUN > " + debug_txt)
|
||||
|
||||
# determine stdin
|
||||
if inp is None:
|
||||
# NOTE: Not None, otherwise it reads stdin from terminal!
|
||||
stdin = subprocess.PIPE
|
||||
elif isinstance(inp, str) or type(inp) == 'unicode':
|
||||
self.debug("INPUT > \n" + inp.rstrip())
|
||||
stdin = subprocess.PIPE
|
||||
elif isinstance(inp, subprocess.Popen):
|
||||
self.debug("Piping input")
|
||||
stdin = inp.stdout
|
||||
else:
|
||||
raise (Exception("Program error: Incompatible input"))
|
||||
|
||||
if self.readonly and not readonly:
|
||||
# todo: what happens if input is piped?
|
||||
return
|
||||
|
||||
# execute and parse/return results
|
||||
p = subprocess.Popen(encoded_cmd, env=os.environ, stdout=subprocess.PIPE, stdin=stdin, stderr=subprocess.PIPE)
|
||||
|
||||
# Note: make streaming?
|
||||
if isinstance(inp, str) or type(inp) == 'unicode':
|
||||
p.stdin.write(inp.encode('utf-8'))
|
||||
|
||||
if p.stdin:
|
||||
p.stdin.close()
|
||||
|
||||
# return pipe
|
||||
if pipe:
|
||||
return p
|
||||
|
||||
# handle all outputs
|
||||
if isinstance(inp, subprocess.Popen):
|
||||
selectors = [p.stdout, p.stderr, inp.stderr]
|
||||
inp.stdout.close()  # otherwise the input process won't exit when ours does
|
||||
else:
|
||||
selectors = [p.stdout, p.stderr]
|
||||
|
||||
output_lines = []
|
||||
error_lines = []
|
||||
while True:
|
||||
(read_ready, write_ready, ex_ready) = select.select(selectors, [], [])
|
||||
eof_count = 0
|
||||
if p.stdout in read_ready:
|
||||
line = p.stdout.readline().decode('utf-8')
|
||||
if line != "":
|
||||
if tab_split:
|
||||
output_lines.append(line.rstrip().split('\t'))
|
||||
else:
|
||||
output_lines.append(line.rstrip())
|
||||
self._parse_stdout(line)
|
||||
else:
|
||||
eof_count = eof_count + 1
|
||||
if p.stderr in read_ready:
|
||||
line = p.stderr.readline().decode('utf-8')
|
||||
if line != "":
|
||||
if tab_split:
|
||||
error_lines.append(line.rstrip().split('\t'))
|
||||
else:
|
||||
error_lines.append(line.rstrip())
|
||||
self._parse_stderr(line, hide_errors)
|
||||
else:
|
||||
eof_count = eof_count + 1
|
||||
if isinstance(inp, subprocess.Popen) and (inp.stderr in read_ready):
|
||||
line = inp.stderr.readline().decode('utf-8')
|
||||
if line != "":
|
||||
self._parse_stderr_pipe(line, hide_errors)
|
||||
else:
|
||||
eof_count = eof_count + 1
|
||||
|
||||
# stop if both processes are done and all filehandles are EOF:
|
||||
if (p.poll() is not None) and (
|
||||
(not isinstance(inp, subprocess.Popen)) or inp.poll() is not None) and eof_count == len(selectors):
|
||||
break
|
||||
|
||||
p.stderr.close()
|
||||
p.stdout.close()
|
||||
|
||||
if self.debug_output:
|
||||
self.debug("EXIT > {}".format(p.returncode))
|
||||
|
||||
# handle piped process error output and exit codes
|
||||
if isinstance(inp, subprocess.Popen):
|
||||
inp.stderr.close()
|
||||
inp.stdout.close()
|
||||
|
||||
if self.debug_output:
|
||||
self.debug("EXIT |> {}".format(inp.returncode))
|
||||
if valid_exitcodes and inp.returncode not in valid_exitcodes:
|
||||
raise (subprocess.CalledProcessError(inp.returncode, "(pipe)"))
|
||||
|
||||
if valid_exitcodes and p.returncode not in valid_exitcodes:
|
||||
raise (subprocess.CalledProcessError(p.returncode, encoded_cmd))
|
||||
|
||||
if return_stderr:
|
||||
return output_lines, error_lines
|
||||
else:
|
||||
return output_lines
|
||||
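A short sketch of how ExecuteNode.run() is meant to be used (the hostnames and dataset names are placeholders, not taken from this changeset): a plain call returns stdout as a list of lines, while pipe=True returns a subprocess handle that can be fed into a second run() on another node, which is how zfs send | zfs recv is wired up elsewhere in this release.

from zfs_autobackup.ExecuteNode import ExecuteNode

local = ExecuteNode(debug_output=True)          # run commands locally
remote = ExecuteNode(ssh_to="backupserver")     # run commands via ssh

# simple command: returns stdout as a list of stripped lines
lines = local.run(["zfs", "list", "-H", "-o", "name"], readonly=True)

# piped command: zfs send on one node into zfs recv on the other
send_pipe = local.run(["zfs", "send", "pool/data@snap"], pipe=True)
remote.run(["zfs", "recv", "backup/data"], inp=send_pipe)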
zfs_autobackup/LogConsole.py (new file, 46 lines added)
@@ -0,0 +1,46 @@
# python 2 compatibility
from __future__ import print_function

import sys


colorama = False
if sys.stdout.isatty():
    try:
        import colorama
    except ImportError:
        colorama = False
        pass


class LogConsole:
    """Log-class that outputs to console, adding colors if needed"""

    def __init__(self, show_debug=False, show_verbose=False):
        self.last_log = ""
        self.show_debug = show_debug
        self.show_verbose = show_verbose

    @staticmethod
    def error(txt):
        if colorama:
            print(colorama.Fore.RED + colorama.Style.BRIGHT + "! " + txt + colorama.Style.RESET_ALL, file=sys.stderr)
        else:
            print("! " + txt, file=sys.stderr)
        sys.stderr.flush()

    def verbose(self, txt):
        if self.show_verbose:
            if colorama:
                print(colorama.Style.NORMAL + " " + txt + colorama.Style.RESET_ALL)
            else:
                print(" " + txt)
            sys.stdout.flush()

    def debug(self, txt):
        if self.show_debug:
            if colorama:
                print(colorama.Fore.GREEN + "# " + txt + colorama.Style.RESET_ALL)
            else:
                print("# " + txt)
            sys.stdout.flush()
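For completeness, a tiny sketch of the console logger above (colorama may or may not be installed; output falls back to plain text either way):

from zfs_autobackup.LogConsole import LogConsole

log = LogConsole(show_verbose=True, show_debug=False)
log.verbose("selected test_source1/fs1")   # printed, because show_verbose=True
log.debug("zfs list ...")                  # suppressed, because show_debug=False
log.error("target pool missing")           # always printed, to stderr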
zfs_autobackup/LogStub.py (new file, 15 lines added)
@@ -0,0 +1,15 @@
# Used for baseclasses that don't implement their own logging (like ExecuteNode).
# Usually logging is implemented in subclasses (like ZfsNode, which is a subclass of ExecuteNode),
# but for regression testing it's nice to have these stubs.


class LogStub:
    """Just a stub, usually overridden in subclasses."""

    # simple logging stubs
    def debug(self, txt):
        print("DEBUG : " + txt)

    def verbose(self, txt):
        print("VERBOSE: " + txt)

    def error(self, txt):
        print("ERROR : " + txt)
zfs_autobackup/Thinner.py (new file, 94 lines added)
@@ -0,0 +1,94 @@
import time

from zfs_autobackup.ThinnerRule import ThinnerRule


class Thinner:
    """progressive thinner (universal, used for cleaning up snapshots)"""

    def __init__(self, schedule_str=""):
        """schedule_str: comma separated list of ThinnerRules. A plain number specifies how many snapshots to always
        keep.
        """

        self.rules = []
        self.always_keep = 0

        if schedule_str == "":
            return

        rule_strs = schedule_str.split(",")
        for rule_str in rule_strs:
            if rule_str.isdigit():
                self.always_keep = int(rule_str)
                if self.always_keep < 0:
                    raise (Exception("Number of snapshots to keep can't be negative: {}".format(self.always_keep)))
            else:
                self.rules.append(ThinnerRule(rule_str))

    def human_rules(self):
        """get list of human readable rules"""
        ret = []
        if self.always_keep:
            ret.append("Keep the last {} snapshot{}.".format(self.always_keep, self.always_keep != 1 and "s" or ""))
        for rule in self.rules:
            ret.append(rule.human_str)

        return ret

    def thin(self, objects, keep_objects=None, now=None):
        """thin list of objects with current schedule rules. objects: list of objects to thin. every object should
        have a timestamp attribute. keep_objects: objects to always keep (these should also be in the normal objects
        list, so we can use them to perhaps delete other obsolete objects)

        return( keeps, removes )
        """

        if not keep_objects:
            keep_objects = []

        # always keep a number of the last objects?
        if self.always_keep:
            # all of them
            if len(objects) <= self.always_keep:
                return objects, []

            # determine which ones
            always_keep_objects = objects[-self.always_keep:]
        else:
            always_keep_objects = []

        # determine time blocks
        time_blocks = {}
        for rule in self.rules:
            time_blocks[rule.period] = {}

        if not now:
            now = int(time.time())

        keeps = []
        removes = []

        # traverse objects
        for thisobject in objects:
            # important they are ints!
            timestamp = int(thisobject.timestamp)
            age = int(now) - timestamp

            # store in the correct time blocks, per period-size, if not too old yet
            # e.g.: look if there is ANY timeblock that wants to keep this object
            keep = False
            for rule in self.rules:
                if age <= rule.ttl:
                    block_nr = int(timestamp / rule.period)
                    if block_nr not in time_blocks[rule.period]:
                        time_blocks[rule.period][block_nr] = True
                        keep = True

            # keep it according to schedule, or keep it because it is in the keep_objects list
            if keep or thisobject in keep_objects or thisobject in always_keep_objects:
                keeps.append(thisobject)
            else:
                removes.append(thisobject)

        return keeps, removes
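A small sketch of the thinning logic above (the Snap objects are hypothetical; as the docstring says, only a timestamp attribute is required): the plain number keeps the newest snapshots unconditionally, and each rule keeps one object per time block until its ttl expires.

import time
from zfs_autobackup.Thinner import Thinner

class Snap:
    def __init__(self, timestamp):
        self.timestamp = timestamp

now = int(time.time())
# one "snapshot" per hour for the last 3 days, oldest first
snaps = [Snap(now - hours * 3600) for hours in range(72, 0, -1)]

# keep the last 10 always, plus one per day for a week
thinner = Thinner("10,1d1w")
keeps, removes = thinner.thin(snaps, now=now)
print(len(keeps), "kept,", len(removes), "scheduled for destruction")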
zfs_autobackup/ThinnerRule.py (new file, 68 lines added)
@@ -0,0 +1,68 @@
import re


class ThinnerRule:
    """a thinning schedule rule for Thinner"""

    TIME_NAMES = {
        'y': 3600 * 24 * 365.25,
        'm': 3600 * 24 * 30,
        'w': 3600 * 24 * 7,
        'd': 3600 * 24,
        'h': 3600,
        'min': 60,
        's': 1,
    }

    TIME_DESC = {
        'y': 'year',
        'm': 'month',
        'w': 'week',
        'd': 'day',
        'h': 'hour',
        'min': 'minute',
        's': 'second',
    }

    def __init__(self, rule_str):
        """parse scheduling string
        example:
            daily snapshot, remove after a week:      1d1w
            weekly snapshot, remove after a month:    1w1m
            monthly snapshot, remove after 6 months:  1m6m
            yearly snapshot, remove after 2 years:    1y2y
            keep all snapshots, remove after a day:   1s1d
            keep nothing:                             1s1s
        """

        rule_str = rule_str.lower()
        matches = re.findall("([0-9]*)([a-z]*)([0-9]*)([a-z]*)", rule_str)[0]

        period_amount = int(matches[0])
        period_unit = matches[1]
        ttl_amount = int(matches[2])
        ttl_unit = matches[3]

        if period_unit not in self.TIME_NAMES:
            raise (Exception("Invalid period string in schedule: '{}'".format(rule_str)))

        if ttl_unit not in self.TIME_NAMES:
            raise (Exception("Invalid ttl string in schedule: '{}'".format(rule_str)))

        self.period = period_amount * self.TIME_NAMES[period_unit]
        self.ttl = ttl_amount * self.TIME_NAMES[ttl_unit]

        if self.period > self.ttl:
            raise (Exception("Period can't be longer than ttl in schedule: '{}'".format(rule_str)))

        self.rule_str = rule_str

        self.human_str = "Keep every {} {}{}, delete after {} {}{}.".format(
            period_amount, self.TIME_DESC[period_unit], period_amount != 1 and "s" or "", ttl_amount,
            self.TIME_DESC[ttl_unit], ttl_amount != 1 and "s" or "")

    def __str__(self):
        """get schedule as a schedule string"""

        return self.rule_str
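The scheduling strings from the docstring parse into a period and a ttl in seconds; a quick sketch, with values following directly from TIME_NAMES above:

from zfs_autobackup.ThinnerRule import ThinnerRule

rule = ThinnerRule("1d1w")
print(rule.period)     # 86400 seconds: keep one snapshot per day...
print(rule.ttl)        # 604800 seconds: ...for one week
print(rule.human_str)  # "Keep every 1 day, delete after 1 week."

ThinnerRule("1w1d")    # raises: period can't be longer than ttl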
zfs_autobackup/ZfsAutobackup.py (new file, 376 lines added)
@@ -0,0 +1,376 @@
|
||||
import argparse
|
||||
import sys
|
||||
import time
|
||||
|
||||
from zfs_autobackup.Thinner import Thinner
|
||||
from zfs_autobackup.ZfsDataset import ZfsDataset
|
||||
from zfs_autobackup.LogConsole import LogConsole
|
||||
from zfs_autobackup.ZfsNode import ZfsNode
|
||||
from zfs_autobackup.ThinnerRule import ThinnerRule
|
||||
|
||||
|
||||
class ZfsAutobackup:
|
||||
"""main class"""
|
||||
|
||||
VERSION = "3.0.1-beta4"
|
||||
HEADER = "zfs-autobackup v{} - Copyright 2020 E.H.Eefting (edwin@datux.nl)".format(VERSION)
|
||||
|
||||
def __init__(self, argv, print_arguments=True):
|
||||
|
||||
# helps with investigating failed regression tests:
|
||||
if print_arguments:
|
||||
print("ARGUMENTS: " + " ".join(argv))
|
||||
|
||||
parser = argparse.ArgumentParser(
|
||||
description=self.HEADER,
|
||||
epilog='When a filesystem fails, zfs_backup will continue and report the number of failures at the end. '
|
||||
'Also the exit code will indicate the number of failures. Full manual at: https://github.com/psy0rz/zfs_autobackup')
|
||||
parser.add_argument('--ssh-config', default=None, help='Custom ssh client config')
|
||||
parser.add_argument('--ssh-source', default=None,
|
||||
help='Source host to get backup from. (user@hostname) Default %(default)s.')
|
||||
parser.add_argument('--ssh-target', default=None,
|
||||
help='Target host to push backup to. (user@hostname) Default %(default)s.')
|
||||
parser.add_argument('--keep-source', type=str, default="10,1d1w,1w1m,1m1y",
|
||||
help='Thinning schedule for old source snapshots. Default: %(default)s')
|
||||
parser.add_argument('--keep-target', type=str, default="10,1d1w,1w1m,1m1y",
|
||||
help='Thinning schedule for old target snapshots. Default: %(default)s')
|
||||
|
||||
parser.add_argument('backup_name', metavar='backup-name',
|
||||
help='Name of the backup (you should set the zfs property "autobackup:backup-name" to '
|
||||
'true on filesystems you want to back up)')
|
||||
parser.add_argument('target_path', metavar='target-path', default=None, nargs='?',
|
||||
help='Target ZFS filesystem (optional: if not specified, zfs-autobackup will only operate '
|
||||
'as snapshot-tool on source)')
|
||||
|
||||
parser.add_argument('--other-snapshots', action='store_true',
|
||||
help='Send over other snapshots as well, not just the ones created by this tool.')
|
||||
parser.add_argument('--no-snapshot', action='store_true',
|
||||
help='Don\'t create new snapshots (useful for finishing uncompleted backups, or cleanups)')
|
||||
parser.add_argument('--no-send', action='store_true',
|
||||
help='Don\'t send snapshots (useful for cleanups, or if you want a separate send-cronjob)')
|
||||
# parser.add_argument('--no-thinning', action='store_true', help='Don\'t run the thinner.')
|
||||
parser.add_argument('--min-change', type=int, default=1,
|
||||
help='Number of bytes written after which we consider a dataset changed (default %('
|
||||
'default)s)')
|
||||
parser.add_argument('--allow-empty', action='store_true',
|
||||
help='If nothing has changed, still create empty snapshots. (same as --min-change=0)')
|
||||
parser.add_argument('--ignore-replicated', action='store_true',
|
||||
help='Ignore datasets that seem to be replicated some other way. (No changes since '
|
||||
'latest snapshot. Useful for Proxmox HA replication)')
|
||||
parser.add_argument('--no-holds', action='store_true',
|
||||
help='Don\'t hold snapshots. (Faster)')
|
||||
|
||||
parser.add_argument('--resume', action='store_true', help=argparse.SUPPRESS)
|
||||
parser.add_argument('--strip-path', default=0, type=int,
|
||||
help='Number of directories to strip from target path (use 1 when cloning zones between 2 '
|
||||
'SmartOS machines)')
|
||||
# parser.add_argument('--buffer', default="", help='Use mbuffer with specified size to speedup zfs transfer.
|
||||
# (e.g. --buffer 1G) Will also show nice progress output.')
|
||||
|
||||
parser.add_argument('--clear-refreservation', action='store_true',
|
||||
help='Filter "refreservation" property. (recommended, safes space. same as '
|
||||
'--filter-properties refreservation)')
|
||||
parser.add_argument('--clear-mountpoint', action='store_true',
|
||||
help='Set property canmount=noauto for new datasets. (recommended, prevents mount '
|
||||
'conflicts. same as --set-properties canmount=noauto)')
|
||||
parser.add_argument('--filter-properties', type=str,
|
||||
help='List of properties to "filter" when receiving filesystems. (you can still restore '
|
||||
'them with zfs inherit -S)')
|
||||
parser.add_argument('--set-properties', type=str,
|
||||
help='List of properties to override when receiving filesystems. (you can still restore '
|
||||
'them with zfs inherit -S)')
|
||||
parser.add_argument('--rollback', action='store_true',
|
||||
help='Rollback changes to the latest target snapshot before starting. (normally you can '
|
||||
'prevent changes by setting the readonly property on the target_path to on)')
|
||||
parser.add_argument('--destroy-incompatible', action='store_true',
|
||||
help='Destroy incompatible snapshots on target. Use with care! (implies --rollback)')
|
||||
parser.add_argument('--destroy-missing', type=str, default=None,
|
||||
help='Destroy datasets on target that are missing on the source. Specify the time since '
|
||||
'the last snapshot, e.g: --destroy-missing 30d')
|
||||
parser.add_argument('--ignore-transfer-errors', action='store_true',
|
||||
help='Ignore transfer errors (still checks if received filesystem exists. useful for '
|
||||
'acltype errors)')
|
||||
parser.add_argument('--raw', action='store_true',
|
||||
help='For encrypted datasets, send data exactly as it exists on disk.')
|
||||
|
||||
parser.add_argument('--test', action='store_true',
|
||||
help='don\'t change anything, just show what would be done (still does all read-only '
|
||||
'operations)')
|
||||
parser.add_argument('--verbose', action='store_true', help='verbose output')
|
||||
parser.add_argument('--debug', action='store_true',
|
||||
help='Show zfs commands that are executed, stops after an exception.')
|
||||
parser.add_argument('--debug-output', action='store_true',
|
||||
help='Show zfs commands and their output/exit codes. (noisy)')
|
||||
parser.add_argument('--progress', action='store_true',
|
||||
help='show zfs progress output. Enabled automatically on ttys. (use --no-progress to disable)')
|
||||
parser.add_argument('--no-progress', action='store_true', help=argparse.SUPPRESS) #needed to workaround a zfs recv -v bug
|
||||
|
||||
# note: args is the only global variable we use, since it's a global readonly setting anyway
|
||||
args = parser.parse_args(argv)
|
||||
|
||||
self.args = args
|
||||
|
||||
#auto enable progress?
|
||||
if sys.stderr.isatty() and not args.no_progress:
|
||||
args.progress = True
|
||||
|
||||
if args.debug_output:
|
||||
args.debug = True
|
||||
|
||||
if self.args.test:
|
||||
self.args.verbose = True
|
||||
|
||||
if args.allow_empty:
|
||||
args.min_change = 0
|
||||
|
||||
if args.destroy_incompatible:
|
||||
args.rollback = True
|
||||
|
||||
self.log = LogConsole(show_debug=self.args.debug, show_verbose=self.args.verbose)
|
||||
|
||||
if args.resume:
|
||||
self.verbose("NOTE: The --resume option isn't needed anymore (its autodetected now)")
|
||||
|
||||
if args.target_path is not None and args.target_path[0] == "/":
|
||||
self.log.error("Target should not start with a /")
|
||||
sys.exit(255)
|
||||
|
||||
def verbose(self, txt):
|
||||
self.log.verbose(txt)
|
||||
|
||||
def error(self, txt):
|
||||
self.log.error(txt)
|
||||
|
||||
def debug(self, txt):
|
||||
self.log.debug(txt)
|
||||
|
||||
def set_title(self, title):
|
||||
self.log.verbose("")
|
||||
self.log.verbose("#### " + title)
|
||||
|
||||
# sync datasets, or thin-only on both sides
|
||||
# target is needed for this.
|
||||
def sync_datasets(self, source_node, source_datasets):
|
||||
|
||||
description = "[Target]"
|
||||
|
||||
self.set_title("Target settings")
|
||||
|
||||
target_thinner = Thinner(self.args.keep_target)
|
||||
target_node = ZfsNode(self.args.backup_name, self, ssh_config=self.args.ssh_config, ssh_to=self.args.ssh_target,
|
||||
readonly=self.args.test, debug_output=self.args.debug_output, description=description,
|
||||
thinner=target_thinner)
|
||||
target_node.verbose("Receive datasets under: {}".format(self.args.target_path))
|
||||
|
||||
if self.args.no_send:
|
||||
self.set_title("Thinning source and target")
|
||||
else:
|
||||
self.set_title("Sending and thinning")
|
||||
|
||||
# check if exists, to prevent vague errors
|
||||
target_dataset = ZfsDataset(target_node, self.args.target_path)
|
||||
if not target_dataset.exists:
|
||||
self.error("Target path '{}' does not exist. Please create this dataset first.".format(target_dataset))
|
||||
return 255
|
||||
|
||||
if self.args.filter_properties:
|
||||
filter_properties = self.args.filter_properties.split(",")
|
||||
else:
|
||||
filter_properties = []
|
||||
|
||||
if self.args.set_properties:
|
||||
set_properties = self.args.set_properties.split(",")
|
||||
else:
|
||||
set_properties = []
|
||||
|
||||
if self.args.clear_refreservation:
|
||||
filter_properties.append("refreservation")
|
||||
|
||||
if self.args.clear_mountpoint:
|
||||
set_properties.append("canmount=noauto")
|
||||
|
||||
# sync datasets
|
||||
fail_count = 0
|
||||
target_datasets = []
|
||||
for source_dataset in source_datasets:
|
||||
|
||||
try:
|
||||
# determine corresponding target_dataset
|
||||
target_name = self.args.target_path + "/" + source_dataset.lstrip_path(self.args.strip_path)
|
||||
target_dataset = ZfsDataset(target_node, target_name)
|
||||
target_datasets.append(target_dataset)
|
||||
|
||||
# ensure parents exists
|
||||
# TODO: this isn't perfect yet, in some cases it can create parents when it shouldn't.
|
||||
if not self.args.no_send \
|
||||
and target_dataset.parent not in target_datasets \
|
||||
and not target_dataset.parent.exists:
|
||||
target_dataset.parent.create_filesystem(parents=True)
|
||||
|
||||
# determine common zpool features
|
||||
source_features = source_node.get_zfs_pool(source_dataset.split_path()[0]).features
|
||||
target_features = target_node.get_zfs_pool(target_dataset.split_path()[0]).features
|
||||
common_features = source_features and target_features
|
||||
# source_dataset.debug("Common features: {}".format(common_features))
|
||||
|
||||
source_dataset.sync_snapshots(target_dataset, show_progress=self.args.progress,
|
||||
features=common_features, filter_properties=filter_properties,
|
||||
set_properties=set_properties,
|
||||
ignore_recv_exit_code=self.args.ignore_transfer_errors,
|
||||
holds=not self.args.no_holds, rollback=self.args.rollback,
|
||||
raw=self.args.raw, other_snapshots=self.args.other_snapshots,
|
||||
no_send=self.args.no_send,
|
||||
destroy_incompatible=self.args.destroy_incompatible)
|
||||
except Exception as e:
|
||||
fail_count = fail_count + 1
|
||||
source_dataset.error("FAILED: " + str(e))
|
||||
if self.args.debug:
|
||||
raise
|
||||
|
||||
# if not self.args.no_thinning:
|
||||
self.thin_missing_targets(ZfsDataset(target_node, self.args.target_path), target_datasets)
|
||||
|
||||
return fail_count
|
||||
|
||||
def thin_missing_targets(self, target_dataset, used_target_datasets):
|
||||
"""thin/destroy target datasets that are missing on the source."""
|
||||
|
||||
self.debug("Thinning obsolete datasets")
|
||||
|
||||
for dataset in target_dataset.recursive_datasets:
|
||||
try:
|
||||
if dataset not in used_target_datasets:
|
||||
dataset.debug("Missing on source, thinning")
|
||||
dataset.thin()
|
||||
|
||||
# destroy_missing enabled?
|
||||
if self.args.destroy_missing is not None:
|
||||
|
||||
# cant do anything without our own snapshots
|
||||
if not dataset.our_snapshots:
|
||||
if dataset.datasets:
|
||||
dataset.debug("Destroy missing: ignoring")
|
||||
else:
|
||||
dataset.verbose(
|
||||
"Destroy missing: has no snapshots made by us. (please destroy manually)")
|
||||
else:
|
||||
# past the deadline?
|
||||
deadline_ttl = ThinnerRule("0s" + self.args.destroy_missing).ttl
|
||||
now = int(time.time())
|
||||
if dataset.our_snapshots[-1].timestamp + deadline_ttl > now:
|
||||
dataset.verbose("Destroy missing: Waiting for deadline.")
|
||||
else:
|
||||
|
||||
dataset.debug("Destroy missing: Removing our snapshots.")
|
||||
|
||||
# remove all our snapshots, except the last, to save space in case we fail later on
|
||||
for snapshot in dataset.our_snapshots[:-1]:
|
||||
snapshot.destroy(fail_exception=True)
|
||||
|
||||
# does it have other snapshots?
|
||||
has_others = False
|
||||
for snapshot in dataset.snapshots:
|
||||
if not snapshot.is_ours():
|
||||
has_others = True
|
||||
break
|
||||
|
||||
if has_others:
|
||||
dataset.verbose("Destroy missing: Still in use by other snapshots")
|
||||
else:
|
||||
if dataset.datasets:
|
||||
dataset.verbose("Destroy missing: Still has children here.")
|
||||
else:
|
||||
dataset.verbose("Destroy missing.")
|
||||
dataset.our_snapshots[-1].destroy(fail_exception=True)
|
||||
dataset.destroy(fail_exception=True)
|
||||
|
||||
except Exception as e:
|
||||
dataset.error("Error during destoy missing ({})".format(str(e)))
|
||||
|
||||
def thin_source(self, source_datasets):
|
||||
|
||||
self.set_title("Thinning source")
|
||||
|
||||
for source_dataset in source_datasets:
|
||||
source_dataset.thin(skip_holds=True)
|
||||
|
||||
def run(self):
|
||||
|
||||
try:
|
||||
self.verbose(self.HEADER)
|
||||
|
||||
if self.args.test:
|
||||
self.verbose("TEST MODE - SIMULATING WITHOUT MAKING ANY CHANGES")
|
||||
|
||||
self.set_title("Source settings")
|
||||
|
||||
description = "[Source]"
|
||||
source_thinner = Thinner(self.args.keep_source)
|
||||
source_node = ZfsNode(self.args.backup_name, self, ssh_config=self.args.ssh_config,
|
||||
ssh_to=self.args.ssh_source, readonly=self.args.test,
|
||||
debug_output=self.args.debug_output, description=description, thinner=source_thinner)
|
||||
source_node.verbose(
|
||||
"Selects all datasets that have property 'autobackup:{}=true' (or childs of datasets that have "
|
||||
"'autobackup:{}=child')".format(
|
||||
self.args.backup_name, self.args.backup_name))
|
||||
|
||||
self.set_title("Selecting")
|
||||
selected_source_datasets = source_node.selected_datasets
|
||||
if not selected_source_datasets:
|
||||
self.error(
|
||||
"No source filesystems selected, please do a 'zfs set autobackup:{0}=true' on the source datasets "
|
||||
"you want to select.".format(
|
||||
self.args.backup_name))
|
||||
return 255
|
||||
|
||||
source_datasets = []
|
||||
|
||||
# filter out already replicated stuff?
|
||||
if not self.args.ignore_replicated:
|
||||
source_datasets = selected_source_datasets
|
||||
else:
|
||||
self.set_title("Filtering already replicated filesystems")
|
||||
for selected_source_dataset in selected_source_datasets:
|
||||
if selected_source_dataset.is_changed(self.args.min_change):
|
||||
source_datasets.append(selected_source_dataset)
|
||||
else:
|
||||
selected_source_dataset.verbose("Ignoring, already replicated")
|
||||
|
||||
if not self.args.no_snapshot:
|
||||
self.set_title("Snapshotting")
|
||||
source_node.consistent_snapshot(source_datasets, source_node.new_snapshotname(),
|
||||
min_changed_bytes=self.args.min_change)
|
||||
|
||||
# if target is specified, we sync the datasets, otherwise we just thin the source. (e.g. snapshot mode)
|
||||
if self.args.target_path:
|
||||
fail_count = self.sync_datasets(source_node, source_datasets)
|
||||
else:
|
||||
self.thin_source(source_datasets)
|
||||
fail_count = 0
|
||||
|
||||
if not fail_count:
|
||||
if self.args.test:
|
||||
self.set_title("All tests successfull.")
|
||||
else:
|
||||
self.set_title("All operations completed successfully")
|
||||
if not self.args.target_path:
|
||||
self.verbose("(No target_path specified, only operated as snapshot tool.)")
|
||||
|
||||
else:
|
||||
if fail_count != 255:
|
||||
self.error("{} failures!".format(fail_count))
|
||||
|
||||
if self.args.test:
|
||||
self.verbose("")
|
||||
self.verbose("TEST MODE - DID NOT MAKE ANY CHANGES!")
|
||||
|
||||
return fail_count
|
||||
|
||||
except Exception as e:
|
||||
self.error("Exception: " + str(e))
|
||||
if self.args.debug:
|
||||
raise
|
||||
return 255
|
||||
except KeyboardInterrupt:
|
||||
self.error("Aborted")
|
||||
return 255
|
||||
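The regression tests above drive the tool exactly the way a cron job would, so a hedged usage sketch mirrors them: build the argument list, construct ZfsAutobackup and check the returned failure count (0 means success, 255 a fatal error). The hostnames and pool names below are placeholders, not taken from this changeset.

from zfs_autobackup.ZfsAutobackup import ZfsAutobackup

args = ("offsite1 backuppool/offsite1 "
        "--ssh-source root@fileserver "
        "--keep-source=10,1d1w,1w1m,1m1y --keep-target=10,1d1w,1w1m,1m1y "
        "--clear-refreservation --clear-mountpoint --verbose").split(" ")

fail_count = ZfsAutobackup(args).run()
if fail_count:
    print("{} datasets failed to backup".format(fail_count))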
zfs_autobackup/ZfsDataset.py (new file, 802 lines added)
@@ -0,0 +1,802 @@
|
||||
import re
|
||||
import subprocess
|
||||
import time
|
||||
|
||||
from zfs_autobackup.CachedProperty import CachedProperty
|
||||
|
||||
|
||||
class ZfsDataset:
|
||||
"""a zfs dataset (filesystem/volume/snapshot/clone)
|
||||
Note that a dataset doesn't have to actually exist (yet/anymore)
|
||||
Also most properties are cached for performance-reasons, but also to allow --test to function correctly.
|
||||
|
||||
"""
|
||||
|
||||
# illegal properties per dataset type. these will be removed from --set-properties and --filter-properties
|
||||
ILLEGAL_PROPERTIES = {
|
||||
'filesystem': [],
|
||||
'volume': ["canmount"],
|
||||
}
|
||||
|
||||
def __init__(self, zfs_node, name, force_exists=None):
|
||||
"""name: full path of the zfs dataset exists: specify if you already know a dataset exists or not. for
|
||||
performance and testing reasons. (otherwise it will have to check with zfs list when needed)
|
||||
"""
|
||||
self.zfs_node = zfs_node
|
||||
self.name = name # full name
|
||||
self._virtual_snapshots = []
|
||||
self.invalidate()
|
||||
self.force_exists = force_exists
|
||||
|
||||
def __repr__(self):
|
||||
return "{}: {}".format(self.zfs_node, self.name)
|
||||
|
||||
def __str__(self):
|
||||
return self.name
|
||||
|
||||
def __eq__(self, obj):
|
||||
if not isinstance(obj, ZfsDataset):
|
||||
return False
|
||||
|
||||
return self.name == obj.name
|
||||
|
||||
def verbose(self, txt):
|
||||
self.zfs_node.verbose("{}: {}".format(self.name, txt))
|
||||
|
||||
def error(self, txt):
|
||||
self.zfs_node.error("{}: {}".format(self.name, txt))
|
||||
|
||||
def debug(self, txt):
|
||||
self.zfs_node.debug("{}: {}".format(self.name, txt))
|
||||
|
||||
def invalidate(self):
|
||||
"""clear caches"""
|
||||
CachedProperty.clear(self)
|
||||
self.force_exists = None
|
||||
self._virtual_snapshots = []
|
||||
|
||||
def split_path(self):
|
||||
"""return the path elements as an array"""
|
||||
return self.name.split("/")
|
||||
|
||||
def lstrip_path(self, count):
|
||||
"""return name with first count components stripped"""
|
||||
return "/".join(self.split_path()[count:])
|
||||
|
||||
def rstrip_path(self, count):
|
||||
"""return name with last count components stripped"""
|
||||
return "/".join(self.split_path()[:-count])
|
||||
|
||||
@property
|
||||
def filesystem_name(self):
|
||||
"""filesystem part of the name (before the @)"""
|
||||
if self.is_snapshot:
|
||||
(filesystem, snapshot) = self.name.split("@")
|
||||
return filesystem
|
||||
else:
|
||||
return self.name
|
||||
|
||||
@property
|
||||
def snapshot_name(self):
|
||||
"""snapshot part of the name"""
|
||||
if not self.is_snapshot:
|
||||
raise (Exception("This is not a snapshot"))
|
||||
|
||||
(filesystem, snapshot_name) = self.name.split("@")
|
||||
return snapshot_name
|
||||
|
||||
@property
|
||||
def is_snapshot(self):
|
||||
"""true if this dataset is a snapshot"""
|
||||
return self.name.find("@") != -1
|
||||
|
||||
@CachedProperty
|
||||
def parent(self):
|
||||
"""get zfs-parent of this dataset. for snapshots this means it will get the filesystem/volume that it belongs
|
||||
to. otherwise it will return the parent according to path
|
||||
|
||||
we cache this so everything in the parent that is cached also stays.
|
||||
"""
|
||||
if self.is_snapshot:
|
||||
return ZfsDataset(self.zfs_node, self.filesystem_name)
|
||||
else:
|
||||
return ZfsDataset(self.zfs_node, self.rstrip_path(1))
|
||||
|
||||
def find_prev_snapshot(self, snapshot, other_snapshots=False):
|
||||
"""find previous snapshot in this dataset. None if it doesn't exist.
|
||||
|
||||
other_snapshots: set to true to also return snapshots that were not created by us. (is_ours)
|
||||
"""
|
||||
|
||||
if self.is_snapshot:
|
||||
raise (Exception("Please call this on a dataset."))
|
||||
|
||||
index = self.find_snapshot_index(snapshot)
|
||||
while index:
|
||||
index = index - 1
|
||||
if other_snapshots or self.snapshots[index].is_ours():
|
||||
return self.snapshots[index]
|
||||
return None
|
||||
|
||||
def find_next_snapshot(self, snapshot, other_snapshots=False):
|
||||
"""find next snapshot in this dataset. None if it doesn't exist"""
|
||||
|
||||
if self.is_snapshot:
|
||||
raise (Exception("Please call this on a dataset."))
|
||||
|
||||
index = self.find_snapshot_index(snapshot)
|
||||
while index is not None and index < len(self.snapshots) - 1:
|
||||
index = index + 1
|
||||
if other_snapshots or self.snapshots[index].is_ours():
|
||||
return self.snapshots[index]
|
||||
return None
|
||||
|
||||
@CachedProperty
|
||||
def exists(self):
|
||||
"""check if dataset exists.
|
||||
Use force_exists to force a specific value, if you already know it. Useful for performance reasons"""
|
||||
|
||||
if self.force_exists is not None:
|
||||
self.debug("Checking if filesystem exists: was forced to {}".format(self.force_exists))
|
||||
return self.force_exists
|
||||
else:
|
||||
self.debug("Checking if filesystem exists")
|
||||
|
||||
return (self.zfs_node.run(tab_split=True, cmd=["zfs", "list", self.name], readonly=True, valid_exitcodes=[0, 1],
|
||||
hide_errors=True) and True)
|
||||
|
||||
def create_filesystem(self, parents=False):
|
||||
"""create a filesystem"""
|
||||
if parents:
|
||||
self.verbose("Creating filesystem and parents")
|
||||
self.zfs_node.run(["zfs", "create", "-p", self.name])
|
||||
else:
|
||||
self.verbose("Creating filesystem")
|
||||
self.zfs_node.run(["zfs", "create", self.name])
|
||||
|
||||
self.force_exists = True
|
||||
|
||||
def destroy(self, fail_exception=False):
|
||||
"""destroy the dataset. by default failures are not an exception, so we can continue making backups"""
|
||||
|
||||
self.verbose("Destroying")
|
||||
|
||||
if self.is_snapshot:
|
||||
self.release()
|
||||
|
||||
try:
|
||||
self.zfs_node.run(["zfs", "destroy", self.name])
|
||||
self.invalidate()
|
||||
self.force_exists = False
|
||||
return True
|
||||
except subprocess.CalledProcessError:
|
||||
if not fail_exception:
|
||||
return False
|
||||
else:
|
||||
raise
|
||||
|
||||
@CachedProperty
|
||||
def properties(self):
|
||||
"""all zfs properties"""
|
||||
|
||||
cmd = [
|
||||
"zfs", "get", "-H", "-o", "property,value", "-p", "all", self.name
|
||||
]
|
||||
|
||||
if not self.exists:
|
||||
return {}
|
||||
|
||||
self.debug("Getting zfs properties")
|
||||
|
||||
ret = {}
|
||||
for pair in self.zfs_node.run(tab_split=True, cmd=cmd, readonly=True, valid_exitcodes=[0]):
|
||||
if len(pair) == 2:
|
||||
ret[pair[0]] = pair[1]
|
||||
|
||||
return ret
|
||||
|
||||
def is_changed(self, min_changed_bytes=1):
|
||||
"""dataset is changed since ANY latest snapshot ?"""
|
||||
self.debug("Checking if dataset is changed")
|
||||
|
||||
if min_changed_bytes == 0:
|
||||
return True
|
||||
|
||||
if int(self.properties['written']) < min_changed_bytes:
|
||||
return False
|
||||
else:
|
||||
return True
|
||||
|
||||
def is_ours(self):
|
||||
"""return true if this snapshot is created by this backup_name"""
|
||||
if re.match("^" + self.zfs_node.backup_name + "-[0-9]*$", self.snapshot_name):
|
||||
return True
|
||||
else:
|
||||
return False
|
||||
|
||||
@property
|
||||
def _hold_name(self):
|
||||
return "zfs_autobackup:" + self.zfs_node.backup_name
|
||||
|
||||
@property
|
||||
def holds(self):
|
||||
"""get list of holds for dataset"""
|
||||
|
||||
output = self.zfs_node.run(["zfs", "holds", "-H", self.name], valid_exitcodes=[0], tab_split=True,
|
||||
readonly=True)
|
||||
return map(lambda fields: fields[1], output)
|
||||
|
||||
def is_hold(self):
|
||||
"""did we hold this snapshot?"""
|
||||
return self._hold_name in self.holds
|
||||
|
||||
def hold(self):
|
||||
"""hold dataset"""
|
||||
self.debug("holding")
|
||||
self.zfs_node.run(["zfs", "hold", self._hold_name, self.name], valid_exitcodes=[0, 1])
|
||||
|
||||
def release(self):
|
||||
"""release dataset"""
|
||||
if self.zfs_node.readonly or self.is_hold():
|
||||
self.debug("releasing")
|
||||
self.zfs_node.run(["zfs", "release", self._hold_name, self.name], valid_exitcodes=[0, 1])
|
||||
|
||||
@property
|
||||
def timestamp(self):
|
||||
"""get timestamp from snapshot name. Only works for our own snapshots with the correct format."""
|
||||
time_str = re.findall("^.*-([0-9]*)$", self.snapshot_name)[0]
|
||||
if len(time_str) != 14:
|
||||
raise (Exception("Snapshot has invalid timestamp in name: {}".format(self.snapshot_name)))
|
||||
|
||||
# new format:
|
||||
time_secs = time.mktime(time.strptime(time_str, "%Y%m%d%H%M%S"))
|
||||
return time_secs
|
||||
|
||||
def from_names(self, names):
|
||||
"""convert a list of names to a list ZfsDatasets for this zfs_node"""
|
||||
ret = []
|
||||
for name in names:
|
||||
ret.append(ZfsDataset(self.zfs_node, name))
|
||||
|
||||
return ret
|
||||
|
||||
# def add_virtual_snapshot(self, snapshot):
|
||||
# """pretend a snapshot exists (usefull in test mode)"""
|
||||
#
|
||||
# # NOTE: we could just call self.snapshots.append() but this would trigger a zfs list which is not always needed.
|
||||
# if CachedProperty.is_cached(self, 'snapshots'):
|
||||
# # already cached so add it
|
||||
# print ("ADDED")
|
||||
# self.snapshots.append(snapshot)
|
||||
# else:
|
||||
# # self.snapshots will add it when requested
|
||||
# print ("ADDED VIRT")
|
||||
# self._virtual_snapshots.append(snapshot)
|
||||
|
||||
@CachedProperty
|
||||
def snapshots(self):
|
||||
"""get all snapshots of this dataset"""
|
||||
|
||||
|
||||
if not self.exists:
|
||||
return []
|
||||
|
||||
self.debug("Getting snapshots")
|
||||
|
||||
cmd = [
|
||||
"zfs", "list", "-d", "1", "-r", "-t", "snapshot", "-H", "-o", "name", self.name
|
||||
]
|
||||
|
||||
return self.from_names(self.zfs_node.run(cmd=cmd, readonly=True))
|
||||
|
||||
@property
|
||||
def our_snapshots(self):
|
||||
"""get list of snapshots creates by us of this dataset"""
|
||||
ret = []
|
||||
for snapshot in self.snapshots:
|
||||
if snapshot.is_ours():
|
||||
ret.append(snapshot)
|
||||
|
||||
return ret
|
||||
|
||||
def find_snapshot(self, snapshot):
|
||||
"""find snapshot by snapshot (can be a snapshot_name or a different ZfsDataset )"""
|
||||
|
||||
if not isinstance(snapshot, ZfsDataset):
|
||||
snapshot_name = snapshot
|
||||
else:
|
||||
snapshot_name = snapshot.snapshot_name
|
||||
|
||||
for snapshot in self.snapshots:
|
||||
if snapshot.snapshot_name == snapshot_name:
|
||||
return snapshot
|
||||
|
||||
return None
|
||||
|
||||
def find_snapshot_index(self, snapshot):
|
||||
"""find snapshot index by snapshot (can be a snapshot_name or ZfsDataset)"""
|
||||
|
||||
if not isinstance(snapshot, ZfsDataset):
|
||||
snapshot_name = snapshot
|
||||
else:
|
||||
snapshot_name = snapshot.snapshot_name
|
||||
|
||||
index = 0
|
||||
for snapshot in self.snapshots:
|
||||
if snapshot.snapshot_name == snapshot_name:
|
||||
return index
|
||||
index = index + 1
|
||||
|
||||
return None
|
||||
|
||||
@CachedProperty
|
||||
def written_since_ours(self):
|
||||
"""get number of bytes written since our last snapshot"""
|
||||
|
||||
latest_snapshot = self.our_snapshots[-1]
|
||||
|
||||
self.debug("Getting bytes written since our last snapshot")
|
||||
cmd = ["zfs", "get", "-H", "-ovalue", "-p", "written@" + str(latest_snapshot), self.name]
|
||||
|
||||
output = self.zfs_node.run(readonly=True, tab_split=False, cmd=cmd, valid_exitcodes=[0])
|
||||
|
||||
return int(output[0])
|
||||
|
||||
def is_changed_ours(self, min_changed_bytes=1):
|
||||
"""dataset is changed since OUR latest snapshot?"""
|
||||
|
||||
if min_changed_bytes == 0:
|
||||
return True
|
||||
|
||||
if not self.our_snapshots:
|
||||
return True
|
||||
|
||||
# NOTE: filesystems can have a very small amount written without actual changes in some cases
|
||||
if self.written_since_ours < min_changed_bytes:
|
||||
return False
|
||||
|
||||
return True
|
||||
|
||||
@CachedProperty
|
||||
def recursive_datasets(self, types="filesystem,volume"):
|
||||
"""get all (non-snapshot) datasets recursively under us"""
|
||||
|
||||
self.debug("Getting all recursive datasets under us")
|
||||
|
||||
names = self.zfs_node.run(tab_split=False, readonly=True, valid_exitcodes=[0], cmd=[
|
||||
"zfs", "list", "-r", "-t", types, "-o", "name", "-H", self.name
|
||||
])
|
||||
|
||||
return self.from_names(names[1:])
|
||||
|
||||
@CachedProperty
|
||||
def datasets(self, types="filesystem,volume"):
|
||||
"""get all (non-snapshot) datasets directly under us"""
|
||||
|
||||
self.debug("Getting all datasets under us")
|
||||
|
||||
names = self.zfs_node.run(tab_split=False, readonly=True, valid_exitcodes=[0], cmd=[
|
||||
"zfs", "list", "-r", "-t", types, "-o", "name", "-H", "-d", "1", self.name
|
||||
])
|
||||
|
||||
return self.from_names(names[1:])
|
||||
|
||||
def send_pipe(self, features, prev_snapshot=None, resume_token=None, show_progress=False, raw=False):
|
||||
"""returns a pipe with zfs send output for this snapshot
|
||||
|
||||
resume_token: resume sending from this token. (in that case we don't need to know snapshot names)
|
||||
|
||||
"""
|
||||
# build source command
|
||||
cmd = []
|
||||
|
||||
cmd.extend(["zfs", "send", ])
|
||||
|
||||
# all kind of performance options:
|
||||
if 'large_blocks' in features and "-L" in self.zfs_node.supported_send_options:
|
||||
cmd.append("-L") # large block support (only if recordsize>128k which is seldomly used)
|
||||
|
||||
if 'embedded_data' in features and "-e" in self.zfs_node.supported_send_options:
|
||||
cmd.append("-e") # WRITE_EMBEDDED, more compact stream
|
||||
|
||||
if "-c" in self.zfs_node.supported_send_options:
|
||||
cmd.append("-c") # use compressed WRITE records
|
||||
|
||||
# NOTE: performance is usually worse with this option, according to manual
|
||||
# also -D will be deprecated in newer ZFS versions
|
||||
# if not resume:
|
||||
# if "-D" in self.zfs_node.supported_send_options:
|
||||
# cmd.append("-D") # dedupped stream, sends less duplicate data
|
||||
|
||||
# raw? (for encryption)
|
||||
if raw:
|
||||
cmd.append("--raw")
|
||||
|
||||
# progress output
|
||||
if show_progress:
|
||||
cmd.append("-v")
|
||||
cmd.append("-P")
|
||||
|
||||
# resume a previous send? (don't need more parameters in that case)
|
||||
if resume_token:
|
||||
cmd.extend(["-t", resume_token])
|
||||
|
||||
else:
|
||||
# send properties
|
||||
cmd.append("-p")
|
||||
|
||||
# incremental?
|
||||
if prev_snapshot:
|
||||
cmd.extend(["-i", "@" + prev_snapshot.snapshot_name])
|
||||
|
||||
cmd.append(self.name)
|
||||
|
||||
# if args.buffer and args.ssh_source!="local":
|
||||
# cmd.append("|mbuffer -m {}".format(args.buffer))
|
||||
|
||||
# NOTE: this doesn't start the send yet, it only returns a subprocess.Pipe
|
||||
return self.zfs_node.run(cmd, pipe=True)
|
||||
|
||||
def recv_pipe(self, pipe, features, filter_properties=None, set_properties=None, ignore_exit_code=False):
|
||||
"""starts a zfs recv for this snapshot and uses pipe as input
|
||||
|
||||
note: you can call it on both a snapshot or a filesystem object.
|
||||
The resulting zfs command is the same, only our object cache is invalidated differently.
|
||||
"""
|
||||
|
||||
if set_properties is None:
|
||||
set_properties = []
|
||||
|
||||
if filter_properties is None:
|
||||
filter_properties = []
|
||||
|
||||
# build target command
|
||||
cmd = []
|
||||
|
||||
cmd.extend(["zfs", "recv"])
|
||||
|
||||
# don't mount filesystem that is received
|
||||
cmd.append("-u")
|
||||
|
||||
for property_ in filter_properties:
|
||||
cmd.extend(["-x", property_])
|
||||
|
||||
for property_ in set_properties:
|
||||
cmd.extend(["-o", property_])
|
||||
|
||||
# verbose output
|
||||
cmd.append("-v")
|
||||
|
||||
if 'extensible_dataset' in features and "-s" in self.zfs_node.supported_recv_options:
|
||||
# support resuming
|
||||
self.debug("Enabled resume support")
|
||||
cmd.append("-s")
|
||||
|
||||
cmd.append(self.filesystem_name)
|
||||
|
||||
if ignore_exit_code:
|
||||
valid_exitcodes = []
|
||||
else:
|
||||
valid_exitcodes = [0]
|
||||
|
||||
self.zfs_node.reset_progress()
|
||||
self.zfs_node.run(cmd, inp=pipe, valid_exitcodes=valid_exitcodes)
|
||||
|
||||
# invalidate cache, but we at least know we exist now
|
||||
self.invalidate()
|
||||
|
||||
# in test mode we assume everything was ok and it exists
|
||||
if self.zfs_node.readonly:
|
||||
self.force_exists = True
|
||||
|
||||
# check if transfer was really ok (exit codes have been wrong before due to bugs in zfs-utils and can be
|
||||
# ignored by some parameters)
|
||||
if not self.exists:
|
||||
self.error("error during transfer")
|
||||
raise (Exception("Target doesn't exist after transfer, something went wrong."))
|
||||
|
||||
# if args.buffer and args.ssh_target!="local":
|
||||
# cmd.append("|mbuffer -m {}".format(args.buffer))
|
||||
|
||||
def transfer_snapshot(self, target_snapshot, features, prev_snapshot=None, show_progress=False,
|
||||
filter_properties=None, set_properties=None, ignore_recv_exit_code=False, resume_token=None,
|
||||
raw=False):
|
||||
"""transfer this snapshot to target_snapshot. specify prev_snapshot for incremental transfer
|
||||
|
||||
connects a send_pipe() to recv_pipe()
|
||||
"""
|
||||
|
||||
if set_properties is None:
|
||||
set_properties = []
|
||||
if filter_properties is None:
|
||||
filter_properties = []
|
||||
|
||||
self.debug("Transfer snapshot to {}".format(target_snapshot.filesystem_name))
|
||||
|
||||
if resume_token:
|
||||
target_snapshot.verbose("resuming")
|
||||
|
||||
# initial or increment
|
||||
if not prev_snapshot:
|
||||
target_snapshot.verbose("receiving full".format(self.snapshot_name))
|
||||
else:
|
||||
# incremental
|
||||
target_snapshot.verbose("receiving incremental".format(self.snapshot_name))
|
||||
|
||||
# do it
|
||||
pipe = self.send_pipe(features=features, show_progress=show_progress, prev_snapshot=prev_snapshot,
|
||||
resume_token=resume_token, raw=raw)
|
||||
target_snapshot.recv_pipe(pipe, features=features, filter_properties=filter_properties,
|
||||
set_properties=set_properties, ignore_exit_code=ignore_recv_exit_code)
|
||||
|
||||
def abort_resume(self):
|
||||
"""abort current resume state"""
|
||||
self.zfs_node.run(["zfs", "recv", "-A", self.name])
|
||||
|
||||
def rollback(self):
|
||||
"""rollback to latest existing snapshot on this dataset"""
|
||||
|
||||
for snapshot in reversed(self.snapshots):
|
||||
if snapshot.exists:
|
||||
self.debug("Rolling back")
|
||||
self.zfs_node.run(["zfs", "rollback", snapshot.name])
|
||||
return
|
||||
|
||||
def get_resume_snapshot(self, resume_token):
|
||||
"""returns snapshot that will be resumed by this resume token (run this on source with target-token)"""
|
||||
# use zfs send -n option to determine this
|
||||
# NOTE: on smartos stderr, on linux stdout
|
||||
(stdout, stderr) = self.zfs_node.run(["zfs", "send", "-t", resume_token, "-n", "-v"], valid_exitcodes=[0, 255],
|
||||
readonly=True, return_stderr=True)
|
||||
if stdout:
|
||||
lines = stdout
|
||||
else:
|
||||
lines = stderr
|
||||
for line in lines:
|
||||
matches = re.findall("toname = .*@(.*)", line)
|
||||
if matches:
|
||||
snapshot_name = matches[0]
|
||||
snapshot = ZfsDataset(self.zfs_node, self.filesystem_name + "@" + snapshot_name)
|
||||
snapshot.debug("resume token belongs to this snapshot")
|
||||
return snapshot
|
||||
|
||||
return None
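
# Illustrative sketch (not part of the module): extracting the snapshot name from
# "zfs send -n -v -t <token>" output with the same regex get_resume_snapshot() uses.
# The sample line below is hypothetical.
import re

sample_line = "        toname = rpool/data@offsite1-20200101120000"
matches = re.findall("toname = .*@(.*)", sample_line)
if matches:
    print(matches[0])  # offsite1-20200101120000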

    def thin_list(self, keeps=None, ignores=None):
        """determines the list of snapshots that should be kept or deleted based on the thinning schedule. cull the herd!

        keeps: list of snapshots to always keep (usually the last)
        ignores: snapshots to completely ignore (usually incompatible target snapshots that are going to be
        destroyed anyway)

        returns: ( keeps, obsoletes )
        """

        if ignores is None:
            ignores = []
        if keeps is None:
            keeps = []

        snapshots = [snapshot for snapshot in self.our_snapshots if snapshot not in ignores]

        return self.zfs_node.thinner.thin(snapshots, keep_objects=keeps)

    def thin(self, skip_holds=False):
        """destroys snapshots according to thin_list, except the last snapshot"""

        (keeps, obsoletes) = self.thin_list(keeps=self.our_snapshots[-1:])
        for obsolete in obsoletes:
            if skip_holds and obsolete.is_hold():
                obsolete.verbose("Keeping (common snapshot)")
            else:
                obsolete.destroy()
                self.snapshots.remove(obsolete)

    def find_common_snapshot(self, target_dataset):
        """find the latest common snapshot between us and the target
        returns None if it's an initial transfer
        """
        if not target_dataset.snapshots:
            # target has nothing yet
            return None
        else:
            # snapshot=self.find_snapshot(target_dataset.snapshots[-1].snapshot_name)

            # if not snapshot:
            # try to find a common snapshot
            for source_snapshot in reversed(self.snapshots):
                if target_dataset.find_snapshot(source_snapshot):
                    source_snapshot.debug("common snapshot")
                    return source_snapshot
            target_dataset.error("Can't find common snapshot with source.")
            raise (Exception("You probably need to delete the target dataset to fix this."))

    def find_start_snapshot(self, common_snapshot, other_snapshots):
        """finds the first snapshot to send"""

        if not common_snapshot:
            if not self.snapshots:
                start_snapshot = None
            else:
                # start from the beginning
                start_snapshot = self.snapshots[0]

                if not start_snapshot.is_ours() and not other_snapshots:
                    # try to start at a snapshot that's ours
                    start_snapshot = self.find_next_snapshot(start_snapshot, other_snapshots)
        else:
            start_snapshot = self.find_next_snapshot(common_snapshot, other_snapshots)

        return start_snapshot

    def find_incompatible_snapshots(self, common_snapshot):
        """returns a list of snapshots that are incompatible for a zfs recv onto the common_snapshot.
        all direct followup snapshots with written=0 are compatible."""

        ret = []

        if common_snapshot and self.snapshots:
            followup = True
            for snapshot in self.snapshots[self.find_snapshot_index(common_snapshot) + 1:]:
                if not followup or int(snapshot.properties['written']) != 0:
                    followup = False
                    ret.append(snapshot)

        return ret
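
# Illustrative sketch (not part of the module): the written=0 rule on plain data.
# Snapshots after the common one stay compatible only while every directly following
# snapshot has written=0; the first snapshot with written!=0 breaks the chain and that
# snapshot plus everything after it is reported as incompatible. Names are made up.
snapshots_after_common = [("daily-1", 0), ("manual-x", 8192), ("daily-2", 0)]

incompatible = []
followup = True
for name, written in snapshots_after_common:
    if not followup or written != 0:
        followup = False
        incompatible.append(name)

print(incompatible)  # ['manual-x', 'daily-2']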

    def get_allowed_properties(self, filter_properties, set_properties):
        """only returns the lists of allowed properties for this dataset type"""

        allowed_filter_properties = []
        allowed_set_properties = []
        illegal_properties = self.ILLEGAL_PROPERTIES[self.properties['type']]
        for set_property in set_properties:
            (property_, value) = set_property.split("=")
            if property_ not in illegal_properties:
                allowed_set_properties.append(set_property)

        for filter_property in filter_properties:
            if filter_property not in illegal_properties:
                allowed_filter_properties.append(filter_property)

        return allowed_filter_properties, allowed_set_properties
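
# Illustrative sketch (not part of the module): how property filtering behaves for one
# dataset type. The illegal-property list below is hypothetical; the real one comes
# from ILLEGAL_PROPERTIES keyed by the dataset's 'type' property.
illegal_properties = ["canmount"]
set_properties = ["compression=lz4", "canmount=noauto"]
filter_properties = ["canmount", "quota"]

allowed_set = [p for p in set_properties if p.split("=")[0] not in illegal_properties]
allowed_filter = [p for p in filter_properties if p not in illegal_properties]

print(allowed_set)     # ['compression=lz4']
print(allowed_filter)  # ['quota']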

    def sync_snapshots(self, target_dataset, features, show_progress=False, filter_properties=None, set_properties=None,
                       ignore_recv_exit_code=False, holds=True, rollback=False, raw=False, other_snapshots=False,
                       no_send=False, destroy_incompatible=False):
        """sync this dataset's snapshots to target_dataset, while also thinning out old snapshots along the way."""

        if set_properties is None:
            set_properties = []
        if filter_properties is None:
            filter_properties = []

        # determine common and start snapshot
        target_dataset.debug("Determining start snapshot")
        common_snapshot = self.find_common_snapshot(target_dataset)
        start_snapshot = self.find_start_snapshot(common_snapshot, other_snapshots)
        # these should be destroyed before attempting zfs recv:
        incompatible_target_snapshots = target_dataset.find_incompatible_snapshots(common_snapshot)

        # make the target snapshot list the same as the source, by adding virtual non-existing ones to the list.
        target_dataset.debug("Creating virtual target snapshots")
        source_snapshot = start_snapshot
        while source_snapshot:
            # create virtual target snapshot
            virtual_snapshot = ZfsDataset(target_dataset.zfs_node,
                                          target_dataset.filesystem_name + "@" + source_snapshot.snapshot_name,
                                          force_exists=False)
            target_dataset.snapshots.append(virtual_snapshot)
            source_snapshot = self.find_next_snapshot(source_snapshot, other_snapshots)

        # now let the thinner decide what we want on both sides as final state (after all transfers are done)
        if self.our_snapshots:
            self.debug("Create thinning list")
            (source_keeps, source_obsoletes) = self.thin_list(keeps=[self.our_snapshots[-1]])
        else:
            source_obsoletes = []

        if target_dataset.our_snapshots:
            (target_keeps, target_obsoletes) = target_dataset.thin_list(keeps=[target_dataset.our_snapshots[-1]],
                                                                        ignores=incompatible_target_snapshots)
        else:
            target_keeps = []
            target_obsoletes = []

        # on source: destroy all obsoletes before common. but after common, only delete snapshots that the target also
        # doesn't want to explicitly keep
        before_common = True
        for source_snapshot in self.snapshots:
            if common_snapshot and source_snapshot.snapshot_name == common_snapshot.snapshot_name:
                before_common = False
                # never destroy the common snapshot
            else:
                target_snapshot = target_dataset.find_snapshot(source_snapshot)
                if (source_snapshot in source_obsoletes) and (before_common or (target_snapshot not in target_keeps)):
                    source_snapshot.destroy()

        # on target: destroy everything that's obsolete, except common_snapshot
        for target_snapshot in target_dataset.snapshots:
            if (target_snapshot in target_obsoletes) and (
                    not common_snapshot or target_snapshot.snapshot_name != common_snapshot.snapshot_name):
                if target_snapshot.exists:
                    target_snapshot.destroy()

        # now actually transfer the snapshots, if we want to
        if no_send:
            return

        # resume?
        resume_token = None
        if 'receive_resume_token' in target_dataset.properties:
            resume_token = target_dataset.properties['receive_resume_token']
            # not valid anymore?
            resume_snapshot = self.get_resume_snapshot(resume_token)
            if not resume_snapshot or start_snapshot.snapshot_name != resume_snapshot.snapshot_name:
                target_dataset.verbose("Can't resume, resume token no longer valid.")
                target_dataset.abort_resume()
                resume_token = None

        # incompatible target snapshots?
        if incompatible_target_snapshots:
            if not destroy_incompatible:
                for snapshot in incompatible_target_snapshots:
                    snapshot.error("Incompatible snapshot")
                raise (Exception("Please destroy incompatible snapshots or use --destroy-incompatible."))
            else:
                for snapshot in incompatible_target_snapshots:
                    snapshot.verbose("Incompatible snapshot")
                    snapshot.destroy()
                    target_dataset.snapshots.remove(snapshot)

        # rollback target to latest?
        if rollback:
            target_dataset.rollback()

        # now actually transfer the snapshots
        prev_source_snapshot = common_snapshot
        source_snapshot = start_snapshot
        while source_snapshot:
            target_snapshot = target_dataset.find_snapshot(source_snapshot)  # still virtual

            # does the target actually want it?
            if target_snapshot not in target_obsoletes:
                # NOTE: should we let transfer_snapshot handle this?
                (allowed_filter_properties, allowed_set_properties) = self.get_allowed_properties(filter_properties,
                                                                                                  set_properties)
                source_snapshot.transfer_snapshot(target_snapshot, features=features,
                                                  prev_snapshot=prev_source_snapshot, show_progress=show_progress,
                                                  filter_properties=allowed_filter_properties,
                                                  set_properties=allowed_set_properties,
                                                  ignore_recv_exit_code=ignore_recv_exit_code,
                                                  resume_token=resume_token, raw=raw)
                resume_token = None

                # hold the new common snapshots and release the previous ones
                if holds:
                    target_snapshot.hold()
                    source_snapshot.hold()

                if prev_source_snapshot:
                    if holds:
                        prev_source_snapshot.release()
                        target_dataset.find_snapshot(prev_source_snapshot).release()

                    # we may now destroy the previous source snapshot if it's obsolete
                    if prev_source_snapshot in source_obsoletes:
                        prev_source_snapshot.destroy()

                    # destroy the previous target snapshot if obsolete (usually this is only the common_snapshot,
                    # the rest was already destroyed or will not be sent)
                    prev_target_snapshot = target_dataset.find_snapshot(prev_source_snapshot)
                    if prev_target_snapshot in target_obsoletes:
                        prev_target_snapshot.destroy()

                prev_source_snapshot = source_snapshot
            else:
                source_snapshot.debug("skipped (target doesn't need it)")
                # was it actually a resume?
                if resume_token:
                    target_dataset.debug("aborting resume, since we don't want that snapshot anymore")
                    target_dataset.abort_resume()
                    resume_token = None

            source_snapshot = self.find_next_snapshot(source_snapshot, other_snapshots)
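
# Illustrative usage sketch (not part of the module): wiring two local ZfsNode objects
# together and syncing each selected dataset to a backup path. The PrintLogger class,
# the "offsite1" backup name and the "backup/" target prefix are all hypothetical; a
# real run needs a zfs binary and an autobackup:offsite1 property set on the source.
from zfs_autobackup.ZfsNode import ZfsNode
from zfs_autobackup.ZfsDataset import ZfsDataset

class PrintLogger:
    def verbose(self, txt): print("V " + txt)
    def error(self, txt): print("E " + txt)
    def debug(self, txt): pass

source_node = ZfsNode("offsite1", PrintLogger(), description="[source]")
target_node = ZfsNode("offsite1", PrintLogger(), description="[target]")

for source_dataset in source_node.selected_datasets:
    target_dataset = ZfsDataset(target_node, "backup/" + source_dataset.name)
    features = []  # normally the intersection of the source and target pool feature lists
    source_dataset.sync_snapshots(target_dataset, features=features, show_progress=True)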
zfs_autobackup/ZfsNode.py (new file)
@@ -0,0 +1,238 @@
# python 2 compatibility
from __future__ import print_function
import re
import subprocess
import sys
import time

from zfs_autobackup.ExecuteNode import ExecuteNode
from zfs_autobackup.Thinner import Thinner
from zfs_autobackup.CachedProperty import CachedProperty
from zfs_autobackup.ZfsPool import ZfsPool
from zfs_autobackup.ZfsDataset import ZfsDataset


class ZfsNode(ExecuteNode):
    """a node that contains zfs datasets. implements global (systemwide/pool wide) zfs commands"""

    def __init__(self, backup_name, logger, ssh_config=None, ssh_to=None, readonly=False, description="",
                 debug_output=False, thinner=Thinner()):
        self.backup_name = backup_name
        if not description and ssh_to:
            self.description = ssh_to
        else:
            self.description = description

        self.logger = logger

        if ssh_config:
            self.verbose("Using custom SSH config: {}".format(ssh_config))

        if ssh_to:
            self.verbose("Datasets on: {}".format(ssh_to))
        else:
            self.verbose("Datasets are local")

        rules = thinner.human_rules()
        if rules:
            for rule in rules:
                self.verbose(rule)
        else:
            self.verbose("Keep no old snapshots")

        self.thinner = thinner

        # list of ZfsPools
        self.__pools = {}

        self._progress_total_bytes = 0
        self._progress_start_time = time.time()

        ExecuteNode.__init__(self, ssh_config=ssh_config, ssh_to=ssh_to, readonly=readonly, debug_output=debug_output)

    @CachedProperty
    def supported_send_options(self):
        """list of supported options, for optimizing sends"""
        # not every zfs implementation supports them all

        ret = []
        for option in ["-L", "-e", "-c"]:
            if self.valid_command(["zfs", "send", option, "zfs_autobackup_option_test"]):
                ret.append(option)
        return ret

    @CachedProperty
    def supported_recv_options(self):
        """list of supported options"""
        # not every zfs implementation supports them all

        ret = []
        for option in ["-s"]:
            if self.valid_command(["zfs", "recv", option, "zfs_autobackup_option_test"]):
                ret.append(option)
        return ret

    def valid_command(self, cmd):
        """test if the specified zfs command returns a valid exit code. use this to determine supported options"""

        try:
            self.run(cmd, hide_errors=True, valid_exitcodes=[0, 1])
        except subprocess.CalledProcessError:
            return False

        return True
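
# Illustrative sketch (not part of the module): probing option support the same way
# supported_send_options does, by running "zfs send <option>" against a bogus dataset
# name and accepting exit codes 0 and 1 (option parsed), while any other exit code or
# a missing zfs binary means the option is not usable. Standalone, standard library only.
import os
import subprocess

def option_supported(option):
    with open(os.devnull, "w") as devnull:
        try:
            code = subprocess.call(["zfs", "send", option, "zfs_autobackup_option_test"],
                                   stdout=devnull, stderr=devnull)
        except OSError:
            return False  # no zfs binary available on this machine
    return code in (0, 1)

print([option for option in ["-L", "-e", "-c"] if option_supported(option)])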

    # TODO: also create a get_zfs_dataset() function that stores all the objects in a dict. This should optimize
    # caching a bit and is more consistent.
    def get_zfs_pool(self, name):
        """get a ZfsPool() object from the specified name. stores objects internally to enable caching"""

        return self.__pools.setdefault(name, ZfsPool(self, name))

    def reset_progress(self):
        """reset progress output counters"""
        self._progress_total_bytes = 0
        self._progress_start_time = time.time()

    def parse_zfs_progress(self, line, hide_errors, prefix):
        """try to parse zfs send/recv progress output, and don't show it as an error to the user"""

        # is it progress output?
        progress_fields = line.rstrip().split("\t")

        if (line.find("nvlist version") == 0 or
                line.find("resume token contents") == 0 or
                len(progress_fields) != 1 or
                line.find("skipping ") == 0 or
                re.match("send from .*estimated size is ", line)):

            # always output it for debugging, of course
            self.debug(prefix + line.rstrip())

            # actual useful info
            if len(progress_fields) >= 3:
                if progress_fields[0] == 'full' or progress_fields[0] == 'size':
                    self._progress_total_bytes = int(progress_fields[2])
                elif progress_fields[0] == 'incremental':
                    self._progress_total_bytes = int(progress_fields[3])
                else:
                    bytes_ = int(progress_fields[1])
                    if self._progress_total_bytes:
                        percentage = min(100, int(bytes_ * 100 / self._progress_total_bytes))
                        speed = int(bytes_ / (time.time() - self._progress_start_time) / (1024 * 1024))
                        bytes_left = self._progress_total_bytes - bytes_
                        minutes_left = int((bytes_left / (bytes_ / (time.time() - self._progress_start_time))) / 60)

                        print(">>> {}% {}MB/s (total {}MB, {} minutes left)  \r".format(percentage, speed, int(
                            self._progress_total_bytes / (1024 * 1024)), minutes_left), end='', file=sys.stderr)
                        sys.stderr.flush()

            return

        # still do the normal stderr output handling
        if hide_errors:
            self.debug(prefix + line.rstrip())
        else:
            self.error(prefix + line.rstrip())
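
# Illustrative sketch (not part of the module): the percentage/speed/ETA arithmetic
# used for the progress line, run on made-up numbers.
total_bytes = 500 * 1024 * 1024   # reported by the "full"/"size"/"incremental" header line
bytes_done = 150 * 1024 * 1024    # second field of a per-interval progress line
elapsed = 12.5                    # seconds since reset_progress()

percentage = min(100, int(bytes_done * 100 / total_bytes))
speed = int(bytes_done / elapsed / (1024 * 1024))                          # MB/s
minutes_left = int((total_bytes - bytes_done) / (bytes_done / elapsed) / 60)

print(">>> {}% {}MB/s (total {}MB, {} minutes left)".format(
    percentage, speed, int(total_bytes / (1024 * 1024)), minutes_left))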

    def _parse_stderr_pipe(self, line, hide_errors):
        self.parse_zfs_progress(line, hide_errors, "STDERR|> ")

    def _parse_stderr(self, line, hide_errors):
        self.parse_zfs_progress(line, hide_errors, "STDERR > ")

    def verbose(self, txt):
        self.logger.verbose("{} {}".format(self.description, txt))

    def error(self, txt):
        self.logger.error("{} {}".format(self.description, txt))

    def debug(self, txt):
        self.logger.debug("{} {}".format(self.description, txt))

    def new_snapshotname(self):
        """determine a unique new snapshot name"""
        return self.backup_name + "-" + time.strftime("%Y%m%d%H%M%S")

    def consistent_snapshot(self, datasets, snapshot_name, min_changed_bytes):
        """create a consistent (atomic) snapshot of the specified datasets, per pool.
        """

        pools = {}

        # collect snapshots that we want to make, per pool
        # self.debug(datasets)
        for dataset in datasets:
            if not dataset.is_changed_ours(min_changed_bytes):
                dataset.verbose("No changes since {}".format(dataset.our_snapshots[-1].snapshot_name))
                continue

            # force_exists, since we're making it
            snapshot = ZfsDataset(dataset.zfs_node, dataset.name + "@" + snapshot_name, force_exists=True)

            pool = dataset.split_path()[0]
            if pool not in pools:
                pools[pool] = []

            pools[pool].append(snapshot)

            # update cache, but try to prevent an unneeded zfs list
            if self.readonly or CachedProperty.is_cached(dataset, 'snapshots'):
                dataset.snapshots.append(snapshot)  # NOTE: this will trigger zfs list if it's not cached

        if not pools:
            self.verbose("No changes anywhere: not creating snapshots.")
            return

        # create a consistent snapshot per pool
        for (pool_name, snapshots) in pools.items():
            cmd = ["zfs", "snapshot"]

            cmd.extend(map(lambda snapshot_: str(snapshot_), snapshots))

            self.verbose("Creating snapshots {} in pool {}".format(snapshot_name, pool_name))
            self.run(cmd, readonly=False)
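
# Illustrative sketch (not part of the module): how datasets are grouped per pool so
# that each pool gets one atomic "zfs snapshot" call. The dataset names and snapshot
# name below are made up.
datasets = ["rpool/data", "rpool/home", "tank/vm"]
snapshot_name = "offsite1-20200101120000"

pools = {}
for dataset in datasets:
    pool = dataset.split("/")[0]
    pools.setdefault(pool, []).append(dataset + "@" + snapshot_name)

for pool_name, snapshots in pools.items():
    print(["zfs", "snapshot"] + snapshots)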

    @CachedProperty
    def selected_datasets(self):
        """determine the filesystems that should be backed up by looking at the special autobackup property, system-wide

        returns: list of ZfsDataset
        """

        self.debug("Getting selected datasets")

        # get all source filesystems that have the backup property
        lines = self.run(tab_split=True, readonly=True, cmd=[
            "zfs", "get", "-t", "volume,filesystem", "-o", "name,value,source", "-s", "local,inherited", "-H",
            "autobackup:" + self.backup_name
        ])

        # determine the filesystems that should actually be backed up
        selected_filesystems = []
        direct_filesystems = []
        for line in lines:
            (name, value, source) = line
            dataset = ZfsDataset(self, name)

            if value == "false":
                dataset.verbose("Ignored (disabled)")

            else:
                if source == "local" and (value == "true" or value == "child"):
                    direct_filesystems.append(name)

                if source == "local" and value == "true":
                    dataset.verbose("Selected (direct selection)")
                    selected_filesystems.append(dataset)
                elif source.find("inherited from ") == 0 and (value == "true" or value == "child"):
                    inherited_from = re.sub("^inherited from ", "", source)
                    if inherited_from in direct_filesystems:
                        selected_filesystems.append(dataset)
                        dataset.verbose("Selected (inherited selection)")
                    else:
                        dataset.debug("Ignored (already a backup)")
                else:
                    dataset.verbose("Ignored (only children)")

        return selected_filesystems
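
# Illustrative sketch (not part of the module): how the tab-split "zfs get" output is
# interpreted. The sample lines are made up; value "true" selects a dataset directly,
# "child" selects only its children, and inherited values count only when they are
# inherited from a directly selected dataset.
sample_lines = [
    ("rpool/data", "true", "local"),
    ("rpool/data/vm", "true", "inherited from rpool/data"),
    ("rpool/data/tmp", "false", "inherited from rpool/data"),
]

direct = [name for (name, value, source) in sample_lines
          if source == "local" and value in ("true", "child")]

selected = []
for (name, value, source) in sample_lines:
    if value == "false":
        continue
    if source == "local" and value == "true":
        selected.append(name)
    elif source.startswith("inherited from ") and value in ("true", "child"):
        if source[len("inherited from "):] in direct:
            selected.append(name)

print(selected)  # ['rpool/data', 'rpool/data/vm']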
zfs_autobackup/ZfsPool.py (new file)
@@ -0,0 +1,64 @@
from zfs_autobackup.CachedProperty import CachedProperty


class ZfsPool():
    """a zfs pool"""

    def __init__(self, zfs_node, name):
        """name: name of the pool
        """

        self.zfs_node = zfs_node
        self.name = name

    def __repr__(self):
        return "{}: {}".format(self.zfs_node, self.name)

    def __str__(self):
        return self.name

    def __eq__(self, obj):
        if not isinstance(obj, ZfsPool):
            return False

        return self.name == obj.name

    def verbose(self, txt):
        self.zfs_node.verbose("zpool {}: {}".format(self.name, txt))

    def error(self, txt):
        self.zfs_node.error("zpool {}: {}".format(self.name, txt))

    def debug(self, txt):
        self.zfs_node.debug("zpool {}: {}".format(self.name, txt))

    @CachedProperty
    def properties(self):
        """all zpool properties"""

        self.debug("Getting zpool properties")

        cmd = [
            "zpool", "get", "-H", "-p", "all", self.name
        ]

        ret = {}

        for pair in self.zfs_node.run(tab_split=True, cmd=cmd, readonly=True, valid_exitcodes=[0]):
            if len(pair) == 4:
                ret[pair[1]] = pair[2]

        return ret

    @property
    def features(self):
        """get list of active zpool features"""

        ret = []
        for (key, value) in self.properties.items():
            if key.startswith("feature@"):
                feature = key.split("@")[1]
                if value == 'enabled' or value == 'active':
                    ret.append(feature)

        return ret
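
# Illustrative sketch (not part of the module): pulling enabled/active feature flags
# out of a zpool property dict, the same way the features property does. The property
# values below are made up.
properties = {
    "feature@extensible_dataset": "active",
    "feature@encryption": "enabled",
    "feature@bookmarks": "disabled",
    "size": "10000000000",
}

features = [key.split("@")[1] for (key, value) in properties.items()
            if key.startswith("feature@") and value in ("enabled", "active")]

print(sorted(features))  # ['encryption', 'extensible_dataset']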
zfs_autobackup/__init__.py (new executable file)
@@ -0,0 +1,9 @@


def cli():
    import sys
    from zfs_autobackup.ZfsAutobackup import ZfsAutobackup

    zfs_autobackup = ZfsAutobackup(sys.argv[1:], False)
    sys.exit(zfs_autobackup.run())
zfs_autobackup/__main__.py (new executable file)
@@ -0,0 +1,10 @@
# (c)edwin@datux.nl - Released under GPL V3
#
# Greetings from eth0 2019 :)

import sys

if __name__ == "__main__":
    from . import cli
    cli()