Compare commits
144 Commits
v3.2-alpha ... master
| SHA1 |
|---|
| 539b043a9b |
| b38a717b43 |
| 6d4f22b69e |
| 7122dc92af |
| 843b87f319 |
| 7feae675a6 |
| 7586cacb49 |
| e0c09e9975 |
| de3dff77b8 |
| a62e793247 |
| 439ea6a3bc |
| ff86e3c67f |
| 8b8be80ab7 |
| 5cca819916 |
| 477e980ba2 |
| b817df8779 |
| 46580fb500 |
| aa2c283746 |
| 16ab4f8183 |
| 50f8aba101 |
| 771127d34a |
| ea8beee7c8 |
| defbc2d0bf |
| 4e4de2de5a |
| de898fc258 |
| bdc156e48d |
| f3caca48f2 |
| c0a8cb33ad |
| feb3972cd7 |
| e30a393d0e |
| f8cd77e6e4 |
| 06420978d5 |
| 54e590175d |
| 6e5a6764c5 |
| a7d05a7064 |
| d90ea7edd2 |
| 090a2d1343 |
| 7cffec1d26 |
| aac62f3fe6 |
| a12b651d17 |
| 62f078eaec |
| fd1e7d5b33 |
| 03ff730a70 |
| 2c5d3c50e1 |
| ee1d17b6ff |
| 0ff989691f |
| 088710fd39 |
| c12d63470f |
| 4df9e52a97 |
| 3155702c47 |
| a77fc9afe7 |
| 7533d9bcc2 |
| bc57ee8d08 |
| be53c454da |
| 8a6a62ce9c |
| 428e6edc13 |
| 23fbafab42 |
| cdd151d45f |
| ab43689a0f |
| 535e21863b |
| a078be3e9e |
| 00b230792a |
| 8b600c9e9c |
| 60840a4213 |
| 7f91473188 |
| e106e7f1da |
| d531e9fdaf |
| a322eb96ae |
| 564daaa1f8 |
| 4d3aa6da22 |
| aedeb726d4 |
| 78d7dbab6d |
| 0b587b3800 |
| a331dab20f |
| 3f1696024e |
| 911db9b023 |
| 4873913fa8 |
| 244509a006 |
| f9d3576752 |
| 75161c1bd2 |
| 5d7d6f6a6c |
| 7c372cf211 |
| 8854303b7a |
| 233745c345 |
| b68ca19e5f |
| 28ed44b1c8 |
| 1cedea5f5f |
| d99c202e75 |
| 44c6896ddd |
| e4356cb516 |
| cab2f98bb8 |
| 8276d07feb |
| 82ad7c2480 |
| f29cf13db3 |
| 0c6c75bf58 |
| f4e81bddb7 |
| f530cf40f3 |
| e7e1590919 |
| 0d882ec031 |
| 6a58a294a3 |
| 3f755fcc69 |
| d7d76032de |
| b7e10242b9 |
| bcc7983492 |
| 490b293ba1 |
| 2d42d1d1a5 |
| a2f85690a3 |
| a807ec320e |
| 3e6a327647 |
| ed61f03b4b |
| f397e7be59 |
| b60dd4c109 |
| 10a85ff0b7 |
| 770389156a |
| bb9ce25a37 |
| 2fe008acf5 |
| 14c45d2b34 |
| a115f0bd17 |
| 626c84fe47 |
| 4d27b3b6ea |
| 3ca1bce9b2 |
| f0d00aa4e8 |
| 60560b884b |
| af9d768410 |
| f990c2565a |
| af179fa424 |
| 355aa0e84b |
| 494b41f4f1 |
| ef532d3ffb |
| 7109873884 |
| acb0172ddf |
| 53db61de96 |
| 3a947e5fee |
| 8233e7b35e |
| e1fb7a37be |
| 2ffd3baf77 |
| a8b43c286f |
| 609ad19dd9 |
| f2761ecee8 |
| 86706ca24f |
| 88d856d813 |
| 81d0bee7ae |
| fa3f44a045 |
| 02dca218b8 |
.github/ISSUE_TEMPLATE/issue.md (vendored, new file, 11 lines)
@@ -0,0 +1,11 @@
+---
+name: Issue
+about: 'Use this if you have issues or feature requests'
+
+title: ''
+labels: ''
+assignees: ''
+
+---
+
+(Please add the commandline that you use to the issue. AT LEAST add the output of --verbose, but usually --debug is needed as well. Sometimes it helps if you add the output of --debug-output instead, but it's huge, so use an attachment for that.)
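What the template asks reporters to attach can be captured like this; a minimal sketch, where the job name `offsite1` and target `tank/backups` are placeholders, not values from this changeset:

```sh
# Re-run the failing command and capture the --verbose, then the --debug, output for the issue.
zfs-autobackup --verbose offsite1 tank/backups 2>&1 | tee verbose.txt
zfs-autobackup --debug offsite1 tank/backups 2>&1 | tee debug.txt
```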
.github/dependabot.yml (vendored, new file, 11 lines)
@@ -0,0 +1,11 @@
+# To get started with Dependabot version updates, you'll need to specify which
+# package ecosystems to update and where the package manifests are located.
+# Please see the documentation for all configuration options:
+# https://docs.github.com/github/administering-a-repository/configuration-options-for-dependency-updates
+
+version: 2
+updates:
+  - package-ecosystem: "python" # See documentation for possible values
+    directory: "/" # Location of package manifests
+    schedule:
+      interval: "weekly"
.github/workflows/codeql-analysis.yml (vendored, new file, 71 lines)
@@ -0,0 +1,71 @@
+# For most projects, this workflow file will not need changing; you simply need
+# to commit it to your repository.
+#
+# You may wish to alter this file to override the set of languages analyzed,
+# or to provide custom queries or build logic.
+#
+# ******** NOTE ********
+# We have attempted to detect the languages in your repository. Please check
+# the `language` matrix defined below to confirm you have the correct set of
+# supported CodeQL languages.
+#
+name: "CodeQL"
+
+on:
+  push:
+    branches: [ master ]
+  pull_request:
+    # The branches below must be a subset of the branches above
+    branches: [ master ]
+  # schedule:
+  #   - cron: '26 23 * * 3'
+
+jobs:
+  analyze:
+    name: Analyze
+    runs-on: ubuntu-latest
+    permissions:
+      actions: read
+      contents: read
+      security-events: write
+
+    strategy:
+      fail-fast: false
+      matrix:
+        language: [ 'python' ]
+        # CodeQL supports [ 'cpp', 'csharp', 'go', 'java', 'javascript', 'python', 'ruby' ]
+        # Learn more about CodeQL language support at https://git.io/codeql-language-support
+
+    steps:
+    - name: Checkout repository
+      uses: actions/checkout@v2
+
+    # Initializes the CodeQL tools for scanning.
+    - name: Initialize CodeQL
+      uses: github/codeql-action/init@v2
+      with:
+        languages: ${{ matrix.language }}
+        # If you wish to specify custom queries, you can do so here or in a config file.
+        # By default, queries listed here will override any specified in a config file.
+        # Prefix the list here with "+" to use these queries and those in the config file.
+        # queries: ./path/to/local/query, your-org/your-repo/queries@main
+
+    # Autobuild attempts to build any compiled languages (C/C++, C#, or Java).
+    # If this step fails, then you should remove it and run the build manually (see below)
+    - name: Autobuild
+      uses: github/codeql-action/autobuild@v2
+
+    # ℹ️ Command-line programs to run using the OS shell.
+    # 📚 https://git.io/JvXDl
+
+    # ✏️ If the Autobuild fails above, remove it and uncomment the following three lines
+    #    and modify them (or add more) to build your code if your project
+    #    uses a compiled language
+
+    #- run: |
+    #   make bootstrap
+    #   make release
+
+    - name: Perform CodeQL Analysis
+      uses: github/codeql-action/analyze@v2
.github/workflows/python-publish.yml (vendored, 18 lines changed)
@@ -20,20 +20,20 @@ jobs:
       with:
         python-version: '3.x'

-    - name: Set up Python 2.x
-      uses: actions/setup-python@v2
-      with:
-        python-version: '2.x'
+    # - name: Set up Python 2.x
+    #   uses: actions/setup-python@v2
+    #   with:
+    #     python-version: '2.x'

     - name: Install dependencies 3.x
       run: |
        python -m pip install --upgrade pip
        pip3 install setuptools wheel twine

-    - name: Install dependencies 2.x
-      run: |
-        python2 -m pip install --upgrade pip
-        pip2 install setuptools wheel twine
+    # - name: Install dependencies 2.x
+    #   run: |
+    #     python2 -m pip install --upgrade pip
+    #     pip2 install setuptools wheel twine

     - name: Build and publish
       env:
@@ -41,6 +41,6 @@ jobs:
         TWINE_PASSWORD: ${{ secrets.TWINE_PASSWORD }}
       run: |
        python3 setup.py sdist bdist_wheel
-        python2 setup.py sdist bdist_wheel
+        # python2 setup.py sdist bdist_wheel
        twine check dist/*
        twine upload dist/*
.github/workflows/regression.yml (vendored, 42 lines changed)
@@ -6,15 +6,12 @@ on: ["push", "pull_request"]


 jobs:

-  ubuntu20:
-    runs-on: ubuntu-20.04
+  ubuntu22:
+    runs-on: ubuntu-22.04

     steps:
     - name: Checkout
-      uses: actions/checkout@v2.3.4
-
-
+      uses: actions/checkout@v3.5.0

     - name: Prepare
       run: sudo apt update && sudo apt install zfsutils-linux lzop pigz zstd gzip xz-utils lz4 mbuffer && sudo -H pip3 install coverage unittest2 mock==3.0.5 coveralls
@@ -29,17 +26,15 @@ jobs:
         GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
       run: coveralls --service=github || true

-  ubuntu18:
-    runs-on: ubuntu-18.04
+  ubuntu20:
+    runs-on: ubuntu-20.04

     steps:
     - name: Checkout
-      uses: actions/checkout@v2.3.4
-
+      uses: actions/checkout@v3.5.0

     - name: Prepare
-      run: sudo apt update && sudo apt install zfsutils-linux python3-setuptools lzop pigz zstd gzip xz-utils liblz4-tool mbuffer && sudo -H pip3 install coverage unittest2 mock==3.0.5 coveralls
+      run: sudo apt update && sudo apt install zfsutils-linux lzop pigz zstd gzip xz-utils lz4 mbuffer && sudo -H pip3 install coverage unittest2 mock==3.0.5 coveralls

     - name: Regression test
@@ -51,26 +46,3 @@ jobs:
         GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
       run: coveralls --service=github || true
-
-  ubuntu18_python2:
-    runs-on: ubuntu-18.04
-
-    steps:
-    - name: Checkout
-      uses: actions/checkout@v2.3.4
-
-    - name: Set up Python 2.x
-      uses: actions/setup-python@v2
-      with:
-        python-version: '2.x'
-
-    - name: Prepare
-      run: sudo apt update && sudo apt install zfsutils-linux python-setuptools lzop pigz zstd gzip xz-utils liblz4-tool mbuffer && sudo -H pip install coverage unittest2 mock==3.0.5 coveralls colorama
-
-    - name: Regression test
-      run: sudo -E ./tests/run_tests
-
-    - name: Coveralls
-      env:
-        GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
-        COVERALLS_REPO_TOKEN: ${{ secrets.GITHUB_TOKEN }}
-      run: coveralls --service=github || true
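The workflow's regression step can also be reproduced by hand; a sketch built only from commands that appear in the workflow above (assumes an Ubuntu host, since the suite creates real zpools and needs root):

```sh
# Same preparation and test invocation as the CI jobs.
sudo apt update && sudo apt install zfsutils-linux lzop pigz zstd gzip xz-utils lz4 mbuffer
sudo -H pip3 install coverage unittest2 mock==3.0.5
sudo -E ./tests/run_tests
```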
README.md (29 lines changed)
@@ -2,6 +2,7 @@
 # ZFS autobackup

 [](https://github.com/psy0rz/zfs_autobackup/actions?query=workflow%3A%22Regression+tests%22) [](https://coveralls.io/github/psy0rz/zfs_autobackup) [](https://pypi.org/project/zfs-autobackup/)
+[](https://github.com/psy0rz/zfs_autobackup/actions/workflows/codeql-analysis.yml)

 ## Introduction

@@ -13,14 +14,14 @@ You can select what to backup by setting a custom `ZFS property`. This makes it

 Other settings are just specified on the commandline: Simply setup and test your zfs-autobackup command and fix all the issues you might encounter. When you're done you can just copy/paste your command to a cron or script.

-Since its using ZFS commands, you can see what it's actually doing by specifying `--debug`. This also helps a lot if you run into some strange problem or error. You can just copy-paste the command that fails and play around with it on the commandline. (something I missed in other tools)
+Since it's using ZFS commands, you can see what it's actually doing by specifying `--debug`. This also helps a lot if you run into some strange problem or errors. You can just copy-paste the command that fails and play around with it on the commandline. (something I missed in other tools)

-An important feature thats missing from other tools is a reliable `--test` option: This allows you to see what zfs-autobackup will do and tune your parameters. It will do everything, except make changes to your system.
+An important feature that's missing from other tools is a reliable `--test` option: This allows you to see what zfs-autobackup will do and tune your parameters. It will do everything, except make changes to your system.

 ## Features

 * Works across operating systems: Tested with **Linux**, **FreeBSD/FreeNAS** and **SmartOS**.
-* Low learning curve: no complex daemons or services, no additional software or networking needed. (Only read this page)
+* Low learning curve: no complex daemons or services, no additional software or networking needed.
 * Plays nicely with existing replication systems. (Like Proxmox HA)
 * Automatically selects filesystems to backup by looking at a simple ZFS property.
 * Creates consistent snapshots. (takes all snapshots at once, atomicly.)
@@ -30,6 +31,7 @@ An important feature thats missing from other tools is a reliable `--test` optio
 * "pull" remote data from a server via SSH and backup it locally.
 * "pull+push": Zero trust between source and target.
 * Can be scheduled via simple cronjob or run directly from commandline.
+* Also supports complex backup geometries.
 * ZFS encryption support: Can decrypt / encrypt or even re-encrypt datasets during transfer.
 * Supports sending with compression. (Using pigz, zstd etc)
 * IO buffering to speed up transfer.
@@ -41,9 +43,10 @@ An important feature thats missing from other tools is a reliable `--test` optio
 * Uses progressive thinning for older snapshots.
 * Uses zfs-holds on important snapshots to prevent accidental deletion.
 * Automatic resuming of failed transfers.
-* Easy migration from existing zfs backups.
+* Easy migration from other zfs backup systems to zfs-autobackup.
+* Gracefully handles datasets that no longer exist on source.
 * Complete and clean logging.
 * All code is regression tested against actual ZFS environments.
 * Easy installation:
   * Just install zfs-autobackup via pip.
   * Only needs to be installed on one side.
@@ -54,8 +57,18 @@ An important feature thats missing from other tools is a reliable `--test` optio

 Please look at our wiki to [Get started](https://github.com/psy0rz/zfs_autobackup/wiki).

-# Sponsor list
+Or read the [Full manual](https://github.com/psy0rz/zfs_autobackup/wiki/Manual)

-This project was sponsorred by:
+# Tips

-* JetBrains (Provided me with a license for their whole professional product line, https://www.jetbrains.com/pycharm/ )
+To release snapshots that are blocked by a hold, use this command if you want to delete them:
+
+```sh
+zfs list -t snap -o name | grep <dataset> | xargs -n 1 zfs release -r zfs_autobackup:offsite1
+```
+
+If the delete still fails afterwards, check for other holds on the snapshot:
+
+```sh
+zfs holds path@snapshotname
+```
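A minimal sketch of the property-driven selection the Features list describes (the pool names and the job name `offsite1` are placeholders):

```sh
# Mark a dataset tree for backup under the job name "offsite1"...
zfs set autobackup:offsite1=true tank/data
# ...then preview what would happen; --test changes nothing on the system.
zfs-autobackup --test --verbose offsite1 backuppool/tank
```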
scripts/autoupload (new executable file, 1 line)
@@ -0,0 +1 @@
+find zfs_autobackup | entr rsync -avx . "$1":zfs_autobackup
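A hedged usage sketch of the script above: it watches the zfs_autobackup source tree with entr and rsyncs the working copy to the host given as its first argument (`devbox` is a placeholder):

```sh
# Re-syncs the code to devbox:~/zfs_autobackup on every source change.
./scripts/autoupload devbox
```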
scripts/enctest (new executable file, 33 lines)
@@ -0,0 +1,33 @@
+#!/bin/bash
+
+#NOTE: usually the speed is the same, but the cpu usage is much higher for ccm
+
+set -e
+
+D=/enctest123
+DS=rpool$D
+
+echo sdflsakjfklsjfsda > key.txt
+
+dd if=/dev/urandom of=dump.bin bs=1M count=10000
+
+#readcache
+cat dump.bin > /dev/null
+
+zfs destroy $DS || true
+
+zfs create $DS
+
+echo Unencrypted:
+sync
+time ( cp dump.bin $D/dump.bin; sync )
+
+
+for E in aes-128-ccm aes-192-ccm aes-256-ccm aes-128-gcm aes-192-gcm aes-256-gcm; do
+    zfs destroy $DS
+    zfs create -o encryption=$E -o keylocation=file://`pwd`/key.txt -o keyformat=passphrase $DS
+    echo $E
+    sync
+    time ( cp dump.bin $D/dump.bin; sync )
+done
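A cautionary usage note, inferred from reading the script: it hardcodes the pool `rpool`, repeatedly destroys and recreates `rpool/enctest123`, and writes a roughly 10 GB `dump.bin` in the current directory, so only run it as root on a scratch machine:

```sh
# Compare the "time" lines printed for the unencrypted run and each cipher.
sudo ./scripts/enctest
```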
setup.py (1 line changed)
@@ -20,6 +20,7 @@ setuptools.setup(
         [
             'zfs-autobackup = zfs_autobackup.ZfsAutobackup:cli',
             'zfs-autoverify = zfs_autobackup.ZfsAutoverify:cli',
+            'zfs-check = zfs_autobackup.ZfsCheck:cli',
         ]
     },
     packages=setuptools.find_packages(),
tests/Dockerfile (new file, 17 lines)
@@ -0,0 +1,17 @@
+FROM alpine:3.18
+
+
+#base packages
+RUN apk update
+RUN apk add py3-pip
+
+#zfs autobackup tests dependencies
+RUN apk add zfs openssh lzop pigz zstd gzip xz lz4 mbuffer udev zfs-udev
+
+
+#python modules
+COPY requirements.txt /
+RUN pip3 install -r requirements.txt
+
+#git repo should be mounted in /app:
+ENTRYPOINT [ "/app/tests/tests_docker" ]
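A usage sketch for the image, mirroring what tests/run_tests_docker (added further below) does: build from the repo root and mount the working copy at /app, where the ENTRYPOINT expects it:

```sh
docker build -t zfs-autobackup-test -f tests/Dockerfile .
docker run --privileged --rm -it -v .:/app zfs-autobackup-test
```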
tests/autorun_tests_docker (new executable file, 3 lines)
@@ -0,0 +1,3 @@
+#!/bin/sh
+
+find tests zfs_autobackup -name '*.py' |entr ./tests/run_tests_docker $@
tests/basetest.py
@@ -2,6 +2,17 @@
 # To run tests as non-root, use this hack:
 # chmod 4755 /usr/sbin/zpool /usr/sbin/zfs

+import sys
+
+import zfs_autobackup.util
+
+#dirty hack for this error:
+#AttributeError: module 'collections' has no attribute 'MutableMapping'
+
+if sys.version_info.major == 3 and sys.version_info.minor >= 10:
+    import collections
+    setattr(collections, "MutableMapping", collections.abc.MutableMapping)
+
 import subprocess
 import random

@@ -12,20 +23,24 @@ import time
 from pprint import *
 from zfs_autobackup.ZfsAutobackup import *
 from zfs_autobackup.ZfsAutoverify import *
+from zfs_autobackup.ZfsCheck import *
+from zfs_autobackup.util import *
 from mock import *
 import contextlib
 import sys
 import io

+import datetime
+
+
 TEST_POOLS="test_source1 test_source2 test_target1"
-ZFS_USERSPACE= subprocess.check_output("dpkg-query -W zfsutils-linux |cut -f2", shell=True).decode('utf-8').rstrip()
-ZFS_KERNEL= subprocess.check_output("modinfo zfs|grep ^version |sed 's/.* //'", shell=True).decode('utf-8').rstrip()
+# ZFS_USERSPACE= subprocess.check_output("dpkg-query -W zfsutils-linux |cut -f2", shell=True).decode('utf-8').rstrip()
+# ZFS_KERNEL= subprocess.check_output("modinfo zfs|grep ^version |sed 's/.* //'", shell=True).decode('utf-8').rstrip()

 print("###########################################")
 print("#### Unit testing against:")
-print("#### Python :"+sys.version.replace("\n", " "))
-print("#### ZFS userspace :"+ZFS_USERSPACE)
-print("#### ZFS kernel :"+ZFS_KERNEL)
+print("#### Python : "+sys.version.replace("\n", " "))
+print("#### ZFS version : "+subprocess.check_output("zfs --version", shell=True).decode('utf-8').rstrip().replace('\n', ' '))
 print("#############################################")

@@ -62,7 +77,7 @@ def redirect_stderr(target):
 def shelltest(cmd):
     """execute and print result as nice copypastable string for unit tests (adds extra newlines on top/bottom)"""

-    ret=(subprocess.check_output("SUDO_ASKPASS=./password.sh sudo -A "+cmd , shell=True).decode('utf-8'))
+    ret=(subprocess.check_output(cmd , shell=True).decode('utf-8'))

     print("######### result of: {}".format(cmd))
     print(ret)

@@ -74,7 +89,7 @@ def prepare_zpools():
     print("Preparing zfs filesystems...")

     #need ram blockdevice
-    subprocess.check_call("modprobe brd rd_size=512000", shell=True)
+    # subprocess.check_call("modprobe brd rd_size=512000", shell=True)

     #remove old stuff
     subprocess.call("zpool destroy test_source1 2>/dev/null", shell=True)

@@ -94,3 +109,18 @@
     subprocess.check_call("zfs set autobackup:test=child test_source2/fs2", shell=True)

     print("Prepare done")
+
+
+@contextlib.contextmanager
+def mocktime(time_str, format="%Y%m%d%H%M%S"):
+
+    def fake_datetime_now():
+        return datetime.datetime.strptime(time_str, format)
+
+    with patch.object(zfs_autobackup.util,'datetime_now_mock', fake_datetime_now()):
+        yield
tests/data/empty (new file, empty)
tests/data/partial (new file, 1 line of binary data; content not shown)
tests/data/whole (new binary file; not shown)
tests/data/whole2 (new binary file; not shown)
tests/data/whole_whole2 (new binary file; not shown)
tests/data/whole_whole2_partial (new binary file; not shown)
tests/run_tests
@@ -18,6 +18,18 @@ if ! [ -e /root/.ssh/id_rsa ]; then
     ssh -oStrictHostKeyChecking=no localhost true || exit 1
 fi

+cat >> ~/.ssh/config <<EOF
+Host *
+    addkeystoagent yes
+    controlpath ~/.ssh/control-master-%r@%h:%p
+    controlmaster auto
+    controlpersist 3600
+EOF
+
+
+modprobe brd rd_size=512000
+
+umount /tmp/ZfsCheck*
+
 coverage run --branch --source zfs_autobackup -m unittest discover -vvvvf $SCRIPTDIR $@ 2>&1
 EXIT=$?
tests/run_tests_docker (new executable file, 16 lines)
@@ -0,0 +1,16 @@
+#!/bin/sh
+
+set -e
+
+#remove stuff from previous local tests
+zpool destroy test_source1 2>/dev/null || true
+zpool destroy test_source2 2>/dev/null || true
+zpool destroy test_target1 2>/dev/null || true
+
+#is needed
+modprobe brd rd_size=512000 || true
+
+# builds and starts a docker container to run the test suite
+docker build -t zfs-autobackup-test -f tests/Dockerfile .
+docker run --name zfs-autobackup-test --privileged --rm -it -v .:/app zfs-autobackup-test $@
tests/test_blockhasher.py (new file, 157 lines)
@@ -0,0 +1,157 @@
+from basetest import *
+from zfs_autobackup.BlockHasher import BlockHasher
+
+
+# make VERY sure this works correctly under all circumstances.
+
+# sha1 sums of the test files (bs=4096):
+# da39a3ee5e6b4b0d3255bfef95601890afd80709  empty
+# 642027d63bb0afd7e0ba197f2c66ad03e3d70de1  partial
+# 3c0bf91170d873b8e327d3bafb6bc074580d11b7  whole
+# 2e863f1fcccd6642e4e28453eba10d2d3f74d798  whole2
+# 959e6b58078f0cfd2fb3d37e978fda51820473ff  whole_whole2
+# 309ffffba2e1977d12f3b7469971f30d28b94bd8  whole_whole2_partial
+
+class TestBlockHasher(unittest2.TestCase):
+
+    def setUp(self):
+        pass
+
+    def test_empty(self):
+        block_hasher = BlockHasher(count=1)
+        self.assertEqual(
+            list(block_hasher.generate("tests/data/empty")),
+            []
+        )
+
+    def test_partial(self):
+        block_hasher = BlockHasher(count=1)
+        self.assertEqual(
+            list(block_hasher.generate("tests/data/partial")),
+            [(0, "642027d63bb0afd7e0ba197f2c66ad03e3d70de1")]
+        )
+
+    def test_whole(self):
+        block_hasher = BlockHasher(count=1)
+        self.assertEqual(
+            list(block_hasher.generate("tests/data/whole")),
+            [(0, "3c0bf91170d873b8e327d3bafb6bc074580d11b7")]
+        )
+
+    def test_whole2(self):
+        block_hasher = BlockHasher(count=1)
+        self.assertEqual(
+            list(block_hasher.generate("tests/data/whole_whole2")),
+            [
+                (0, "3c0bf91170d873b8e327d3bafb6bc074580d11b7"),
+                (1, "2e863f1fcccd6642e4e28453eba10d2d3f74d798")
+            ]
+        )
+
+    def test_wwp(self):
+        block_hasher = BlockHasher(count=1)
+        self.assertEqual(
+            list(block_hasher.generate("tests/data/whole_whole2_partial")),
+            [
+                (0, "3c0bf91170d873b8e327d3bafb6bc074580d11b7"),  # whole
+                (1, "2e863f1fcccd6642e4e28453eba10d2d3f74d798"),  # whole2
+                (2, "642027d63bb0afd7e0ba197f2c66ad03e3d70de1")   # partial
+            ]
+        )
+
+    def test_wwp_count2(self):
+        block_hasher = BlockHasher(count=2)
+        self.assertEqual(
+            list(block_hasher.generate("tests/data/whole_whole2_partial")),
+            [
+                (0, "959e6b58078f0cfd2fb3d37e978fda51820473ff"),  # whole_whole2
+                (1, "642027d63bb0afd7e0ba197f2c66ad03e3d70de1")   # partial
+            ]
+        )
+
+    def test_big(self):
+        block_hasher = BlockHasher(count=10)
+        self.assertEqual(
+            list(block_hasher.generate("tests/data/whole_whole2_partial")),
+            [
+                (0, "309ffffba2e1977d12f3b7469971f30d28b94bd8"),  # whole_whole2_partial
+            ])
+
+    def test_blockhash_compare(self):
+        #no errors
+        block_hasher = BlockHasher(count=1)
+        generator = block_hasher.generate("tests/data/whole_whole2_partial")
+        self.assertEqual([], list(block_hasher.compare("tests/data/whole_whole2_partial", generator)))
+
+        #compare file is smaller (EOF errors)
+        block_hasher = BlockHasher(count=1)
+        generator = block_hasher.generate("tests/data/whole_whole2_partial")
+        self.assertEqual(
+            [(1, '2e863f1fcccd6642e4e28453eba10d2d3f74d798', 'EOF'),
+             (2, '642027d63bb0afd7e0ba197f2c66ad03e3d70de1', 'EOF')],
+            list(block_hasher.compare("tests/data/whole", generator)))
+
+        #no errors, huge chunks
+        block_hasher = BlockHasher(count=10)
+        generator = block_hasher.generate("tests/data/whole_whole2_partial")
+        self.assertEqual([], list(block_hasher.compare("tests/data/whole_whole2_partial", generator)))
+
+        # different order to make sure seek functions are ok
+        block_hasher = BlockHasher(count=1)
+        checksums = list(block_hasher.generate("tests/data/whole_whole2_partial"))
+        checksums.reverse()
+        self.assertEqual([], list(block_hasher.compare("tests/data/whole_whole2_partial", checksums)))
+
+    def test_skip1(self):
+        block_hasher = BlockHasher(count=1, skip=1)
+        self.assertEqual(
+            list(block_hasher.generate("tests/data/whole_whole2_partial")),
+            [
+                (0, "3c0bf91170d873b8e327d3bafb6bc074580d11b7"),  # whole
+                # (1, "2e863f1fcccd6642e4e28453eba10d2d3f74d798"),  # whole2
+                (2, "642027d63bb0afd7e0ba197f2c66ad03e3d70de1")  # partial
+            ]
+        )
+
+        #should continue the pattern on the next file:
+        self.assertEqual(
+            list(block_hasher.generate("tests/data/whole_whole2_partial")),
+            [
+                # (0, "3c0bf91170d873b8e327d3bafb6bc074580d11b7"),  # whole
+                (1, "2e863f1fcccd6642e4e28453eba10d2d3f74d798"),  # whole2
+                # (2, "642027d63bb0afd7e0ba197f2c66ad03e3d70de1")  # partial
+            ]
+        )
+
+    def test_skip6(self):
+        block_hasher = BlockHasher(count=1, skip=6)
+        self.assertEqual(
+            list(block_hasher.generate("tests/data/whole_whole2_partial")),
+            [
+                (0, "3c0bf91170d873b8e327d3bafb6bc074580d11b7"),  # whole
+                # (1, "2e863f1fcccd6642e4e28453eba10d2d3f74d798"),  # whole2
+                # (2, "642027d63bb0afd7e0ba197f2c66ad03e3d70de1")  # partial
+            ]
+        )
+
+        #all blocks of next file are skipped
+        self.assertEqual(
+            list(block_hasher.generate("tests/data/whole_whole2_partial")),
+            [
+                # (0, "3c0bf91170d873b8e327d3bafb6bc074580d11b7"),  # whole
+                # (1, "2e863f1fcccd6642e4e28453eba10d2d3f74d798"),  # whole2
+                # (2, "642027d63bb0afd7e0ba197f2c66ad03e3d70de1")  # partial
+            ]
+        )
+
+        #first block of this one is the 6th to be skipped:
+        self.assertEqual(
+            list(block_hasher.generate("tests/data/whole_whole2_partial")),
+            [
+                # (0, "3c0bf91170d873b8e327d3bafb6bc074580d11b7"),  # whole
+                (1, "2e863f1fcccd6642e4e28453eba10d2d3f74d798"),  # whole2
+                # (2, "642027d63bb0afd7e0ba197f2c66ad03e3d70de1")  # partial
+            ]
+        )
+
+    #NOTE: compare doesnt use skip. thats the job of its input generator
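The hard-coded sums in the file above can be spot-checked from the shell; a sketch assuming the documented 4096-byte block size:

```sh
# Block 0 of whole_whole2_partial is the "whole" block; this should print
# 3c0bf91170d873b8e327d3bafb6bc074580d11b7.
head -c 4096 tests/data/whole_whole2_partial | sha1sum
```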
tests/test_cmdpipe.py
@@ -9,11 +9,11 @@ class TestCmdPipe(unittest2.TestCase):
         p=CmdPipe(readonly=False, inp=None)
         err=[]
         out=[]
-        p.add(CmdItem(["ls", "-d", "/", "/", "/nonexistent"], stderr_handler=lambda line: err.append(line), exit_handler=lambda exit_code: self.assertEqual(exit_code,2)))
-        executed=p.execute(stdout_handler=lambda line: out.append(line))
+        p.add(CmdItem(["sh", "-c", "echo out1;echo err1 >&2; echo out2; echo err2 >&2"], stderr_handler=lambda line: err.append(line), exit_handler=lambda exit_code: self.assertEqual(exit_code,0), stdout_handler=lambda line: out.append(line)))
+        executed=p.execute()

-        self.assertEqual(err, ["ls: cannot access '/nonexistent': No such file or directory"])
-        self.assertEqual(out, ["/","/"])
+        self.assertEqual(out, ["out1", "out2"])
+        self.assertEqual(err, ["err1","err2"])
         self.assertIsNone(executed)

     def test_input(self):
@@ -21,8 +21,8 @@ class TestCmdPipe(unittest2.TestCase):
         p=CmdPipe(readonly=False, inp="test")
         err=[]
         out=[]
-        p.add(CmdItem(["cat"], stderr_handler=lambda line: err.append(line), exit_handler=lambda exit_code: self.assertEqual(exit_code,0)))
-        executed=p.execute(stdout_handler=lambda line: out.append(line))
+        p.add(CmdItem(["cat"], stderr_handler=lambda line: err.append(line), exit_handler=lambda exit_code: self.assertEqual(exit_code,0), stdout_handler=lambda line: out.append(line) ))
+        executed=p.execute()

         self.assertEqual(err, [])
         self.assertEqual(out, ["test"])
@@ -37,8 +37,8 @@ class TestCmdPipe(unittest2.TestCase):
         out=[]
         p.add(CmdItem(["echo", "test"], stderr_handler=lambda line: err1.append(line), exit_handler=lambda exit_code: self.assertEqual(exit_code,0)))
         p.add(CmdItem(["tr", "e", "E"], stderr_handler=lambda line: err2.append(line), exit_handler=lambda exit_code: self.assertEqual(exit_code,0)))
-        p.add(CmdItem(["tr", "t", "T"], stderr_handler=lambda line: err3.append(line), exit_handler=lambda exit_code: self.assertEqual(exit_code,0)))
-        executed=p.execute(stdout_handler=lambda line: out.append(line))
+        p.add(CmdItem(["tr", "t", "T"], stderr_handler=lambda line: err3.append(line), exit_handler=lambda exit_code: self.assertEqual(exit_code,0), stdout_handler=lambda line: out.append(line)))
+        executed=p.execute()

         self.assertEqual(err1, [])
         self.assertEqual(err2, [])
@@ -56,16 +56,16 @@ class TestCmdPipe(unittest2.TestCase):
         err2=[]
         err3=[]
         out=[]
-        p.add(CmdItem(["ls", "/nonexistent1"], stderr_handler=lambda line: err1.append(line), exit_handler=lambda exit_code: self.assertEqual(exit_code,2)))
-        p.add(CmdItem(["ls", "/nonexistent2"], stderr_handler=lambda line: err2.append(line), exit_handler=lambda exit_code: self.assertEqual(exit_code,2)))
-        p.add(CmdItem(["ls", "/nonexistent3"], stderr_handler=lambda line: err3.append(line), exit_handler=lambda exit_code: self.assertEqual(exit_code,2)))
-        executed=p.execute(stdout_handler=lambda line: out.append(line))
+        p.add(CmdItem(["sh", "-c", "echo err1 >&2"], stderr_handler=lambda line: err1.append(line), ))
+        p.add(CmdItem(["sh", "-c", "echo err2 >&2"], stderr_handler=lambda line: err2.append(line), ))
+        p.add(CmdItem(["sh", "-c", "echo err3 >&2"], stderr_handler=lambda line: err3.append(line), stdout_handler=lambda line: out.append(line)))
+        executed=p.execute()

-        self.assertEqual(err1, ["ls: cannot access '/nonexistent1': No such file or directory"])
-        self.assertEqual(err2, ["ls: cannot access '/nonexistent2': No such file or directory"])
-        self.assertEqual(err3, ["ls: cannot access '/nonexistent3': No such file or directory"])
+        self.assertEqual(err1, ["err1"])
+        self.assertEqual(err2, ["err2"])
+        self.assertEqual(err3, ["err3"])
         self.assertEqual(out, [])
-        self.assertIsNone(executed)
+        self.assertTrue(executed)

     def test_exitcode(self):
         """test piped exitcodes """
@@ -74,10 +74,10 @@ class TestCmdPipe(unittest2.TestCase):
         err2=[]
         err3=[]
         out=[]
-        p.add(CmdItem(["bash", "-c", "exit 1"], stderr_handler=lambda line: err1.append(line), exit_handler=lambda exit_code: self.assertEqual(exit_code,1)))
-        p.add(CmdItem(["bash", "-c", "exit 2"], stderr_handler=lambda line: err2.append(line), exit_handler=lambda exit_code: self.assertEqual(exit_code,2)))
-        p.add(CmdItem(["bash", "-c", "exit 3"], stderr_handler=lambda line: err3.append(line), exit_handler=lambda exit_code: self.assertEqual(exit_code,3)))
-        executed=p.execute(stdout_handler=lambda line: out.append(line))
+        p.add(CmdItem(["sh", "-c", "exit 1"], stderr_handler=lambda line: err1.append(line), exit_handler=lambda exit_code: self.assertEqual(exit_code,1)))
+        p.add(CmdItem(["sh", "-c", "exit 2"], stderr_handler=lambda line: err2.append(line), exit_handler=lambda exit_code: self.assertEqual(exit_code,2)))
+        p.add(CmdItem(["sh", "-c", "exit 3"], stderr_handler=lambda line: err3.append(line), exit_handler=lambda exit_code: self.assertEqual(exit_code,3), stdout_handler=lambda line: out.append(line)))
+        executed=p.execute()

         self.assertEqual(err1, [])
         self.assertEqual(err2, [])
@@ -97,8 +97,8 @@ class TestCmdPipe(unittest2.TestCase):
             return True

         p.add(CmdItem(["echo", "test1"], stderr_handler=lambda line: err1.append(line), exit_handler=true_exit, readonly=True))
-        p.add(CmdItem(["echo", "test2"], stderr_handler=lambda line: err2.append(line), exit_handler=true_exit, readonly=True))
-        executed=p.execute(stdout_handler=lambda line: out.append(line))
+        p.add(CmdItem(["echo", "test2"], stderr_handler=lambda line: err2.append(line), exit_handler=true_exit, readonly=True, stdout_handler=lambda line: out.append(line)))
+        executed=p.execute()

         self.assertEqual(err1, [])
         self.assertEqual(err2, [])
@@ -113,11 +113,63 @@ class TestCmdPipe(unittest2.TestCase):
         err2=[]
         out=[]
         p.add(CmdItem(["echo", "test1"], stderr_handler=lambda line: err1.append(line), readonly=False))
-        p.add(CmdItem(["echo", "test2"], stderr_handler=lambda line: err2.append(line), readonly=True))
-        executed=p.execute(stdout_handler=lambda line: out.append(line))
+        p.add(CmdItem(["echo", "test2"], stderr_handler=lambda line: err2.append(line), readonly=True, stdout_handler=lambda line: out.append(line)))
+        executed=p.execute()

         self.assertEqual(err1, [])
         self.assertEqual(err2, [])
         self.assertEqual(out, [])
         self.assertTrue(executed)
+
+    def test_no_handlers(self):
+        with self.assertRaises(Exception):
+            p=CmdPipe()
+            p.add(CmdItem([ "echo" ]))
+            p.execute()
+
+    #NOTE: this will give some resource warnings
+    def test_manual_pipes(self):
+
+        # manual piping means: a command in the pipe has a stdout_handler, which is responsible for sending the data into the next item of the pipe.
+
+        result=[]
+
+        def stdout_handler(line):
+            item2.process.stdin.write(line.encode('utf8'))
+            # item2.process.stdin.close()
+
+        item1=CmdItem(["echo", "test"], stdout_handler=stdout_handler)
+        item2=CmdItem(["tr", "e", "E"], stdout_handler=lambda line: result.append(line))
+
+        p=CmdPipe()
+        p.add(item1)
+        p.add(item2)
+        p.execute()
+
+        self.assertEqual(result, ["tEst"])
+
+    def test_multiprocess(self):
+
+        #dont do any piping at all, just run multiple processes and handle outputs
+        result1=[]
+        result2=[]
+        result3=[]
+
+        item1=CmdItem(["echo", "test1"], stdout_handler=lambda line: result1.append(line))
+        item2=CmdItem(["echo", "test2"], stdout_handler=lambda line: result2.append(line))
+        item3=CmdItem(["echo", "test3"], stdout_handler=lambda line: result3.append(line))
+
+        p=CmdPipe()
+        p.add(item1)
+        p.add(item2)
+        p.add(item3)
+        p.execute()
+
+        self.assertEqual(result1, ["test1"])
+        self.assertEqual(result2, ["test2"])
+        self.assertEqual(result3, ["test3"])
tests/test_destroymissing.py
@@ -13,10 +13,10 @@ class TestZfsNode(unittest2.TestCase):
     def test_destroymissing(self):

         #initial backup
-        with patch('time.strftime', return_value="test-19101111000000"): #1000 years in past
+        with mocktime("19101111000000"): #1000 years in past
             self.assertFalse(ZfsAutobackup("test test_target1 --no-progress --verbose --no-holds".split(" ")).run())

-        with patch('time.strftime', return_value="test-20101111000000"): #far in past
+        with mocktime("20101111000000"): #far in past
             self.assertFalse(ZfsAutobackup("test test_target1 --no-progress --verbose --no-holds --allow-empty".split(" ")).run())

@@ -117,7 +117,7 @@ class TestZfsNode(unittest2.TestCase):

             print(buf.getvalue())
             #on second run it sees the dangling ex-parent but doesnt know what to do with it (since it has no own snapshot)
-            self.assertIn("test_source1: Destroy missing: has no snapshots made by us.", buf.getvalue())
+            self.assertIn("test_source1: Destroy missing: has no snapshots made by us", buf.getvalue())
tests/test_encryption.py
@@ -49,11 +49,11 @@ class TestZfsEncryption(unittest2.TestCase):
         self.prepare_encrypted_dataset("11111111", "test_source1/fs1/encryptedsourcekeyless", unload_key=True) # raw mode shouldn't need a key
         self.prepare_encrypted_dataset("22222222", "test_target1/encryptedtarget")

-        with patch('time.strftime', return_value="test-20101111000000"):
+        with mocktime("20101111000000"):
             self.assertFalse(ZfsAutobackup("test test_target1 --verbose --no-progress --allow-empty --exclude-received".split(" ")).run())
             self.assertFalse(ZfsAutobackup("test test_target1/encryptedtarget --verbose --no-progress --no-snapshot --exclude-received".split(" ")).run())

-        with patch('time.strftime', return_value="test-20101111000001"):
+        with mocktime("20101111000001"):
             self.assertFalse(ZfsAutobackup("test test_target1 --verbose --no-progress --allow-empty --exclude-received".split(" ")).run())
             self.assertFalse(ZfsAutobackup("test test_target1/encryptedtarget --verbose --no-progress --no-snapshot --exclude-received".split(" ")).run())

@@ -86,11 +86,11 @@ test_target1/test_source2/fs2/sub encryption
         self.prepare_encrypted_dataset("11111111", "test_source1/fs1/encryptedsource")
         self.prepare_encrypted_dataset("22222222", "test_target1/encryptedtarget")

-        with patch('time.strftime', return_value="test-20101111000000"):
+        with mocktime("20101111000000"):
             self.assertFalse(ZfsAutobackup("test test_target1 --verbose --no-progress --decrypt --allow-empty --exclude-received".split(" ")).run())
             self.assertFalse(ZfsAutobackup("test test_target1/encryptedtarget --verbose --no-progress --decrypt --no-snapshot --exclude-received".split(" ")).run())

-        with patch('time.strftime', return_value="test-20101111000001"):
+        with mocktime("20101111000001"):
             self.assertFalse(ZfsAutobackup("test test_target1 --verbose --no-progress --decrypt --allow-empty --exclude-received".split(" ")).run())
             self.assertFalse(ZfsAutobackup("test test_target1/encryptedtarget --verbose --no-progress --decrypt --no-snapshot --exclude-received".split(" ")).run())

@@ -121,13 +121,13 @@ test_target1/test_source2/fs2/sub encryptionroot -
         self.prepare_encrypted_dataset("11111111", "test_source1/fs1/encryptedsource")
         self.prepare_encrypted_dataset("22222222", "test_target1/encryptedtarget")

-        with patch('time.strftime', return_value="test-20101111000000"):
-            self.assertFalse(ZfsAutobackup("test test_target1 --verbose --no-progress --encrypt --debug --allow-empty --exclude-received".split(" ")).run())
-            self.assertFalse(ZfsAutobackup("test test_target1/encryptedtarget --verbose --no-progress --encrypt --debug --no-snapshot --exclude-received".split(" ")).run())
+        with mocktime("20101111000000"):
+            self.assertFalse(ZfsAutobackup("test test_target1 --verbose --no-progress --encrypt --debug --allow-empty --exclude-received --clear-mountpoint".split(" ")).run())
+            self.assertFalse(ZfsAutobackup("test test_target1/encryptedtarget --verbose --no-progress --encrypt --debug --no-snapshot --exclude-received --clear-mountpoint".split(" ")).run())

-        with patch('time.strftime', return_value="test-20101111000001"):
-            self.assertFalse(ZfsAutobackup("test test_target1 --verbose --no-progress --encrypt --debug --allow-empty --exclude-received".split(" ")).run())
-            self.assertFalse(ZfsAutobackup("test test_target1/encryptedtarget --verbose --no-progress --encrypt --debug --no-snapshot --exclude-received".split(" ")).run())
+        with mocktime("20101111000001"):
+            self.assertFalse(ZfsAutobackup("test test_target1 --verbose --no-progress --encrypt --debug --allow-empty --exclude-received --clear-mountpoint".split(" ")).run())
+            self.assertFalse(ZfsAutobackup("test test_target1/encryptedtarget --verbose --no-progress --encrypt --debug --no-snapshot --exclude-received --clear-mountpoint".split(" ")).run())

         r = shelltest("zfs get -r -t filesystem encryptionroot test_target1")
         self.assertEqual(r, """
@@ -156,14 +156,14 @@ test_target1/test_source2/fs2/sub encryptionroot -
         self.prepare_encrypted_dataset("11111111", "test_source1/fs1/encryptedsource")
         self.prepare_encrypted_dataset("22222222", "test_target1/encryptedtarget")

-        with patch('time.strftime', return_value="test-20101111000000"):
+        with mocktime("20101111000000"):
             self.assertFalse(ZfsAutobackup(
-                "test test_target1 --verbose --no-progress --decrypt --encrypt --debug --allow-empty --exclude-received".split(" ")).run())
+                "test test_target1 --verbose --no-progress --decrypt --encrypt --debug --allow-empty --exclude-received --clear-mountpoint".split(" ")).run())
             self.assertFalse(ZfsAutobackup(
-                "test test_target1/encryptedtarget --verbose --no-progress --decrypt --encrypt --debug --no-snapshot --exclude-received".split(
+                "test test_target1/encryptedtarget --verbose --no-progress --decrypt --encrypt --debug --no-snapshot --exclude-received --clear-mountpoint".split(
                     " ")).run())

-        with patch('time.strftime', return_value="test-20101111000001"):
+        with mocktime("20101111000001"):
             self.assertFalse(ZfsAutobackup(
                 "test test_target1 --verbose --no-progress --decrypt --encrypt --debug --allow-empty --exclude-received".split(" ")).run())
             self.assertFalse(ZfsAutobackup(
@@ -191,3 +191,99 @@ test_target1/test_source2/fs2 encryptionroot -
 test_target1/test_source2/fs2/sub encryptionroot - -
 """)

+
+
+    def test_raw_invalid_snapshot(self):
+        """in raw mode, its not allowed to have any newer snaphots on target, #219"""
+
+        self.prepare_encrypted_dataset("11111111", "test_source1/fs1/encryptedsource")
+
+        with mocktime("20101111000000"):
+            self.assertFalse(ZfsAutobackup("test test_target1 --verbose --no-progress".split(" ")).run())
+
+        #this is invalid in raw mode
+        shelltest("zfs snapshot test_target1/test_source1/fs1/encryptedsource@incompatible")
+
+        with mocktime("20101111000001"):
+            #should fail because of incompatble snapshot
+            self.assertEqual(ZfsAutobackup("test test_target1 --verbose --no-progress --allow-empty".split(" ")).run(),1)
+            #should destroy incompatible and continue
+            self.assertFalse(ZfsAutobackup("test test_target1 --verbose --no-progress --no-snapshot --destroy-incompatible".split(" ")).run())
+
+        r = shelltest("zfs get -r -t filesystem encryptionroot test_target1")
+        self.assertMultiLineEqual(r,"""
+NAME PROPERTY VALUE SOURCE
+test_target1 encryptionroot - -
+test_target1/test_source1 encryptionroot - -
+test_target1/test_source1/fs1 encryptionroot - -
+test_target1/test_source1/fs1/encryptedsource encryptionroot test_target1/test_source1/fs1/encryptedsource -
+test_target1/test_source1/fs1/sub encryptionroot - -
+test_target1/test_source2 encryptionroot - -
+test_target1/test_source2/fs2 encryptionroot - -
+test_target1/test_source2/fs2/sub encryptionroot - -
+""")
+
+    def test_resume_encrypt_with_no_key(self):
+        """test what happens if target encryption key not loaded (this led to a kernel crash of freebsd with 2.1.x i think) while trying to resume"""
+
+        self.prepare_encrypted_dataset("11111111", "test_source1/fs1/encryptedsource")
+        self.prepare_encrypted_dataset("22222222", "test_target1/encryptedtarget")
+
+        with mocktime("20101111000000"):
+            self.assertFalse(ZfsAutobackup("test test_target1/encryptedtarget --verbose --no-progress --encrypt --allow-empty --exclude-received --clear-mountpoint".split(" ")).run())
+
+        r = shelltest("zfs set compress=off test_source1 test_target1")
+
+        # big change on source
+        r = shelltest("dd if=/dev/zero of=/test_source1/fs1/data bs=250M count=1")
+
+        # waste space on target
+        r = shelltest("dd if=/dev/zero of=/test_target1/waste bs=250M count=1")
+
+        # should fail and leave resume token
+        with mocktime("20101111000001"):
+            self.assertTrue(ZfsAutobackup(
+                "test test_target1/encryptedtarget --verbose --no-progress --encrypt --exclude-received --allow-empty --clear-mountpoint".split(
+                    " ")).run())
+
+        # free up space
+        r = shelltest("rm /test_target1/waste")
+
+        # sync
+        r = shelltest("zfs umount test_target1")
+        r = shelltest("zfs mount test_target1")
+
+        # unload key
+        shelltest("zfs unload-key test_target1/encryptedtarget")
+
+        # resume
+        with mocktime("20101111000001"):
+            self.assertEqual(ZfsAutobackup(
+                "test test_target1/encryptedtarget --verbose --no-progress --encrypt --exclude-received --allow-empty --no-snapshot --clear-mountpoint".split(
+                    " ")).run(),3)
+
+        r = shelltest("zfs get -r -t all encryptionroot test_target1")
+        self.assertEqual(r, """
+NAME PROPERTY VALUE SOURCE
+test_target1 encryptionroot - -
+test_target1/encryptedtarget encryptionroot test_target1/encryptedtarget -
+test_target1/encryptedtarget/test_source1 encryptionroot test_target1/encryptedtarget -
+test_target1/encryptedtarget/test_source1/fs1 encryptionroot test_target1/encryptedtarget -
+test_target1/encryptedtarget/test_source1/fs1@test-20101111000000 encryptionroot test_target1/encryptedtarget -
+test_target1/encryptedtarget/test_source1/fs1/encryptedsource encryptionroot test_target1/encryptedtarget/test_source1/fs1/encryptedsource -
+test_target1/encryptedtarget/test_source1/fs1/encryptedsource@test-20101111000000 encryptionroot test_target1/encryptedtarget/test_source1/fs1/encryptedsource -
+test_target1/encryptedtarget/test_source1/fs1/encryptedsource@test-20101111000001 encryptionroot test_target1/encryptedtarget/test_source1/fs1/encryptedsource -
+test_target1/encryptedtarget/test_source1/fs1/sub encryptionroot test_target1/encryptedtarget -
+test_target1/encryptedtarget/test_source1/fs1/sub@test-20101111000000 encryptionroot test_target1/encryptedtarget -
+test_target1/encryptedtarget/test_source2 encryptionroot test_target1/encryptedtarget -
+test_target1/encryptedtarget/test_source2/fs2 encryptionroot test_target1/encryptedtarget -
+test_target1/encryptedtarget/test_source2/fs2/sub encryptionroot test_target1/encryptedtarget -
+test_target1/encryptedtarget/test_source2/fs2/sub@test-20101111000000 encryptionroot test_target1/encryptedtarget -
+""")
tests/test_executenode.py
@@ -33,9 +33,9 @@ class TestExecuteNode(unittest2.TestCase):

         #return std err as well, trigger stderr by listing something non existing
         with self.subTest("stderr return"):
-            (stdout, stderr)=node.run(["ls", "nonexistingfile"], return_stderr=True, valid_exitcodes=[2])
+            (stdout, stderr)=node.run(["sh", "-c", "echo bla >&2"], return_stderr=True, valid_exitcodes=[0])
             self.assertEqual(stdout,[])
-            self.assertRegex(stderr[0],"nonexistingfile")
+            self.assertRegex(stderr[0],"bla")

         #slow command, make sure things dont exit too early
         with self.subTest("early exit test"):
@@ -110,19 +110,17 @@ class TestExecuteNode(unittest2.TestCase):

         with self.subTest("check stderr on pipe output side"):
             output=nodea.run(["true"], pipe=True, valid_exitcodes=[0])
-            (stdout, stderr)=nodeb.run(["ls", "nonexistingfile"], inp=output, return_stderr=True, valid_exitcodes=[2])
+            (stdout, stderr)=nodeb.run(["sh", "-c", "echo bla >&2"], inp=output, return_stderr=True, valid_exitcodes=[0])
             self.assertEqual(stdout,[])
-            self.assertRegex(stderr[0], "nonexistingfile" )
+            self.assertRegex(stderr[0], "bla" )

         with self.subTest("check stderr on pipe input side (should be only printed)"):
-            output=nodea.run(["ls", "nonexistingfile"], pipe=True, valid_exitcodes=[2])
+            output=nodea.run(["sh", "-c", "echo bla >&2"], pipe=True, valid_exitcodes=[0])
             (stdout, stderr)=nodeb.run(["true"], inp=output, return_stderr=True, valid_exitcodes=[0])
             self.assertEqual(stdout,[])
             self.assertEqual(stderr,[])

-
-
     def test_pipe_local_local(self):
         nodea=ExecuteNode(debug_output=True)
         nodeb=ExecuteNode(debug_output=True)
@@ -144,5 +142,68 @@ class TestExecuteNode(unittest2.TestCase):
         self.pipe(nodea, nodeb)


-if __name__ == '__main__':
-    unittest.main()
+    def test_cwd(self):
+
+        nodea=ExecuteNode(ssh_to="localhost", debug_output=True)
+        nodeb=ExecuteNode(debug_output=True)
+
+        #change to a directory with a space and execute a system pipe, check if all piped commands are executed in correct directory.
+        shelltest("mkdir '/tmp/space test' 2>/dev/null; true")
+        self.assertEqual(nodea.run(cmd=["pwd", ExecuteNode.PIPE, "cat"], cwd="/tmp/space test"), ["/tmp/space test"])
+        self.assertEqual(nodea.run(cmd=["cat", ExecuteNode.PIPE, "pwd"], cwd="/tmp/space test"), ["/tmp/space test"])
+        self.assertEqual(nodeb.run(cmd=["pwd", ExecuteNode.PIPE, "cat"], cwd="/tmp/space test"), ["/tmp/space test"])
+        self.assertEqual(nodeb.run(cmd=["cat", ExecuteNode.PIPE, "pwd"], cwd="/tmp/space test"), ["/tmp/space test"])
+
+    def test_script_handlers(self):
+
+        def test(node):
+            results = []
+            node.script(lines=["echo line1", "echo line2 1>&2", "exit 123"],
+                        stdout_handler=lambda line: results.append(line),
+                        stderr_handler=lambda line: results.append(line),
+                        exit_handler=lambda exit_code: results.append(exit_code),
+                        valid_exitcodes=[123]
+                        )
+
+            self.assertEqual(results, ["line1", "line2", 123 ])
+
+        with self.subTest("remote"):
+            test(ExecuteNode(ssh_to="localhost", debug_output=True))
+
+        with self.subTest("local"):
+            test(ExecuteNode(debug_output=True))
+
+    def test_script_defaults(self):
+
+        result=[]
+        nodea=ExecuteNode(debug_output=True)
+        nodea.script(lines=["echo test"], stdout_handler=lambda line: result.append(line))
+
+        self.assertEqual(result, ["test"])
+
+    def test_script_pipe(self):
+
+        result=[]
+        nodea=ExecuteNode()
+        cmd_pipe=nodea.script(lines=["echo test"], pipe=True)
+        nodea.script(lines=["tr e E"], inp=cmd_pipe,stdout_handler=lambda line: result.append(line))
+
+        self.assertEqual(result, ["tEst"])
+
+    def test_mixed(self):
+
+        #should be able to mix run() and script()
+        node=ExecuteNode()
+
+        result=[]
+        pipe=node.run(["echo", "test"], pipe=True)
+        node.script(["tr e E"], inp=pipe, stdout_handler=lambda line: result.append(line))
+
+        self.assertEqual(result, ["tEst"])
@ -32,7 +32,7 @@ class TestExternalFailures(unittest2.TestCase):
|
||||
def test_initial_resume(self):
|
||||
|
||||
# inital backup, leaves resume token
|
||||
with patch('time.strftime', return_value="test-20101111000000"):
|
||||
with mocktime("20101111000000"):
|
||||
self.generate_resume()
|
||||
|
||||
# --test should resume and succeed
|
||||
@ -42,12 +42,7 @@ class TestExternalFailures(unittest2.TestCase):
|
||||
|
||||
print(buf.getvalue())
|
||||
|
||||
# did we really resume?
|
||||
if "0.6.5" in ZFS_USERSPACE:
|
||||
# abort this late, for beter coverage
|
||||
self.skipTest("Resume not supported in this ZFS userspace version")
|
||||
else:
|
||||
self.assertIn(": resuming", buf.getvalue())
|
||||
self.assertIn(": resuming", buf.getvalue())
|
||||
|
||||
# should resume and succeed
|
||||
with OutputIO() as buf:
|
||||
@ -56,12 +51,7 @@ class TestExternalFailures(unittest2.TestCase):
|
||||
|
||||
print(buf.getvalue())
|
||||
|
||||
# did we really resume?
|
||||
if "0.6.5" in ZFS_USERSPACE:
|
||||
# abort this late, for beter coverage
|
||||
self.skipTest("Resume not supported in this ZFS userspace version")
|
||||
else:
|
||||
self.assertIn(": resuming", buf.getvalue())
|
||||
self.assertIn(": resuming", buf.getvalue())
|
||||
|
||||
r = shelltest("zfs list -H -o name -r -t all test_target1")
|
||||
self.assertMultiLineEqual(r, """
|
||||
@@ -81,11 +71,11 @@ test_target1/test_source2/fs2/sub@test-20101111000000
def test_incremental_resume(self):

# initial backup
with patch('time.strftime', return_value="test-20101111000000"):
with mocktime("20101111000000"):
self.assertFalse(ZfsAutobackup("test test_target1 --no-progress --verbose --allow-empty".split(" ")).run())

# incremental backup leaves resume token
with patch('time.strftime', return_value="test-20101111000001"):
with mocktime("20101111000001"):
self.generate_resume()

# --test should resume and succeed
@@ -95,12 +85,7 @@ test_target1/test_source2/fs2/sub@test-20101111000000

print(buf.getvalue())

# did we really resume?
if "0.6.5" in ZFS_USERSPACE:
# abort this late, for better coverage
self.skipTest("Resume not supported in this ZFS userspace version")
else:
self.assertIn(": resuming", buf.getvalue())
self.assertIn(": resuming", buf.getvalue())

# should resume and succeed
with OutputIO() as buf:
@@ -110,11 +95,7 @@ test_target1/test_source2/fs2/sub@test-20101111000000
print(buf.getvalue())

# did we really resume?
if "0.6.5" in ZFS_USERSPACE:
# abort this late, for better coverage
self.skipTest("Resume not supported in this ZFS userspace version")
else:
self.assertIn(": resuming", buf.getvalue())
self.assertIn(": resuming", buf.getvalue())

r = shelltest("zfs list -H -o name -r -t all test_target1")
self.assertMultiLineEqual(r, """
@@ -134,11 +115,9 @@ test_target1/test_source2/fs2/sub@test-20101111000000
# generate an invalid resume token, and verify it is aborted automatically
def test_initial_resumeabort(self):

if "0.6.5" in ZFS_USERSPACE:
self.skipTest("Resume not supported in this ZFS userspace version")

# initial backup, leaves resume token
with patch('time.strftime', return_value="test-20101111000000"):
with mocktime("20101111000000"):
self.generate_resume()

# remove corresponding source snapshot, so it becomes invalid
@@ -148,11 +127,11 @@ test_target1/test_source2/fs2/sub@test-20101111000000
shelltest("zfs destroy test_target1/test_source1/fs1/sub; true")

# --test try again, should abort old resume
with patch('time.strftime', return_value="test-20101111000001"):
with mocktime("20101111000001"):
self.assertFalse(ZfsAutobackup("test test_target1 --no-progress --verbose --test".split(" ")).run())

# try again, should abort old resume
with patch('time.strftime', return_value="test-20101111000001"):
with mocktime("20101111000001"):
self.assertFalse(ZfsAutobackup("test test_target1 --no-progress --verbose".split(" ")).run())

r = shelltest("zfs list -H -o name -r -t all test_target1")
@@ -172,26 +151,23 @@ test_target1/test_source2/fs2/sub@test-20101111000000
# generate an invalid resume token, and verify it is aborted automatically
def test_incremental_resumeabort(self):

if "0.6.5" in ZFS_USERSPACE:
self.skipTest("Resume not supported in this ZFS userspace version")

# initial backup
with patch('time.strftime', return_value="test-20101111000000"):
with mocktime("20101111000000"):
self.assertFalse(ZfsAutobackup("test test_target1 --no-progress --verbose --allow-empty".split(" ")).run())

# incremental backup, leaves resume token
with patch('time.strftime', return_value="test-20101111000001"):
with mocktime("20101111000001"):
self.generate_resume()

# remove corresponding source snapshot, so it becomes invalid
shelltest("zfs destroy test_source1/fs1@test-20101111000001")

# --test try again, should abort old resume
with patch('time.strftime', return_value="test-20101111000002"):
with mocktime("20101111000002"):
self.assertFalse(ZfsAutobackup("test test_target1 --no-progress --verbose --test".split(" ")).run())

# try again, should abort old resume
with patch('time.strftime', return_value="test-20101111000002"):
with mocktime("20101111000002"):
self.assertFalse(ZfsAutobackup("test test_target1 --no-progress --verbose".split(" ")).run())

r = shelltest("zfs list -H -o name -r -t all test_target1")
@@ -212,22 +188,19 @@ test_target1/test_source2/fs2/sub@test-20101111000000
# create a resume situation where the other side doesn't want the snapshot anymore (should abort resume)
def test_abort_unwanted_resume(self):

if "0.6.5" in ZFS_USERSPACE:
self.skipTest("Resume not supported in this ZFS userspace version")

with patch('time.strftime', return_value="test-20101111000000"):
with mocktime("20101111000000"):
self.assertFalse(ZfsAutobackup("test test_target1 --no-progress --verbose".split(" ")).run())

# generate resume
with patch('time.strftime', return_value="test-20101111000001"):
with mocktime("20101111000001"):
self.generate_resume()

with OutputIO() as buf:
with redirect_stdout(buf):
# incremental, doesn't want previous anymore
with patch('time.strftime', return_value="test-20101111000002"):
with mocktime("20101111000002"):
self.assertFalse(ZfsAutobackup(
"test test_target1 --no-progress --verbose --keep-target=0 --allow-empty".split(" ")).run())
"test test_target1 --no-progress --verbose --keep-target=0 --allow-empty --debug".split(" ")).run())

print(buf.getvalue())

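These tests rely on a generate_resume() helper, defined earlier in the file and not visible in this diff. One way to manufacture the same situation by hand, using standard ZFS behaviour (an assumption about what the helper does, not its actual implementation): zfs recv -s stores a receive_resume_token when the incoming stream is cut short.

# Hand-rolled equivalent of a resume situation (a sketch, names are illustrative):
shelltest("zfs snapshot test_source1/fs1@resumetest")
shelltest("zfs send test_source1/fs1@resumetest | head -c 4096 "
          "| zfs recv -s test_target1/resumetest || true")
# The token is now visible as a dataset property:
print(shelltest("zfs get -H -o value receive_resume_token test_target1/resumetest"))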
@@ -250,14 +223,11 @@ test_target1/test_source2/fs2/sub@test-20101111000002
# test with empty snapshot list (this was a bug)
def test_abort_resume_emptysnapshotlist(self):

if "0.6.5" in ZFS_USERSPACE:
self.skipTest("Resume not supported in this ZFS userspace version")

with patch('time.strftime', return_value="test-20101111000000"):
with mocktime("20101111000000"):
self.assertFalse(ZfsAutobackup("test test_target1 --no-progress --verbose".split(" ")).run())

# generate resume
with patch('time.strftime', return_value="test-20101111000001"):
with mocktime("20101111000001"):
self.generate_resume()

shelltest("zfs destroy test_source1/fs1@test-20101111000001")
@@ -265,7 +235,7 @@ test_target1/test_source2/fs2/sub@test-20101111000002
with OutputIO() as buf:
with redirect_stdout(buf):
# incremental, doesn't want previous anymore
with patch('time.strftime', return_value="test-20101111000002"):
with mocktime("20101111000002"):
self.assertFalse(ZfsAutobackup(
"test test_target1 --no-progress --verbose --no-snapshot".split(
" ")).run())
@@ -277,14 +247,14 @@ test_target1/test_source2/fs2/sub@test-20101111000002

def test_missing_common(self):

with patch('time.strftime', return_value="test-20101111000000"):
with mocktime("20101111000000"):
self.assertFalse(ZfsAutobackup("test test_target1 --no-progress --verbose --allow-empty".split(" ")).run())

# remove common snapshot and leave nothing
shelltest("zfs release zfs_autobackup:test test_source1/fs1@test-20101111000000")
shelltest("zfs destroy test_source1/fs1@test-20101111000000")

with patch('time.strftime', return_value="test-20101111000001"):
with mocktime("20101111000001"):
self.assertTrue(ZfsAutobackup("test test_target1 --no-progress --verbose --allow-empty".split(" ")).run())

#UPDATE: of course the one thing that wasn't tested had a bug :( (in ExecuteNode.run()).
@@ -295,7 +265,7 @@ test_target1/test_source2/fs2/sub@test-20101111000002
# #recreate target pool without any features
# # shelltest("zfs set compress=on test_source1; zpool destroy test_target1; zpool create test_target1 -o feature@project_quota=disabled /dev/ram2")
#
# with patch('time.strftime', return_value="test-20101111000000"):
# with mocktime("20101111000000"):
# self.assertFalse(ZfsAutobackup("test test_target1 --verbose --allow-empty --no-progress".split(" ")).run())
#
# r = shelltest("zfs list -H -o name -r -t all test_target1")

@@ -11,17 +11,17 @@ class TestZfsNode(unittest2.TestCase):
def test_keepsource0target10queuedsend(self):
"""Test if thinner doesn't destroy too much early on if there are no common snapshots YET. Issue #84"""

with patch('time.strftime', return_value="test-20101111000000"):
with mocktime("20101111000000"):
self.assertFalse(ZfsAutobackup(
"test test_target1 --no-progress --verbose --keep-source=0 --keep-target=10 --allow-empty --no-send".split(
" ")).run())

with patch('time.strftime', return_value="test-20101111000001"):
with mocktime("20101111000001"):
self.assertFalse(ZfsAutobackup(
"test test_target1 --no-progress --verbose --keep-source=0 --keep-target=10 --allow-empty --no-send".split(
" ")).run())

with patch('time.strftime', return_value="test-20101111000002"):
with mocktime("20101111000002"):
self.assertFalse(ZfsAutobackup(
"test test_target1 --no-progress --verbose --keep-source=0 --keep-target=10 --allow-empty".split(
" ")).run())
@@ -65,7 +65,7 @@ test_target1/test_source2/fs2/sub@test-20101111000002
shelltest("zfs set autobackup:test=true test_target1/target_shouldnotbeexcluded")
shelltest("zfs create test_target1/target")

with patch('time.strftime', return_value="test-20101111000000"):
with mocktime("20101111000000"):
self.assertFalse(ZfsAutobackup(
"test test_target1/target --no-progress --verbose --allow-empty".split(
" ")).run())

@@ -33,26 +33,28 @@ class TestZfsScaling(unittest2.TestCase):
run_counter=0
with patch.object(ExecuteNode,'run', run_count) as p:

with patch('time.strftime', return_value="test-20101112000000"):
with mocktime("20101112000000"):
self.assertFalse(ZfsAutobackup("test test_target1 --no-progress --verbose --keep-source=10000 --keep-target=10000 --no-holds --allow-empty".split(" ")).run())


#this triggers if you make a change with an impact of more than O(snapshot_count/2)
expected_runs=343
print("ACTUAL RUNS: {}".format(run_counter))
expected_runs=342
print("EXPECTED RUNS: {}".format(expected_runs))
print("ACTUAL RUNS : {}".format(run_counter))
self.assertLess(abs(run_counter-expected_runs), snapshot_count/2)


run_counter=0
with patch.object(ExecuteNode,'run', run_count) as p:

with patch('time.strftime', return_value="test-20101112000001"):
with mocktime("20101112000001"):
self.assertFalse(ZfsAutobackup("test test_target1 --no-progress --verbose --keep-source=10000 --keep-target=10000 --no-holds --allow-empty".split(" ")).run())


#this triggers if you make a change with a performance impact of more than O(snapshot_count/2)
expected_runs=47
print("ACTUAL RUNS: {}".format(run_counter))
print("EXPECTED RUNS: {}".format(expected_runs))
print("ACTUAL RUNS : {}".format(run_counter))
self.assertLess(abs(run_counter-expected_runs), snapshot_count/2)

def test_manydatasets(self):
@@ -73,12 +75,13 @@ class TestZfsScaling(unittest2.TestCase):
run_counter=0
with patch.object(ExecuteNode,'run', run_count) as p:

with patch('time.strftime', return_value="test-20101112000000"):
with mocktime("20101112000000"):
self.assertFalse(ZfsAutobackup("test test_target1 --no-progress --verbose --no-holds --allow-empty".split(" ")).run())


#this triggers if you make a change with an impact of more than O(snapshot_count/2)
expected_runs=743
#this triggers if you make a change with an impact of more than O(snapshot_count/2)
expected_runs=842
print("EXPECTED RUNS: {}".format(expected_runs))
print("ACTUAL RUNS: {}".format(run_counter))
self.assertLess(abs(run_counter-expected_runs), dataset_count/2)

@@ -87,11 +90,12 @@ class TestZfsScaling(unittest2.TestCase):
run_counter=0
with patch.object(ExecuteNode,'run', run_count) as p:

with patch('time.strftime', return_value="test-20101112000001"):
with mocktime("20101112000001"):
self.assertFalse(ZfsAutobackup("test test_target1 --no-progress --verbose --no-holds --allow-empty".split(" ")).run())


#this triggers if you make a change with a performance impact of more than O(snapshot_count/2)
expected_runs=947
expected_runs=1047
print("EXPECTED RUNS: {}".format(expected_runs))
print("ACTUAL RUNS: {}".format(run_counter))
self.assertLess(abs(run_counter-expected_runs), dataset_count/2)

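run_count and run_counter are defined earlier in this test file and are not visible in the diff. A plausible sketch of the counting wrapper that patch.object() installs over ExecuteNode.run (the import path is the one used elsewhere in the project; the wrapper body is an assumption):

from zfs_autobackup.ExecuteNode import ExecuteNode  # import path assumed

run_counter = 0
real_run = ExecuteNode.run

def run_count(*args, **kwargs):
    # Count every executed command, then delegate to the real method.
    # (patch.object replaces the unbound method, so args[0] is the node itself.)
    global run_counter
    run_counter += 1
    return real_run(*args, **kwargs)

The assertLess() checks then act as a coarse performance regression guard: a code change that adds or removes more than O(snapshot_count/2) command invocations trips the assertion.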
@@ -2,87 +2,145 @@ import zfs_autobackup.compressors
from basetest import *
import time


class TestSendRecvPipes(unittest2.TestCase):
"""test input/output pipes for zfs send and recv"""

def setUp(self):
prepare_zpools()
self.longMessage=True


self.longMessage = True

def test_send_basics(self):
"""send basics (remote/local send pipe)"""


with self.subTest("local local pipe"):
with patch('time.strftime', return_value="test-20101111000000"):
self.assertFalse(ZfsAutobackup(["test", "test_target1", "--exclude-received", "--no-holds", "--no-progress", "--send-pipe=dd bs=1M", "--recv-pipe=dd bs=2M"]).run())
with mocktime("20101111000000"):
self.assertFalse(ZfsAutobackup(
["test", "test_target1", "--allow-empty", "--exclude-received", "--no-holds", "--no-progress", "--clear-mountpoint",
"--send-pipe=dd bs=1M", "--recv-pipe=dd bs=2M"]).run())

shelltest("zfs destroy -r test_target1/test_source1/fs1/sub")

with self.subTest("remote local pipe"):
with patch('time.strftime', return_value="test-20101111000000"):
self.assertFalse(ZfsAutobackup(["test", "test_target1", "--exclude-received", "--no-holds", "--no-progress", "--ssh-source=localhost", "--send-pipe=dd bs=1M", "--recv-pipe=dd bs=2M"]).run())
with mocktime("20101111000001"):
self.assertFalse(ZfsAutobackup(
["test", "test_target1", "--allow-empty", "--exclude-received", "--no-holds", "--no-progress",
"--ssh-source=localhost", "--send-pipe=dd bs=1M", "--recv-pipe=dd bs=2M"]).run())

shelltest("zfs destroy -r test_target1/test_source1/fs1/sub")

with self.subTest("local remote pipe"):
with patch('time.strftime', return_value="test-20101111000000"):
self.assertFalse(ZfsAutobackup(["test", "test_target1", "--exclude-received", "--no-holds", "--no-progress", "--ssh-target=localhost", "--send-pipe=dd bs=1M", "--recv-pipe=dd bs=2M"]).run())
with mocktime("20101111000002"):
self.assertFalse(ZfsAutobackup(
["test", "test_target1", "--allow-empty", "--exclude-received", "--no-holds", "--no-progress",
"--ssh-target=localhost", "--send-pipe=dd bs=1M", "--recv-pipe=dd bs=2M"]).run())

shelltest("zfs destroy -r test_target1/test_source1/fs1/sub")

with self.subTest("remote remote pipe"):
with patch('time.strftime', return_value="test-20101111000000"):
self.assertFalse(ZfsAutobackup(["test", "test_target1", "--exclude-received", "--no-holds", "--no-progress", "--ssh-source=localhost", "--ssh-target=localhost", "--send-pipe=dd bs=1M", "--recv-pipe=dd bs=2M"]).run())
with mocktime("20101111000003"):
self.assertFalse(ZfsAutobackup(
["test", "test_target1", "--allow-empty", "--exclude-received", "--no-holds", "--no-progress",
"--ssh-source=localhost", "--ssh-target=localhost", "--send-pipe=dd bs=1M",
"--recv-pipe=dd bs=2M"]).run())

r = shelltest("zfs list -H -o name -r -t all test_target1")
self.assertMultiLineEqual(r, """
test_target1
test_target1/test_source1
test_target1/test_source1/fs1
test_target1/test_source1/fs1@test-20101111000000
test_target1/test_source1/fs1@test-20101111000001
test_target1/test_source1/fs1@test-20101111000002
test_target1/test_source1/fs1@test-20101111000003
test_target1/test_source1/fs1/sub
test_target1/test_source1/fs1/sub@test-20101111000000
test_target1/test_source1/fs1/sub@test-20101111000001
test_target1/test_source1/fs1/sub@test-20101111000002
test_target1/test_source1/fs1/sub@test-20101111000003
test_target1/test_source2
test_target1/test_source2/fs2
test_target1/test_source2/fs2/sub
test_target1/test_source2/fs2/sub@test-20101111000000
test_target1/test_source2/fs2/sub@test-20101111000001
test_target1/test_source2/fs2/sub@test-20101111000002
test_target1/test_source2/fs2/sub@test-20101111000003
""")

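For reference, --send-pipe and --recv-pipe splice extra commands into the transfer pipeline, one on the sending side and one on the receiving side. Roughly, as a sketch of the assumed shell equivalent (not literal zfs-autobackup output):

# local/local:
#   zfs send ... | dd bs=1M | dd bs=2M | zfs recv ...
#
# with --ssh-source=localhost, the send half runs through ssh:
#   ssh localhost 'zfs send ... | dd bs=1M' | dd bs=2M | zfs recv ...

The four subtests above exercise exactly these placements: both pipes local, the send pipe remote, the recv pipe remote, and both remote.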
def test_compress(self):
"""test sending with each supported compressor"""

for compress in zfs_autobackup.compressors.COMPRESS_CMDS.keys():

with self.subTest("compress "+compress):
with patch('time.strftime', return_value="test-20101111000000"):
self.assertFalse(ZfsAutobackup(["test", "test_target1", "--exclude-received", "--no-holds", "--no-progress", "--compress="+compress]).run())
with self.subTest("compress " + compress):
with mocktime("20101111000000"):
self.assertFalse(ZfsAutobackup(
["test", "test_target1", "--exclude-received", "--no-holds", "--no-progress", "--verbose",
"--compress=" + compress]).run())

shelltest("zfs destroy -r test_target1/test_source1/fs1/sub")

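The loop above iterates over every compressor registered in zfs_autobackup.compressors.COMPRESS_CMDS. The real table lives in zfs_autobackup/compressors.py and is not shown in this diff; a hypothetical entry, purely to illustrate the shape such a mapping could take:

# Hypothetical shape only; the key and commands are illustrative, not the real table.
COMPRESS_CMDS = {
    "zstd-fast": {
        "compress": ["zstd", "-3"],
        "decompress": ["zstd", "-d"],
    },
}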
def test_buffer(self):
"""test different buffer configurations"""


with self.subTest("local local pipe"):
with patch('time.strftime', return_value="test-20101111000000"):
self.assertFalse(ZfsAutobackup(["test", "test_target1", "--exclude-received", "--no-holds", "--no-progress", "--buffer=1M" ]).run())
with mocktime("20101111000000"):
self.assertFalse(ZfsAutobackup(
["test", "test_target1", "--allow-empty", "--exclude-received", "--no-holds", "--no-progress", "--clear-mountpoint", "--buffer=1M"]).run())

shelltest("zfs destroy -r test_target1/test_source1/fs1/sub")

with self.subTest("remote local pipe"):
with patch('time.strftime', return_value="test-20101111000000"):
self.assertFalse(ZfsAutobackup(["test", "test_target1", "--exclude-received", "--no-holds", "--no-progress", "--ssh-source=localhost", "--buffer=1M"]).run())
with mocktime("20101111000001"):
self.assertFalse(ZfsAutobackup(
["test", "test_target1", "--allow-empty", "--verbose", "--exclude-received", "--no-holds",
"--no-progress", "--ssh-source=localhost", "--buffer=1M"]).run())

shelltest("zfs destroy -r test_target1/test_source1/fs1/sub")

with self.subTest("local remote pipe"):
with patch('time.strftime', return_value="test-20101111000000"):
self.assertFalse(ZfsAutobackup(["test", "test_target1", "--exclude-received", "--no-holds", "--no-progress", "--ssh-target=localhost", "--buffer=1M"]).run())
with mocktime("20101111000002"):
self.assertFalse(ZfsAutobackup(
["test", "test_target1", "--allow-empty", "--exclude-received", "--no-holds", "--no-progress",
"--ssh-target=localhost", "--buffer=1M"]).run())

shelltest("zfs destroy -r test_target1/test_source1/fs1/sub")

with self.subTest("remote remote pipe"):
with patch('time.strftime', return_value="test-20101111000000"):
self.assertFalse(ZfsAutobackup(["test", "test_target1", "--exclude-received", "--no-holds", "--no-progress", "--ssh-source=localhost", "--ssh-target=localhost", "--buffer=1M"]).run())
with mocktime("20101111000003"):
self.assertFalse(ZfsAutobackup(
["test", "test_target1", "--allow-empty", "--exclude-received", "--no-holds", "--no-progress",
"--ssh-source=localhost", "--ssh-target=localhost", "--buffer=1M"]).run())

r = shelltest("zfs list -H -o name -r -t all test_target1")
self.assertMultiLineEqual(r, """
test_target1
test_target1/test_source1
test_target1/test_source1/fs1
test_target1/test_source1/fs1@test-20101111000000
test_target1/test_source1/fs1@test-20101111000001
test_target1/test_source1/fs1@test-20101111000002
test_target1/test_source1/fs1@test-20101111000003
test_target1/test_source1/fs1/sub
test_target1/test_source1/fs1/sub@test-20101111000000
test_target1/test_source1/fs1/sub@test-20101111000001
test_target1/test_source1/fs1/sub@test-20101111000002
test_target1/test_source1/fs1/sub@test-20101111000003
test_target1/test_source2
test_target1/test_source2/fs2
test_target1/test_source2/fs2/sub
test_target1/test_source2/fs2/sub@test-20101111000000
test_target1/test_source2/fs2/sub@test-20101111000001
test_target1/test_source2/fs2/sub@test-20101111000002
test_target1/test_source2/fs2/sub@test-20101111000003
""")

def test_rate(self):
"""test rate limit"""

start = time.time()
with mocktime("20101111000000"):
self.assertFalse(ZfsAutobackup(
["test", "test_target1", "--exclude-received", "--no-holds", "--no-progress", "--rate=50k"]).run())

start=time.time()
with patch('time.strftime', return_value="test-20101111000000"):
self.assertFalse(ZfsAutobackup(["test", "test_target1", "--exclude-received", "--no-holds", "--no-progress", "--rate=50k" ]).run())

#not a great way of verifying but it works.
self.assertGreater(time.time()-start, 5)


# not a great way of verifying but it works.
self.assertGreater(time.time() - start, 5)

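The 5-second lower bound follows from simple arithmetic, assuming the test pools produce at least roughly 250 kB of combined send-stream data (an assumption; the actual stream size is not shown in this diff):

rate_bytes_per_s = 50 * 1024            # --rate=50k
assumed_stream_bytes = 250 * 1024       # assumed total transfer size
print(assumed_stream_bytes / rate_bytes_per_s)  # ~5.0 seconds minimum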
@@ -85,7 +85,7 @@ class TestThinner(unittest2.TestCase):
if random.random()>=0.5:
things.append(Thing(now))

(keeps, removes)=thinner.thin(things, now=now)
(keeps, removes)=thinner.thin(things, keep_objects=[], now=now)
things=keeps


@@ -143,7 +143,7 @@ class TestThinner(unittest2.TestCase):
if random.random()>=0.5:
things.append(Thing(now))

(things, removes)=thinner.thin(things, now=now)
(things, removes)=thinner.thin(things, keep_objects=[], now=now)

result=[]
for thing in things:

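Both call sites gain a keep_objects argument. Judging from the signature alone (the Thinner implementation itself is outside this diff), objects passed in keep_objects are presumably exempt from removal regardless of the thinning schedule:

# Sketch of the assumed semantics of the new parameter:
(keeps, removes) = thinner.thin(things, keep_objects=[pinned_thing], now=now)
assert pinned_thing not in removes  # pinned objects should never be thinned away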
84
tests/test_treehasher.py
Normal file
@@ -0,0 +1,84 @@
from basetest import *
from zfs_autobackup.BlockHasher import BlockHasher


# sha1 sums of files, (bs=4096)
# da39a3ee5e6b4b0d3255bfef95601890afd80709 empty
# 642027d63bb0afd7e0ba197f2c66ad03e3d70de1 partial
# 3c0bf91170d873b8e327d3bafb6bc074580d11b7 whole
# 2e863f1fcccd6642e4e28453eba10d2d3f74d798 whole2
# 959e6b58078f0cfd2fb3d37e978fda51820473ff whole_whole2
# 309ffffba2e1977d12f3b7469971f30d28b94bd8 whole_whole2_partial


class TestTreeHasher(unittest2.TestCase):

def test_treehasher(self):
shelltest("rm -rf /tmp/treehashertest; mkdir /tmp/treehashertest")
shelltest("cp tests/data/whole /tmp/treehashertest")
shelltest("mkdir /tmp/treehashertest/emptydir")
shelltest("mkdir /tmp/treehashertest/dir")
shelltest("cp tests/data/whole_whole2_partial /tmp/treehashertest/dir")

# it should ignore these:
shelltest("ln -s / /tmp/treehashertest/symlink")
shelltest("mknod /tmp/treehashertest/c c 1 1")
shelltest("mknod /tmp/treehashertest/b b 1 1")
shelltest("mkfifo /tmp/treehashertest/f")


block_hasher = BlockHasher(count=1, skip=0)
tree_hasher = TreeHasher(block_hasher)
with self.subTest("Test output, count 1, skip 0"):
self.assertEqual(list(tree_hasher.generate("/tmp/treehashertest")), [
('whole', 0, '3c0bf91170d873b8e327d3bafb6bc074580d11b7'),
('dir/whole_whole2_partial', 0, '3c0bf91170d873b8e327d3bafb6bc074580d11b7'),
('dir/whole_whole2_partial', 1, '2e863f1fcccd6642e4e28453eba10d2d3f74d798'),
('dir/whole_whole2_partial', 2, '642027d63bb0afd7e0ba197f2c66ad03e3d70de1')
])

block_hasher = BlockHasher(count=1, skip=1)
tree_hasher = TreeHasher(block_hasher)
with self.subTest("Test output, count 1, skip 1"):
self.assertEqual(list(tree_hasher.generate("/tmp/treehashertest")), [
('whole', 0, '3c0bf91170d873b8e327d3bafb6bc074580d11b7'),
# ('dir/whole_whole2_partial', 0, '3c0bf91170d873b8e327d3bafb6bc074580d11b7'),
('dir/whole_whole2_partial', 1, '2e863f1fcccd6642e4e28453eba10d2d3f74d798'),
# ('dir/whole_whole2_partial', 2, '642027d63bb0afd7e0ba197f2c66ad03e3d70de1')
])



block_hasher = BlockHasher(count=2)
tree_hasher = TreeHasher(block_hasher)

with self.subTest("Test output, count 2, skip 0"):
self.assertEqual(list(tree_hasher.generate("/tmp/treehashertest")), [
('whole', 0, '3c0bf91170d873b8e327d3bafb6bc074580d11b7'),
('dir/whole_whole2_partial', 0, '959e6b58078f0cfd2fb3d37e978fda51820473ff'),
('dir/whole_whole2_partial', 1, '642027d63bb0afd7e0ba197f2c66ad03e3d70de1')
])

with self.subTest("Test compare"):
generator = tree_hasher.generate("/tmp/treehashertest")
errors = list(tree_hasher.compare("/tmp/treehashertest", generator))
self.assertEqual(errors, [])

with self.subTest("Test mismatch"):
generator = list(tree_hasher.generate("/tmp/treehashertest"))
shelltest("cp tests/data/whole2 /tmp/treehashertest/whole")

self.assertEqual(list(tree_hasher.compare("/tmp/treehashertest", generator)),
[('whole',
0,
'3c0bf91170d873b8e327d3bafb6bc074580d11b7',
'2e863f1fcccd6642e4e28453eba10d2d3f74d798')])

with self.subTest("Test missing file compare"):
generator = list(tree_hasher.generate("/tmp/treehashertest"))
shelltest("rm /tmp/treehashertest/whole")

self.assertEqual(list(tree_hasher.compare("/tmp/treehashertest", generator)),
[('whole', '-', '-', "ERROR: [Errno 2] No such file or directory: '/tmp/treehashertest/whole'")])

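The generate()/compare() pair works on (path, chunk_index, hexdigest) tuples, which enables a simple save-then-verify workflow. A usage sketch (the TreeHasher import path is assumed; the test above gets it via basetest's star import):

from zfs_autobackup.BlockHasher import BlockHasher
from zfs_autobackup.TreeHasher import TreeHasher  # import path assumed

hasher = TreeHasher(BlockHasher(count=1))
manifest = list(hasher.generate("/tmp/treehashertest"))   # hash every file block
errors = list(hasher.compare("/tmp/treehashertest", iter(manifest)))
print(errors)  # [] when the tree is unchanged

As the subtests show, a corrupted file yields a tuple carrying both the expected and the actual digest, while a missing file yields an ERROR tuple instead of raising.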
@@ -16,10 +16,12 @@ from basetest import *
# - test all directions (local, remote/local, local/remote, remote/remote)
#

class TestZfsEncryption(unittest2.TestCase):
class TestZfsVerify(unittest2.TestCase):


def setUp(self):
self.skipTest("WIP")

prepare_zpools()

#create actual test files and data
@@ -36,7 +38,7 @@ class TestZfsEncryption(unittest2.TestCase):
shelltest("dd if=/dev/urandom of=/dev/zvol/test_source1/fs1/bad_zvol count=1 bs=512k")

#create backup
with patch('time.strftime', return_value="test-20101111000000"):
with mocktime("20101111000000"):
self.assertFalse(ZfsAutobackup("test test_target1 --verbose --no-progress --no-holds".split(" ")).run())

#Do an ugly hack to create a fault in the bad filesystem
@@ -86,14 +88,15 @@ class TestZfsEncryption(unittest2.TestCase):
runchecked("rsync, local", "test test_target1 --verbose --exclude-received --fs-compare=rsync")

runchecked("tar, remote source and remote target",
"test test_target1 --ssh-source=localhost --ssh-target=localhost --verbose --exclude-received --fs-compare=tar")
"test test_target1 --ssh-source=localhost --ssh-target=localhost --verbose --exclude-received --fs-compare=find")
runchecked("tar, remote source",
"test test_target1 --ssh-source=localhost --verbose --exclude-received --fs-compare=tar")
"test test_target1 --ssh-source=localhost --verbose --exclude-received --fs-compare=find")
runchecked("tar, remote target",
"test test_target1 --ssh-target=localhost --verbose --exclude-received --fs-compare=tar")
runchecked("tar, local", "test test_target1 --verbose --exclude-received --fs-compare=tar")
"test test_target1 --ssh-target=localhost --verbose --exclude-received --fs-compare=find")
runchecked("tar, local", "test test_target1 --verbose --exclude-received --fs-compare=find")

with self.subTest("no common snapshot"):
#destroy common snapshot, now 3 should fail
shelltest("zfs destroy test_source1/fs1/ok_zvol@test-20101111000000")
self.assertEqual(3, ZfsAutoverify("test test_target1 --verbose --exclude-received".split(" ")).run())


@@ -35,7 +35,7 @@ class TestZfsAutobackup(unittest2.TestCase):
def test_snapshotmode(self):
"""test snapshot tool mode"""

with patch('time.strftime', return_value="test-20101111000000"):
with mocktime("20101111000000"):
self.assertFalse(ZfsAutobackup("test --no-progress --verbose".split(" ")).run())

r=shelltest("zfs list -H -o name -r -t all "+TEST_POOLS)
@@ -55,11 +55,12 @@ test_target1
""")

def test_defaults(self):
self.maxDiff=2000

with self.subTest("no datasets selected"):
with OutputIO() as buf:
with redirect_stderr(buf):
with patch('time.strftime', return_value="test-20101111000000"):
with mocktime("20101111000000"):
self.assertTrue(ZfsAutobackup("nonexisting test_target1 --verbose --debug --no-progress".split(" ")).run())

print(buf.getvalue())
@@ -69,7 +70,7 @@ test_target1

with self.subTest("defaults with full verbose and debug"):

with patch('time.strftime', return_value="test-20101111000000"):
with mocktime("20101111000000"):
self.assertFalse(ZfsAutobackup("test test_target1 --verbose --debug --no-progress".split(" ")).run())

r=shelltest("zfs list -H -o name -r -t all "+TEST_POOLS)
@@ -98,7 +99,7 @@ test_target1/test_source2/fs2/sub@test-20101111000000
""")

with self.subTest("bare defaults, allow empty"):
with patch('time.strftime', return_value="test-20101111000001"):
with mocktime("20101111000001"):
self.assertFalse(ZfsAutobackup("test test_target1 --allow-empty --no-progress".split(" ")).run())


@@ -168,47 +169,43 @@ test_target1/test_source2/fs2/sub@test-20101111000001 userrefs 1 -
""")

#make sure time handling is correct. try to make snapshots a year apart and verify that only snapshots mostly 1y old are kept
#So in this case we only want to see 2 snapshots of 2011, and none of the 2010's anymore.
with self.subTest("test time checking"):
with patch('time.strftime', return_value="test-20111111000000"):
with mocktime("20111211000000"):
self.assertFalse(ZfsAutobackup("test test_target1 --allow-empty --verbose --no-progress".split(" ")).run())


time_str="20111112000000" #month in the "future"
future_timestamp=time_secs=time.mktime(time.strptime(time_str,"%Y%m%d%H%M%S"))
with patch('time.time', return_value=future_timestamp):
with patch('time.strftime', return_value="test-20111111000001"):
with mocktime("20111211000001"):
self.assertFalse(ZfsAutobackup("test test_target1 --allow-empty --verbose --keep-source 1y1y --keep-target 1d1y --no-progress".split(" ")).run())


r=shelltest("zfs list -H -o name -r -t all "+TEST_POOLS)
self.assertMultiLineEqual(r,"""
test_source1
test_source1/fs1
test_source1/fs1@test-20111111000000
test_source1/fs1@test-20111111000001
test_source1/fs1@test-20111211000000
test_source1/fs1@test-20111211000001
test_source1/fs1/sub
test_source1/fs1/sub@test-20111111000000
test_source1/fs1/sub@test-20111111000001
test_source1/fs1/sub@test-20111211000000
test_source1/fs1/sub@test-20111211000001
test_source2
test_source2/fs2
test_source2/fs2/sub
test_source2/fs2/sub@test-20111111000000
test_source2/fs2/sub@test-20111111000001
test_source2/fs2/sub@test-20111211000000
test_source2/fs2/sub@test-20111211000001
test_source2/fs3
test_source2/fs3/sub
test_target1
test_target1/test_source1
test_target1/test_source1/fs1
test_target1/test_source1/fs1@test-20111111000000
test_target1/test_source1/fs1@test-20111111000001
test_target1/test_source1/fs1@test-20111211000000
test_target1/test_source1/fs1@test-20111211000001
test_target1/test_source1/fs1/sub
test_target1/test_source1/fs1/sub@test-20111111000000
test_target1/test_source1/fs1/sub@test-20111111000001
test_target1/test_source1/fs1/sub@test-20111211000000
test_target1/test_source1/fs1/sub@test-20111211000001
test_target1/test_source2
test_target1/test_source2/fs2
test_target1/test_source2/fs2/sub
test_target1/test_source2/fs2/sub@test-20111111000000
test_target1/test_source2/fs2/sub@test-20111111000001
test_target1/test_source2/fs2/sub@test-20111211000000
test_target1/test_source2/fs2/sub@test-20111211000001
""")

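The schedules used in the time-checking test follow zfs-autobackup's "<interval><ttl>" thinning-rule syntax. Reading the two rules above (stated here as an interpretation of the rule format, not quoted from the docs):

# --keep-source 1y1y : keep one snapshot per year, for one year
# --keep-target 1d1y : keep one snapshot per day, for one year
# With mocktime() now also faking time.time(), the separate
# patch('time.time', ...) block the old code needed becomes redundant,
# which is why this hunk deletes it.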
def test_ignore_othersnaphots(self):
@@ -216,7 +213,7 @@ test_target1/test_source2/fs2/sub@test-20111111000001
r=shelltest("zfs snapshot test_source1/fs1@othersimple")
r=shelltest("zfs snapshot test_source1/fs1@otherdate-20001111000000")

with patch('time.strftime', return_value="test-20101111000000"):
with mocktime("20101111000000"):
self.assertFalse(ZfsAutobackup("test test_target1 --no-progress --verbose".split(" ")).run())

r=shelltest("zfs list -H -o name -r -t all "+TEST_POOLS)
@@ -251,7 +248,7 @@ test_target1/test_source2/fs2/sub@test-20101111000000
r=shelltest("zfs snapshot test_source1/fs1@othersimple")
r=shelltest("zfs snapshot test_source1/fs1@otherdate-20001111000000")

with patch('time.strftime', return_value="test-20101111000000"):
with mocktime("20101111000000"):
self.assertFalse(ZfsAutobackup("test test_target1 --no-progress --verbose --other-snapshots".split(" ")).run())

r=shelltest("zfs list -H -o name -r -t all "+TEST_POOLS)
@@ -286,7 +283,7 @@ test_target1/test_source2/fs2/sub@test-20101111000000

def test_nosnapshot(self):

with patch('time.strftime', return_value="test-20101111000000"):
with mocktime("20101111000000"):
self.assertFalse(ZfsAutobackup("test test_target1 --verbose --no-snapshot --no-progress".split(" ")).run())

r=shelltest("zfs list -H -o name -r -t all "+TEST_POOLS)
@@ -310,7 +307,7 @@ test_target1/test_source2/fs2

def test_nosend(self):

with patch('time.strftime', return_value="test-20101111000000"):
with mocktime("20101111000000"):
self.assertFalse(ZfsAutobackup("test test_target1 --verbose --no-send --no-progress".split(" ")).run())

r=shelltest("zfs list -H -o name -r -t all "+TEST_POOLS)
@@ -333,7 +330,7 @@ test_target1
def test_ignorereplicated(self):
r=shelltest("zfs snapshot test_source1/fs1@otherreplication")

with patch('time.strftime', return_value="test-20101111000000"):
with mocktime("20101111000000"):
self.assertFalse(ZfsAutobackup("test test_target1 --no-progress --verbose --ignore-replicated".split(" ")).run())

r=shelltest("zfs list -H -o name -r -t all "+TEST_POOLS)
@@ -362,7 +359,7 @@ test_target1/test_source2/fs2/sub@test-20101111000000

def test_noholds(self):

with patch('time.strftime', return_value="test-20101111000000"):
with mocktime("20101111000000"):
self.assertFalse(ZfsAutobackup("test test_target1 --verbose --no-holds --no-progress".split(" ")).run())

r=shelltest("zfs get -r userrefs test_source1 test_source2 test_target1")
@@ -394,7 +391,7 @@ test_target1/test_source2/fs2/sub@test-20101111000000 userrefs 0 -

def test_strippath(self):

with patch('time.strftime', return_value="test-20101111000000"):
with mocktime("20101111000000"):
self.assertFalse(ZfsAutobackup("test test_target1 --verbose --strip-path=1 --no-progress".split(" ")).run())

r=shelltest("zfs list -H -o name -r -t all "+TEST_POOLS)
@@ -420,33 +417,13 @@ test_target1/fs2/sub
test_target1/fs2/sub@test-20101111000000
""")

# def test_strippath_toomuch(self):
# with patch('time.strftime', return_value="test-20101111000000"):
# self.assertFalse(
# ZfsAutobackup("test test_target1 --verbose --strip-path=2 --no-progress".split(" ")).run())
#
# r = shelltest("zfs list -H -o name -r -t all " + TEST_POOLS)
# self.assertMultiLineEqual(r, """
# test_source1
# test_source1/fs1
# test_source1/fs1@test-20101111000000
# test_source1/fs1/sub
# test_source1/fs1/sub@test-20101111000000
# test_source2
# test_source2/fs2
# test_source2/fs2/sub
# test_source2/fs2/sub@test-20101111000000
# test_source2/fs3
# test_source2/fs3/sub
# test_target1
# test_target1/fs1
# test_target1/fs1@test-20101111000000
# test_target1/fs1/sub
# test_target1/fs1/sub@test-20101111000000
# test_target1/fs2
# test_target1/fs2/sub
# test_target1/fs2/sub@test-20101111000000
# """)
def test_strippath_collision(self):
with self.assertRaisesRegexp(Exception,"collision"):
ZfsAutobackup("test test_target1 --verbose --strip-path=2 --no-progress --debug".split(" ")).run()

def test_strippath_toomuch(self):
with self.assertRaisesRegexp(Exception,"too much"):
ZfsAutobackup("test test_target1 --verbose --strip-path=3 --no-progress --debug".split(" ")).run()

def test_clearrefres(self):

@@ -457,10 +434,10 @@ test_target1/fs2/sub@test-20101111000000

r=shelltest("zfs set refreservation=1M test_source1/fs1")

with patch('time.strftime', return_value="test-20101111000000"):
with mocktime("20101111000000"):
self.assertFalse(ZfsAutobackup("test test_target1 --no-progress --verbose --clear-refreservation".split(" ")).run())

r=shelltest("zfs get refreservation -r test_source1 test_source2 test_target1")
r=shelltest("zfs get -r refreservation test_source1 test_source2 test_target1")
self.assertMultiLineEqual(r,"""
NAME PROPERTY VALUE SOURCE
test_source1 refreservation none default
@@ -495,10 +472,10 @@ test_target1/test_source2/fs2/sub@test-20101111000000 refreservation -
self.skipTest("This zfs-userspace version doesn't support -o")


with patch('time.strftime', return_value="test-20101111000000"):
with mocktime("20101111000000"):
self.assertFalse(ZfsAutobackup("test test_target1 --no-progress --verbose --clear-mountpoint --debug".split(" ")).run())

r=shelltest("zfs get canmount -r test_source1 test_source2 test_target1")
r=shelltest("zfs get -r canmount test_source1 test_source2 test_target1")
self.assertMultiLineEqual(r,"""
NAME PROPERTY VALUE SOURCE
test_source1 canmount on default
@@ -513,13 +490,13 @@ test_source2/fs2/sub@test-20101111000000 canmount - -
test_source2/fs3 canmount on default
test_source2/fs3/sub canmount on default
test_target1 canmount on default
test_target1/test_source1 canmount on default
test_target1/test_source1 canmount off local
test_target1/test_source1/fs1 canmount noauto local
test_target1/test_source1/fs1@test-20101111000000 canmount - -
test_target1/test_source1/fs1/sub canmount noauto local
test_target1/test_source1/fs1/sub@test-20101111000000 canmount - -
test_target1/test_source2 canmount on default
test_target1/test_source2/fs2 canmount on default
test_target1/test_source2 canmount off local
test_target1/test_source2/fs2 canmount off local
test_target1/test_source2/fs2/sub canmount noauto local
test_target1/test_source2/fs2/sub@test-20101111000000 canmount - -
""")
@@ -528,18 +505,17 @@ test_target1/test_source2/fs2/sub@test-20101111000000 canmount - -
def test_rollback(self):

#initial backup
with patch('time.strftime', return_value="test-20101111000000"):
with mocktime("20101111000000"):
self.assertFalse(ZfsAutobackup("test test_target1 --no-progress --verbose".split(" ")).run())

#make change
r=shelltest("zfs mount test_target1/test_source1/fs1")
r=shelltest("touch /test_target1/test_source1/fs1/change.txt")

with patch('time.strftime', return_value="test-20101111000001"):
with mocktime("20101111000001"):
#should fail (busy)
self.assertTrue(ZfsAutobackup("test test_target1 --no-progress --verbose --allow-empty".split(" ")).run())

with patch('time.strftime', return_value="test-20101111000002"):
with mocktime("20101111000002"):
#rollback, should succeed
self.assertFalse(ZfsAutobackup("test test_target1 --no-progress --verbose --allow-empty --rollback".split(" ")).run())

@@ -547,36 +523,35 @@ test_target1/test_source2/fs2/sub@test-20101111000000 canmount - -
def test_destroyincompat(self):

#initial backup
with patch('time.strftime', return_value="test-20101111000000"):
with mocktime("20101111000000"):
self.assertFalse(ZfsAutobackup("test test_target1 --no-progress --verbose".split(" ")).run())

#add multiple compatible snapshots (written is still 0)
r=shelltest("zfs snapshot test_target1/test_source1/fs1@compatible1")
r=shelltest("zfs snapshot test_target1/test_source1/fs1@compatible2")

with patch('time.strftime', return_value="test-20101111000001"):
with mocktime("20101111000001"):
#should be ok, is compatible
self.assertFalse(ZfsAutobackup("test test_target1 --no-progress --verbose --allow-empty".split(" ")).run())

#add incompatible snapshot by changing and snapshotting
r=shelltest("zfs mount test_target1/test_source1/fs1")
r=shelltest("touch /test_target1/test_source1/fs1/change.txt")
r=shelltest("zfs snapshot test_target1/test_source1/fs1@incompatible1")


with patch('time.strftime', return_value="test-20101111000002"):
with mocktime("20101111000002"):
#--test should fail, now incompatible
self.assertTrue(ZfsAutobackup("test test_target1 --no-progress --verbose --allow-empty --test".split(" ")).run())

with patch('time.strftime', return_value="test-20101111000002"):
with mocktime("20101111000002"):
#should fail, now incompatible
self.assertTrue(ZfsAutobackup("test test_target1 --no-progress --verbose --allow-empty".split(" ")).run())

with patch('time.strftime', return_value="test-20101111000003"):
with mocktime("20101111000003"):
#--test should succeed by destroying incompatibles
self.assertFalse(ZfsAutobackup("test test_target1 --no-progress --verbose --allow-empty --destroy-incompatible --test".split(" ")).run())

with patch('time.strftime', return_value="test-20101111000003"):
with mocktime("20101111000003"):
#should succeed by destroying incompatibles
self.assertFalse(ZfsAutobackup("test test_target1 --no-progress --verbose --allow-empty --destroy-incompatible".split(" ")).run())

@@ -614,13 +589,13 @@ test_target1/test_source2/fs2/sub@test-20101111000003

#test all ssh directions

with patch('time.strftime', return_value="test-20101111000000"):
with mocktime("20101111000000"):
self.assertFalse(ZfsAutobackup("test test_target1 --no-progress --verbose --allow-empty --ssh-source localhost --exclude-received".split(" ")).run())

with patch('time.strftime', return_value="test-20101111000001"):
with mocktime("20101111000001"):
self.assertFalse(ZfsAutobackup("test test_target1 --no-progress --verbose --allow-empty --ssh-target localhost --exclude-received".split(" ")).run())

with patch('time.strftime', return_value="test-20101111000002"):
with mocktime("20101111000002"):
self.assertFalse(ZfsAutobackup("test test_target1 --no-progress --verbose --allow-empty --ssh-source localhost --ssh-target localhost".split(" ")).run())


@@ -665,7 +640,7 @@ test_target1/test_source2/fs2/sub@test-20101111000002
def test_minchange(self):

#initial
with patch('time.strftime', return_value="test-20101111000000"):
with mocktime("20101111000000"):
self.assertFalse(ZfsAutobackup("test test_target1 --no-progress --verbose --min-change 100000".split(" ")).run())

#make small change, use umount to reflect the changes immediately
@@ -675,7 +650,7 @@ test_target1/test_source2/fs2/sub@test-20101111000002


#too small change, takes no snapshots
with patch('time.strftime', return_value="test-20101111000001"):
with mocktime("20101111000001"):
self.assertFalse(ZfsAutobackup("test test_target1 --no-progress --verbose --min-change 100000".split(" ")).run())

#make big change
@@ -683,7 +658,7 @@ test_target1/test_source2/fs2/sub@test-20101111000002
r=shelltest("zfs umount test_source1/fs1; zfs mount test_source1/fs1")

#bigger change, should take snapshot
with patch('time.strftime', return_value="test-20101111000002"):
with mocktime("20101111000002"):
self.assertFalse(ZfsAutobackup("test test_target1 --no-progress --verbose --min-change 100000".split(" ")).run())

r=shelltest("zfs list -H -o name -r -t all "+TEST_POOLS)
@@ -716,7 +691,7 @@ test_target1/test_source2/fs2/sub@test-20101111000000
def test_test(self):

#initial
with patch('time.strftime', return_value="test-20101111000000"):
with mocktime("20101111000000"):
self.assertFalse(ZfsAutobackup("test test_target1 --no-progress --verbose --test".split(" ")).run())

r=shelltest("zfs list -H -o name -r -t all "+TEST_POOLS)
@@ -733,12 +708,12 @@ test_target1
""")

#actually make initial backup
with patch('time.strftime', return_value="test-20101111000001"):
with mocktime("20101111000001"):
self.assertFalse(ZfsAutobackup("test test_target1 --no-progress --verbose".split(" ")).run())


#test incremental
with patch('time.strftime', return_value="test-20101111000002"):
with mocktime("20101111000002"):
self.assertFalse(ZfsAutobackup("test test_target1 --no-progress --allow-empty --verbose --test".split(" ")).run())

r=shelltest("zfs list -H -o name -r -t all "+TEST_POOLS)
@@ -774,7 +749,7 @@ test_target1/test_source2/fs2/sub@test-20101111000001
shelltest("zfs create test_target1/test_source1")
shelltest("zfs send test_source1/fs1@migrate1| zfs recv test_target1/test_source1/fs1")

with patch('time.strftime', return_value="test-20101111000000"):
with mocktime("20101111000000"):
self.assertFalse(ZfsAutobackup("test test_target1 --no-progress --verbose".split(" ")).run())

r=shelltest("zfs list -H -o name -r -t all "+TEST_POOLS)
@@ -807,15 +782,15 @@ test_target1/test_source2/fs2/sub@test-20101111000000
def test_keep0(self):
"""test if keep-source=0 and keep-target=0 don't delete the common snapshot and break the backup"""

with patch('time.strftime', return_value="test-20101111000000"):
with mocktime("20101111000000"):
self.assertFalse(ZfsAutobackup("test test_target1 --no-progress --verbose --keep-source=0 --keep-target=0".split(" ")).run())

#make snapshot, shouldn't delete 0
with patch('time.strftime', return_value="test-20101111000001"):
with mocktime("20101111000001"):
self.assertFalse(ZfsAutobackup("test --no-progress --verbose --keep-source=0 --keep-target=0 --allow-empty".split(" ")).run())

#make snapshot 2, shouldn't delete 0 since it has holds, but will delete 1 since it has no holds
with patch('time.strftime', return_value="test-20101111000002"):
with mocktime("20101111000002"):
self.assertFalse(ZfsAutobackup("test --no-progress --verbose --keep-source=0 --keep-target=0 --allow-empty".split(" ")).run())

r = shelltest("zfs list -H -o name -r -t all " + TEST_POOLS)
@@ -847,7 +822,7 @@ test_target1/test_source2/fs2/sub@test-20101111000000
""")

#make another backup but with no-holds. we should naturally end up with only number 3
with patch('time.strftime', return_value="test-20101111000003"):
with mocktime("20101111000003"):
self.assertFalse(ZfsAutobackup("test test_target1 --no-progress --verbose --keep-source=0 --keep-target=0 --no-holds --allow-empty".split(" ")).run())

r = shelltest("zfs list -H -o name -r -t all " + TEST_POOLS)
@@ -877,7 +852,7 @@ test_target1/test_source2/fs2/sub@test-20101111000003


# run with snapshot-only for 4, since we used no-holds, it will delete 3 on the source, breaking the backup
with patch('time.strftime', return_value="test-20101111000004"):
with mocktime("20101111000004"):
self.assertFalse(ZfsAutobackup("test --no-progress --verbose --keep-source=0 --keep-target=0 --allow-empty".split(" ")).run())

r = shelltest("zfs list -H -o name -r -t all " + TEST_POOLS)
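The hold behaviour that test_keep0 exercises can be inspected directly. zfs-autobackup places a hold named zfs_autobackup:<backupname> on the latest common snapshot, as the zfs release call earlier in this diff shows:

# Inspect the hold that protects the common snapshot from --keep-source=0:
print(shelltest("zfs holds test_source1/fs1@test-20101111000003"))
# Releasing it by hand would mimic what --no-holds permits:
#   zfs release zfs_autobackup:test test_source1/fs1@test-20101111000003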
@@ -912,7 +887,7 @@ test_target1/test_source2/fs2/sub@test-20101111000003
r = shelltest("zfs snapshot test_source1@test")

l=LogConsole(show_verbose=True, show_debug=False, color=False)
n=ZfsNode(snapshot_time_format="bla", hold_name="bla", logger=l)
n=ZfsNode(utc=False, snapshot_time_format="bla", hold_name="bla", logger=l)
d=ZfsDataset(n,"test_source1@test")

sp=d.send_pipe([], prev_snapshot=None, resume_token=None, show_progress=True, raw=False, send_pipes=[], send_properties=True, write_embedded=True, zfs_compressed=True)

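The ZfsNode constructor gains a utc flag in this change. Presumably it selects between local time and UTC when formatting snapshot names (an inference from the parameter name; the implementation is outside this diff):

# Hypothetical usage with UTC naming enabled:
n = ZfsNode(utc=True, snapshot_time_format="%Y%m%d%H%M%S", hold_name="bla", logger=l)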
@@ -10,10 +10,10 @@ class TestZfsAutobackup31(unittest2.TestCase):

def test_no_thinning(self):

with patch('time.strftime', return_value="test-20101111000000"):
with mocktime("20101111000000"):
self.assertFalse(ZfsAutobackup("test test_target1 --no-progress --verbose --allow-empty".split(" ")).run())

with patch('time.strftime', return_value="test-20101111000001"):
with mocktime("20101111000001"):
self.assertFalse(ZfsAutobackup("test test_target1 --no-progress --verbose --allow-empty --keep-target=0 --keep-source=0 --no-thinning".split(" ")).run())

r=shelltest("zfs list -H -o name -r -t all "+TEST_POOLS)
@@ -54,10 +54,10 @@ test_target1/test_source2/fs2/sub@test-20101111000001
shelltest("zfs create test_target1/a")
shelltest("zfs create test_target1/b")

with patch('time.strftime', return_value="test-20101111000000"):
with mocktime("20101111000000"):
self.assertFalse(ZfsAutobackup("test test_target1/a --no-progress --verbose --debug".split(" ")).run())

with patch('time.strftime', return_value="test-20101111000001"):
with mocktime("20101111000001"):
self.assertFalse(ZfsAutobackup("test test_target1/b --no-progress --verbose".split(" ")).run())

r=shelltest("zfs list -H -o name -r -t snapshot test_target1")
@@ -75,7 +75,45 @@ test_target1/b/test_target1/a/test_source1/fs1/sub@test-20101111000000

def test_zfs_compressed(self):

with patch('time.strftime', return_value="test-20101111000000"):
with mocktime("20101111000000"):
self.assertFalse(
ZfsAutobackup("test test_target1 --no-progress --verbose --debug --zfs-compressed".split(" ")).run())

def test_force(self):
"""test 1:1 replication"""

shelltest("zfs set autobackup:test=true test_source1")

with mocktime("20101111000000"):
self.assertFalse(
ZfsAutobackup("test test_target1 --no-progress --verbose --debug --force --strip-path=1".split(" ")).run())

r=shelltest("zfs list -H -o name -r -t snapshot test_target1")
self.assertMultiLineEqual(r,"""
test_target1@test-20101111000000
test_target1/fs1@test-20101111000000
test_target1/fs1/sub@test-20101111000000
test_target1/fs2/sub@test-20101111000000
""")


def test_exclude_unchanged(self):

shelltest("zfs snapshot -r test_source1@somesnapshot")

with mocktime("20101111000000"):
self.assertFalse(
ZfsAutobackup(
"test test_target1 --verbose --allow-empty --exclude-unchanged=1".split(" ")).run())

#everything should be excluded, but should not return an error (see #190)
with mocktime("20101111000001"):
self.assertFalse(
ZfsAutobackup(
"test test_target1 --verbose --allow-empty --exclude-unchanged=1".split(" ")).run())

r = shelltest("zfs list -H -o name -r -t snapshot test_target1")
self.assertMultiLineEqual(r, """
test_target1/test_source2/fs2/sub@test-20101111000000
""")

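--exclude-unchanged now takes a value, apparently a byte threshold (an assumption based on the =1 argument seen here): datasets whose accumulated changes fall below it are skipped. The underlying ZFS property can be checked directly:

# 'written' reports bytes changed since the previous snapshot:
print(shelltest("zfs get -H -o value written test_source1/fs1"))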
200
tests/test_zfsautobackup32.py
Normal file
@@ -0,0 +1,200 @@
from basetest import *

class TestZfsAutobackup32(unittest2.TestCase):
"""various new 3.2 features"""

def setUp(self):
prepare_zpools()
self.longMessage=True

def test_invalid_common_snapshot(self):

with mocktime("20101111000000"):
self.assertFalse(ZfsAutobackup("test test_target1 --no-progress --verbose --allow-empty".split(" ")).run())

#create 2 snapshots with the same name, which are invalid as a common snapshot
shelltest("zfs snapshot test_source1/fs1@invalid")
shelltest("zfs snapshot test_target1/test_source1/fs1@invalid")

with mocktime("20101111000001"):
#try the old way (without guid checking), and fail:
self.assertEqual(ZfsAutobackup("test test_target1 --no-progress --verbose --allow-empty --no-guid-check".split(" ")).run(),1)
#new way should be ok:
self.assertFalse(ZfsAutobackup("test test_target1 --no-progress --verbose --no-snapshot".split(" ")).run())

r=shelltest("zfs list -H -o name -r -t all "+TEST_POOLS)
self.assertMultiLineEqual(r,"""
test_source1
test_source1/fs1
test_source1/fs1@test-20101111000000
test_source1/fs1@invalid
test_source1/fs1@test-20101111000001
test_source1/fs1/sub
test_source1/fs1/sub@test-20101111000000
test_source1/fs1/sub@test-20101111000001
test_source2
test_source2/fs2
test_source2/fs2/sub
test_source2/fs2/sub@test-20101111000000
test_source2/fs2/sub@test-20101111000001
test_source2/fs3
test_source2/fs3/sub
test_target1
test_target1/test_source1
test_target1/test_source1/fs1
test_target1/test_source1/fs1@test-20101111000000
test_target1/test_source1/fs1@invalid
test_target1/test_source1/fs1@test-20101111000001
test_target1/test_source1/fs1/sub
test_target1/test_source1/fs1/sub@test-20101111000000
test_target1/test_source1/fs1/sub@test-20101111000001
test_target1/test_source2
test_target1/test_source2/fs2
test_target1/test_source2/fs2/sub
test_target1/test_source2/fs2/sub@test-20101111000000
test_target1/test_source2/fs2/sub@test-20101111000001
""")

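The new default behaviour (disabled by --no-guid-check) evidently matches snapshots by guid rather than by name alone. The mismatch this test constructs is easy to see with ZFS's guid property:

src_guid = shelltest("zfs get -H -o value guid test_source1/fs1@invalid")
tgt_guid = shelltest("zfs get -H -o value guid test_target1/test_source1/fs1@invalid")
print(src_guid == tgt_guid)  # False: same snapshot name, but different snapshots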
def test_invalid_common_snapshot_with_data(self):

with mocktime("20101111000000"):
self.assertFalse(ZfsAutobackup("test test_target1 --no-progress --verbose --allow-empty".split(" ")).run())

#create 2 snapshots with the same name, which are invalid as a common snapshot
shelltest("zfs snapshot test_source1/fs1@invalid")
shelltest("touch /test_target1/test_source1/fs1/shouldnotbeHere")
shelltest("zfs snapshot test_target1/test_source1/fs1@invalid")

with mocktime("20101111000001"):
#try the old way and fail:
self.assertEqual(ZfsAutobackup("test test_target1 --no-progress --verbose --allow-empty --destroy-incompatible --no-guid-check".split(" ")).run(),1)
#new way should be ok
self.assertFalse(ZfsAutobackup("test test_target1 --no-progress --verbose --no-snapshot --destroy-incompatible".split(" ")).run())

r=shelltest("zfs list -H -o name -r -t all "+TEST_POOLS)
self.assertMultiLineEqual(r,"""
test_source1
test_source1/fs1
test_source1/fs1@test-20101111000000
test_source1/fs1@invalid
test_source1/fs1@test-20101111000001
test_source1/fs1/sub
test_source1/fs1/sub@test-20101111000000
test_source1/fs1/sub@test-20101111000001
test_source2
test_source2/fs2
test_source2/fs2/sub
test_source2/fs2/sub@test-20101111000000
test_source2/fs2/sub@test-20101111000001
test_source2/fs3
test_source2/fs3/sub
test_target1
test_target1/test_source1
test_target1/test_source1/fs1
test_target1/test_source1/fs1@test-20101111000000
test_target1/test_source1/fs1@test-20101111000001
test_target1/test_source1/fs1/sub
test_target1/test_source1/fs1/sub@test-20101111000000
test_target1/test_source1/fs1/sub@test-20101111000001
test_target1/test_source2
test_target1/test_source2/fs2
test_target1/test_source2/fs2/sub
test_target1/test_source2/fs2/sub@test-20101111000000
test_target1/test_source2/fs2/sub@test-20101111000001
""")


#check consistent mounting behaviour, see issue #112
def test_mount_consitency_mounted(self):
"""only filesystems that have canmount=on with a mountpoint should be mounted."""

shelltest("zfs create -V 10M test_source1/fs1/subvol")

with mocktime("20101111000000"):
self.assertFalse(ZfsAutobackup("test test_target1 --no-progress --verbose --allow-empty".split(" ")).run())

r=shelltest("zfs mount |grep -o /test_target1.*")
self.assertMultiLineEqual(r,"""
/test_target1
/test_target1/test_source1/fs1
/test_target1/test_source1/fs1/sub
/test_target1/test_source2/fs2/sub
""")


def test_mount_consitency_unmounted(self):
"""only test_target1 should be mounted in this test"""

shelltest("zfs create -V 10M test_source1/fs1/subvol")

with mocktime("20101111000000"):
self.assertFalse(ZfsAutobackup("test test_target1 --no-progress --verbose --allow-empty --clear-mountpoint".split(" ")).run())

r=shelltest("zfs mount |grep -o /test_target1.*")
self.assertMultiLineEqual(r,"""
/test_target1
""")




def test_transfer_thinning(self):
# test pre/post/during transfer thinning and efficient transfer (no transferring of stuff that gets deleted on target)

#less output
shelltest("zfs set autobackup:test2=true test_source1/fs1/sub")

# nobody wants this one, will be destroyed before transferring (over a year ago)
with mocktime("20000101000000"):
self.assertFalse(ZfsAutobackup("test2 --allow-empty".split(" ")).run())

# only target wants this one (monthlies)
with mocktime("20010101000000"):
self.assertFalse(ZfsAutobackup("test2 --allow-empty".split(" ")).run())

# both want this one (daily + monthly)
# other snapshots should influence the middle one that we actually want.
with mocktime("20010201000000"):
shelltest("zfs snapshot test_source1/fs1/sub@other1")
self.assertFalse(ZfsAutobackup("test2 --allow-empty".split(" ")).run())
shelltest("zfs snapshot test_source1/fs1/sub@other2")

# only source wants this one (daily)
with mocktime("20010202000000"):
self.assertFalse(ZfsAutobackup("test2 --allow-empty".split(" ")).run())

#will become common snapshot
with OutputIO() as buf:
with redirect_stdout(buf):
with mocktime("20010203000000"):
self.assertFalse(ZfsAutobackup("--keep-source=1d10d --keep-target=1m10m --allow-empty --verbose --clear-mountpoint --other-snapshots test2 test_target1".split(" ")).run())


print(buf.getvalue())
self.assertIn(
"""
[Source] test_source1/fs1/sub@test2-20000101000000: Destroying
[Source] test_source1/fs1/sub@test2-20010101000000: -> test_target1/test_source1/fs1/sub (new)
[Source] test_source1/fs1/sub@other1: -> test_target1/test_source1/fs1/sub
[Source] test_source1/fs1/sub@test2-20010101000000: Destroying
[Source] test_source1/fs1/sub@test2-20010201000000: -> test_target1/test_source1/fs1/sub
[Source] test_source1/fs1/sub@other2: -> test_target1/test_source1/fs1/sub
[Source] test_source1/fs1/sub@test2-20010203000000: -> test_target1/test_source1/fs1/sub
""", buf.getvalue())


r=shelltest("zfs list -H -o name -r -t snapshot test_source1 test_target1")
self.assertMultiLineEqual(r,"""
test_source1/fs1/sub@other1
test_source1/fs1/sub@test2-20010201000000
test_source1/fs1/sub@other2
test_source1/fs1/sub@test2-20010202000000
test_source1/fs1/sub@test2-20010203000000
test_target1/test_source1/fs1/sub@test2-20010101000000
test_target1/test_source1/fs1/sub@other1
test_target1/test_source1/fs1/sub@test2-20010201000000
test_target1/test_source1/fs1/sub@other2
test_target1/test_source1/fs1/sub@test2-20010203000000
""")


223
tests/test_zfscheck.py
Normal file
@ -0,0 +1,223 @@
from os.path import exists

from basetest import *
from zfs_autobackup.BlockHasher import BlockHasher


class TestZfsCheck(unittest2.TestCase):

def setUp(self):
pass


def test_volume(self):

if exists("/.dockerenv"):
self.skipTest("FIXME: zfscheck volumes not supported in docker yet")

prepare_zpools()

shelltest("zfs create -V200M test_source1/vol")
shelltest("zfs snapshot test_source1/vol@test")

with self.subTest("Generate"):
with OutputIO() as buf:
with redirect_stdout(buf):
self.assertFalse(ZfsCheck("test_source1/vol@test".split(" "),print_arguments=False).run())

print(buf.getvalue())
self.assertEqual("""0 2c2ceccb5ec5574f791d45b63c940cff20550f9a
1 2c2ceccb5ec5574f791d45b63c940cff20550f9a
""", buf.getvalue())

#store on disk for next step, add one error.
with open("/tmp/testhashes", "w") as fh:
fh.write(buf.getvalue()+"1\t2c2ceccb5ec5574f791d45b63c940cff20550f9X")

with self.subTest("Compare"):
with OutputIO() as buf:
with redirect_stdout(buf):
self.assertEqual(1, ZfsCheck("test_source1/vol@test --check=/tmp/testhashes".split(" "),print_arguments=False).run())
print(buf.getvalue())
self.assertEqual("Chunk 1 failed: 2c2ceccb5ec5574f791d45b63c940cff20550f9X 2c2ceccb5ec5574f791d45b63c940cff20550f9a\n", buf.getvalue())

def test_filesystem(self):
prepare_zpools()

shelltest("cp tests/data/whole /test_source1/testfile")
shelltest("mkdir /test_source1/emptydir")
shelltest("mkdir /test_source1/dir")
shelltest("cp tests/data/whole2 /test_source1/dir/testfile")

#it should ignore these:
shelltest("ln -s / /test_source1/symlink")
shelltest("mknod /test_source1/c c 1 1")
shelltest("mknod /test_source1/b b 1 1")
shelltest("mkfifo /test_source1/f")

shelltest("zfs snapshot test_source1@test")
ZfsCheck("test_source1@test --debug".split(" "), print_arguments=False).run()
with self.subTest("Generate"):
with OutputIO() as buf:
with redirect_stdout(buf):
self.assertFalse(ZfsCheck("test_source1@test".split(" "), print_arguments=False).run())

print(buf.getvalue())
self.assertEqual("""testfile 0 3c0bf91170d873b8e327d3bafb6bc074580d11b7
dir/testfile 0 2e863f1fcccd6642e4e28453eba10d2d3f74d798
""", buf.getvalue())

#store on disk for next step, add error
with open("/tmp/testhashes", "w") as fh:
fh.write(buf.getvalue()+"dir/testfile 0 2e863f1fcccd6642e4e28453eba10d2d3f74d79X")

with self.subTest("Compare"):
with OutputIO() as buf:
with redirect_stdout(buf):
self.assertEqual(1, ZfsCheck("test_source1@test --check=/tmp/testhashes".split(" "),print_arguments=False).run())

print(buf.getvalue())
self.assertEqual("dir/testfile: Chunk 0 failed: 2e863f1fcccd6642e4e28453eba10d2d3f74d79X 2e863f1fcccd6642e4e28453eba10d2d3f74d798\n", buf.getvalue())

def test_file(self):

with self.subTest("Generate"):
with OutputIO() as buf:
with redirect_stdout(buf):
self.assertFalse(ZfsCheck("tests/data/whole".split(" "), print_arguments=False).run())

print(buf.getvalue())
self.assertEqual("""0 3c0bf91170d873b8e327d3bafb6bc074580d11b7
""", buf.getvalue())

# store on disk for next step, add error
with open("/tmp/testhashes", "w") as fh:
fh.write(buf.getvalue()+"0 3c0bf91170d873b8e327d3bafb6bc074580d11bX")

with self.subTest("Compare"):
with OutputIO() as buf:
with redirect_stdout(buf):
self.assertEqual(1,ZfsCheck("tests/data/whole --check=/tmp/testhashes".split(" "), print_arguments=False).run())
print(buf.getvalue())
self.assertEqual("Chunk 0 failed: 3c0bf91170d873b8e327d3bafb6bc074580d11bX 3c0bf91170d873b8e327d3bafb6bc074580d11b7\n", buf.getvalue())

def test_tree(self):
shelltest("rm -rf /tmp/testtree; mkdir /tmp/testtree")
shelltest("cp tests/data/whole /tmp/testtree")
shelltest("cp tests/data/whole_whole2 /tmp/testtree")
shelltest("cp tests/data/whole2 /tmp/testtree")
shelltest("cp tests/data/partial /tmp/testtree")
shelltest("cp tests/data/whole_whole2_partial /tmp/testtree")

####################################
with self.subTest("Generate, skip 1"):
with OutputIO() as buf:
with redirect_stdout(buf):
self.assertFalse(ZfsCheck("/tmp/testtree --skip=1".split(" "), print_arguments=False).run())

#since order varies, just check count (there is one empty line for some reason, only when testing like this)
print(buf.getvalue().split("\n"))
self.assertEqual(len(buf.getvalue().split("\n")),4)

######################################
with self.subTest("Compare, all incorrect, skip 1"):

# store on disk for next step, add error
with open("/tmp/testhashes", "w") as fh:
fh.write("""
partial 0 642027d63bb0afd7e0ba197f2c66ad03e3d70deX
whole 0 3c0bf91170d873b8e327d3bafb6bc074580d11bX
whole2 0 2e863f1fcccd6642e4e28453eba10d2d3f74d79X
whole_whole2 0 959e6b58078f0cfd2fb3d37e978fda51820473fX
whole_whole2_partial 0 309ffffba2e1977d12f3b7469971f30d28b94bdX
""")

with OutputIO() as buf:
with redirect_stdout(buf):
self.assertEqual(ZfsCheck("/tmp/testtree --check=/tmp/testhashes --skip=1".split(" "), print_arguments=False).run(), 3)

print(buf.getvalue())
self.assertMultiLineEqual("""partial: Chunk 0 failed: 642027d63bb0afd7e0ba197f2c66ad03e3d70deX 642027d63bb0afd7e0ba197f2c66ad03e3d70de1
whole2: Chunk 0 failed: 2e863f1fcccd6642e4e28453eba10d2d3f74d79X 2e863f1fcccd6642e4e28453eba10d2d3f74d798
whole_whole2_partial: Chunk 0 failed: 309ffffba2e1977d12f3b7469971f30d28b94bdX 309ffffba2e1977d12f3b7469971f30d28b94bd8
""",buf.getvalue())

####################################
with self.subTest("Generate"):
with OutputIO() as buf:
with redirect_stdout(buf):
self.assertFalse(ZfsCheck("/tmp/testtree".split(" "), print_arguments=False).run())

#file order on disk can vary, so sort it..
sorted=buf.getvalue().split("\n")
sorted.sort()
sorted="\n".join(sorted)+"\n"

print(sorted)
self.assertEqual("""
partial 0 642027d63bb0afd7e0ba197f2c66ad03e3d70de1
whole 0 3c0bf91170d873b8e327d3bafb6bc074580d11b7
whole2 0 2e863f1fcccd6642e4e28453eba10d2d3f74d798
whole_whole2 0 959e6b58078f0cfd2fb3d37e978fda51820473ff
whole_whole2_partial 0 309ffffba2e1977d12f3b7469971f30d28b94bd8
""", sorted)

# store on disk for next step, add error
with open("/tmp/testhashes", "w") as fh:
fh.write(buf.getvalue() + "whole_whole2_partial 0 309ffffba2e1977d12f3b7469971f30d28b94bdX")

####################################
with self.subTest("Compare"):
with OutputIO() as buf:
with redirect_stdout(buf):
self.assertEqual(1, ZfsCheck("/tmp/testtree --check=/tmp/testhashes".split(" "),
print_arguments=False).run())
print(buf.getvalue())
self.assertEqual(
"whole_whole2_partial: Chunk 0 failed: 309ffffba2e1977d12f3b7469971f30d28b94bdX 309ffffba2e1977d12f3b7469971f30d28b94bd8\n",
buf.getvalue())

def test_brokenpipe_cleanup_filesystem(self):
"""test if stuff is cleaned up correctly, in debugging mode, when a pipe breaks."""

prepare_zpools()
shelltest("cp tests/data/whole /test_source1/testfile")
shelltest("zfs snapshot test_source1@test")

#breaks pipe when head exits
#important to use --debug, since that generates extra output which would be problematic if we didn't do correct SIGPIPE handling
shelltest("python -m zfs_autobackup.ZfsCheck test_source1@test --debug | head -n1")

#should NOT be mounted anymore if cleanup went ok:
self.assertNotRegex(shelltest("mount"), "test_source1@test")

def test_brokenpipe_cleanup_volume(self):
if exists("/.dockerenv"):
self.skipTest("FIXME: zfscheck volumes not supported in docker yet")

prepare_zpools()
shelltest("zfs create -V200M test_source1/vol")
shelltest("zfs snapshot test_source1/vol@test")

#breaks pipe when grep exits:
#important to use --debug, since that generates extra output which would be problematic if we didn't do correct SIGPIPE handling
shelltest("python -m zfs_autobackup.ZfsCheck test_source1/vol@test --debug| grep -m1 'Hashing file'")
# time.sleep(1)

r = shelltest("zfs list -H -o name -r -t all " + TEST_POOLS)
self.assertMultiLineEqual("""
test_source1
test_source1/fs1
test_source1/fs1/sub
test_source1/vol
test_source1/vol@test
test_source2
test_source2/fs2
test_source2/fs2/sub
test_source2/fs3
test_source2/fs3/sub
test_target1
""",r )



@ -12,10 +12,12 @@ class TestZfsNode(unittest2.TestCase):
def test_consistent_snapshot(self):
logger = LogStub()
description = "[Source]"
node = ZfsNode(snapshot_time_format="test-%Y%m%d%H%M%S", hold_name="zfs_autobackup:test", logger=logger, description=description)
node = ZfsNode(utc=False, snapshot_time_format="test-%Y%m%d%H%M%S", hold_name="zfs_autobackup:test", logger=logger, description=description)

with self.subTest("first snapshot"):
node.consistent_snapshot(node.selected_datasets(property_name="autobackup:test",exclude_paths=[], exclude_received=False, exclude_unchanged=False, min_change=200000), "test-20101111000001", 100000)
(selected_datasets, excluded_datasets)=node.selected_datasets(property_name="autobackup:test", exclude_paths=[], exclude_received=False,
exclude_unchanged=0)
node.consistent_snapshot(selected_datasets, "test-20101111000001", 100000)
r = shelltest("zfs list -H -o name -r -t all " + TEST_POOLS)
self.assertEqual(r, """
test_source1
@ -33,7 +35,9 @@ test_target1
""")

with self.subTest("second snapshot, no changes, no snapshot"):
node.consistent_snapshot(node.selected_datasets(property_name="autobackup:test",exclude_paths=[], exclude_received=False, exclude_unchanged=False, min_change=200000), "test-20101111000002", 1)
(selected_datasets, excluded_datasets)=node.selected_datasets(property_name="autobackup:test", exclude_paths=[], exclude_received=False,
exclude_unchanged=0)
node.consistent_snapshot(selected_datasets, "test-20101111000002", 1)
r = shelltest("zfs list -H -o name -r -t all " + TEST_POOLS)
self.assertEqual(r, """
test_source1
@ -51,7 +55,8 @@ test_target1
""")

with self.subTest("second snapshot, no changes, empty snapshot"):
node.consistent_snapshot(node.selected_datasets(property_name="autobackup:test", exclude_paths=[], exclude_received=False, exclude_unchanged=False, min_change=200000), "test-20101111000002", 0)
(selected_datasets, excluded_datasets) =node.selected_datasets(property_name="autobackup:test", exclude_paths=[], exclude_received=False, exclude_unchanged=0)
node.consistent_snapshot(selected_datasets, "test-20101111000002", 0)
r = shelltest("zfs list -H -o name -r -t all " + TEST_POOLS)
self.assertEqual(r, """
test_source1
@ -74,12 +79,13 @@ test_target1
def test_consistent_snapshot_prepostcmds(self):
logger = LogStub()
description = "[Source]"
node = ZfsNode(snapshot_time_format="test", hold_name="test", logger=logger, description=description, debug_output=True)
node = ZfsNode(utc=False, snapshot_time_format="test", hold_name="test", logger=logger, description=description, debug_output=True)

with self.subTest("Test if all cmds are executed correctly (no failures)"):
with OutputIO() as buf:
with redirect_stdout(buf):
node.consistent_snapshot(node.selected_datasets(property_name="autobackup:test", exclude_paths=[], exclude_received=False, exclude_unchanged=False, min_change=1), "test-1",
(selected_datasets, excluded_datasets) =node.selected_datasets(property_name="autobackup:test", exclude_paths=[], exclude_received=False, exclude_unchanged=0)
node.consistent_snapshot(selected_datasets, "test-1",
0,
pre_snapshot_cmds=["echo pre1", "echo pre2"],
post_snapshot_cmds=["echo post1 >&2", "echo post2 >&2"]
@ -95,7 +101,8 @@ test_target1
with OutputIO() as buf:
with redirect_stdout(buf):
with self.assertRaises(ExecuteError):
node.consistent_snapshot(node.selected_datasets(property_name="autobackup:test", exclude_paths=[], exclude_received=False, exclude_unchanged=False, min_change=1), "test-1",
(selected_datasets, excluded_datasets) =node.selected_datasets(property_name="autobackup:test", exclude_paths=[], exclude_received=False, exclude_unchanged=0)
node.consistent_snapshot(selected_datasets, "test-1",
0,
pre_snapshot_cmds=["echo pre1", "false", "echo pre2"],
post_snapshot_cmds=["echo post1", "false", "echo post2"]
@ -112,7 +119,8 @@ test_target1
with redirect_stdout(buf):
with self.assertRaises(ExecuteError):
#same snapshot name as before so it fails
node.consistent_snapshot(node.selected_datasets(property_name="autobackup:test", exclude_paths=[], exclude_received=False, exclude_unchanged=False, min_change=1), "test-1",
(selected_datasets, excluded_datasets) =node.selected_datasets(property_name="autobackup:test", exclude_paths=[], exclude_received=False, exclude_unchanged=0)
node.consistent_snapshot(selected_datasets, "test-1",
0,
pre_snapshot_cmds=["echo pre1", "echo pre2"],
post_snapshot_cmds=["echo post1", "echo post2"]
@ -124,6 +132,21 @@ test_target1
self.assertIn("STDOUT > post1", buf.getvalue())
self.assertIn("STDOUT > post2", buf.getvalue())

def test_timestamps(self):
# Assert that timestamps keep relative order both for utc and for localtime
logger = LogStub()
description = "[Source]"
node_local = ZfsNode(utc=False, snapshot_time_format="test-%Y%m%d%H%M%S", hold_name="zfs_autobackup:test", logger=logger, description=description)
node_utc = ZfsNode(utc=True, snapshot_time_format="test-%Y%m%d%H%M%S", hold_name="zfs_autobackup:test", logger=logger, description=description)

for node in [node_local, node_utc]:
with self.subTest("timestamp ordering " + ("utc" if node == node_utc else "localtime")):
dataset_a = ZfsDataset(node,"test_source1@test-20101111000001")
dataset_b = ZfsDataset(node,"test_source1@test-20101111000002")
dataset_c = ZfsDataset(node,"test_source1@test-20240101020202")
self.assertGreater(dataset_b.timestamp, dataset_a.timestamp)
self.assertGreater(dataset_c.timestamp, dataset_b.timestamp)


def test_getselected(self):

@ -131,18 +154,26 @@ test_target1
shelltest("zfs create test_source1/fs1/subexcluded")
shelltest("zfs set autobackup:test=false test_source1/fs1/subexcluded")

# only select parent
shelltest("zfs create test_source1/fs1/onlyparent")
shelltest("zfs create test_source1/fs1/onlyparent/child")
shelltest("zfs set autobackup:test=parent test_source1/fs1/onlyparent")

# should be excluded by being unchanged
shelltest("zfs create test_source1/fs1/unchanged")
shelltest("zfs snapshot test_source1/fs1/unchanged@somesnapshot")

logger = LogStub()
description = "[Source]"
node = ZfsNode(snapshot_time_format="test-%Y%m%d%H%M%S", hold_name="zfs_autobackup:test", logger=logger, description=description)
s = pformat(node.selected_datasets(property_name="autobackup:test", exclude_paths=[], exclude_received=False, exclude_unchanged=True, min_change=1))
node = ZfsNode(utc=False, snapshot_time_format="test-%Y%m%d%H%M%S", hold_name="zfs_autobackup:test", logger=logger, description=description)
(selected_datasets, excluded_datasets)=node.selected_datasets(property_name="autobackup:test", exclude_paths=[], exclude_received=False,
exclude_unchanged=1)
s = pformat(selected_datasets)
print(s)

# basics
self.assertEqual(s, """[(local): test_source1/fs1,
(local): test_source1/fs1/onlyparent,
(local): test_source1/fs1/sub,
(local): test_source2/fs2/sub]""")

@ -150,7 +181,7 @@ test_target1
def test_validcommand(self):
logger = LogStub()
description = "[Source]"
node = ZfsNode(snapshot_time_format="test-%Y%m%d%H%M%S", hold_name="zfs_autobackup:test", logger=logger, description=description)
node = ZfsNode(utc=False, snapshot_time_format="test-%Y%m%d%H%M%S", hold_name="zfs_autobackup:test", logger=logger, description=description)

with self.subTest("test invalid option"):
self.assertFalse(node.valid_command(["zfs", "send", "--invalid-option", "nonexisting"]))
@ -160,7 +191,7 @@ test_target1
def test_supportedsendoptions(self):
logger = LogStub()
description = "[Source]"
node = ZfsNode(snapshot_time_format="test-%Y%m%d%H%M%S", hold_name="zfs_autobackup:test", logger=logger, description=description)
node = ZfsNode(utc=False, snapshot_time_format="test-%Y%m%d%H%M%S", hold_name="zfs_autobackup:test", logger=logger, description=description)
# -D probably always supported
self.assertGreater(len(node.supported_send_options), 0)

@ -168,7 +199,7 @@ test_target1
logger = LogStub()
description = "[Source]"
# NOTE: this could hang via ssh if we don't close filehandles properly. (which was a previous bug)
node = ZfsNode(snapshot_time_format="test-%Y%m%d%H%M%S", hold_name="zfs_autobackup:test", logger=logger, description=description, ssh_to='localhost')
node = ZfsNode(utc=False, snapshot_time_format="test-%Y%m%d%H%M%S", hold_name="zfs_autobackup:test", logger=logger, description=description, ssh_to='localhost')
self.assertIsInstance(node.supported_recv_options, list)



42
tests/tests_docker
Executable file
@ -0,0 +1,42 @@
#!/bin/sh

#NOTE: This script will be started inside the test docker container

set -e

if ! [ -e /.dockerenv ]; then
echo "only run this script inside a docker container!"
exit 1
fi

if ! [ -e /dev/ram0 ]; then
echo "Please load this module outside container:" >&2
echo "sudo modprobe brd rd_size=512000" >&2
exit 1

fi

#start sshd and other stuff
ssh-keygen -A
/usr/sbin/sshd
udevd -d


#config ssh
if ! [ -e /root/.ssh/id_rsa ]; then
ssh-keygen -t rsa -f /root/.ssh/id_rsa -P ''
fi

cat >> ~/.ssh/config <<EOF
Host *
addkeystoagent yes
controlpath ~/.ssh/control-master-%r@%h:%p
controlmaster auto
controlpersist 3600
EOF

cat /root/.ssh/id_rsa.pub >> /root/.ssh/authorized_keys
ssh -oStrictHostKeyChecking=no localhost 'echo SSH OK'

cd /app
python -m unittest discover /app/tests -vvvvf $@
127
zfs_autobackup/BlockHasher.py
Normal file
@ -0,0 +1,127 @@
import hashlib
import os


class BlockHasher():
"""This class was created to checksum huge files and blockdevices (TB's)
Instead of one sha1sum of the whole file, it generates sha1sums of chunks of the file.

The chunksize is count*bs (bs is the read blocksize from disk)

It's also possible to only read a certain percentage of blocks to just check a sample.

Input and output generators are in the format ( chunk_nr, hexdigest )

NOTE: skipping is only used on the generator side. The compare side just compares what it gets from the input generator.

"""

def __init__(self, count=10000, bs=4096, hash_class=hashlib.sha1, skip=0):
self.count = count
self.bs = bs
self.chunk_size=bs*count
self.hash_class = hash_class

# self.coverage=coverage
self.skip=skip
self._skip_count=0

self.stats_total_bytes=0


def _seek_next_chunk(self, fh, fsize):
"""seek fh to next chunk and update skip counter.
returns chunk_nr
returns False if it should skip the rest of the file


"""

#ignore empty files
if fsize==0:
return False

# need to skip chunks?
if self._skip_count > 0:
chunks_left = ((fsize - fh.tell()) // self.chunk_size) + 1
# not enough chunks left in this file?
if self._skip_count >= chunks_left:
# skip rest of this file
self._skip_count = self._skip_count - chunks_left
return False
else:
# seek to next chunk, reset skip count
fh.seek(self.chunk_size * self._skip_count, os.SEEK_CUR)
self._skip_count = self.skip
return fh.tell()//self.chunk_size
else:
# should read this chunk, reset skip count
self._skip_count = self.skip
return fh.tell() // self.chunk_size

def generate(self, fname):
"""Generates checksums

yields(chunk_nr, hexdigest)

yields nothing for empty files.
"""


with open(fname, "rb") as fh:

fh.seek(0, os.SEEK_END)
fsize=fh.tell()
fh.seek(0)

while fh.tell()<fsize:
chunk_nr=self._seek_next_chunk(fh, fsize)
if chunk_nr is False:
return

#read chunk
hash = self.hash_class()
block_nr = 0
while block_nr != self.count:
block=fh.read(self.bs)
if block==b"":
break
hash.update(block)
block_nr = block_nr + 1

yield (chunk_nr, hash.hexdigest())

def compare(self, fname, generator):
"""reads from generator and compares blocks
Yields mismatches in the form: ( chunk_nr, hexdigest, actual_hexdigest)
Yields errors in the form: ( chunk_nr, hexdigest, "message" )

"""

try:
checked = 0
with open(fname, "rb") as f:
for (chunk_nr, hexdigest) in generator:
try:

checked = checked + 1
hash = self.hash_class()
f.seek(int(chunk_nr) * self.bs * self.count)
block_nr = 0
for block in iter(lambda: f.read(self.bs), b""):
hash.update(block)
block_nr = block_nr + 1
if block_nr == self.count:
break

if block_nr == 0:
yield (chunk_nr, hexdigest, 'EOF')

elif (hash.hexdigest() != hexdigest):
yield (chunk_nr, hexdigest, hash.hexdigest())

except Exception as e:
yield ( chunk_nr , hexdigest, 'ERROR: '+str(e))

except Exception as e:
yield ( '-', '-', 'ERROR: '+ str(e))
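
(For orientation, a minimal usage sketch of the two generators above; the file path is made up and this snippet is not part of the diff:)

from zfs_autobackup.BlockHasher import BlockHasher

hasher = BlockHasher(count=10000, bs=4096)  # chunk size = count*bs = 40MiB
hashes = list(hasher.generate("/tmp/bigfile"))  # [(chunk_nr, hexdigest), ...]
# compare() re-reads the file and yields only mismatches/errors
for chunk_nr, expected, actual in hasher.compare("/tmp/bigfile", hashes):
    print("Chunk {} failed: {} {}".format(chunk_nr, expected, actual))
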
111
zfs_autobackup/CliBase.py
Normal file
@ -0,0 +1,111 @@
import argparse
import os.path
import sys

from .LogConsole import LogConsole


class CliBase(object):
"""Base class for all cli programs
Overridden in subclasses that add stuff for the specific programs."""

# also used by setup.py
VERSION = "3.3-beta.2"
HEADER = "{} v{} - (c)2022 E.H.Eefting (edwin@datux.nl)".format(os.path.basename(sys.argv[0]), VERSION)

def __init__(self, argv, print_arguments=True):

self.parser=self.get_parser()
self.args = self.parse_args(argv)

# helps with investigating failed regression tests:
if print_arguments:
print("ARGUMENTS: " + " ".join(argv))

def parse_args(self, argv):
"""parses the arguments and does additional checks, might print warnings or notes
Overridden in subclasses with extra checks.
"""

args = self.parser.parse_args(argv)

if args.help:
self.parser.print_help()
sys.exit(255)

if args.version:
print(self.HEADER)
sys.exit(255)

# auto enable progress?
if sys.stderr.isatty() and not args.no_progress:
args.progress = True

if args.debug_output:
args.debug = True

if args.test:
args.verbose = True

if args.debug:
args.verbose = True

self.log = LogConsole(show_debug=args.debug, show_verbose=args.verbose, color=sys.stdout.isatty())

self.verbose(self.HEADER)
self.verbose("")

return args

def get_parser(self):
"""build up the argument parser
Overridden in subclasses that add extra arguments
"""

parser = argparse.ArgumentParser(description=self.HEADER, add_help=False,
epilog='Full manual at: https://github.com/psy0rz/zfs_autobackup')

# Basic options
group=parser.add_argument_group("Common options")
group.add_argument('--help', '-h', action='store_true', help='show help')
group.add_argument('--test', '--dry-run', '-n', action='store_true',
help='Dry run, dont change anything, just show what would be done (still does all read-only '
'operations)')
group.add_argument('--verbose', '-v', action='store_true', help='verbose output')
group.add_argument('--debug', '-d', action='store_true',
help='Show zfs commands that are executed, stops after an exception.')
group.add_argument('--debug-output', action='store_true',
help='Show zfs commands and their output/exit codes. (noisy)')
group.add_argument('--progress', action='store_true',
help='show zfs progress output. Enabled automatically on ttys. (use --no-progress to disable)')
group.add_argument('--no-progress', action='store_true',
help=argparse.SUPPRESS) # needed to workaround a zfs recv -v bug
group.add_argument('--utc', action='store_true',
help='Use UTC instead of local time when dealing with timestamps for both formatting and parsing. To snapshot in an ISO 8601 compliant time format you may for example specify --snapshot-format "{}-%%Y-%%m-%%dT%%H:%%M:%%SZ". Changing this parameter after-the-fact (existing snapshots) will cause their timestamps to be interpreted as a different time than before.')
group.add_argument('--version', action='store_true',
help='Show version.')


return parser

def verbose(self, txt):
self.log.verbose(txt)

def warning(self, txt):
self.log.warning(txt)

def error(self, txt):
self.log.error(txt)

def debug(self, txt):
self.log.debug(txt)

def progress(self, txt):
self.log.progress(txt)

def clear_progress(self):
self.log.clear_progress()

def set_title(self, title):
self.log.verbose("")
self.log.verbose("#### " + title)
@ -1,3 +1,17 @@
# This is the low level process executing stuff.
# It makes piping and parallel process handling easier.

# You can specify a handler for each line of stderr output for each item in the pipe.
# Every item also has its own exitcode handler.

# Normally you add a stdout_handler to the last item in the pipe.
# However: You can also add stdout_handler to other items in a pipe. This will turn that item in to a manual pipe: your
# handler is responsible for sending data into the next item of the pipe. (available in item.next)

# You can also use manual pipe mode to just execute multiple commands in parallel and handle their output in parallel,
# without doing any actual pipe stuff. (because you don't HAVE to send data into the next item.)


import subprocess
import os
import select
@ -11,17 +25,23 @@ except ImportError:
class CmdItem:
"""one command item, to be added to a CmdPipe"""

def __init__(self, cmd, readonly=False, stderr_handler=None, exit_handler=None, shell=False):
def __init__(self, cmd, readonly=False, stderr_handler=None, exit_handler=None, stdout_handler=None, shell=False):
"""create item. caller has to make sure cmd is properly escaped when using shell.

If stdout_handler is None, it will connect the stdout to the stdin of the next item in the pipe, like
an actual system pipe. (no python overhead)

:type cmd: list of str
"""

self.cmd = cmd
self.readonly = readonly
self.stderr_handler = stderr_handler
self.stdout_handler = stdout_handler
self.exit_handler = exit_handler
self.shell = shell
self.process = None
self.next = None #next item in pipe, set by CmdPipe

def __str__(self):
"""return copy-pastable version of command."""
@ -84,72 +104,23 @@ class CmdPipe:
def should_execute(self):
return self._should_execute

def execute(self, stdout_handler):
"""run the pipe. returns True all exit handlers returned true"""
def execute(self):
"""run the pipe. returns True if all exit handlers returned true. (otherwise it will be False/None depending on exit handlers returncode) """

if not self._should_execute:
return True

# first process should have actual user input as stdin:
selectors = []
selectors = self.__create()

# create processes
last_stdout = None
stdin = subprocess.PIPE
for item in self.items:
if not selectors:
raise (Exception("Cant use cmdpipe without any output handlers."))

item.create(stdin)
selectors.append(item.process.stderr)

if last_stdout is None:
# we're the first process in the pipe, do we have some input?
if self.inp is not None:
# TODO: make streaming to support big inputs?
item.process.stdin.write(self.inp.encode('utf-8'))
item.process.stdin.close()
else:
# last stdout was piped to this stdin already, so close it because we dont need it anymore
last_stdout.close()

last_stdout = item.process.stdout
stdin = last_stdout

# monitor last stdout as well
selectors.append(last_stdout)

while True:
# wait for output on one of the stderrs or last_stdout
(read_ready, write_ready, ex_ready) = select.select(selectors, [], [])
eof_count = 0
done_count = 0

# read line and call appropriate handlers
if last_stdout in read_ready:
line = last_stdout.readline().decode('utf-8').rstrip()
if line != "":
stdout_handler(line)
else:
eof_count = eof_count + 1

for item in self.items:
if item.process.stderr in read_ready:
line = item.process.stderr.readline().decode('utf-8').rstrip()
if line != "":
item.stderr_handler(line)
else:
eof_count = eof_count + 1

if item.process.poll() is not None:
done_count = done_count + 1

# all filehandles are eof and all processes are done (poll() is not None)
if eof_count == len(selectors) and done_count == len(self.items):
break
self.__process_outputs(selectors)

# close filehandles
last_stdout.close()
for item in self.items:
item.process.stderr.close()
item.process.stdout.close()

# call exit handlers
success = True
@ -158,3 +129,86 @@ class CmdPipe:
success=item.exit_handler(item.process.returncode) and success

return success

def __process_outputs(self, selectors):
"""watch all output selectors and call handlers"""

while True:
# wait for output on one of the stderrs or last_stdout
(read_ready, write_ready, ex_ready) = select.select(selectors, [], [])

eof_count = 0
done_count = 0

# read line and call appropriate handlers

for item in self.items:
if item.process.stdout in read_ready:
line = item.process.stdout.readline().decode('utf-8').rstrip()
if line != "":
item.stdout_handler(line)
else:
eof_count = eof_count + 1
if item.next:
item.next.process.stdin.close()

if item.process.stderr in read_ready:
line = item.process.stderr.readline().decode('utf-8').rstrip()
if line != "":
item.stderr_handler(line)
else:
eof_count = eof_count + 1


if item.process.poll() is not None:
done_count = done_count + 1

# all filehandles are eof and all processes are done (poll() is not None)
if eof_count == len(selectors) and done_count == len(self.items):
break



def __create(self):
"""create actual processes, do piping and return selectors."""

selectors = []
next_stdin = subprocess.PIPE # means we write input via python instead of an actual system pipe
first = True
prev_item = None

for item in self.items:

# creates the actual subprocess via subprocess.popen
item.create(next_stdin)

# we piped previous process? don't forget to close its stdout
if next_stdin != subprocess.PIPE:
next_stdin.close()

if item.stderr_handler:
selectors.append(item.process.stderr)

# we're the first process in the pipe
if first:
if self.inp is not None:
# write the input we have
item.process.stdin.write(self.inp.encode('utf-8'))
item.process.stdin.close()
first = False

# manual stdout handling or pipe it to the next process?
if item.stdout_handler is None:
# no manual stdout handling, pipe it to the next process via system pipe
next_stdin = item.process.stdout
else:
# manual stdout handling via python
selectors.append(item.process.stdout)
# next process will get input from python:
next_stdin = subprocess.PIPE

if prev_item is not None:
prev_item.next = item

prev_item = item
return selectors

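(A hypothetical sketch of the manual-pipe API described in the header comments above; CmdPipe's constructor arguments mirror the positional CmdPipe(readonly, inp) call seen later in this diff, and the zfs commands are made up:)

pipe = CmdPipe(False, None)
# no stdout_handler on the first item, so its stdout becomes an actual system pipe into the next item
pipe.add(CmdItem(["zfs", "send", "pool/fs@snap"],
                 stderr_handler=lambda line: print("send: " + line),
                 exit_handler=lambda code: code == 0))
pipe.add(CmdItem(["zfs", "recv", "backup/fs"],
                 stderr_handler=lambda line: print("recv: " + line),
                 exit_handler=lambda code: code == 0,
                 stdout_handler=lambda line: print(line)))
pipe.execute()  # True only if every exit handler returned True
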
@ -54,15 +54,16 @@ class ExecuteNode(LogStub):
if cmd==self.PIPE:
return('|')
else:
return(cmd_quote(cmd))
return cmd_quote(cmd)

def _shell_cmd(self, cmd):
def _shell_cmd(self, cmd, cwd):
"""prefix specified ssh shell to command and escape shell characters"""

ret=[]

#add remote shell
if not self.is_local():
#note: don't escape this part (executed directly without shell)
ret=["ssh"]

if self.ssh_config is not None:
@ -70,7 +71,17 @@ class ExecuteNode(LogStub):

ret.append(self.ssh_to)

ret.append(" ".join(map(self._quote, cmd)))
#note: DO escape from here, executed in either local or remote shell.

shell_str=""

#add cwd change?
if cwd is not None:
shell_str=shell_str + "cd " + self._quote(cwd) + "; "

shell_str=shell_str + " ".join(map(self._quote, cmd))

ret.append(shell_str)

return ret

@ -78,24 +89,26 @@ class ExecuteNode(LogStub):
return self.ssh_to is None

def run(self, cmd, inp=None, tab_split=False, valid_exitcodes=None, readonly=False, hide_errors=False,
return_stderr=False, pipe=False, return_all=False):
return_stderr=False, pipe=False, return_all=False, cwd=None):
"""run a command on the node, checks output and parses/handles output and returns it

Takes care of proper quoting/escaping/ssh and logging of stdout/err/exit codes.

Either uses a local shell (sh -c) or remote shell (ssh) to execute the command.
Therefore the command can have stuff like actual pipes in it, if you don't want to use pipe=True to pipe stuff.

:param cmd: the actual command, should be a list, where the first item is the command
and the rest are parameters. use ExecuteNode.PIPE to add an unescaped |
(if you want to use system piping instead of python piping)
:param pipe: return CmdPipe instead of executing it.
:param pipe: return CmdPipe instead of executing it. (pipe this into another run() command via inp=...)
:param inp: Can be None, a string or a CmdPipe that was previously returned.
:param tab_split: split tabbed files in output into a list
:param valid_exitcodes: list of valid exit codes for this command (checks exit code of both sides of a pipe)
Use [] to accept all exit codes. Default [0]
:param valid_exitcodes: list of valid exit codes for this command. Use [] to accept all exit codes. Default [0]
:param readonly: make this True if the command doesn't make any changes and is safe to execute in testmode
:param hide_errors: don't show stderr output as error, instead show it as debugging output (use to hide expected errors)
:param return_stderr: return both stdout and stderr as a tuple. (normally only returns stdout)
:param return_all: return both stdout and stderr and exit_code as a tuple. (normally only returns stdout)
:param cwd: Change current working directory before executing command.

"""

@ -131,23 +144,28 @@ class ExecuteNode(LogStub):

return True

# add shell command and handlers to pipe
cmd_item=CmdItem(cmd=self._shell_cmd(cmd), readonly=readonly, stderr_handler=stderr_handler, exit_handler=exit_handler, shell=self.is_local())
cmd_pipe.add(cmd_item)

# return pipe instead of executing?
if pipe:
return cmd_pipe

# stdout parser
output_lines = []

def stdout_handler(line):
if tab_split:
output_lines.append(line.rstrip().split('\t'))
else:
output_lines.append(line.rstrip())
self._parse_stdout(line)
if pipe:
# don't specify output handler, so it will get piped to next process
stdout_handler=None
else:
# handle output manually, don't pipe it
def stdout_handler(line):
if tab_split:
output_lines.append(line.rstrip().split('\t'))
else:
output_lines.append(line.rstrip())
self._parse_stdout(line)

# add shell command and handlers to pipe
cmd_item=CmdItem(cmd=self._shell_cmd(cmd, cwd), readonly=readonly, stderr_handler=stderr_handler, exit_handler=exit_handler, shell=self.is_local(), stdout_handler=stdout_handler)
cmd_pipe.add(cmd_item)

# return CmdPipe instead of executing?
if pipe:
return cmd_pipe

if cmd_pipe.should_execute():
self.debug("CMD > {}".format(cmd_pipe))
@ -155,7 +173,7 @@ class ExecuteNode(LogStub):
self.debug("CMDSKIP> {}".format(cmd_pipe))

# execute and calls handlers in CmdPipe
if not cmd_pipe.execute(stdout_handler=stdout_handler):
if not cmd_pipe.execute():
raise(ExecuteError("Last command returned error"))

if return_all:
@ -164,3 +182,90 @@ class ExecuteNode(LogStub):
return output_lines, error_lines
else:
return output_lines

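# Example (sketch, not from this file): chaining two run() calls, roughly as
# zfs-autobackup does for send/recv. The ssh_to constructor argument and the
# dataset names are assumptions for illustration.
#
#   source_node = ExecuteNode(ssh_to="user@sourcehost")
#   target_node = ExecuteNode()  # local
#   # pipe=True returns a CmdPipe; feeding it to inp= chains the two commands
#   send_pipe = source_node.run(["zfs", "send", "pool/fs@snap"], pipe=True, readonly=True)
#   target_node.run(["zfs", "recv", "backup/fs"], inp=send_pipe)
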
def script(self, lines, inp=None, stdout_handler=None, stderr_handler=None, exit_handler=None, valid_exitcodes=None, readonly=False, hide_errors=False, pipe=False):
"""Run a multiline script on the node.

This is much lower level than run() and allows for finer-grained control.

Either uses a local shell (sh -c) or remote shell (ssh) to execute the command.
You need to do your own escaping/quoting.
It will do logging of stderr and exit codes, but you should
specify your stdout handler when calling CmdPipe.execute.
Also specify the optional stderr/exit code handlers if you need them.
Handlers are called for each line.
It won't collect lines internally like run() does, so streams of data can be of unlimited size.

:param lines: list of lines of the actual script.
:param inp: Can be None, a string or a CmdPipe that was previously returned.
:param readonly: make this True if the command doesn't make any changes and is safe to execute in testmode
:param valid_exitcodes: list of valid exit codes for this command. Use [] to accept all exit codes. Default [0]
:param hide_errors: don't show stderr output as error, instead show it as debugging output (use to hide expected errors)
:param pipe: return CmdPipe instead of executing it. (pipe this into another run() command via inp=...)

"""

# create new pipe?
if not isinstance(inp, CmdPipe):
cmd_pipe = CmdPipe(self.readonly, inp)
else:
# add stuff to existing pipe
cmd_pipe = inp

internal_stdout_handler=None
if stdout_handler is not None:
if self.debug_output:
def internal_stdout_handler(line):
self.debug("STDOUT > " + line.rstrip())
stdout_handler(line)
else:
internal_stdout_handler=stdout_handler

def internal_stderr_handler(line):
self._parse_stderr(line, hide_errors)
if stderr_handler is not None:
stderr_handler(line)

# exit code handler
if valid_exitcodes is None:
valid_exitcodes = [0]

def internal_exit_handler(exit_code):
if self.debug_output:
self.debug("EXIT > {}".format(exit_code))

if exit_handler is not None:
exit_handler(exit_code)

if (valid_exitcodes != []) and (exit_code not in valid_exitcodes):
self.error("Script returned exit code {} (valid codes: {})".format(exit_code, valid_exitcodes))
return False

return True

#build command
cmd=[]

#add remote shell
if not self.is_local():
#note: don't escape this part (executed directly without shell)
cmd.append("ssh")

if self.ssh_config is not None:
cmd.extend(["-F", self.ssh_config])

cmd.append(self.ssh_to)

# convert to script
cmd.append("\n".join(lines))

# add shell command and handlers to pipe
cmd_item=CmdItem(cmd=cmd, readonly=readonly, stderr_handler=internal_stderr_handler, exit_handler=internal_exit_handler, stdout_handler=internal_stdout_handler, shell=self.is_local())
cmd_pipe.add(cmd_item)

self.debug("SCRIPT > {}".format(cmd_pipe))

if pipe:
return cmd_pipe
else:
return cmd_pipe.execute()

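(A small usage sketch of script(), not part of this file; node is assumed to be an ExecuteNode instance constructed elsewhere:)

node.script(
    lines=["zfs list -H -o name", "echo done"],
    stdout_handler=lambda line: print(line),  # handlers are called once per output line
    valid_exitcodes=[0],
)
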
@ -10,6 +10,7 @@ class LogConsole:
self.last_log = ""
self.show_debug = show_debug
self.show_verbose = show_verbose
self._progress_uncleared=False

if color:
# try to use color, fall back if colorama not available
@ -25,6 +26,7 @@ class LogConsole:
self.colorama=False

def error(self, txt):
self.clear_progress()
if self.colorama:
print(colorama.Fore.RED + colorama.Style.BRIGHT + "! " + txt + colorama.Style.RESET_ALL, file=sys.stderr)
else:
@ -32,14 +34,16 @@ class LogConsole:
sys.stderr.flush()

def warning(self, txt):
self.clear_progress()
if self.colorama:
print(colorama.Fore.YELLOW + colorama.Style.BRIGHT + " NOTE: " + txt + colorama.Style.RESET_ALL)
print(colorama.Fore.YELLOW + colorama.Style.NORMAL + " NOTE: " + txt + colorama.Style.RESET_ALL)
else:
print(" NOTE: " + txt)
sys.stdout.flush()

def verbose(self, txt):
if self.show_verbose:
self.clear_progress()
if self.colorama:
print(colorama.Style.NORMAL + " " + txt + colorama.Style.RESET_ALL)
else:
@ -48,6 +52,7 @@ class LogConsole:

def debug(self, txt):
if self.show_debug:
self.clear_progress()
if self.colorama:
print(colorama.Fore.GREEN + "# " + txt + colorama.Style.RESET_ALL)
else:
@ -57,10 +62,13 @@ class LogConsole:
def progress(self, txt):
"""print progress output to stderr (stays on same line)"""
self.clear_progress()
self._progress_uncleared=True
print(">>> {}\r".format(txt), end='', file=sys.stderr)
sys.stderr.flush()

def clear_progress(self):
import colorama
print(colorama.ansi.clear_line(), end='', file=sys.stderr)
sys.stderr.flush()
if self._progress_uncleared:
import colorama
print(colorama.ansi.clear_line(), end='', file=sys.stderr)
# sys.stderr.flush()
self._progress_uncleared=False

@ -1,4 +1,3 @@
import time

from .ThinnerRule import ThinnerRule

@ -37,7 +36,7 @@ class Thinner:

return ret

def thin(self, objects, keep_objects=None, now=None):
def thin(self, objects, keep_objects, now):
"""thin list of objects with current schedule rules. objects: list of
objects to thin. every object should have timestamp attribute.

@ -49,8 +48,6 @@ class Thinner:
now: if specified, use this time as current time
"""

if not keep_objects:
keep_objects = []

# always keep a number of the last objects?
if self.always_keep:
@ -68,9 +65,6 @@ class Thinner:
for rule in self.rules:
time_blocks[rule.period] = {}

if not now:
now = int(time.time())

keeps = []
removes = []


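(A sketch of how the stricter thin() signature gets called after this change, with both arguments now mandatory; the snapshot objects and the (keeps, removes) return shape are assumptions based on the docstring, not taken from the diff:)

import time
from collections import namedtuple

Snap = namedtuple("Snap", ["name", "timestamp"])  # any object with a timestamp attribute works
snapshots = [Snap("fs@test-1", 1000000000), Snap("fs@test-2", 1000086400)]
# keep_objects and now are now mandatory; pass int(time.time()) to keep the old behaviour
keeps, removes = thinner.thin(snapshots, keep_objects=[], now=int(time.time()))
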
60
zfs_autobackup/TreeHasher.py
Normal file
@ -0,0 +1,60 @@
import itertools
import os


class TreeHasher():
"""uses BlockHasher recursively on a directory tree

Input and output generators are in the format: ( relative-filepath, chunk_nr, hexdigest)

"""

def __init__(self, block_hasher):
"""

:type block_hasher: BlockHasher
"""
self.block_hasher=block_hasher

def generate(self, start_path):
"""Use BlockHasher on every file in a tree, yielding the results

note that it only checks the contents of actual files. It ignores metadata like permissions and mtimes.
It also ignores empty directories, symlinks and special files.
"""

def walkerror(e):
raise e

for (dirpath, dirnames, filenames) in os.walk(start_path, onerror=walkerror):
for f in filenames:
file_path=os.path.join(dirpath, f)

if (not os.path.islink(file_path)) and os.path.isfile(file_path):
for (chunk_nr, hash) in self.block_hasher.generate(file_path):
yield ( os.path.relpath(file_path,start_path), chunk_nr, hash )


def compare(self, start_path, generator):
"""reads from generator and compares blocks

yields mismatches in the form: ( relative_filename, chunk_nr, compare_hexdigest, actual_hexdigest )
yields errors in the form: ( relative_filename, chunk_nr, compare_hexdigest, "message" )

"""

count=0

def filter_file_name( file_name, chunk_nr, hexdigest):
return ( chunk_nr, hexdigest )


for file_name, group_generator in itertools.groupby(generator, lambda x: x[0]):
count=count+1
block_generator=itertools.starmap(filter_file_name, group_generator)
for ( chunk_nr, compare_hexdigest, actual_hexdigest) in self.block_hasher.compare(os.path.join(start_path,file_name), block_generator):
yield ( file_name, chunk_nr, compare_hexdigest, actual_hexdigest )




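(A minimal usage sketch mirroring the tests earlier in this diff; the TreeHasher import path is assumed to follow the same pattern as BlockHasher's, and the snippet is not part of the diff:)

from zfs_autobackup.BlockHasher import BlockHasher
from zfs_autobackup.TreeHasher import TreeHasher

tree_hasher = TreeHasher(BlockHasher(count=10000, bs=4096))
# generate() yields (relative-filepath, chunk_nr, hexdigest) for every regular file
hashes = list(tree_hasher.generate("/tmp/testtree"))
# compare() walks the same hashes and yields only the mismatches, grouped per file
for fname, chunk_nr, expected, actual in tree_hasher.compare("/tmp/testtree", hashes):
    print("{}: Chunk {} failed: {} {}".format(fname, chunk_nr, expected, actual))
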
@ -1,16 +1,12 @@
import argparse
import os.path
import sys

from .LogConsole import LogConsole
from .CliBase import CliBase
from .util import datetime_now


class ZfsAuto(object):
    """Common Base class, this class is always used subclassed. Look at ZfsAutobackup and ZfsAutoverify ."""

    # also used by setup.py
    VERSION = "3.2-alpha1"
    HEADER = "{} v{} - (c)2021 E.H.Eefting (edwin@datux.nl)".format(os.path.basename(sys.argv[0]), VERSION)
class ZfsAuto(CliBase):
    """Common Base class for ZfsAutobackup and ZfsAutoverify ."""

    def __init__(self, argv, print_arguments=True):

@ -19,46 +15,15 @@ class ZfsAuto(object):
        self.property_name = None
        self.exclude_paths = None

        # helps with investigating failed regression tests:
        if print_arguments:
            print("ARGUMENTS: " + " ".join(argv))

        self.args = self.parse_args(argv)
        super(ZfsAuto, self).__init__(argv, print_arguments)

    def parse_args(self, argv):
        """parse common arguments, set up logging, check and adjust parameters"""

        parser=self.get_parser()
        args = parser.parse_args(argv)

        if args.help:
            parser.print_help()
            sys.exit(255)

        if args.version:
            print(self.HEADER)
            sys.exit(255)

        # auto enable progress?
        if sys.stderr.isatty() and not args.no_progress:
            args.progress = True

        if args.debug_output:
            args.debug = True

        if args.test:
            args.verbose = True

        if args.debug:
            args.verbose = True

        self.log = LogConsole(show_debug=args.debug, show_verbose=args.verbose, color=sys.stdout.isatty())

        self.verbose(self.HEADER)
        self.verbose("")
        args = super(ZfsAuto, self).parse_args(argv)

        if args.backup_name == None:
            parser.print_usage()
            self.parser.print_usage()
            self.log.error("Please specify BACKUP-NAME")
            sys.exit(255)

@ -82,8 +47,9 @@ class ZfsAuto(object):
            self.verbose("NOTE: Source and target are on the same host, excluding target-path from selection.")
            self.exclude_paths.append(args.target_path)
        else:
            self.verbose("NOTE: Source and target are on the same host, excluding received datasets from selection.")
            args.exclude_received = True
            if not args.exclude_received and not args.include_received:
                self.verbose("NOTE: Source and target are on the same host, adding --exclude-received to commandline. (use --include-received to overrule)")
                args.exclude_received = True

        if args.test:
            self.warning("TEST MODE - SIMULATING WITHOUT MAKING ANY CHANGES")

@ -93,16 +59,20 @@ class ZfsAuto(object):
        self.snapshot_time_format = args.snapshot_format.format(args.backup_name)
        self.hold_name = args.hold_format.format(args.backup_name)

        dt = datetime_now(args.utc)

        self.verbose("")
        self.verbose("Current time {} : {}".format(args.utc and "UTC" or " ", dt.strftime("%Y-%m-%d %H:%M:%S")))

        self.verbose("Selecting dataset property : {}".format(self.property_name))
        self.verbose("Snapshot format : {}".format(self.snapshot_time_format))
        self.verbose("Timezone : {}".format("UTC" if args.utc else "Local"))

        return args

    def get_parser(self):

        parser = argparse.ArgumentParser(description=self.HEADER, add_help=False,
                                         epilog='Full manual at: https://github.com/psy0rz/zfs_autobackup')
        parser = super(ZfsAuto, self).get_parser()

        #positional arguments
        parser.add_argument('backup_name', metavar='BACKUP-NAME', default=None, nargs='?',
@ -111,32 +81,13 @@ class ZfsAuto(object):
        parser.add_argument('target_path', metavar='TARGET-PATH', default=None, nargs='?',
                            help='Target ZFS filesystem (optional)')

        # Basic options
        group=parser.add_argument_group("Basic options")
        group.add_argument('--help', '-h', action='store_true', help='show help')
        group.add_argument('--test', '--dry-run', '-n', action='store_true',
                           help='Dry run, don\'t change anything, just show what would be done (still does all read-only '
                                'operations)')
        group.add_argument('--verbose', '-v', action='store_true', help='verbose output')
        group.add_argument('--debug', '-d', action='store_true',
                           help='Show zfs commands that are executed, stops after an exception.')
        group.add_argument('--debug-output', action='store_true',
                           help='Show zfs commands and their output/exit codes. (noisy)')
        group.add_argument('--progress', action='store_true',
                           help='show zfs progress output. Enabled automatically on ttys. (use --no-progress to disable)')
        group.add_argument('--no-progress', action='store_true',
                           help=argparse.SUPPRESS)  # needed to work around a zfs recv -v bug
        group.add_argument('--version', action='store_true',
                           help='Show version.')
        group.add_argument('--strip-path', metavar='N', default=0, type=int,
                           help='Number of directories to strip from target path (use 1 when cloning zones between 2 '
                                'SmartOS machines)')

        # SSH options
        group=parser.add_argument_group("SSH options")
        group.add_argument('--ssh-config', metavar='CONFIG-FILE', default=None, help='Custom ssh client config')
        group.add_argument('--ssh-source', metavar='USER@HOST', default=None,
                           help='Source host to get backup from.')
                           help='Source host to pull backup from.')
        group.add_argument('--ssh-target', metavar='USER@HOST', default=None,
                           help='Target host to push backup to.')

@ -147,39 +98,22 @@ class ZfsAuto(object):
                           help='ZFS Snapshot string format. Default: %(default)s')
        group.add_argument('--hold-format', metavar='FORMAT', default="zfs_autobackup:{}",
                           help='ZFS hold string format. Default: %(default)s')
        group.add_argument('--strip-path', metavar='N', default=0, type=int,
                           help='Number of directories to strip from target path.')

        group=parser.add_argument_group("Selection options")
        group.add_argument('--ignore-replicated', action='store_true', help=argparse.SUPPRESS)
        group.add_argument('--exclude-unchanged', action='store_true',
                           help='Exclude datasets that have no changes since any last snapshot. (Useful in combination with proxmox HA replication)')
        group.add_argument('--exclude-unchanged', metavar='BYTES', default=0, type=int,
                           help='Exclude datasets that have less than BYTES data changed since any last snapshot. (Use with proxmox HA replication)')
        group.add_argument('--exclude-received', action='store_true',
                           help='Exclude datasets that have the origin of their autobackup: property as "received". '
                                'This can avoid recursive replication between two backup partners.')
        group.add_argument('--include-received', action='store_true',
                           help=argparse.SUPPRESS)

        return parser

    def verbose(self, txt):
        self.log.verbose(txt)

    def warning(self, txt):
        self.log.warning(txt)

    def error(self, txt):
        self.log.error(txt)

    def debug(self, txt):
        self.log.debug(txt)

    def progress(self, txt):
        self.log.progress(txt)

    def clear_progress(self):
        self.log.clear_progress()

    def set_title(self, title):
        self.log.verbose("")
        self.log.verbose("#### " + title)

    def print_error_sources(self):
        self.error(
            "No source filesystems selected, please do a 'zfs set autobackup:{0}=true' on the source datasets "
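The hunks above fold ZfsAuto's option parsing into the new CliBase parent: get_parser() now extends the parser returned by super() instead of constructing its own, and parse_args() defers the common options to the base class. A minimal sketch of that chaining pattern (the class and option names here are illustrative, not the project's real ones):

import argparse

class Base(object):
    def get_parser(self):
        parser = argparse.ArgumentParser(add_help=False)
        parser.add_argument('--verbose', action='store_true')   # common option lives in the base
        return parser

class Tool(Base):
    def get_parser(self):
        parser = super(Tool, self).get_parser()                 # start from the base parser
        parser.add_argument('backup_name', nargs='?')           # tool-specific positional
        return parser

print(Tool().get_parser().parse_args(['mybackup', '--verbose']))

Each subclass only declares what it adds, so common flags stay defined in exactly one place.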
@ -1,6 +1,8 @@
import time

import argparse
from signal import signal, SIGPIPE
from .util import output_redir, sigpipe_handler, datetime_now

from .ZfsAuto import ZfsAuto

from . import compressors
@ -10,7 +12,6 @@ from .ZfsDataset import ZfsDataset
from .ZfsNode import ZfsNode
from .ThinnerRule import ThinnerRule


class ZfsAutobackup(ZfsAuto):
    """The main zfs-autobackup class. Start here, at run() :)"""

@ -30,15 +31,15 @@ class ZfsAutobackup(ZfsAuto):
        if args.allow_empty:
            args.min_change = 0

        if args.destroy_incompatible:
            args.rollback = True
        # if args.destroy_incompatible:
        #     args.rollback = True

        if args.resume:
            self.warning("The --resume option isn't needed anymore (its autodetected now)")
            self.warning("The --resume option isn't needed anymore (it's autodetected now)")

        if args.raw:
            self.warning(
                "The --raw option isn't needed anymore (its autodetected now). Also see --encrypt and --decrypt.")
                "The --raw option isn't needed anymore (it's autodetected now). Also see --encrypt and --decrypt.")

        if args.compress and args.ssh_source is None and args.ssh_target is None:
            self.warning("Using compression, but transfer is local.")
@ -64,17 +65,22 @@ class ZfsAutobackup(ZfsAuto):
                           help='Only create snapshot if enough bytes are changed. (default %('
                                'default)s)')
        group.add_argument('--allow-empty', action='store_true',
                           help='If nothing has changed, still create empty snapshots. (Faster. Same as --min-change=0)')
                           help='If nothing has changed, still create empty snapshots. (Same as --min-change=0)')
        group.add_argument('--other-snapshots', action='store_true',
                           help='Send over other snapshots as well, not just the ones created by this tool.')
        group.add_argument('--set-snapshot-properties', metavar='PROPERTY=VALUE,...', type=str,
                           help='List of properties to set on the snapshot.')
        group.add_argument('--no-guid-check', action='store_true',
                           help='Don\'t check guid of common snapshots. (faster)')

        group = parser.add_argument_group("Transfer options")
        group.add_argument('--no-send', action='store_true',
                           help='Don\'t transfer snapshots (useful for cleanups, or if you want a serperate send-cronjob)')
                           help='Don\'t transfer snapshots (useful for cleanups, or if you want a separate send-cronjob)')
        group.add_argument('--no-holds', action='store_true',
                           help='Don\'t hold snapshots. (Faster. Allows you to destroy common snapshot.)')
        group.add_argument('--clear-refreservation', action='store_true',
                           help='Filter "refreservation" property. (recommended, safes space. same as '
                           help='Filter "refreservation" property. (recommended, saves space. same as '
                                '--filter-properties refreservation)')
        group.add_argument('--clear-mountpoint', action='store_true',
                           help='Set property canmount=noauto for new datasets. (recommended, prevents mount '
@ -88,8 +94,10 @@ class ZfsAutobackup(ZfsAuto):
        group.add_argument('--rollback', action='store_true',
                           help='Rollback changes to the latest target snapshot before starting. (normally you can '
                                'prevent changes by setting the readonly property on the target_path to on)')
        group.add_argument('--force', '-F', action='store_true',
                           help='Use zfs -F option to force overwrite/rollback. (Useful with --strip-path=1, but use with care)')
        group.add_argument('--destroy-incompatible', action='store_true',
                           help='Destroy incompatible snapshots on target. Use with care! (implies --rollback)')
                           help='Destroy incompatible snapshots on target. Use with care! (also does rollback of dataset)')
        group.add_argument('--ignore-transfer-errors', action='store_true',
                           help='Ignore transfer errors (still checks if received filesystem exists. useful for '
                                'acltype errors)')
@ -102,15 +110,17 @@ class ZfsAutobackup(ZfsAuto):
        group.add_argument('--zfs-compressed', action='store_true',
                           help='Transfer blocks that already have zfs-compression as-is.')

        group = parser.add_argument_group("ZFS send/recv pipes")
        group = parser.add_argument_group("Data transfer options")
        group.add_argument('--compress', metavar='TYPE', default=None, nargs='?', const='zstd-fast',
                           choices=compressors.choices(),
                           help='Use compression during transfer, defaults to zstd-fast if TYPE is not specified. ({})'.format(
                               ", ".join(compressors.choices())))
        group.add_argument('--rate', metavar='DATARATE', default=None,
                           help='Limit data transfer rate (e.g. 128K. requires mbuffer.)')
                           help='Limit data transfer rate in Bytes/sec (e.g. 128K. requires mbuffer.)')
        group.add_argument('--buffer', metavar='SIZE', default=None,
                           help='Add zfs send and recv buffers to smooth out IO bursts. (e.g. 128M. requires mbuffer)')
        parser.add_argument('--buffer-chunk-size', metavar="BUFFERCHUNKSIZE", default=None,
                            help='Tune chunk size when mbuffer is used. (requires mbuffer.)')
        group.add_argument('--send-pipe', metavar="COMMAND", default=[], action='append',
                           help='pipe zfs send output through COMMAND (can be used multiple times)')
        group.add_argument('--recv-pipe', metavar="COMMAND", default=[], action='append',
@ -134,7 +144,10 @@ class ZfsAutobackup(ZfsAuto):

    # NOTE: this method also uses self.args. args that need extra processing are passed as function parameters:
    def thin_missing_targets(self, target_dataset, used_target_datasets):
        """thin target datasets that are missing on the source."""
        """thin target datasets that are missing on the source.
        :type used_target_datasets: list[ZfsDataset]
        :type target_dataset: ZfsDataset
        """

        self.debug("Thinning obsolete datasets")
        missing_datasets = [dataset for dataset in target_dataset.recursive_datasets if
@ -142,6 +155,7 @@ class ZfsAutobackup(ZfsAuto):

        count = 0
        for dataset in missing_datasets:
            self.debug("analyse missing {}".format(dataset))

            count = count + 1
            if self.args.progress:
@ -154,12 +168,16 @@ class ZfsAutobackup(ZfsAuto):
            except Exception as e:
                dataset.error("Error during thinning of missing datasets ({})".format(str(e)))

        if self.args.progress:
            self.clear_progress()
        # if self.args.progress:
        #     self.clear_progress()

    # NOTE: this method also uses self.args. args that need extra processing are passed as function parameters:
    def destroy_missing_targets(self, target_dataset, used_target_datasets):
        """destroy target datasets that are missing on the source and that meet the requirements"""
        """destroy target datasets that are missing on the source and that meet the requirements
        :type used_target_datasets: list[ZfsDataset]
        :type target_dataset: ZfsDataset
        """

        self.debug("Destroying obsolete datasets")

@ -181,11 +199,11 @@ class ZfsAutobackup(ZfsAuto):
                        dataset.debug("Destroy missing: ignoring")
                    else:
                        dataset.verbose(
                            "Destroy missing: has no snapshots made by us. (please destroy manually)")
                            "Destroy missing: has no snapshots made by us (please destroy manually).")
                else:
                    # past the deadline?
                    deadline_ttl = ThinnerRule("0s" + self.args.destroy_missing).ttl
                    now = int(time.time())
                    now = datetime_now(self.args.utc).timestamp()
                    if dataset.our_snapshots[-1].timestamp + deadline_ttl > now:
                        dataset.verbose("Destroy missing: Waiting for deadline.")
                    else:
@ -214,23 +232,34 @@ class ZfsAutobackup(ZfsAuto):
                    dataset.destroy(fail_exception=True)

            except Exception as e:
                if self.args.progress:
                    self.clear_progress()
                # if self.args.progress:
                #     self.clear_progress()

                dataset.error("Error during --destroy-missing: {}".format(str(e)))

        if self.args.progress:
            self.clear_progress()
        # if self.args.progress:
        #     self.clear_progress()

    def get_send_pipes(self, logger):
        """determine the zfs send pipe"""

        ret = []
        _mbuffer = False
        _buffer = "16M"
        _cs = "128k"
        _rate = False

        # IO buffer
        if self.args.buffer:
            logger("zfs send buffer : {}".format(self.args.buffer))
            ret.extend([ExecuteNode.PIPE, "mbuffer", "-q", "-s128k", "-m" + self.args.buffer])
            _mbuffer = True
            _buffer = self.args.buffer

        # IO chunk size
        if self.args.buffer_chunk_size:
            logger("zfs send chunk size : {}".format(self.args.buffer_chunk_size))
            _mbuffer = True
            _cs = self.args.buffer_chunk_size

        # custom pipes
        for send_pipe in self.args.send_pipe:
@ -248,7 +277,14 @@ class ZfsAutobackup(ZfsAuto):
        # transfer rate
        if self.args.rate:
            logger("zfs send transfer rate : {}".format(self.args.rate))
            ret.extend([ExecuteNode.PIPE, "mbuffer", "-q", "-s128k", "-m16M", "-R" + self.args.rate])
            _mbuffer = True
            _rate = self.args.rate

        if _mbuffer:
            cmd = [ExecuteNode.PIPE, "mbuffer", "-q", "-s{}".format(_cs), "-m{}".format(_buffer)]
            if _rate:
                cmd.append("-R{}".format(self.args.rate))
            ret.extend(cmd)

        return ret
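The hunk above replaces two separate mbuffer invocations (one for --buffer, one for --rate) with a single pipe that combines buffer size, chunk size and rate limit. A minimal sketch of the command it assembles; the argument values are hypothetical examples, not project defaults:

args_buffer = "256M"       # --buffer
args_chunk_size = "1M"     # --buffer-chunk-size
args_rate = "10M"          # --rate

cmd = ["mbuffer", "-q", "-s{}".format(args_chunk_size or "128k"), "-m{}".format(args_buffer or "16M")]
if args_rate:
    cmd.append("-R{}".format(args_rate))
print(" ".join(cmd))  # mbuffer -q -s1M -m256M -R10M

One mbuffer in the pipeline instead of two means one less copy of every block in transit.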
@ -270,14 +306,45 @@ class ZfsAutobackup(ZfsAuto):
            logger("zfs recv custom pipe : {}".format(recv_pipe))

        # IO buffer
        if self.args.buffer:
        if self.args.buffer or self.args.buffer_chunk_size:
            _cs = "128k"
            _buffer = "16M"
            # only add second buffer if it's useful. (e.g. non local transfer or other pipes active)
            if self.args.ssh_source != None or self.args.ssh_target != None or self.args.recv_pipe or self.args.send_pipe or self.args.compress != None:
                logger("zfs recv buffer : {}".format(self.args.buffer))
                ret.extend(["mbuffer", "-q", "-s128k", "-m" + self.args.buffer, ExecuteNode.PIPE])

                if self.args.buffer_chunk_size:
                    _cs = self.args.buffer_chunk_size
                if self.args.buffer:
                    _buffer = self.args.buffer

                ret.extend(["mbuffer", "-q", "-s{}".format(_cs), "-m{}".format(_buffer), ExecuteNode.PIPE])

        return ret

    def make_target_name(self, source_dataset):
        """make target_name from a source_dataset"""
        stripped=source_dataset.lstrip_path(self.args.strip_path)
        if stripped!="":
            return self.args.target_path + "/" + stripped
        else:
            return self.args.target_path

    def check_target_names(self, source_node, source_datasets, target_node):
        """check all target names for collisions etc. due to strip-options"""

        self.debug("Checking target names:")
        target_datasets={}
        for source_dataset in source_datasets:

            target_name = self.make_target_name(source_dataset)
            source_dataset.debug("-> {}".format(target_name))

            if target_name in target_datasets:
                raise Exception("Target collision: Target path {} encountered twice, due to: {} and {}".format(target_name, source_dataset, target_datasets[target_name]))

            target_datasets[target_name]=source_dataset

    # NOTE: this method also uses self.args. args that need extra processing are passed as function parameters:
    def sync_datasets(self, source_node, source_datasets, target_node):
        """Sync datasets, or thin-only on both sides
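The new check_target_names() above matters because --strip-path can map two different source datasets onto the same target path. A small illustration of the clash it detects; the dataset names are hypothetical:

# With --strip-path=1, both sources lose their pool name and collide:
sources = ["poolA/data/www", "poolB/data/www"]
target_path = "backup"

seen = {}
for s in sources:
    stripped = "/".join(s.split("/")[1:])      # what lstrip_path(1) produces
    target = target_path + "/" + stripped      # "backup/data/www" for both
    if target in seen:
        raise Exception("Target collision: {} from {} and {}".format(target, s, seen[target]))
    seen[target] = s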
@ -308,8 +375,10 @@ class ZfsAutobackup(ZfsAuto):
                # ensure parents exist
                # TODO: this isn't perfect yet, in some cases it can create parents when it shouldn't.
                if not self.args.no_send \
                        and target_dataset.parent \
                        and target_dataset.parent not in target_datasets \
                        and not target_dataset.parent.exists:
                    target_dataset.debug("Creating unmountable parents")
                    target_dataset.parent.create_filesystem(parents=True)

                # determine common zpool features (cached, so no problem we call it often)
@ -328,10 +397,8 @@ class ZfsAutobackup(ZfsAuto):
                                            destroy_incompatible=self.args.destroy_incompatible,
                                            send_pipes=send_pipes, recv_pipes=recv_pipes,
                                            decrypt=self.args.decrypt, encrypt=self.args.encrypt,
                                            zfs_compressed=self.args.zfs_compressed)
                                            zfs_compressed=self.args.zfs_compressed, force=self.args.force, guid_check=not self.args.no_guid_check)
            except Exception as e:
                if self.args.progress:
                    self.clear_progress()

                fail_count = fail_count + 1
                source_dataset.error("FAILED: " + str(e))
@ -339,8 +406,6 @@ class ZfsAutobackup(ZfsAuto):
                    self.verbose("Debug mode, aborting on first error")
                    raise

        if self.args.progress:
            self.clear_progress()

        target_path_dataset = target_node.get_dataset(self.args.target_path)
        if not self.args.no_thinning:
@ -382,6 +447,15 @@ class ZfsAutobackup(ZfsAuto):

        return set_properties

    def set_snapshot_properties_list(self):

        if self.args.set_snapshot_properties:
            set_snapshot_properties = self.args.set_snapshot_properties.split(",")
        else:
            set_snapshot_properties = []

        return set_snapshot_properties

    def run(self):

        try:
@ -394,30 +468,31 @@ class ZfsAutobackup(ZfsAuto):
                source_thinner = None
            else:
                source_thinner = Thinner(self.args.keep_source)
            source_node = ZfsNode(snapshot_time_format=self.snapshot_time_format, hold_name=self.hold_name, logger=self,
            source_node = ZfsNode(utc=self.args.utc,
                                  snapshot_time_format=self.snapshot_time_format, hold_name=self.hold_name, logger=self,
                                  ssh_config=self.args.ssh_config,
                                  ssh_to=self.args.ssh_source, readonly=self.args.test,
                                  debug_output=self.args.debug_output, description=description, thinner=source_thinner)

            ################# select source datasets
            self.set_title("Selecting")
            source_datasets = source_node.selected_datasets(property_name=self.property_name,
            ( source_datasets, excluded_datasets) = source_node.selected_datasets(property_name=self.property_name,
                                                            exclude_received=self.args.exclude_received,
                                                            exclude_paths=self.exclude_paths,
                                                            exclude_unchanged=self.args.exclude_unchanged,
                                                            min_change=self.args.min_change)
            if not source_datasets:
                                                            exclude_unchanged=self.args.exclude_unchanged)
            if not source_datasets and not excluded_datasets:
                self.print_error_sources()
                return 255

            ################# snapshotting
            if not self.args.no_snapshot:
                self.set_title("Snapshotting")
                snapshot_name = time.strftime(self.snapshot_time_format)
                snapshot_name = datetime_now(self.args.utc).strftime(self.snapshot_time_format)
                source_node.consistent_snapshot(source_datasets, snapshot_name,
                                                min_changed_bytes=self.args.min_change,
                                                pre_snapshot_cmds=self.args.pre_snapshot_cmd,
                                                post_snapshot_cmds=self.args.post_snapshot_cmd)
                                                post_snapshot_cmds=self.args.post_snapshot_cmd,
                                                set_snapshot_properties=self.set_snapshot_properties_list())

            ################# sync
            # if target is specified, we sync the datasets, otherwise we just thin the source. (e.g. snapshot mode)
@ -429,7 +504,8 @@ class ZfsAutobackup(ZfsAuto):
                target_thinner = None
            else:
                target_thinner = Thinner(self.args.keep_target)
            target_node = ZfsNode(snapshot_time_format=self.snapshot_time_format, hold_name=self.hold_name,
            target_node = ZfsNode(utc=self.args.utc,
                                  snapshot_time_format=self.snapshot_time_format, hold_name=self.hold_name,
                                  logger=self, ssh_config=self.args.ssh_config,
                                  ssh_to=self.args.ssh_target,
                                  readonly=self.args.test, debug_output=self.args.debug_output,
@ -445,6 +521,9 @@ class ZfsAutobackup(ZfsAuto):
                raise (Exception(
                    "Target path '{}' does not exist. Please create this dataset first.".format(target_dataset)))

            # check for collisions due to strip-path
            self.check_target_names(source_node, source_datasets, target_node)

            # do the actual sync
            # NOTE: even with no_send, no_thinning and no_snapshot it does a useful thing: it checks the common snapshots and shows incompatible snapshots
            fail_count = self.sync_datasets(
@ -474,6 +553,7 @@ class ZfsAutobackup(ZfsAuto):
                self.verbose("")
                self.warning("TEST MODE - DID NOT MAKE ANY CHANGES!")

            self.clear_progress()
            return fail_count

        except Exception as e:
@ -489,7 +569,10 @@ class ZfsAutobackup(ZfsAuto):
def cli():
    import sys

    sys.exit(ZfsAutobackup(sys.argv[1:], False).run())
    signal(SIGPIPE, sigpipe_handler)

    failed_datasets=ZfsAutobackup(sys.argv[1:], False).run()
    sys.exit(min(failed_datasets, 255))


if __name__ == "__main__":
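The new cli() above returns the number of failed datasets as the process exit status, clamped with min(failed_datasets, 255) because POSIX exit statuses are a single byte. A tiny sketch of the wrap-around the clamp avoids (the count is a hypothetical value):

failed = 256
print(failed % 256)        # 0 -> the OS would report this as success
print(min(failed, 255))    # 255 -> still clearly a failure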
@ -1,84 +1,70 @@
import os
import time
# from util import activate_volume_snapshot, create_mountpoints, cleanup_mountpoint
from signal import signal, SIGPIPE
from .util import output_redir, sigpipe_handler

from .ExecuteNode import ExecuteNode
from .ZfsAuto import ZfsAuto
from .ZfsDataset import ZfsDataset
from .ZfsNode import ZfsNode
import sys
import platform

def tmp_name(suffix=""):
    """create temporary name unique to this process and node"""

    #we could use uuids but those are ugly and confusing
    name="zfstmp_{}_{}".format(platform.node(), os.getpid())
    name=name+suffix
    return name
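A quick usage sketch of tmp_name(); the printed value is a hypothetical example, since hostname and pid are machine-specific:

print(tmp_name("source"))  # e.g. zfstmp_pve1_12345source

Embedding node name and pid keeps concurrent runs on different hosts or processes from clobbering each other's temporary clones and mount points.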
def hash_tree_tar(node, path):
    """calculate md5sum of a directory tree, using tar"""

    node.debug("Hashing filesystem {} ".format(path))

    cmd=[ "tar", "-cf", "-", "-C", path, ".",
          ExecuteNode.PIPE, "md5sum"]

    stdout = node.run(cmd)

    if node.readonly:
        hashed=None
    else:
        hashed = stdout[0].split(" ")[0]

    node.debug("Hash of {} filesystem is {}".format(path, hashed))

    return hashed


def compare_trees_tar(source_node, source_path, target_node, target_path):
    """compare two trees using tar. compatible and simple"""

    source_hash= hash_tree_tar(source_node, source_path)
    target_hash= hash_tree_tar(target_node, target_path)

    if source_hash != target_hash:
        raise Exception("md5hash difference: {} != {}".format(source_hash, target_hash))

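In shell terms, hash_tree_tar() streams a tar archive of the tree into md5sum on each node and compare_trees_tar() just compares the two digests. A minimal local-only sketch of the same pipeline (assumes tar and md5sum in PATH; the paths are hypothetical):

import subprocess

def local_tree_hash(path):
    # tar the tree to stdout and pipe it through md5sum, like hash_tree_tar() does via node.run()
    tar = subprocess.Popen(["tar", "-cf", "-", "-C", path, "."], stdout=subprocess.PIPE)
    md5 = subprocess.check_output(["md5sum"], stdin=tar.stdout)
    tar.wait()
    return md5.decode().split(" ")[0]

if local_tree_hash("/mnt/source") != local_tree_hash("/mnt/target"):
    raise Exception("trees differ")

Note the digests only match when both sides produce byte-identical archives, which is presumably why this diff also introduces a find-based compare method.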
def compare_trees_rsync(source_node, source_path, target_node, target_path):
    """use rsync to compare two trees.
    Advantage is that we can see which individual files differ.
    But requires rsync and can't do remote to remote."""

    cmd = ["rsync", "-rcn", "--info=COPY,DEL,MISC,NAME,SYMSAFE", "--msgs2stderr", "--delete" ]

    #local
    if source_node.ssh_to is None and target_node.ssh_to is None:
        cmd.append("{}/".format(source_path))
        cmd.append("{}/".format(target_path))
        source_node.debug("Running rsync locally, on source.")
        stdout, stderr = source_node.run(cmd, return_stderr=True)

    #source is local
    elif source_node.ssh_to is None and target_node.ssh_to is not None:
        cmd.append("{}/".format(source_path))
        cmd.append("{}:{}/".format(target_node.ssh_to, target_path))
        source_node.debug("Running rsync locally, on source.")
        stdout, stderr = source_node.run(cmd, return_stderr=True)

    #target is local
    elif source_node.ssh_to is not None and target_node.ssh_to is None:
        cmd.append("{}:{}/".format(source_node.ssh_to, source_path))
        cmd.append("{}/".format(target_path))
        source_node.debug("Running rsync locally, on target.")
        stdout, stderr=target_node.run(cmd, return_stderr=True)

    else:
        raise Exception("Source and target can't both be remote when verifying. (rsync limitation)")

    if stderr:
        raise Exception("Dataset verify failed, see above list for differences")
# # try to be as unix compatible as possible, while still having decent performance
# def compare_trees_find(source_node, source_path, target_node, target_path):
#     # find /tmp/zfstmp_pve1_1993135target/ -xdev -type f -print0 | xargs -0 md5sum | md5sum -c
#
#     #verify tree has at least one file
#
#     stdout=source_node.run(["find", ".", "-type", "f",
#                             ExecuteNode.PIPE, "head", "-n1",
#                             ], cwd=source_path)
#
#     if not stdout:
#         source_node.debug("No files, skipping check")
#     else:
#         pipe=source_node.run(["find", ".", "-type", "f", "-print0",
#                               ExecuteNode.PIPE, "xargs", "-0", "md5sum"
#                               ], pipe=True, cwd=source_path)
#         stdout=target_node.run([ "md5sum", "-c", "--quiet"], inp=pipe, cwd=target_path, valid_exitcodes=[0,1])
#
#         if len(stdout):
#             for line in stdout:
#                 target_node.error("md5sum: "+line)
#
#             raise(Exception("Some files have checksum errors"))
#
#
# def compare_trees_rsync(source_node, source_path, target_node, target_path):
#     """use rsync to compare two trees.
#     Advantage is that we can see which individual files differ.
#     But requires rsync and can't do remote to remote."""
#
#     cmd = ["rsync", "-rcnq", "--info=COPY,DEL,MISC,NAME,SYMSAFE", "--msgs2stderr", "--delete" ]
#
#     #local
#     if source_node.ssh_to is None and target_node.ssh_to is None:
#         cmd.append("{}/".format(source_path))
#         cmd.append("{}/".format(target_path))
#         source_node.debug("Running rsync locally, on source.")
#         stdout, stderr = source_node.run(cmd, return_stderr=True)
#
#     #source is local
#     elif source_node.ssh_to is None and target_node.ssh_to is not None:
#         cmd.append("{}/".format(source_path))
#         cmd.append("{}:{}/".format(target_node.ssh_to, target_path))
#         source_node.debug("Running rsync locally, on source.")
#         stdout, stderr = source_node.run(cmd, return_stderr=True)
#
#     #target is local
#     elif source_node.ssh_to is not None and target_node.ssh_to is None:
#         cmd.append("{}:{}/".format(source_node.ssh_to, source_path))
#         cmd.append("{}/".format(target_path))
#         source_node.debug("Running rsync locally, on target.")
#         stdout, stderr=target_node.run(cmd, return_stderr=True)
#
#     else:
#         raise Exception("Source and target can't both be remote when verifying. (rsync limitation)")
#
#     if stderr:
#         raise Exception("Dataset verify failed, see above list for differences")


def verify_filesystem(source_snapshot, source_mnt, target_snapshot, target_mnt, method):
@ -92,142 +78,66 @@ def verify_filesystem(source_snapshot, source_mnt, target_snapshot, target_mnt,

        if method=='rsync':
            compare_trees_rsync(source_snapshot.zfs_node, source_mnt, target_snapshot.zfs_node, target_mnt)
        elif method == 'tar':
            compare_trees_tar(source_snapshot.zfs_node, source_mnt, target_snapshot.zfs_node, target_mnt)
        # elif method == 'tar':
        #     compare_trees_tar(source_snapshot.zfs_node, source_mnt, target_snapshot.zfs_node, target_mnt)
        elif method == 'find':
            compare_trees_find(source_snapshot.zfs_node, source_mnt, target_snapshot.zfs_node, target_mnt)
        else:
            raise(Exception("program error, unknown method"))

    finally:
        source_snapshot.unmount()
        target_snapshot.unmount()
        source_snapshot.unmount(source_mnt)
        target_snapshot.unmount(target_mnt)


def hash_dev(node, dev):
    """calculate md5sum of a device on a node"""

    node.debug("Hashing volume {} ".format(dev))

    cmd = [ "md5sum", dev ]

    stdout = node.run(cmd)

    if node.readonly:
        hashed=None
    else:
        hashed = stdout[0].split(" ")[0]

    node.debug("Hash of volume {} is {}".format(dev, hashed))

    return hashed

# def activate_volume_snapshot(dataset, snapshot):
#     """enables snapdev, waits and tries to find out /dev path to the volume, in a compatible way. (linux/freebsd/smartos)"""
# def hash_dev(node, dev):
#     """calculate md5sum of a device on a node"""
#
#     dataset.set("snapdev", "visible")
#     node.debug("Hashing volume {} ".format(dev))
#
#     #NOTE: add smartos location to this list as well
#     locations=[
#         "/dev/zvol/" + snapshot.name
#     ]
#     cmd = [ "md5sum", dev ]
#
#     dataset.debug("Waiting for /dev entry to appear...")
#     time.sleep(0.1)
#     stdout = node.run(cmd)
#
#     start_time=time.time()
#     while time.time()-start_time<10:
#         for location in locations:
#             stdout, stderr, exit_code=dataset.zfs_node.run(["test", "-e", location], return_all=True, valid_exitcodes=[0,1])
#     if node.readonly:
#         hashed=None
#     else:
#         hashed = stdout[0].split(" ")[0]
#
#             #fake it in testmode
#             if dataset.zfs_node.readonly:
#                 return location
#     node.debug("Hash of volume {} is {}".format(dev, hashed))
#
#             if exit_code==0:
#                 return location
#         time.sleep(1)
#
#     raise(Exception("Timeout while waiting for {} entry to appear.".format(locations)))
#
# def deacitvate_volume_snapshot(dataset):
#     dataset.inherit("snapdev")
#     return hashed

#NOTE: https://www.google.com/search?q=Mount+Path+Limit+freebsd
#Freebsd has limitations regarding path length, so we can't use the above method.
#Instead we create a temporary clone

def get_tmp_clone_name(snapshot):
    pool=snapshot.zfs_node.get_pool(snapshot)
    return pool.name+"/"+tmp_name()

def activate_volume_snapshot(snapshot):
    """clone volume, wait and try to find out the /dev path to the volume, in a compatible way. (linux/freebsd/smartos)"""

    clone_name=get_tmp_clone_name(snapshot)
    clone=snapshot.clone(clone_name)

    #NOTE: add smartos location to this list as well
    locations=[
        "/dev/zvol/" + clone_name
    ]

    clone.debug("Waiting for /dev entry to appear...")
    time.sleep(0.1)

    start_time=time.time()
    while time.time()-start_time<10:
        for location in locations:
            stdout, stderr, exit_code=clone.zfs_node.run(["test", "-e", location], return_all=True, valid_exitcodes=[0,1])

            #fake it in testmode
            if clone.zfs_node.readonly:
                return location

            if exit_code==0:
                return location
        time.sleep(1)

    raise(Exception("Timeout while waiting for {} entry to appear.".format(locations)))

def deacitvate_volume_snapshot(snapshot):
    clone_name=get_tmp_clone_name(snapshot)
    clone=snapshot.zfs_node.get_dataset(clone_name)
    clone.destroy()
# def deacitvate_volume_snapshot(snapshot):
#     clone_name=get_tmp_clone_name(snapshot)
#     clone=snapshot.zfs_node.get_dataset(clone_name)
#     clone.destroy(deferred=True, verbose=False)

def verify_volume(source_dataset, source_snapshot, target_dataset, target_snapshot):
    """compare the contents of two zfs volume snapshots"""

    try:
        source_dev= activate_volume_snapshot(source_snapshot)
        target_dev= activate_volume_snapshot(target_snapshot)
    # try:
    source_dev= activate_volume_snapshot(source_snapshot)
    target_dev= activate_volume_snapshot(target_snapshot)

        source_hash= hash_dev(source_snapshot.zfs_node, source_dev)
        target_hash= hash_dev(target_snapshot.zfs_node, target_dev)
    source_hash= hash_dev(source_snapshot.zfs_node, source_dev)
    target_hash= hash_dev(target_snapshot.zfs_node, target_dev)

        if source_hash!=target_hash:
            raise Exception("md5hash difference: {} != {}".format(source_hash, target_hash))
    if source_hash!=target_hash:
        raise Exception("md5hash difference: {} != {}".format(source_hash, target_hash))

    finally:
        deacitvate_volume_snapshot(source_snapshot)
        deacitvate_volume_snapshot(target_snapshot)

def create_mountpoints(source_node, target_node):

    # prepare mount points
    source_node.debug("Create temporary mount point")
    source_mnt = "/tmp/"+tmp_name("source")
    source_node.run(["mkdir", source_mnt])

    target_node.debug("Create temporary mount point")
    target_mnt = "/tmp/"+tmp_name("target")
    target_node.run(["mkdir", target_mnt])

    return source_mnt, target_mnt
    # finally:
    #     deacitvate_volume_snapshot(source_snapshot)
    #     deacitvate_volume_snapshot(target_snapshot)


def cleanup_mountpoint(node, mnt):
    node.debug("Cleaning up temporary mount point")
    node.run([ "rmdir", mnt ], hide_errors=True, valid_exitcodes=[] )

# class ZfsAutoChecksumVolume(ZfsAuto):
#     def __init__(self, argv, print_arguments=True):
#
#         # NOTE: common options and parameters are in ZfsAuto
#         super(ZfsAutoverify, self).__init__(argv, print_arguments)

class ZfsAutoverify(ZfsAuto):
    """The zfs-autoverify class, default arguments and stuff come from ZfsAuto"""
@ -254,8 +164,8 @@ class ZfsAutoverify(ZfsAuto):
        parser=super(ZfsAutoverify, self).get_parser()

        group=parser.add_argument_group("Verify options")
        group.add_argument('--fs-compare', metavar='METHOD', default="tar", choices=["tar", "rsync"],
                           help='Compare method to use for filesystems. (tar, rsync) Default: %(default)s ')
        group.add_argument('--fs-compare', metavar='METHOD', default="find", choices=["find", "rsync"],
                           help='Compare method to use for filesystems. (find, rsync) Default: %(default)s ')

        return parser

@ -276,7 +186,7 @@ class ZfsAutoverify(ZfsAuto):
            target_dataset = target_node.get_dataset(target_name)

            # find common snapshots to verify
            source_snapshot = source_dataset.find_common_snapshot(target_dataset)
            source_snapshot = source_dataset.find_common_snapshot(target_dataset, True)
            target_snapshot = target_dataset.find_snapshot(source_snapshot)

            if source_snapshot is None or target_snapshot is None:
@ -293,8 +203,8 @@ class ZfsAutoverify(ZfsAuto):

            except Exception as e:
                if self.args.progress:
                    self.clear_progress()
                # if self.args.progress:
                #     self.clear_progress()

                fail_count = fail_count + 1
                target_dataset.error("FAILED: " + str(e))
@ -302,8 +212,8 @@ class ZfsAutoverify(ZfsAuto):
                    self.verbose("Debug mode, aborting on first error")
                    raise

        if self.args.progress:
            self.clear_progress()
        # if self.args.progress:
        #     self.clear_progress()

        return fail_count

@ -321,25 +231,26 @@ class ZfsAutoverify(ZfsAuto):
        self.set_title("Source settings")

        description = "[Source]"
        source_node = ZfsNode(snapshot_time_format=self.snapshot_time_format, hold_name=self.hold_name, logger=self,
        source_node = ZfsNode(utc=self.args.utc,
                              snapshot_time_format=self.snapshot_time_format, hold_name=self.hold_name, logger=self,
                              ssh_config=self.args.ssh_config,
                              ssh_to=self.args.ssh_source, readonly=self.args.test,
                              debug_output=self.args.debug_output, description=description)

        ################# select source datasets
        self.set_title("Selecting")
        source_datasets = source_node.selected_datasets(property_name=self.property_name,
        ( source_datasets, excluded_datasets) = source_node.selected_datasets(property_name=self.property_name,
                                                        exclude_received=self.args.exclude_received,
                                                        exclude_paths=self.exclude_paths,
                                                        exclude_unchanged=self.args.exclude_unchanged,
                                                        min_change=0)
        if not source_datasets:
                                                        exclude_unchanged=self.args.exclude_unchanged)
        if not source_datasets and not excluded_datasets:
            self.print_error_sources()
            return 255

        # create target_node
        self.set_title("Target settings")
        target_node = ZfsNode(snapshot_time_format=self.snapshot_time_format, hold_name=self.hold_name,
        target_node = ZfsNode(utc=self.args.utc,
                              snapshot_time_format=self.snapshot_time_format, hold_name=self.hold_name,
                              logger=self, ssh_config=self.args.ssh_config,
                              ssh_to=self.args.ssh_target,
                              readonly=self.args.test, debug_output=self.args.debug_output,
@ -395,7 +306,10 @@ class ZfsAutoverify(ZfsAuto):
def cli():
    import sys

    sys.exit(ZfsAutoverify(sys.argv[1:], False).run())
    raise(Exception("This program is incomplete, don't use it yet."))
    signal(SIGPIPE, sigpipe_handler)
    failed = ZfsAutoverify(sys.argv[1:], False).run()
    sys.exit(min(failed,255))


if __name__ == "__main__":
310
zfs_autobackup/ZfsCheck.py
Normal file
@ -0,0 +1,310 @@
from __future__ import print_function

import time
from signal import signal, SIGPIPE

from . import util
from .TreeHasher import TreeHasher
from .BlockHasher import BlockHasher
from .ZfsNode import ZfsNode
from .util import *
from .CliBase import CliBase


class ZfsCheck(CliBase):

    def __init__(self, argv, print_arguments=True):

        # NOTE: common options and argument parsing are in CliBase
        super(ZfsCheck, self).__init__(argv, print_arguments)

        self.node = ZfsNode(self.log, utc=self.args.utc, readonly=self.args.test, debug_output=self.args.debug_output)

        self.block_hasher = BlockHasher(count=self.args.count, bs=self.args.block_size, skip=self.args.skip)

    def get_parser(self):

        parser = super(ZfsCheck, self).get_parser()

        # positional arguments
        parser.add_argument('target', metavar='TARGET', default=None, nargs='?', help='Target to checksum. (can be blockdevice, directory or ZFS snapshot)')

        group = parser.add_argument_group('Checker options')

        group.add_argument('--block-size', metavar="BYTES", default=4096, help="Read block-size, default %(default)s",
                           type=int)
        group.add_argument('--count', metavar="COUNT", default=int((100 * (1024 ** 2)) / 4096),
                           help="Hash chunks of COUNT blocks. Default %(default)s . (CHUNK size is BYTES * COUNT) ", type=int)  # 100MiB

        group.add_argument('--check', '-c', metavar="FILE", default=None, const=True, nargs='?',
                           help="Read hashes from STDIN (or FILE) and compare them")

        group.add_argument('--skip', '-s', metavar="NUMBER", default=0, type=int,
                           help="Skip this number of chunks after every hash. %(default)s")

        return parser

    def parse_args(self, argv):
        args = super(ZfsCheck, self).parse_args(argv)

        if args.test:
            self.warning("TEST MODE - WILL ONLY DO READ-ONLY STUFF")

        if args.target is None:
            self.error("Please specify TARGET")
            sys.exit(1)

        self.verbose("Target : {}".format(args.target))
        self.verbose("Block size : {} bytes".format(args.block_size))
        self.verbose("Block count : {}".format(args.count))
        self.verbose("Effective chunk size : {} bytes".format(args.count*args.block_size))
        self.verbose("Skip chunk count : {} (checks {:.2f}% of data)".format(args.skip, 100/(1+args.skip)))
        self.verbose("")

        return args
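The chunk and skip arithmetic reported above is easiest to see with concrete numbers; a quick sketch with hypothetical option values:

# Hypothetical: zfs-check --block-size 4096 --count 25600 --skip 9
block_size = 4096
count = 25600
skip = 9

chunk_bytes = block_size * count       # 104857600 bytes = 100 MiB per hashed chunk
coverage = 100 / (1 + skip)            # 10.0 -> only every 10th chunk gets hashed
print(chunk_bytes, coverage)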
    def prepare_zfs_filesystem(self, snapshot):

        mnt = "/tmp/" + tmp_name()
        self.debug("Create temporary mount point {}".format(mnt))
        self.node.run(["mkdir", mnt])
        snapshot.mount(mnt)
        return mnt

    def cleanup_zfs_filesystem(self, snapshot):
        mnt = "/tmp/" + tmp_name()
        snapshot.unmount(mnt)
        self.debug("Cleaning up temporary mount point")
        self.node.run(["rmdir", mnt], hide_errors=True, valid_exitcodes=[])

    # NOTE: https://www.google.com/search?q=Mount+Path+Limit+freebsd
    # FreeBSD has limitations regarding path length, so we have to clone it so the path stays short
    def prepare_zfs_volume(self, snapshot):
        """clone volume, wait and try to find out the /dev path to the volume, in a compatible way. (linux/freebsd/smartos)"""

        clone_name = get_tmp_clone_name(snapshot)
        clone = snapshot.clone(clone_name)

        # TODO: add smartos location to this list as well
        locations = [
            "/dev/zvol/" + clone_name
        ]

        clone.debug("Waiting for /dev entry to appear in: {}".format(locations))
        time.sleep(0.1)

        start_time = time.time()
        while time.time() - start_time < 10:
            for location in locations:
                if os.path.exists(location):
                    return location

                # fake it in testmode
                if self.args.test:
                    return location

            time.sleep(1)

        raise (Exception("Timeout while waiting for /dev entry to appear. (looking in: {}). Hint: did you forget to load the encryption key?".format(locations)))

    def cleanup_zfs_volume(self, snapshot):
        """destroys temporary volume snapshot"""
        clone_name = get_tmp_clone_name(snapshot)
        clone = snapshot.zfs_node.get_dataset(clone_name)
        clone.destroy(deferred=True, verbose=False)

    def generate_tree_hashes(self, prepared_target):

        tree_hasher = TreeHasher(self.block_hasher)
        self.debug("Hashing tree: {}".format(prepared_target))
        for i in tree_hasher.generate(prepared_target):
            yield i

    def generate_tree_compare(self, prepared_target, input_generator=None):

        tree_hasher = TreeHasher(self.block_hasher)
        self.debug("Comparing tree: {}".format(prepared_target))
        for i in tree_hasher.compare(prepared_target, input_generator):
            yield i

    def generate_file_hashes(self, prepared_target):

        self.debug("Hashing file: {}".format(prepared_target))
        for i in self.block_hasher.generate(prepared_target):
            yield i

    def generate_file_compare(self, prepared_target, input_generator=None):

        self.debug("Comparing file: {}".format(prepared_target))
        for i in self.block_hasher.compare(prepared_target, input_generator):
            yield i

    def generate_input(self):
        """parse input lines and yield items to use in compare functions"""

        if self.args.check is True:
            input_fh=sys.stdin
        else:
            input_fh=open(self.args.check, 'r')

        last_progress_time = time.time()
        progress_checked = 0
        progress_skipped = 0

        line=input_fh.readline()
        skip=0
        while line:
            i=line.rstrip().split("\t")
            #ignore lines without tabs
            if (len(i)>1):

                if skip==0:
                    progress_checked=progress_checked+1
                    yield i
                    skip=self.args.skip
                else:
                    skip=skip-1
                    progress_skipped=progress_skipped+1

                if self.args.progress and time.time() - last_progress_time > 1:
                    last_progress_time = time.time()
                    self.progress("Checked {} hashes (skipped {})".format(progress_checked, progress_skipped))

            line=input_fh.readline()

        self.verbose("Checked {} hashes (skipped {})".format(progress_checked, progress_skipped))
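generate_input() expects the same tab-separated lines that print_hashes() below emits: an optional file name, a chunk number, and a hex digest. A sketch of how a hypothetical input line is split:

# Hypothetical hash lines as produced by a previous zfs-check run:
#   ./some/file<TAB>0<TAB>d41d8cd98f00b204e9800998ecf8427e   (tree mode: 3 fields)
#   0<TAB>d41d8cd98f00b204e9800998ecf8427e                  (file/volume mode: 2 fields)
line = "./some/file\t0\td41d8cd98f00b204e9800998ecf8427e\n"
fields = line.rstrip().split("\t")
print(fields)  # ['./some/file', '0', 'd41d8cd98f00b204e9800998ecf8427e']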
    def print_hashes(self, hash_generator):
        """prints hashes that are yielded by the specified hash_generator"""

        last_progress_time = time.time()
        progress_count = 0

        for i in hash_generator:

            if len(i) == 3:
                print("{}\t{}\t{}".format(*i))
            else:
                print("{}\t{}".format(*i))
            progress_count = progress_count + 1

            if self.args.progress and time.time() - last_progress_time > 1:
                last_progress_time = time.time()
                self.progress("Generated {} hashes.".format(progress_count))

            sys.stdout.flush()

        self.verbose("Generated {} hashes.".format(progress_count))
        self.clear_progress()

        return 0

    def print_errors(self, compare_generator):
        """prints errors that are yielded by the specified compare_generator"""
        errors = 0
        for i in compare_generator:
            errors = errors + 1

            if len(i) == 4:
                (file_name, chunk_nr, compare_hexdigest, actual_hexdigest) = i
                print("{}: Chunk {} failed: {} {}".format(file_name, chunk_nr, compare_hexdigest, actual_hexdigest))
            else:
                (chunk_nr, compare_hexdigest, actual_hexdigest) = i
                print("Chunk {} failed: {} {}".format(chunk_nr, compare_hexdigest, actual_hexdigest))

            sys.stdout.flush()

        self.verbose("Total errors: {}".format(errors))
        self.clear_progress()

        return errors

    def prepare_target(self):

        if "@" in self.args.target:
            # zfs snapshot
            snapshot=self.node.get_dataset(self.args.target)
            if not snapshot.exists:
                raise Exception("ZFS snapshot {} does not exist!".format(snapshot))
            dataset_type = snapshot.parent.properties['type']

            if dataset_type == 'volume':
                return self.prepare_zfs_volume(snapshot)
            elif dataset_type == 'filesystem':
                return self.prepare_zfs_filesystem(snapshot)
            else:
                raise Exception("Unknown dataset type")
        return self.args.target

    def cleanup_target(self):
        if "@" in self.args.target:
            # zfs snapshot
            snapshot=self.node.get_dataset(self.args.target)
            if not snapshot.exists:
                return

            dataset_type = snapshot.parent.properties['type']

            if dataset_type == 'volume':
                self.cleanup_zfs_volume(snapshot)
            elif dataset_type == 'filesystem':
                self.cleanup_zfs_filesystem(snapshot)

    def run(self):

        compare_generator=None
        hash_generator=None
        try:
            prepared_target=self.prepare_target()
            is_dir=os.path.isdir(prepared_target)

            #run as compare
            if self.args.check is not None:
                input_generator=self.generate_input()
                if is_dir:
                    compare_generator = self.generate_tree_compare(prepared_target, input_generator)
                else:
                    compare_generator=self.generate_file_compare(prepared_target, input_generator)
                errors=self.print_errors(compare_generator)
            #run as generator
            else:
                if is_dir:
                    hash_generator = self.generate_tree_hashes(prepared_target)
                else:
                    hash_generator=self.generate_file_hashes(prepared_target)

                errors=self.print_hashes(hash_generator)

        except Exception as e:
            self.error("Exception: " + str(e))
            if self.args.debug:
                raise
            return 255
        except KeyboardInterrupt:
            self.error("Aborted")
            return 255

        finally:
            #important to call check_output so that cleanup still functions in case of a broken pipe:
            # util.check_output()

            #close generators, to make sure files are not in use anymore when cleaning up
            if hash_generator is not None:
                hash_generator.close()
            if compare_generator is not None:
                compare_generator.close()
            self.cleanup_target()

        return errors


def cli():
    import sys
    signal(SIGPIPE, sigpipe_handler)
    failed=ZfsCheck(sys.argv[1:], False).run()
    sys.exit(min(failed,255))


if __name__ == "__main__":
    cli()
@ -1,4 +1,6 @@
import re
from datetime import datetime
import sys
import time

from .CachedProperty import CachedProperty
@ -56,6 +58,13 @@ class ZfsDataset:
        """
        self.zfs_node.error("{}: {}".format(self.name, txt))

    def warning(self, txt):
        """
        Args:
            :type txt: str
        """
        self.zfs_node.warning("{}: {}".format(self.name, txt))

    def debug(self, txt):
        """
        Args:
@ -79,7 +88,11 @@ class ZfsDataset:
        Args:
            :type count: int
        """
        return "/".join(self.split_path()[count:])
        components = self.split_path()
        if count > len(components):
            raise Exception("Trying to strip too much from path ({} items from {})".format(count, self.name))

        return "/".join(components[count:])
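The lstrip_path() change above turns silent over-stripping into a hard error. A quick sketch of the behavior on a hypothetical dataset name:

# Hypothetical dataset "rpool/data/www":
components = "rpool/data/www".split("/")
print("/".join(components[1:]))   # "data/www" -> used as the target suffix
print("/".join(components[3:]))   # ""         -> still fine: maps to target_path itself
# count=4 would now raise instead of silently returning "" like before:
# Exception: Trying to strip too much from path (4 items from rpool/data/www)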
    def rstrip_path(self, count):
        """return name with last count components stripped

@ -112,22 +125,25 @@ class ZfsDataset:
        """true if this dataset is a snapshot"""
        return self.name.find("@") != -1

    def is_selected(self, value, source, inherited, exclude_received, exclude_paths, exclude_unchanged, min_change):
    def is_selected(self, value, source, inherited, exclude_received, exclude_paths, exclude_unchanged):
        """determine if dataset should be selected for backup (called from
        ZfsNode)

        Args:
            :type exclude_paths: list of str
            :type exclude_paths: list[str]
            :type value: str
            :type source: str
            :type inherited: bool
            :type exclude_received: bool
            :type exclude_unchanged: bool
            :type min_change: bool
            :type exclude_unchanged: int

        :param value: Value of the zfs property ("false"/"true"/"child"/"-")
        :param value: Value of the zfs property ("false"/"true"/"child"/"parent"/"-")
        :param source: Source of the zfs property ("local"/"received", "-")
        :param inherited: True if the value/source was inherited from a higher dataset.

        Returns: True : Selected
                 False: Excluded
                 None: No property found
        """

        # sanity checks
@ -136,19 +152,23 @@ class ZfsDataset:
            raise (Exception(
                "{} autobackup-property has illegal source: '{}' (possible BUG)".format(self.name, source)))

        if value not in ["false", "true", "child", "-"]:
        if value not in ["false", "true", "child", "parent", "-"]:
            # user error
            raise (Exception(
                "{} autobackup-property has illegal value: '{}'".format(self.name, value)))

        # not specified, ignore
        if value == "-":
            return False
            return None

        # only select children of this dataset, ignore
        if value == "child" and not inherited:
            return False

        # only select parent, no children, ignore
        if value == "parent" and inherited:
            return False

        # manually excluded by property
        if value == "false":
            self.verbose("Excluded")
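The value handling above maps directly onto how the autobackup property is set on datasets. A condensed restatement of just these checks (not the real method, and ignoring the later exclude logic) to show what the new "parent" value does:

def classify(value, inherited):
    # condensed restatement of the checks above
    if value == "-":
        return None                              # no property found (new: was False)
    if value == "child" and not inherited:
        return False                             # only children of the tagged dataset
    if value == "parent" and inherited:
        return False                             # only the tagged dataset itself (new)
    if value == "false":
        return False                             # explicitly excluded
    return True

print(classify("parent", inherited=False))      # True: the tagged dataset itself
print(classify("parent", inherited=True))       # False: its children are skipped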
@ -169,26 +189,32 @@ class ZfsDataset:
            self.verbose("Excluded (dataset already received)")
            return False

        if exclude_unchanged and not self.is_changed(min_change):
            self.verbose("Excluded (unchanged since last snapshot)")
        if not self.is_changed(exclude_unchanged):
            self.verbose("Excluded (by --exclude-unchanged)")
            return False

        self.verbose("Selected")
        return True

    @CachedProperty
    @property
    def parent(self):
        """get zfs-parent of this dataset. for snapshots this means it will get
        the filesystem/volume that it belongs to. otherwise it will return the
        parent according to path

        we cache this so everything in the parent that is cached also stays.

        returns None if there is no parent.
        :rtype: ZfsDataset | None
        """
        if self.is_snapshot:
            return self.zfs_node.get_dataset(self.filesystem_name)
        else:
            return self.zfs_node.get_dataset(self.rstrip_path(1))
            stripped = self.rstrip_path(1)
            if stripped:
                return self.zfs_node.get_dataset(stripped)
            else:
                return None

    # NOTE: unused for now
    # def find_prev_snapshot(self, snapshot, also_other_snapshots=False):
@ -231,36 +257,50 @@ class ZfsDataset:
        return None

    @CachedProperty
    def exists_check(self):
        """check on disk if it exists"""
        self.debug("Checking if dataset exists")
        return (len(self.zfs_node.run(tab_split=True, cmd=["zfs", "list", self.name], readonly=True,
                                      valid_exitcodes=[0, 1],
                                      hide_errors=True)) > 0)

    @property
    def exists(self):
        """check if dataset exists. Use force to force a specific value to be
        cached, if you already know. Useful for performance reasons
        """returns True if dataset should exist.
        Use force_exists to force a specific value, if you already know. Useful for performance and test reasons
        """

        if self.force_exists is not None:
            self.debug("Checking if filesystem exists: was forced to {}".format(self.force_exists))
            if self.force_exists:
                self.debug("Dataset should exist")
            else:
                self.debug("Dataset should not exist")
            return self.force_exists
        else:
            self.debug("Checking if filesystem exists")
            return self.exists_check

        return (self.zfs_node.run(tab_split=True, cmd=["zfs", "list", self.name], readonly=True, valid_exitcodes=[0, 1],
                                  hide_errors=True) and True)

    def create_filesystem(self, parents=False):
    def create_filesystem(self, parents=False, unmountable=True):
        """create a filesystem

        Args:
            :type parents: bool
        """
        if parents:
            self.verbose("Creating filesystem and parents")
            self.zfs_node.run(["zfs", "create", "-p", self.name])
        else:
            self.verbose("Creating filesystem")
            self.zfs_node.run(["zfs", "create", self.name])

        # recurse up
        if parents and self.parent and not self.parent.exists:
            self.parent.create_filesystem(parents, unmountable)

        cmd = ["zfs", "create"]

        if unmountable:
            cmd.extend(["-o", "canmount=off"])

        cmd.append(self.name)
        self.zfs_node.run(cmd)

        self.force_exists = True
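The rewritten create_filesystem() above recurses up the tree and creates each missing ancestor individually with canmount=off, instead of relying on a single zfs create -p. A rough sketch of the effect, using subprocess so the block stays Python; the dataset chain is hypothetical and assumed not to exist yet:

# Rough equivalent of create_filesystem(parents=True, unmountable=True)
# for a hypothetical missing chain "backup/host1/data":
import subprocess

for name in ["backup", "backup/host1", "backup/host1/data"]:
    # each missing ancestor is created individually, unmountable:
    subprocess.run(["zfs", "create", "-o", "canmount=off", name], check=True)

Creating parents one by one is what lets the canmount=off property be applied to every ancestor, which zfs create -p cannot do.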
def destroy(self, fail_exception=False):
|
||||
def destroy(self, fail_exception=False, deferred=False, verbose=True):
|
||||
"""destroy the dataset. by default failures are not an exception, so we
|
||||
can continue making backups
|
||||
|
||||
@ -268,13 +308,20 @@ class ZfsDataset:
|
||||
:type fail_exception: bool
|
||||
"""
|
||||
|
||||
self.verbose("Destroying")
|
||||
if verbose:
|
||||
self.verbose("Destroying")
|
||||
else:
|
||||
self.debug("Destroying")
|
||||
|
||||
if self.is_snapshot:
|
||||
self.release()
|
||||
|
||||
try:
|
||||
self.zfs_node.run(["zfs", "destroy", self.name])
|
||||
if deferred and self.is_snapshot:
|
||||
self.zfs_node.run(["zfs", "destroy", "-d", self.name])
|
||||
else:
|
||||
self.zfs_node.run(["zfs", "destroy", self.name])
|
||||
|
||||
self.invalidate()
|
||||
self.force_exists = False
|
||||
return True
|
||||
@@ -292,9 +339,6 @@ class ZfsDataset:
             "zfs", "get", "-H", "-o", "property,value", "-p", "all", self.name
         ]

-        if not self.exists:
-            return {}
-
         self.debug("Getting zfs properties")

         ret = {}
@@ -315,7 +359,6 @@ class ZfsDataset:
         if min_changed_bytes == 0:
             return True

-
         if int(self.properties['written']) < min_changed_bytes:
             return False
         else:
@@ -332,7 +375,7 @@ class ZfsDataset:

     @property
     def holds(self):
-        """get list of holds for dataset"""
+        """get list[holds] for dataset"""

         output = self.zfs_node.run(["zfs", "holds", "-H", self.name], valid_exitcodes=[0], tab_split=True,
                                    readonly=True)
@@ -358,19 +401,32 @@ class ZfsDataset:
         """get timestamp from snapshot name. Only works for our own snapshots
         with the correct format.
         """
-        time_secs = time.mktime(time.strptime(self.snapshot_name, self.zfs_node.snapshot_time_format))
-        return time_secs
+        dt = datetime.strptime(self.snapshot_name, self.zfs_node.snapshot_time_format)
+        if sys.version_info[0] >= 3:
+            from datetime import timezone
+            if self.zfs_node.utc:
+                dt = dt.replace(tzinfo=timezone.utc)
+            seconds = dt.timestamp()
+        else:
+            # python2 has no good functions to deal with UTC. Yet the unix timestamp
+            # must be in UTC to allow comparison against `time.time()` in other parts
+            # of this project (e.g. Thinner.py). If we are handling UTC timestamps,
+            # we must adjust for that here.
+            if self.zfs_node.utc:
+                seconds = (dt - datetime(1970, 1, 1)).total_seconds()
+            else:
+                seconds = time.mktime(dt.timetuple())
+        return seconds

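The two branches matter because time.mktime() interprets a struct_time as local wall-clock time, while an epoch value must be UTC-based to compare against time.time(). A quick illustration of the difference (the format string is illustrative; the real one comes from snapshot_time_format):

import time
from datetime import datetime, timezone

dt = datetime.strptime("20240101000000", "%Y%m%d%H%M%S")

# Interpreted as local wall-clock time:
local_epoch = time.mktime(dt.timetuple())

# Interpreted as UTC, the Python-2-compatible way used above:
utc_epoch = (dt - datetime(1970, 1, 1)).total_seconds()

# Python 3 equivalent of the UTC branch:
assert utc_epoch == dt.replace(tzinfo=timezone.utc).timestamp()

# The two differ by the local UTC offset (0 only when local time is UTC).
print(local_epoch - utc_epoch)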
-    def from_names(self, names):
-        """convert a list of names to a list of ZfsDatasets for this zfs_node
+    def from_names(self, names, force_exists=None):
+        """convert a list[names] to a list of ZfsDatasets for this zfs_node

         Args:
-            :type names: list of str
+            :type names: list[str]
         """
         ret = []
         for name in names:
-            ret.append(self.zfs_node.get_dataset(name))
+            ret.append(self.zfs_node.get_dataset(name, force_exists))

         return ret

@@ -389,8 +445,11 @@ class ZfsDataset:

     @CachedProperty
     def snapshots(self):
-        """get all snapshots of this dataset"""
+        """get all snapshots of this dataset
+        :rtype: list[ZfsDataset]
+        """

+        # FIXME: don't check for existence. (currently needed for _add_virtual_snapshots)
         if not self.exists:
             return []

@@ -400,11 +459,11 @@ class ZfsDataset:
             "zfs", "list", "-d", "1", "-r", "-t", "snapshot", "-H", "-o", "name", self.name
         ]

-        return self.from_names(self.zfs_node.run(cmd=cmd, readonly=True))
+        return self.from_names(self.zfs_node.run(cmd=cmd, readonly=True), force_exists=True)

     @property
     def our_snapshots(self):
-        """get list of snapshots created by us of this dataset"""
+        """get list[snapshots] created by us of this dataset"""
         ret = []
         for snapshot in self.snapshots:
             if snapshot.is_ours():
@@ -499,7 +558,7 @@ class ZfsDataset:
             "zfs", "list", "-r", "-t", types, "-o", "name", "-H", self.name
         ])

-        return self.from_names(names[1:])
+        return self.from_names(names[1:], force_exists=True)

     @CachedProperty
     def datasets(self, types="filesystem,volume"):
@@ -515,9 +574,10 @@ class ZfsDataset:
             "zfs", "list", "-r", "-t", types, "-o", "name", "-H", "-d", "1", self.name
         ])

-        return self.from_names(names[1:])
+        return self.from_names(names[1:], force_exists=True)

-    def send_pipe(self, features, prev_snapshot, resume_token, show_progress, raw, send_properties, write_embedded, send_pipes, zfs_compressed):
+    def send_pipe(self, features, prev_snapshot, resume_token, show_progress, raw, send_properties, write_embedded,
+                  send_pipes, zfs_compressed):
         """returns a pipe with zfs send output for this snapshot

         resume_token: resume sending from this token. (in that case we don't
@@ -525,8 +585,8 @@ class ZfsDataset:

         Args:
             :param send_pipes: output cmd array that will be added to actual zfs send command. (e.g. mbuffer or compression program)
-            :type send_pipes: list of str
-            :type features: list of str
+            :type send_pipes: list[str]
+            :type features: list[str]
             :type prev_snapshot: ZfsDataset
             :type resume_token: str
             :type show_progress: bool
@@ -539,13 +599,14 @@ class ZfsDataset:

         # all kind of performance options:
         if 'large_blocks' in features and "-L" in self.zfs_node.supported_send_options:
-            cmd.append("--large-block")  # large block support (only if recordsize>128k which is seldom used)
+            # large block support (only if recordsize>128k which is seldom used)
+            cmd.append("-L")  # --large-block

         if write_embedded and 'embedded_data' in features and "-e" in self.zfs_node.supported_send_options:
-            cmd.append("--embed")  # WRITE_EMBEDDED, more compact stream
+            cmd.append("-e")  # --embed; WRITE_EMBEDDED, more compact stream

         if zfs_compressed and "-c" in self.zfs_node.supported_send_options:
-            cmd.append("--compressed")  # use compressed WRITE records
+            cmd.append("-c")  # --compressed; use compressed WRITE records

         # raw? (send over encrypted data in its original encrypted form without decrypting)
         if raw:
@@ -553,8 +614,8 @@ class ZfsDataset:

         # progress output
         if show_progress:
-            cmd.append("--verbose")
-            cmd.append("--parsable")
+            cmd.append("-v")  # --verbose
+            cmd.append("-P")  # --parsable

         # resume a previous send? (don't need more parameters in that case)
         if resume_token:
@@ -563,7 +624,7 @@ class ZfsDataset:
         else:
             # send properties
             if send_properties:
-                cmd.append("--props")
+                cmd.append("-p")  # --props

             # incremental?
             if prev_snapshot:
@@ -577,7 +638,8 @@ class ZfsDataset:

         return output_pipe

-    def recv_pipe(self, pipe, features, recv_pipes, filter_properties=None, set_properties=None, ignore_exit_code=False):
+    def recv_pipe(self, pipe, features, recv_pipes, filter_properties=None, set_properties=None, ignore_exit_code=False,
+                  force=False):
         """starts a zfs recv for this snapshot and uses pipe as input

         note: you can use it both on a snapshot or filesystem object. The
@@ -587,9 +649,9 @@ class ZfsDataset:
         Args:
             :param recv_pipes: input cmd array that will be prepended to actual zfs recv command. (e.g. mbuffer or decompression program)
             :type pipe: subprocess.Popen
-            :type features: list of str
-            :type filter_properties: list of str
-            :type set_properties: list of str
+            :type features: list[str]
+            :type filter_properties: list[str]
+            :type set_properties: list[str]
             :type ignore_exit_code: bool
         """

@@ -606,7 +668,7 @@ class ZfsDataset:

         cmd.extend(["zfs", "recv"])

-        # don't mount filesystem that is received
+        # don't let zfs recv mount everything that's received (even with canmount=noauto!)
         cmd.append("-u")

         for property_ in filter_properties:
@@ -618,6 +680,9 @@ class ZfsDataset:
         # verbose output
         cmd.append("-v")

+        if force:
+            cmd.append("-F")
+
         if 'extensible_dataset' in features and "-s" in self.zfs_node.supported_recv_options:
             # support resuming
             self.debug("Enabled resume support")
@@ -630,10 +695,10 @@ class ZfsDataset:
         else:
             valid_exitcodes = [0]

-        self.zfs_node.reset_progress()
+        # self.zfs_node.reset_progress()
         self.zfs_node.run(cmd, inp=pipe, valid_exitcodes=valid_exitcodes)

-        # invalidate cache, but we at least know we exist now
+        # invalidate cache
         self.invalidate()

         # in test mode we assume everything was ok and it exists
@@ -646,23 +711,51 @@ class ZfsDataset:
             self.error("error during transfer")
             raise (Exception("Target doesn't exist after transfer, something went wrong."))

+        # at this point we're sure the actual dataset exists
+        self.parent.force_exists = True
+
+    def automount(self):
+        """Mount the dataset as if one did a zfs mount -a, but only for this dataset.
+        Failure to mount doesn't result in an exception, but outputs errors to STDERR.
+        """
+
+        self.debug("Auto mounting")
+
+        if self.properties['type'] != "filesystem":
+            return
+
+        if self.properties['canmount'] != 'on':
+            return
+
+        if self.properties['mountpoint'] == 'legacy':
+            return
+
+        if self.properties['mountpoint'] == 'none':
+            return
+
+        if self.properties['encryption'] != 'off' and self.properties['keystatus'] == 'unavailable':
+            return
+
+        self.zfs_node.run(["zfs", "mount", self.name], valid_exitcodes=[0, 1])
+
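automount() mirrors what `zfs mount -a` would decide for a single filesystem: anything ZFS itself would not auto-mount (volumes, canmount other than on, legacy/none mountpoints, locked encryption keys) is silently skipped. A condensed sketch of the same predicate (property values are illustrative):

def should_automount(props):
    """True only for filesystems that `zfs mount -a` would mount (sketch)."""
    return (props["type"] == "filesystem"
            and props["canmount"] == "on"
            and props["mountpoint"] not in ("legacy", "none")
            and not (props["encryption"] != "off"
                     and props["keystatus"] == "unavailable"))

assert should_automount({"type": "filesystem", "canmount": "on",
                         "mountpoint": "/data", "encryption": "off",
                         "keystatus": "none"})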
     def transfer_snapshot(self, target_snapshot, features, prev_snapshot, show_progress,
                           filter_properties, set_properties, ignore_recv_exit_code, resume_token,
-                          raw, send_properties, write_embedded, send_pipes, recv_pipes, zfs_compressed):
+                          raw, send_properties, write_embedded, send_pipes, recv_pipes, zfs_compressed, force):
         """transfer this snapshot to target_snapshot. specify prev_snapshot for
         incremental transfer

         connects a send_pipe() to recv_pipe()

         Args:
-            :type send_pipes: list of str
-            :type recv_pipes: list of str
+            :type send_pipes: list[str]
+            :type recv_pipes: list[str]
             :type target_snapshot: ZfsDataset
-            :type features: list of str
+            :type features: list[str]
             :type prev_snapshot: ZfsDataset
             :type show_progress: bool
-            :type filter_properties: list of str
-            :type set_properties: list of str
+            :type filter_properties: list[str]
+            :type set_properties: list[str]
             :type ignore_recv_exit_code: bool
             :type resume_token: str
             :type raw: bool
@@ -676,20 +769,28 @@ class ZfsDataset:
         self.debug("Transfer snapshot to {}".format(target_snapshot.filesystem_name))

         if resume_token:
-            target_snapshot.verbose("resuming")
+            self.verbose("resuming")

         # initial or incremental
         if not prev_snapshot:
-            target_snapshot.verbose("receiving full".format(self.snapshot_name))
+            self.verbose("-> {} (new)".format(target_snapshot.filesystem_name))
         else:
             # incremental
-            target_snapshot.verbose("receiving incremental".format(self.snapshot_name))
+            self.verbose("-> {}".format(target_snapshot.filesystem_name))

         # do it
         pipe = self.send_pipe(features=features, show_progress=show_progress, prev_snapshot=prev_snapshot,
-                              resume_token=resume_token, raw=raw, send_properties=send_properties, write_embedded=write_embedded, send_pipes=send_pipes, zfs_compressed=zfs_compressed)
+                              resume_token=resume_token, raw=raw, send_properties=send_properties,
+                              write_embedded=write_embedded, send_pipes=send_pipes, zfs_compressed=zfs_compressed)
         target_snapshot.recv_pipe(pipe, features=features, filter_properties=filter_properties,
-                                  set_properties=set_properties, ignore_exit_code=ignore_recv_exit_code, recv_pipes=recv_pipes)
+                                  set_properties=set_properties, ignore_exit_code=ignore_recv_exit_code,
+                                  recv_pipes=recv_pipes, force=force)
+
+        # try to automount it, if it's the initial transfer
+        if not prev_snapshot:
+            # in test mode it doesn't actually exist, so don't try to mount it / read properties
+            if not target_snapshot.zfs_node.readonly:
+                target_snapshot.parent.automount()

     def abort_resume(self):
         """abort current resume state"""
@@ -731,16 +832,16 @@ class ZfsDataset:
         return None

     def thin_list(self, keeps=None, ignores=None):
-        """determines list of snapshots that should be kept or deleted based on
+        """determines list[snapshots] that should be kept or deleted based on
         the thinning schedule. cull the herd!

         returns: ( keeps, obsoletes )

         Args:
-            :param keeps: list of snapshots to always keep (usually the last)
+            :param keeps: list[snapshots] to always keep (usually the last)
             :param ignores: snapshots to completely ignore (usually incompatible target snapshots that are going to be destroyed anyway)
-            :type keeps: list of ZfsDataset
-            :type ignores: list of ZfsDataset
+            :type keeps: list[ZfsDataset]
+            :type ignores: list[ZfsDataset]
         """

         if ignores is None:
@@ -767,23 +868,29 @@ class ZfsDataset:
                 obsolete.destroy()
                 self.snapshots.remove(obsolete)

-    def find_common_snapshot(self, target_dataset):
-        """find latest common snapshot between us and target. returns None if it's
+    def find_common_snapshot(self, target_dataset, guid_check):
+        """find latest common snapshot between us and target. returns None if it's
         an initial transfer

         Args:
+            :type guid_check: bool
             :type target_dataset: ZfsDataset
         """

         if not target_dataset.snapshots:
             # target has nothing yet
             return None
         else:
             for source_snapshot in reversed(self.snapshots):
-                if target_dataset.find_snapshot(source_snapshot):
-                    source_snapshot.debug("common snapshot")
-                    return source_snapshot
-            target_dataset.error("Cant find common snapshot with source.")
-            raise (Exception("You probably need to delete the target dataset to fix this."))
+                target_snapshot = target_dataset.find_snapshot(source_snapshot)
+                if target_snapshot:
+                    if guid_check and source_snapshot.properties['guid'] != target_snapshot.properties['guid']:
+                        target_snapshot.warning("Common snapshot has invalid guid, ignoring.")
+                    else:
+                        target_snapshot.debug("common snapshot")
+                        return source_snapshot
+            # target_dataset.error("Cant find common snapshot with source.")
+            raise (Exception("Can't find common snapshot with target."))

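The guid check guards against a false common snapshot: two snapshots can share a name yet hold different data (for example if the snapshot was destroyed and recreated on the source), and ZFS's guid property identifies the snapshot's actual contents. A sketch of the matching logic with plain dicts (data is illustrative):

def find_common(source_snaps, target_snaps, guid_check):
    """Walk source snapshots newest-first; a name match on the target
    only counts as common when the guids agree (sketch)."""
    target_by_name = {s["name"]: s for s in target_snaps}
    for snap in reversed(source_snaps):
        match = target_by_name.get(snap["name"])
        if match is None:
            continue
        if guid_check and snap["guid"] != match["guid"]:
            continue  # same name, different data: not really common
        return snap
    return None

source = [{"name": "daily-1", "guid": 111}, {"name": "daily-2", "guid": 222}]
target = [{"name": "daily-1", "guid": 111}, {"name": "daily-2", "guid": 999}]
assert find_common(source, target, guid_check=True)["name"] == "daily-1"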
     def find_start_snapshot(self, common_snapshot, also_other_snapshots):
         """finds first snapshot to send :rtype: ZfsDataset or None if we can't
@@ -810,13 +917,16 @@ class ZfsDataset:

         return start_snapshot

-    def find_incompatible_snapshots(self, common_snapshot):
-        """returns a list of snapshots that is incompatible for a zfs recv onto
+    def find_incompatible_snapshots(self, common_snapshot, raw):
+        """returns a list[snapshots] that is incompatible for a zfs recv onto
         the common_snapshot. all direct followup snapshots with written=0 are
         compatible.

+        in raw-mode nothing is compatible. issue #219
+
         Args:
             :type common_snapshot: ZfsDataset
+            :type raw: bool
         """

         ret = []
@@ -824,7 +934,7 @@ class ZfsDataset:
         if common_snapshot and self.snapshots:
             followup = True
             for snapshot in self.snapshots[self.find_snapshot_index(common_snapshot) + 1:]:
-                if not followup or int(snapshot.properties['written']) != 0:
+                if raw or not followup or int(snapshot.properties['written']) != 0:
                     followup = False
                     ret.append(snapshot)

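The written=0 exception exists because a snapshot that recorded no new data can be recreated on the target without blocking the incremental stream, but only while the run of such snapshots directly follows the common one; in raw mode that exception goes away entirely. A small sketch of the selection (plain dicts, illustrative data):

def incompatible_after(snapshots, common_index, raw):
    """Snapshots after the common one block an incremental recv unless
    they are a direct run of written=0 followups (sketch)."""
    ret = []
    followup = True
    for snap in snapshots[common_index + 1:]:
        # In raw mode nothing after the common snapshot is compatible.
        if raw or not followup or snap["written"] != 0:
            followup = False
            ret.append(snap)
    return ret

snaps = [{"written": 0}, {"written": 0}, {"written": 4096}, {"written": 0}]
# Non-raw: the leading written=0 run is fine, everything after must go.
assert incompatible_after(snaps, -1, raw=False) == snaps[2:]
# Raw: nothing survives.
assert incompatible_after(snaps, -1, raw=True) == snaps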
@@ -834,8 +944,8 @@ class ZfsDataset:
         """only returns lists of allowed properties for this dataset type

         Args:
-            :type filter_properties: list of str
-            :type set_properties: list of str
+            :type filter_properties: list[str]
+            :type set_properties: list[str]
         """

         allowed_filter_properties = []
@@ -867,7 +977,8 @@ class ZfsDataset:
         while snapshot:
             # create virtual target snapshot
+            # NOTE: with force_exists we're telling the dataset it doesn't exist yet. (e.g. it's virtual)
-            virtual_snapshot = self.zfs_node.get_dataset(self.filesystem_name + "@" + snapshot.snapshot_name, force_exists=False)
+            virtual_snapshot = self.zfs_node.get_dataset(self.filesystem_name + "@" + snapshot.snapshot_name,
+                                                         force_exists=False)
             self.snapshots.append(virtual_snapshot)
             snapshot = source_dataset.find_next_snapshot(snapshot, also_other_snapshots)

@@ -877,9 +988,9 @@ class ZfsDataset:
         Args:
             :type common_snapshot: ZfsDataset
             :type target_dataset: ZfsDataset
-            :type source_obsoletes: list of ZfsDataset
-            :type target_obsoletes: list of ZfsDataset
-            :type target_keeps: list of ZfsDataset
+            :type source_obsoletes: list[ZfsDataset]
+            :type target_obsoletes: list[ZfsDataset]
+            :type target_keeps: list[ZfsDataset]
         """

         # on source: destroy all obsoletes before common. (since we can't send them anyway)
@@ -901,7 +1012,7 @@ class ZfsDataset:
         # on target: destroy everything that's obsolete, except common_snapshot
         for target_snapshot in target_dataset.snapshots:
             if (target_snapshot in target_obsoletes) \
-                    and ( not common_snapshot or (target_snapshot.snapshot_name != common_snapshot.snapshot_name)):
+                    and (not common_snapshot or (target_snapshot.snapshot_name != common_snapshot.snapshot_name)):
                 if target_snapshot.exists:
                     target_snapshot.destroy()

@@ -913,8 +1024,8 @@ class ZfsDataset:
             :type start_snapshot: ZfsDataset
         """

-        if 'receive_resume_token' in target_dataset.properties:
-            if start_snapshot==None:
+        if target_dataset.exists and 'receive_resume_token' in target_dataset.properties:
+            if start_snapshot == None:
                 target_dataset.verbose("Aborting resume, it's obsolete.")
                 target_dataset.abort_resume()
             else:
@@ -927,20 +1038,22 @@ class ZfsDataset:
         else:
             return resume_token

-    def _plan_sync(self, target_dataset, also_other_snapshots):
+    def _plan_sync(self, target_dataset, also_other_snapshots, guid_check, raw):
         """plan where to start syncing and what to sync and what to keep

         Args:
-            :rtype: ( ZfsDataset, ZfsDataset, list of ZfsDataset, list of ZfsDataset, list of ZfsDataset, list of ZfsDataset )
+            :rtype: ( ZfsDataset, ZfsDataset, list[ZfsDataset], list[ZfsDataset], list[ZfsDataset], list[ZfsDataset] )
             :type target_dataset: ZfsDataset
             :type also_other_snapshots: bool
+            :type guid_check: bool
+            :type raw: bool
         """

         # determine common and start snapshot
         target_dataset.debug("Determining start snapshot")
-        common_snapshot = self.find_common_snapshot(target_dataset)
+        common_snapshot = self.find_common_snapshot(target_dataset, guid_check=guid_check)
         start_snapshot = self.find_start_snapshot(common_snapshot, also_other_snapshots)
-        incompatible_target_snapshots = target_dataset.find_incompatible_snapshots(common_snapshot)
+        incompatible_target_snapshots = target_dataset.find_incompatible_snapshots(common_snapshot, raw)

         # let thinner decide what's obsolete on source
         source_obsoletes = []
@@ -962,7 +1075,7 @@ class ZfsDataset:
         what to do

         Args:
-            :type incompatible_target_snapshots: list of ZfsDataset
+            :type incompatible_target_snapshots: list[ZfsDataset]
             :type destroy_incompatible: bool
         """

@@ -970,40 +1083,60 @@ class ZfsDataset:
         if not destroy_incompatible:
             for snapshot in incompatible_target_snapshots:
                 snapshot.error("Incompatible snapshot")
-            raise (Exception("Please destroy incompatible snapshots or use --destroy-incompatible."))
+            raise (Exception("Please destroy incompatible snapshots on target, or use --destroy-incompatible."))
         else:
             for snapshot in incompatible_target_snapshots:
                 snapshot.verbose("Incompatible snapshot")
-                snapshot.destroy()
+                snapshot.destroy(fail_exception=True)
                 self.snapshots.remove(snapshot)

+            if len(incompatible_target_snapshots) > 0:
+                self.rollback()
+
     def sync_snapshots(self, target_dataset, features, show_progress, filter_properties, set_properties,
                        ignore_recv_exit_code, holds, rollback, decrypt, encrypt, also_other_snapshots,
-                       no_send, destroy_incompatible, send_pipes, recv_pipes, zfs_compressed):
+                       no_send, destroy_incompatible, send_pipes, recv_pipes, zfs_compressed, force, guid_check):
         """sync this dataset's snapshots to target_dataset, while also thinning
         out old snapshots along the way.

         Args:
-            :type send_pipes: list of str
-            :type recv_pipes: list of str
+            :type send_pipes: list[str]
+            :type recv_pipes: list[str]
             :type target_dataset: ZfsDataset
-            :type features: list of str
+            :type features: list[str]
             :type show_progress: bool
-            :type filter_properties: list of str
-            :type set_properties: list of str
+            :type filter_properties: list[str]
+            :type set_properties: list[str]
             :type ignore_recv_exit_code: bool
             :type holds: bool
             :type rollback: bool
             :type decrypt: bool
             :type also_other_snapshots: bool
             :type no_send: bool
             :type destroy_incompatible: bool
+            :type guid_check: bool
         """

+        # self.verbose("-> {}".format(target_dataset))
+
+        # defaults for these settings if there is no encryption stuff going on:
+        send_properties = True
+        raw = False
+        write_embedded = True
+
+        # source dataset encrypted?
+        if self.properties.get('encryption', 'off') != 'off':
+            # user wants to send it over decrypted?
+            if decrypt:
+                # when decrypting, zfs can't send properties
+                send_properties = False
+            else:
+                # keep data encrypted by sending it raw (including properties)
+                raw = True
+
         (common_snapshot, start_snapshot, source_obsoletes, target_obsoletes, target_keeps,
          incompatible_target_snapshots) = \
-            self._plan_sync(target_dataset=target_dataset, also_other_snapshots=also_other_snapshots)
+            self._plan_sync(target_dataset=target_dataset, also_other_snapshots=also_other_snapshots,
+                            guid_check=guid_check, raw=raw)

         # NOTE: we do this because we don't want filesystems to fill up when backups keep failing.
         # Also useful with no_send to still clean up stuff.
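Moving the encryption decision ahead of _plan_sync matters because the raw flag now feeds into the incompatibility check. The decision itself is small enough to isolate: if the source is encrypted and not being decrypted, the stream is sent raw; decrypting sacrifices property sending; re-encrypting at the target rules out embedded blocks. A hedged sketch of that decision table (function name is illustrative):

def send_mode(source_encrypted, decrypt, encrypt_target):
    """Decide how encryption shapes the zfs send (sketch).
    Returns (raw, send_properties, write_embedded)."""
    send_properties, raw, write_embedded = True, False, True
    if source_encrypted:
        if decrypt:
            send_properties = False  # zfs can't send properties when decrypting
        else:
            raw = True               # keep data encrypted: raw send
    if encrypt_target and not raw:
        write_embedded = False       # embedded blocks would bypass re-encryption
    return raw, send_properties, write_embedded

assert send_mode(True, False, False) == (True, True, True)    # raw passthrough
assert send_mode(True, True, False) == (False, False, True)   # decrypt, no props
assert send_mode(False, False, True) == (False, True, False)  # encrypt at target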
@@ -1021,49 +1154,38 @@ class ZfsDataset:
         # check if we can resume
         resume_token = self._validate_resume_token(target_dataset, start_snapshot)

-        # rollback target to latest?
-        if rollback:
-            target_dataset.rollback()
-
-        # defaults for these settings if there is no encryption stuff going on:
-        send_properties = True
-        raw = False
-        write_embedded = True
-
-        (active_filter_properties, active_set_properties) = self.get_allowed_properties(filter_properties, set_properties)
-
-        # source dataset encrypted?
-        if self.properties.get('encryption', 'off') != 'off':
-            # user wants to send it over decrypted?
-            if decrypt:
-                # when decrypting, zfs can't send properties
-                send_properties = False
-            else:
-                # keep data encrypted by sending it raw (including properties)
-                raw = True
+        (active_filter_properties, active_set_properties) = self.get_allowed_properties(filter_properties,
+                                                                                        set_properties)

         # encrypt at target?
         if encrypt and not raw:
             # filter out encryption properties to let encryption on the target take place
-            active_filter_properties.extend(["keylocation","pbkdf2iters","keyformat", "encryption"])
-            write_embedded=False
+            active_filter_properties.extend(["keylocation", "pbkdf2iters", "keyformat", "encryption"])
+            write_embedded = False

         # now actually transfer the snapshots
         prev_source_snapshot = common_snapshot
         source_snapshot = start_snapshot
+        do_rollback = rollback
         while source_snapshot:
             target_snapshot = target_dataset.find_snapshot(source_snapshot)  # still virtual

             # does target actually want it?
             if target_snapshot not in target_obsoletes:
+
+                # do the rollback, one time at first transfer
+                if do_rollback:
+                    target_dataset.rollback()
+                    do_rollback = False
+
                 source_snapshot.transfer_snapshot(target_snapshot, features=features,
                                                   prev_snapshot=prev_source_snapshot, show_progress=show_progress,
                                                   filter_properties=active_filter_properties,
                                                   set_properties=active_set_properties,
                                                   ignore_recv_exit_code=ignore_recv_exit_code,
-                                                  resume_token=resume_token, write_embedded=write_embedded, raw=raw, send_properties=send_properties, send_pipes=send_pipes, recv_pipes=recv_pipes, zfs_compressed=zfs_compressed)
+                                                  resume_token=resume_token, write_embedded=write_embedded, raw=raw,
+                                                  send_properties=send_properties, send_pipes=send_pipes,
+                                                  recv_pipes=recv_pipes, zfs_compressed=zfs_compressed, force=force)

             resume_token = None

@@ -1108,12 +1230,12 @@ class ZfsDataset:

         self.zfs_node.run(cmd=cmd, valid_exitcodes=[0])

-    def unmount(self):
+    def unmount(self, mount_point):

         self.debug("Unmounting")

         cmd = [
-            "umount", self.name
+            "umount", mount_point
         ]

         self.zfs_node.run(cmd=cmd, valid_exitcodes=[0])
@@ -1156,4 +1278,3 @@ class ZfsDataset:
         self.zfs_node.run(cmd=cmd, valid_exitcodes=[0])

         self.invalidate()
-

zfs_autobackup/ZfsNode.py

@@ -12,15 +12,17 @@ from .CachedProperty import CachedProperty
 from .ZfsPool import ZfsPool
 from .ZfsDataset import ZfsDataset
 from .ExecuteNode import ExecuteError
+from .util import datetime_now


 class ZfsNode(ExecuteNode):
     """a node that contains zfs datasets. implements global (systemwide/pool wide) zfs commands"""

-    def __init__(self, snapshot_time_format, hold_name, logger, ssh_config=None, ssh_to=None, readonly=False,
+    def __init__(self, logger, utc=False, snapshot_time_format="", hold_name="", ssh_config=None, ssh_to=None, readonly=False,
                  description="",
                  debug_output=False, thinner=None):

+        self.utc = utc
         self.snapshot_time_format = snapshot_time_format
         self.hold_name = hold_name

@@ -32,9 +34,9 @@ class ZfsNode(ExecuteNode):
             self.verbose("Using custom SSH config: {}".format(ssh_config))

         if ssh_to:
-            self.verbose("Datasets on: {}".format(ssh_to))
-        else:
-            self.verbose("Datasets are local")
+            self.verbose("SSH to: {}".format(ssh_to))
+        # else:
+        #     self.verbose("Datasets are local")

         if thinner is not None:
             rules = thinner.human_rules()
@@ -58,7 +60,8 @@ class ZfsNode(ExecuteNode):
     def thin(self, objects, keep_objects):
         # NOTE: if thinning is disabled with --no-thinning, self.__thinner will be None.
         if self.__thinner is not None:
-            return self.__thinner.thin(objects, keep_objects)
+            return self.__thinner.thin(objects, keep_objects, datetime_now(self.utc).timestamp())
         else:
             return (keep_objects, [])

@@ -107,12 +110,12 @@ class ZfsNode(ExecuteNode):
     def get_dataset(self, name, force_exists=None):
         """get a ZfsDataset() object from name. stores objects internally to enable caching"""

-        return self.__datasets.setdefault(name, ZfsDataset(self, name))
+        return self.__datasets.setdefault(name, ZfsDataset(self, name, force_exists))

-    def reset_progress(self):
-        """reset progress output counters"""
-        self._progress_total_bytes = 0
-        self._progress_start_time = time.time()
+    # def reset_progress(self):
+    #     """reset progress output counters"""
+    #     self._progress_total_bytes = 0
+    #     self._progress_start_time = time.time()

     def parse_zfs_progress(self, line, hide_errors, prefix):
         """try to parse progress output of zfs recv -Pv, and don't show it as error to the user"""
@@ -132,9 +135,15 @@ class ZfsNode(ExecuteNode):
         # actual useful info
         if len(progress_fields) >= 3:
             if progress_fields[0] == 'full' or progress_fields[0] == 'size':
+                # Reset the total bytes and start the timer again (otherwise the MB/s
+                # counter gets confused)
                 self._progress_total_bytes = int(progress_fields[2])
+                self._progress_start_time = time.time()
             elif progress_fields[0] == 'incremental':
+                # Reset the total bytes and start the timer again (otherwise the MB/s
+                # counter gets confused)
                 self._progress_total_bytes = int(progress_fields[3])
+                self._progress_start_time = time.time()
             elif progress_fields[1].isnumeric():
                 bytes_ = int(progress_fields[1])
                 if self._progress_total_bytes:
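The parser keys off the first tab-separated field: with -P, zfs send first emits a header line describing the stream size ("full"/"size" or "incremental"), then periodic progress lines whose second field is a byte count. A hedged sketch of the dispatch, with sample lines that approximate the parsable output:

def parse_progress(line, state):
    """Dispatch one tab-separated `zfs send -Pv` line (sketch)."""
    fields = line.rstrip("\n").split("\t")
    if len(fields) >= 3 and fields[0] in ("full", "size"):
        state["total"] = int(fields[2])      # stream size header
    elif len(fields) >= 4 and fields[0] == "incremental":
        state["total"] = int(fields[3])      # incremental stream header
    elif len(fields) >= 2 and fields[1].isnumeric():
        state["sent"] = int(fields[1])       # periodic progress line
    return state

state = {}
parse_progress("full\tpool/fs@snap\t1048576", state)
parse_progress("17:35:01\t524288\tpool/fs@snap", state)
assert state == {"total": 1048576, "sent": 524288}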
@@ -174,7 +183,7 @@ class ZfsNode(ExecuteNode):
         self.logger.debug("{} {}".format(self.description, txt))

     def consistent_snapshot(self, datasets, snapshot_name, min_changed_bytes, pre_snapshot_cmds=[],
-                            post_snapshot_cmds=[]):
+                            post_snapshot_cmds=[], set_snapshot_properties=[]):
         """create a consistent (atomic) snapshot of specified datasets, per pool.
         """

@@ -212,6 +221,8 @@ class ZfsNode(ExecuteNode):
         # create consistent snapshot per pool
         for (pool_name, snapshots) in pools.items():
             cmd = ["zfs", "snapshot"]
+            for snapshot_property in set_snapshot_properties:
+                cmd += ['-o', snapshot_property]

             cmd.extend(map(lambda snapshot_: str(snapshot_), snapshots))

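Because all of a pool's snapshots are created by one atomic `zfs snapshot` invocation, each `-o property=value` pair applies to every snapshot in that batch. A small sketch of the command assembly (values are illustrative):

def snapshot_cmd(snapshots, properties):
    """Build one atomic `zfs snapshot` per pool-batch (sketch)."""
    cmd = ["zfs", "snapshot"]
    for prop in properties:
        cmd += ["-o", prop]
    cmd.extend(str(s) for s in snapshots)
    return cmd

print(snapshot_cmd(["rpool/a@bak-1", "rpool/b@bak-1"], ["com.example:note=nightly"]))
# ['zfs', 'snapshot', '-o', 'com.example:note=nightly', 'rpool/a@bak-1', 'rpool/b@bak-1']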
@@ -226,10 +237,10 @@ class ZfsNode(ExecuteNode):
             except Exception as e:
                 pass

-    def selected_datasets(self, property_name, exclude_received, exclude_paths, exclude_unchanged, min_change):
+    def selected_datasets(self, property_name, exclude_received, exclude_paths, exclude_unchanged):
         """determine filesystems that should be backed up by looking at the special autobackup-property, systemwide

-        returns: list of ZfsDataset
+        returns: ( list of selected ZfsDataset, list of excluded ZfsDataset )
         """

         self.debug("Getting selected datasets")
@@ -240,8 +251,10 @@ class ZfsNode(ExecuteNode):
             property_name
         ])

+
         # The return list of selected ZfsDatasets:
         selected_filesystems = []
+        excluded_filesystems = []

         # list of sources, used to resolve inherited sources
         sources = {}
@@ -261,9 +274,14 @@ class ZfsNode(ExecuteNode):
                 source = raw_source

             # determine it
-            if dataset.is_selected(value=value, source=source, inherited=inherited, exclude_received=exclude_received,
-                                   exclude_paths=exclude_paths, exclude_unchanged=exclude_unchanged,
-                                   min_change=min_change):
-                selected_filesystems.append(dataset)
-
-        return selected_filesystems
+            selected = dataset.is_selected(value=value, source=source, inherited=inherited, exclude_received=exclude_received,
+                                           exclude_paths=exclude_paths, exclude_unchanged=exclude_unchanged)
+
+            if selected == True:
+                selected_filesystems.append(dataset)
+            elif selected == False:
+                excluded_filesystems.append(dataset)
+            # returns None when no property is set.
+
+        return (selected_filesystems, excluded_filesystems)

63
zfs_autobackup/util.py
Normal file
@@ -0,0 +1,63 @@
+
+# NOTE: surprisingly, sha1 via python3 is faster than the native sha1sum utility, even in the way we use it below!
+import os
+import platform
+import sys
+from datetime import datetime
+
+
+def tmp_name(suffix=""):
+    """create temporary name unique to this process and node. always returns the same result during the same execution"""
+
+    # we could use uuids but those are ugly and confusing
+    name = "{}-{}-{}".format(
+        os.path.basename(sys.argv[0]).replace(" ", "_"),
+        platform.node(),
+        os.getpid())
+    name = name + suffix
+    return name
+
+
+def get_tmp_clone_name(snapshot):
+    pool = snapshot.zfs_node.get_pool(snapshot)
+    return pool.name + "/" + tmp_name()
+
+
+def output_redir():
+    """use this after a BrokenPipeError to prevent further exceptions.
+    Redirects stdout/err to /dev/null
+    """
+
+    devnull = os.open(os.devnull, os.O_WRONLY)
+    os.dup2(devnull, sys.stdout.fileno())
+    os.dup2(devnull, sys.stderr.fileno())
+
+
+def sigpipe_handler(sig, stack):
+    # redir output so we don't get more SIGPIPEs during cleanup (which may try to write to stdout)
+    output_redir()
+    # deb('redir')
+
+
+# def check_output():
+#     """make sure stdout still functions. if it's broken, this will trigger a SIGPIPE which will be handled by the sigpipe_handler."""
+#     try:
+#         print(" ")
+#         sys.stdout.flush()
+#     except Exception as e:
+#         pass
+
+
+# def deb(txt):
+#     with open('/tmp/debug.log', 'a') as fh:
+#         fh.write("DEB: " + txt + "\n")
+
+
+# This should be the only source of truth for the current datetime.
+# This function will be mocked during unit testing.
+
+datetime_now_mock = None
+
+
+def datetime_now(utc):
+    if datetime_now_mock is None:
+        return (datetime.utcnow() if utc else datetime.now())
+    else:
+        return datetime_now_mock
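Routing every "now" through this single module-level hook keeps timestamp logic deterministic under test: a test pins the clock by assigning datetime_now_mock. A sketch of how a test might use it (the import path assumes the module is importable as zfs_autobackup.util; the scaffolding is illustrative):

from datetime import datetime
from zfs_autobackup import util

util.datetime_now_mock = datetime(2024, 1, 1, 12, 0, 0)
try:
    assert util.datetime_now(utc=True) == datetime(2024, 1, 1, 12, 0, 0)
finally:
    util.datetime_now_mock = None  # restore the real clock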