Compare commits
528 Commits
...v3.2-alpha
| SHA1 |
|---|
| 89ed1e012d | |||
| ff9beae427 | |||
| 302a9ecd86 | |||
| c0086f8953 | |||
| ddd82b935b | |||
| 51d6731aa8 | |||
| 36f2b672bd | |||
| 81a785b360 | |||
| 670532ef31 | |||
| dd55ca4079 | |||
| f66957d867 | |||
| 69975b37fb | |||
| c299626d18 | |||
| 7b4f10080f | |||
| 787e3dba9c | |||
| 86d504722c | |||
| 6791bc4abd | |||
| db5186bf38 | |||
| d2b183bb27 | |||
| 033fcf68f7 | |||
| 14d45667de | |||
| f2a3221911 | |||
| 8baee52ab1 | |||
| d114f63f29 | |||
| b36b64cc94 | |||
| 5a70172a50 | |||
| f635e8cd67 | |||
| 0e362e5d89 | |||
| f2ab2938b0 | |||
| 2d96d13125 | |||
| 883984fda3 | |||
| db2625b08c | |||
| e11c332808 | |||
| 07cb7cfad4 | |||
| 7b4a986f13 | |||
| be2474bb1c | |||
| 81e7cd940c | |||
| 0b4448798e | |||
| b1689f5066 | |||
| dcb9cdac44 | |||
| 9dc280abad | |||
| 6b8c683315 | |||
| 66e123849b | |||
| 7325e1e351 | |||
| 9f4ea51622 | |||
| 8c1058a808 | |||
| d9e759a3eb | |||
| 46457b3aca | |||
| 59f7ccc352 | |||
| 578fb1be4b | |||
| f9b16c050b | |||
| 2ba6fe5235 | |||
| 8e2c91735a | |||
| d57e3922a0 | |||
| 4b25dd76f1 | |||
| 2843781aa6 | |||
| ce987328d9 | |||
| 9a902f0f38 | |||
| ee2c074539 | |||
| 77f1c16414 | |||
| c5363a1538 | |||
| 119225ba5b | |||
| 84437ee1d0 | |||
| 1286bfafd0 | |||
| 9fc2703638 | |||
| 01dc65af96 | |||
| 082153e0ce | |||
| 77f5474447 | |||
| 55ff14f1d8 | |||
| 2acd26b304 | |||
| ec9459c1d2 | |||
| 233fd83ded | |||
| 37c24e092c | |||
| b2bf11382c | |||
| 19b918044e | |||
| 67d9240e7b | |||
| 1a5e4a9cdd | |||
| 31f8c359ff | |||
| b50b7b7563 | |||
| 37f91e1e08 | |||
| a2f3aee5b1 | |||
| 75d0a3cc7e | |||
| 98c55e2aa8 | |||
| d478e22111 | |||
| 3a4953fbc5 | |||
| 8d4e041a9c | |||
| 8725d56bc9 | |||
| ab0bfdbf4e | |||
| ea9012e476 | |||
| 97e3c110b3 | |||
| 9264e8de6d | |||
| 830ccf1bd4 | |||
| a389e4c81c | |||
| 36a66fbafc | |||
| b70c9986c7 | |||
| 664ea32c96 | |||
| 30f30babea | |||
| 5e04aabf37 | |||
| 59d53e9664 | |||
| 171f0ac5ad | |||
| 0ce3bf1297 | |||
| c682665888 | |||
| 086cfe570b | |||
| 521d1078bd | |||
| 8ea178af1f | |||
| 3e39e1553e | |||
| f0cc2bca2a | |||
| 59b0c23a20 | |||
| 401a3f73cc | |||
| 8ec5ed2f4f | |||
| 8318b2f9bf | |||
| 72b97ab2e8 | |||
| a16a038f0e | |||
| fc0da9d380 | |||
| 31be12c0bf | |||
| 176f04b302 | |||
| 7696d8c16d | |||
| 190a73ec10 | |||
| 2bf015e127 | |||
| 671eda7386 | |||
| 3d4b26cec3 | |||
| c0ea311e18 | |||
| b7b2723b2e | |||
| ec1d3ff93e | |||
| 352d5e6094 | |||
| 488ff6f551 | |||
| f52b8bbf58 | |||
| e47d461999 | |||
| a920744b1e | |||
| 63f423a201 | |||
| db6523f3c0 | |||
| 6b172dce2d | |||
| 85d493469d | |||
| bef3be4955 | |||
| f9719ba87e | |||
| 4b97f789df | |||
| ed7cd41ad7 | |||
| 62e19d97c2 | |||
| 594a2664c4 | |||
| d8fbc96be6 | |||
| 61bb590112 | |||
| 86ea5e49f4 | |||
| 01642365c7 | |||
| 4910b1dfb5 | |||
| 966df73d2f | |||
| 69ed827c0d | |||
| e79f6ac157 | |||
| 59efd070a1 | |||
| 80c1bdad1c | |||
| cf72de7c28 | |||
| 686bb48bda | |||
| 6a48b8a2a9 | |||
| 477b66c342 | |||
| a4155f970e | |||
| 0c9d14bf32 | |||
| 1f5955ccec | |||
| 1b94a849db | |||
| 98c40c6df5 | |||
| b479ab9c98 | |||
| a0fb205e75 | |||
| d3ce222921 | |||
| 36e134eb75 | |||
| 628cd75941 | |||
| 1da14c5c3b | |||
| c83d0fcff2 | |||
| 573af341b8 | |||
| a64168bee2 | |||
| c678ae5f9a | |||
| e95967db53 | |||
| 29e6c056d1 | |||
| deadbe9383 | |||
| 5cbec2e06f | |||
| 66d284f183 | |||
| ae64fd6e99 | |||
| 305bd3008d | |||
| 17fec7d1ee | |||
| f5b0a4f272 | |||
| 06c8416771 | |||
| 4f9b7b6cef | |||
| 0214584e4c | |||
| b6627eb389 | |||
| 48f1f6ec5d | |||
| e33e47c10c | |||
| 01f0078ccf | |||
| 9fad773bfb | |||
| 7493a0bc55 | |||
| 0649f42d66 | |||
| 6fefadf884 | |||
| ce05e1ba4c | |||
| 35584149ff | |||
| 427f74d2f0 | |||
| 9b2c321fe7 | |||
| d02a6df0f3 | |||
| 7fb5a7db92 | |||
| 64e53952fe | |||
| b7ef6c9528 | |||
| b7b3e785ce | |||
| 50070bc70f | |||
| 0fb0faccae | |||
| ab77b91d4e | |||
| bbe7a112fd | |||
| 8a09a49951 | |||
| 8092b08e7f | |||
| 075c96bf47 | |||
| 2cbfa0f38a | |||
| 47c50583c0 | |||
| e40eb71f39 | |||
| fab3bf3b3e | |||
| 1afe2407fa | |||
| 3550100099 | |||
| 9e2a6dba3d | |||
| b31b74aa92 | |||
| 222568ad31 | |||
| 35f739b8dd | |||
| 52f9e0d810 | |||
| 7bbf041a70 | |||
| b6796ded84 | |||
| 930bf6cf50 | |||
| fcc8470758 | |||
| fde4a5ed6a | |||
| 12c45f95c3 | |||
| 10e7e5b95f | |||
| 656b435a7f | |||
| 1c1c6647f1 | |||
| 39514de86a | |||
| 49f6e36749 | |||
| 371de417a4 | |||
| 6965c04dc6 | |||
| 9e645e9237 | |||
| fa372799f5 | |||
| da55436863 | |||
| 4d0db0b5d3 | |||
| 75f5e0ee9f | |||
| d0ab60168b | |||
| b48726185c | |||
| 74da005870 | |||
| 6e0664ad8e | |||
| f508e72f5e | |||
| 4918a2c055 | |||
| e65d1ac860 | |||
| fd7015b77a | |||
| f524845dbb | |||
| 51c15ec618 | |||
| 9fe13a4207 | |||
| 7b8b536d53 | |||
| 122035dfef | |||
| 7b278be0b9 | |||
| cc1a9a3d72 | |||
| eaad31e8b4 | |||
| 470b4aaf55 | |||
| fc3026abdc | |||
| 0b1081e87f | |||
| 8699ec5c69 | |||
| cba6470500 | |||
| d08f7bf3c1 | |||
| d19cb2c842 | |||
| f2b284c407 | |||
| a6cdd4b89e | |||
| 8176326126 | |||
| ad2542e930 | |||
| b926c86a7b | |||
| 915a29b36e | |||
| f363142926 | |||
| 27c598344b | |||
| ce817eb05c | |||
| b97bde3f6d | |||
| fa14dcdce1 | |||
| c34bf22f4e | |||
| 01425e735d | |||
| 56a2f26dfa | |||
| 8729fcac74 | |||
| 6151096dc8 | |||
| 1e8b02db28 | |||
| 6d69c8f2b4 | |||
| fc853622dd | |||
| 37a9f49d8d | |||
| ff33f46cb8 | |||
| e1610b6874 | |||
| 10c45affd7 | |||
| 4d12b8da5f | |||
| 58e098324e | |||
| 1ffd9a15a3 | |||
| e54c275685 | |||
| ee4fade2e4 | |||
| c5f1a87c40 | |||
| 2fe854905d | |||
| 1c86c6f866 | |||
| 8bb9769a8b | |||
| 3ef7c32237 | |||
| c254ad3b82 | |||
| fb7da316f8 | |||
| fedae35221 | |||
| 1cb26d48b6 | |||
| 87e0599130 | |||
| 252086e2e6 | |||
| 4d15f29b5b | |||
| 3bc37d143c | |||
| 4dc4bdbba5 | |||
| d2fe9b9ec7 | |||
| 2143d22ae5 | |||
| 138c913e58 | |||
| 2305fdf033 | |||
| 797fb23baa | |||
| 82c7ac5e53 | |||
| 293ab1d075 | |||
| 50e94baf4e | |||
| 47bd4ed490 | |||
| 0d26420b15 | |||
| 3e243a0916 | |||
| 499ccc6fd0 | |||
| ca294f9dd6 | |||
| 9772fc80cf | |||
| 83905c4614 | |||
| 7a3c309123 | |||
| 022dc1b7fc | |||
| 136289b4d6 | |||
| 5bf49cf19e | |||
| 735938eded | |||
| d83fa2f97f | |||
| b0db6d13cc | |||
| c7762d8163 | |||
| bc17825582 | |||
| b22113aad4 | |||
| 4e1bfd8cba | |||
| 0388026f94 | |||
| b718e282b1 | |||
| b6fb07a436 | |||
| 4f78f0cd22 | |||
| 2fa95f098b | |||
| c864e5ffad | |||
| 6f6a2ceee2 | |||
| 0813a8cef6 | |||
| 55f491915a | |||
| 04971f2f29 | |||
| e1344dd9da | |||
| ea390df6f6 | |||
| 9be1f334cb | |||
| de877362c9 | |||
| 9b1254a6d9 | |||
| c110943f20 | |||
| e94eb11f63 | |||
| 0d498e3f44 | |||
| dd301dc422 | |||
| 9e6d90adfe | |||
| a6b688c976 | |||
| 10f1290ad9 | |||
| b51eefa139 | |||
| 805d7e3536 | |||
| 8f0472e8f5 | |||
| 002aa6a731 | |||
| 8a960389d1 | |||
| c7cd73ae1f | |||
| c8c1d0fd27 | |||
| c090979f3e | |||
| 3a4062c983 | |||
| bcf73c6e5c | |||
| 9cf5ce188a | |||
| a226309ce5 | |||
| 231f41e195 | |||
| 7c1546fb49 | |||
| b1dd2b55f8 | |||
| 4ed53eb03f | |||
| 6f8c73b87f | |||
| ee03da2f9b | |||
| e737d0a79f | |||
| cbd281c79d | |||
| dfd38985d1 | |||
| f1c15cec18 | |||
| 1bc35f5812 | |||
| 805a3147b5 | |||
| 944435cbd1 | |||
| 022a7d75e2 | |||
| 14ac525525 | |||
| 3a45951361 | |||
| 2a300bbcba | |||
| bdeb4c40fa | |||
| e8b90abfde | |||
| 1d9c25d3b4 | |||
| 56d7f8c754 | |||
| ef5bca3de1 | |||
| 3b2a19d492 | |||
| d2314c0143 | |||
| f3a80991c9 | |||
| bd3321e879 | |||
| 55e18cc613 | |||
| 9d5534c11e | |||
| 93d0823c82 | |||
| 0285eb31a7 | |||
| 4c4cd36f9f | |||
| 9e8c6f7732 | |||
| f305f00d91 | |||
| 3e06a8e2fa | |||
| 241716cf6d | |||
| 1b7f7fd140 | |||
| 2e3ce2168d | |||
| ef5a164774 | |||
| a41644c84f | |||
| 0e9f68d754 | |||
| 3ceb815432 | |||
| c3630ded45 | |||
| bf1f0fc35b | |||
| 6c168ff867 | |||
| 383a3669db | |||
| d8fa868ce1 | |||
| 1dd683c1e2 | |||
| c6d45e4441 | |||
| 24164fe215 | |||
| ec95bd127e | |||
| 932ff7abe2 | |||
| e1e6a87eba | |||
| 11608fdea7 | |||
| ec1628a8ff | |||
| 4f9a84536e | |||
| a215b0fedc | |||
| 793f87d6a9 | |||
| 0b996f239f | |||
| 881e6f7b89 | |||
| b3c41cb943 | |||
| 15a7528fbc | |||
| 1ad93da8cc | |||
| 17445ec54a | |||
| 8d3c58572b | |||
| 07a150618a | |||
| 71270f8de6 | |||
| 067f3b92d1 | |||
| c98137ad42 | |||
| a8d4c110ec | |||
| 72b6213410 | |||
| 467b0588c9 | |||
| 48ff1f7d2f | |||
| 29078b7c04 | |||
| 678012b255 | |||
| 90b147aa13 | |||
| 1511642509 | |||
| a6878e1037 | |||
| 403ccb0a05 | |||
| e455b42825 | |||
| 0313876811 | |||
| 4e525d97be | |||
| b56e1d1a84 | |||
| f114114993 | |||
| d367d9aa98 | |||
| ff55a6d413 | |||
| d80a636b12 | |||
| 3fd80c9307 | |||
| 70eda7a9a7 | |||
| 71f2d1aa43 | |||
| b6fe4edb1c | |||
| 9ae57a270f | |||
| 48a55ebb5e | |||
| 2a219fdcc5 | |||
| e7919489fb | |||
| 17882449e0 | |||
| 47337d5706 | |||
| c4bbce6fda | |||
| 8763850917 | |||
| a589b2bf24 | |||
| 1e9227869a | |||
| 9d594305e3 | |||
| 87d0354a67 | |||
| 5fd92874e8 | |||
| 66d7beb7ac | |||
| 98b3902b4c | |||
| 73214d4d2b | |||
| f259d01ec3 | |||
| 5f5e2a8433 | |||
| 66727c55b0 | |||
| 673db7c014 | |||
| 637963c046 | |||
| 2d11229c26 | |||
| 8fbbb59055 | |||
| 052890a7e0 | |||
| 34d0c5d67b | |||
| 63d2091712 | |||
| ebbc05d52b | |||
| bf985998b3 | |||
| 6ff3cec0e1 | |||
| 823616d455 | |||
| dd1476331b | |||
| 058a189aa5 | |||
| 04cc860db3 | |||
| 96741ac843 | |||
| f5c8e558a3 | |||
| 1af1c351bb | |||
| aed5d6f8a6 | |||
| e83c297f92 | |||
| d24cc5ba7b | |||
| 91cf07f47d | |||
| 57874e8e3e | |||
| fb1f0d90ad | |||
| 5abd371329 | |||
| 62b9d0ba39 | |||
| 5e8c7fa968 | |||
| 27f2397843 | |||
| afae972040 | |||
| 71f23fede1 | |||
| 5cb98589bf | |||
| 6b50460542 | |||
| b98ffec10c | |||
| b97eed404a | |||
| fe39f42a9d | |||
| 9ee5b2545c | |||
| 71a394cfc7 | |||
| bfc36ac87f | |||
| 1cbf92cabc | |||
| d12bff05ab | |||
| ad47b26f56 | |||
| f38da17592 | |||
| d973905303 | |||
| 82465acd5b | |||
| 514131d67c | |||
| dfcae1613b | |||
| 67b21b4015 | |||
| 3907c850a6 | |||
| 3b9b96243b | |||
| 54235f455a | |||
| c176b968a9 | |||
| 921f7df0a5 | |||
| edee598cf8 | |||
| 80b3272f0f | |||
| 617e0fb69b | |||
| 46a85fd170 | |||
| 1f59229419 | |||
| fcd98e2d87 | |||
| dd8b2442ec | |||
| 765dbf124a | |||
| cae8ec3e70 | |||
| 441a323fb2 |
.github/FUNDING.yml (vendored, new file)

```diff
@@ -0,0 +1,5 @@
+# These are supported funding model platforms
+
+github: psy0rz
+ko_fi: psy0rz
+custom: https://paypal.me/psy0rz
```
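For context, GitHub's FUNDING.yml accepts a fixed set of platform keys beyond the three used above. A minimal sketch of a few of the other accepted keys; the handles below are hypothetical placeholders, not entries from the actual file:

```yaml
# Sketch only -- other platform keys FUNDING.yml accepts.
# All handles below are hypothetical, not real accounts.
patreon: example-user
open_collective: example-project
liberapay: example-user
tidelift: pypi/example-package   # tidelift uses platform-name/package-name
```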
.github/workflows/python-publish.yml (vendored, new file)

```diff
@@ -0,0 +1,46 @@
+# This workflow will upload a Python Package using Twine when a release is created
+# For more information see: https://help.github.com/en/actions/language-and-framework-guides/using-python-with-github-actions#publishing-to-package-registries
+
+name: Upload Python Package
+
+on:
+  release:
+    types: [created]
+
+jobs:
+  deploy:
+
+    runs-on: ubuntu-latest
+
+    steps:
+    - uses: actions/checkout@v2
+
+    - name: Set up Python 3.x
+      uses: actions/setup-python@v2
+      with:
+        python-version: '3.x'
+
+    - name: Set up Python 2.x
+      uses: actions/setup-python@v2
+      with:
+        python-version: '2.x'
+
+    - name: Install dependencies 3.x
+      run: |
+        python -m pip install --upgrade pip
+        pip3 install setuptools wheel twine
+
+    - name: Install dependencies 2.x
+      run: |
+        python2 -m pip install --upgrade pip
+        pip2 install setuptools wheel twine
+
+    - name: Build and publish
+      env:
+        TWINE_USERNAME: ${{ secrets.TWINE_USERNAME }}
+        TWINE_PASSWORD: ${{ secrets.TWINE_PASSWORD }}
+      run: |
+        python3 setup.py sdist bdist_wheel
+        python2 setup.py sdist bdist_wheel
+        twine check dist/*
+        twine upload dist/*
```
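One design note on the trigger: as written, this job only ever fires when a release is created, so there is no way to dry-run the packaging steps from the Actions tab. A minimal sketch of how the trigger block could additionally allow manual runs — `workflow_dispatch` is a standard Actions trigger, but adding it here is an illustration, not part of the diff:

```yaml
# Hypothetical variant of the trigger block above -- not part of the diff.
# workflow_dispatch adds a "Run workflow" button for manual dry-runs.
on:
  release:
    types: [created]
  workflow_dispatch: {}
```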
.github/workflows/regression.yml (vendored, new file)

```diff
@@ -0,0 +1,76 @@
+name: Regression tests
+
+on: ["push", "pull_request"]
+
+jobs:
+
+  ubuntu20:
+    runs-on: ubuntu-20.04
+
+    steps:
+    - name: Checkout
+      uses: actions/checkout@v2.3.4
+
+    - name: Prepare
+      run: sudo apt update && sudo apt install zfsutils-linux lzop pigz zstd gzip xz-utils lz4 mbuffer && sudo -H pip3 install coverage unittest2 mock==3.0.5 coveralls
+
+    - name: Regression test
+      run: sudo -E ./tests/run_tests
+
+    - name: Coveralls
+      env:
+        GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
+      run: coveralls --service=github || true
+
+  ubuntu18:
+    runs-on: ubuntu-18.04
+
+    steps:
+    - name: Checkout
+      uses: actions/checkout@v2.3.4
+
+    - name: Prepare
+      run: sudo apt update && sudo apt install zfsutils-linux python3-setuptools lzop pigz zstd gzip xz-utils liblz4-tool mbuffer && sudo -H pip3 install coverage unittest2 mock==3.0.5 coveralls
+
+    - name: Regression test
+      run: sudo -E ./tests/run_tests
+
+    - name: Coveralls
+      env:
+        GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
+      run: coveralls --service=github || true
+
+  ubuntu18_python2:
+    runs-on: ubuntu-18.04
+
+    steps:
+    - name: Checkout
+      uses: actions/checkout@v2.3.4
+
+    - name: Set up Python 2.x
+      uses: actions/setup-python@v2
+      with:
+        python-version: '2.x'
+
+    - name: Prepare
+      run: sudo apt update && sudo apt install zfsutils-linux python-setuptools lzop pigz zstd gzip xz-utils liblz4-tool mbuffer && sudo -H pip install coverage unittest2 mock==3.0.5 coveralls colorama
+
+    - name: Regression test
+      run: sudo -E ./tests/run_tests
+
+    - name: Coveralls
+      env:
+        GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
+        COVERALLS_REPO_TOKEN: ${{ secrets.GITHUB_TOKEN }}
+      run: coveralls --service=github || true
```
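The ubuntu20 and ubuntu18 jobs differ only in runner image and apt package list, which is the classic case for a strategy matrix. A minimal sketch of how the two Python 3 jobs could be folded together — this is an alternative form, not part of the diff, and the Prepare step is elided because the package lists differ per OS:

```yaml
# Sketch only -- one possible matrix form of the two Python 3 jobs above.
jobs:
  regression:
    strategy:
      matrix:
        os: [ubuntu-20.04, ubuntu-18.04]
    runs-on: ${{ matrix.os }}
    steps:
      - name: Checkout
        uses: actions/checkout@v2.3.4
      # Prepare step omitted here: the apt package lists differ per OS.
      - name: Regression test
        run: sudo -E ./tests/run_tests
```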
.gitignore (vendored, new file)

```diff
@@ -0,0 +1,14 @@
+.vscode/settings.json
+token
+tokentest
+dist/
+build/
+zfs_autobackup.egg-info
+.eggs/
+__pycache__
+.coverage
+*.pyc
+python2.env
+venv
+.idea
+password.sh
```
838
LICENSE
838
LICENSE
@ -1,281 +1,622 @@
|
|||||||
GNU GENERAL PUBLIC LICENSE
|
GNU GENERAL PUBLIC LICENSE
|
||||||
Version 2, June 1991
|
Version 3, 29 June 2007
|
||||||
|
|
||||||
Copyright (C) 1989, 1991 Free Software Foundation, Inc., <http://fsf.org/>
|
Copyright (C) 2007 Free Software Foundation, Inc. <https://fsf.org/>
|
||||||
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
|
|
||||||
Everyone is permitted to copy and distribute verbatim copies
|
Everyone is permitted to copy and distribute verbatim copies
|
||||||
of this license document, but changing it is not allowed.
|
of this license document, but changing it is not allowed.
|
||||||
|
|
||||||
Preamble
|
Preamble
|
||||||
|
|
||||||
The licenses for most software are designed to take away your
|
The GNU General Public License is a free, copyleft license for
|
||||||
freedom to share and change it. By contrast, the GNU General Public
|
software and other kinds of works.
|
||||||
License is intended to guarantee your freedom to share and change free
|
|
||||||
software--to make sure the software is free for all its users. This
|
The licenses for most software and other practical works are designed
|
||||||
General Public License applies to most of the Free Software
|
to take away your freedom to share and change the works. By contrast,
|
||||||
Foundation's software and to any other program whose authors commit to
|
the GNU General Public License is intended to guarantee your freedom to
|
||||||
using it. (Some other Free Software Foundation software is covered by
|
share and change all versions of a program--to make sure it remains free
|
||||||
the GNU Lesser General Public License instead.) You can apply it to
|
software for all its users. We, the Free Software Foundation, use the
|
||||||
|
GNU General Public License for most of our software; it applies also to
|
||||||
|
any other work released this way by its authors. You can apply it to
|
||||||
your programs, too.
|
your programs, too.
|
||||||
|
|
||||||
When we speak of free software, we are referring to freedom, not
|
When we speak of free software, we are referring to freedom, not
|
||||||
price. Our General Public Licenses are designed to make sure that you
|
price. Our General Public Licenses are designed to make sure that you
|
||||||
have the freedom to distribute copies of free software (and charge for
|
have the freedom to distribute copies of free software (and charge for
|
||||||
this service if you wish), that you receive source code or can get it
|
them if you wish), that you receive source code or can get it if you
|
||||||
if you want it, that you can change the software or use pieces of it
|
want it, that you can change the software or use pieces of it in new
|
||||||
in new free programs; and that you know you can do these things.
|
free programs, and that you know you can do these things.
|
||||||
|
|
||||||
To protect your rights, we need to make restrictions that forbid
|
To protect your rights, we need to prevent others from denying you
|
||||||
anyone to deny you these rights or to ask you to surrender the rights.
|
these rights or asking you to surrender the rights. Therefore, you have
|
||||||
These restrictions translate to certain responsibilities for you if you
|
certain responsibilities if you distribute copies of the software, or if
|
||||||
distribute copies of the software, or if you modify it.
|
you modify it: responsibilities to respect the freedom of others.
|
||||||
|
|
||||||
For example, if you distribute copies of such a program, whether
|
For example, if you distribute copies of such a program, whether
|
||||||
gratis or for a fee, you must give the recipients all the rights that
|
gratis or for a fee, you must pass on to the recipients the same
|
||||||
you have. You must make sure that they, too, receive or can get the
|
freedoms that you received. You must make sure that they, too, receive
|
||||||
source code. And you must show them these terms so they know their
|
or can get the source code. And you must show them these terms so they
|
||||||
rights.
|
know their rights.
|
||||||
|
|
||||||
We protect your rights with two steps: (1) copyright the software, and
|
Developers that use the GNU GPL protect your rights with two steps:
|
||||||
(2) offer you this license which gives you legal permission to copy,
|
(1) assert copyright on the software, and (2) offer you this License
|
||||||
distribute and/or modify the software.
|
giving you legal permission to copy, distribute and/or modify it.
|
||||||
|
|
||||||
Also, for each author's protection and ours, we want to make certain
|
For the developers' and authors' protection, the GPL clearly explains
|
||||||
that everyone understands that there is no warranty for this free
|
that there is no warranty for this free software. For both users' and
|
||||||
software. If the software is modified by someone else and passed on, we
|
authors' sake, the GPL requires that modified versions be marked as
|
||||||
want its recipients to know that what they have is not the original, so
|
changed, so that their problems will not be attributed erroneously to
|
||||||
that any problems introduced by others will not reflect on the original
|
authors of previous versions.
|
||||||
authors' reputations.
|
|
||||||
|
|
||||||
Finally, any free program is threatened constantly by software
|
Some devices are designed to deny users access to install or run
|
||||||
patents. We wish to avoid the danger that redistributors of a free
|
modified versions of the software inside them, although the manufacturer
|
||||||
program will individually obtain patent licenses, in effect making the
|
can do so. This is fundamentally incompatible with the aim of
|
||||||
program proprietary. To prevent this, we have made it clear that any
|
protecting users' freedom to change the software. The systematic
|
||||||
patent must be licensed for everyone's free use or not licensed at all.
|
pattern of such abuse occurs in the area of products for individuals to
|
||||||
|
use, which is precisely where it is most unacceptable. Therefore, we
|
||||||
|
have designed this version of the GPL to prohibit the practice for those
|
||||||
|
products. If such problems arise substantially in other domains, we
|
||||||
|
stand ready to extend this provision to those domains in future versions
|
||||||
|
of the GPL, as needed to protect the freedom of users.
|
||||||
|
|
||||||
|
Finally, every program is threatened constantly by software patents.
|
||||||
|
States should not allow patents to restrict development and use of
|
||||||
|
software on general-purpose computers, but in those that do, we wish to
|
||||||
|
avoid the special danger that patents applied to a free program could
|
||||||
|
make it effectively proprietary. To prevent this, the GPL assures that
|
||||||
|
patents cannot be used to render the program non-free.
|
||||||
|
|
||||||
The precise terms and conditions for copying, distribution and
|
The precise terms and conditions for copying, distribution and
|
||||||
modification follow.
|
modification follow.
|
||||||
|
|
||||||
GNU GENERAL PUBLIC LICENSE
|
TERMS AND CONDITIONS
|
||||||
TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION
|
|
||||||
|
|
||||||
0. This License applies to any program or other work which contains
|
0. Definitions.
|
||||||
a notice placed by the copyright holder saying it may be distributed
|
|
||||||
under the terms of this General Public License. The "Program", below,
|
|
||||||
refers to any such program or work, and a "work based on the Program"
|
|
||||||
means either the Program or any derivative work under copyright law:
|
|
||||||
that is to say, a work containing the Program or a portion of it,
|
|
||||||
either verbatim or with modifications and/or translated into another
|
|
||||||
language. (Hereinafter, translation is included without limitation in
|
|
||||||
the term "modification".) Each licensee is addressed as "you".
|
|
||||||
|
|
||||||
Activities other than copying, distribution and modification are not
|
"This License" refers to version 3 of the GNU General Public License.
|
||||||
covered by this License; they are outside its scope. The act of
|
|
||||||
running the Program is not restricted, and the output from the Program
|
|
||||||
is covered only if its contents constitute a work based on the
|
|
||||||
Program (independent of having been made by running the Program).
|
|
||||||
Whether that is true depends on what the Program does.
|
|
||||||
|
|
||||||
1. You may copy and distribute verbatim copies of the Program's
|
"Copyright" also means copyright-like laws that apply to other kinds of
|
||||||
source code as you receive it, in any medium, provided that you
|
works, such as semiconductor masks.
|
||||||
conspicuously and appropriately publish on each copy an appropriate
|
|
||||||
copyright notice and disclaimer of warranty; keep intact all the
|
|
||||||
notices that refer to this License and to the absence of any warranty;
|
|
||||||
and give any other recipients of the Program a copy of this License
|
|
||||||
along with the Program.
|
|
||||||
|
|
||||||
You may charge a fee for the physical act of transferring a copy, and
|
"The Program" refers to any copyrightable work licensed under this
|
||||||
you may at your option offer warranty protection in exchange for a fee.
|
License. Each licensee is addressed as "you". "Licensees" and
|
||||||
|
"recipients" may be individuals or organizations.
|
||||||
|
|
||||||
2. You may modify your copy or copies of the Program or any portion
|
To "modify" a work means to copy from or adapt all or part of the work
|
||||||
of it, thus forming a work based on the Program, and copy and
|
in a fashion requiring copyright permission, other than the making of an
|
||||||
distribute such modifications or work under the terms of Section 1
|
exact copy. The resulting work is called a "modified version" of the
|
||||||
above, provided that you also meet all of these conditions:
|
earlier work or a work "based on" the earlier work.
|
||||||
|
|
||||||
a) You must cause the modified files to carry prominent notices
|
A "covered work" means either the unmodified Program or a work based
|
||||||
stating that you changed the files and the date of any change.
|
on the Program.
|
||||||
|
|
||||||
b) You must cause any work that you distribute or publish, that in
|
To "propagate" a work means to do anything with it that, without
|
||||||
whole or in part contains or is derived from the Program or any
|
permission, would make you directly or secondarily liable for
|
||||||
part thereof, to be licensed as a whole at no charge to all third
|
infringement under applicable copyright law, except executing it on a
|
||||||
parties under the terms of this License.
|
computer or modifying a private copy. Propagation includes copying,
|
||||||
|
distribution (with or without modification), making available to the
|
||||||
|
public, and in some countries other activities as well.
|
||||||
|
|
||||||
c) If the modified program normally reads commands interactively
|
To "convey" a work means any kind of propagation that enables other
|
||||||
when run, you must cause it, when started running for such
|
parties to make or receive copies. Mere interaction with a user through
|
||||||
interactive use in the most ordinary way, to print or display an
|
a computer network, with no transfer of a copy, is not conveying.
|
||||||
announcement including an appropriate copyright notice and a
|
|
||||||
notice that there is no warranty (or else, saying that you provide
|
|
||||||
a warranty) and that users may redistribute the program under
|
|
||||||
these conditions, and telling the user how to view a copy of this
|
|
||||||
License. (Exception: if the Program itself is interactive but
|
|
||||||
does not normally print such an announcement, your work based on
|
|
||||||
the Program is not required to print an announcement.)
|
|
||||||
|
|
||||||
These requirements apply to the modified work as a whole. If
|
An interactive user interface displays "Appropriate Legal Notices"
|
||||||
identifiable sections of that work are not derived from the Program,
|
to the extent that it includes a convenient and prominently visible
|
||||||
and can be reasonably considered independent and separate works in
|
feature that (1) displays an appropriate copyright notice, and (2)
|
||||||
themselves, then this License, and its terms, do not apply to those
|
tells the user that there is no warranty for the work (except to the
|
||||||
sections when you distribute them as separate works. But when you
|
extent that warranties are provided), that licensees may convey the
|
||||||
distribute the same sections as part of a whole which is a work based
|
work under this License, and how to view a copy of this License. If
|
||||||
on the Program, the distribution of the whole must be on the terms of
|
the interface presents a list of user commands or options, such as a
|
||||||
this License, whose permissions for other licensees extend to the
|
menu, a prominent item in the list meets this criterion.
|
||||||
entire whole, and thus to each and every part regardless of who wrote it.
|
|
||||||
|
|
||||||
Thus, it is not the intent of this section to claim rights or contest
|
1. Source Code.
|
||||||
your rights to work written entirely by you; rather, the intent is to
|
|
||||||
exercise the right to control the distribution of derivative or
|
|
||||||
collective works based on the Program.
|
|
||||||
|
|
||||||
In addition, mere aggregation of another work not based on the Program
|
The "source code" for a work means the preferred form of the work
|
||||||
with the Program (or with a work based on the Program) on a volume of
|
for making modifications to it. "Object code" means any non-source
|
||||||
a storage or distribution medium does not bring the other work under
|
form of a work.
|
||||||
the scope of this License.
|
|
||||||
|
|
||||||
3. You may copy and distribute the Program (or a work based on it,
|
A "Standard Interface" means an interface that either is an official
|
||||||
under Section 2) in object code or executable form under the terms of
|
standard defined by a recognized standards body, or, in the case of
|
||||||
Sections 1 and 2 above provided that you also do one of the following:
|
interfaces specified for a particular programming language, one that
|
||||||
|
is widely used among developers working in that language.
|
||||||
|
|
||||||
a) Accompany it with the complete corresponding machine-readable
|
The "System Libraries" of an executable work include anything, other
|
||||||
source code, which must be distributed under the terms of Sections
|
than the work as a whole, that (a) is included in the normal form of
|
||||||
1 and 2 above on a medium customarily used for software interchange; or,
|
packaging a Major Component, but which is not part of that Major
|
||||||
|
Component, and (b) serves only to enable use of the work with that
|
||||||
|
Major Component, or to implement a Standard Interface for which an
|
||||||
|
implementation is available to the public in source code form. A
|
||||||
|
"Major Component", in this context, means a major essential component
|
||||||
|
(kernel, window system, and so on) of the specific operating system
|
||||||
|
(if any) on which the executable work runs, or a compiler used to
|
||||||
|
produce the work, or an object code interpreter used to run it.
|
||||||
|
|
||||||
b) Accompany it with a written offer, valid for at least three
|
The "Corresponding Source" for a work in object code form means all
|
||||||
years, to give any third party, for a charge no more than your
|
the source code needed to generate, install, and (for an executable
|
||||||
cost of physically performing source distribution, a complete
|
work) run the object code and to modify the work, including scripts to
|
||||||
machine-readable copy of the corresponding source code, to be
|
control those activities. However, it does not include the work's
|
||||||
distributed under the terms of Sections 1 and 2 above on a medium
|
System Libraries, or general-purpose tools or generally available free
|
||||||
customarily used for software interchange; or,
|
programs which are used unmodified in performing those activities but
|
||||||
|
which are not part of the work. For example, Corresponding Source
|
||||||
|
includes interface definition files associated with source files for
|
||||||
|
the work, and the source code for shared libraries and dynamically
|
||||||
|
linked subprograms that the work is specifically designed to require,
|
||||||
|
such as by intimate data communication or control flow between those
|
||||||
|
subprograms and other parts of the work.
|
||||||
|
|
||||||
c) Accompany it with the information you received as to the offer
|
The Corresponding Source need not include anything that users
|
||||||
to distribute corresponding source code. (This alternative is
|
can regenerate automatically from other parts of the Corresponding
|
||||||
allowed only for noncommercial distribution and only if you
|
Source.
|
||||||
received the program in object code or executable form with such
|
|
||||||
an offer, in accord with Subsection b above.)
|
|
||||||
|
|
||||||
The source code for a work means the preferred form of the work for
|
The Corresponding Source for a work in source code form is that
|
||||||
making modifications to it. For an executable work, complete source
|
same work.
|
||||||
code means all the source code for all modules it contains, plus any
|
|
||||||
associated interface definition files, plus the scripts used to
|
|
||||||
control compilation and installation of the executable. However, as a
|
|
||||||
special exception, the source code distributed need not include
|
|
||||||
anything that is normally distributed (in either source or binary
|
|
||||||
form) with the major components (compiler, kernel, and so on) of the
|
|
||||||
operating system on which the executable runs, unless that component
|
|
||||||
itself accompanies the executable.
|
|
||||||
|
|
||||||
If distribution of executable or object code is made by offering
|
2. Basic Permissions.
|
||||||
access to copy from a designated place, then offering equivalent
|
|
||||||
access to copy the source code from the same place counts as
|
|
||||||
distribution of the source code, even though third parties are not
|
|
||||||
compelled to copy the source along with the object code.
|
|
||||||
|
|
||||||
4. You may not copy, modify, sublicense, or distribute the Program
|
All rights granted under this License are granted for the term of
|
||||||
except as expressly provided under this License. Any attempt
|
copyright on the Program, and are irrevocable provided the stated
|
||||||
otherwise to copy, modify, sublicense or distribute the Program is
|
conditions are met. This License explicitly affirms your unlimited
|
||||||
void, and will automatically terminate your rights under this License.
|
permission to run the unmodified Program. The output from running a
|
||||||
However, parties who have received copies, or rights, from you under
|
covered work is covered by this License only if the output, given its
|
||||||
this License will not have their licenses terminated so long as such
|
content, constitutes a covered work. This License acknowledges your
|
||||||
parties remain in full compliance.
|
rights of fair use or other equivalent, as provided by copyright law.
|
||||||
|
|
||||||
5. You are not required to accept this License, since you have not
|
You may make, run and propagate covered works that you do not
|
||||||
signed it. However, nothing else grants you permission to modify or
|
convey, without conditions so long as your license otherwise remains
|
||||||
distribute the Program or its derivative works. These actions are
|
in force. You may convey covered works to others for the sole purpose
|
||||||
prohibited by law if you do not accept this License. Therefore, by
|
of having them make modifications exclusively for you, or provide you
|
||||||
modifying or distributing the Program (or any work based on the
|
with facilities for running those works, provided that you comply with
|
||||||
Program), you indicate your acceptance of this License to do so, and
|
the terms of this License in conveying all material for which you do
|
||||||
all its terms and conditions for copying, distributing or modifying
|
not control copyright. Those thus making or running the covered works
|
||||||
the Program or works based on it.
|
for you must do so exclusively on your behalf, under your direction
|
||||||
|
and control, on terms that prohibit them from making any copies of
|
||||||
|
your copyrighted material outside their relationship with you.
|
||||||
|
|
||||||
6. Each time you redistribute the Program (or any work based on the
|
Conveying under any other circumstances is permitted solely under
|
||||||
Program), the recipient automatically receives a license from the
|
the conditions stated below. Sublicensing is not allowed; section 10
|
||||||
original licensor to copy, distribute or modify the Program subject to
|
makes it unnecessary.
|
||||||
these terms and conditions. You may not impose any further
|
|
||||||
restrictions on the recipients' exercise of the rights granted herein.
|
3. Protecting Users' Legal Rights From Anti-Circumvention Law.
|
||||||
You are not responsible for enforcing compliance by third parties to
|
|
||||||
|
No covered work shall be deemed part of an effective technological
|
||||||
|
measure under any applicable law fulfilling obligations under article
|
||||||
|
11 of the WIPO copyright treaty adopted on 20 December 1996, or
|
||||||
|
similar laws prohibiting or restricting circumvention of such
|
||||||
|
measures.
|
||||||
|
|
||||||
|
When you convey a covered work, you waive any legal power to forbid
|
||||||
|
circumvention of technological measures to the extent such circumvention
|
||||||
|
is effected by exercising rights under this License with respect to
|
||||||
|
the covered work, and you disclaim any intention to limit operation or
|
||||||
|
modification of the work as a means of enforcing, against the work's
|
||||||
|
users, your or third parties' legal rights to forbid circumvention of
|
||||||
|
technological measures.
|
||||||
|
|
||||||
|
4. Conveying Verbatim Copies.
|
||||||
|
|
||||||
|
You may convey verbatim copies of the Program's source code as you
|
||||||
|
receive it, in any medium, provided that you conspicuously and
|
||||||
|
appropriately publish on each copy an appropriate copyright notice;
|
||||||
|
keep intact all notices stating that this License and any
|
||||||
|
non-permissive terms added in accord with section 7 apply to the code;
|
||||||
|
keep intact all notices of the absence of any warranty; and give all
|
||||||
|
recipients a copy of this License along with the Program.
|
||||||
|
|
||||||
|
You may charge any price or no price for each copy that you convey,
|
||||||
|
and you may offer support or warranty protection for a fee.
|
||||||
|
|
||||||
|
5. Conveying Modified Source Versions.
|
||||||
|
|
||||||
|
You may convey a work based on the Program, or the modifications to
|
||||||
|
produce it from the Program, in the form of source code under the
|
||||||
|
terms of section 4, provided that you also meet all of these conditions:
|
||||||
|
|
||||||
|
a) The work must carry prominent notices stating that you modified
|
||||||
|
it, and giving a relevant date.
|
||||||
|
|
||||||
|
b) The work must carry prominent notices stating that it is
|
||||||
|
released under this License and any conditions added under section
|
||||||
|
7. This requirement modifies the requirement in section 4 to
|
||||||
|
"keep intact all notices".
|
||||||
|
|
||||||
|
c) You must license the entire work, as a whole, under this
|
||||||
|
License to anyone who comes into possession of a copy. This
|
||||||
|
License will therefore apply, along with any applicable section 7
|
||||||
|
additional terms, to the whole of the work, and all its parts,
|
||||||
|
regardless of how they are packaged. This License gives no
|
||||||
|
permission to license the work in any other way, but it does not
|
||||||
|
invalidate such permission if you have separately received it.
|
||||||
|
|
||||||
|
d) If the work has interactive user interfaces, each must display
|
||||||
|
Appropriate Legal Notices; however, if the Program has interactive
|
||||||
|
interfaces that do not display Appropriate Legal Notices, your
|
||||||
|
work need not make them do so.
|
||||||
|
|
||||||
|
A compilation of a covered work with other separate and independent
|
||||||
|
works, which are not by their nature extensions of the covered work,
|
||||||
|
and which are not combined with it such as to form a larger program,
|
||||||
|
in or on a volume of a storage or distribution medium, is called an
|
||||||
|
"aggregate" if the compilation and its resulting copyright are not
|
||||||
|
used to limit the access or legal rights of the compilation's users
|
||||||
|
beyond what the individual works permit. Inclusion of a covered work
|
||||||
|
in an aggregate does not cause this License to apply to the other
|
||||||
|
parts of the aggregate.
|
||||||
|
|
||||||
|
6. Conveying Non-Source Forms.
|
||||||
|
|
||||||
|
You may convey a covered work in object code form under the terms
|
||||||
|
of sections 4 and 5, provided that you also convey the
|
||||||
|
machine-readable Corresponding Source under the terms of this License,
|
||||||
|
in one of these ways:
|
||||||
|
|
||||||
|
a) Convey the object code in, or embodied in, a physical product
|
||||||
|
(including a physical distribution medium), accompanied by the
|
||||||
|
Corresponding Source fixed on a durable physical medium
|
||||||
|
customarily used for software interchange.
|
||||||
|
|
||||||
|
b) Convey the object code in, or embodied in, a physical product
|
||||||
|
(including a physical distribution medium), accompanied by a
|
||||||
|
written offer, valid for at least three years and valid for as
|
||||||
|
long as you offer spare parts or customer support for that product
|
||||||
|
model, to give anyone who possesses the object code either (1) a
|
||||||
|
copy of the Corresponding Source for all the software in the
|
||||||
|
product that is covered by this License, on a durable physical
|
||||||
|
medium customarily used for software interchange, for a price no
|
||||||
|
more than your reasonable cost of physically performing this
|
||||||
|
conveying of source, or (2) access to copy the
|
||||||
|
Corresponding Source from a network server at no charge.
|
||||||
|
|
||||||
|
c) Convey individual copies of the object code with a copy of the
|
||||||
|
written offer to provide the Corresponding Source. This
|
||||||
|
alternative is allowed only occasionally and noncommercially, and
|
||||||
|
only if you received the object code with such an offer, in accord
|
||||||
|
with subsection 6b.
|
||||||
|
|
||||||
|
d) Convey the object code by offering access from a designated
|
||||||
|
place (gratis or for a charge), and offer equivalent access to the
|
||||||
|
Corresponding Source in the same way through the same place at no
|
||||||
|
further charge. You need not require recipients to copy the
|
||||||
|
Corresponding Source along with the object code. If the place to
|
||||||
|
copy the object code is a network server, the Corresponding Source
|
||||||
|
may be on a different server (operated by you or a third party)
|
||||||
|
that supports equivalent copying facilities, provided you maintain
|
||||||
|
clear directions next to the object code saying where to find the
|
||||||
|
Corresponding Source. Regardless of what server hosts the
|
||||||
|
Corresponding Source, you remain obligated to ensure that it is
|
||||||
|
available for as long as needed to satisfy these requirements.
|
||||||
|
|
||||||
|
e) Convey the object code using peer-to-peer transmission, provided
|
||||||
|
you inform other peers where the object code and Corresponding
|
||||||
|
Source of the work are being offered to the general public at no
|
||||||
|
charge under subsection 6d.
|
||||||
|
|
||||||
|
A separable portion of the object code, whose source code is excluded
|
||||||
|
from the Corresponding Source as a System Library, need not be
|
||||||
|
included in conveying the object code work.
|
||||||
|
|
||||||
|
A "User Product" is either (1) a "consumer product", which means any
|
||||||
|
tangible personal property which is normally used for personal, family,
|
||||||
|
or household purposes, or (2) anything designed or sold for incorporation
|
||||||
|
into a dwelling. In determining whether a product is a consumer product,
|
||||||
|
doubtful cases shall be resolved in favor of coverage. For a particular
|
||||||
|
product received by a particular user, "normally used" refers to a
|
||||||
|
typical or common use of that class of product, regardless of the status
|
||||||
|
of the particular user or of the way in which the particular user
|
||||||
|
actually uses, or expects or is expected to use, the product. A product
|
||||||
|
is a consumer product regardless of whether the product has substantial
|
||||||
|
commercial, industrial or non-consumer uses, unless such uses represent
|
||||||
|
the only significant mode of use of the product.
|
||||||
|
|
||||||
|
"Installation Information" for a User Product means any methods,
|
||||||
|
procedures, authorization keys, or other information required to install
|
||||||
|
and execute modified versions of a covered work in that User Product from
|
||||||
|
a modified version of its Corresponding Source. The information must
|
||||||
|
suffice to ensure that the continued functioning of the modified object
|
||||||
|
code is in no case prevented or interfered with solely because
|
||||||
|
modification has been made.
|
||||||
|
|
||||||
|
If you convey an object code work under this section in, or with, or
|
||||||
|
specifically for use in, a User Product, and the conveying occurs as
|
||||||
|
part of a transaction in which the right of possession and use of the
|
||||||
|
User Product is transferred to the recipient in perpetuity or for a
|
||||||
|
fixed term (regardless of how the transaction is characterized), the
|
||||||
|
Corresponding Source conveyed under this section must be accompanied
|
||||||
|
by the Installation Information. But this requirement does not apply
|
||||||
|
if neither you nor any third party retains the ability to install
|
||||||
|
modified object code on the User Product (for example, the work has
|
||||||
|
been installed in ROM).
|
||||||
|
|
||||||
|
The requirement to provide Installation Information does not include a
|
||||||
|
requirement to continue to provide support service, warranty, or updates
|
||||||
|
for a work that has been modified or installed by the recipient, or for
|
||||||
|
the User Product in which it has been modified or installed. Access to a
|
||||||
|
network may be denied when the modification itself materially and
|
||||||
|
adversely affects the operation of the network or violates the rules and
|
||||||
|
protocols for communication across the network.
|
||||||
|
|
||||||
|
Corresponding Source conveyed, and Installation Information provided,
|
||||||
|
in accord with this section must be in a format that is publicly
|
||||||
|
documented (and with an implementation available to the public in
|
||||||
|
source code form), and must require no special password or key for
|
||||||
|
unpacking, reading or copying.
|
||||||
|
|
||||||
|
7. Additional Terms.
|
||||||
|
|
||||||
|
"Additional permissions" are terms that supplement the terms of this
|
||||||
|
License by making exceptions from one or more of its conditions.
|
||||||
|
Additional permissions that are applicable to the entire Program shall
|
||||||
|
be treated as though they were included in this License, to the extent
|
||||||
|
that they are valid under applicable law. If additional permissions
|
||||||
|
apply only to part of the Program, that part may be used separately
|
||||||
|
under those permissions, but the entire Program remains governed by
|
||||||
|
this License without regard to the additional permissions.
|
||||||
|
|
||||||
|
When you convey a copy of a covered work, you may at your option
|
||||||
|
remove any additional permissions from that copy, or from any part of
|
||||||
|
it. (Additional permissions may be written to require their own
|
||||||
|
removal in certain cases when you modify the work.) You may place
|
||||||
|
additional permissions on material, added by you to a covered work,
|
||||||
|
for which you have or can give appropriate copyright permission.
|
||||||
|
|
||||||
|
Notwithstanding any other provision of this License, for material you
|
||||||
|
add to a covered work, you may (if authorized by the copyright holders of
|
||||||
|
that material) supplement the terms of this License with terms:
|
||||||
|
|
||||||
|
a) Disclaiming warranty or limiting liability differently from the
|
||||||
|
terms of sections 15 and 16 of this License; or
|
||||||
|
|
||||||
|
b) Requiring preservation of specified reasonable legal notices or
|
||||||
|
author attributions in that material or in the Appropriate Legal
|
||||||
|
Notices displayed by works containing it; or
|
||||||
|
|
||||||
|
c) Prohibiting misrepresentation of the origin of that material, or
|
||||||
|
requiring that modified versions of such material be marked in
|
||||||
|
reasonable ways as different from the original version; or
|
||||||
|
|
||||||
|
d) Limiting the use for publicity purposes of names of licensors or
|
||||||
|
authors of the material; or
|
||||||
|
|
||||||
|
e) Declining to grant rights under trademark law for use of some
|
||||||
|
trade names, trademarks, or service marks; or
|
||||||
|
|
||||||
|
f) Requiring indemnification of licensors and authors of that
|
||||||
|
material by anyone who conveys the material (or modified versions of
|
||||||
|
it) with contractual assumptions of liability to the recipient, for
|
||||||
|
any liability that these contractual assumptions directly impose on
|
||||||
|
those licensors and authors.
|
||||||
|
|
||||||
|
All other non-permissive additional terms are considered "further
|
||||||
|
restrictions" within the meaning of section 10. If the Program as you
|
||||||
|
received it, or any part of it, contains a notice stating that it is
|
||||||
|
governed by this License along with a term that is a further
|
||||||
|
restriction, you may remove that term. If a license document contains
|
||||||
|
a further restriction but permits relicensing or conveying under this
|
||||||
|
License, you may add to a covered work material governed by the terms
|
||||||
|
of that license document, provided that the further restriction does
|
||||||
|
not survive such relicensing or conveying.
|
||||||
|
|
||||||
|
If you add terms to a covered work in accord with this section, you
|
||||||
|
must place, in the relevant source files, a statement of the
|
||||||
|
additional terms that apply to those files, or a notice indicating
|
||||||
|
where to find the applicable terms.
|
||||||
|
|
||||||
|
Additional terms, permissive or non-permissive, may be stated in the
|
||||||
|
form of a separately written license, or stated as exceptions;
|
||||||
|
the above requirements apply either way.
|
||||||
|
|
||||||
|
8. Termination.
|
||||||
|
|
||||||
|
You may not propagate or modify a covered work except as expressly
|
||||||
|
provided under this License. Any attempt otherwise to propagate or
|
||||||
|
modify it is void, and will automatically terminate your rights under
|
||||||
|
this License (including any patent licenses granted under the third
|
||||||
|
paragraph of section 11).
|
||||||
|
|
||||||
|
However, if you cease all violation of this License, then your
|
||||||
|
license from a particular copyright holder is reinstated (a)
|
||||||
|
provisionally, unless and until the copyright holder explicitly and
|
||||||
|
finally terminates your license, and (b) permanently, if the copyright
|
||||||
|
holder fails to notify you of the violation by some reasonable means
|
||||||
|
prior to 60 days after the cessation.
|
||||||
|
|
||||||
|
Moreover, your license from a particular copyright holder is
|
||||||
|
reinstated permanently if the copyright holder notifies you of the
|
||||||
|
violation by some reasonable means, this is the first time you have
|
||||||
|
received notice of violation of this License (for any work) from that
|
||||||
|
copyright holder, and you cure the violation prior to 30 days after
|
||||||
|
your receipt of the notice.
|
||||||
|
|
||||||
|
Termination of your rights under this section does not terminate the
|
||||||
|
licenses of parties who have received copies or rights from you under
|
||||||
|
this License. If your rights have been terminated and not permanently
|
||||||
|
reinstated, you do not qualify to receive new licenses for the same
|
||||||
|
material under section 10.
|
||||||
|
|
||||||
|
9. Acceptance Not Required for Having Copies.
|
||||||
|
|
||||||
|
You are not required to accept this License in order to receive or
|
||||||
|
run a copy of the Program. Ancillary propagation of a covered work
|
||||||
|
occurring solely as a consequence of using peer-to-peer transmission
|
||||||
|
to receive a copy likewise does not require acceptance. However,
|
||||||
|
nothing other than this License grants you permission to propagate or
|
||||||
|
modify any covered work. These actions infringe copyright if you do
|
||||||
|
not accept this License. Therefore, by modifying or propagating a
|
||||||
|
covered work, you indicate your acceptance of this License to do so.
|
||||||
|
|
||||||
|
10. Automatic Licensing of Downstream Recipients.
|
||||||
|
|
||||||
|
Each time you convey a covered work, the recipient automatically
|
||||||
|
receives a license from the original licensors, to run, modify and
|
||||||
|
propagate that work, subject to this License. You are not responsible
|
||||||
|
for enforcing compliance by third parties with this License.
|
||||||
|
|
||||||
|
An "entity transaction" is a transaction transferring control of an
|
||||||
|
organization, or substantially all assets of one, or subdividing an
|
||||||
|
organization, or merging organizations. If propagation of a covered
|
||||||
|
work results from an entity transaction, each party to that
|
||||||
|
transaction who receives a copy of the work also receives whatever
|
||||||
|
licenses to the work the party's predecessor in interest had or could
|
||||||
|
give under the previous paragraph, plus a right to possession of the
|
||||||
|
Corresponding Source of the work from the predecessor in interest, if
|
||||||
|
the predecessor has it or can get it with reasonable efforts.
|
||||||
|
|
||||||
|
You may not impose any further restrictions on the exercise of the
|
||||||
|
rights granted or affirmed under this License. For example, you may
|
||||||
|
not impose a license fee, royalty, or other charge for exercise of
|
||||||
|
rights granted under this License, and you may not initiate litigation
|
||||||
|
(including a cross-claim or counterclaim in a lawsuit) alleging that
|
||||||
|
any patent claim is infringed by making, using, selling, offering for
|
||||||
|
sale, or importing the Program or any portion of it.
|
||||||
|
|
||||||
|
11. Patents.
|
||||||
|
|
||||||
|
A "contributor" is a copyright holder who authorizes use under this
|
||||||
|
License of the Program or a work on which the Program is based. The
|
||||||
|
work thus licensed is called the contributor's "contributor version".
|
||||||
|
|
||||||
|
A contributor's "essential patent claims" are all patent claims
|
||||||
|
owned or controlled by the contributor, whether already acquired or
|
||||||
|
hereafter acquired, that would be infringed by some manner, permitted
|
||||||
|
by this License, of making, using, or selling its contributor version,
|
||||||
|
but do not include claims that would be infringed only as a
|
||||||
|
consequence of further modification of the contributor version. For
|
||||||
|
purposes of this definition, "control" includes the right to grant
|
||||||
|
patent sublicenses in a manner consistent with the requirements of
|
||||||
this License.
|
this License.
|
||||||
|
|
||||||
7. If, as a consequence of a court judgment or allegation of patent
|
Each contributor grants you a non-exclusive, worldwide, royalty-free
|
||||||
infringement or for any other reason (not limited to patent issues),
|
patent license under the contributor's essential patent claims, to
|
||||||
conditions are imposed on you (whether by court order, agreement or
|
make, use, sell, offer for sale, import and otherwise run, modify and
|
||||||
|
propagate the contents of its contributor version.
|
||||||
|
|
||||||
+In the following three paragraphs, a "patent license" is any express agreement or commitment, however denominated, not to enforce a patent (such as an express permission to practice a patent or covenant not to sue for patent infringement). To "grant" such a patent license to a party means to make such an agreement or commitment not to enforce a patent against the party.

+If you convey a covered work, knowingly relying on a patent license, and the Corresponding Source of the work is not available for anyone to copy, free of charge and under the terms of this License, through a publicly available network server or other readily accessible means, then you must either (1) cause the Corresponding Source to be so available, or (2) arrange to deprive yourself of the benefit of the patent license for this particular work, or (3) arrange, in a manner consistent with the requirements of this License, to extend the patent license to downstream recipients. "Knowingly relying" means you have actual knowledge that, but for the patent license, your conveying the covered work in a country, or your recipient's use of the covered work in a country, would infringe one or more identifiable patents in that country that you have reason to believe are valid.

+If, pursuant to or in connection with a single transaction or arrangement, you convey, or propagate by procuring conveyance of, a covered work, and grant a patent license to some of the parties receiving the covered work authorizing them to use, propagate, modify or convey a specific copy of the covered work, then the patent license you grant is automatically extended to all recipients of the covered work and works based on it.

+A patent license is "discriminatory" if it does not include within the scope of its coverage, prohibits the exercise of, or is conditioned on the non-exercise of one or more of the rights that are specifically granted under this License. You may not convey a covered work if you are a party to an arrangement with a third party that is in the business of distributing software, under which you make payment to the third party based on the extent of your activity of conveying the work, and under which the third party grants, to any of the parties who would receive the covered work from you, a discriminatory patent license (a) in connection with copies of the covered work conveyed by you (or copies made from those copies), or (b) primarily for and in connection with specific products or compilations that contain the covered work, unless you entered into that arrangement, or that patent license was granted, prior to 28 March 2007.

+Nothing in this License shall be construed as excluding or limiting any implied license or other defenses to infringement that may otherwise be available to you under applicable patent law.
-  7. If, as a consequence of a court judgment or allegation of patent infringement or for any other reason (not limited to patent issues), conditions are imposed on you (whether by court order, agreement or otherwise) that contradict the conditions of this License, they do not excuse you from the conditions of this License. If you cannot distribute so as to satisfy simultaneously your obligations under this License and any other pertinent obligations, then as a consequence you may not distribute the Program at all. For example, if a patent license would not permit royalty-free redistribution of the Program by all those who receive copies directly or indirectly through you, then the only way you could satisfy both it and this License would be to refrain entirely from distribution of the Program.

+  12. No Surrender of Others' Freedom.

+If conditions are imposed on you (whether by court order, agreement or otherwise) that contradict the conditions of this License, they do not excuse you from the conditions of this License. If you cannot convey a covered work so as to satisfy simultaneously your obligations under this License and any other pertinent obligations, then as a consequence you may not convey it at all. For example, if you agree to terms that obligate you to collect a royalty for further conveying from those to whom you convey the Program, the only way you could satisfy both those terms and this License would be to refrain entirely from conveying the Program.
-If any portion of this section is held invalid or unenforceable under any particular circumstance, the balance of the section is intended to apply and the section as a whole is intended to apply in other circumstances.

-It is not the purpose of this section to induce you to infringe any patents or other property right claims or to contest validity of any such claims; this section has the sole purpose of protecting the integrity of the free software distribution system, which is implemented by public license practices. Many people have made generous contributions to the wide range of software distributed through that system in reliance on consistent application of that system; it is up to the author/donor to decide if he or she is willing to distribute software through any other system and a licensee cannot impose that choice.

+  13. Use with the GNU Affero General Public License.

+Notwithstanding any other provision of this License, you have permission to link or combine any covered work with a work licensed under version 3 of the GNU Affero General Public License into a single combined work, and to convey the resulting work. The terms of this License will continue to apply to the part which is the covered work, but the special requirements of the GNU Affero General Public License, section 13, concerning interaction through a network will apply to the combination as such.
-This section is intended to make thoroughly clear what is believed to be a consequence of the rest of this License.

-  8. If the distribution and/or use of the Program is restricted in certain countries either by patents or by copyrighted interfaces, the original copyright holder who places the Program under this License may add an explicit geographical distribution limitation excluding those countries, so that distribution is permitted only in or among countries not thus excluded. In such case, this License incorporates the limitation as if written in the body of this License.

-  9. The Free Software Foundation may publish revised and/or new versions of the General Public License from time to time. Such new versions will be similar in spirit to the present version, but may differ in detail to address new problems or concerns.

+  14. Revised Versions of this License.

+The Free Software Foundation may publish revised and/or new versions of the GNU General Public License from time to time. Such new versions will be similar in spirit to the present version, but may differ in detail to address new problems or concerns.
-Each version is given a distinguishing version number. If the Program specifies a version number of this License which applies to it and "any later version", you have the option of following the terms and conditions either of that version or of any later version published by the Free Software Foundation. If the Program does not specify a version number of this License, you may choose any version ever published by the Free Software Foundation.
+Each version is given a distinguishing version number. If the Program specifies that a certain numbered version of the GNU General Public License "or any later version" applies to it, you have the option of following the terms and conditions either of that numbered version or of any later version published by the Free Software Foundation. If the Program does not specify a version number of the GNU General Public License, you may choose any version ever published by the Free Software Foundation.
-  10. If you wish to incorporate parts of the Program into other free programs whose distribution conditions are different, write to the author to ask for permission. For software which is copyrighted by the Free Software Foundation, write to the Free Software Foundation; we sometimes make exceptions for this. Our decision will be guided by the two goals of preserving the free status of all derivatives of our free software and of promoting the sharing and reuse of software generally.

-                            NO WARRANTY

+If the Program specifies that a proxy can decide which future versions of the GNU General Public License can be used, that proxy's public statement of acceptance of a version permanently authorizes you to choose that version for the Program.

+Later license versions may give you additional or different permissions. However, no additional obligations are imposed on any author or copyright holder as a result of your choosing to follow a later version.
-  11. BECAUSE THE PROGRAM IS LICENSED FREE OF CHARGE, THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING, REPAIR OR CORRECTION.

+  15. Disclaimer of Warranty.
-  12. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY AND/OR REDISTRIBUTE THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGES.

+THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
+  16. Limitation of Liability.

+IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGES.

+  17. Interpretation of Sections 15 and 16.

+If the disclaimer of warranty and limitation of liability provided above cannot be given local legal effect according to their terms, reviewing courts shall apply local law that most closely approximates an absolute waiver of all civil liability in connection with the Program, unless a warranty or assumption of liability accompanies a copy of the Program in return for a fee.
                     END OF TERMS AND CONDITIONS
@@ -287,15 +628,15 @@ free software which everyone can redistribute and change under these terms.
 To do so, attach the following notices to the program. It is safest
 to attach them to the start of each source file to most effectively
-convey the exclusion of warranty; and each file should have at least
+state the exclusion of warranty; and each file should have at least
 the "copyright" line and a pointer to where the full notice is found.

-    {description}
-    Copyright (C) {year}  {fullname}
+    <one line to give the program's name and a brief idea of what it does.>
+    Copyright (C) <year>  <name of author>

-    This program is free software; you can redistribute it and/or modify
+    This program is free software: you can redistribute it and/or modify
     it under the terms of the GNU General Public License as published by
-    the Free Software Foundation; either version 2 of the License, or
+    the Free Software Foundation, either version 3 of the License, or
     (at your option) any later version.

     This program is distributed in the hope that it will be useful,
@@ -303,38 +644,31 @@ the "copyright" line and a pointer to where the full notice is found.
     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
     GNU General Public License for more details.

-    You should have received a copy of the GNU General Public License along
-    with this program; if not, write to the Free Software Foundation, Inc.,
-    51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+    You should have received a copy of the GNU General Public License
+    along with this program. If not, see <https://www.gnu.org/licenses/>.

 Also add information on how to contact you by electronic and paper mail.

-If the program is interactive, make it output a short notice like this
-when it starts in an interactive mode:
+If the program does terminal interaction, make it output a short
+notice like this when it starts in an interactive mode:

-    Gnomovision version 69, Copyright (C) year name of author
-    Gnomovision comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
+    <program>  Copyright (C) <year>  <name of author>
+    This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
     This is free software, and you are welcome to redistribute it
     under certain conditions; type `show c' for details.

 The hypothetical commands `show w' and `show c' should show the appropriate
-parts of the General Public License. Of course, the commands you use may
-be called something other than `show w' and `show c'; they could even be
-mouse-clicks or menu items--whatever suits your program.
+parts of the General Public License. Of course, your program's commands
+might be different; for a GUI interface, you would use an "about box".

-You should also get your employer (if you work as a programmer) or your
-school, if any, to sign a "copyright disclaimer" for the program, if
-necessary. Here is a sample; alter the names:
-
-  Yoyodyne, Inc., hereby disclaims all copyright interest in the program
-  `Gnomovision' (which makes passes at compilers) written by James Hacker.
-
-  {signature of Ty Coon}, 1 April 1989
-  Ty Coon, President of Vice
-
-This General Public License does not permit incorporating your program into
-proprietary programs. If your program is a subroutine library, you may
-consider it more useful to permit linking proprietary applications with the
-library. If this is what you want to do, use the GNU Lesser General
-Public License instead of this License.
+You should also get your employer (if you work as a programmer) or school,
+if any, to sign a "copyright disclaimer" for the program, if necessary.
+For more information on this, and how to apply and follow the GNU GPL, see
+<https://www.gnu.org/licenses/>.
+
+The GNU General Public License does not permit incorporating your program
+into proprietary programs. If your program is a subroutine library, you
+may consider it more useful to permit linking proprietary applications with
+the library. If this is what you want to do, use the GNU Lesser General
+Public License instead of this License. But first, please read
+<https://www.gnu.org/licenses/why-not-lgpl.html>.
README.md
@@ -1,251 +1,61 @@
 # ZFS autobackup

-Introduction
-============
-
-ZFS autobackup is used to periodically backup ZFS filesystems to other locations. This is done using the very efficient zfs send and receive commands.
+[](https://github.com/psy0rz/zfs_autobackup/actions?query=workflow%3A%22Regression+tests%22) [](https://coveralls.io/github/psy0rz/zfs_autobackup) [](https://pypi.org/project/zfs-autobackup/)
+
+## Introduction
-It has the following features:
-
-* Automatically selects filesystems to backup by looking at a simple ZFS property. (recursive)
-* Creates consistent snapshots. (takes all snapshots at once, atomic.)
+ZFS-autobackup tries to be the most reliable and easiest-to-use tool, while having all the features.
+
+You can use it as a **backup** tool, a **replication** tool or a **snapshot** tool.
+
+You can select what to backup by setting a custom `ZFS property`. This makes it easy to add or remove specific datasets, or to just back up your whole pool.
+
+Other settings are specified on the commandline: simply set up and test your zfs-autobackup command, fix any issues you encounter, and when you're done just copy/paste the command into a cron job or script.
+
+Since it uses plain ZFS commands, you can see what it is actually doing by specifying `--debug`. This also helps a lot if you run into a strange problem or error: you can copy-paste the failing command and play around with it on the commandline. (Something I missed in other tools.)
+
+An important feature that's missing from other tools is a reliable `--test` option: this allows you to see what zfs-autobackup will do and tune your parameters. It will do everything, except make changes to your system. (A minimal sketch follows below.)
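As a minimal sketch of that workflow (the backup name `offsite1` and the pool/dataset names are hypothetical, not taken from the project docs):

```
# mark a dataset for backup under the name "offsite1"
zfs set autobackup:offsite1=true rpool/data

# dry-run first: prints what would happen, changes nothing
zfs-autobackup --test --verbose offsite1 backuppool/backups

# the same command without --test performs the actual snapshot + send
zfs-autobackup --verbose offsite1 backuppool/backups
```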
+## Features
+
+* Works across operating systems: tested with **Linux**, **FreeBSD/FreeNAS** and **SmartOS**.
+* Low learning curve: no complex daemons or services, no additional software or networking needed. (Only read this page.)
+* Plays nicely with existing replication systems. (Like Proxmox HA.)
+* Automatically selects filesystems to backup by looking at a simple ZFS property.
+* Creates consistent snapshots. (Takes all snapshots at once, atomically.)
 * Multiple backup modes:
+  * Backup local data on the same server.
   * "push" local data to a backup-server via SSH.
   * "pull" remote data from a server via SSH and back it up locally.
-  * Backup local data on the same server.
+  * "pull+push": zero trust between source and target.
-* Can be scheduled via a simple cronjob or run directly from commandline.
+* Can be scheduled via a simple cronjob or run directly from the commandline.
-* Supports resuming of interrupted transfers. (via the zfs extensible_dataset feature)
+* ZFS encryption support: can decrypt/encrypt or even re-encrypt datasets during transfer.
-* Backups and snapshots can be named to prevent conflicts. (multiple backups from and to the same filesystems are no problem)
+* Supports sending with compression. (Using pigz, zstd etc.)
-* Always creates a new snapshot before starting.
+* IO buffering to speed up transfers.
-* Checks everything and aborts on errors.
+* Bandwidth rate limiting.
-* Ability to 'finish' aborted backups to see what goes wrong.
+* Multiple backups from and to the same datasets are no problem.
+* Resilient to errors.
+* Ability to manually 'finish' failed backups to see what is going on.
 * Easy to debug and has a test-mode. Actual unix commands are printed.
-* Keeps latest X snapshots remotely and locally. (default 30, configurable)
+* Uses progressive thinning for older snapshots.
+* Uses zfs-holds on important snapshots to prevent accidental deletion.
+* Automatic resuming of failed transfers.
+* Easy migration from existing zfs backups.
+* Gracefully handles datasets that no longer exist on the source.
+* Complete and clean logging.
 * Easy installation:
-  * Only one host needs the zfs_autobackup script. The other host just needs ssh and the zfs command.
-  * Written in python and uses zfs commands; no 3rd party dependencies or libraries.
-  * No separate config files or properties. Just one command you can copy/paste into your backup script.
+  * Just install zfs-autobackup via pip.
+  * Only needs to be installed on one side.
+  * Written in python and uses zfs commands; no special 3rd party dependencies or compiled libraries needed.
+  * No annoying config files or properties.
-Usage
-====
+## Getting started
+
+Please look at our wiki to [Get started](https://github.com/psy0rz/zfs_autobackup/wiki).
+
+# Sponsor list
+
+This project was sponsored by:
+
+* JetBrains (Provided me with a license for their whole professional product line, https://www.jetbrains.com/pycharm/ )

-```
-usage: zfs_autobackup [-h] [--ssh-source SSH_SOURCE] [--ssh-target SSH_TARGET]
-                      [--keep-source KEEP_SOURCE] [--keep-target KEEP_TARGET]
-                      [--no-snapshot] [--no-send] [--resume]
-                      [--strip-path STRIP_PATH] [--destroy-stale]
-                      [--clear-refreservation] [--clear-mountpoint]
-                      [--filter-properties FILTER_PROPERTIES] [--rollback]
-                      [--test] [--verbose] [--debug]
-                      backup_name target_fs
-
-ZFS autobackup v2.2
-
-positional arguments:
-  backup_name           Name of the backup (you should set the zfs property
-                        "autobackup:backup-name" to true on filesystems you
-                        want to backup)
-  target_fs             Target filesystem
-
-optional arguments:
-  -h, --help            show this help message and exit
-  --ssh-source SSH_SOURCE
-                        Source host to get backup from. (user@hostname)
-                        Default local.
-  --ssh-target SSH_TARGET
-                        Target host to push backup to. (user@hostname)
-                        Default local.
-  --keep-source KEEP_SOURCE
-                        Number of days to keep old snapshots on source.
-                        Default 30.
-  --keep-target KEEP_TARGET
-                        Number of days to keep old snapshots on target.
-                        Default 30.
-  --no-snapshot         don't create new snapshot (useful for finishing
-                        uncompleted backups, or cleanups)
-  --no-send             don't send snapshots (useful to only do a cleanup)
-  --resume              support resuming of interrupted transfers by using the
-                        zfs extensible_dataset feature (both zpools should
-                        have it enabled). Disadvantage is that you need to use
-                        zfs recv -A if another snapshot is created on the
-                        target during a receive. Otherwise it will keep
-                        failing.
-  --strip-path STRIP_PATH
-                        number of directories to strip from the path (use 1
-                        when cloning zones between 2 SmartOS machines)
-  --destroy-stale       Destroy stale backups that have no more snapshots. Be
-                        sure to verify the output before using this!
-  --clear-refreservation
-                        Set refreservation property to none for new
-                        filesystems. Useful when backing up SmartOS volumes.
-                        (recommended)
-  --clear-mountpoint    Sets canmount=noauto property, to prevent the received
-                        filesystem from mounting over existing filesystems.
-                        (recommended)
-  --filter-properties FILTER_PROPERTIES
-                        Filter properties when receiving filesystems. Can be
-                        specified multiple times. (Example: if you send data
-                        from Linux to FreeNAS, you should filter xattr)
-  --rollback            Rollback changes on the target before starting a
-                        backup. (normally you can prevent changes by setting
-                        the readonly property on the target_fs to on)
-  --test                don't change anything, just show what would be done
-                        (still does all read-only operations)
-  --verbose             verbose output
-  --debug               debug output (shows commands that are executed)
-```

-Backup example
-==============
-In this example we're going to backup a SmartOS machine called `smartos01` to our fileserver called `fs1`.
-
-It's important to choose a unique and consistent backup name. In this case we name our backup: `smartos01_fs1`.
-
-Select filesystems to backup
-----------------------------
-
-On the source zfs system, set the ```autobackup:smartos01_fs1``` zfs property to true:
-```
-[root@smartos01 ~]# zfs set autobackup:smartos01_fs1=true zones
-[root@smartos01 ~]# zfs get -t filesystem autobackup:smartos01_fs1
-NAME                                        PROPERTY                  VALUE  SOURCE
-zones                                       autobackup:smartos01_fs1  true   local
-zones/1eb33958-72c1-11e4-af42-ff0790f603dd  autobackup:smartos01_fs1  true   inherited from zones
-zones/3c71a6cd-6857-407c-880c-09225ce4208e  autobackup:smartos01_fs1  true   inherited from zones
-zones/3c905e49-81c0-4a5a-91c3-fc7996f97d47  autobackup:smartos01_fs1  true   inherited from zones
-...
-```

-Because we don't want to backup everything, we can exclude certain filesystems by setting the property to false:
-```
-[root@smartos01 ~]# zfs set autobackup:smartos01_fs1=false zones/backup
-[root@smartos01 ~]# zfs get -t filesystem autobackup:smartos01_fs1
-NAME                                        PROPERTY                  VALUE  SOURCE
-zones                                       autobackup:smartos01_fs1  true   local
-zones/1eb33958-72c1-11e4-af42-ff0790f603dd  autobackup:smartos01_fs1  true   inherited from zones
-...
-zones/backup                                autobackup:smartos01_fs1  false  local
-zones/backup/fs1                            autobackup:smartos01_fs1  false  inherited from zones/backup
-...
-```
-Running zfs_autobackup
-----------------------
-
-There are 2 ways to run the backup, but the end result is always the same. It's just a matter of security (trust relations between the servers) and preference.
-
-First install the ssh-key on the server that you specify with --ssh-source or --ssh-target. (A sketch of this setup follows.)
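A minimal sketch of that key setup (host name hypothetical; `ssh-keygen` and `ssh-copy-id` are the standard OpenSSH tools):

```
# on the machine that will run zfs_autobackup:
ssh-keygen -t rsa              # only if you don't have a key yet
ssh-copy-id root@1.2.3.4       # the host you pass to --ssh-source / --ssh-target
ssh root@1.2.3.4 true          # verify passwordless login works
```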
-Method 1: Run the script on the backup server and pull the data from the server specified by --ssh-source. This is usually the preferred way and prevents a hacked server from accessing the backup-data:
-```
-root@fs1:/home/psy# ./zfs_autobackup --ssh-source root@1.2.3.4 smartos01_fs1 fs1/zones/backup/zfsbackups/smartos01.server.com --verbose
-Getting selected source filesystems for backup smartos01_fs1 on root@1.2.3.4
-Selected: zones (direct selection)
-Selected: zones/1eb33958-72c1-11e4-af42-ff0790f603dd (inherited selection)
-Selected: zones/325dbc5e-2b90-11e3-8a3e-bfdcb1582a8d (inherited selection)
-...
-Ignoring: zones/backup (disabled)
-Ignoring: zones/backup/fs1 (disabled)
-...
-Creating source snapshot smartos01_fs1-20151030203738 on root@1.2.3.4
-Getting source snapshot-list from root@1.2.3.4
-Getting target snapshot-list from local
-Tranferring zones incremental backup between snapshots smartos01_fs1-20151030175345...smartos01_fs1-20151030203738
-...
-received 1.09MB stream in 1 seconds (1.09MB/sec)
-Destroying old snapshots on source
-Destroying old snapshots on target
-All done
-```
-Method 2: Run the script on the server and push the data to the backup server specified by --ssh-target:
-```
-./zfs_autobackup --ssh-target root@2.2.2.2 smartos01_fs1 fs1/zones/backup/zfsbackups/smartos01.server.com --verbose --compress
-...
-All done
-```
-Tips
-====
-
-* Set the ```readonly``` property of the target filesystem to ```on``` (see the sketch after this list). This prevents changes on the target side. If there are changes, the next backup will fail and will require a zfs rollback (by using the --rollback option, for example).
-* Use ```--clear-refreservation``` to save space on your backup server.
-* Use ```--clear-mountpoint``` to prevent the target server from mounting the backed-up filesystem in the wrong place during a reboot. If this happens on systems like SmartOS or OpenIndiana, svc://filesystem/local won't be able to mount some things and you will need to resolve these issues on the console.
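A minimal sketch of the readonly tip (using the hypothetical target path from the example above; `readonly` is a standard ZFS property):

```
# on the backup server: refuse any local modification of the received data
zfs set readonly=on fs1/zones/backup/zfsbackups
```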
-Speeding up SSH and preventing connection flooding
--------------------------------------------------
-
-Add this to your ~/.ssh/config:
-```
-Host *
-    ControlPath ~/.ssh/control-master-%r@%h:%p
-    ControlMaster auto
-    ControlPersist 3600
-```
-
-This will make all your ssh connections persistent and greatly speed up zfs_autobackup for jobs with short intervals.
-
-Thanks @mariusvw :)
-Specifying ssh port or options
-------------------------------
-
-The correct way to do this is by creating ~/.ssh/config:
-```
-Host smartos04
-    Hostname 1.2.3.4
-    Port 1234
-    User root
-    Compression yes
-```
-
-This way you can just specify `smartos04` as the host. It also uses compression on slow links.
-
-Look in `man ssh_config` for many more options.
-Troubleshooting
-===============
-
-`cannot receive incremental stream: invalid backup stream`
-
-This usually means you've created a new snapshot on the target side during a backup.
-
-* Solution 1: Restart zfs_autobackup and make sure you don't use --resume. If you did use --resume, be sure to "abort" the receive on the target side with `zfs recv -A` (see the sketch after this list).
-* Solution 2: Destroy the newly created snapshot and restart zfs_autobackup.
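A minimal sketch of that abort (the dataset name is the hypothetical target from the example above; `zfs receive -A` discards the saved partial receive state):

```
# on the target side: abort the interrupted resumable receive
zfs receive -A fs1/zones/backup/zfsbackups/smartos01.server.com/zones
```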
-`internal error: Invalid argument`
-
-In some cases (Linux -> FreeBSD) this means certain properties are not fully supported on the target system.
-
-Try using something like: `--filter-properties xattr`
-Restore example
-===============
-
-Restoring can be done with simple zfs commands. For example, use this to restore a specific SmartOS disk image to a temporary restore location:
-
-```
-root@fs1:/home/psy# zfs send fs1/zones/backup/zfsbackups/smartos01.server.com/zones/a3abd6c8-24c6-4125-9e35-192e2eca5908-disk0@smartos01_fs1-20160110000003 | ssh root@2.2.2.2 "zfs recv zones/restore"
-```
-
-After that you can rename the disk image from the temporary location to the location of a new SmartOS machine you've created. (A sketch of the rename follows.)
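A minimal sketch of that last step (`<new-vm-uuid>` is a placeholder for the UUID of the machine you created; `zfs rename` is a standard ZFS operation):

```
# move the restored disk image into place for the new machine
zfs rename zones/restore zones/<new-vm-uuid>-disk0
```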
-Monitoring with Zabbix-jobs
-===========================
-
-You can monitor backups by using my zabbix-jobs script. (https://github.com/psy0rz/stuff/tree/master/zabbix-jobs)
-
-Put this command directly after the zfs_autobackup command in your cronjob:
-```
-zabbix-job-status backup_smartos01_fs1 daily $?
-```
-
-This will update the zabbix server with the exit code and will also alert you if the job didn't run for more than 2 days. (A sketch of a full cron entry follows.)
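As a minimal sketch, the combined /etc/crontab entry could look like this (schedule and names hypothetical):

```
# daily pull backup, then report the exit status to zabbix
0 3 * * * root /usr/local/bin/zfs_autobackup --ssh-source root@1.2.3.4 smartos01_fs1 fs1/zones/backup/zfsbackups/smartos01.server.com; zabbix-job-status backup_smartos01_fs1 daily $?
```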
doc/thinner.odg (new file; binary file not shown)

doc/thinner.png (new file; binary file not shown; 22 KiB)

requirements.txt (new file)
@@ -0,0 +1,6 @@
colorama
argparse
coverage
python-coveralls
unittest2
mock
scripts/release (new executable file)
@@ -0,0 +1,18 @@
#!/bin/bash

set -e

rm -rf dist
python3 setup.py sdist bdist_wheel
# python2 setup.py sdist bdist_wheel

gnome-keyring-daemon
source token

python3 -m twine check dist/*
python3 -m twine upload dist/*

git push --tags
scripts/releasetest (new executable file)
@@ -0,0 +1,16 @@
#!/bin/bash

set -e

rm -rf dist
python3 setup.py sdist bdist_wheel
# python2 setup.py sdist bdist_wheel

gnome-keyring-daemon
source tokentest

python3 -m twine check dist/*
python3 -m twine upload --repository-url https://test.pypi.org/legacy/ dist/* --verbose
setup.py (new file)
@@ -0,0 +1,38 @@
import setuptools
from zfs_autobackup.ZfsAutobackup import ZfsAutobackup
import os

with open("README.md", "r") as fh:
    long_description = fh.read()

setuptools.setup(
    name="zfs_autobackup",
    version=ZfsAutobackup.VERSION,
    author="Edwin Eefting",
    author_email="edwin@datux.nl",
    description="ZFS autobackup is used to periodically backup ZFS filesystems to other locations. It tries to be the most friendly to use and easy to debug ZFS backup tool.",
    long_description=long_description,
    long_description_content_type="text/markdown",
    url="https://github.com/psy0rz/zfs_autobackup",
    entry_points={
        'console_scripts':
            [
                'zfs-autobackup = zfs_autobackup.ZfsAutobackup:cli',
                'zfs-autoverify = zfs_autobackup.ZfsAutoverify:cli',
            ]
    },
    packages=setuptools.find_packages(),
    classifiers=[
        "Programming Language :: Python :: 2",
        "Programming Language :: Python :: 3",
        "License :: OSI Approved :: GNU General Public License v3 (GPLv3)",
        "Operating System :: OS Independent",
    ],
    python_requires='>=2.7',
    install_requires=[
        "colorama",
        "argparse"
    ]
)
tests/autoruntests (new executable file)
@@ -0,0 +1,6 @@
#!/bin/bash

#NOTE: run from top directory

find tests/*.py zfs_autobackup/*.py| entr -r ./tests/run_tests $@
tests/basetest.py (new file)
@@ -0,0 +1,96 @@
# To run tests as non-root, use this hack:
# chmod 4755 /usr/sbin/zpool /usr/sbin/zfs

import subprocess
import random

#default test stuff
import unittest2
import time
from pprint import *
from zfs_autobackup.ZfsAutobackup import *
from zfs_autobackup.ZfsAutoverify import *
from mock import *
import contextlib
import sys
import io

TEST_POOLS="test_source1 test_source2 test_target1"
ZFS_USERSPACE= subprocess.check_output("dpkg-query -W zfsutils-linux |cut -f2", shell=True).decode('utf-8').rstrip()
ZFS_KERNEL= subprocess.check_output("modinfo zfs|grep ^version |sed 's/.* //'", shell=True).decode('utf-8').rstrip()

print("###########################################")
print("#### Unit testing against:")
print("#### Python :"+sys.version.replace("\n", " "))
print("#### ZFS userspace :"+ZFS_USERSPACE)
print("#### ZFS kernel :"+ZFS_KERNEL)
print("#############################################")

# for python2 compatibility
if sys.version_info.major==2:
    OutputIO=io.BytesIO
else:
    OutputIO=io.StringIO

# for python2 compatibility (python 3 has this already)
@contextlib.contextmanager
def redirect_stdout(target):
    original = sys.stdout
    try:
        sys.stdout = target
        yield
    finally:
        sys.stdout = original

# for python2 compatibility (python 3 has this already)
@contextlib.contextmanager
def redirect_stderr(target):
    original = sys.stderr
    try:
        sys.stderr = target
        yield
    finally:
        sys.stderr = original

def shelltest(cmd):
    """execute and print result as nice copypastable string for unit tests (adds extra newlines on top/bottom)"""
    ret=(subprocess.check_output("SUDO_ASKPASS=./password.sh sudo -A "+cmd , shell=True).decode('utf-8'))

    print("######### result of: {}".format(cmd))
    print(ret)
    print("#########")
    ret='\n'+ret
    return(ret)

def prepare_zpools():
    print("Preparing zfs filesystems...")

    #need ram blockdevice
    subprocess.check_call("modprobe brd rd_size=512000", shell=True)

    #remove old stuff
    subprocess.call("zpool destroy test_source1 2>/dev/null", shell=True)
    subprocess.call("zpool destroy test_source2 2>/dev/null", shell=True)
    subprocess.call("zpool destroy test_target1 2>/dev/null", shell=True)

    #create pools
    subprocess.check_call("zpool create test_source1 /dev/ram0", shell=True)
    subprocess.check_call("zpool create test_source2 /dev/ram1", shell=True)
    subprocess.check_call("zpool create test_target1 /dev/ram2", shell=True)

    #create test structure
    subprocess.check_call("zfs create -p test_source1/fs1/sub", shell=True)
    subprocess.check_call("zfs create -p test_source2/fs2/sub", shell=True)
    subprocess.check_call("zfs create -p test_source2/fs3/sub", shell=True)
    subprocess.check_call("zfs set autobackup:test=true test_source1/fs1", shell=True)
    subprocess.check_call("zfs set autobackup:test=child test_source2/fs2", shell=True)

    print("Prepare done")
tests/run_test (new executable file)
@@ -0,0 +1,5 @@
#!/bin/bash

#run one test. start from main directory

python -m unittest discover tests $@ -vvvf
tests/run_tests (new executable file)
@@ -0,0 +1,28 @@
#!/bin/bash

SCRIPTDIR=`dirname $0`

#cd $SCRIPTDIR || exit 1

if [ "$USER" != "root" ]; then
    echo "Need root to do proper zfs testing"
    exit 1
fi

# test needs ssh access to localhost for testing
if ! [ -e /root/.ssh/id_rsa ]; then
    ssh-keygen -t rsa -f /root/.ssh/id_rsa -P '' || exit 1
    cat /root/.ssh/id_rsa.pub >> /root/.ssh/authorized_keys || exit 1
    ssh -oStrictHostKeyChecking=no localhost true || exit 1
fi

coverage run --branch --source zfs_autobackup -m unittest discover -vvvvf $SCRIPTDIR $@ 2>&1
EXIT=$?

echo
coverage report

exit $EXIT
tests/test_cmdpipe.py (new file)
@@ -0,0 +1,123 @@
from basetest import *
from zfs_autobackup.CmdPipe import CmdPipe,CmdItem


class TestCmdPipe(unittest2.TestCase):

    def test_single(self):
        """single process stdout and stderr"""
        p=CmdPipe(readonly=False, inp=None)
        err=[]
        out=[]
        p.add(CmdItem(["ls", "-d", "/", "/", "/nonexistent"], stderr_handler=lambda line: err.append(line), exit_handler=lambda exit_code: self.assertEqual(exit_code,2)))
        executed=p.execute(stdout_handler=lambda line: out.append(line))

        self.assertEqual(err, ["ls: cannot access '/nonexistent': No such file or directory"])
        self.assertEqual(out, ["/","/"])
        self.assertIsNone(executed)

    def test_input(self):
        """test stdinput"""
        p=CmdPipe(readonly=False, inp="test")
        err=[]
        out=[]
        p.add(CmdItem(["cat"], stderr_handler=lambda line: err.append(line), exit_handler=lambda exit_code: self.assertEqual(exit_code,0)))
        executed=p.execute(stdout_handler=lambda line: out.append(line))

        self.assertEqual(err, [])
        self.assertEqual(out, ["test"])
        self.assertIsNone(executed)

    def test_pipe(self):
        """test piped"""
        p=CmdPipe(readonly=False)
        err1=[]
        err2=[]
        err3=[]
        out=[]
        p.add(CmdItem(["echo", "test"], stderr_handler=lambda line: err1.append(line), exit_handler=lambda exit_code: self.assertEqual(exit_code,0)))
        p.add(CmdItem(["tr", "e", "E"], stderr_handler=lambda line: err2.append(line), exit_handler=lambda exit_code: self.assertEqual(exit_code,0)))
        p.add(CmdItem(["tr", "t", "T"], stderr_handler=lambda line: err3.append(line), exit_handler=lambda exit_code: self.assertEqual(exit_code,0)))
        executed=p.execute(stdout_handler=lambda line: out.append(line))

        self.assertEqual(err1, [])
        self.assertEqual(err2, [])
        self.assertEqual(err3, [])
        self.assertEqual(out, ["TEsT"])
        self.assertIsNone(executed)

        #test str representation as well
        self.assertEqual(str(p), "(echo test) | (tr e E) | (tr t T)")

    def test_pipeerrors(self):
        """test piped stderrs """
        p=CmdPipe(readonly=False)
        err1=[]
        err2=[]
        err3=[]
        out=[]
        p.add(CmdItem(["ls", "/nonexistent1"], stderr_handler=lambda line: err1.append(line), exit_handler=lambda exit_code: self.assertEqual(exit_code,2)))
        p.add(CmdItem(["ls", "/nonexistent2"], stderr_handler=lambda line: err2.append(line), exit_handler=lambda exit_code: self.assertEqual(exit_code,2)))
        p.add(CmdItem(["ls", "/nonexistent3"], stderr_handler=lambda line: err3.append(line), exit_handler=lambda exit_code: self.assertEqual(exit_code,2)))
        executed=p.execute(stdout_handler=lambda line: out.append(line))

        self.assertEqual(err1, ["ls: cannot access '/nonexistent1': No such file or directory"])
        self.assertEqual(err2, ["ls: cannot access '/nonexistent2': No such file or directory"])
        self.assertEqual(err3, ["ls: cannot access '/nonexistent3': No such file or directory"])
        self.assertEqual(out, [])
        self.assertIsNone(executed)

    def test_exitcode(self):
        """test piped exitcodes """
        p=CmdPipe(readonly=False)
        err1=[]
        err2=[]
        err3=[]
        out=[]
        p.add(CmdItem(["bash", "-c", "exit 1"], stderr_handler=lambda line: err1.append(line), exit_handler=lambda exit_code: self.assertEqual(exit_code,1)))
        p.add(CmdItem(["bash", "-c", "exit 2"], stderr_handler=lambda line: err2.append(line), exit_handler=lambda exit_code: self.assertEqual(exit_code,2)))
        p.add(CmdItem(["bash", "-c", "exit 3"], stderr_handler=lambda line: err3.append(line), exit_handler=lambda exit_code: self.assertEqual(exit_code,3)))
        executed=p.execute(stdout_handler=lambda line: out.append(line))

        self.assertEqual(err1, [])
        self.assertEqual(err2, [])
        self.assertEqual(err3, [])
        self.assertEqual(out, [])
        self.assertIsNone(executed)

    def test_readonly_execute(self):
        """everything readonly, just should execute"""

        p=CmdPipe(readonly=True)
        err1=[]
        err2=[]
        out=[]

        def true_exit(exit_code):
            return True

        p.add(CmdItem(["echo", "test1"], stderr_handler=lambda line: err1.append(line), exit_handler=true_exit, readonly=True))
        p.add(CmdItem(["echo", "test2"], stderr_handler=lambda line: err2.append(line), exit_handler=true_exit, readonly=True))
        executed=p.execute(stdout_handler=lambda line: out.append(line))

        self.assertEqual(err1, [])
        self.assertEqual(err2, [])
        self.assertEqual(out, ["test2"])
        self.assertTrue(executed)

    def test_readonly_skip(self):
        """one command not readonly, skip"""

        p=CmdPipe(readonly=True)
        err1=[]
        err2=[]
        out=[]
        p.add(CmdItem(["echo", "test1"], stderr_handler=lambda line: err1.append(line), readonly=False))
        p.add(CmdItem(["echo", "test2"], stderr_handler=lambda line: err2.append(line), readonly=True))
        executed=p.execute(stdout_handler=lambda line: out.append(line))

        self.assertEqual(err1, [])
        self.assertEqual(err2, [])
        self.assertEqual(out, [])
        self.assertTrue(executed)
tests/test_destroymissing.py (new file)
@@ -0,0 +1,135 @@
from basetest import *


class TestZfsNode(unittest2.TestCase):

    def setUp(self):
        prepare_zpools()
        self.longMessage=True

    def test_destroymissing(self):

        #initial backup
        with patch('time.strftime', return_value="test-19101111000000"): #1000 years in past
            self.assertFalse(ZfsAutobackup("test test_target1 --no-progress --verbose --no-holds".split(" ")).run())

        with patch('time.strftime', return_value="test-20101111000000"): #far in past
            self.assertFalse(ZfsAutobackup("test test_target1 --no-progress --verbose --no-holds --allow-empty".split(" ")).run())

        with self.subTest("Should do nothing yet"):
            with OutputIO() as buf:
                with redirect_stdout(buf):
                    self.assertFalse(ZfsAutobackup("test test_target1 --no-progress --verbose --no-snapshot --destroy-missing 0s".split(" ")).run())

                print(buf.getvalue())
                self.assertNotIn(": Destroy missing", buf.getvalue())

        with self.subTest("missing dataset of us that still has children"):

            #just deselect it so it counts as 'missing'
            shelltest("zfs set autobackup:test=child test_source1/fs1")

            with OutputIO() as buf:
                with redirect_stdout(buf), redirect_stderr(buf):
                    self.assertFalse(ZfsAutobackup("test test_target1 --no-progress --verbose --no-snapshot --destroy-missing 0s".split(" ")).run())

                print(buf.getvalue())
                #should have done the snapshot cleanup for destoy missing:
                self.assertIn("fs1@test-19101111000000: Destroying", buf.getvalue())

                self.assertIn("fs1: Destroy missing: Still has children here.", buf.getvalue())

            shelltest("zfs inherit autobackup:test test_source1/fs1")

        with self.subTest("Normal destroyed leaf"):
            shelltest("zfs destroy -r test_source1/fs1/sub")

            #wait for deadline of last snapshot
            with OutputIO() as buf:
                with redirect_stdout(buf):
                    #100y: lastest should not be old enough, while second to latest snapshot IS old enough:
                    self.assertFalse(ZfsAutobackup("test test_target1 --no-progress --verbose --no-snapshot --destroy-missing 100y".split(" ")).run())

                print(buf.getvalue())
                self.assertIn(": Waiting for deadline", buf.getvalue())

            #past deadline, destroy
            with OutputIO() as buf:
                with redirect_stdout(buf):
                    self.assertFalse(ZfsAutobackup("test test_target1 --no-progress --verbose --no-snapshot --destroy-missing 1y".split(" ")).run())

                print(buf.getvalue())
                self.assertIn("sub: Destroying", buf.getvalue())

        with self.subTest("Leaf with other snapshot still using it"):
            shelltest("zfs destroy -r test_source1/fs1")
            shelltest("zfs snapshot -r test_target1/test_source1/fs1@other1")

            with OutputIO() as buf:
                with redirect_stdout(buf):
                    self.assertFalse(ZfsAutobackup("test test_target1 --no-progress --verbose --no-snapshot --destroy-missing 0s".split(" ")).run())

                print(buf.getvalue())

                #cant finish because still in use:
                self.assertIn("fs1: Destroy missing: Still in use", buf.getvalue())

            shelltest("zfs destroy test_target1/test_source1/fs1@other1")

        with self.subTest("In use by clone"):
            shelltest("zfs clone test_target1/test_source1/fs1@test-20101111000000 test_target1/clone1")

            with OutputIO() as buf:
                with redirect_stdout(buf), redirect_stderr(buf):
                    self.assertFalse(ZfsAutobackup("test test_target1 --no-progress --verbose --no-snapshot --destroy-missing 0s".split(" ")).run())

                print(buf.getvalue())
                #now tries to destroy our own last snapshot (before the final destroy of the dataset)
                self.assertIn("fs1@test-20101111000000: Destroying", buf.getvalue())
                #but cant finish because still in use:
                self.assertIn("fs1: Error during --destroy-missing", buf.getvalue())

            shelltest("zfs destroy test_target1/clone1")

        with self.subTest("Should leave test_source1 parent"):

            with OutputIO() as buf:
                with redirect_stdout(buf), redirect_stderr(buf):
                    self.assertFalse(ZfsAutobackup("test test_target1 --no-progress --verbose --no-snapshot --destroy-missing 0s".split(" ")).run())

                print(buf.getvalue())
                #should have done the snapshot cleanup for destoy missing:
                self.assertIn("fs1: Destroying", buf.getvalue())

            with OutputIO() as buf:
                with redirect_stdout(buf), redirect_stderr(buf):
                    self.assertFalse(ZfsAutobackup("test test_target1 --no-progress --verbose --no-snapshot --destroy-missing 0s".split(" ")).run())

                print(buf.getvalue())
                #on second run it sees the dangling ex-parent but doesnt know what to do with it (since it has no own snapshot)
                self.assertIn("test_source1: Destroy missing: has no snapshots made by us.", buf.getvalue())

        #end result
        r=shelltest("zfs list -H -o name -r -t all test_target1")
        self.assertMultiLineEqual(r,"""
test_target1
test_target1/test_source1
test_target1/test_source2
test_target1/test_source2/fs2
test_target1/test_source2/fs2/sub
test_target1/test_source2/fs2/sub@test-19101111000000
test_target1/test_source2/fs2/sub@test-20101111000000
""")
tests/test_encryption.py (new file)
@@ -0,0 +1,193 @@
from zfs_autobackup.CmdPipe import CmdPipe
|
||||||
|
from basetest import *
|
||||||
|
import time
|
||||||
|
|
||||||
|
# We have to do a LOT to properly test encryption/decryption/raw transfers
|
||||||
|
#
|
||||||
|
# For every scenario we need at least:
|
||||||
|
# - plain source dataset
|
||||||
|
# - encrypted source dataset
|
||||||
|
# - plain target path
|
||||||
|
# - encrypted target path
|
||||||
|
# - do a full transfer
|
||||||
|
# - do a incremental transfer
|
||||||
|
|
||||||
|
# Scenarios:
|
||||||
|
# - Raw transfer
|
||||||
|
# - Decryption transfer (--decrypt)
|
||||||
|
# - Encryption transfer (--encrypt)
|
||||||
|
# - Re-encryption transfer (--decrypt --encrypt)
|
||||||
|
|
||||||
|
class TestZfsEncryption(unittest2.TestCase):
|
||||||
|
|
||||||
|
|
||||||
|
def setUp(self):
|
||||||
|
prepare_zpools()
|
||||||
|
|
||||||
|
try:
|
||||||
|
shelltest("zfs get encryption test_source1")
|
||||||
|
except:
|
||||||
|
self.skipTest("Encryption not supported on this ZFS version.")
|
||||||
|
|
||||||
|
def prepare_encrypted_dataset(self, key, path, unload_key=False):
|
||||||
|
|
||||||
|
# create encrypted source dataset
|
||||||
|
shelltest("rm /tmp/zfstest.key 2>/dev/null;true")
|
||||||
|
shelltest("echo {} > /tmp/zfstest.key".format(key))
|
||||||
|
shelltest("zfs create -o keylocation=file:///tmp/zfstest.key -o keyformat=passphrase -o encryption=on {}".format(path))
|
||||||
|
|
||||||
|
if unload_key:
|
||||||
|
shelltest("zfs unmount {}".format(path))
|
||||||
|
shelltest("zfs unload-key {}".format(path))
|
||||||
|
|
||||||
|
# r=shelltest("dd if=/dev/zero of=/test_source1/fs1/enc1/data.txt bs=200000 count=1")
|
||||||
|
|
||||||
|
    def test_raw(self):
        """send encrypted data unaltered (standard operation)"""

        self.prepare_encrypted_dataset("11111111", "test_source1/fs1/encryptedsource")
        self.prepare_encrypted_dataset("11111111", "test_source1/fs1/encryptedsourcekeyless", unload_key=True) # raw mode shouldn't need a key
        self.prepare_encrypted_dataset("22222222", "test_target1/encryptedtarget")

        with patch('time.strftime', return_value="test-20101111000000"):
            self.assertFalse(ZfsAutobackup("test test_target1 --verbose --no-progress --allow-empty --exclude-received".split(" ")).run())
            self.assertFalse(ZfsAutobackup("test test_target1/encryptedtarget --verbose --no-progress --no-snapshot --exclude-received".split(" ")).run())

        with patch('time.strftime', return_value="test-20101111000001"):
            self.assertFalse(ZfsAutobackup("test test_target1 --verbose --no-progress --allow-empty --exclude-received".split(" ")).run())
            self.assertFalse(ZfsAutobackup("test test_target1/encryptedtarget --verbose --no-progress --no-snapshot --exclude-received".split(" ")).run())

        r = shelltest("zfs get -r -t filesystem encryptionroot test_target1")
        self.assertMultiLineEqual(r,"""
NAME PROPERTY VALUE SOURCE
test_target1 encryptionroot - -
test_target1/encryptedtarget encryptionroot test_target1/encryptedtarget -
test_target1/encryptedtarget/test_source1 encryptionroot test_target1/encryptedtarget -
test_target1/encryptedtarget/test_source1/fs1 encryptionroot - -
test_target1/encryptedtarget/test_source1/fs1/encryptedsource encryptionroot test_target1/encryptedtarget/test_source1/fs1/encryptedsource -
test_target1/encryptedtarget/test_source1/fs1/encryptedsourcekeyless encryptionroot test_target1/encryptedtarget/test_source1/fs1/encryptedsourcekeyless -
test_target1/encryptedtarget/test_source1/fs1/sub encryptionroot - -
test_target1/encryptedtarget/test_source2 encryptionroot test_target1/encryptedtarget -
test_target1/encryptedtarget/test_source2/fs2 encryptionroot test_target1/encryptedtarget -
test_target1/encryptedtarget/test_source2/fs2/sub encryptionroot - -
test_target1/test_source1 encryptionroot - -
test_target1/test_source1/fs1 encryptionroot - -
test_target1/test_source1/fs1/encryptedsource encryptionroot test_target1/test_source1/fs1/encryptedsource -
test_target1/test_source1/fs1/encryptedsourcekeyless encryptionroot test_target1/test_source1/fs1/encryptedsourcekeyless -
test_target1/test_source1/fs1/sub encryptionroot - -
test_target1/test_source2 encryptionroot - -
test_target1/test_source2/fs2 encryptionroot - -
test_target1/test_source2/fs2/sub encryptionroot - -
""")

    def test_decrypt(self):
        """decrypt data and store unencrypted (--decrypt)"""

        self.prepare_encrypted_dataset("11111111", "test_source1/fs1/encryptedsource")
        self.prepare_encrypted_dataset("22222222", "test_target1/encryptedtarget")

        with patch('time.strftime', return_value="test-20101111000000"):
            self.assertFalse(ZfsAutobackup("test test_target1 --verbose --no-progress --decrypt --allow-empty --exclude-received".split(" ")).run())
            self.assertFalse(ZfsAutobackup("test test_target1/encryptedtarget --verbose --no-progress --decrypt --no-snapshot --exclude-received".split(" ")).run())

        with patch('time.strftime', return_value="test-20101111000001"):
            self.assertFalse(ZfsAutobackup("test test_target1 --verbose --no-progress --decrypt --allow-empty --exclude-received".split(" ")).run())
            self.assertFalse(ZfsAutobackup("test test_target1/encryptedtarget --verbose --no-progress --decrypt --no-snapshot --exclude-received".split(" ")).run())

        r = shelltest("zfs get -r -t filesystem encryptionroot test_target1")
        self.assertEqual(r, """
NAME PROPERTY VALUE SOURCE
test_target1 encryptionroot - -
test_target1/encryptedtarget encryptionroot test_target1/encryptedtarget -
test_target1/encryptedtarget/test_source1 encryptionroot test_target1/encryptedtarget -
test_target1/encryptedtarget/test_source1/fs1 encryptionroot - -
test_target1/encryptedtarget/test_source1/fs1/encryptedsource encryptionroot - -
test_target1/encryptedtarget/test_source1/fs1/sub encryptionroot - -
test_target1/encryptedtarget/test_source2 encryptionroot test_target1/encryptedtarget -
test_target1/encryptedtarget/test_source2/fs2 encryptionroot test_target1/encryptedtarget -
test_target1/encryptedtarget/test_source2/fs2/sub encryptionroot - -
test_target1/test_source1 encryptionroot - -
test_target1/test_source1/fs1 encryptionroot - -
test_target1/test_source1/fs1/encryptedsource encryptionroot - -
test_target1/test_source1/fs1/sub encryptionroot - -
test_target1/test_source2 encryptionroot - -
test_target1/test_source2/fs2 encryptionroot - -
test_target1/test_source2/fs2/sub encryptionroot - -
""")

    def test_encrypt(self):
        """send a normal dataset and store it encrypted on the other side (--encrypt), issue #60"""

        self.prepare_encrypted_dataset("11111111", "test_source1/fs1/encryptedsource")
        self.prepare_encrypted_dataset("22222222", "test_target1/encryptedtarget")

        with patch('time.strftime', return_value="test-20101111000000"):
            self.assertFalse(ZfsAutobackup("test test_target1 --verbose --no-progress --encrypt --debug --allow-empty --exclude-received".split(" ")).run())
            self.assertFalse(ZfsAutobackup("test test_target1/encryptedtarget --verbose --no-progress --encrypt --debug --no-snapshot --exclude-received".split(" ")).run())

        with patch('time.strftime', return_value="test-20101111000001"):
            self.assertFalse(ZfsAutobackup("test test_target1 --verbose --no-progress --encrypt --debug --allow-empty --exclude-received".split(" ")).run())
            self.assertFalse(ZfsAutobackup("test test_target1/encryptedtarget --verbose --no-progress --encrypt --debug --no-snapshot --exclude-received".split(" ")).run())

        r = shelltest("zfs get -r -t filesystem encryptionroot test_target1")
        self.assertEqual(r, """
NAME PROPERTY VALUE SOURCE
test_target1 encryptionroot - -
test_target1/encryptedtarget encryptionroot test_target1/encryptedtarget -
test_target1/encryptedtarget/test_source1 encryptionroot test_target1/encryptedtarget -
test_target1/encryptedtarget/test_source1/fs1 encryptionroot test_target1/encryptedtarget -
test_target1/encryptedtarget/test_source1/fs1/encryptedsource encryptionroot test_target1/encryptedtarget/test_source1/fs1/encryptedsource -
test_target1/encryptedtarget/test_source1/fs1/sub encryptionroot test_target1/encryptedtarget -
test_target1/encryptedtarget/test_source2 encryptionroot test_target1/encryptedtarget -
test_target1/encryptedtarget/test_source2/fs2 encryptionroot test_target1/encryptedtarget -
test_target1/encryptedtarget/test_source2/fs2/sub encryptionroot test_target1/encryptedtarget -
test_target1/test_source1 encryptionroot - -
test_target1/test_source1/fs1 encryptionroot - -
test_target1/test_source1/fs1/encryptedsource encryptionroot test_target1/test_source1/fs1/encryptedsource -
test_target1/test_source1/fs1/sub encryptionroot - -
test_target1/test_source2 encryptionroot - -
test_target1/test_source2/fs2 encryptionroot - -
test_target1/test_source2/fs2/sub encryptionroot - -
""")

    def test_reencrypt(self):
        """re-encrypt data (--decrypt --encrypt)"""

        self.prepare_encrypted_dataset("11111111", "test_source1/fs1/encryptedsource")
        self.prepare_encrypted_dataset("22222222", "test_target1/encryptedtarget")

        with patch('time.strftime', return_value="test-20101111000000"):
            self.assertFalse(ZfsAutobackup(
                "test test_target1 --verbose --no-progress --decrypt --encrypt --debug --allow-empty --exclude-received".split(" ")).run())
            self.assertFalse(ZfsAutobackup(
                "test test_target1/encryptedtarget --verbose --no-progress --decrypt --encrypt --debug --no-snapshot --exclude-received".split(
                    " ")).run())

        with patch('time.strftime', return_value="test-20101111000001"):
            self.assertFalse(ZfsAutobackup(
                "test test_target1 --verbose --no-progress --decrypt --encrypt --debug --allow-empty --exclude-received".split(" ")).run())
            self.assertFalse(ZfsAutobackup(
                "test test_target1/encryptedtarget --verbose --no-progress --decrypt --encrypt --debug --no-snapshot --exclude-received".split(
                    " ")).run())

        r = shelltest("zfs get -r -t filesystem encryptionroot test_target1")
        self.assertEqual(r, """
NAME PROPERTY VALUE SOURCE
test_target1 encryptionroot - -
test_target1/encryptedtarget encryptionroot test_target1/encryptedtarget -
test_target1/encryptedtarget/test_source1 encryptionroot test_target1/encryptedtarget -
test_target1/encryptedtarget/test_source1/fs1 encryptionroot test_target1/encryptedtarget -
test_target1/encryptedtarget/test_source1/fs1/encryptedsource encryptionroot test_target1/encryptedtarget -
test_target1/encryptedtarget/test_source1/fs1/sub encryptionroot test_target1/encryptedtarget -
test_target1/encryptedtarget/test_source2 encryptionroot test_target1/encryptedtarget -
test_target1/encryptedtarget/test_source2/fs2 encryptionroot test_target1/encryptedtarget -
test_target1/encryptedtarget/test_source2/fs2/sub encryptionroot test_target1/encryptedtarget -
test_target1/test_source1 encryptionroot - -
test_target1/test_source1/fs1 encryptionroot - -
test_target1/test_source1/fs1/encryptedsource encryptionroot - -
test_target1/test_source1/fs1/sub encryptionroot - -
test_target1/test_source2 encryptionroot - -
test_target1/test_source2/fs2 encryptionroot - -
test_target1/test_source2/fs2/sub encryptionroot - -
""")
148 tests/test_executenode.py Normal file
@@ -0,0 +1,148 @@
from basetest import *
from zfs_autobackup.ExecuteNode import *

print("THIS TEST REQUIRES SSH TO LOCALHOST")

class TestExecuteNode(unittest2.TestCase):

    # def setUp(self):
    #     return super().setUp()

    def basics(self, node):

        with self.subTest("simple echo"):
            self.assertEqual(node.run(["echo","test"]), ["test"])

        with self.subTest("error exit code"):
            with self.assertRaises(ExecuteError):
                node.run(["false"])

        with self.subTest("multiline without tabsplit"):
            self.assertEqual(node.run(["echo","l1c1\tl1c2\nl2c1\tl2c2"], tab_split=False), ["l1c1\tl1c2", "l2c1\tl2c2"])

        #multiline tabsplit
        with self.subTest("multiline tabsplit"):
            self.assertEqual(node.run(["echo","l1c1\tl1c2\nl2c1\tl2c2"], tab_split=True), [['l1c1', 'l1c2'], ['l2c1', 'l2c2']])

        #escaping test
        with self.subTest("escape test"):
            s="><`'\"@&$()$bla\\/.* !#test _+-={}[]|${bla} $bla"
            self.assertEqual(node.run(["echo",s]), [s])

        #return stderr as well; trigger stderr by listing something nonexistent
        with self.subTest("stderr return"):
            (stdout, stderr)=node.run(["ls", "nonexistingfile"], return_stderr=True, valid_exitcodes=[2])
            self.assertEqual(stdout,[])
            self.assertRegex(stderr[0],"nonexistingfile")

        #slow command, make sure things don't exit too early
        with self.subTest("early exit test"):
            start_time=time.time()
            self.assertEqual(node.run(["sleep","1"]), [])
            self.assertGreaterEqual(time.time()-start_time,1)

        #input a string and check it via cat
        with self.subTest("stdin input string"):
            self.assertEqual(node.run(["cat"], inp="test"), ["test"])

        #a command that wants input, while we have none, shouldn't hang forever
        with self.subTest("stdin process with inp=None (shouldn't hang)"):
            self.assertEqual(node.run(["cat"]), [])

        # let the system do the piping with an unescaped |:
        with self.subTest("system piping test"):

            #first make sure the actual | character is still properly escaped:
            self.assertEqual(node.run(["echo","|"]), ["|"])

            #now pipe
            self.assertEqual(node.run(["echo", "abc", node.PIPE, "tr", "a", "A" ]), ["Abc"])
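    # Usage sketch, based only on the behaviour exercised above: run() returns
    # stdout as a list of lines, and node.PIPE lets the shell do the piping.
    #
    #   node = ExecuteNode(ssh_to="localhost")
    #   node.run(["echo", "test"])                            # -> ["test"]
    #   node.run(["echo", "abc", node.PIPE, "tr", "a", "A"])  # -> ["Abc"]
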
    def test_basics_local(self):
        node=ExecuteNode(debug_output=True)
        self.basics(node)

    def test_basics_remote(self):
        node=ExecuteNode(ssh_to="localhost", debug_output=True)
        self.basics(node)

    ################

    def test_readonly(self):
        node=ExecuteNode(debug_output=True, readonly=True)

        self.assertEqual(node.run(["echo","test"], readonly=False), [])
        self.assertEqual(node.run(["echo","test"], readonly=True), ["test"])

    ################

    def pipe(self, nodea, nodeb):

        with self.subTest("pipe data"):
            output=nodea.run(["dd", "if=/dev/zero", "count=1000"],pipe=True)
            self.assertEqual(nodeb.run(["md5sum"], inp=output), ["816df6f64deba63b029ca19d880ee10a  -"])

        with self.subTest("exit code both ends of pipe ok"):
            output=nodea.run(["true"], pipe=True)
            nodeb.run(["true"], inp=output)

        with self.subTest("error on pipe input side"):
            with self.assertRaises(ExecuteError):
                output=nodea.run(["false"], pipe=True)
                nodeb.run(["true"], inp=output)

        with self.subTest("error on both sides, ignore exit codes"):
            output=nodea.run(["false"], pipe=True, valid_exitcodes=[])
            nodeb.run(["false"], inp=output, valid_exitcodes=[])

        with self.subTest("error on pipe output side"):
            with self.assertRaises(ExecuteError):
                output=nodea.run(["true"], pipe=True)
                nodeb.run(["false"], inp=output)

        with self.subTest("error on both sides of pipe"):
            with self.assertRaises(ExecuteError):
                output=nodea.run(["false"], pipe=True)
                nodeb.run(["false"], inp=output)

        with self.subTest("check stderr on pipe output side"):
            output=nodea.run(["true"], pipe=True, valid_exitcodes=[0])
            (stdout, stderr)=nodeb.run(["ls", "nonexistingfile"], inp=output, return_stderr=True, valid_exitcodes=[2])
            self.assertEqual(stdout,[])
            self.assertRegex(stderr[0], "nonexistingfile")

        with self.subTest("check stderr on pipe input side (should only be printed)"):
            output=nodea.run(["ls", "nonexistingfile"], pipe=True, valid_exitcodes=[2])
            (stdout, stderr)=nodeb.run(["true"], inp=output, return_stderr=True, valid_exitcodes=[0])
            self.assertEqual(stdout,[])
            self.assertEqual(stderr,[])

    def test_pipe_local_local(self):
        nodea=ExecuteNode(debug_output=True)
        nodeb=ExecuteNode(debug_output=True)
        self.pipe(nodea, nodeb)

    def test_pipe_remote_remote(self):
        nodea=ExecuteNode(ssh_to="localhost", debug_output=True)
        nodeb=ExecuteNode(ssh_to="localhost", debug_output=True)
        self.pipe(nodea, nodeb)

    def test_pipe_local_remote(self):
        nodea=ExecuteNode(debug_output=True)
        nodeb=ExecuteNode(ssh_to="localhost", debug_output=True)
        self.pipe(nodea, nodeb)

    def test_pipe_remote_local(self):
        nodea=ExecuteNode(ssh_to="localhost", debug_output=True)
        nodeb=ExecuteNode(debug_output=True)
        self.pipe(nodea, nodeb)

if __name__ == '__main__':
    unittest.main()
314 tests/test_externalfailures.py Normal file
@@ -0,0 +1,314 @@
from basetest import *

class TestExternalFailures(unittest2.TestCase):

    def setUp(self):
        prepare_zpools()
        self.longMessage = True

    # generate a resumable state
    # NOTE: this generates two resumable transfers: test_target1/test_source1/fs1 and test_target1/test_source1/fs1/sub
    def generate_resume(self):

        r = shelltest("zfs set compress=off test_source1 test_target1")

        # big change on source
        r = shelltest("dd if=/dev/zero of=/test_source1/fs1/data bs=250M count=1")

        # waste space on target
        r = shelltest("dd if=/dev/zero of=/test_target1/waste bs=250M count=1")

        # should fail and leave a resume token (if supported)
        self.assertTrue(ZfsAutobackup("test test_target1 --no-progress --verbose".split(" ")).run())

        # free up space
        r = shelltest("rm /test_target1/waste")
        # sync
        r = shelltest("zfs umount test_target1")
        r = shelltest("zfs mount test_target1")
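    # Background on what generate_resume() produces (a sketch of the ZFS
    # mechanics, not of zfs-autobackup's API): an interrupted `zfs recv -s`
    # leaves a receive_resume_token property on the partially received dataset,
    # and a later `zfs send -t <token>` continues where it stopped. The tests
    # below check that zfs-autobackup resumes such transfers, or aborts tokens
    # that have become stale.
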
    # resume initial backup
    def test_initial_resume(self):

        # initial backup, leaves resume token
        with patch('time.strftime', return_value="test-20101111000000"):
            self.generate_resume()

        # --test should resume and succeed
        with OutputIO() as buf:
            with redirect_stdout(buf):
                self.assertFalse(ZfsAutobackup("test test_target1 --no-progress --verbose --test".split(" ")).run())

            print(buf.getvalue())

            # did we really resume?
            if "0.6.5" in ZFS_USERSPACE:
                # abort this late, for better coverage
                self.skipTest("Resume not supported in this ZFS userspace version")
            else:
                self.assertIn(": resuming", buf.getvalue())

        # should resume and succeed
        with OutputIO() as buf:
            with redirect_stdout(buf):
                self.assertFalse(ZfsAutobackup("test test_target1 --no-progress --verbose".split(" ")).run())

            print(buf.getvalue())

            # did we really resume?
            if "0.6.5" in ZFS_USERSPACE:
                # abort this late, for better coverage
                self.skipTest("Resume not supported in this ZFS userspace version")
            else:
                self.assertIn(": resuming", buf.getvalue())

        r = shelltest("zfs list -H -o name -r -t all test_target1")
        self.assertMultiLineEqual(r, """
test_target1
test_target1/test_source1
test_target1/test_source1/fs1
test_target1/test_source1/fs1@test-20101111000000
test_target1/test_source1/fs1/sub
test_target1/test_source1/fs1/sub@test-20101111000000
test_target1/test_source2
test_target1/test_source2/fs2
test_target1/test_source2/fs2/sub
test_target1/test_source2/fs2/sub@test-20101111000000
""")

    # resume incremental backup
    def test_incremental_resume(self):

        # initial backup
        with patch('time.strftime', return_value="test-20101111000000"):
            self.assertFalse(ZfsAutobackup("test test_target1 --no-progress --verbose --allow-empty".split(" ")).run())

        # incremental backup leaves resume token
        with patch('time.strftime', return_value="test-20101111000001"):
            self.generate_resume()

        # --test should resume and succeed
        with OutputIO() as buf:
            with redirect_stdout(buf):
                self.assertFalse(ZfsAutobackup("test test_target1 --no-progress --verbose --test".split(" ")).run())

            print(buf.getvalue())

            # did we really resume?
            if "0.6.5" in ZFS_USERSPACE:
                # abort this late, for better coverage
                self.skipTest("Resume not supported in this ZFS userspace version")
            else:
                self.assertIn(": resuming", buf.getvalue())

        # should resume and succeed
        with OutputIO() as buf:
            with redirect_stdout(buf):
                self.assertFalse(ZfsAutobackup("test test_target1 --no-progress --verbose".split(" ")).run())

            print(buf.getvalue())

            # did we really resume?
            if "0.6.5" in ZFS_USERSPACE:
                # abort this late, for better coverage
                self.skipTest("Resume not supported in this ZFS userspace version")
            else:
                self.assertIn(": resuming", buf.getvalue())

        r = shelltest("zfs list -H -o name -r -t all test_target1")
        self.assertMultiLineEqual(r, """
test_target1
test_target1/test_source1
test_target1/test_source1/fs1
test_target1/test_source1/fs1@test-20101111000000
test_target1/test_source1/fs1@test-20101111000001
test_target1/test_source1/fs1/sub
test_target1/test_source1/fs1/sub@test-20101111000000
test_target1/test_source2
test_target1/test_source2/fs2
test_target1/test_source2/fs2/sub
test_target1/test_source2/fs2/sub@test-20101111000000
""")

    # generate an invalid resume token, and verify that it is aborted automatically
    def test_initial_resumeabort(self):

        if "0.6.5" in ZFS_USERSPACE:
            self.skipTest("Resume not supported in this ZFS userspace version")

        # initial backup, leaves resume token
        with patch('time.strftime', return_value="test-20101111000000"):
            self.generate_resume()

        # remove the corresponding source snapshot, so the token becomes invalid
        shelltest("zfs destroy test_source1/fs1@test-20101111000000")

        # NOTE: it can only abort the initial dataset if it has no subs
        shelltest("zfs destroy test_target1/test_source1/fs1/sub; true")

        # --test try again, should abort old resume
        with patch('time.strftime', return_value="test-20101111000001"):
            self.assertFalse(ZfsAutobackup("test test_target1 --no-progress --verbose --test".split(" ")).run())

        # try again, should abort old resume
        with patch('time.strftime', return_value="test-20101111000001"):
            self.assertFalse(ZfsAutobackup("test test_target1 --no-progress --verbose".split(" ")).run())

        r = shelltest("zfs list -H -o name -r -t all test_target1")
        self.assertMultiLineEqual(r, """
test_target1
test_target1/test_source1
test_target1/test_source1/fs1
test_target1/test_source1/fs1@test-20101111000001
test_target1/test_source1/fs1/sub
test_target1/test_source1/fs1/sub@test-20101111000000
test_target1/test_source2
test_target1/test_source2/fs2
test_target1/test_source2/fs2/sub
test_target1/test_source2/fs2/sub@test-20101111000000
""")

    # generate an invalid resume token, and verify that it is aborted automatically
    def test_incremental_resumeabort(self):

        if "0.6.5" in ZFS_USERSPACE:
            self.skipTest("Resume not supported in this ZFS userspace version")

        # initial backup
        with patch('time.strftime', return_value="test-20101111000000"):
            self.assertFalse(ZfsAutobackup("test test_target1 --no-progress --verbose --allow-empty".split(" ")).run())

        # incremental backup, leaves resume token
        with patch('time.strftime', return_value="test-20101111000001"):
            self.generate_resume()

        # remove the corresponding source snapshot, so the token becomes invalid
        shelltest("zfs destroy test_source1/fs1@test-20101111000001")

        # --test try again, should abort old resume
        with patch('time.strftime', return_value="test-20101111000002"):
            self.assertFalse(ZfsAutobackup("test test_target1 --no-progress --verbose --test".split(" ")).run())

        # try again, should abort old resume
        with patch('time.strftime', return_value="test-20101111000002"):
            self.assertFalse(ZfsAutobackup("test test_target1 --no-progress --verbose".split(" ")).run())

        r = shelltest("zfs list -H -o name -r -t all test_target1")
        self.assertMultiLineEqual(r, """
test_target1
test_target1/test_source1
test_target1/test_source1/fs1
test_target1/test_source1/fs1@test-20101111000000
test_target1/test_source1/fs1@test-20101111000002
test_target1/test_source1/fs1/sub
test_target1/test_source1/fs1/sub@test-20101111000000
test_target1/test_source2
test_target1/test_source2/fs2
test_target1/test_source2/fs2/sub
test_target1/test_source2/fs2/sub@test-20101111000000
""")

    # create a resume situation where the other side doesn't want the snapshot anymore (should abort resume)
    def test_abort_unwanted_resume(self):

        if "0.6.5" in ZFS_USERSPACE:
            self.skipTest("Resume not supported in this ZFS userspace version")

        with patch('time.strftime', return_value="test-20101111000000"):
            self.assertFalse(ZfsAutobackup("test test_target1 --no-progress --verbose".split(" ")).run())

        # generate resume
        with patch('time.strftime', return_value="test-20101111000001"):
            self.generate_resume()

        with OutputIO() as buf:
            with redirect_stdout(buf):
                # incremental, doesn't want the previous snapshot anymore
                with patch('time.strftime', return_value="test-20101111000002"):
                    self.assertFalse(ZfsAutobackup(
                        "test test_target1 --no-progress --verbose --keep-target=0 --allow-empty".split(" ")).run())

            print(buf.getvalue())

            self.assertIn("Aborting resume, we dont want that snapshot anymore.", buf.getvalue())  # literal log message, typo included

        r = shelltest("zfs list -H -o name -r -t all test_target1")
        self.assertMultiLineEqual(r, """
test_target1
test_target1/test_source1
test_target1/test_source1/fs1
test_target1/test_source1/fs1@test-20101111000002
test_target1/test_source1/fs1/sub
test_target1/test_source1/fs1/sub@test-20101111000002
test_target1/test_source2
test_target1/test_source2/fs2
test_target1/test_source2/fs2/sub
test_target1/test_source2/fs2/sub@test-20101111000002
""")

    # test with empty snapshot list (this was a bug)
    def test_abort_resume_emptysnapshotlist(self):

        if "0.6.5" in ZFS_USERSPACE:
            self.skipTest("Resume not supported in this ZFS userspace version")

        with patch('time.strftime', return_value="test-20101111000000"):
            self.assertFalse(ZfsAutobackup("test test_target1 --no-progress --verbose".split(" ")).run())

        # generate resume
        with patch('time.strftime', return_value="test-20101111000001"):
            self.generate_resume()

        shelltest("zfs destroy test_source1/fs1@test-20101111000001")

        with OutputIO() as buf:
            with redirect_stdout(buf):
                # incremental, doesn't want the previous snapshot anymore
                with patch('time.strftime', return_value="test-20101111000002"):
                    self.assertFalse(ZfsAutobackup(
                        "test test_target1 --no-progress --verbose --no-snapshot".split(
                            " ")).run())

            print(buf.getvalue())

            self.assertIn("Aborting resume, its obsolete", buf.getvalue())  # literal log message, typo included

    def test_missing_common(self):

        with patch('time.strftime', return_value="test-20101111000000"):
            self.assertFalse(ZfsAutobackup("test test_target1 --no-progress --verbose --allow-empty".split(" ")).run())

        # remove the common snapshot and leave nothing
        shelltest("zfs release zfs_autobackup:test test_source1/fs1@test-20101111000000")
        shelltest("zfs destroy test_source1/fs1@test-20101111000000")

        with patch('time.strftime', return_value="test-20101111000001"):
            self.assertTrue(ZfsAutobackup("test test_target1 --no-progress --verbose --allow-empty".split(" ")).run())

    # UPDATE: of course the one thing that wasn't tested had a bug :( (in ExecuteNode.run())
    def test_ignoretransfererrors(self):

        self.skipTest("Not sure how to implement a test for this without some serious hacking and patching.")

        # #recreate target pool without any features
        # # shelltest("zfs set compress=on test_source1; zpool destroy test_target1; zpool create test_target1 -o feature@project_quota=disabled /dev/ram2")
        #
        # with patch('time.strftime', return_value="test-20101111000000"):
        #     self.assertFalse(ZfsAutobackup("test test_target1 --verbose --allow-empty --no-progress".split(" ")).run())
        #
        # r = shelltest("zfs list -H -o name -r -t all test_target1")
        #
        # self.assertMultiLineEqual(r, """
        # test_target1
        # test_target1/test_source1
        # test_target1/test_source1/fs1
        # test_target1/test_source1/fs1@test-20101111000002
        # test_target1/test_source1/fs1/sub
        # test_target1/test_source1/fs1/sub@test-20101111000002
        # test_target1/test_source2
        # test_target1/test_source2/fs2
        # test_target1/test_source2/fs2/sub
        # test_target1/test_source2/fs2/sub@test-20101111000002
        # """)
52 tests/test_log.py Normal file
@@ -0,0 +1,52 @@
from zfs_autobackup.LogConsole import LogConsole
from basetest import *

class TestLog(unittest2.TestCase):

    def test_colored(self):
        """test with color output"""
        with OutputIO() as buf:
            with redirect_stdout(buf):
                l=LogConsole(show_verbose=False, show_debug=False, color=True)
                l.verbose("verbose")
                l.debug("debug")

            with redirect_stdout(buf):
                l=LogConsole(show_verbose=True, show_debug=True, color=True)
                l.verbose("verbose")
                l.debug("debug")

            with redirect_stderr(buf):
                l=LogConsole(show_verbose=False, show_debug=False, color=True)
                l.error("error")

            print(list(buf.getvalue()))
            self.assertEqual(list(buf.getvalue()), ['\x1b', '[', '2', '2', 'm', ' ', ' ', 'v', 'e', 'r', 'b', 'o', 's', 'e', '\x1b', '[', '0', 'm', '\n', '\x1b', '[', '3', '2', 'm', '#', ' ', 'd', 'e', 'b', 'u', 'g', '\x1b', '[', '0', 'm', '\n', '\x1b', '[', '3', '1', 'm', '\x1b', '[', '1', 'm', '!', ' ', 'e', 'r', 'r', 'o', 'r', '\x1b', '[', '0', 'm', '\n'])
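    # Note: the character list above is ANSI SGR escape sequences wrapped around
    # the log prefixes: '\x1b[22m' (normal intensity) for '  verbose',
    # '\x1b[32m' (green) for '# debug', '\x1b[31m' + '\x1b[1m' (bright red) for
    # '! error', and '\x1b[0m' to reset.
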
    def test_nocolor(self):
        """test without color output"""

        with OutputIO() as buf:
            with redirect_stdout(buf):
                l=LogConsole(show_verbose=False, show_debug=False, color=False)
                l.verbose("verbose")
                l.debug("debug")

            with redirect_stdout(buf):
                l=LogConsole(show_verbose=True, show_debug=True, color=False)
                l.verbose("verbose")
                l.debug("debug")

            with redirect_stderr(buf):
                l=LogConsole(show_verbose=False, show_debug=False, color=False)
                l.error("error")

            print(list(buf.getvalue()))
            self.assertEqual(list(buf.getvalue()), [' ', ' ', 'v', 'e', 'r', 'b', 'o', 's', 'e', '\n', '#', ' ', 'd', 'e', 'b', 'u', 'g', '\n', '!', ' ', 'e', 'r', 'r', 'o', 'r', '\n'])

# zfs_autobackup.LogConsole.colorama=False
105 tests/test_regressions.py Normal file
@@ -0,0 +1,105 @@
from basetest import *

class TestZfsNode(unittest2.TestCase):

    def setUp(self):
        prepare_zpools()
        self.longMessage=True

    def test_keepsource0target10queuedsend(self):
        """Test that the thinner doesn't destroy too much early on when there are no common snapshots YET. Issue #84"""

        with patch('time.strftime', return_value="test-20101111000000"):
            self.assertFalse(ZfsAutobackup(
                "test test_target1 --no-progress --verbose --keep-source=0 --keep-target=10 --allow-empty --no-send".split(
                    " ")).run())

        with patch('time.strftime', return_value="test-20101111000001"):
            self.assertFalse(ZfsAutobackup(
                "test test_target1 --no-progress --verbose --keep-source=0 --keep-target=10 --allow-empty --no-send".split(
                    " ")).run())

        with patch('time.strftime', return_value="test-20101111000002"):
            self.assertFalse(ZfsAutobackup(
                "test test_target1 --no-progress --verbose --keep-source=0 --keep-target=10 --allow-empty".split(
                    " ")).run())

        r = shelltest("zfs list -H -o name -r -t all " + TEST_POOLS)
        self.assertMultiLineEqual(r, """
test_source1
test_source1/fs1
test_source1/fs1@test-20101111000002
test_source1/fs1/sub
test_source1/fs1/sub@test-20101111000002
test_source2
test_source2/fs2
test_source2/fs2/sub
test_source2/fs2/sub@test-20101111000002
test_source2/fs3
test_source2/fs3/sub
test_target1
test_target1/test_source1
test_target1/test_source1/fs1
test_target1/test_source1/fs1@test-20101111000000
test_target1/test_source1/fs1@test-20101111000001
test_target1/test_source1/fs1@test-20101111000002
test_target1/test_source1/fs1/sub
test_target1/test_source1/fs1/sub@test-20101111000000
test_target1/test_source1/fs1/sub@test-20101111000001
test_target1/test_source1/fs1/sub@test-20101111000002
test_target1/test_source2
test_target1/test_source2/fs2
test_target1/test_source2/fs2/sub
test_target1/test_source2/fs2/sub@test-20101111000000
test_target1/test_source2/fs2/sub@test-20101111000001
test_target1/test_source2/fs2/sub@test-20101111000002
""")

    def test_excludepaths(self):
        """Test issue #103"""

        shelltest("zfs create test_target1/target_shouldnotbeexcluded")
        shelltest("zfs set autobackup:test=true test_target1/target_shouldnotbeexcluded")
        shelltest("zfs create test_target1/target")
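        # (The gist of issue #103, as exercised here: the target path used to be
        # excluded from selection by a plain string-prefix match, so the sibling
        # dataset "test_target1/target_shouldnotbeexcluded" wrongly matched the
        # target path "test_target1/target" and was skipped.)
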
with patch('time.strftime', return_value="test-20101111000000"):
|
||||||
|
self.assertFalse(ZfsAutobackup(
|
||||||
|
"test test_target1/target --no-progress --verbose --allow-empty".split(
|
||||||
|
" ")).run())
|
||||||
|
|
||||||
|
|
||||||
|
r = shelltest("zfs list -H -o name -r -t all " + TEST_POOLS)
|
||||||
|
self.assertMultiLineEqual(r, """
|
||||||
|
test_source1
|
||||||
|
test_source1/fs1
|
||||||
|
test_source1/fs1@test-20101111000000
|
||||||
|
test_source1/fs1/sub
|
||||||
|
test_source1/fs1/sub@test-20101111000000
|
||||||
|
test_source2
|
||||||
|
test_source2/fs2
|
||||||
|
test_source2/fs2/sub
|
||||||
|
test_source2/fs2/sub@test-20101111000000
|
||||||
|
test_source2/fs3
|
||||||
|
test_source2/fs3/sub
|
||||||
|
test_target1
|
||||||
|
test_target1/target
|
||||||
|
test_target1/target/test_source1
|
||||||
|
test_target1/target/test_source1/fs1
|
||||||
|
test_target1/target/test_source1/fs1@test-20101111000000
|
||||||
|
test_target1/target/test_source1/fs1/sub
|
||||||
|
test_target1/target/test_source1/fs1/sub@test-20101111000000
|
||||||
|
test_target1/target/test_source2
|
||||||
|
test_target1/target/test_source2/fs2
|
||||||
|
test_target1/target/test_source2/fs2/sub
|
||||||
|
test_target1/target/test_source2/fs2/sub@test-20101111000000
|
||||||
|
test_target1/target/test_target1
|
||||||
|
test_target1/target/test_target1/target_shouldnotbeexcluded
|
||||||
|
test_target1/target/test_target1/target_shouldnotbeexcluded@test-20101111000000
|
||||||
|
test_target1/target_shouldnotbeexcluded
|
||||||
|
test_target1/target_shouldnotbeexcluded@test-20101111000000
|
||||||
|
""")
|
||||||
|
|
||||||
|
|
||||||
97 tests/test_scaling.py Normal file
@@ -0,0 +1,97 @@
from basetest import *

from zfs_autobackup.ExecuteNode import ExecuteNode

run_orig=ExecuteNode.run
run_counter=0

def run_count(*args, **kwargs):
    global run_counter
    run_counter=run_counter+1
    return (run_orig(*args, **kwargs))
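# The tests below monkey-patch ExecuteNode.run with run_count via
# patch.object(), so every zfs/shell invocation increments run_counter.
# Asserting that the totals stay near a recorded baseline catches changes that
# accidentally scale the number of executed commands with the snapshot or
# dataset count.
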
class TestZfsScaling(unittest2.TestCase):

    def setUp(self):
        prepare_zpools()
        self.longMessage = True

    def test_manysnapshots(self):
        """count the number of commands when there are many snapshots."""

        snapshot_count=100

        print("Creating many snapshots...")
        s=""
        for i in range(1970,1970+snapshot_count):
            s=s+"zfs snapshot test_source1/fs1@test-{:04}1111000000;".format(i)

        shelltest(s)

        global run_counter

        run_counter=0
        with patch.object(ExecuteNode,'run', run_count) as p:

            with patch('time.strftime', return_value="test-20101112000000"):
                self.assertFalse(ZfsAutobackup("test test_target1 --no-progress --verbose --keep-source=10000 --keep-target=10000 --no-holds --allow-empty".split(" ")).run())

        #this triggers if you make a change with an impact of more than O(snapshot_count/2)
        expected_runs=343
        print("ACTUAL RUNS: {}".format(run_counter))
        self.assertLess(abs(run_counter-expected_runs), snapshot_count/2)

        run_counter=0
        with patch.object(ExecuteNode,'run', run_count) as p:

            with patch('time.strftime', return_value="test-20101112000001"):
                self.assertFalse(ZfsAutobackup("test test_target1 --no-progress --verbose --keep-source=10000 --keep-target=10000 --no-holds --allow-empty".split(" ")).run())

        #this triggers if you make a change with a performance impact of more than O(snapshot_count/2)
        expected_runs=47
        print("ACTUAL RUNS: {}".format(run_counter))
        self.assertLess(abs(run_counter-expected_runs), snapshot_count/2)

    def test_manydatasets(self):
        """count the number of commands when there are many datasets"""

        dataset_count=100

        print("Creating many datasets...")
        s=""
        for i in range(0,dataset_count):
            s=s+"zfs create test_source1/fs1/{};".format(i)

        shelltest(s)

        global run_counter

        #first run
        run_counter=0
        with patch.object(ExecuteNode,'run', run_count) as p:

            with patch('time.strftime', return_value="test-20101112000000"):
                self.assertFalse(ZfsAutobackup("test test_target1 --no-progress --verbose --no-holds --allow-empty".split(" ")).run())

        #this triggers if you make a change with an impact of more than O(dataset_count/2)
        expected_runs=743
        print("ACTUAL RUNS: {}".format(run_counter))
        self.assertLess(abs(run_counter-expected_runs), dataset_count/2)

        #second run, should have a higher number of expected_runs
        run_counter=0
        with patch.object(ExecuteNode,'run', run_count) as p:

            with patch('time.strftime', return_value="test-20101112000001"):
                self.assertFalse(ZfsAutobackup("test test_target1 --no-progress --verbose --no-holds --allow-empty".split(" ")).run())

        #this triggers if you make a change with a performance impact of more than O(dataset_count/2)
        expected_runs=947
        print("ACTUAL RUNS: {}".format(run_counter))
        self.assertLess(abs(run_counter-expected_runs), dataset_count/2)
88 tests/test_sendrecvpipes.py Normal file
@@ -0,0 +1,88 @@
import zfs_autobackup.compressors
from basetest import *
import time

class TestSendRecvPipes(unittest2.TestCase):
    """test input/output pipes for zfs send and recv"""

    def setUp(self):
        prepare_zpools()
        self.longMessage=True
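    # Rough shape of the transfer these options produce (an assumption based on
    # the option names, not a literal trace): with --send-pipe="dd bs=1M" and
    # --recv-pipe="dd bs=2M" the stream runs roughly as
    #   zfs send ... | dd bs=1M | <transport> | dd bs=2M | zfs recv ...
    # and --compress/--buffer/--rate below splice a compressor, buffer or rate
    # limiter into the same pipeline.
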
    def test_send_basics(self):
        """send basics (remote/local send pipe)"""

        with self.subTest("local local pipe"):
            with patch('time.strftime', return_value="test-20101111000000"):
                self.assertFalse(ZfsAutobackup(["test", "test_target1", "--exclude-received", "--no-holds", "--no-progress", "--send-pipe=dd bs=1M", "--recv-pipe=dd bs=2M"]).run())

        shelltest("zfs destroy -r test_target1/test_source1/fs1/sub")

        with self.subTest("remote local pipe"):
            with patch('time.strftime', return_value="test-20101111000000"):
                self.assertFalse(ZfsAutobackup(["test", "test_target1", "--exclude-received", "--no-holds", "--no-progress", "--ssh-source=localhost", "--send-pipe=dd bs=1M", "--recv-pipe=dd bs=2M"]).run())

        shelltest("zfs destroy -r test_target1/test_source1/fs1/sub")

        with self.subTest("local remote pipe"):
            with patch('time.strftime', return_value="test-20101111000000"):
                self.assertFalse(ZfsAutobackup(["test", "test_target1", "--exclude-received", "--no-holds", "--no-progress", "--ssh-target=localhost", "--send-pipe=dd bs=1M", "--recv-pipe=dd bs=2M"]).run())

        shelltest("zfs destroy -r test_target1/test_source1/fs1/sub")

        with self.subTest("remote remote pipe"):
            with patch('time.strftime', return_value="test-20101111000000"):
                self.assertFalse(ZfsAutobackup(["test", "test_target1", "--exclude-received", "--no-holds", "--no-progress", "--ssh-source=localhost", "--ssh-target=localhost", "--send-pipe=dd bs=1M", "--recv-pipe=dd bs=2M"]).run())

    def test_compress(self):
        """test all compression filters in COMPRESS_CMDS"""

        for compress in zfs_autobackup.compressors.COMPRESS_CMDS.keys():

            with self.subTest("compress "+compress):
                with patch('time.strftime', return_value="test-20101111000000"):
                    self.assertFalse(ZfsAutobackup(["test", "test_target1", "--exclude-received", "--no-holds", "--no-progress", "--compress="+compress]).run())

                shelltest("zfs destroy -r test_target1/test_source1/fs1/sub")

    def test_buffer(self):
        """test different buffer configurations"""

        with self.subTest("local local pipe"):
            with patch('time.strftime', return_value="test-20101111000000"):
                self.assertFalse(ZfsAutobackup(["test", "test_target1", "--exclude-received", "--no-holds", "--no-progress", "--buffer=1M"]).run())

        shelltest("zfs destroy -r test_target1/test_source1/fs1/sub")

        with self.subTest("remote local pipe"):
            with patch('time.strftime', return_value="test-20101111000000"):
                self.assertFalse(ZfsAutobackup(["test", "test_target1", "--exclude-received", "--no-holds", "--no-progress", "--ssh-source=localhost", "--buffer=1M"]).run())

        shelltest("zfs destroy -r test_target1/test_source1/fs1/sub")

        with self.subTest("local remote pipe"):
            with patch('time.strftime', return_value="test-20101111000000"):
                self.assertFalse(ZfsAutobackup(["test", "test_target1", "--exclude-received", "--no-holds", "--no-progress", "--ssh-target=localhost", "--buffer=1M"]).run())

        shelltest("zfs destroy -r test_target1/test_source1/fs1/sub")

        with self.subTest("remote remote pipe"):
            with patch('time.strftime', return_value="test-20101111000000"):
                self.assertFalse(ZfsAutobackup(["test", "test_target1", "--exclude-received", "--no-holds", "--no-progress", "--ssh-source=localhost", "--ssh-target=localhost", "--buffer=1M"]).run())

    def test_rate(self):
        """test rate limit"""

        start=time.time()
        with patch('time.strftime', return_value="test-20101111000000"):
            self.assertFalse(ZfsAutobackup(["test", "test_target1", "--exclude-received", "--no-holds", "--no-progress", "--rate=50k"]).run())

        #not a great way of verifying, but it works: at --rate=50k the transfer has to take a while
        self.assertGreater(time.time()-start, 5)
159 tests/test_thinner.py Normal file
@@ -0,0 +1,159 @@
from basetest import *
import pprint

from zfs_autobackup.Thinner import Thinner
# assumption: ThinnerRule lives in its own module; the exception tests below use it directly
from zfs_autobackup.ThinnerRule import ThinnerRule

# randint is different in python 2 vs 3
randint_compat = lambda lo, hi: lo + int(random.random() * (hi + 1 - lo))

class Thing:
    def __init__(self, timestamp):
        self.timestamp=timestamp

    def __str__(self):
        # age=now-self.timestamp
        struct=time.gmtime(self.timestamp)
        return("{}".format(time.strftime("%Y-%m-%d %H:%M:%S",struct)))
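# Schedule-string sketch (how the rules used below are commonly read; see the
# Thinner/ThinnerRule sources for the authoritative parser):
# "5,10s1min,1d1w,1w1m,1m12m,1y5y" means: always keep the 5 most recent things,
# then keep one per 10 seconds for 1 minute, one per day for 1 week, one per
# week for 1 month, one per month for 12 months and one per year for 5 years.
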
class TestThinner(unittest2.TestCase):

    # def setUp(self):
    #     return super().setUp()

    def test_exceptions(self):
        with self.assertRaisesRegexp(Exception, "^Invalid period"):
            ThinnerRule("12X12m")

        with self.assertRaisesRegexp(Exception, "^Invalid ttl"):
            ThinnerRule("12d12X")

        with self.assertRaisesRegexp(Exception, "^Period cant be"):
            ThinnerRule("12d1d")

        with self.assertRaisesRegexp(Exception, "^Invalid schedule"):
            ThinnerRule("XXX")

        with self.assertRaisesRegexp(Exception, "^Number of"):
            Thinner("-1")

    def test_incremental(self):
        ok=['2023-01-03 10:53:16',
            '2024-01-02 15:43:29',
            '2025-01-01 06:15:32',
            '2026-01-01 02:48:23',
            '2026-04-07 20:07:36',
            '2026-05-07 02:30:29',
            '2026-06-06 01:19:46',
            '2026-07-06 06:38:09',
            '2026-08-05 05:08:53',
            '2026-09-04 03:33:04',
            '2026-10-04 05:27:09',
            '2026-11-04 04:01:17',
            '2026-12-03 13:49:56',
            '2027-01-01 17:02:00',
            '2027-01-03 04:26:42',
            '2027-02-01 14:16:02',
            '2027-02-12 03:31:02',
            '2027-02-18 00:33:10',
            '2027-02-26 21:09:54',
            '2027-03-02 08:05:18',
            '2027-03-03 16:46:09',
            '2027-03-04 06:39:14',
            '2027-03-06 03:35:41',
            '2027-03-08 12:24:42',
            '2027-03-08 20:34:57']

        #some arbitrary date
        now=1589229252
        #we want deterministic results
        random.seed(1337)
        thinner=Thinner("5,10s1min,1d1w,1w1m,1m12m,1y5y")
        things=[]

        #thin incrementally while adding
        for i in range(0,5000):

            #increase time by a random amount and maybe add a thing
            now=now+randint_compat(0,3600*24)
            if random.random()>=0.5:
                things.append(Thing(now))

            (keeps, removes)=thinner.thin(things, now=now)
            things=keeps

        result=[]
        for thing in things:
            result.append(str(thing))

        print("Thinner result incremental:")
        pprint.pprint(result)

        self.assertEqual(result, ok)

    def test_full(self):
        ok=['2022-03-09 01:56:23',
            '2023-01-03 10:53:16',
            '2024-01-02 15:43:29',
            '2025-01-01 06:15:32',
            '2026-01-01 02:48:23',
            '2026-03-14 09:08:04',
            '2026-04-07 20:07:36',
            '2026-05-07 02:30:29',
            '2026-06-06 01:19:46',
            '2026-07-06 06:38:09',
            '2026-08-05 05:08:53',
            '2026-09-04 03:33:04',
            '2026-10-04 05:27:09',
            '2026-11-04 04:01:17',
            '2026-12-03 13:49:56',
            '2027-01-01 17:02:00',
            '2027-01-03 04:26:42',
            '2027-02-01 14:16:02',
            '2027-02-08 02:41:14',
            '2027-02-12 03:31:02',
            '2027-02-18 00:33:10',
            '2027-02-26 21:09:54',
            '2027-03-02 08:05:18',
            '2027-03-03 16:46:09',
            '2027-03-04 06:39:14',
            '2027-03-06 03:35:41',
            '2027-03-08 12:24:42',
            '2027-03-08 20:34:57']

        #some arbitrary date
        now=1589229252
        #we want deterministic results
        random.seed(1337)
        thinner=Thinner("5,10s1min,1d1w,1w1m,1m12m,1y5y")
        things=[]

        for i in range(0,5000):

            #increase time by a random amount and maybe add a thing
            now=now+randint_compat(0,3600*24)
            if random.random()>=0.5:
                things.append(Thing(now))

        (things, removes)=thinner.thin(things, now=now)

        result=[]
        for thing in things:
            result.append(str(thing))

        print("Thinner result full:")
        pprint.pprint(result)

        self.assertEqual(result, ok)

# if __name__ == '__main__':
#     unittest.main()
99 tests/test_verify.py Normal file
@@ -0,0 +1,99 @@
from basetest import *
|
||||||
|
|
||||||
|
|
||||||
|
# test zfs-verify:
|
||||||
|
# - when there is no common snapshot at all
|
||||||
|
# - when encryption key not loaded
|
||||||
|
# - --test mode
|
||||||
|
# - --fs-compare methods
|
||||||
|
# - on snapshots of datasets:
|
||||||
|
# - that are correct
|
||||||
|
# - that are different
|
||||||
|
# - on snapshots of zvols
|
||||||
|
# - that are correct
|
||||||
|
# - that are different
|
||||||
|
# - test all directions (local, remote/local, local/remote, remote/remote)
|
||||||
|
#
|
||||||
|
|
||||||
|
class TestZfsEncryption(unittest2.TestCase):
|
||||||
|
|
||||||
|
|
||||||
|
def setUp(self):
|
||||||
|
prepare_zpools()
|
||||||
|
|
||||||
|
#create actual test files and data
|
||||||
|
shelltest("zfs create test_source1/fs1/ok_filesystem")
|
||||||
|
shelltest("cp tests/*.py /test_source1/fs1/ok_filesystem")
|
||||||
|
|
||||||
|
shelltest("zfs create test_source1/fs1/bad_filesystem")
|
||||||
|
shelltest("cp tests/*.py /test_source1/fs1/bad_filesystem")
|
||||||
|
|
||||||
|
shelltest("zfs create -V 1M test_source1/fs1/ok_zvol")
|
||||||
|
shelltest("dd if=/dev/urandom of=/dev/zvol/test_source1/fs1/ok_zvol count=1 bs=512k")
|
||||||
|
|
||||||
|
shelltest("zfs create -V 1M test_source1/fs1/bad_zvol")
|
||||||
|
shelltest("dd if=/dev/urandom of=/dev/zvol/test_source1/fs1/bad_zvol count=1 bs=512k")
|
||||||
|
|
||||||
|
#create backup
|
||||||
|
with patch('time.strftime', return_value="test-20101111000000"):
|
||||||
|
self.assertFalse(ZfsAutobackup("test test_target1 --verbose --no-progress --no-holds".split(" ")).run())
|
||||||
|
|
||||||
|
#Do an ugly hack to create a fault in the bad filesystem
|
||||||
|
#In zfs-autoverify it doenst matter that the snapshot isnt actually the same snapshot, so this hack works
|
||||||
|
shelltest("zfs destroy test_target1/test_source1/fs1/bad_filesystem@test-20101111000000")
|
||||||
|
shelltest("zfs mount test_target1/test_source1/fs1/bad_filesystem")
|
||||||
|
shelltest("echo >> /test_target1/test_source1/fs1/bad_filesystem/test_verify.py")
|
||||||
|
shelltest("zfs snapshot test_target1/test_source1/fs1/bad_filesystem@test-20101111000000")
|
||||||
|
|
||||||
|
#do the same hack for the bad zvol
|
||||||
|
shelltest("zfs destroy test_target1/test_source1/fs1/bad_zvol@test-20101111000000")
|
||||||
|
shelltest("dd if=/dev/urandom of=/dev/zvol/test_target1/test_source1/fs1/bad_zvol count=1 bs=1")
|
||||||
|
shelltest("zfs snapshot test_target1/test_source1/fs1/bad_zvol@test-20101111000000")
|
||||||
|
|
||||||
|
|
||||||
|
# make sure we cant accidently compare current data
|
||||||
|
shelltest("zfs mount test_target1/test_source1/fs1/ok_filesystem")
|
||||||
|
shelltest("rm /test_source1/fs1/ok_filesystem/*")
|
||||||
|
shelltest("rm /test_source1/fs1/bad_filesystem/*")
|
||||||
|
shelltest("dd if=/dev/zero of=/dev/zvol/test_source1/fs1/ok_zvol count=1 bs=512k")
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
    def test_verify(self):

        with self.subTest("default --test"):
            self.assertFalse(ZfsAutoverify("test test_target1 --verbose --test".split(" ")).run())

        with self.subTest("rsync, remote source and target. (not supported, all 6 fail)"):
            self.assertEqual(6, ZfsAutoverify("test test_target1 --ssh-source=localhost --ssh-target=localhost --verbose --exclude-received --fs-compare=rsync".split(" ")).run())

        def runchecked(testname, command):
            with self.subTest(testname):
                with OutputIO() as buf:
                    result=None
                    with redirect_stderr(buf):
                        result=ZfsAutoverify(command.split(" ")).run()

                    print(buf.getvalue())
                    self.assertEqual(2,result)
                    self.assertRegex(buf.getvalue(), "bad_filesystem: FAILED:")
                    self.assertRegex(buf.getvalue(), "bad_zvol: FAILED:")

        runchecked("rsync, remote source", "test test_target1 --ssh-source=localhost --verbose --exclude-received --fs-compare=rsync")
        runchecked("rsync, remote target", "test test_target1 --ssh-target=localhost --verbose --exclude-received --fs-compare=rsync")
        runchecked("rsync, local", "test test_target1 --verbose --exclude-received --fs-compare=rsync")

        runchecked("tar, remote source and remote target",
                   "test test_target1 --ssh-source=localhost --ssh-target=localhost --verbose --exclude-received --fs-compare=tar")
        runchecked("tar, remote source",
                   "test test_target1 --ssh-source=localhost --verbose --exclude-received --fs-compare=tar")
        runchecked("tar, remote target",
                   "test test_target1 --ssh-target=localhost --verbose --exclude-received --fs-compare=tar")
        runchecked("tar, local", "test test_target1 --verbose --exclude-received --fs-compare=tar")

        with self.subTest("no common snapshot"):
            #destroy the common snapshot, now 3 should fail
            shelltest("zfs destroy test_source1/fs1/ok_zvol@test-20101111000000")
            self.assertEqual(3, ZfsAutoverify("test test_target1 --verbose --exclude-received".split(" ")).run())
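
# ---------------------------------------------------------------------------
# Illustrative sketch (not from the original code): the tests above only
# assert on exit codes and "FAILED:" messages, so the compare mechanism
# itself isn't shown here. Assuming both snapshots are mounted read-only at
# the given paths, an rsync-based compare could look roughly like this
# (function name and paths are hypothetical):
import subprocess

def fs_differs(source_mount, target_mount):
    """Return True if the two mounted snapshot trees differ.

    rsync -rcn does a checksum-based dry-run; --itemize-changes prints one
    line per differing file and --delete also reports files that exist only
    on the target side, so any output at all means a mismatch."""
    out = subprocess.run(
        ["rsync", "-rcn", "--itemize-changes", "--delete",
         source_mount + "/", target_mount + "/"],
        capture_output=True, text=True, check=True)
    return bool(out.stdout.strip())
# ---------------------------------------------------------------------------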

930  tests/test_zfsautobackup.py  Normal file
@@ -0,0 +1,930 @@
from zfs_autobackup.CmdPipe import CmdPipe

from basetest import *
import time

from zfs_autobackup.LogConsole import LogConsole


class TestZfsAutobackup(unittest2.TestCase):

    def setUp(self):
        prepare_zpools()
        self.longMessage=True

    def test_invalidpars(self):

        self.assertEqual(ZfsAutobackup("test test_target1 --no-progress --keep-source -1".split(" ")).run(), 255)

        with OutputIO() as buf:
            with redirect_stdout(buf):
                self.assertEqual(ZfsAutobackup("test test_target1 --no-progress --resume --verbose --no-snapshot".split(" ")).run(), 0)

            print(buf.getvalue())
            self.assertIn("The --resume", buf.getvalue())

        with OutputIO() as buf:
            with redirect_stderr(buf):
                self.assertEqual(ZfsAutobackup("test test_target_nonexisting --no-progress".split(" ")).run(), 255)

            print(buf.getvalue())
            # correct message?
            self.assertIn("Please create this dataset", buf.getvalue())
    def test_snapshotmode(self):
        """test snapshot tool mode"""

        with patch('time.strftime', return_value="test-20101111000000"):
            self.assertFalse(ZfsAutobackup("test --no-progress --verbose".split(" ")).run())

        r=shelltest("zfs list -H -o name -r -t all "+TEST_POOLS)
        self.assertMultiLineEqual(r,"""
test_source1
test_source1/fs1
test_source1/fs1@test-20101111000000
test_source1/fs1/sub
test_source1/fs1/sub@test-20101111000000
test_source2
test_source2/fs2
test_source2/fs2/sub
test_source2/fs2/sub@test-20101111000000
test_source2/fs3
test_source2/fs3/sub
test_target1
""")
    def test_defaults(self):

        with self.subTest("no datasets selected"):
            with OutputIO() as buf:
                with redirect_stderr(buf):
                    with patch('time.strftime', return_value="test-20101111000000"):
                        self.assertTrue(ZfsAutobackup("nonexisting test_target1 --verbose --debug --no-progress".split(" ")).run())

                print(buf.getvalue())
                #correct message?
                self.assertIn("No source filesystems selected", buf.getvalue())

        with self.subTest("defaults with full verbose and debug"):

            with patch('time.strftime', return_value="test-20101111000000"):
                self.assertFalse(ZfsAutobackup("test test_target1 --verbose --debug --no-progress".split(" ")).run())

            r=shelltest("zfs list -H -o name -r -t all "+TEST_POOLS)
            self.assertMultiLineEqual(r,"""
test_source1
test_source1/fs1
test_source1/fs1@test-20101111000000
test_source1/fs1/sub
test_source1/fs1/sub@test-20101111000000
test_source2
test_source2/fs2
test_source2/fs2/sub
test_source2/fs2/sub@test-20101111000000
test_source2/fs3
test_source2/fs3/sub
test_target1
test_target1/test_source1
test_target1/test_source1/fs1
test_target1/test_source1/fs1@test-20101111000000
test_target1/test_source1/fs1/sub
test_target1/test_source1/fs1/sub@test-20101111000000
test_target1/test_source2
test_target1/test_source2/fs2
test_target1/test_source2/fs2/sub
test_target1/test_source2/fs2/sub@test-20101111000000
""")

        with self.subTest("bare defaults, allow empty"):
            with patch('time.strftime', return_value="test-20101111000001"):
                self.assertFalse(ZfsAutobackup("test test_target1 --allow-empty --no-progress".split(" ")).run())

            r=shelltest("zfs list -H -o name -r -t all "+TEST_POOLS)
            self.assertMultiLineEqual(r,"""
test_source1
test_source1/fs1
test_source1/fs1@test-20101111000000
test_source1/fs1@test-20101111000001
test_source1/fs1/sub
test_source1/fs1/sub@test-20101111000000
test_source1/fs1/sub@test-20101111000001
test_source2
test_source2/fs2
test_source2/fs2/sub
test_source2/fs2/sub@test-20101111000000
test_source2/fs2/sub@test-20101111000001
test_source2/fs3
test_source2/fs3/sub
test_target1
test_target1/test_source1
test_target1/test_source1/fs1
test_target1/test_source1/fs1@test-20101111000000
test_target1/test_source1/fs1@test-20101111000001
test_target1/test_source1/fs1/sub
test_target1/test_source1/fs1/sub@test-20101111000000
test_target1/test_source1/fs1/sub@test-20101111000001
test_target1/test_source2
test_target1/test_source2/fs2
test_target1/test_source2/fs2/sub
test_target1/test_source2/fs2/sub@test-20101111000000
test_target1/test_source2/fs2/sub@test-20101111000001
""")

        with self.subTest("verify holds"):

            r=shelltest("zfs get -r userrefs test_source1 test_source2 test_target1")
            self.assertMultiLineEqual(r,"""
NAME PROPERTY VALUE SOURCE
test_source1 userrefs - -
test_source1/fs1 userrefs - -
test_source1/fs1@test-20101111000000 userrefs 0 -
test_source1/fs1@test-20101111000001 userrefs 1 -
test_source1/fs1/sub userrefs - -
test_source1/fs1/sub@test-20101111000000 userrefs 0 -
test_source1/fs1/sub@test-20101111000001 userrefs 1 -
test_source2 userrefs - -
test_source2/fs2 userrefs - -
test_source2/fs2/sub userrefs - -
test_source2/fs2/sub@test-20101111000000 userrefs 0 -
test_source2/fs2/sub@test-20101111000001 userrefs 1 -
test_source2/fs3 userrefs - -
test_source2/fs3/sub userrefs - -
test_target1 userrefs - -
test_target1/test_source1 userrefs - -
test_target1/test_source1/fs1 userrefs - -
test_target1/test_source1/fs1@test-20101111000000 userrefs 0 -
test_target1/test_source1/fs1@test-20101111000001 userrefs 1 -
test_target1/test_source1/fs1/sub userrefs - -
test_target1/test_source1/fs1/sub@test-20101111000000 userrefs 0 -
test_target1/test_source1/fs1/sub@test-20101111000001 userrefs 1 -
test_target1/test_source2 userrefs - -
test_target1/test_source2/fs2 userrefs - -
test_target1/test_source2/fs2/sub userrefs - -
test_target1/test_source2/fs2/sub@test-20101111000000 userrefs 0 -
test_target1/test_source2/fs2/sub@test-20101111000001 userrefs 1 -
""")
        #make sure time handling is correct: take snapshots a year apart and
        #verify that only snapshots roughly 1y old are kept
        with self.subTest("test time checking"):
            with patch('time.strftime', return_value="test-20111111000000"):
                self.assertFalse(ZfsAutobackup("test test_target1 --allow-empty --verbose --no-progress".split(" ")).run())

            time_str="20111112000000" #day in the "future"
            future_timestamp=time.mktime(time.strptime(time_str,"%Y%m%d%H%M%S"))
            with patch('time.time', return_value=future_timestamp):
                with patch('time.strftime', return_value="test-20111111000001"):
                    self.assertFalse(ZfsAutobackup("test test_target1 --allow-empty --verbose --keep-source 1y1y --keep-target 1d1y --no-progress".split(" ")).run())

            r=shelltest("zfs list -H -o name -r -t all "+TEST_POOLS)
            self.assertMultiLineEqual(r,"""
test_source1
test_source1/fs1
test_source1/fs1@test-20111111000000
test_source1/fs1@test-20111111000001
test_source1/fs1/sub
test_source1/fs1/sub@test-20111111000000
test_source1/fs1/sub@test-20111111000001
test_source2
test_source2/fs2
test_source2/fs2/sub
test_source2/fs2/sub@test-20111111000000
test_source2/fs2/sub@test-20111111000001
test_source2/fs3
test_source2/fs3/sub
test_target1
test_target1/test_source1
test_target1/test_source1/fs1
test_target1/test_source1/fs1@test-20111111000000
test_target1/test_source1/fs1@test-20111111000001
test_target1/test_source1/fs1/sub
test_target1/test_source1/fs1/sub@test-20111111000000
test_target1/test_source1/fs1/sub@test-20111111000001
test_target1/test_source2
test_target1/test_source2/fs2
test_target1/test_source2/fs2/sub
test_target1/test_source2/fs2/sub@test-20111111000000
test_target1/test_source2/fs2/sub@test-20111111000001
""")
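
# ---------------------------------------------------------------------------
# Illustrative sketch (not from the original code): the --keep-source/--keep-
# target values used above ("1y1y", "1d1y") read like <interval><ttl> thinning
# rules: keep one snapshot per interval, for ttl. A minimal parser under that
# assumption (the tool's real Thinner class may well differ):
import re

UNITS = {"min": 60, "h": 3600, "d": 86400, "w": 7 * 86400,
         "m": 30 * 86400, "y": 365 * 86400}

def parse_rule(rule):
    """Parse e.g. '1d1y' into (interval_seconds, ttl_seconds)."""
    m = re.fullmatch(r"(\d+)(min|h|d|w|m|y)(\d+)(min|h|d|w|m|y)", rule)
    if not m:
        raise ValueError("invalid thinning rule: " + rule)
    return (int(m.group(1)) * UNITS[m.group(2)],
            int(m.group(3)) * UNITS[m.group(4)])

# parse_rule("1d1y") -> (86400, 31536000): one snapshot per day, kept a year.
# ---------------------------------------------------------------------------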
    def test_ignore_othersnaphots(self):

        r=shelltest("zfs snapshot test_source1/fs1@othersimple")
        r=shelltest("zfs snapshot test_source1/fs1@otherdate-20001111000000")

        with patch('time.strftime', return_value="test-20101111000000"):
            self.assertFalse(ZfsAutobackup("test test_target1 --no-progress --verbose".split(" ")).run())

        r=shelltest("zfs list -H -o name -r -t all "+TEST_POOLS)
        self.assertMultiLineEqual(r,"""
test_source1
test_source1/fs1
test_source1/fs1@othersimple
test_source1/fs1@otherdate-20001111000000
test_source1/fs1@test-20101111000000
test_source1/fs1/sub
test_source1/fs1/sub@test-20101111000000
test_source2
test_source2/fs2
test_source2/fs2/sub
test_source2/fs2/sub@test-20101111000000
test_source2/fs3
test_source2/fs3/sub
test_target1
test_target1/test_source1
test_target1/test_source1/fs1
test_target1/test_source1/fs1@test-20101111000000
test_target1/test_source1/fs1/sub
test_target1/test_source1/fs1/sub@test-20101111000000
test_target1/test_source2
test_target1/test_source2/fs2
test_target1/test_source2/fs2/sub
test_target1/test_source2/fs2/sub@test-20101111000000
""")
    def test_othersnaphots(self):

        r=shelltest("zfs snapshot test_source1/fs1@othersimple")
        r=shelltest("zfs snapshot test_source1/fs1@otherdate-20001111000000")

        with patch('time.strftime', return_value="test-20101111000000"):
            self.assertFalse(ZfsAutobackup("test test_target1 --no-progress --verbose --other-snapshots".split(" ")).run())

        r=shelltest("zfs list -H -o name -r -t all "+TEST_POOLS)
        self.assertMultiLineEqual(r,"""
test_source1
test_source1/fs1
test_source1/fs1@othersimple
test_source1/fs1@otherdate-20001111000000
test_source1/fs1@test-20101111000000
test_source1/fs1/sub
test_source1/fs1/sub@test-20101111000000
test_source2
test_source2/fs2
test_source2/fs2/sub
test_source2/fs2/sub@test-20101111000000
test_source2/fs3
test_source2/fs3/sub
test_target1
test_target1/test_source1
test_target1/test_source1/fs1
test_target1/test_source1/fs1@othersimple
test_target1/test_source1/fs1@otherdate-20001111000000
test_target1/test_source1/fs1@test-20101111000000
test_target1/test_source1/fs1/sub
test_target1/test_source1/fs1/sub@test-20101111000000
test_target1/test_source2
test_target1/test_source2/fs2
test_target1/test_source2/fs2/sub
test_target1/test_source2/fs2/sub@test-20101111000000
""")
    def test_nosnapshot(self):

        with patch('time.strftime', return_value="test-20101111000000"):
            self.assertFalse(ZfsAutobackup("test test_target1 --verbose --no-snapshot --no-progress".split(" ")).run())

        r=shelltest("zfs list -H -o name -r -t all "+TEST_POOLS)
        #(only the parent datasets are created)
        #TODO: it probably shouldn't create these
        self.assertMultiLineEqual(r,"""
test_source1
test_source1/fs1
test_source1/fs1/sub
test_source2
test_source2/fs2
test_source2/fs2/sub
test_source2/fs3
test_source2/fs3/sub
test_target1
test_target1/test_source1
test_target1/test_source2
test_target1/test_source2/fs2
""")
    def test_nosend(self):

        with patch('time.strftime', return_value="test-20101111000000"):
            self.assertFalse(ZfsAutobackup("test test_target1 --verbose --no-send --no-progress".split(" ")).run())

        r=shelltest("zfs list -H -o name -r -t all "+TEST_POOLS)
        self.assertMultiLineEqual(r,"""
test_source1
test_source1/fs1
test_source1/fs1@test-20101111000000
test_source1/fs1/sub
test_source1/fs1/sub@test-20101111000000
test_source2
test_source2/fs2
test_source2/fs2/sub
test_source2/fs2/sub@test-20101111000000
test_source2/fs3
test_source2/fs3/sub
test_target1
""")
    def test_ignorereplicated(self):
        r=shelltest("zfs snapshot test_source1/fs1@otherreplication")

        with patch('time.strftime', return_value="test-20101111000000"):
            self.assertFalse(ZfsAutobackup("test test_target1 --no-progress --verbose --ignore-replicated".split(" ")).run())

        r=shelltest("zfs list -H -o name -r -t all "+TEST_POOLS)
        self.assertMultiLineEqual(r,"""
test_source1
test_source1/fs1
test_source1/fs1@otherreplication
test_source1/fs1/sub
test_source1/fs1/sub@test-20101111000000
test_source2
test_source2/fs2
test_source2/fs2/sub
test_source2/fs2/sub@test-20101111000000
test_source2/fs3
test_source2/fs3/sub
test_target1
test_target1/test_source1
test_target1/test_source1/fs1
test_target1/test_source1/fs1/sub
test_target1/test_source1/fs1/sub@test-20101111000000
test_target1/test_source2
test_target1/test_source2/fs2
test_target1/test_source2/fs2/sub
test_target1/test_source2/fs2/sub@test-20101111000000
""")
    def test_noholds(self):

        with patch('time.strftime', return_value="test-20101111000000"):
            self.assertFalse(ZfsAutobackup("test test_target1 --verbose --no-holds --no-progress".split(" ")).run())

        r=shelltest("zfs get -r userrefs test_source1 test_source2 test_target1")
        self.assertMultiLineEqual(r,"""
NAME PROPERTY VALUE SOURCE
test_source1 userrefs - -
test_source1/fs1 userrefs - -
test_source1/fs1@test-20101111000000 userrefs 0 -
test_source1/fs1/sub userrefs - -
test_source1/fs1/sub@test-20101111000000 userrefs 0 -
test_source2 userrefs - -
test_source2/fs2 userrefs - -
test_source2/fs2/sub userrefs - -
test_source2/fs2/sub@test-20101111000000 userrefs 0 -
test_source2/fs3 userrefs - -
test_source2/fs3/sub userrefs - -
test_target1 userrefs - -
test_target1/test_source1 userrefs - -
test_target1/test_source1/fs1 userrefs - -
test_target1/test_source1/fs1@test-20101111000000 userrefs 0 -
test_target1/test_source1/fs1/sub userrefs - -
test_target1/test_source1/fs1/sub@test-20101111000000 userrefs 0 -
test_target1/test_source2 userrefs - -
test_target1/test_source2/fs2 userrefs - -
test_target1/test_source2/fs2/sub userrefs - -
test_target1/test_source2/fs2/sub@test-20101111000000 userrefs 0 -
""")
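
# ---------------------------------------------------------------------------
# Illustrative sketch (not from the original code): the userrefs checks above
# work because a ZFS hold bumps a snapshot's userrefs count and makes
# "zfs destroy" fail until the hold is released. Equivalent CLI usage
# (the dataset name is just an example):
import subprocess

snap = "test_source1/fs1@test-20101111000000"
subprocess.run(["zfs", "hold", "zfs_autobackup:test", snap], check=True)      # userrefs -> 1
subprocess.run(["zfs", "holds", snap], check=True)                            # list the holds
subprocess.run(["zfs", "release", "zfs_autobackup:test", snap], check=True)   # userrefs -> 0
# ---------------------------------------------------------------------------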
    def test_strippath(self):

        with patch('time.strftime', return_value="test-20101111000000"):
            self.assertFalse(ZfsAutobackup("test test_target1 --verbose --strip-path=1 --no-progress".split(" ")).run())

        r=shelltest("zfs list -H -o name -r -t all "+TEST_POOLS)
        self.assertMultiLineEqual(r,"""
test_source1
test_source1/fs1
test_source1/fs1@test-20101111000000
test_source1/fs1/sub
test_source1/fs1/sub@test-20101111000000
test_source2
test_source2/fs2
test_source2/fs2/sub
test_source2/fs2/sub@test-20101111000000
test_source2/fs3
test_source2/fs3/sub
test_target1
test_target1/fs1
test_target1/fs1@test-20101111000000
test_target1/fs1/sub
test_target1/fs1/sub@test-20101111000000
test_target1/fs2
test_target1/fs2/sub
test_target1/fs2/sub@test-20101111000000
""")
    # def test_strippath_toomuch(self):
    #     with patch('time.strftime', return_value="test-20101111000000"):
    #         self.assertFalse(
    #             ZfsAutobackup("test test_target1 --verbose --strip-path=2 --no-progress".split(" ")).run())
    #
    #     r = shelltest("zfs list -H -o name -r -t all " + TEST_POOLS)
    #     self.assertMultiLineEqual(r, """
    # test_source1
    # test_source1/fs1
    # test_source1/fs1@test-20101111000000
    # test_source1/fs1/sub
    # test_source1/fs1/sub@test-20101111000000
    # test_source2
    # test_source2/fs2
    # test_source2/fs2/sub
    # test_source2/fs2/sub@test-20101111000000
    # test_source2/fs3
    # test_source2/fs3/sub
    # test_target1
    # test_target1/fs1
    # test_target1/fs1@test-20101111000000
    # test_target1/fs1/sub
    # test_target1/fs1/sub@test-20101111000000
    # test_target1/fs2
    # test_target1/fs2/sub
    # test_target1/fs2/sub@test-20101111000000
    # """)
    def test_clearrefres(self):

        #on zfs utils 0.6.x, -x isn't supported
        r=shelltest("zfs recv -x bla test >/dev/null </dev/zero; echo $?")
        if r=="\n2\n":
            self.skipTest("This zfs-userspace version doesn't support -x")

        r=shelltest("zfs set refreservation=1M test_source1/fs1")

        with patch('time.strftime', return_value="test-20101111000000"):
            self.assertFalse(ZfsAutobackup("test test_target1 --no-progress --verbose --clear-refreservation".split(" ")).run())

        r=shelltest("zfs get refreservation -r test_source1 test_source2 test_target1")
        self.assertMultiLineEqual(r,"""
NAME PROPERTY VALUE SOURCE
test_source1 refreservation none default
test_source1/fs1 refreservation 1M local
test_source1/fs1@test-20101111000000 refreservation - -
test_source1/fs1/sub refreservation none default
test_source1/fs1/sub@test-20101111000000 refreservation - -
test_source2 refreservation none default
test_source2/fs2 refreservation none default
test_source2/fs2/sub refreservation none default
test_source2/fs2/sub@test-20101111000000 refreservation - -
test_source2/fs3 refreservation none default
test_source2/fs3/sub refreservation none default
test_target1 refreservation none default
test_target1/test_source1 refreservation none default
test_target1/test_source1/fs1 refreservation none default
test_target1/test_source1/fs1@test-20101111000000 refreservation - -
test_target1/test_source1/fs1/sub refreservation none default
test_target1/test_source1/fs1/sub@test-20101111000000 refreservation - -
test_target1/test_source2 refreservation none default
test_target1/test_source2/fs2 refreservation none default
test_target1/test_source2/fs2/sub refreservation none default
test_target1/test_source2/fs2/sub@test-20101111000000 refreservation - -
""")
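
# ---------------------------------------------------------------------------
# Illustrative sketch (not from the original code): the skip logic above
# probes feature support by running the command and checking its exit code
# (zfs appears to exit with 2 on an unknown option). A generic version of
# that probe, assuming the same exit-code convention:
import subprocess

def zfs_recv_supports(option_args):
    """True if `zfs recv` accepts the given option on this platform.

    option_args is e.g. "-x bla" or "-o bla=1". The probe command still
    fails for other reasons, but only an unknown option yields exit code 2,
    which `echo $?` makes visible on stdout."""
    proc = subprocess.run(
        "zfs recv %s test >/dev/null </dev/zero; echo $?" % option_args,
        shell=True, capture_output=True, text=True)
    return proc.stdout.strip() != "2"
# ---------------------------------------------------------------------------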
    def test_clearmount(self):

        #on zfs utils 0.6.x, -o isn't supported
        r=shelltest("zfs recv -o bla=1 test >/dev/null </dev/zero; echo $?")
        if r=="\n2\n":
            self.skipTest("This zfs-userspace version doesn't support -o")

        with patch('time.strftime', return_value="test-20101111000000"):
            self.assertFalse(ZfsAutobackup("test test_target1 --no-progress --verbose --clear-mountpoint --debug".split(" ")).run())

        r=shelltest("zfs get canmount -r test_source1 test_source2 test_target1")
        self.assertMultiLineEqual(r,"""
NAME PROPERTY VALUE SOURCE
test_source1 canmount on default
test_source1/fs1 canmount on default
test_source1/fs1@test-20101111000000 canmount - -
test_source1/fs1/sub canmount on default
test_source1/fs1/sub@test-20101111000000 canmount - -
test_source2 canmount on default
test_source2/fs2 canmount on default
test_source2/fs2/sub canmount on default
test_source2/fs2/sub@test-20101111000000 canmount - -
test_source2/fs3 canmount on default
test_source2/fs3/sub canmount on default
test_target1 canmount on default
test_target1/test_source1 canmount on default
test_target1/test_source1/fs1 canmount noauto local
test_target1/test_source1/fs1@test-20101111000000 canmount - -
test_target1/test_source1/fs1/sub canmount noauto local
test_target1/test_source1/fs1/sub@test-20101111000000 canmount - -
test_target1/test_source2 canmount on default
test_target1/test_source2/fs2 canmount on default
test_target1/test_source2/fs2/sub canmount noauto local
test_target1/test_source2/fs2/sub@test-20101111000000 canmount - -
""")
    def test_rollback(self):

        #initial backup
        with patch('time.strftime', return_value="test-20101111000000"):
            self.assertFalse(ZfsAutobackup("test test_target1 --no-progress --verbose".split(" ")).run())

        #make a change on the target
        r=shelltest("zfs mount test_target1/test_source1/fs1")
        r=shelltest("touch /test_target1/test_source1/fs1/change.txt")

        with patch('time.strftime', return_value="test-20101111000001"):
            #should fail (target was modified)
            self.assertTrue(ZfsAutobackup("test test_target1 --no-progress --verbose --allow-empty".split(" ")).run())

        with patch('time.strftime', return_value="test-20101111000002"):
            #with rollback it should succeed
            self.assertFalse(ZfsAutobackup("test test_target1 --no-progress --verbose --allow-empty --rollback".split(" ")).run())
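
# ---------------------------------------------------------------------------
# Illustrative sketch (not from the original code): an incremental receive
# refuses to overwrite a target that was modified after the last common
# snapshot; --rollback presumably discards those local changes first,
# roughly like this:
import subprocess

def rollback_to(snapshot):
    """Discard all changes made to the dataset after the given snapshot.

    -r also destroys any snapshots that are newer than the rollback target."""
    subprocess.run(["zfs", "rollback", "-r", snapshot], check=True)

# example: rollback_to("test_target1/test_source1/fs1@test-20101111000000")
# ---------------------------------------------------------------------------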
    def test_destroyincompat(self):

        #initial backup
        with patch('time.strftime', return_value="test-20101111000000"):
            self.assertFalse(ZfsAutobackup("test test_target1 --no-progress --verbose".split(" ")).run())

        #add multiple compatible snapshots (written is still 0)
        r=shelltest("zfs snapshot test_target1/test_source1/fs1@compatible1")
        r=shelltest("zfs snapshot test_target1/test_source1/fs1@compatible2")

        with patch('time.strftime', return_value="test-20101111000001"):
            #should be ok, is compatible
            self.assertFalse(ZfsAutobackup("test test_target1 --no-progress --verbose --allow-empty".split(" ")).run())

        #add an incompatible snapshot by changing and snapshotting
        r=shelltest("zfs mount test_target1/test_source1/fs1")
        r=shelltest("touch /test_target1/test_source1/fs1/change.txt")
        r=shelltest("zfs snapshot test_target1/test_source1/fs1@incompatible1")

        with patch('time.strftime', return_value="test-20101111000002"):
            #--test should fail, now incompatible
            self.assertTrue(ZfsAutobackup("test test_target1 --no-progress --verbose --allow-empty --test".split(" ")).run())

        with patch('time.strftime', return_value="test-20101111000002"):
            #should fail, now incompatible
            self.assertTrue(ZfsAutobackup("test test_target1 --no-progress --verbose --allow-empty".split(" ")).run())

        with patch('time.strftime', return_value="test-20101111000003"):
            #--test should succeed by destroying incompatibles
            self.assertFalse(ZfsAutobackup("test test_target1 --no-progress --verbose --allow-empty --destroy-incompatible --test".split(" ")).run())

        with patch('time.strftime', return_value="test-20101111000003"):
            #should succeed by destroying incompatibles
            self.assertFalse(ZfsAutobackup("test test_target1 --no-progress --verbose --allow-empty --destroy-incompatible".split(" ")).run())

        r = shelltest("zfs list -H -o name -r -t all test_target1")
        self.assertMultiLineEqual(r, """
test_target1
test_target1/test_source1
test_target1/test_source1/fs1
test_target1/test_source1/fs1@test-20101111000000
test_target1/test_source1/fs1@compatible1
test_target1/test_source1/fs1@compatible2
test_target1/test_source1/fs1@test-20101111000001
test_target1/test_source1/fs1@test-20101111000002
test_target1/test_source1/fs1@test-20101111000003
test_target1/test_source1/fs1/sub
test_target1/test_source1/fs1/sub@test-20101111000000
test_target1/test_source1/fs1/sub@test-20101111000001
test_target1/test_source1/fs1/sub@test-20101111000002
test_target1/test_source1/fs1/sub@test-20101111000003
test_target1/test_source2
test_target1/test_source2/fs2
test_target1/test_source2/fs2/sub
test_target1/test_source2/fs2/sub@test-20101111000000
test_target1/test_source2/fs2/sub@test-20101111000001
test_target1/test_source2/fs2/sub@test-20101111000002
test_target1/test_source2/fs2/sub@test-20101111000003
""")
    def test_ssh(self):

        #test all ssh directions

        with patch('time.strftime', return_value="test-20101111000000"):
            self.assertFalse(ZfsAutobackup("test test_target1 --no-progress --verbose --allow-empty --ssh-source localhost --exclude-received".split(" ")).run())

        with patch('time.strftime', return_value="test-20101111000001"):
            self.assertFalse(ZfsAutobackup("test test_target1 --no-progress --verbose --allow-empty --ssh-target localhost --exclude-received".split(" ")).run())

        with patch('time.strftime', return_value="test-20101111000002"):
            self.assertFalse(ZfsAutobackup("test test_target1 --no-progress --verbose --allow-empty --ssh-source localhost --ssh-target localhost".split(" ")).run())

        r=shelltest("zfs list -H -o name -r -t all "+TEST_POOLS)
        self.assertMultiLineEqual(r,"""
test_source1
test_source1/fs1
test_source1/fs1@test-20101111000000
test_source1/fs1@test-20101111000001
test_source1/fs1@test-20101111000002
test_source1/fs1/sub
test_source1/fs1/sub@test-20101111000000
test_source1/fs1/sub@test-20101111000001
test_source1/fs1/sub@test-20101111000002
test_source2
test_source2/fs2
test_source2/fs2/sub
test_source2/fs2/sub@test-20101111000000
test_source2/fs2/sub@test-20101111000001
test_source2/fs2/sub@test-20101111000002
test_source2/fs3
test_source2/fs3/sub
test_target1
test_target1/test_source1
test_target1/test_source1/fs1
test_target1/test_source1/fs1@test-20101111000000
test_target1/test_source1/fs1@test-20101111000001
test_target1/test_source1/fs1@test-20101111000002
test_target1/test_source1/fs1/sub
test_target1/test_source1/fs1/sub@test-20101111000000
test_target1/test_source1/fs1/sub@test-20101111000001
test_target1/test_source1/fs1/sub@test-20101111000002
test_target1/test_source2
test_target1/test_source2/fs2
test_target1/test_source2/fs2/sub
test_target1/test_source2/fs2/sub@test-20101111000000
test_target1/test_source2/fs2/sub@test-20101111000001
test_target1/test_source2/fs2/sub@test-20101111000002
""")
    def test_minchange(self):

        #initial
        with patch('time.strftime', return_value="test-20101111000000"):
            self.assertFalse(ZfsAutobackup("test test_target1 --no-progress --verbose --min-change 100000".split(" ")).run())

        #make a small change; use umount/mount so the change is reflected immediately
        r=shelltest("zfs set compress=off test_source1")
        r=shelltest("touch /test_source1/fs1/change.txt")
        r=shelltest("zfs umount test_source1/fs1; zfs mount test_source1/fs1")

        #change too small, takes no snapshots
        with patch('time.strftime', return_value="test-20101111000001"):
            self.assertFalse(ZfsAutobackup("test test_target1 --no-progress --verbose --min-change 100000".split(" ")).run())

        #make a big change
        r=shelltest("dd if=/dev/zero of=/test_source1/fs1/change.txt bs=200000 count=1")
        r=shelltest("zfs umount test_source1/fs1; zfs mount test_source1/fs1")

        #bigger change, should take a snapshot
        with patch('time.strftime', return_value="test-20101111000002"):
            self.assertFalse(ZfsAutobackup("test test_target1 --no-progress --verbose --min-change 100000".split(" ")).run())

        r=shelltest("zfs list -H -o name -r -t all "+TEST_POOLS)
        self.assertMultiLineEqual(r,"""
test_source1
test_source1/fs1
test_source1/fs1@test-20101111000000
test_source1/fs1@test-20101111000002
test_source1/fs1/sub
test_source1/fs1/sub@test-20101111000000
test_source2
test_source2/fs2
test_source2/fs2/sub
test_source2/fs2/sub@test-20101111000000
test_source2/fs3
test_source2/fs3/sub
test_target1
test_target1/test_source1
test_target1/test_source1/fs1
test_target1/test_source1/fs1@test-20101111000000
test_target1/test_source1/fs1@test-20101111000002
test_target1/test_source1/fs1/sub
test_target1/test_source1/fs1/sub@test-20101111000000
test_target1/test_source2
test_target1/test_source2/fs2
test_target1/test_source2/fs2/sub
test_target1/test_source2/fs2/sub@test-20101111000000
""")
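
# ---------------------------------------------------------------------------
# Illustrative sketch (not from the original code): --min-change needs a
# cheap "bytes changed since the last snapshot" number; ZFS exposes exactly
# that as the `written` property, which is presumably what the tool reads
# (the umount/mount dance above hints at it, since `written` lags until
# data is flushed):
import subprocess

def bytes_written_since_last_snapshot(dataset):
    """Read the `written` property in machine-readable, exact-byte form."""
    out = subprocess.run(
        ["zfs", "get", "-H", "-p", "-o", "value", "written", dataset],
        capture_output=True, text=True, check=True)
    return int(out.stdout.strip())

# e.g. skip snapshotting when bytes_written_since_last_snapshot(ds) < 100000
# ---------------------------------------------------------------------------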
    def test_test(self):

        #initial
        with patch('time.strftime', return_value="test-20101111000000"):
            self.assertFalse(ZfsAutobackup("test test_target1 --no-progress --verbose --test".split(" ")).run())

        r=shelltest("zfs list -H -o name -r -t all "+TEST_POOLS)
        self.assertMultiLineEqual(r,"""
test_source1
test_source1/fs1
test_source1/fs1/sub
test_source2
test_source2/fs2
test_source2/fs2/sub
test_source2/fs3
test_source2/fs3/sub
test_target1
""")

        #actually make the initial backup
        with patch('time.strftime', return_value="test-20101111000001"):
            self.assertFalse(ZfsAutobackup("test test_target1 --no-progress --verbose".split(" ")).run())

        #test incremental
        with patch('time.strftime', return_value="test-20101111000002"):
            self.assertFalse(ZfsAutobackup("test test_target1 --no-progress --allow-empty --verbose --test".split(" ")).run())

        r=shelltest("zfs list -H -o name -r -t all "+TEST_POOLS)
        self.assertMultiLineEqual(r,"""
test_source1
test_source1/fs1
test_source1/fs1@test-20101111000001
test_source1/fs1/sub
test_source1/fs1/sub@test-20101111000001
test_source2
test_source2/fs2
test_source2/fs2/sub
test_source2/fs2/sub@test-20101111000001
test_source2/fs3
test_source2/fs3/sub
test_target1
test_target1/test_source1
test_target1/test_source1/fs1
test_target1/test_source1/fs1@test-20101111000001
test_target1/test_source1/fs1/sub
test_target1/test_source1/fs1/sub@test-20101111000001
test_target1/test_source2
test_target1/test_source2/fs2
test_target1/test_source2/fs2/sub
test_target1/test_source2/fs2/sub@test-20101111000001
""")
    def test_migrate(self):
        """test migration from other snapshotting systems: zfs-autobackup should be able to continue from any common snapshot, not just its own."""

        shelltest("zfs snapshot test_source1/fs1@migrate1")
        shelltest("zfs create test_target1/test_source1")
        shelltest("zfs send test_source1/fs1@migrate1| zfs recv test_target1/test_source1/fs1")

        with patch('time.strftime', return_value="test-20101111000000"):
            self.assertFalse(ZfsAutobackup("test test_target1 --no-progress --verbose".split(" ")).run())

        r=shelltest("zfs list -H -o name -r -t all "+TEST_POOLS)
        self.assertMultiLineEqual(r,"""
test_source1
test_source1/fs1
test_source1/fs1@migrate1
test_source1/fs1@test-20101111000000
test_source1/fs1/sub
test_source1/fs1/sub@test-20101111000000
test_source2
test_source2/fs2
test_source2/fs2/sub
test_source2/fs2/sub@test-20101111000000
test_source2/fs3
test_source2/fs3/sub
test_target1
test_target1/test_source1
test_target1/test_source1/fs1
test_target1/test_source1/fs1@migrate1
test_target1/test_source1/fs1@test-20101111000000
test_target1/test_source1/fs1/sub
test_target1/test_source1/fs1/sub@test-20101111000000
test_target1/test_source2
test_target1/test_source2/fs2
test_target1/test_source2/fs2/sub
test_target1/test_source2/fs2/sub@test-20101111000000
""")
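
# ---------------------------------------------------------------------------
# Illustrative sketch (not from the original code): test_migrate relies on the
# tool resuming from *any* common snapshot, not only its own "test-..." ones.
# Finding that base is simple set logic over snapshot names:
def latest_common_snapshot(source_snaps, target_snaps):
    """Return the newest snapshot name both sides share, or None.

    Both arguments are creation-ordered name lists as produced by
    `zfs list -H -o name -t snapshot`; the last common entry in the
    source's list is the newest usable incremental base."""
    target_names = {s.split("@", 1)[1] for s in target_snaps}
    common = [s for s in source_snaps if s.split("@", 1)[1] in target_names]
    return common[-1] if common else None

# latest_common_snapshot(
#     ["src/fs1@migrate1", "src/fs1@test-20101111000000"],
#     ["tgt/src/fs1@migrate1"]) -> "src/fs1@migrate1"
# ---------------------------------------------------------------------------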
    def test_keep0(self):
        """test that keep-source=0 and keep-target=0 don't delete the common snapshot and break the backup"""

        with patch('time.strftime', return_value="test-20101111000000"):
            self.assertFalse(ZfsAutobackup("test test_target1 --no-progress --verbose --keep-source=0 --keep-target=0".split(" ")).run())

        #make snapshot 1, shouldn't delete 0
        with patch('time.strftime', return_value="test-20101111000001"):
            self.assertFalse(ZfsAutobackup("test --no-progress --verbose --keep-source=0 --keep-target=0 --allow-empty".split(" ")).run())

        #make snapshot 2, shouldn't delete 0 since it has holds, but will delete 1 since it has no holds
        with patch('time.strftime', return_value="test-20101111000002"):
            self.assertFalse(ZfsAutobackup("test --no-progress --verbose --keep-source=0 --keep-target=0 --allow-empty".split(" ")).run())

        r = shelltest("zfs list -H -o name -r -t all " + TEST_POOLS)
        self.assertMultiLineEqual(r, """
test_source1
test_source1/fs1
test_source1/fs1@test-20101111000000
test_source1/fs1@test-20101111000002
test_source1/fs1/sub
test_source1/fs1/sub@test-20101111000000
test_source1/fs1/sub@test-20101111000002
test_source2
test_source2/fs2
test_source2/fs2/sub
test_source2/fs2/sub@test-20101111000000
test_source2/fs2/sub@test-20101111000002
test_source2/fs3
test_source2/fs3/sub
test_target1
test_target1/test_source1
test_target1/test_source1/fs1
test_target1/test_source1/fs1@test-20101111000000
test_target1/test_source1/fs1/sub
test_target1/test_source1/fs1/sub@test-20101111000000
test_target1/test_source2
test_target1/test_source2/fs2
test_target1/test_source2/fs2/sub
test_target1/test_source2/fs2/sub@test-20101111000000
""")

        #make another backup, but with --no-holds; we should naturally end up with only number 3
        with patch('time.strftime', return_value="test-20101111000003"):
            self.assertFalse(ZfsAutobackup("test test_target1 --no-progress --verbose --keep-source=0 --keep-target=0 --no-holds --allow-empty".split(" ")).run())

        r = shelltest("zfs list -H -o name -r -t all " + TEST_POOLS)
        self.assertMultiLineEqual(r, """
test_source1
test_source1/fs1
test_source1/fs1@test-20101111000003
test_source1/fs1/sub
test_source1/fs1/sub@test-20101111000003
test_source2
test_source2/fs2
test_source2/fs2/sub
test_source2/fs2/sub@test-20101111000003
test_source2/fs3
test_source2/fs3/sub
test_target1
test_target1/test_source1
test_target1/test_source1/fs1
test_target1/test_source1/fs1@test-20101111000003
test_target1/test_source1/fs1/sub
test_target1/test_source1/fs1/sub@test-20101111000003
test_target1/test_source2
test_target1/test_source2/fs2
test_target1/test_source2/fs2/sub
test_target1/test_source2/fs2/sub@test-20101111000003
""")

        # run snapshot-only mode for number 4: since we used --no-holds, it will delete 3 on the source, breaking the backup
        with patch('time.strftime', return_value="test-20101111000004"):
            self.assertFalse(ZfsAutobackup("test --no-progress --verbose --keep-source=0 --keep-target=0 --allow-empty".split(" ")).run())

        r = shelltest("zfs list -H -o name -r -t all " + TEST_POOLS)
        self.assertMultiLineEqual(r, """
test_source1
test_source1/fs1
test_source1/fs1@test-20101111000004
test_source1/fs1/sub
test_source1/fs1/sub@test-20101111000004
test_source2
test_source2/fs2
test_source2/fs2/sub
test_source2/fs2/sub@test-20101111000004
test_source2/fs3
test_source2/fs3/sub
test_target1
test_target1/test_source1
test_target1/test_source1/fs1
test_target1/test_source1/fs1@test-20101111000003
test_target1/test_source1/fs1/sub
test_target1/test_source1/fs1/sub@test-20101111000003
test_target1/test_source2
test_target1/test_source2/fs2
test_target1/test_source2/fs2/sub
test_target1/test_source2/fs2/sub@test-20101111000003
""")
    def test_progress(self):

        r=shelltest("dd if=/dev/zero of=/test_source1/data.txt bs=200000 count=1")
        r = shelltest("zfs snapshot test_source1@test")

        l=LogConsole(show_verbose=True, show_debug=False, color=False)
        n=ZfsNode(snapshot_time_format="bla", hold_name="bla", logger=l)
        d=ZfsDataset(n,"test_source1@test")

        sp=d.send_pipe([], prev_snapshot=None, resume_token=None, show_progress=True, raw=False, send_pipes=[], send_properties=True, write_embedded=True, zfs_compressed=True)

        with OutputIO() as buf:
            with redirect_stderr(buf):
                try:
                    n.run(["sleep", "2"], inp=sp)
                except:
                    pass

            print(buf.getvalue())
            # correct message?
            self.assertRegex(buf.getvalue(),".*>>> .*minutes left.*")

81  tests/test_zfsautobackup31.py  Normal file
@@ -0,0 +1,81 @@
from basetest import *
import time

class TestZfsAutobackup31(unittest2.TestCase):
    """various new 3.1 features"""

    def setUp(self):
        prepare_zpools()
        self.longMessage=True

    def test_no_thinning(self):

        with patch('time.strftime', return_value="test-20101111000000"):
            self.assertFalse(ZfsAutobackup("test test_target1 --no-progress --verbose --allow-empty".split(" ")).run())

        with patch('time.strftime', return_value="test-20101111000001"):
            self.assertFalse(ZfsAutobackup("test test_target1 --no-progress --verbose --allow-empty --keep-target=0 --keep-source=0 --no-thinning".split(" ")).run())

        r=shelltest("zfs list -H -o name -r -t all "+TEST_POOLS)
        self.assertMultiLineEqual(r,"""
test_source1
test_source1/fs1
test_source1/fs1@test-20101111000000
test_source1/fs1@test-20101111000001
test_source1/fs1/sub
test_source1/fs1/sub@test-20101111000000
test_source1/fs1/sub@test-20101111000001
test_source2
test_source2/fs2
test_source2/fs2/sub
test_source2/fs2/sub@test-20101111000000
test_source2/fs2/sub@test-20101111000001
test_source2/fs3
test_source2/fs3/sub
test_target1
test_target1/test_source1
test_target1/test_source1/fs1
test_target1/test_source1/fs1@test-20101111000000
test_target1/test_source1/fs1@test-20101111000001
test_target1/test_source1/fs1/sub
test_target1/test_source1/fs1/sub@test-20101111000000
test_target1/test_source1/fs1/sub@test-20101111000001
test_target1/test_source2
test_target1/test_source2/fs2
test_target1/test_source2/fs2/sub
test_target1/test_source2/fs2/sub@test-20101111000000
test_target1/test_source2/fs2/sub@test-20101111000001
""")
    def test_re_replication(self):
        """test re-replication of something that's already a backup (new in v3.1-beta5)"""

        shelltest("zfs create test_target1/a")
        shelltest("zfs create test_target1/b")

        with patch('time.strftime', return_value="test-20101111000000"):
            self.assertFalse(ZfsAutobackup("test test_target1/a --no-progress --verbose --debug".split(" ")).run())

        with patch('time.strftime', return_value="test-20101111000001"):
            self.assertFalse(ZfsAutobackup("test test_target1/b --no-progress --verbose".split(" ")).run())

        r=shelltest("zfs list -H -o name -r -t snapshot test_target1")
        #NOTE: it won't back up test_target1/a/test_source2/fs2/sub to test_target1/b since it doesn't have the autobackup property anymore.
        self.assertMultiLineEqual(r,"""
test_target1/a/test_source1/fs1@test-20101111000000
test_target1/a/test_source1/fs1/sub@test-20101111000000
test_target1/a/test_source2/fs2/sub@test-20101111000000
test_target1/b/test_source1/fs1@test-20101111000000
test_target1/b/test_source1/fs1/sub@test-20101111000000
test_target1/b/test_source2/fs2/sub@test-20101111000000
test_target1/b/test_target1/a/test_source1/fs1@test-20101111000000
test_target1/b/test_target1/a/test_source1/fs1/sub@test-20101111000000
""")
    def test_zfs_compressed(self):

        with patch('time.strftime', return_value="test-20101111000000"):
            self.assertFalse(
                ZfsAutobackup("test test_target1 --no-progress --verbose --debug --zfs-compressed".split(" ")).run())

176  tests/test_zfsnode.py  Normal file
@@ -0,0 +1,176 @@
from basetest import *
from zfs_autobackup.LogStub import LogStub
from zfs_autobackup.ExecuteNode import ExecuteError


class TestZfsNode(unittest2.TestCase):

    def setUp(self):
        prepare_zpools()
        # return super().setUp()

    def test_consistent_snapshot(self):
        logger = LogStub()
        description = "[Source]"
        node = ZfsNode(snapshot_time_format="test-%Y%m%d%H%M%S", hold_name="zfs_autobackup:test", logger=logger, description=description)

        with self.subTest("first snapshot"):
            node.consistent_snapshot(node.selected_datasets(property_name="autobackup:test",exclude_paths=[], exclude_received=False, exclude_unchanged=False, min_change=200000), "test-20101111000001", 100000)
            r = shelltest("zfs list -H -o name -r -t all " + TEST_POOLS)
            self.assertEqual(r, """
test_source1
test_source1/fs1
test_source1/fs1@test-20101111000001
test_source1/fs1/sub
test_source1/fs1/sub@test-20101111000001
test_source2
test_source2/fs2
test_source2/fs2/sub
test_source2/fs2/sub@test-20101111000001
test_source2/fs3
test_source2/fs3/sub
test_target1
""")

        with self.subTest("second snapshot, no changes, no snapshot"):
            node.consistent_snapshot(node.selected_datasets(property_name="autobackup:test",exclude_paths=[], exclude_received=False, exclude_unchanged=False, min_change=200000), "test-20101111000002", 1)
            r = shelltest("zfs list -H -o name -r -t all " + TEST_POOLS)
            self.assertEqual(r, """
test_source1
test_source1/fs1
test_source1/fs1@test-20101111000001
test_source1/fs1/sub
test_source1/fs1/sub@test-20101111000001
test_source2
test_source2/fs2
test_source2/fs2/sub
test_source2/fs2/sub@test-20101111000001
test_source2/fs3
test_source2/fs3/sub
test_target1
""")

        with self.subTest("second snapshot, no changes, empty snapshot"):
            node.consistent_snapshot(node.selected_datasets(property_name="autobackup:test", exclude_paths=[], exclude_received=False, exclude_unchanged=False, min_change=200000), "test-20101111000002", 0)
            r = shelltest("zfs list -H -o name -r -t all " + TEST_POOLS)
            self.assertEqual(r, """
test_source1
test_source1/fs1
test_source1/fs1@test-20101111000001
test_source1/fs1@test-20101111000002
test_source1/fs1/sub
test_source1/fs1/sub@test-20101111000001
test_source1/fs1/sub@test-20101111000002
test_source2
test_source2/fs2
test_source2/fs2/sub
test_source2/fs2/sub@test-20101111000001
test_source2/fs2/sub@test-20101111000002
test_source2/fs3
test_source2/fs3/sub
test_target1
""")
    def test_consistent_snapshot_prepostcmds(self):
        logger = LogStub()
        description = "[Source]"
        node = ZfsNode(snapshot_time_format="test", hold_name="test", logger=logger, description=description, debug_output=True)

        with self.subTest("Test if all cmds are executed correctly (no failures)"):
            with OutputIO() as buf:
                with redirect_stdout(buf):
                    node.consistent_snapshot(node.selected_datasets(property_name="autobackup:test", exclude_paths=[], exclude_received=False, exclude_unchanged=False, min_change=1), "test-1",
                                             0,
                                             pre_snapshot_cmds=["echo pre1", "echo pre2"],
                                             post_snapshot_cmds=["echo post1 >&2", "echo post2 >&2"]
                                             )

                self.assertIn("STDOUT > pre1", buf.getvalue())
                self.assertIn("STDOUT > pre2", buf.getvalue())
                self.assertIn("STDOUT > post1", buf.getvalue())
                self.assertIn("STDOUT > post2", buf.getvalue())

        with self.subTest("Failure in the middle: only pre1 and both post1 and post2 should be executed, no snapshot should be attempted"):
            with OutputIO() as buf:
                with redirect_stdout(buf):
                    with self.assertRaises(ExecuteError):
                        node.consistent_snapshot(node.selected_datasets(property_name="autobackup:test", exclude_paths=[], exclude_received=False, exclude_unchanged=False, min_change=1), "test-1",
                                                 0,
                                                 pre_snapshot_cmds=["echo pre1", "false", "echo pre2"],
                                                 post_snapshot_cmds=["echo post1", "false", "echo post2"]
                                                 )

                print(buf.getvalue())
                self.assertIn("STDOUT > pre1", buf.getvalue())
                self.assertNotIn("STDOUT > pre2", buf.getvalue())
                self.assertIn("STDOUT > post1", buf.getvalue())
                self.assertIn("STDOUT > post2", buf.getvalue())

        with self.subTest("Snapshot fails"):
            with OutputIO() as buf:
                with redirect_stdout(buf):
                    with self.assertRaises(ExecuteError):
                        #same snapshot name as before, so it fails
                        node.consistent_snapshot(node.selected_datasets(property_name="autobackup:test", exclude_paths=[], exclude_received=False, exclude_unchanged=False, min_change=1), "test-1",
                                                 0,
                                                 pre_snapshot_cmds=["echo pre1", "echo pre2"],
                                                 post_snapshot_cmds=["echo post1", "echo post2"]
                                                 )

                print(buf.getvalue())
                self.assertIn("STDOUT > pre1", buf.getvalue())
                self.assertIn("STDOUT > pre2", buf.getvalue())
                self.assertIn("STDOUT > post1", buf.getvalue())
                self.assertIn("STDOUT > post2", buf.getvalue())
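
# ---------------------------------------------------------------------------
# Illustrative sketch (not from the original code): the subtests above pin
# down an important guarantee: a failing pre command skips the remaining pre
# commands and the snapshot, yet *all* post commands still run (so e.g. a
# database unfreeze always happens). That is the classic try/finally shape;
# the real implementation may chain errors differently:
def snapshot_with_hooks(pre_cmds, post_cmds, run_cmd, take_snapshot):
    """Run pre commands, snapshot, then always run every post command.

    run_cmd and take_snapshot are injected callables."""
    try:
        for cmd in pre_cmds:
            run_cmd(cmd)          # stops at the first failing pre command
        take_snapshot()
    finally:
        first_error = None
        for cmd in post_cmds:
            try:
                run_cmd(cmd)      # a failing post command doesn't stop the rest
            except Exception as e:
                first_error = first_error or e
        if first_error:
            raise first_error
# ---------------------------------------------------------------------------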
    def test_getselected(self):

        # should be excluded by property
        shelltest("zfs create test_source1/fs1/subexcluded")
        shelltest("zfs set autobackup:test=false test_source1/fs1/subexcluded")

        # should be excluded by being unchanged
        shelltest("zfs create test_source1/fs1/unchanged")
        shelltest("zfs snapshot test_source1/fs1/unchanged@somesnapshot")

        logger = LogStub()
        description = "[Source]"
        node = ZfsNode(snapshot_time_format="test-%Y%m%d%H%M%S", hold_name="zfs_autobackup:test", logger=logger, description=description)
        s = pformat(node.selected_datasets(property_name="autobackup:test", exclude_paths=[], exclude_received=False, exclude_unchanged=True, min_change=1))
        print(s)

        # basics
        self.assertEqual(s, """[(local): test_source1/fs1,
 (local): test_source1/fs1/sub,
 (local): test_source2/fs2/sub]""")
    def test_validcommand(self):
        logger = LogStub()
        description = "[Source]"
        node = ZfsNode(snapshot_time_format="test-%Y%m%d%H%M%S", hold_name="zfs_autobackup:test", logger=logger, description=description)

        with self.subTest("test invalid option"):
            self.assertFalse(node.valid_command(["zfs", "send", "--invalid-option", "nonexisting"]))
        with self.subTest("test valid option"):
            self.assertTrue(node.valid_command(["zfs", "send", "-v", "nonexisting"]))

    def test_supportedsendoptions(self):
        logger = LogStub()
        description = "[Source]"
        node = ZfsNode(snapshot_time_format="test-%Y%m%d%H%M%S", hold_name="zfs_autobackup:test", logger=logger, description=description)
        # -D is probably always supported
        self.assertGreater(len(node.supported_send_options), 0)

    def test_supportedrecvoptions(self):
        logger = LogStub()
        description = "[Source]"
        # NOTE: this could hang via ssh if we don't close filehandles properly (which was a previous bug)
        node = ZfsNode(snapshot_time_format="test-%Y%m%d%H%M%S", hold_name="zfs_autobackup:test", logger=logger, description=description, ssh_to='localhost')
        self.assertIsInstance(node.supported_recv_options, list)


if __name__ == '__main__':
    unittest.main()

741  zfs_autobackup  (deleted)
@@ -1,741 +0,0 @@
#!/usr/bin/env python2
# -*- coding: utf8 -*-
from __future__ import print_function
import os
import sys
import re
import traceback
import subprocess
import pprint
import time


def error(txt):
    print(txt, file=sys.stderr)


def verbose(txt):
    if args.verbose:
        print(txt)


def debug(txt):
    if args.debug:
        print(txt)
"""run a command. specifiy ssh user@host to run remotely"""
|
|
||||||
def run(cmd, input=None, ssh_to="local", tab_split=False, valid_exitcodes=[ 0 ], test=False):
|
|
||||||
|
|
||||||
encoded_cmd=[]
|
|
||||||
|
|
||||||
|
|
||||||
#use ssh?
|
|
||||||
if ssh_to != "local":
|
|
||||||
encoded_cmd.extend(["ssh", ssh_to])
|
|
||||||
|
|
||||||
|
|
||||||
#make sure the command gets all the data in utf8 format:
|
|
||||||
#(this is neccesary if LC_ALL=en_US.utf8 is not set in the environment)
|
|
||||||
for arg in cmd:
|
|
||||||
#add single quotes for remote commands to support spaces and other wierd stuff (remote commands are executed in a shell)
|
|
||||||
encoded_cmd.append( ("'"+arg+"'").encode('utf-8'))
|
|
||||||
|
|
||||||
else:
|
|
||||||
for arg in cmd:
|
|
||||||
encoded_cmd.append(arg.encode('utf-8'))
|
|
||||||
|
|
||||||
|
|
||||||
#the accurate way of displaying it whould be: print encoded_cmd
|
|
||||||
#However, we use the more human-readable way, but this is not always properly escaped!
|
|
||||||
#(most of the time it should be copypastable however.)
|
|
||||||
debug_txt="# "+" ".join(encoded_cmd)
|
|
||||||
|
|
||||||
if test:
|
|
||||||
debug("[TEST] "+debug_txt)
|
|
||||||
else:
|
|
||||||
debug(debug_txt)
|
|
||||||
|
|
||||||
if input:
|
|
||||||
debug("INPUT:\n"+input.rstrip())
|
|
||||||
stdin=subprocess.PIPE
|
|
||||||
else:
|
|
||||||
stdin=None
|
|
||||||
|
|
||||||
if test:
|
|
||||||
return
|
|
||||||
|
|
||||||
p=subprocess.Popen(encoded_cmd, env=os.environ, stdout=subprocess.PIPE, stdin=stdin)
|
|
||||||
output=p.communicate(input=input)[0]
|
|
||||||
if p.returncode not in valid_exitcodes:
|
|
||||||
raise(subprocess.CalledProcessError(p.returncode, encoded_cmd))
|
|
||||||
|
|
||||||
lines=output.splitlines()
|
|
||||||
if not tab_split:
|
|
||||||
return(lines)
|
|
||||||
else:
|
|
||||||
ret=[]
|
|
||||||
for line in lines:
|
|
||||||
ret.append(line.split("\t"))
|
|
||||||
return(ret)
|
|
||||||
|
|
||||||
|
|
||||||
"""determine filesystems that should be backupped by looking at the special autobackup-property"""
|
|
||||||
def zfs_get_selected_filesystems(ssh_to, backup_name):
|
|
||||||
#get all source filesystems that have the backup property
|
|
||||||
source_filesystems=run(ssh_to=ssh_to, tab_split=True, cmd=[
|
|
||||||
"zfs", "get", "-t", "volume,filesystem", "-o", "name,value,source", "-s", "local,inherited", "-H", "autobackup:"+backup_name
|
|
||||||
])
|
|
||||||
|
|
||||||
#determine filesystems that should be actually backupped
|
|
||||||
selected_filesystems=[]
|
|
||||||
direct_filesystems=[]
|
|
||||||
for source_filesystem in source_filesystems:
|
|
||||||
(name,value,source)=source_filesystem
|
|
||||||
if value=="false":
|
|
||||||
verbose("Ignored : {0} (disabled)".format(name))
|
|
||||||
|
|
||||||
else:
|
|
||||||
if source=="local" and ( value=="true" or value=="child"):
|
|
||||||
direct_filesystems.append(name)
|
|
||||||
|
|
||||||
if source=="local" and value=="true":
|
|
||||||
selected_filesystems.append(name)
|
|
||||||
verbose("Selected: {0} (direct selection)".format(name))
|
|
||||||
elif source.find("inherited from ")==0 and (value=="true" or value=="child"):
|
|
||||||
inherited_from=re.sub("^inherited from ", "", source)
|
|
||||||
if inherited_from in direct_filesystems:
|
|
||||||
selected_filesystems.append(name)
|
|
||||||
verbose("Selected: {0} (inherited selection)".format(name))
|
|
||||||
else:
|
|
||||||
verbose("Ignored : {0} (already a backup)".format(name))
|
|
||||||
else:
|
|
||||||
verbose("Ignored : {0} (only childs)".format(name))
|
|
||||||
|
|
||||||
return(selected_filesystems)
|
|
||||||
|
|
||||||
|
|
||||||
"""determine filesystems that can be resumed via receive_resume_token"""
|
|
||||||
def zfs_get_resumable_filesystems(ssh_to, filesystems):
|
|
||||||
|
|
||||||
cmd=[ "zfs", "get", "-t", "volume,filesystem", "-o", "name,value", "-H", "receive_resume_token" ]
|
|
||||||
cmd.extend(filesystems)
|
|
||||||
|
|
||||||
#TODO: get rid of ugly errors for non-existing target filesystems
|
|
||||||
resumable_filesystems=run(ssh_to=ssh_to, tab_split=True, cmd=cmd, valid_exitcodes= [ 0,1 ] )
|
|
||||||
|
|
||||||
ret={}
|
|
||||||
|
|
||||||
for (resumable_filesystem,token) in resumable_filesystems:
|
|
||||||
if token!='-':
|
|
||||||
ret[resumable_filesystem]=token
|
|
||||||
|
|
||||||
return(ret)
|
|
||||||
|
|
||||||
|
|
||||||
"""deferred destroy list of snapshots (in @format). """
|
|
||||||
def zfs_destroy_snapshots(ssh_to, snapshots):
|
|
||||||
|
|
||||||
#zfs can only destroy one filesystem at once so we use xargs and stdin
|
|
||||||
run(ssh_to=ssh_to, test=args.test, input="\0".join(snapshots), cmd=
|
|
||||||
[ "xargs", "-0", "-n", "1", "zfs", "destroy", "-d" ]
|
|
||||||
)
|
|
||||||
|
|
||||||
"""destroy list of filesystems """
|
|
||||||
def zfs_destroy(ssh_to, filesystems, recursive=False):
|
|
||||||
|
|
||||||
cmd=[ "xargs", "-0", "-n", "1", "zfs", "destroy" ]
|
|
||||||
|
|
||||||
if recursive:
|
|
||||||
cmd.append("-r")
|
|
||||||
|
|
||||||
#zfs can only destroy one filesystem at once so we use xargs and stdin
|
|
||||||
run(ssh_to=ssh_to, test=args.test, input="\0".join(filesystems), cmd=cmd)
|
|
||||||
|
|
||||||
#simulate snapshots for --test option
|
|
||||||
test_snapshots={}
|
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
"""create snapshot on multiple filesystems at once (atomicly per pool)"""
|
|
||||||
def zfs_create_snapshot(ssh_to, filesystems, snapshot):
|
|
||||||
|
|
||||||
|
|
||||||
#collect per pool, zfs can only take atomic snapshots per pool
|
|
||||||
pools={}
|
|
||||||
for filesystem in filesystems:
|
|
||||||
pool=filesystem.split('/')[0]
|
|
||||||
if pool not in pools:
|
|
||||||
pools[pool]=[]
|
|
||||||
pools[pool].append(filesystem)
|
|
||||||
|
|
||||||
for pool in pools:
|
|
||||||
cmd=[ "zfs", "snapshot" ]
|
|
||||||
for filesystem in pools[pool]:
|
|
||||||
cmd.append(filesystem+"@"+snapshot)
|
|
||||||
|
|
||||||
#in testmode we dont actually make changes, so keep them in a list to simulate
|
|
||||||
if args.test:
|
|
||||||
if not ssh_to in test_snapshots:
|
|
||||||
test_snapshots[ssh_to]={}
|
|
||||||
if not filesystem in test_snapshots[ssh_to]:
|
|
||||||
test_snapshots[ssh_to][filesystem]=[]
|
|
||||||
test_snapshots[ssh_to][filesystem].append(snapshot)
|
|
||||||
|
|
||||||
run(ssh_to=ssh_to, tab_split=False, cmd=cmd, test=args.test)
|
|
||||||
|
|
||||||
|
|
||||||
"""get names of all snapshots for specified filesystems belonging to backup_name
|
|
||||||
|
|
||||||
return[filesystem_name]=[ "snashot1", "snapshot2", ... ]
|
|
||||||
"""
|
|
||||||
def zfs_get_snapshots(ssh_to, filesystems, backup_name):
|
|
||||||
|
|
||||||
ret={}
|
|
||||||
|
|
||||||
if filesystems:
|
|
||||||
#TODO: get rid of ugly errors for non-existing target filesystems
|
|
||||||
cmd=[
|
|
||||||
"zfs", "list", "-d", "1", "-r", "-t" ,"snapshot", "-H", "-o", "name"
|
|
||||||
]
|
|
||||||
cmd.extend(filesystems)
|
|
||||||
|
|
||||||
snapshots=run(ssh_to=ssh_to, tab_split=False, cmd=cmd, valid_exitcodes=[ 0,1 ])
|
|
||||||
|
|
||||||
|
|
||||||
for snapshot in snapshots:
|
|
||||||
(filesystem, snapshot_name)=snapshot.split("@")
|
|
||||||
if re.match("^"+backup_name+"-[0-9]*$", snapshot_name):
|
|
||||||
if not filesystem in ret:
|
|
||||||
ret[filesystem]=[]
|
|
||||||
ret[filesystem].append(snapshot_name)
|
|
||||||
|
|
||||||
#also add any test-snapshots that where created with --test mode
|
|
||||||
if args.test:
|
|
||||||
if ssh_to in test_snapshots:
|
|
||||||
for filesystem in filesystems:
|
|
||||||
if filesystem in test_snapshots[ssh_to]:
|
|
||||||
if not filesystem in ret:
|
|
||||||
ret[filesystem]=[]
|
|
||||||
ret[filesystem].extend(test_snapshots[ssh_to][filesystem])
|
|
||||||
|
|
||||||
return(ret)
|
|
||||||
|
|
||||||
|
|
||||||
def default_tag():
|
|
||||||
return("zfs_autobackup:"+args.backup_name)
|
|
||||||
|
|
||||||
"""hold a snapshot so it cant be destroyed accidently by admin or other processes"""
|
|
||||||
def zfs_hold_snapshot(ssh_to, snapshot, tag=None):
|
|
||||||
cmd=[
|
|
||||||
"zfs", "hold", tag or default_tag(), snapshot
|
|
||||||
]
|
|
||||||
|
|
||||||
run(ssh_to=ssh_to, test=args.test, tab_split=False, cmd=cmd, valid_exitcodes=[ 0, 1 ])
|
|
||||||
|
|
||||||
|
|
||||||
"""release a snapshot"""
|
|
||||||
def zfs_release_snapshot(ssh_to, snapshot, tag=None):
|
|
||||||
cmd=[
|
|
||||||
"zfs", "release", tag or default_tag(), snapshot
|
|
||||||
]
|
|
||||||
|
|
||||||
run(ssh_to=ssh_to, test=args.test, tab_split=False, cmd=cmd, valid_exitcodes=[ 0, 1 ])
|
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
"""transfer a zfs snapshot from source to target. both can be either local or via ssh.
|
|
||||||
|
|
||||||
|
|
||||||
TODO:
|
|
||||||
|
|
||||||
(parially implemented, local buffer is a bit more annoying to do)
|
|
||||||
|
|
||||||
buffering: specify buffer_size to use mbuffer (or alike) to apply buffering where neccesary
|
|
||||||
|
|
||||||
local to local:
|
|
||||||
local send -> local buffer -> local receive
|
|
||||||
|
|
||||||
local to remote and remote to local:
|
|
||||||
local send -> local buffer -> ssh -> remote buffer -> remote receive
|
|
||||||
remote send -> remote buffer -> ssh -> local buffer -> local receive
|
|
||||||
|
|
||||||
remote to remote:
|
|
||||||
remote send -> remote buffer -> ssh -> local buffer -> ssh -> remote buffer -> remote receive
|
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
"""
|
|
||||||
def zfs_transfer(ssh_source, source_filesystem, first_snapshot, second_snapshot,
|
|
||||||
ssh_target, target_filesystem, resume_token=None, buffer_size=None):
|
|
||||||
|
|
||||||
#### build source command
|
|
||||||
source_cmd=[]
|
|
||||||
|
|
||||||
if ssh_source != "local":
|
|
||||||
source_cmd.extend([ "ssh", ssh_source ])
|
|
||||||
|
|
||||||
source_cmd.extend(["zfs", "send", ])
|
|
||||||
|
|
||||||
#all kind of performance options:
|
|
||||||
source_cmd.append("-L") # large block support
|
|
||||||
source_cmd.append("-e") # WRITE_EMBEDDED, more compact stream
|
|
||||||
source_cmd.append("-c") # use compressed WRITE records
|
|
||||||
if not args.resume:
|
|
||||||
source_cmd.append("-D") # dedupped stream, sends less duplicate data
|
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
#only verbose in debug mode, lots of output
|
|
||||||
if args.debug :
|
|
||||||
source_cmd.append("-v")
|
|
||||||
|
|
||||||
|
|
||||||
if not first_snapshot:
|
|
||||||
txt="Initial transfer of "+source_filesystem+" snapshot "+second_snapshot
|
|
||||||
else:
|
|
||||||
txt="Incremental transfer of "+source_filesystem+" between snapshots "+first_snapshot+"..."+second_snapshot
|
|
||||||
|
|
||||||
if resume_token:
|
|
||||||
source_cmd.extend([ "-t", resume_token ])
|
|
||||||
verbose("RESUMING "+txt)
|
|
||||||
|
|
||||||
else:
|
|
||||||
source_cmd.append("-p")
|
|
||||||
|
|
||||||
if first_snapshot:
|
|
||||||
source_cmd.extend([ "-i", first_snapshot ])
|
|
||||||
|
|
||||||
if ssh_source != "local":
|
|
||||||
source_cmd.append("'" + source_filesystem + "@" + second_snapshot + "'")
|
|
||||||
else:
|
|
||||||
source_cmd.append(source_filesystem + "@" + second_snapshot)
|
|
||||||
|
|
||||||
verbose(txt)
|
|
||||||
|
|
||||||
if args.buffer and args.ssh_source!="local":
|
|
||||||
source_cmd.append("|mbuffer -m {}".format(args.buffer))
|
|
||||||
|
|
||||||
|
|
||||||
#### build target command
|
|
||||||
target_cmd=[]
|
|
||||||
|
|
||||||
if ssh_target != "local":
|
|
||||||
target_cmd.extend([ "ssh", ssh_target ])
|
|
||||||
|
|
||||||
target_cmd.extend(["zfs", "recv", "-u" ])
|
|
||||||
|
|
||||||
# filter certain properties on receive (usefull for linux->freebsd in some cases)
|
|
||||||
if args.filter_properties:
|
|
||||||
for filter_property in args.filter_properties:
|
|
||||||
target_cmd.extend([ "-x" , filter_property ])
|
|
||||||
|
|
||||||
#also verbose in --verbose mode so we can see the transfer speed when its completed
|
|
||||||
if args.verbose or args.debug:
|
|
||||||
target_cmd.append("-v")
|
|
||||||
|
|
||||||
if args.resume:
|
|
||||||
target_cmd.append("-s")
|
|
||||||
|
|
||||||
|
|
||||||
if ssh_target!="local":
|
|
||||||
target_cmd.append("'" + target_filesystem + "'")
|
|
||||||
else:
|
|
||||||
target_cmd.append(target_filesystem)
|
|
||||||
|
|
||||||
if args.buffer and args.ssh_target!="local":
|
|
||||||
target_cmd.append("|mbuffer -m {}".format(args.buffer))
|
|
||||||
|
|
||||||
|
|
||||||
#### make sure parent on target exists
|
|
||||||
parent_filesystem= "/".join(target_filesystem.split("/")[:-1])
|
|
||||||
run(ssh_to=ssh_target, cmd=[ "zfs", "create" ,"-p", parent_filesystem ], test=args.test)
|
|
||||||
|
|
||||||
### execute pipe
|
|
||||||
debug_txt="# "+source_cmd[0]+" '"+("' '".join(source_cmd[1:]))+"'" + " | " + target_cmd[0]+" '"+("' '".join(target_cmd[1:]))+"'"
|
|
||||||
|
|
||||||
if args.test:
|
|
||||||
debug("[TEST] "+debug_txt)
|
|
||||||
return
|
|
||||||
else:
|
|
||||||
debug(debug_txt)
|
|
||||||
|
|
||||||
source_proc=subprocess.Popen(source_cmd, env=os.environ, stdout=subprocess.PIPE)
|
|
||||||
target_proc=subprocess.Popen(target_cmd, env=os.environ, stdin=source_proc.stdout)
|
|
||||||
source_proc.stdout.close() # Allow p1 to receive a SIGPIPE if p2 exits.
|
|
||||||
target_proc.communicate()
|
|
||||||
|
|
||||||
if not args.ignore_transfer_errors:
|
|
||||||
if source_proc.returncode:
|
|
||||||
raise(subprocess.CalledProcessError(source_proc.returncode, source_cmd))
|
|
||||||
|
|
||||||
#zfs recv sometimes gives an exitcode 1 while the transfer was succesfull, therefore we ignore exit 1's and do an extra check to see if the snapshot is there.
|
|
||||||
if target_proc.returncode and target_proc.returncode!=1:
|
|
||||||
raise(subprocess.CalledProcessError(target_proc.returncode, target_cmd))
|
|
||||||
|
|
||||||
debug("Verifying if snapshot exists on target")
|
|
||||||
run(ssh_to=ssh_target, cmd=["zfs", "list", target_filesystem+"@"+second_snapshot ])
|
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
"""get filesystems that where already backupped to a target. """
|
|
||||||
def zfs_get_backupped_filesystems(ssh_to, backup_name, target_fs):
|
|
||||||
#get all target filesystems that have received or inherited the backup propert, under the target_fs tree
|
|
||||||
ret=run(ssh_to=ssh_to, tab_split=False, cmd=[
|
|
||||||
"zfs", "get", "-r", "-t", "volume,filesystem", "-o", "name", "-s", "received,inherited", "-H", "autobackup:"+backup_name, target_fs
|
|
||||||
])
|
|
||||||
|
|
||||||
return(ret)
|
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
"""get filesystems that where once backupped to target but are no longer selected on source
|
|
||||||
|
|
||||||
these are filesystems that are not in the list in target_filesystems.
|
|
||||||
|
|
||||||
this happens when filesystems are destroyed or unselected on the source.
|
|
||||||
"""
|
|
||||||
def get_stale_backupped_filesystems(ssh_to, backup_name, target_fs, target_filesystems):
|
|
||||||
|
|
||||||
backupped_filesystems=zfs_get_backupped_filesystems(ssh_to=ssh_to, backup_name=backup_name, target_fs=target_fs)
|
|
||||||
|
|
||||||
#determine backupped filesystems that are not in target_filesystems anymore
|
|
||||||
stale_backupped_filesystems=[]
|
|
||||||
for backupped_filesystem in backupped_filesystems:
|
|
||||||
if backupped_filesystem not in target_filesystems:
|
|
||||||
stale_backupped_filesystems.append(backupped_filesystem)
|
|
||||||
|
|
||||||
return(stale_backupped_filesystems)
|
|
||||||
|
|
||||||
|
|
||||||
now=time.time()
|
|
||||||
"""determine list of snapshot (in @format) to destroy, according to age"""
|
|
||||||
def determine_destroy_list(snapshots, days):
|
|
||||||
ret=[]
|
|
||||||
for filesystem in snapshots:
|
|
||||||
for snapshot in snapshots[filesystem]:
|
|
||||||
time_str=re.findall("^.*-([0-9]*)$", snapshot)[0]
|
|
||||||
if len(time_str)==14:
|
|
||||||
#new format:
|
|
||||||
time_secs=time.mktime(time.strptime(time_str,"%Y%m%d%H%M%S"))
|
|
||||||
else:
|
|
||||||
time_secs=int(time_str)
|
|
||||||
# verbose("time_secs"+time_str)
|
|
||||||
if (now-time_secs) > (24 * 3600 * days):
|
|
||||||
ret.append(filesystem+"@"+snapshot)
|
|
||||||
|
|
||||||
return(ret)
|
|
||||||
|
|
||||||
|
|
||||||
def lstrip_path(path, count):
|
|
||||||
return("/".join(path.split("/")[count:]))
|
|
||||||
|
|
||||||
|
|
||||||
"""get list of filesystems that are changed, compared to the latest snapshot"""
|
|
||||||
def zfs_get_unchanged_filesystems(ssh_to, snapshots):
|
|
||||||
|
|
||||||
ret=[]
|
|
||||||
for ( filesystem, snapshot_list ) in snapshots.items():
|
|
||||||
latest_snapshot=snapshot_list[-1]
|
|
||||||
|
|
||||||
cmd=[
|
|
||||||
"zfs", "get","-H" ,"-ovalue", "written@"+latest_snapshot, filesystem
|
|
||||||
]
|
|
||||||
|
|
||||||
output=run(ssh_to=ssh_to, tab_split=False, cmd=cmd, valid_exitcodes=[ 0 ])
|
|
||||||
|
|
||||||
if output[0]=="0B":
|
|
||||||
ret.append(filesystem)
|
|
||||||
verbose("No changes on {}".format(filesystem))
|
|
||||||
|
|
||||||
return(ret)
|
|
||||||
|
|
||||||
|
|
||||||
def zfs_autobackup():
|
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
############## data gathering section
|
|
||||||
|
|
||||||
if args.test:
|
|
||||||
args.verbose=True
|
|
||||||
verbose("RUNNING IN TEST-MODE, NOT MAKING ACTUAL BACKUP!")
|
|
||||||
|
|
||||||
|
|
||||||
### getting and determinging source/target filesystems
|
|
||||||
|
|
||||||
# get selected filesystem on backup source
|
|
||||||
verbose("Getting selected source filesystems for backup {0} on {1}".format(args.backup_name,args.ssh_source))
|
|
||||||
source_filesystems=zfs_get_selected_filesystems(args.ssh_source, args.backup_name)
|
|
||||||
|
|
||||||
#nothing todo
|
|
||||||
if not source_filesystems:
|
|
||||||
error("No filesystems source selected, please do a 'zfs set autobackup:{0}=true' on {1}".format(args.backup_name,args.ssh_source))
|
|
||||||
sys.exit(1)
|
|
||||||
|
|
||||||
|
|
||||||
# determine target filesystems
|
|
||||||
target_filesystems=[]
|
|
||||||
for source_filesystem in source_filesystems:
|
|
||||||
#append args.target_fs prefix and strip args.strip_path paths from source_filesystem
|
|
||||||
target_filesystems.append(args.target_fs + "/" + lstrip_path(source_filesystem, args.strip_path))
|
|
||||||
|
|
||||||
|
|
||||||
### get resumable transfers
|
|
||||||
resumable_target_filesystems={}
|
|
||||||
if args.resume:
|
|
||||||
verbose("Checking for aborted transfers that can be resumed")
|
|
||||||
resumable_target_filesystems=zfs_get_resumable_filesystems(args.ssh_target, target_filesystems)
|
|
||||||
debug("Resumable filesystems: "+str(pprint.pformat(resumable_target_filesystems)))
|
|
||||||
|
|
||||||
|
|
||||||
### get all snapshots of all selected filesystems
|
|
||||||
verbose("Getting source snapshot-list from {0}".format(args.ssh_source))
|
|
||||||
source_snapshots=zfs_get_snapshots(args.ssh_source, source_filesystems, args.backup_name)
|
|
||||||
debug("Source snapshots: " + str(pprint.pformat(source_snapshots)))
|
|
||||||
|
|
||||||
|
|
||||||
#create new snapshot?
|
|
||||||
if not args.no_snapshot:
|
|
||||||
#determine which filesystems changed since last snapshot
|
|
||||||
if not args.allow_empty:
|
|
||||||
verbose("Determining unchanged filesystems")
|
|
||||||
unchanged_filesystems=zfs_get_unchanged_filesystems(args.ssh_source, source_snapshots)
|
|
||||||
else:
|
|
||||||
unchanged_filesystems=[]
|
|
||||||
|
|
||||||
snapshot_filesystems=[]
|
|
||||||
for source_filesystem in source_filesystems:
|
|
||||||
if source_filesystem not in unchanged_filesystems:
|
|
||||||
snapshot_filesystems.append(source_filesystem)
|
|
||||||
|
|
||||||
|
|
||||||
#create snapshot
|
|
||||||
if snapshot_filesystems:
|
|
||||||
new_snapshot_name=args.backup_name+"-"+time.strftime("%Y%m%d%H%M%S")
|
|
||||||
verbose("Creating source snapshot {0} on {1} ".format(new_snapshot_name, args.ssh_source))
|
|
||||||
zfs_create_snapshot(args.ssh_source, snapshot_filesystems, new_snapshot_name)
|
|
||||||
else:
|
|
||||||
verbose("No changes at all, not creating snapshot.")
|
|
||||||
|
|
||||||
|
|
||||||
#add it to the list of source filesystems
|
|
||||||
for snapshot_filesystem in snapshot_filesystems:
|
|
||||||
source_snapshots.setdefault(snapshot_filesystem,[]).append(new_snapshot_name)
|
|
||||||
|
|
||||||
|
|
||||||
#### get target snapshots
|
|
||||||
target_snapshots={}
|
|
||||||
try:
|
|
||||||
verbose("Getting target snapshot-list from {0}".format(args.ssh_target))
|
|
||||||
target_snapshots=zfs_get_snapshots(args.ssh_target, target_filesystems, args.backup_name)
|
|
||||||
except subprocess.CalledProcessError:
|
|
||||||
verbose("(ignoring errors, probably initial backup for this filesystem)")
|
|
||||||
pass
|
|
||||||
debug("Target snapshots: " + str(pprint.pformat(target_snapshots)))
|
|
||||||
|
|
||||||
|
|
||||||
#obsolete snapshots that may be removed
|
|
||||||
source_obsolete_snapshots={}
|
|
||||||
target_obsolete_snapshots={}
|
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
############## backup section
|
|
||||||
|
|
||||||
#determine which snapshots to send for each filesystem
|
|
||||||
for source_filesystem in source_filesystems:
|
|
||||||
target_filesystem=args.target_fs + "/" + lstrip_path(source_filesystem, args.strip_path)
|
|
||||||
|
|
||||||
if source_filesystem not in source_snapshots:
|
|
||||||
#this happens if you use --no-snapshot and there are new filesystems without snapshots
|
|
||||||
verbose("Skipping source filesystem {0}, no snapshots found".format(source_filesystem))
|
|
||||||
else:
|
|
||||||
|
|
||||||
#incremental or initial send?
|
|
||||||
if target_filesystem in target_snapshots and target_snapshots[target_filesystem]:
|
|
||||||
#incremental mode, determine what to send and what is obsolete
|
|
||||||
|
|
||||||
#latest succesfully send snapshot, should be common on both source and target
|
|
||||||
latest_target_snapshot=target_snapshots[target_filesystem][-1]
|
|
||||||
|
|
||||||
if latest_target_snapshot not in source_snapshots[source_filesystem]:
|
|
||||||
#cant find latest target anymore. find first common snapshot and inform user
|
|
||||||
error_msg="Cant find latest target snapshot on source, did you destroy/rename it?"
|
|
||||||
error_msg=error_msg+"\nLatest on target : "+target_filesystem+"@"+latest_target_snapshot
|
|
||||||
error_msg=error_msg+"\nMissing on source: "+source_filesystem+"@"+latest_target_snapshot
|
|
||||||
found=False
|
|
||||||
for latest_target_snapshot in reversed(target_snapshots[target_filesystem]):
|
|
||||||
if latest_target_snapshot in source_snapshots[source_filesystem]:
|
|
||||||
error_msg=error_msg+"\nYou could solve this by rolling back to this common snapshot on target: "+target_filesystem+"@"+latest_target_snapshot
|
|
||||||
found=True
|
|
||||||
break
|
|
||||||
if not found:
|
|
||||||
error_msg=error_msg+"\nAlso could not find an earlier common snapshot to rollback to."
|
|
||||||
|
|
||||||
raise(Exception(error_msg))
|
|
||||||
|
|
||||||
#send all new source snapshots that come AFTER the last target snapshot
|
|
||||||
latest_source_index=source_snapshots[source_filesystem].index(latest_target_snapshot)
|
|
||||||
send_snapshots=source_snapshots[source_filesystem][latest_source_index+1:]
|
|
||||||
|
|
||||||
#source snapshots that come BEFORE last target snapshot are obsolete
|
|
||||||
source_obsolete_snapshots[source_filesystem]=source_snapshots[source_filesystem][0:latest_source_index]
|
|
||||||
|
|
||||||
#target snapshots that come BEFORE last target snapshot are obsolete
|
|
||||||
latest_target_index=target_snapshots[target_filesystem].index(latest_target_snapshot)
|
|
||||||
target_obsolete_snapshots[target_filesystem]=target_snapshots[target_filesystem][0:latest_target_index]
|
|
||||||
else:
|
|
||||||
#initial mode, send all snapshots, nothing is obsolete:
|
|
||||||
latest_target_snapshot=None
|
|
||||||
send_snapshots=source_snapshots[source_filesystem]
|
|
||||||
target_obsolete_snapshots[target_filesystem]=[]
|
|
||||||
source_obsolete_snapshots[source_filesystem]=[]
|
|
||||||
|
|
||||||
#now actually send the snapshots
|
|
||||||
if not args.no_send:
|
|
||||||
|
|
||||||
if send_snapshots and args.rollback and latest_target_snapshot:
|
|
||||||
#roll back any changes on target
|
|
||||||
debug("Rolling back target to latest snapshot.")
|
|
||||||
run(ssh_to=args.ssh_target, test=args.test, cmd=["zfs", "rollback", target_filesystem+"@"+latest_target_snapshot ])
|
|
||||||
|
|
||||||
|
|
||||||
for send_snapshot in send_snapshots:
|
|
||||||
|
|
||||||
#resumable?
|
|
||||||
if target_filesystem in resumable_target_filesystems:
|
|
||||||
resume_token=resumable_target_filesystems.pop(target_filesystem)
|
|
||||||
else:
|
|
||||||
resume_token=None
|
|
||||||
|
|
||||||
#hold the snapshot we're sending on the source
|
|
||||||
zfs_hold_snapshot(ssh_to=args.ssh_source, snapshot=source_filesystem+"@"+send_snapshot)
|
|
||||||
|
|
||||||
zfs_transfer(
|
|
||||||
ssh_source=args.ssh_source, source_filesystem=source_filesystem,
|
|
||||||
first_snapshot=latest_target_snapshot, second_snapshot=send_snapshot,
|
|
||||||
ssh_target=args.ssh_target, target_filesystem=target_filesystem,
|
|
||||||
resume_token=resume_token
|
|
||||||
)
|
|
||||||
|
|
||||||
#hold the snapshot we just send to the target
|
|
||||||
zfs_hold_snapshot(ssh_to=args.ssh_target, snapshot=target_filesystem+"@"+send_snapshot)
|
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
#now that we succesfully transferred this snapshot, the previous snapshot is obsolete:
|
|
||||||
if latest_target_snapshot:
|
|
||||||
zfs_release_snapshot(ssh_to=args.ssh_target, snapshot=target_filesystem+"@"+latest_target_snapshot)
|
|
||||||
target_obsolete_snapshots[target_filesystem].append(latest_target_snapshot)
|
|
||||||
|
|
||||||
zfs_release_snapshot(ssh_to=args.ssh_source, snapshot=source_filesystem+"@"+latest_target_snapshot)
|
|
||||||
source_obsolete_snapshots[source_filesystem].append(latest_target_snapshot)
|
|
||||||
#we just received a new filesytem?
|
|
||||||
else:
|
|
||||||
if args.clear_refreservation:
|
|
||||||
debug("Clearing refreservation to save space.")
|
|
||||||
|
|
||||||
run(ssh_to=args.ssh_target, test=args.test, cmd=["zfs", "set", "refreservation=none", target_filesystem ])
|
|
||||||
|
|
||||||
|
|
||||||
if args.clear_mountpoint:
|
|
||||||
debug("Setting canmount=noauto to prevent auto-mounting in the wrong place. (ignoring errors)")
|
|
||||||
|
|
||||||
run(ssh_to=args.ssh_target, test=args.test, cmd=["zfs", "set", "canmount=noauto", target_filesystem ], valid_exitcodes= [0, 1] )
|
|
||||||
|
|
||||||
|
|
||||||
latest_target_snapshot=send_snapshot
|
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
############## cleanup section
|
|
||||||
#we only do cleanups after everything is complete, to keep everything consistent (same snapshots everywhere)
|
|
||||||
|
|
||||||
|
|
||||||
#find stale backups on target that have become obsolete
|
|
||||||
verbose("Getting stale filesystems and snapshots from {0}".format(args.ssh_target))
|
|
||||||
stale_target_filesystems=get_stale_backupped_filesystems(ssh_to=args.ssh_target, backup_name=args.backup_name, target_fs=args.target_fs, target_filesystems=target_filesystems)
|
|
||||||
debug("Stale target filesystems: {0}".format("\n".join(stale_target_filesystems)))
|
|
||||||
|
|
||||||
stale_target_snapshots=zfs_get_snapshots(args.ssh_target, stale_target_filesystems, args.backup_name)
|
|
||||||
debug("Stale target snapshots: " + str(pprint.pformat(stale_target_snapshots)))
|
|
||||||
target_obsolete_snapshots.update(stale_target_snapshots)
|
|
||||||
|
|
||||||
#determine stale filesystems that have no snapshots left (the can be destroyed)
|
|
||||||
#TODO: prevent destroying filesystems that have underlying filesystems that are still active.
|
|
||||||
stale_target_destroys=[]
|
|
||||||
for stale_target_filesystem in stale_target_filesystems:
|
|
||||||
if stale_target_filesystem not in stale_target_snapshots:
|
|
||||||
stale_target_destroys.append(stale_target_filesystem)
|
|
||||||
|
|
||||||
if stale_target_destroys:
|
|
||||||
if args.destroy_stale:
|
|
||||||
verbose("Destroying stale filesystems on target {0}:\n{1}".format(args.ssh_target, "\n".join(stale_target_destroys)))
|
|
||||||
zfs_destroy(ssh_to=args.ssh_target, filesystems=stale_target_destroys, recursive=True)
|
|
||||||
else:
|
|
||||||
verbose("Stale filesystems on {0}, use --destroy-stale to destroy:\n{1}".format(args.ssh_target, "\n".join(stale_target_destroys)))
|
|
||||||
|
|
||||||
|
|
||||||
#now actually destroy the old snapshots
|
|
||||||
source_destroys=determine_destroy_list(source_obsolete_snapshots, args.keep_source)
|
|
||||||
if source_destroys:
|
|
||||||
verbose("Destroying old snapshots on source {0}:\n{1}".format(args.ssh_source, "\n".join(source_destroys)))
|
|
||||||
zfs_destroy_snapshots(ssh_to=args.ssh_source, snapshots=source_destroys)
|
|
||||||
|
|
||||||
target_destroys=determine_destroy_list(target_obsolete_snapshots, args.keep_target)
|
|
||||||
if target_destroys:
|
|
||||||
verbose("Destroying old snapshots on target {0}:\n{1}".format(args.ssh_target, "\n".join(target_destroys)))
|
|
||||||
zfs_destroy_snapshots(ssh_to=args.ssh_target, snapshots=target_destroys)
|
|
||||||
|
|
||||||
|
|
||||||
verbose("All done")
|
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
################################################################## ENTRY POINT
|
|
||||||
|
|
||||||
# parse arguments
|
|
||||||
import argparse
|
|
||||||
parser = argparse.ArgumentParser(description='ZFS autobackup v2.2')
|
|
||||||
parser.add_argument('--ssh-source', default="local", help='Source host to get backup from. (user@hostname) Default %(default)s.')
|
|
||||||
parser.add_argument('--ssh-target', default="local", help='Target host to push backup to. (user@hostname) Default %(default)s.')
|
|
||||||
parser.add_argument('--keep-source', type=int, default=30, help='Number of days to keep old snapshots on source. Default %(default)s.')
|
|
||||||
parser.add_argument('--keep-target', type=int, default=30, help='Number of days to keep old snapshots on target. Default %(default)s.')
|
|
||||||
parser.add_argument('backup_name', help='Name of the backup (you should set the zfs property "autobackup:backup-name" to true on filesystems you want to backup')
|
|
||||||
parser.add_argument('target_fs', help='Target filesystem')
|
|
||||||
|
|
||||||
parser.add_argument('--no-snapshot', action='store_true', help='dont create new snapshot (usefull for finishing uncompleted backups, or cleanups)')
|
|
||||||
parser.add_argument('--no-send', action='store_true', help='dont send snapshots (usefull to only do a cleanup)')
|
|
||||||
parser.add_argument('--allow-empty', action='store_true', help='if nothing has changed, still create empty snapshots.')
|
|
||||||
parser.add_argument('--resume', action='store_true', help='support resuming of interrupted transfers by using the zfs extensible_dataset feature (both zpools should have it enabled) Disadvantage is that you need to use zfs recv -A if another snapshot is created on the target during a receive. Otherwise it will keep failing.')
|
|
||||||
parser.add_argument('--strip-path', default=0, type=int, help='number of directory to strip from path (use 1 when cloning zones between 2 SmartOS machines)')
|
|
||||||
parser.add_argument('--buffer', default="", help='Use mbuffer with specified size to speedup zfs transfer. (e.g. --buffer 1G)')
|
|
||||||
|
|
||||||
|
|
||||||
parser.add_argument('--destroy-stale', action='store_true', help='Destroy stale backups that have no more snapshots. Be sure to verify the output before using this! ')
|
|
||||||
parser.add_argument('--clear-refreservation', action='store_true', help='Set refreservation property to none for new filesystems. Usefull when backupping SmartOS volumes. (recommended)')
|
|
||||||
parser.add_argument('--clear-mountpoint', action='store_true', help='Sets canmount=noauto property, to prevent the received filesystem from mounting over existing filesystems. (recommended)')
|
|
||||||
parser.add_argument('--filter-properties', action='append', help='Filter properties when receiving filesystems. Can be specified multiple times. (Example: If you send data from Linux to FreeNAS, you should filter xattr)')
|
|
||||||
parser.add_argument('--rollback', action='store_true', help='Rollback changes on the target before starting a backup. (normally you can prevent changes by setting the readonly property on the target_fs to on)')
|
|
||||||
parser.add_argument('--ignore-transfer-errors', action='store_true', help='Ignore transfer errors (still checks if received filesystem exists. usefull for acltype errors)')
|
|
||||||
|
|
||||||
|
|
||||||
parser.add_argument('--test', action='store_true', help='dont change anything, just show what would be done (still does all read-only operations)')
|
|
||||||
parser.add_argument('--verbose', action='store_true', help='verbose output')
|
|
||||||
parser.add_argument('--debug', action='store_true', help='debug output (shows commands that are executed)')
|
|
||||||
|
|
||||||
#note args is the only global variable we use, since its a global readonly setting anyway
|
|
||||||
args = parser.parse_args()
|
|
||||||
|
|
||||||
try:
|
|
||||||
zfs_autobackup()
|
|
||||||
except Exception as e:
|
|
||||||
if args.debug:
|
|
||||||
raise
|
|
||||||
else:
|
|
||||||
print("* ABORTED *")
|
|
||||||
print(str(e))
|
|
||||||
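For orientation, a minimal invocation sketch of this old v2.2 script, matching the argparse definition above (hostnames and dataset names are illustrative):

./zfs_autobackup --ssh-source root@fileserver --keep-source 30 --keep-target 90 --verbose smartos1 backuppool/backups

This selects every dataset on fileserver that has the property autobackup:smartos1=true, snapshots it, replicates it under the local backuppool/backups, and then prunes snapshots older than the --keep windows.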
zfs_autobackup/CachedProperty.py (new file, 39 lines)
@@ -0,0 +1,39 @@
# NOTE: this should inherit from (object) to function correctly with python 2.7
class CachedProperty(object):
    """ A property that is only computed once per instance and
    then stores the result in _cached_properties of the object.

    Source: https://github.com/bottlepy/bottle/commit/fa7733e075da0d790d809aa3d2f53071897e6f76
    """

    def __init__(self, func):
        self.__doc__ = getattr(func, '__doc__')
        self.func = func

    def __get__(self, obj, cls):
        if obj is None:
            return self

        propname = self.func.__name__

        if not hasattr(obj, '_cached_properties'):
            obj._cached_properties = {}

        if propname not in obj._cached_properties:
            obj._cached_properties[propname] = self.func(obj)
            # value = obj.__dict__[propname] = self.func(obj)

        return obj._cached_properties[propname]

    @staticmethod
    def clear(obj):
        """clears cache of obj"""
        if hasattr(obj, '_cached_properties'):
            obj._cached_properties = {}

    @staticmethod
    def is_cached(obj, propname):
        if hasattr(obj, '_cached_properties') and propname in obj._cached_properties:
            return True
        else:
            return False
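A minimal usage sketch of CachedProperty (illustrative only, not part of the diff; the Pool class and its property are hypothetical):

class Pool:
    @CachedProperty
    def features(self):
        print("computing...")          # runs only on first access
        return ["large_blocks", "embedded_data"]

pool = Pool()
pool.features                          # prints "computing..." and caches the result
pool.features                          # served from _cached_properties, no recompute
CachedProperty.clear(pool)             # invalidate; the next access recomputes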
zfs_autobackup/CmdPipe.py (new file, 160 lines)
@@ -0,0 +1,160 @@
import subprocess
import os
import select

try:
    from shlex import quote as cmd_quote
except ImportError:
    from pipes import quote as cmd_quote


class CmdItem:
    """one command item, to be added to a CmdPipe"""

    def __init__(self, cmd, readonly=False, stderr_handler=None, exit_handler=None, shell=False):
        """create item. caller has to make sure cmd is properly escaped when using shell.
        :type cmd: list of str
        """

        self.cmd = cmd
        self.readonly = readonly
        self.stderr_handler = stderr_handler
        self.exit_handler = exit_handler
        self.shell = shell
        self.process = None

    def __str__(self):
        """return copy-pastable version of command."""
        if self.shell:
            # it's already copy-pastable for a shell:
            return " ".join(self.cmd)
        else:
            # make it copy-pastable, will make a mess of quotes sometimes, but is correct
            return " ".join(map(cmd_quote, self.cmd))

    def create(self, stdin):
        """actually create the subprocess (called by CmdPipe)"""

        # make sure the command gets all the data in utf8 format:
        # (this is necessary if LC_ALL=en_US.utf8 is not set in the environment)
        encoded_cmd = []
        for arg in self.cmd:
            encoded_cmd.append(arg.encode('utf-8'))

        self.process = subprocess.Popen(encoded_cmd, env=os.environ, stdout=subprocess.PIPE, stdin=stdin,
                                        stderr=subprocess.PIPE, shell=self.shell)


class CmdPipe:
    """a pipe of one or more commands. also takes care of utf-8 encoding/decoding and line based parsing"""

    def __init__(self, readonly=False, inp=None):
        """
        :param inp: input string for stdin
        :param readonly: Only execute if entire pipe consists of readonly commands
        """
        # list of commands + error handlers to execute
        self.items = []

        self.inp = inp
        self.readonly = readonly
        self._should_execute = True

    def add(self, cmd_item):
        """adds a CmdItem to the pipe.
        :type cmd_item: CmdItem
        """

        self.items.append(cmd_item)

        if not cmd_item.readonly and self.readonly:
            self._should_execute = False

    def __str__(self):
        """transform whole pipe into a oneliner for debugging and testing. this should generate a copy-pastable string for in a console"""

        ret = ""
        for item in self.items:
            if ret:
                ret = ret + " | "
            ret = ret + "({})".format(item)  # this will do proper escaping to make it copy-pastable

        return ret

    def should_execute(self):
        return self._should_execute

    def execute(self, stdout_handler):
        """run the pipe. returns True if all exit handlers returned true"""

        if not self._should_execute:
            return True

        # first process should have actual user input as stdin:
        selectors = []

        # create processes
        last_stdout = None
        stdin = subprocess.PIPE
        for item in self.items:

            item.create(stdin)
            selectors.append(item.process.stderr)

            if last_stdout is None:
                # we're the first process in the pipe, do we have some input?
                if self.inp is not None:
                    # TODO: make streaming to support big inputs?
                    item.process.stdin.write(self.inp.encode('utf-8'))
                item.process.stdin.close()
            else:
                # last stdout was piped to this stdin already, so close it because we don't need it anymore
                last_stdout.close()

            last_stdout = item.process.stdout
            stdin = last_stdout

        # monitor last stdout as well
        selectors.append(last_stdout)

        while True:
            # wait for output on one of the stderrs or last_stdout
            (read_ready, write_ready, ex_ready) = select.select(selectors, [], [])
            eof_count = 0
            done_count = 0

            # read line and call appropriate handlers
            if last_stdout in read_ready:
                line = last_stdout.readline().decode('utf-8').rstrip()
                if line != "":
                    stdout_handler(line)
                else:
                    eof_count = eof_count + 1

            for item in self.items:
                if item.process.stderr in read_ready:
                    line = item.process.stderr.readline().decode('utf-8').rstrip()
                    if line != "":
                        item.stderr_handler(line)
                    else:
                        eof_count = eof_count + 1

                if item.process.poll() is not None:
                    done_count = done_count + 1

            # all filehandles are at eof and all processes are done (poll() is not None)
            if eof_count == len(selectors) and done_count == len(self.items):
                break

        # close filehandles
        last_stdout.close()
        for item in self.items:
            item.process.stderr.close()

        # call exit handlers
        success = True
        for item in self.items:
            if item.exit_handler is not None:
                success = item.exit_handler(item.process.returncode) and success

        return success
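A minimal sketch of how CmdPipe chains two commands (illustrative only, not part of the diff; the echo/grep commands are stand-ins for the zfs send/recv pipelines this class is used for):

pipe = CmdPipe(readonly=True)
pipe.add(CmdItem(["echo", "one\ntwo"], readonly=True, stderr_handler=print,
                 exit_handler=lambda code: code == 0))
pipe.add(CmdItem(["grep", "two"], readonly=True, stderr_handler=print,
                 exit_handler=lambda code: code == 0))
ok = pipe.execute(stdout_handler=print)   # prints "two"; ok is True when both exit handlers accept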
zfs_autobackup/ExecuteNode.py (new file, 166 lines)
@@ -0,0 +1,166 @@
import os
import select
import subprocess
from .CmdPipe import CmdPipe, CmdItem
from .LogStub import LogStub

try:
    from shlex import quote as cmd_quote
except ImportError:
    from pipes import quote as cmd_quote


class ExecuteError(Exception):
    pass


class ExecuteNode(LogStub):
    """an endpoint to execute local or remote commands via ssh"""

    PIPE = 1

    def __init__(self, ssh_config=None, ssh_to=None, readonly=False, debug_output=False):
        """ssh_config: custom ssh config
        ssh_to: server you want to ssh to. None means local
        readonly: only execute commands that don't make any changes (useful for test-runs)
        debug_output: show output and exit codes of commands in debugging output.
        """

        self.ssh_config = ssh_config
        self.ssh_to = ssh_to
        self.readonly = readonly
        self.debug_output = debug_output

    def __repr__(self):
        if self.ssh_to is None:
            return "(local)"
        else:
            return self.ssh_to

    def _parse_stdout(self, line):
        """parse stdout. can be overridden in subclass"""
        if self.debug_output:
            self.debug("STDOUT > " + line.rstrip())

    def _parse_stderr(self, line, hide_errors):
        """parse stderr. can be overridden in subclass"""
        if hide_errors:
            self.debug("STDERR > " + line.rstrip())
        else:
            self.error("STDERR > " + line.rstrip())

    def _quote(self, cmd):
        """return quoted version of command. if it has the value PIPE it will add an actual | """
        if cmd == self.PIPE:
            return('|')
        else:
            return(cmd_quote(cmd))

    def _shell_cmd(self, cmd):
        """prefix specified ssh shell to command and escape shell characters"""

        ret = []

        # add remote shell
        if not self.is_local():
            ret = ["ssh"]

            if self.ssh_config is not None:
                ret.extend(["-F", self.ssh_config])

            ret.append(self.ssh_to)

        ret.append(" ".join(map(self._quote, cmd)))

        return ret

    def is_local(self):
        return self.ssh_to is None

    def run(self, cmd, inp=None, tab_split=False, valid_exitcodes=None, readonly=False, hide_errors=False,
            return_stderr=False, pipe=False, return_all=False):
        """run a command on the node, check the output, parse/handle it and return it

        Either uses a local shell (sh -c) or a remote shell (ssh) to execute the command.
        Therefore the command can have stuff like actual pipes in it, if you don't want to use pipe=True to pipe stuff.

        :param cmd: the actual command, should be a list, where the first item is the command
            and the rest are parameters. use ExecuteNode.PIPE to add an unescaped |
            (if you want to use system piping instead of python piping)
        :param pipe: return CmdPipe instead of executing it.
        :param inp: Can be None, a string or a CmdPipe that was previously returned.
        :param tab_split: split tabbed output into lists
        :param valid_exitcodes: list of valid exit codes for this command (checks exit code of both sides of a pipe)
            Use [] to accept all exit codes. Default [0]
        :param readonly: make this True if the command doesn't make any changes and is safe to execute in testmode
        :param hide_errors: don't show stderr output as error, instead show it as debugging output (use to hide expected errors)
        :param return_stderr: return both stdout and stderr as a tuple. (normally only returns stdout)
        :param return_all: return stdout, stderr and the exit code as a tuple. (normally only returns stdout)
        """

        # create new pipe?
        if not isinstance(inp, CmdPipe):
            cmd_pipe = CmdPipe(self.readonly, inp)
        else:
            # add stuff to existing pipe
            cmd_pipe = inp

        # stderr parser
        error_lines = []
        returned_exit_code = None

        def stderr_handler(line):
            if tab_split:
                error_lines.append(line.rstrip().split('\t'))
            else:
                error_lines.append(line.rstrip())
            self._parse_stderr(line, hide_errors)

        # exit code handler
        if valid_exitcodes is None:
            valid_exitcodes = [0]

        def exit_handler(exit_code):
            if self.debug_output:
                self.debug("EXIT > {}".format(exit_code))

            if (valid_exitcodes != []) and (exit_code not in valid_exitcodes):
                self.error("Command \"{}\" returned exit code {} (valid codes: {})".format(cmd_item, exit_code, valid_exitcodes))
                return False

            return True

        # add shell command and handlers to pipe
        cmd_item = CmdItem(cmd=self._shell_cmd(cmd), readonly=readonly, stderr_handler=stderr_handler, exit_handler=exit_handler, shell=self.is_local())
        cmd_pipe.add(cmd_item)

        # return pipe instead of executing?
        if pipe:
            return cmd_pipe

        # stdout parser
        output_lines = []

        def stdout_handler(line):
            if tab_split:
                output_lines.append(line.rstrip().split('\t'))
            else:
                output_lines.append(line.rstrip())
            self._parse_stdout(line)

        if cmd_pipe.should_execute():
            self.debug("CMD > {}".format(cmd_pipe))
        else:
            self.debug("CMDSKIP> {}".format(cmd_pipe))

        # execute and call handlers in CmdPipe
        if not cmd_pipe.execute(stdout_handler=stdout_handler):
            raise(ExecuteError("Last command returned error"))

        if return_all:
            return output_lines, error_lines, cmd_item.process and cmd_item.process.returncode
        elif return_stderr:
            return output_lines, error_lines
        else:
            return output_lines
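A minimal sketch of running commands through ExecuteNode, locally and via ssh (illustrative only, not part of the diff; the hostname is hypothetical):

local_node = ExecuteNode()                              # ssh_to=None means local
lines = local_node.run(["zfs", "list", "-H", "-o", "name"])

remote_node = ExecuteNode(ssh_to="root@backupserver")   # hostname is an assumption
# ExecuteNode.PIPE inserts an unescaped |, so the remote shell runs "zfs list ... | head -1"
first = remote_node.run(["zfs", "list", "-H", "-o", "name", ExecuteNode.PIPE, "head", "-1"])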
zfs_autobackup/LogConsole.py (new file, 66 lines)
@@ -0,0 +1,66 @@
# python 2 compatibility
from __future__ import print_function

import sys

class LogConsole:
    """Log-class that outputs to console, adding colors if needed"""

    def __init__(self, show_debug, show_verbose, color):
        self.last_log = ""
        self.show_debug = show_debug
        self.show_verbose = show_verbose

        if color:
            # try to use color, fall back if colorama is not available
            self.colorama = False
            try:
                global colorama
                import colorama
                self.colorama = True
            except ImportError:
                pass

        else:
            self.colorama = False

    def error(self, txt):
        if self.colorama:
            print(colorama.Fore.RED + colorama.Style.BRIGHT + "! " + txt + colorama.Style.RESET_ALL, file=sys.stderr)
        else:
            print("! " + txt, file=sys.stderr)
        sys.stderr.flush()

    def warning(self, txt):
        if self.colorama:
            print(colorama.Fore.YELLOW + colorama.Style.BRIGHT + " NOTE: " + txt + colorama.Style.RESET_ALL)
        else:
            print(" NOTE: " + txt)
        sys.stdout.flush()

    def verbose(self, txt):
        if self.show_verbose:
            if self.colorama:
                print(colorama.Style.NORMAL + " " + txt + colorama.Style.RESET_ALL)
            else:
                print(" " + txt)
            sys.stdout.flush()

    def debug(self, txt):
        if self.show_debug:
            if self.colorama:
                print(colorama.Fore.GREEN + "# " + txt + colorama.Style.RESET_ALL)
            else:
                print("# " + txt)
            sys.stdout.flush()

    def progress(self, txt):
        """print progress output to stderr (stays on the same line)"""
        self.clear_progress()
        print(">>> {}\r".format(txt), end='', file=sys.stderr)
        sys.stderr.flush()

    def clear_progress(self):
        import colorama
        print(colorama.ansi.clear_line(), end='', file=sys.stderr)
        sys.stderr.flush()
zfs_autobackup/LogStub.py (new file, 18 lines)
@@ -0,0 +1,18 @@
# Used for base classes that don't implement their own logging (like ExecuteNode).
# Usually logging is implemented in subclasses (like ZfsNode, which is a subclass of ExecuteNode), but for regression testing it's nice to have these stubs.

class LogStub:
    """Just a stub, usually overridden in subclasses."""

    # simple logging stubs
    def debug(self, txt):
        print("DEBUG : " + txt)

    def verbose(self, txt):
        print("VERBOSE: " + txt)

    def warning(self, txt):
        print("WARNING: " + txt)

    def error(self, txt):
        print("ERROR : " + txt)
zfs_autobackup/Thinner.py (new file, 99 lines)
@@ -0,0 +1,99 @@
import time

from .ThinnerRule import ThinnerRule


class Thinner:
    """progressive thinner (universal, used for cleaning up snapshots)"""

    def __init__(self, schedule_str=""):
        """
        Args:
            schedule_str: comma separated list of ThinnerRules. A plain number specifies how many snapshots to always keep.
        """

        self.rules = []
        self.always_keep = 0

        if schedule_str == "":
            return

        rule_strs = schedule_str.split(",")
        for rule_str in rule_strs:
            if rule_str.lstrip('-').isdigit():
                self.always_keep = int(rule_str)
                if self.always_keep < 0:
                    raise (Exception("Number of snapshots to keep can't be negative: {}".format(self.always_keep)))
            else:
                self.rules.append(ThinnerRule(rule_str))

    def human_rules(self):
        """get list of human readable rules"""
        ret = []
        if self.always_keep:
            ret.append("Keep the last {} snapshot{}.".format(self.always_keep, self.always_keep != 1 and "s" or ""))
        for rule in self.rules:
            ret.append(rule.human_str)

        return ret

    def thin(self, objects, keep_objects=None, now=None):
        """thin list of objects with current schedule rules. objects: list of
        objects to thin. every object should have a timestamp attribute.

        return( keeps, removes )

        Args:
            objects: list of objects to check (should have a timestamp attribute)
            keep_objects: objects to always keep (if they are also in the normal objects list)
            now: if specified, use this time as the current time
        """

        if not keep_objects:
            keep_objects = []

        # always keep a number of the last objects?
        if self.always_keep:
            # all of them
            if len(objects) <= self.always_keep:
                return objects, []

            # determine which ones
            always_keep_objects = objects[-self.always_keep:]
        else:
            always_keep_objects = []

        # determine time blocks
        time_blocks = {}
        for rule in self.rules:
            time_blocks[rule.period] = {}

        if not now:
            now = int(time.time())

        keeps = []
        removes = []

        # traverse objects
        for thisobject in objects:
            # important that they are ints!
            timestamp = int(thisobject.timestamp)
            age = int(now) - timestamp

            # store in the correct time blocks, per period-size, if not too old yet
            # e.g.: look if there is ANY timeblock that wants to keep this object
            keep = False
            for rule in self.rules:
                if age <= rule.ttl:
                    block_nr = int(timestamp / rule.period)
                    if block_nr not in time_blocks[rule.period]:
                        time_blocks[rule.period][block_nr] = True
                        keep = True

            # keep it according to schedule, or keep it because it is in the keep_objects list
            if keep or thisobject in keep_objects or thisobject in always_keep_objects:
                keeps.append(thisobject)
            else:
                removes.append(thisobject)

        return keeps, removes
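A minimal sketch of thinning a list of timestamped objects (illustrative only, not part of the diff; the Snap namedtuple stands in for real snapshot objects):

import time
from collections import namedtuple

Snap = namedtuple("Snap", ["name", "timestamp"])
now = int(time.time())
# one snapshot per hour for the last 3 days, oldest first:
snaps = [Snap("snap{}".format(i), now - i * 3600) for i in range(72)][::-1]

thinner = Thinner("10,1d1w")   # always keep the last 10, plus one per day for a week
keeps, removes = thinner.thin(snaps, now=now)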
zfs_autobackup/ThinnerRule.py (new file, 71 lines)
@@ -0,0 +1,71 @@
import re


class ThinnerRule:
    """a thinning schedule rule for Thinner"""

    TIME_NAMES = {
        'y': 3600 * 24 * 365.25,
        'm': 3600 * 24 * 30,
        'w': 3600 * 24 * 7,
        'd': 3600 * 24,
        'h': 3600,
        'min': 60,
        's': 1,
    }

    TIME_DESC = {
        'y': 'year',
        'm': 'month',
        'w': 'week',
        'd': 'day',
        'h': 'hour',
        'min': 'minute',
        's': 'second',
    }

    def __init__(self, rule_str):
        """parse scheduling string
        example:
            daily snapshot, remove after a week:     1d1w
            weekly snapshot, remove after a month:   1w1m
            monthly snapshot, remove after 6 months: 1m6m
            yearly snapshot, remove after 2 years:   1y2y
            keep all snapshots, remove after a day:  1s1d
            keep nothing:                            1s1s
        """

        rule_str = rule_str.lower()
        matches = re.findall("([0-9]*)([a-z]*)([0-9]*)([a-z]*)", rule_str)[0]

        if '' in matches:
            raise (Exception("Invalid schedule string: '{}'".format(rule_str)))

        period_amount = int(matches[0])
        period_unit = matches[1]
        ttl_amount = int(matches[2])
        ttl_unit = matches[3]

        if period_unit not in self.TIME_NAMES:
            raise (Exception("Invalid period string in schedule: '{}'".format(rule_str)))

        if ttl_unit not in self.TIME_NAMES:
            raise (Exception("Invalid ttl string in schedule: '{}'".format(rule_str)))

        self.period = period_amount * self.TIME_NAMES[period_unit]
        self.ttl = ttl_amount * self.TIME_NAMES[ttl_unit]

        if self.period > self.ttl:
            raise (Exception("Period can't be longer than ttl in schedule: '{}'".format(rule_str)))

        self.rule_str = rule_str

        self.human_str = "Keep every {} {}{}, delete after {} {}{}.".format(
            period_amount, self.TIME_DESC[period_unit], period_amount != 1 and "s" or "", ttl_amount,
            self.TIME_DESC[ttl_unit], ttl_amount != 1 and "s" or "")

    def __str__(self):
        """get schedule as a schedule string"""

        return self.rule_str
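A small sketch of what a parsed rule looks like (illustrative only, not part of the diff):

rule = ThinnerRule("1d1w")
print(rule.period)     # 86400  -> keep one object per day...
print(rule.ttl)        # 604800 -> ...for a week
print(rule.human_str)  # "Keep every 1 day, delete after 1 week."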
191
zfs_autobackup/ZfsAuto.py
Normal file
191
zfs_autobackup/ZfsAuto.py
Normal file
@ -0,0 +1,191 @@
import argparse
import os.path
import sys

from .LogConsole import LogConsole


class ZfsAuto(object):
    """Common base class, always used subclassed. See ZfsAutobackup and ZfsAutoverify."""

    # also used by setup.py
    VERSION = "3.2-alpha1"
    HEADER = "{} v{} - (c)2021 E.H.Eefting (edwin@datux.nl)".format(os.path.basename(sys.argv[0]), VERSION)

    def __init__(self, argv, print_arguments=True):

        self.hold_name = None
        self.snapshot_time_format = None
        self.property_name = None
        self.exclude_paths = None

        # helps with investigating failed regression tests:
        if print_arguments:
            print("ARGUMENTS: " + " ".join(argv))

        self.args = self.parse_args(argv)

    def parse_args(self, argv):
        """parse common arguments, set up logging, check and adjust parameters"""

        parser = self.get_parser()
        args = parser.parse_args(argv)

        if args.help:
            parser.print_help()
            sys.exit(255)

        if args.version:
            print(self.HEADER)
            sys.exit(255)

        # auto enable progress?
        if sys.stderr.isatty() and not args.no_progress:
            args.progress = True

        if args.debug_output:
            args.debug = True

        if args.test:
            args.verbose = True

        if args.debug:
            args.verbose = True

        self.log = LogConsole(show_debug=args.debug, show_verbose=args.verbose, color=sys.stdout.isatty())

        self.verbose(self.HEADER)
        self.verbose("")

        if args.backup_name is None:
            parser.print_usage()
            self.log.error("Please specify BACKUP-NAME")
            sys.exit(255)

        if args.target_path is not None and args.target_path[0] == "/":
            self.log.error("Target should not start with a /")
            sys.exit(255)

        if args.ignore_replicated:
            self.warning("--ignore-replicated has been renamed, using --exclude-unchanged")
            args.exclude_unchanged = True

        # Note: Before version v3.1-beta5 we always used exclude_received. This was a problem if you wanted to
        # replicate an existing backup to another host and use the same backupname/snapshots. However, exclude_received
        # may still need to be used to explicitly exclude a backup with the 'received' source property, to avoid
        # accidental recursive replication of a zvol that is currently being received in another session (as it will
        # have changes).

        self.exclude_paths = []
        if args.ssh_source == args.ssh_target:
            if args.target_path:
                # target and source are the same, make sure to exclude target_path
                self.verbose("NOTE: Source and target are on the same host, excluding target-path from selection.")
                self.exclude_paths.append(args.target_path)
            else:
                self.verbose("NOTE: Source and target are on the same host, excluding received datasets from selection.")
                args.exclude_received = True

        if args.test:
            self.warning("TEST MODE - SIMULATING WITHOUT MAKING ANY CHANGES")

        # format all the names
        self.property_name = args.property_format.format(args.backup_name)
        self.snapshot_time_format = args.snapshot_format.format(args.backup_name)
        self.hold_name = args.hold_format.format(args.backup_name)

        self.verbose("")
        self.verbose("Selecting dataset property : {}".format(self.property_name))
        self.verbose("Snapshot format : {}".format(self.snapshot_time_format))

        return args

    def get_parser(self):

        parser = argparse.ArgumentParser(description=self.HEADER, add_help=False,
                                         epilog='Full manual at: https://github.com/psy0rz/zfs_autobackup')

        # positional arguments
        parser.add_argument('backup_name', metavar='BACKUP-NAME', default=None, nargs='?',
                            help='Name of the backup to select')

        parser.add_argument('target_path', metavar='TARGET-PATH', default=None, nargs='?',
                            help='Target ZFS filesystem (optional)')

        # Basic options
        group = parser.add_argument_group("Basic options")
        group.add_argument('--help', '-h', action='store_true', help='show help')
        group.add_argument('--test', '--dry-run', '-n', action='store_true',
                           help='Dry run, don\'t change anything, just show what would be done (still does all read-only '
                                'operations)')
        group.add_argument('--verbose', '-v', action='store_true', help='verbose output')
        group.add_argument('--debug', '-d', action='store_true',
                           help='Show zfs commands that are executed, stops after an exception.')
        group.add_argument('--debug-output', action='store_true',
                           help='Show zfs commands and their output/exit codes. (noisy)')
        group.add_argument('--progress', action='store_true',
                           help='show zfs progress output. Enabled automatically on ttys. (use --no-progress to disable)')
        group.add_argument('--no-progress', action='store_true',
                           help=argparse.SUPPRESS)  # needed to work around a zfs recv -v bug
        group.add_argument('--version', action='store_true',
                           help='Show version.')
        group.add_argument('--strip-path', metavar='N', default=0, type=int,
                           help='Number of directories to strip from target path (use 1 when cloning zones between 2 '
                                'SmartOS machines)')

        # SSH options
        group = parser.add_argument_group("SSH options")
        group.add_argument('--ssh-config', metavar='CONFIG-FILE', default=None, help='Custom ssh client config')
        group.add_argument('--ssh-source', metavar='USER@HOST', default=None,
                           help='Source host to get backup from.')
        group.add_argument('--ssh-target', metavar='USER@HOST', default=None,
                           help='Target host to push backup to.')

        group = parser.add_argument_group("String formatting options")
        group.add_argument('--property-format', metavar='FORMAT', default="autobackup:{}",
                           help='Dataset selection string format. Default: %(default)s')
        group.add_argument('--snapshot-format', metavar='FORMAT', default="{}-%Y%m%d%H%M%S",
                           help='ZFS Snapshot string format. Default: %(default)s')
        group.add_argument('--hold-format', metavar='FORMAT', default="zfs_autobackup:{}",
                           help='ZFS hold string format. Default: %(default)s')

        group = parser.add_argument_group("Selection options")
        group.add_argument('--ignore-replicated', action='store_true', help=argparse.SUPPRESS)
        group.add_argument('--exclude-unchanged', action='store_true',
                           help='Exclude datasets that have no changes since any last snapshot. (Useful in combination with proxmox HA replication)')
        group.add_argument('--exclude-received', action='store_true',
                           help='Exclude datasets that have the origin of their autobackup: property as "received". '
                                'This can avoid recursive replication between two backup partners.')

        return parser

    def verbose(self, txt):
        self.log.verbose(txt)

    def warning(self, txt):
        self.log.warning(txt)

    def error(self, txt):
        self.log.error(txt)

    def debug(self, txt):
        self.log.debug(txt)

    def progress(self, txt):
        self.log.progress(txt)

    def clear_progress(self):
        self.log.clear_progress()

    def set_title(self, title):
        self.log.verbose("")
        self.log.verbose("#### " + title)

    def print_error_sources(self):
        self.error(
            "No source filesystems selected, please do a 'zfs set autobackup:{0}=true' on the source datasets "
            "you want to select.".format(
                self.args.backup_name))

    def make_target_name(self, source_dataset):
        """make target_name from a source_dataset"""
        return self.args.target_path + "/" + source_dataset.lstrip_path(self.args.strip_path)
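A short sketch of how make_target_name combines TARGET-PATH with --strip-path. The dataset names here are hypothetical; lstrip_path lives in ZfsDataset (whose diff is suppressed below) and, per the --strip-path help text, drops the leading path components:

# source dataset: rpool/data/vm-100-disk-0, target path: backup/host1
#   --strip-path 0  ->  backup/host1/rpool/data/vm-100-disk-0
#   --strip-path 1  ->  backup/host1/data/vm-100-disk-0
#   --strip-path 2  ->  backup/host1/vm-100-disk-0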
zfs_autobackup/ZfsAutobackup.py (new file, 496 lines)
@@ -0,0 +1,496 @@
import time

import argparse
from .ZfsAuto import ZfsAuto

from . import compressors
from .ExecuteNode import ExecuteNode
from .Thinner import Thinner
from .ZfsDataset import ZfsDataset
from .ZfsNode import ZfsNode
from .ThinnerRule import ThinnerRule


class ZfsAutobackup(ZfsAuto):
    """The main zfs-autobackup class. Start here, at run() :)"""

    def __init__(self, argv, print_arguments=True):

        # NOTE: common options and parameters are in ZfsAuto
        super(ZfsAutobackup, self).__init__(argv, print_arguments)

    def parse_args(self, argv):
        """do extra checks on common args"""

        args = super(ZfsAutobackup, self).parse_args(argv)

        if not args.no_holds:
            self.verbose("Hold name : {}".format(self.hold_name))

        if args.allow_empty:
            args.min_change = 0

        if args.destroy_incompatible:
            args.rollback = True

        if args.resume:
            self.warning("The --resume option isn't needed anymore (it's autodetected now)")

        if args.raw:
            self.warning(
                "The --raw option isn't needed anymore (it's autodetected now). Also see --encrypt and --decrypt.")

        if args.compress and args.ssh_source is None and args.ssh_target is None:
            self.warning("Using compression, but transfer is local.")

        if args.compress and args.zfs_compressed:
            self.warning("Using --compress with --zfs-compressed, might be inefficient.")

        return args

    def get_parser(self):
        """extend common parser with extra stuff needed for zfs-autobackup"""

        parser = super(ZfsAutobackup, self).get_parser()

        group = parser.add_argument_group("Snapshot options")
        group.add_argument('--no-snapshot', action='store_true',
                           help='Don\'t create new snapshots (useful for finishing uncompleted backups, or cleanups)')
        group.add_argument('--pre-snapshot-cmd', metavar="COMMAND", default=[], action='append',
                           help='Run COMMAND before snapshotting (can be used multiple times).')
        group.add_argument('--post-snapshot-cmd', metavar="COMMAND", default=[], action='append',
                           help='Run COMMAND after snapshotting (can be used multiple times).')
        group.add_argument('--min-change', metavar='BYTES', type=int, default=1,
                           help='Only create snapshot if enough bytes are changed. (default %('
                                'default)s)')
        group.add_argument('--allow-empty', action='store_true',
                           help='If nothing has changed, still create empty snapshots. (Faster. Same as --min-change=0)')
        group.add_argument('--other-snapshots', action='store_true',
                           help='Send over other snapshots as well, not just the ones created by this tool.')

        group = parser.add_argument_group("Transfer options")
        group.add_argument('--no-send', action='store_true',
                           help='Don\'t transfer snapshots (useful for cleanups, or if you want a separate send-cronjob)')
        group.add_argument('--no-holds', action='store_true',
                           help='Don\'t hold snapshots. (Faster. Allows you to destroy common snapshot.)')
        group.add_argument('--clear-refreservation', action='store_true',
                           help='Filter "refreservation" property. (recommended, saves space. same as '
                                '--filter-properties refreservation)')
        group.add_argument('--clear-mountpoint', action='store_true',
                           help='Set property canmount=noauto for new datasets. (recommended, prevents mount '
                                'conflicts. same as --set-properties canmount=noauto)')
        group.add_argument('--filter-properties', metavar='PROPERTY,...', type=str,
                           help='List of properties to "filter" when receiving filesystems. (you can still restore '
                                'them with zfs inherit -S)')
        group.add_argument('--set-properties', metavar='PROPERTY=VALUE,...', type=str,
                           help='List of properties to override when receiving filesystems. (you can still restore '
                                'them with zfs inherit -S)')
        group.add_argument('--rollback', action='store_true',
                           help='Rollback changes to the latest target snapshot before starting. (normally you can '
                                'prevent changes by setting the readonly property on the target_path to on)')
        group.add_argument('--destroy-incompatible', action='store_true',
                           help='Destroy incompatible snapshots on target. Use with care! (implies --rollback)')
        group.add_argument('--ignore-transfer-errors', action='store_true',
                           help='Ignore transfer errors (still checks if received filesystem exists. useful for '
                                'acltype errors)')

        group.add_argument('--decrypt', action='store_true',
                           help='Decrypt data before sending it over.')
        group.add_argument('--encrypt', action='store_true',
                           help='Encrypt data after receiving it.')

        group.add_argument('--zfs-compressed', action='store_true',
                           help='Transfer blocks that already have zfs-compression as-is.')

        group = parser.add_argument_group("ZFS send/recv pipes")
        group.add_argument('--compress', metavar='TYPE', default=None, nargs='?', const='zstd-fast',
                           choices=compressors.choices(),
                           help='Use compression during transfer, defaults to zstd-fast if TYPE is not specified. ({})'.format(
                               ", ".join(compressors.choices())))
        group.add_argument('--rate', metavar='DATARATE', default=None,
                           help='Limit data transfer rate (e.g. 128K. requires mbuffer.)')
        group.add_argument('--buffer', metavar='SIZE', default=None,
                           help='Add zfs send and recv buffers to smooth out IO bursts. (e.g. 128M. requires mbuffer)')
        group.add_argument('--send-pipe', metavar="COMMAND", default=[], action='append',
                           help='pipe zfs send output through COMMAND (can be used multiple times)')
        group.add_argument('--recv-pipe', metavar="COMMAND", default=[], action='append',
                           help='pipe zfs recv input through COMMAND (can be used multiple times)')

        group = parser.add_argument_group("Thinner options")
        group.add_argument('--no-thinning', action='store_true', help="Do not destroy any snapshots.")
        group.add_argument('--keep-source', metavar='SCHEDULE', type=str, default="10,1d1w,1w1m,1m1y",
                           help='Thinning schedule for old source snapshots. Default: %(default)s')
        group.add_argument('--keep-target', metavar='SCHEDULE', type=str, default="10,1d1w,1w1m,1m1y",
                           help='Thinning schedule for old target snapshots. Default: %(default)s')
        group.add_argument('--destroy-missing', metavar="SCHEDULE", type=str, default=None,
                           help='Destroy datasets on target that are missing on the source. Specify the time since '
                                'the last snapshot, e.g: --destroy-missing 30d')

        # obsolete
        parser.add_argument('--resume', action='store_true', help=argparse.SUPPRESS)
        parser.add_argument('--raw', action='store_true', help=argparse.SUPPRESS)

        return parser
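The default --keep-source/--keep-target value is a comma-separated list of ThinnerRule strings, plus a plain leading number that acts as an always-keep count (it feeds the always_keep_objects seen in Thinner.thin above). A breakdown of the default:

# "10,1d1w,1w1m,1m1y" means:
#   10   - always keep the 10 most recent snapshots
#   1d1w - keep one snapshot per day, for a week
#   1w1m - keep one snapshot per week, for a month
#   1m1y - keep one snapshot per month, for a year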
    # NOTE: this method also uses self.args. args that need extra processing are passed as function parameters:
    def thin_missing_targets(self, target_dataset, used_target_datasets):
        """thin target datasets that are missing on the source."""

        self.debug("Thinning obsolete datasets")
        missing_datasets = [dataset for dataset in target_dataset.recursive_datasets if
                            dataset not in used_target_datasets]

        count = 0
        for dataset in missing_datasets:

            count = count + 1
            if self.args.progress:
                self.progress("Analysing missing {}/{}".format(count, len(missing_datasets)))

            try:
                dataset.debug("Missing on source, thinning")
                dataset.thin()

            except Exception as e:
                dataset.error("Error during thinning of missing datasets ({})".format(str(e)))

        if self.args.progress:
            self.clear_progress()

    # NOTE: this method also uses self.args. args that need extra processing are passed as function parameters:
    def destroy_missing_targets(self, target_dataset, used_target_datasets):
        """destroy target datasets that are missing on the source and that meet the requirements"""

        self.debug("Destroying obsolete datasets")

        missing_datasets = [dataset for dataset in target_dataset.recursive_datasets if
                            dataset not in used_target_datasets]

        count = 0
        for dataset in missing_datasets:

            count = count + 1
            if self.args.progress:
                self.progress("Analysing destroy missing {}/{}".format(count, len(missing_datasets)))

            try:
                # can't do anything without our own snapshots
                if not dataset.our_snapshots:
                    if dataset.datasets:
                        # it's not a leaf, just ignore
                        dataset.debug("Destroy missing: ignoring")
                    else:
                        dataset.verbose(
                            "Destroy missing: has no snapshots made by us. (please destroy manually)")
                else:
                    # past the deadline?
                    deadline_ttl = ThinnerRule("0s" + self.args.destroy_missing).ttl
                    now = int(time.time())
                    if dataset.our_snapshots[-1].timestamp + deadline_ttl > now:
                        dataset.verbose("Destroy missing: Waiting for deadline.")
                    else:

                        dataset.debug("Destroy missing: Removing our snapshots.")

                        # remove all our snapshots, except the last, to save space in case we fail later on
                        for snapshot in dataset.our_snapshots[:-1]:
                            snapshot.destroy(fail_exception=True)

                        # does it have other snapshots?
                        has_others = False
                        for snapshot in dataset.snapshots:
                            if not snapshot.is_ours():
                                has_others = True
                                break

                        if has_others:
                            dataset.verbose("Destroy missing: Still in use by other snapshots")
                        else:
                            if dataset.datasets:
                                dataset.verbose("Destroy missing: Still has children here.")
                            else:
                                dataset.verbose("Destroy missing.")
                                dataset.our_snapshots[-1].destroy(fail_exception=True)
                                dataset.destroy(fail_exception=True)

            except Exception as e:
                if self.args.progress:
                    self.clear_progress()

                dataset.error("Error during --destroy-missing: {}".format(str(e)))

        if self.args.progress:
            self.clear_progress()
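The deadline check above leans on ThinnerRule parsing: prefixing the --destroy-missing value with "0s" turns it into a valid rule whose ttl is the requested age. A small sketch:

# --destroy-missing 30d:
#   ThinnerRule("0s" + "30d").ttl == 30 * 86400 == 2592000 seconds
# the dataset is only destroyed once its newest zfs-autobackup snapshot
# is more than 30 days old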
    def get_send_pipes(self, logger):
        """determine the zfs send pipe"""

        ret = []

        # IO buffer
        if self.args.buffer:
            logger("zfs send buffer : {}".format(self.args.buffer))
            ret.extend([ExecuteNode.PIPE, "mbuffer", "-q", "-s128k", "-m" + self.args.buffer])

        # custom pipes
        for send_pipe in self.args.send_pipe:
            ret.append(ExecuteNode.PIPE)
            ret.extend(send_pipe.split(" "))
            logger("zfs send custom pipe : {}".format(send_pipe))

        # compression
        if self.args.compress is not None:
            ret.append(ExecuteNode.PIPE)
            cmd = compressors.compress_cmd(self.args.compress)
            ret.extend(cmd)
            logger("zfs send compression : {}".format(" ".join(cmd)))

        # transfer rate
        if self.args.rate:
            logger("zfs send transfer rate : {}".format(self.args.rate))
            ret.extend([ExecuteNode.PIPE, "mbuffer", "-q", "-s128k", "-m16M", "-R" + self.args.rate])

        return ret

    def get_recv_pipes(self, logger):

        ret = []

        # decompression
        if self.args.compress is not None:
            cmd = compressors.decompress_cmd(self.args.compress)
            ret.extend(cmd)
            ret.append(ExecuteNode.PIPE)
            logger("zfs recv decompression : {}".format(" ".join(cmd)))

        # custom pipes
        for recv_pipe in self.args.recv_pipe:
            ret.extend(recv_pipe.split(" "))
            ret.append(ExecuteNode.PIPE)
            logger("zfs recv custom pipe : {}".format(recv_pipe))

        # IO buffer
        if self.args.buffer:
            # only add a second buffer if it's useful. (e.g. non-local transfer or other pipes active)
            if self.args.ssh_source is not None or self.args.ssh_target is not None or self.args.recv_pipe or self.args.send_pipe or self.args.compress is not None:
                logger("zfs recv buffer : {}".format(self.args.buffer))
                ret.extend(["mbuffer", "-q", "-s128k", "-m" + self.args.buffer, ExecuteNode.PIPE])

        return ret
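For example, with --compress zstd-fast --buffer 128M on a remote transfer, the send side is assembled in the order buffer, custom pipes, compression, rate limit. A sketch in terms of the code itself (the compressor argv comes from compressors.py, which is truncated at the end of this diff):

# get_send_pipes() would return roughly:
#   [ExecuteNode.PIPE, "mbuffer", "-q", "-s128k", "-m128M",
#    ExecuteNode.PIPE] + compressors.compress_cmd("zstd-fast")
# i.e. the raw "zfs send" stream is buffered, then compressed, before
# ExecuteNode splices it into the transport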
    # NOTE: this method also uses self.args. args that need extra processing are passed as function parameters:
    def sync_datasets(self, source_node, source_datasets, target_node):
        """Sync datasets, or thin-only on both sides
        :type target_node: ZfsNode
        :type source_datasets: list of ZfsDataset
        :type source_node: ZfsNode
        """

        send_pipes = self.get_send_pipes(source_node.verbose)
        recv_pipes = self.get_recv_pipes(target_node.verbose)

        fail_count = 0
        count = 0
        target_datasets = []
        for source_dataset in source_datasets:

            # stats
            if self.args.progress:
                count = count + 1
                self.progress("Analysing dataset {}/{} ({} failed)".format(count, len(source_datasets), fail_count))

            try:
                # determine corresponding target_dataset
                target_name = self.make_target_name(source_dataset)
                target_dataset = target_node.get_dataset(target_name)
                target_datasets.append(target_dataset)

                # ensure parents exist
                # TODO: this isn't perfect yet, in some cases it can create parents when it shouldn't.
                if not self.args.no_send \
                        and target_dataset.parent not in target_datasets \
                        and not target_dataset.parent.exists:
                    target_dataset.parent.create_filesystem(parents=True)

                # determine common zpool features (cached, so no problem that we call it often)
                source_features = source_node.get_pool(source_dataset).features
                target_features = target_node.get_pool(target_dataset).features
                common_features = source_features and target_features

                # sync the snapshots of this dataset
                source_dataset.sync_snapshots(target_dataset, show_progress=self.args.progress,
                                              features=common_features, filter_properties=self.filter_properties_list(),
                                              set_properties=self.set_properties_list(),
                                              ignore_recv_exit_code=self.args.ignore_transfer_errors,
                                              holds=not self.args.no_holds, rollback=self.args.rollback,
                                              also_other_snapshots=self.args.other_snapshots,
                                              no_send=self.args.no_send,
                                              destroy_incompatible=self.args.destroy_incompatible,
                                              send_pipes=send_pipes, recv_pipes=recv_pipes,
                                              decrypt=self.args.decrypt, encrypt=self.args.encrypt,
                                              zfs_compressed=self.args.zfs_compressed)
            except Exception as e:
                if self.args.progress:
                    self.clear_progress()

                fail_count = fail_count + 1
                source_dataset.error("FAILED: " + str(e))
                if self.args.debug:
                    self.verbose("Debug mode, aborting on first error")
                    raise

        if self.args.progress:
            self.clear_progress()

        target_path_dataset = target_node.get_dataset(self.args.target_path)
        if not self.args.no_thinning:
            self.thin_missing_targets(target_dataset=target_path_dataset, used_target_datasets=target_datasets)

        if self.args.destroy_missing is not None:
            self.destroy_missing_targets(target_dataset=target_path_dataset, used_target_datasets=target_datasets)

        return fail_count

    def thin_source(self, source_datasets):

        self.set_title("Thinning source")

        for source_dataset in source_datasets:
            source_dataset.thin(skip_holds=True)

    def filter_properties_list(self):

        if self.args.filter_properties:
            filter_properties = self.args.filter_properties.split(",")
        else:
            filter_properties = []

        if self.args.clear_refreservation:
            filter_properties.append("refreservation")

        return filter_properties

    def set_properties_list(self):

        if self.args.set_properties:
            set_properties = self.args.set_properties.split(",")
        else:
            set_properties = []

        if self.args.clear_mountpoint:
            set_properties.append("canmount=noauto")

        return set_properties

    def run(self):

        try:

            ################ create source zfsNode
            self.set_title("Source settings")

            description = "[Source]"
            if self.args.no_thinning:
                source_thinner = None
            else:
                source_thinner = Thinner(self.args.keep_source)
            source_node = ZfsNode(snapshot_time_format=self.snapshot_time_format, hold_name=self.hold_name, logger=self,
                                  ssh_config=self.args.ssh_config,
                                  ssh_to=self.args.ssh_source, readonly=self.args.test,
                                  debug_output=self.args.debug_output, description=description, thinner=source_thinner)

            ################# select source datasets
            self.set_title("Selecting")
            source_datasets = source_node.selected_datasets(property_name=self.property_name,
                                                            exclude_received=self.args.exclude_received,
                                                            exclude_paths=self.exclude_paths,
                                                            exclude_unchanged=self.args.exclude_unchanged,
                                                            min_change=self.args.min_change)
            if not source_datasets:
                self.print_error_sources()
                return 255

            ################# snapshotting
            if not self.args.no_snapshot:
                self.set_title("Snapshotting")
                snapshot_name = time.strftime(self.snapshot_time_format)
                source_node.consistent_snapshot(source_datasets, snapshot_name,
                                                min_changed_bytes=self.args.min_change,
                                                pre_snapshot_cmds=self.args.pre_snapshot_cmd,
                                                post_snapshot_cmds=self.args.post_snapshot_cmd)

            ################# sync
            # if a target is specified we sync the datasets, otherwise we just thin the source (e.g. snapshot mode)
            if self.args.target_path:

                # create target_node
                self.set_title("Target settings")
                if self.args.no_thinning:
                    target_thinner = None
                else:
                    target_thinner = Thinner(self.args.keep_target)
                target_node = ZfsNode(snapshot_time_format=self.snapshot_time_format, hold_name=self.hold_name,
                                      logger=self, ssh_config=self.args.ssh_config,
                                      ssh_to=self.args.ssh_target,
                                      readonly=self.args.test, debug_output=self.args.debug_output,
                                      description="[Target]",
                                      thinner=target_thinner)
                target_node.verbose("Receive datasets under: {}".format(self.args.target_path))

                self.set_title("Synchronising")

                # check if the target path exists, to prevent vague errors
                target_dataset = target_node.get_dataset(self.args.target_path)
                if not target_dataset.exists:
                    raise (Exception(
                        "Target path '{}' does not exist. Please create this dataset first.".format(target_dataset)))

                # do the actual sync
                # NOTE: even with no_send, no_thinning and no_snapshot this does something useful, because it checks
                # the common snapshots and shows incompatible snapshots
                fail_count = self.sync_datasets(
                    source_node=source_node,
                    source_datasets=source_datasets,
                    target_node=target_node)

            # no target specified, run in snapshot-only mode
            else:
                if not self.args.no_thinning:
                    self.thin_source(source_datasets)
                fail_count = 0

            if not fail_count:
                if self.args.test:
                    self.set_title("All tests successful.")
                else:
                    self.set_title("All operations completed successfully")
                    if not self.args.target_path:
                        self.verbose("(No target_path specified, only operated as snapshot tool.)")

            else:
                if fail_count != 255:
                    self.error("{} dataset(s) failed!".format(fail_count))

            if self.args.test:
                self.verbose("")
                self.warning("TEST MODE - DID NOT MAKE ANY CHANGES!")

            return fail_count

        except Exception as e:
            self.error("Exception: " + str(e))
            if self.args.debug:
                raise
            return 255
        except KeyboardInterrupt:
            self.error("Aborted")
            return 255


def cli():
    import sys

    sys.exit(ZfsAutobackup(sys.argv[1:], False).run())


if __name__ == "__main__":
    cli()
zfs_autobackup/ZfsAutoverify.py (new file, 402 lines)
@@ -0,0 +1,402 @@
import os
import time

from .ExecuteNode import ExecuteNode
from .ZfsAuto import ZfsAuto
from .ZfsDataset import ZfsDataset
from .ZfsNode import ZfsNode
import sys
import platform


def tmp_name(suffix=""):
    """create a temporary name unique to this process and node"""

    # we could use uuids, but those are ugly and confusing
    name = "zfstmp_{}_{}".format(platform.node(), os.getpid())
    name = name + suffix
    return name


def hash_tree_tar(node, path):
    """calculate md5sum of a directory tree, using tar"""

    node.debug("Hashing filesystem {} ".format(path))

    cmd = ["tar", "-cf", "-", "-C", path, ".",
           ExecuteNode.PIPE, "md5sum"]

    stdout = node.run(cmd)

    if node.readonly:
        hashed = None
    else:
        hashed = stdout[0].split(" ")[0]

    node.debug("Hash of {} filesystem is {}".format(path, hashed))

    return hashed
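hash_tree_tar builds the equivalent of this shell pipeline (the mountpoint is a placeholder); md5sum prints the digest followed by the input name, which is why only the first whitespace-separated field is taken:

# equivalent shell pipeline (hypothetical mountpoint):
#   tar -cf - -C /tmp/zfstmp_myhost_1234source . | md5sum
# md5sum prints e.g. "d41d8cd98f00b204e9800998ecf8427e  -",
# so stdout[0].split(" ")[0] yields just the hash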
def compare_trees_tar(source_node, source_path, target_node, target_path):
    """compare two trees using tar. compatible and simple"""

    source_hash = hash_tree_tar(source_node, source_path)
    target_hash = hash_tree_tar(target_node, target_path)

    if source_hash != target_hash:
        raise Exception("md5hash difference: {} != {}".format(source_hash, target_hash))


def compare_trees_rsync(source_node, source_path, target_node, target_path):
    """use rsync to compare two trees.
    Advantage is that we can see which individual files differ.
    But it requires rsync and can't do remote-to-remote."""

    cmd = ["rsync", "-rcn", "--info=COPY,DEL,MISC,NAME,SYMSAFE", "--msgs2stderr", "--delete"]

    # both local
    if source_node.ssh_to is None and target_node.ssh_to is None:
        cmd.append("{}/".format(source_path))
        cmd.append("{}/".format(target_path))
        source_node.debug("Running rsync locally, on source.")
        stdout, stderr = source_node.run(cmd, return_stderr=True)

    # source is local
    elif source_node.ssh_to is None and target_node.ssh_to is not None:
        cmd.append("{}/".format(source_path))
        cmd.append("{}:{}/".format(target_node.ssh_to, target_path))
        source_node.debug("Running rsync locally, on source.")
        stdout, stderr = source_node.run(cmd, return_stderr=True)

    # target is local
    elif source_node.ssh_to is not None and target_node.ssh_to is None:
        cmd.append("{}:{}/".format(source_node.ssh_to, source_path))
        cmd.append("{}/".format(target_path))
        source_node.debug("Running rsync locally, on target.")
        stdout, stderr = target_node.run(cmd, return_stderr=True)

    else:
        raise Exception("Source and target can't both be remote when verifying. (rsync limitation)")

    if stderr:
        raise Exception("Dataset verify failed, see above list for differences")


def verify_filesystem(source_snapshot, source_mnt, target_snapshot, target_mnt, method):
    """Compare the contents of two zfs filesystem snapshots"""

    try:

        # mount the snapshots
        source_snapshot.mount(source_mnt)
        target_snapshot.mount(target_mnt)

        if method == 'rsync':
            compare_trees_rsync(source_snapshot.zfs_node, source_mnt, target_snapshot.zfs_node, target_mnt)
        elif method == 'tar':
            compare_trees_tar(source_snapshot.zfs_node, source_mnt, target_snapshot.zfs_node, target_mnt)
        else:
            raise (Exception("program error, unknown method"))

    finally:
        source_snapshot.unmount()
        target_snapshot.unmount()


def hash_dev(node, dev):
    """calculate md5sum of a device on a node"""

    node.debug("Hashing volume {} ".format(dev))

    cmd = ["md5sum", dev]

    stdout = node.run(cmd)

    if node.readonly:
        hashed = None
    else:
        hashed = stdout[0].split(" ")[0]

    node.debug("Hash of volume {} is {}".format(dev, hashed))

    return hashed


# def activate_volume_snapshot(dataset, snapshot):
#     """enables snapdev, waits and tries to find out the /dev path to the volume, in a compatible way. (linux/freebsd/smartos)"""
#
#     dataset.set("snapdev", "visible")
#
#     # NOTE: add smartos location to this list as well
#     locations = [
#         "/dev/zvol/" + snapshot.name
#     ]
#
#     dataset.debug("Waiting for /dev entry to appear...")
#     time.sleep(0.1)
#
#     start_time = time.time()
#     while time.time() - start_time < 10:
#         for location in locations:
#             stdout, stderr, exit_code = dataset.zfs_node.run(["test", "-e", location], return_all=True, valid_exitcodes=[0, 1])
#
#             # fake it in testmode
#             if dataset.zfs_node.readonly:
#                 return location
#
#             if exit_code == 0:
#                 return location
#         time.sleep(1)
#
#     raise (Exception("Timeout while waiting for {} entry to appear.".format(locations)))
#
# def deactivate_volume_snapshot(dataset):
#     dataset.inherit("snapdev")

# NOTE: https://www.google.com/search?q=Mount+Path+Limit+freebsd
# FreeBSD has limitations regarding path length, so we can't use the above method.
# Instead we create a temporary clone
def get_tmp_clone_name(snapshot):
    pool = snapshot.zfs_node.get_pool(snapshot)
    return pool.name + "/" + tmp_name()


def activate_volume_snapshot(snapshot):
    """clone the volume, wait, and try to find out the /dev path to it, in a compatible way. (linux/freebsd/smartos)"""

    clone_name = get_tmp_clone_name(snapshot)
    clone = snapshot.clone(clone_name)

    # NOTE: add smartos location to this list as well
    locations = [
        "/dev/zvol/" + clone_name
    ]

    clone.debug("Waiting for /dev entry to appear...")
    time.sleep(0.1)

    start_time = time.time()
    while time.time() - start_time < 10:
        for location in locations:
            stdout, stderr, exit_code = clone.zfs_node.run(["test", "-e", location], return_all=True, valid_exitcodes=[0, 1])

            # fake it in testmode
            if clone.zfs_node.readonly:
                return location

            if exit_code == 0:
                return location
        time.sleep(1)

    raise (Exception("Timeout while waiting for {} entry to appear.".format(locations)))


def deactivate_volume_snapshot(snapshot):
    clone_name = get_tmp_clone_name(snapshot)
    clone = snapshot.zfs_node.get_dataset(clone_name)
    clone.destroy()


def verify_volume(source_dataset, source_snapshot, target_dataset, target_snapshot):
    """compare the contents of two zfs volume snapshots"""

    try:
        source_dev = activate_volume_snapshot(source_snapshot)
        target_dev = activate_volume_snapshot(target_snapshot)

        source_hash = hash_dev(source_snapshot.zfs_node, source_dev)
        target_hash = hash_dev(target_snapshot.zfs_node, target_dev)

        if source_hash != target_hash:
            raise Exception("md5hash difference: {} != {}".format(source_hash, target_hash))

    finally:
        deactivate_volume_snapshot(source_snapshot)
        deactivate_volume_snapshot(target_snapshot)
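Because both the clone name and the mount-point names derive from tmp_name(), they stay stable for the lifetime of one process, which is what lets deactivate_volume_snapshot recompute the clone name instead of storing it. A sketch of the names this produces (hostname and pid are illustrative):

# tmp_name("source")        -> "zfstmp_backuphost_12345source"
# get_tmp_clone_name(snap)  -> "rpool/zfstmp_backuphost_12345"
#                              (the clone lands in the snapshot's own pool)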
def create_mountpoints(source_node, target_node):

    # prepare mount points
    source_node.debug("Create temporary mount point")
    source_mnt = "/tmp/" + tmp_name("source")
    source_node.run(["mkdir", source_mnt])

    target_node.debug("Create temporary mount point")
    target_mnt = "/tmp/" + tmp_name("target")
    target_node.run(["mkdir", target_mnt])

    return source_mnt, target_mnt


def cleanup_mountpoint(node, mnt):
    node.debug("Cleaning up temporary mount point")
    node.run(["rmdir", mnt], hide_errors=True, valid_exitcodes=[])


class ZfsAutoverify(ZfsAuto):
    """The zfs-autoverify class, default arguments and stuff come from ZfsAuto"""

    def __init__(self, argv, print_arguments=True):

        # NOTE: common options and parameters are in ZfsAuto
        super(ZfsAutoverify, self).__init__(argv, print_arguments)

    def parse_args(self, argv):
        """do extra checks on common args"""

        args = super(ZfsAutoverify, self).parse_args(argv)

        if args.target_path is None:
            self.log.error("Please specify TARGET-PATH")
            sys.exit(255)

        return args

    def get_parser(self):
        """extend common parser with extra stuff needed for zfs-autoverify"""

        parser = super(ZfsAutoverify, self).get_parser()

        group = parser.add_argument_group("Verify options")
        group.add_argument('--fs-compare', metavar='METHOD', default="tar", choices=["tar", "rsync"],
                           help='Compare method to use for filesystems. (tar, rsync) Default: %(default)s ')

        return parser

    def verify_datasets(self, source_mnt, source_datasets, target_node, target_mnt):

        fail_count = 0
        count = 0
        for source_dataset in source_datasets:

            # stats
            if self.args.progress:
                count = count + 1
                self.progress("Analysing dataset {}/{} ({} failed)".format(count, len(source_datasets), fail_count))

            try:
                # determine corresponding target_dataset
                target_name = self.make_target_name(source_dataset)
                target_dataset = target_node.get_dataset(target_name)

                # find common snapshots to verify
                source_snapshot = source_dataset.find_common_snapshot(target_dataset)
                target_snapshot = target_dataset.find_snapshot(source_snapshot)

                if source_snapshot is None or target_snapshot is None:
                    raise (Exception("Can't find common snapshot"))

                target_snapshot.verbose("Verifying...")

                if source_dataset.properties['type'] == "filesystem":
                    verify_filesystem(source_snapshot, source_mnt, target_snapshot, target_mnt, self.args.fs_compare)
                elif source_dataset.properties['type'] == "volume":
                    verify_volume(source_dataset, source_snapshot, target_dataset, target_snapshot)
                else:
                    raise (Exception("{} has unknown type {}".format(source_dataset, source_dataset.properties['type'])))

            except Exception as e:
                if self.args.progress:
                    self.clear_progress()

                fail_count = fail_count + 1
                target_dataset.error("FAILED: " + str(e))
                if self.args.debug:
                    self.verbose("Debug mode, aborting on first error")
                    raise

        if self.args.progress:
            self.clear_progress()

        return fail_count

    def run(self):

        source_node = None
        source_mnt = None
        target_node = None
        target_mnt = None

        try:

            ################ create source zfsNode
            self.set_title("Source settings")

            description = "[Source]"
            source_node = ZfsNode(snapshot_time_format=self.snapshot_time_format, hold_name=self.hold_name, logger=self,
                                  ssh_config=self.args.ssh_config,
                                  ssh_to=self.args.ssh_source, readonly=self.args.test,
                                  debug_output=self.args.debug_output, description=description)

            ################# select source datasets
            self.set_title("Selecting")
            source_datasets = source_node.selected_datasets(property_name=self.property_name,
                                                            exclude_received=self.args.exclude_received,
                                                            exclude_paths=self.exclude_paths,
                                                            exclude_unchanged=self.args.exclude_unchanged,
                                                            min_change=0)
            if not source_datasets:
                self.print_error_sources()
                return 255

            # create target_node
            self.set_title("Target settings")
            target_node = ZfsNode(snapshot_time_format=self.snapshot_time_format, hold_name=self.hold_name,
                                  logger=self, ssh_config=self.args.ssh_config,
                                  ssh_to=self.args.ssh_target,
                                  readonly=self.args.test, debug_output=self.args.debug_output,
                                  description="[Target]")
            target_node.verbose("Verify datasets under: {}".format(self.args.target_path))

            self.set_title("Verifying")

            source_mnt, target_mnt = create_mountpoints(source_node, target_node)

            fail_count = self.verify_datasets(
                source_mnt=source_mnt,
                source_datasets=source_datasets,
                target_mnt=target_mnt,
                target_node=target_node)

            if not fail_count:
                if self.args.test:
                    self.set_title("All tests successful.")
                else:
                    self.set_title("All datasets verified ok")

            else:
                if fail_count != 255:
                    self.error("{} dataset(s) failed!".format(fail_count))

            if self.args.test:
                self.verbose("")
                self.warning("TEST MODE - DID NOT VERIFY ANYTHING!")

            return fail_count

        except Exception as e:
            self.error("Exception: " + str(e))
            if self.args.debug:
                raise
            return 255
        except KeyboardInterrupt:
            self.error("Aborted")
            return 255
        finally:

            # cleanup
            if source_mnt is not None:
                cleanup_mountpoint(source_node, source_mnt)

            if target_mnt is not None:
                cleanup_mountpoint(target_node, target_mnt)


def cli():
    import sys

    sys.exit(ZfsAutoverify(sys.argv[1:], False).run())


if __name__ == "__main__":
    cli()
zfs_autobackup/ZfsDataset.py (new file, 1159 lines)
(file diff suppressed because it is too large)
zfs_autobackup/ZfsNode.py (new file, 269 lines)
@@ -0,0 +1,269 @@
# python 2 compatibility
from __future__ import print_function
import re
import shlex
import subprocess
import sys
import time

from .ExecuteNode import ExecuteNode
from .Thinner import Thinner
from .CachedProperty import CachedProperty
from .ZfsPool import ZfsPool
from .ZfsDataset import ZfsDataset
from .ExecuteNode import ExecuteError


class ZfsNode(ExecuteNode):
    """a node that contains zfs datasets. implements global (system-wide/pool-wide) zfs commands"""

    def __init__(self, snapshot_time_format, hold_name, logger, ssh_config=None, ssh_to=None, readonly=False,
                 description="",
                 debug_output=False, thinner=None):

        self.snapshot_time_format = snapshot_time_format
        self.hold_name = hold_name

        self.description = description

        self.logger = logger

        if ssh_config:
            self.verbose("Using custom SSH config: {}".format(ssh_config))

        if ssh_to:
            self.verbose("Datasets on: {}".format(ssh_to))
        else:
            self.verbose("Datasets are local")

        if thinner is not None:
            rules = thinner.human_rules()
            if rules:
                for rule in rules:
                    self.verbose(rule)
            else:
                self.verbose("Keep no old snapshots")

        self.__thinner = thinner

        # list of ZfsPools
        self.__pools = {}
        self.__datasets = {}

        self._progress_total_bytes = 0
        self._progress_start_time = time.time()

        ExecuteNode.__init__(self, ssh_config=ssh_config, ssh_to=ssh_to, readonly=readonly, debug_output=debug_output)

    def thin(self, objects, keep_objects):
        # NOTE: if thinning is disabled with --no-thinning, self.__thinner will be None.
        if self.__thinner is not None:
            return self.__thinner.thin(objects, keep_objects)
        else:
            return (keep_objects, [])

    @CachedProperty
    def supported_send_options(self):
        """list of supported options, for optimizing sends"""
        # not every zfs implementation supports them all

        ret = []
        for option in ["-L", "-e", "-c"]:
            if self.valid_command(["zfs", "send", option, "zfs_autobackup_option_test"]):
                ret.append(option)
        return ret

    @CachedProperty
    def supported_recv_options(self):
        """list of supported options"""
        # not every zfs implementation supports them all

        ret = []
        for option in ["-s"]:
            if self.valid_command(["zfs", "recv", option, "zfs_autobackup_option_test"]):
                ret.append(option)
        return ret

    def valid_command(self, cmd):
        """test if the specified zfs options give a valid exit code. use this to determine supported options"""

        try:
            self.run(cmd, hide_errors=True, valid_exitcodes=[0, 1])
        except ExecuteError:
            return False

        return True
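valid_command probes feature support by running the command against a dataset name that cannot exist: exit codes 0 and 1 mean zfs at least accepted the option, while anything else (a usage error) raises ExecuteError. A sketch of how the cached properties above use it:

# e.g. probing for large-block send support:
#   node.valid_command(["zfs", "send", "-L", "zfs_autobackup_option_test"])
# -> True on implementations that accept -L (the dataset is bogus, so the
#    command fails with exit code 1, which still counts as "option parsed"),
#    False when zfs rejects the option outright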
    def get_pool(self, dataset):
        """get a ZfsPool() object from a dataset. stores objects internally to enable caching"""

        if not isinstance(dataset, ZfsDataset):
            raise (Exception("{} is not a ZfsDataset".format(dataset)))

        zpool_name = dataset.name.split("/")[0]

        return self.__pools.setdefault(zpool_name, ZfsPool(self, zpool_name))

    def get_dataset(self, name, force_exists=None):
        """get a ZfsDataset() object from name. stores objects internally to enable caching"""

        return self.__datasets.setdefault(name, ZfsDataset(self, name))

    def reset_progress(self):
        """reset progress output counters"""
        self._progress_total_bytes = 0
        self._progress_start_time = time.time()

    def parse_zfs_progress(self, line, hide_errors, prefix):
        """try to parse progress output of zfs recv -Pv, and don't show it as an error to the user"""

        # is it progress output?
        progress_fields = line.rstrip().split("\t")

        if (line.find("nvlist version") == 0 or
                line.find("resume token contents") == 0 or
                len(progress_fields) != 1 or
                line.find("skipping ") == 0 or
                re.match("send from .*estimated size is ", line)):

            # always output for debugging, of course
            self.debug(prefix + line.rstrip())

            # actual useful info
            if len(progress_fields) >= 3:
                if progress_fields[0] == 'full' or progress_fields[0] == 'size':
                    self._progress_total_bytes = int(progress_fields[2])
                elif progress_fields[0] == 'incremental':
                    self._progress_total_bytes = int(progress_fields[3])
                elif progress_fields[1].isnumeric():
                    bytes_ = int(progress_fields[1])
                    if self._progress_total_bytes:
                        percentage = min(100, int(bytes_ * 100 / self._progress_total_bytes))
                        speed = int(bytes_ / (time.time() - self._progress_start_time) / (1024 * 1024))
                        bytes_left = self._progress_total_bytes - bytes_
                        minutes_left = int((bytes_left / (bytes_ / (time.time() - self._progress_start_time))) / 60)

                        self.logger.progress(
                            "Transfer {}% {}MB/s (total {}MB, {} minutes left)".format(percentage, speed, int(
                                self._progress_total_bytes / (1024 * 1024)), minutes_left))

            return

        # still do the normal stderr output handling
        if hide_errors:
            self.debug(prefix + line.rstrip())
        else:
            self.error(prefix + line.rstrip())

    # def _parse_stderr_pipe(self, line, hide_errors):
    #     self.parse_zfs_progress(line, hide_errors, "STDERR|> ")

    def _parse_stderr(self, line, hide_errors):
        self.parse_zfs_progress(line, hide_errors, "STDERR > ")
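For reference, the tab-separated stderr lines this parses look roughly like the following (field values are illustrative; the exact shape depends on the zfs implementation and version):

# "full\tpool/data@snap\t1073741824"                 -> total size of a full send (field 2)
# "incremental\t@snap1\tpool/data@snap2\t52428800"   -> total size of an incremental send (field 3)
# "12:00:01\t536870912\tpool/data@snap"              -> bytes sent so far (field 1), which drives
#                                                       the "Transfer X% ..." progress line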
    def verbose(self, txt):
        self.logger.verbose("{} {}".format(self.description, txt))

    def error(self, txt):
        self.logger.error("{} {}".format(self.description, txt))

    def warning(self, txt):
        self.logger.warning("{} {}".format(self.description, txt))

    def debug(self, txt):
        self.logger.debug("{} {}".format(self.description, txt))

    def consistent_snapshot(self, datasets, snapshot_name, min_changed_bytes, pre_snapshot_cmds=[],
                            post_snapshot_cmds=[]):
        """create a consistent (atomic) snapshot of the specified datasets, per pool."""

        pools = {}

        # collect snapshots that we want to make, per pool
        # self.debug(datasets)
        for dataset in datasets:
            if not dataset.is_changed_ours(min_changed_bytes):
                dataset.verbose("No changes since {}".format(dataset.our_snapshots[-1].snapshot_name))
                continue

            # force_exists, since we're making it
            snapshot = self.get_dataset(dataset.name + "@" + snapshot_name, force_exists=True)

            pool = dataset.split_path()[0]
            if pool not in pools:
                pools[pool] = []

            pools[pool].append(snapshot)

            # update cache, but try to prevent an unneeded zfs list
            if self.readonly or CachedProperty.is_cached(dataset, 'snapshots'):
                dataset.snapshots.append(snapshot)  # NOTE: this will trigger zfs list if it's not cached

        if not pools:
            self.verbose("No changes anywhere: not creating snapshots.")
            return

        try:
            for cmd in pre_snapshot_cmds:
                self.verbose("Running pre-snapshot-cmd")
                self.run(cmd=shlex.split(cmd), readonly=False)

            # create consistent snapshot per pool
            for (pool_name, snapshots) in pools.items():
                cmd = ["zfs", "snapshot"]

                cmd.extend(map(lambda snapshot_: str(snapshot_), snapshots))

                self.verbose("Creating snapshots {} in pool {}".format(snapshot_name, pool_name))
                self.run(cmd, readonly=False)

        finally:
            for cmd in post_snapshot_cmds:
                self.verbose("Running post-snapshot-cmd")
                try:
                    self.run(cmd=shlex.split(cmd), readonly=False)
                except Exception:
                    # keep running the remaining post-snapshot-cmds even if one fails
                    pass
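The per-pool grouping is what makes the snapshots atomic: zfs can snapshot many datasets of one pool in a single transaction. With two changed datasets in rpool (dataset and snapshot names are illustrative), the command run would be:

#   zfs snapshot rpool/data@offsite1-20210101000000 rpool/home@offsite1-20210101000000
# datasets in a second pool would get their own, separate "zfs snapshot" invocation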
    def selected_datasets(self, property_name, exclude_received, exclude_paths, exclude_unchanged, min_change):
        """determine which filesystems should be backed up by looking at the special autobackup property, system-wide

        returns: list of ZfsDataset
        """

        self.debug("Getting selected datasets")

        # get all source filesystems that have the backup property
        lines = self.run(tab_split=True, readonly=True, cmd=[
            "zfs", "get", "-t", "volume,filesystem", "-o", "name,value,source", "-H",
            property_name
        ])

        # the list of selected ZfsDatasets to return:
        selected_filesystems = []

        # map of dataset name to raw property source, used to resolve inherited sources
        sources = {}

        for line in lines:
            (name, value, raw_source) = line
            dataset = self.get_dataset(name, force_exists=True)

            # "resolve" inherited sources (zfs lists parents before their children,
            # so the ancestor's source is already in sources)
            sources[name] = raw_source
            if raw_source.find("inherited from ") == 0:
                inherited = True
                inherited_from = re.sub("^inherited from ", "", raw_source)
                source = sources[inherited_from]
            else:
                inherited = False
                source = raw_source

            # check whether this dataset is selected for backup
            if dataset.is_selected(value=value, source=source, inherited=inherited, exclude_received=exclude_received,
                                   exclude_paths=exclude_paths, exclude_unchanged=exclude_unchanged,
                                   min_change=min_change):
                selected_filesystems.append(dataset)

        return selected_filesystems
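To illustrate the source-resolution loop above, here is roughly what the tab-split `zfs get` output looks like and how an inherited source resolves back to its ancestor's source. The dataset names and the `autobackup:offsite` property are hypothetical.

```python
# Hypothetical tab-split output of:
#   zfs get -t volume,filesystem -o name,value,source -H autobackup:offsite
lines = [
    ("rpool", "true", "local"),
    ("rpool/data", "true", "inherited from rpool"),
    ("rpool/swap", "false", "local"),
]

sources = {}
for (name, value, raw_source) in lines:
    sources[name] = raw_source
    if raw_source.startswith("inherited from "):
        # look up the source recorded earlier for the ancestor
        source = sources[raw_source[len("inherited from "):]]
    else:
        source = raw_source
    print(name, value, source)
# rpool true local
# rpool/data true local   (inherited via rpool)
# rpool/swap false local
```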
zfs_autobackup/ZfsPool.py (new file, 63 lines)
@@ -0,0 +1,63 @@
from .CachedProperty import CachedProperty


class ZfsPool():
    """a zfs pool"""

    def __init__(self, zfs_node, name):
        """name: name of the pool"""

        self.zfs_node = zfs_node
        self.name = name

    def __repr__(self):
        return "{}: {}".format(self.zfs_node, self.name)

    def __str__(self):
        return self.name

    def __eq__(self, obj):
        if not isinstance(obj, ZfsPool):
            return False

        return self.name == obj.name

    def verbose(self, txt):
        self.zfs_node.verbose("zpool {}: {}".format(self.name, txt))

    def error(self, txt):
        self.zfs_node.error("zpool {}: {}".format(self.name, txt))

    def debug(self, txt):
        self.zfs_node.debug("zpool {}: {}".format(self.name, txt))

    @CachedProperty
    def properties(self):
        """all zpool properties"""

        self.debug("Getting zpool properties")

        cmd = [
            "zpool", "get", "-H", "-p", "all", self.name
        ]

        ret = {}

        # each line of "zpool get -H" is: name, property, value, source
        for pair in self.zfs_node.run(tab_split=True, cmd=cmd, readonly=True, valid_exitcodes=[0]):
            ret[pair[1]] = pair[2]

        return ret

    @property
    def features(self):
        """get a list of zpool features that are enabled or active"""

        ret = []
        for (key, value) in self.properties.items():
            if key.startswith("feature@"):
                feature = key.split("@")[1]
                if value == 'enabled' or value == 'active':
                    ret.append(feature)

        return ret
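A quick sketch of how `features` filters `properties`: only `feature@...` keys whose value is `enabled` or `active` survive. The property values below are made-up examples in the shape of real `zpool get -H -p all` output.

```python
# Hypothetical stand-in for ZfsPool.properties:
properties = {
    "size": "1099511627776",
    "feature@async_destroy": "enabled",
    "feature@large_blocks": "active",
    "feature@edonr": "disabled",
}

# The same filtering ZfsPool.features performs:
features = [key.split("@")[1]
            for key, value in properties.items()
            if key.startswith("feature@") and value in ("enabled", "active")]
print(features)  # ['async_destroy', 'large_blocks']
```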
zfs_autobackup/__init__.py (new executable file, 3 empty lines)
@@ -0,0 +1,3 @@
zfs_autobackup/__main__.py (new executable file, 7 lines)
@@ -0,0 +1,7 @@
# (c)edwin@datux.nl - Released under GPL V3
#
# Greetings from eth0 2019 :)

import sys
zfs_autobackup/compressors.py (new file, 75 lines)
@@ -0,0 +1,75 @@
# Adopted from Syncoid :)

# this software is licensed for use under the Free Software Foundation's GPL v3.0 license, as retrieved
# from http://www.gnu.org/licenses/gpl-3.0.html on 2014-11-17. A copy should also be available in this
# project's Git repository at https://github.com/jimsalterjrs/sanoid/blob/master/LICENSE.

COMPRESS_CMDS = {
    'gzip': {
        'cmd': 'gzip',
        'args': ['-3'],
        'dcmd': 'zcat',
        'dargs': [],
    },
    'pigz-fast': {
        'cmd': 'pigz',
        'args': ['-3'],
        'dcmd': 'pigz',
        'dargs': ['-dc'],
    },
    'pigz-slow': {
        'cmd': 'pigz',
        'args': ['-9'],
        'dcmd': 'pigz',
        'dargs': ['-dc'],
    },
    'zstd-fast': {
        'cmd': 'zstdmt',
        'args': ['-3'],
        'dcmd': 'zstdmt',
        'dargs': ['-dc'],
    },
    'zstd-slow': {
        'cmd': 'zstdmt',
        'args': ['-19'],
        'dcmd': 'zstdmt',
        'dargs': ['-dc'],
    },
    'zstd-adapt': {
        'cmd': 'zstdmt',
        'args': ['--adapt'],
        'dcmd': 'zstdmt',
        'dargs': ['-dc'],
    },
    'xz': {
        'cmd': 'xz',
        'args': [],
        'dcmd': 'xz',
        'dargs': ['-d'],
    },
    'lzo': {
        'cmd': 'lzop',
        'args': [],
        'dcmd': 'lzop',
        'dargs': ['-dfc'],
    },
    'lz4': {
        'cmd': 'lz4',
        'args': [],
        'dcmd': 'lz4',
        'dargs': ['-dc'],
    },
}


def compress_cmd(compressor):
    """build the compression command line, e.g. ['gzip', '-3']"""
    ret = [COMPRESS_CMDS[compressor]['cmd']]
    ret.extend(COMPRESS_CMDS[compressor]['args'])
    return ret


def decompress_cmd(compressor):
    """build the matching decompression command line, e.g. ['zcat']"""
    ret = [COMPRESS_CMDS[compressor]['dcmd']]
    ret.extend(COMPRESS_CMDS[compressor]['dargs'])
    return ret


def choices():
    """names of all supported compressors"""
    return COMPRESS_CMDS.keys()
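A short usage sketch for these helpers; the send/receive pipeline in the trailing comment is a hypothetical illustration of where the commands would sit, not code from this diff.

```python
from zfs_autobackup.compressors import choices, compress_cmd, decompress_cmd

print(sorted(choices()))            # ['gzip', 'lz4', 'lzo', 'pigz-fast', ...]
print(compress_cmd('zstd-fast'))    # ['zstdmt', '-3']
print(decompress_cmd('zstd-fast'))  # ['zstdmt', '-dc']

# Conceptually, these bracket the zfs stream on each side of the transport:
#   zfs send tank/data@snap | zstdmt -3 | ssh target "zstdmt -dc | zfs recv ..."
```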