Compare commits

...

19 Commits

Author SHA1 Message Date
ea9012e476 beta6 2021-05-18 23:42:13 +02:00
97e3c110b3 added bandwidth throttling. fixes #51 2021-05-18 19:56:33 +02:00
9264e8de6d more warnings 2021-05-18 19:36:33 +02:00
830ccf1bd4 added warnings in yellow 2021-05-18 19:22:46 +02:00
a389e4c81c fix 2021-05-18 18:18:54 +02:00
36a66fbafc fix 2021-05-18 18:10:34 +02:00
b70c9986c7 regression tests for all compressors 2021-05-18 18:04:47 +02:00
664ea32c96 doc 2021-05-15 16:18:34 +02:00
30f30babea added compression, fixes #40 2021-05-15 16:18:02 +02:00
5e04aabf37 show pipes in verbose 2021-05-15 12:34:21 +02:00
59d53e9664 --recv-pipe and --send-pipe implemented. Added CmdItem to make CmdPipe more consitent 2021-05-11 00:59:26 +02:00
171f0ac5ad as final step we now can do system piping. fixes #50 2021-05-09 14:03:57 +02:00
0ce3bf1297 python 2 compat 2021-05-09 13:04:22 +02:00
c682665888 python 2 compat 2021-05-09 11:09:55 +02:00
086cfe570b run everything in either local shell (shell=true), or remote shell (ssh). this it to allow external shell piping 2021-05-09 10:56:30 +02:00
521d1078bd working on send pipe 2021-05-03 20:25:49 +02:00
8ea178af1f test re-replication 2021-05-03 00:03:22 +02:00
3e39e1553e allow re-replication of a backup with the same name. (now filters on target_path instead of received-status when selecting when appropriate. also shows notes about this) 2021-05-02 22:51:20 +02:00
f0cc2bca2a improved progress reporting. improved no_thinning performance 2021-04-23 20:31:37 +02:00
17 changed files with 586 additions and 243 deletions

View File

@ -17,7 +17,7 @@ jobs:
- name: Prepare - name: Prepare
run: sudo apt update && sudo apt install zfsutils-linux && sudo -H pip3 install coverage unittest2 mock==3.0.5 coveralls run: sudo apt update && sudo apt install zfsutils-linux lzop pigz zstd gzip xz-utils lz4 && sudo -H pip3 install coverage unittest2 mock==3.0.5 coveralls
- name: Regression test - name: Regression test
@ -39,7 +39,7 @@ jobs:
- name: Prepare - name: Prepare
run: sudo apt update && sudo apt install zfsutils-linux python3-setuptools && sudo -H pip3 install coverage unittest2 mock==3.0.5 coveralls run: sudo apt update && sudo apt install zfsutils-linux python3-setuptools lzop pigz zstd gzip xz-utils liblz4-tool && sudo -H pip3 install coverage unittest2 mock==3.0.5 coveralls
- name: Regression test - name: Regression test
@ -64,7 +64,7 @@ jobs:
python-version: '2.x' python-version: '2.x'
- name: Prepare - name: Prepare
run: sudo apt update && sudo apt install zfsutils-linux python-setuptools && sudo -H pip install coverage unittest2 mock==3.0.5 coveralls colorama run: sudo apt update && sudo apt install zfsutils-linux python-setuptools lzop pigz zstd gzip xz-utils liblz4-tool && sudo -H pip install coverage unittest2 mock==3.0.5 coveralls colorama
- name: Regression test - name: Regression test
run: sudo -E ./tests/run_tests run: sudo -E ./tests/run_tests

View File

@ -64,6 +64,8 @@ The recommended way on most servers is to use [pip](https://pypi.org/project/zfs
This can also be used to upgrade zfs-autobackup to the newest stable version. This can also be used to upgrade zfs-autobackup to the newest stable version.
To install the latest beta version add the `--pre` option.
### Using easy_install ### Using easy_install
On older servers you might have to use easy_install On older servers you might have to use easy_install

View File

@ -1,5 +1,5 @@
from basetest import * from basetest import *
from zfs_autobackup.CmdPipe import CmdPipe from zfs_autobackup.CmdPipe import CmdPipe,CmdItem
class TestCmdPipe(unittest2.TestCase): class TestCmdPipe(unittest2.TestCase):
@ -9,24 +9,24 @@ class TestCmdPipe(unittest2.TestCase):
p=CmdPipe(readonly=False, inp=None) p=CmdPipe(readonly=False, inp=None)
err=[] err=[]
out=[] out=[]
p.add(["ls", "-d", "/", "/", "/nonexistent"], stderr_handler=lambda line: err.append(line), exit_handler=lambda exit_code: self.assertEqual(exit_code,2)) p.add(CmdItem(["ls", "-d", "/", "/", "/nonexistent"], stderr_handler=lambda line: err.append(line), exit_handler=lambda exit_code: self.assertEqual(exit_code,2)))
executed=p.execute(stdout_handler=lambda line: out.append(line)) executed=p.execute(stdout_handler=lambda line: out.append(line))
self.assertEqual(err, ["ls: cannot access '/nonexistent': No such file or directory"]) self.assertEqual(err, ["ls: cannot access '/nonexistent': No such file or directory"])
self.assertEqual(out, ["/","/"]) self.assertEqual(out, ["/","/"])
self.assertTrue(executed) self.assertIsNone(executed)
def test_input(self): def test_input(self):
"""test stdinput""" """test stdinput"""
p=CmdPipe(readonly=False, inp="test") p=CmdPipe(readonly=False, inp="test")
err=[] err=[]
out=[] out=[]
p.add(["echo", "test"], stderr_handler=lambda line: err.append(line), exit_handler=lambda exit_code: self.assertEqual(exit_code,0)) p.add(CmdItem(["echo", "test"], stderr_handler=lambda line: err.append(line), exit_handler=lambda exit_code: self.assertEqual(exit_code,0)))
executed=p.execute(stdout_handler=lambda line: out.append(line)) executed=p.execute(stdout_handler=lambda line: out.append(line))
self.assertEqual(err, []) self.assertEqual(err, [])
self.assertEqual(out, ["test"]) self.assertEqual(out, ["test"])
self.assertTrue(executed) self.assertIsNone(executed)
def test_pipe(self): def test_pipe(self):
"""test piped""" """test piped"""
@ -35,16 +35,16 @@ class TestCmdPipe(unittest2.TestCase):
err2=[] err2=[]
err3=[] err3=[]
out=[] out=[]
p.add(["echo", "test"], stderr_handler=lambda line: err1.append(line), exit_handler=lambda exit_code: self.assertEqual(exit_code,0)) p.add(CmdItem(["echo", "test"], stderr_handler=lambda line: err1.append(line), exit_handler=lambda exit_code: self.assertEqual(exit_code,0)))
p.add(["tr", "e", "E"], stderr_handler=lambda line: err2.append(line), exit_handler=lambda exit_code: self.assertEqual(exit_code,0)) p.add(CmdItem(["tr", "e", "E"], stderr_handler=lambda line: err2.append(line), exit_handler=lambda exit_code: self.assertEqual(exit_code,0)))
p.add(["tr", "t", "T"], stderr_handler=lambda line: err3.append(line), exit_handler=lambda exit_code: self.assertEqual(exit_code,0)) p.add(CmdItem(["tr", "t", "T"], stderr_handler=lambda line: err3.append(line), exit_handler=lambda exit_code: self.assertEqual(exit_code,0)))
executed=p.execute(stdout_handler=lambda line: out.append(line)) executed=p.execute(stdout_handler=lambda line: out.append(line))
self.assertEqual(err1, []) self.assertEqual(err1, [])
self.assertEqual(err2, []) self.assertEqual(err2, [])
self.assertEqual(err3, []) self.assertEqual(err3, [])
self.assertEqual(out, ["TEsT"]) self.assertEqual(out, ["TEsT"])
self.assertTrue(executed) self.assertIsNone(executed)
#test str representation as well #test str representation as well
self.assertEqual(str(p), "(echo test) | (tr e E) | (tr t T)") self.assertEqual(str(p), "(echo test) | (tr e E) | (tr t T)")
@ -56,16 +56,16 @@ class TestCmdPipe(unittest2.TestCase):
err2=[] err2=[]
err3=[] err3=[]
out=[] out=[]
p.add(["ls", "/nonexistent1"], stderr_handler=lambda line: err1.append(line), exit_handler=lambda exit_code: self.assertEqual(exit_code,2)) p.add(CmdItem(["ls", "/nonexistent1"], stderr_handler=lambda line: err1.append(line), exit_handler=lambda exit_code: self.assertEqual(exit_code,2)))
p.add(["ls", "/nonexistent2"], stderr_handler=lambda line: err2.append(line), exit_handler=lambda exit_code: self.assertEqual(exit_code,2)) p.add(CmdItem(["ls", "/nonexistent2"], stderr_handler=lambda line: err2.append(line), exit_handler=lambda exit_code: self.assertEqual(exit_code,2)))
p.add(["ls", "/nonexistent3"], stderr_handler=lambda line: err3.append(line), exit_handler=lambda exit_code: self.assertEqual(exit_code,2)) p.add(CmdItem(["ls", "/nonexistent3"], stderr_handler=lambda line: err3.append(line), exit_handler=lambda exit_code: self.assertEqual(exit_code,2)))
executed=p.execute(stdout_handler=lambda line: out.append(line)) executed=p.execute(stdout_handler=lambda line: out.append(line))
self.assertEqual(err1, ["ls: cannot access '/nonexistent1': No such file or directory"]) self.assertEqual(err1, ["ls: cannot access '/nonexistent1': No such file or directory"])
self.assertEqual(err2, ["ls: cannot access '/nonexistent2': No such file or directory"]) self.assertEqual(err2, ["ls: cannot access '/nonexistent2': No such file or directory"])
self.assertEqual(err3, ["ls: cannot access '/nonexistent3': No such file or directory"]) self.assertEqual(err3, ["ls: cannot access '/nonexistent3': No such file or directory"])
self.assertEqual(out, []) self.assertEqual(out, [])
self.assertTrue(executed) self.assertIsNone(executed)
def test_exitcode(self): def test_exitcode(self):
"""test piped exitcodes """ """test piped exitcodes """
@ -74,16 +74,16 @@ class TestCmdPipe(unittest2.TestCase):
err2=[] err2=[]
err3=[] err3=[]
out=[] out=[]
p.add(["bash", "-c", "exit 1"], stderr_handler=lambda line: err1.append(line), exit_handler=lambda exit_code: self.assertEqual(exit_code,1)) p.add(CmdItem(["bash", "-c", "exit 1"], stderr_handler=lambda line: err1.append(line), exit_handler=lambda exit_code: self.assertEqual(exit_code,1)))
p.add(["bash", "-c", "exit 2"], stderr_handler=lambda line: err2.append(line), exit_handler=lambda exit_code: self.assertEqual(exit_code,2)) p.add(CmdItem(["bash", "-c", "exit 2"], stderr_handler=lambda line: err2.append(line), exit_handler=lambda exit_code: self.assertEqual(exit_code,2)))
p.add(["bash", "-c", "exit 3"], stderr_handler=lambda line: err3.append(line), exit_handler=lambda exit_code: self.assertEqual(exit_code,3)) p.add(CmdItem(["bash", "-c", "exit 3"], stderr_handler=lambda line: err3.append(line), exit_handler=lambda exit_code: self.assertEqual(exit_code,3)))
executed=p.execute(stdout_handler=lambda line: out.append(line)) executed=p.execute(stdout_handler=lambda line: out.append(line))
self.assertEqual(err1, []) self.assertEqual(err1, [])
self.assertEqual(err2, []) self.assertEqual(err2, [])
self.assertEqual(err3, []) self.assertEqual(err3, [])
self.assertEqual(out, []) self.assertEqual(out, [])
self.assertTrue(executed) self.assertIsNone(executed)
def test_readonly_execute(self): def test_readonly_execute(self):
"""everything readonly, just should execute""" """everything readonly, just should execute"""
@ -92,16 +92,18 @@ class TestCmdPipe(unittest2.TestCase):
err1=[] err1=[]
err2=[] err2=[]
out=[] out=[]
p.add(["echo", "test1"], stderr_handler=lambda line: err1.append(line), readonly=True)
p.add(["echo", "test2"], stderr_handler=lambda line: err2.append(line), readonly=True) def true_exit(exit_code):
return True
p.add(CmdItem(["echo", "test1"], stderr_handler=lambda line: err1.append(line), exit_handler=true_exit, readonly=True))
p.add(CmdItem(["echo", "test2"], stderr_handler=lambda line: err2.append(line), exit_handler=true_exit, readonly=True))
executed=p.execute(stdout_handler=lambda line: out.append(line)) executed=p.execute(stdout_handler=lambda line: out.append(line))
self.assertEqual(err1, []) self.assertEqual(err1, [])
self.assertEqual(err2, []) self.assertEqual(err2, [])
self.assertEqual(out, ["test2"]) self.assertEqual(out, ["test2"])
self.assertTrue(executed) self.assertTrue(executed)
self.assertEqual(p.items[0]['process'].returncode,0)
self.assertEqual(p.items[1]['process'].returncode,0)
def test_readonly_skip(self): def test_readonly_skip(self):
"""one command not readonly, skip""" """one command not readonly, skip"""
@ -110,12 +112,12 @@ class TestCmdPipe(unittest2.TestCase):
err1=[] err1=[]
err2=[] err2=[]
out=[] out=[]
p.add(["echo", "test1"], stderr_handler=lambda line: err1.append(line), readonly=False) p.add(CmdItem(["echo", "test1"], stderr_handler=lambda line: err1.append(line), readonly=False))
p.add(["echo", "test2"], stderr_handler=lambda line: err2.append(line), readonly=True) p.add(CmdItem(["echo", "test2"], stderr_handler=lambda line: err2.append(line), readonly=True))
executed=p.execute(stdout_handler=lambda line: out.append(line)) executed=p.execute(stdout_handler=lambda line: out.append(line))
self.assertEqual(err1, []) self.assertEqual(err1, [])
self.assertEqual(err2, []) self.assertEqual(err2, [])
self.assertEqual(out, []) self.assertEqual(out, [])
self.assertFalse(executed) self.assertTrue(executed)

View File

@ -49,12 +49,12 @@ class TestZfsEncryption(unittest2.TestCase):
self.prepare_encrypted_dataset("22222222", "test_target1/encryptedtarget") self.prepare_encrypted_dataset("22222222", "test_target1/encryptedtarget")
with patch('time.strftime', return_value="20101111000000"): with patch('time.strftime', return_value="20101111000000"):
self.assertFalse(ZfsAutobackup("test test_target1 --verbose --no-progress --allow-empty".split(" ")).run()) self.assertFalse(ZfsAutobackup("test test_target1 --verbose --no-progress --allow-empty --exclude-received".split(" ")).run())
self.assertFalse(ZfsAutobackup("test test_target1/encryptedtarget --verbose --no-progress --no-snapshot".split(" ")).run()) self.assertFalse(ZfsAutobackup("test test_target1/encryptedtarget --verbose --no-progress --no-snapshot --exclude-received".split(" ")).run())
with patch('time.strftime', return_value="20101111000001"): with patch('time.strftime', return_value="20101111000001"):
self.assertFalse(ZfsAutobackup("test test_target1 --verbose --no-progress --allow-empty".split(" ")).run()) self.assertFalse(ZfsAutobackup("test test_target1 --verbose --no-progress --allow-empty --exclude-received".split(" ")).run())
self.assertFalse(ZfsAutobackup("test test_target1/encryptedtarget --verbose --no-progress --no-snapshot".split(" ")).run()) self.assertFalse(ZfsAutobackup("test test_target1/encryptedtarget --verbose --no-progress --no-snapshot --exclude-received".split(" ")).run())
r = shelltest("zfs get -r -t filesystem encryptionroot test_target1") r = shelltest("zfs get -r -t filesystem encryptionroot test_target1")
self.assertMultiLineEqual(r,""" self.assertMultiLineEqual(r,"""
@ -86,12 +86,12 @@ test_target1/test_source2/fs2/sub encryption
self.prepare_encrypted_dataset("22222222", "test_target1/encryptedtarget") self.prepare_encrypted_dataset("22222222", "test_target1/encryptedtarget")
with patch('time.strftime', return_value="20101111000000"): with patch('time.strftime', return_value="20101111000000"):
self.assertFalse(ZfsAutobackup("test test_target1 --verbose --no-progress --decrypt --allow-empty".split(" ")).run()) self.assertFalse(ZfsAutobackup("test test_target1 --verbose --no-progress --decrypt --allow-empty --exclude-received".split(" ")).run())
self.assertFalse(ZfsAutobackup("test test_target1/encryptedtarget --verbose --no-progress --decrypt --no-snapshot".split(" ")).run()) self.assertFalse(ZfsAutobackup("test test_target1/encryptedtarget --verbose --no-progress --decrypt --no-snapshot --exclude-received".split(" ")).run())
with patch('time.strftime', return_value="20101111000001"): with patch('time.strftime', return_value="20101111000001"):
self.assertFalse(ZfsAutobackup("test test_target1 --verbose --no-progress --decrypt --allow-empty".split(" ")).run()) self.assertFalse(ZfsAutobackup("test test_target1 --verbose --no-progress --decrypt --allow-empty --exclude-received".split(" ")).run())
self.assertFalse(ZfsAutobackup("test test_target1/encryptedtarget --verbose --no-progress --decrypt --no-snapshot".split(" ")).run()) self.assertFalse(ZfsAutobackup("test test_target1/encryptedtarget --verbose --no-progress --decrypt --no-snapshot --exclude-received".split(" ")).run())
r = shelltest("zfs get -r -t filesystem encryptionroot test_target1") r = shelltest("zfs get -r -t filesystem encryptionroot test_target1")
self.assertEqual(r, """ self.assertEqual(r, """
@ -121,12 +121,12 @@ test_target1/test_source2/fs2/sub encryptionroot -
self.prepare_encrypted_dataset("22222222", "test_target1/encryptedtarget") self.prepare_encrypted_dataset("22222222", "test_target1/encryptedtarget")
with patch('time.strftime', return_value="20101111000000"): with patch('time.strftime', return_value="20101111000000"):
self.assertFalse(ZfsAutobackup("test test_target1 --verbose --no-progress --encrypt --debug --allow-empty".split(" ")).run()) self.assertFalse(ZfsAutobackup("test test_target1 --verbose --no-progress --encrypt --debug --allow-empty --exclude-received".split(" ")).run())
self.assertFalse(ZfsAutobackup("test test_target1/encryptedtarget --verbose --no-progress --encrypt --debug --no-snapshot".split(" ")).run()) self.assertFalse(ZfsAutobackup("test test_target1/encryptedtarget --verbose --no-progress --encrypt --debug --no-snapshot --exclude-received".split(" ")).run())
with patch('time.strftime', return_value="20101111000001"): with patch('time.strftime', return_value="20101111000001"):
self.assertFalse(ZfsAutobackup("test test_target1 --verbose --no-progress --encrypt --debug --allow-empty".split(" ")).run()) self.assertFalse(ZfsAutobackup("test test_target1 --verbose --no-progress --encrypt --debug --allow-empty --exclude-received".split(" ")).run())
self.assertFalse(ZfsAutobackup("test test_target1/encryptedtarget --verbose --no-progress --encrypt --debug --no-snapshot".split(" ")).run()) self.assertFalse(ZfsAutobackup("test test_target1/encryptedtarget --verbose --no-progress --encrypt --debug --no-snapshot --exclude-received".split(" ")).run())
r = shelltest("zfs get -r -t filesystem encryptionroot test_target1") r = shelltest("zfs get -r -t filesystem encryptionroot test_target1")
self.assertEqual(r, """ self.assertEqual(r, """
@ -157,16 +157,16 @@ test_target1/test_source2/fs2/sub encryptionroot -
with patch('time.strftime', return_value="20101111000000"): with patch('time.strftime', return_value="20101111000000"):
self.assertFalse(ZfsAutobackup( self.assertFalse(ZfsAutobackup(
"test test_target1 --verbose --no-progress --decrypt --encrypt --debug --allow-empty".split(" ")).run()) "test test_target1 --verbose --no-progress --decrypt --encrypt --debug --allow-empty --exclude-received".split(" ")).run())
self.assertFalse(ZfsAutobackup( self.assertFalse(ZfsAutobackup(
"test test_target1/encryptedtarget --verbose --no-progress --decrypt --encrypt --debug --no-snapshot".split( "test test_target1/encryptedtarget --verbose --no-progress --decrypt --encrypt --debug --no-snapshot --exclude-received".split(
" ")).run()) " ")).run())
with patch('time.strftime', return_value="20101111000001"): with patch('time.strftime', return_value="20101111000001"):
self.assertFalse(ZfsAutobackup( self.assertFalse(ZfsAutobackup(
"test test_target1 --verbose --no-progress --decrypt --encrypt --debug --allow-empty".split(" ")).run()) "test test_target1 --verbose --no-progress --decrypt --encrypt --debug --allow-empty --exclude-received".split(" ")).run())
self.assertFalse(ZfsAutobackup( self.assertFalse(ZfsAutobackup(
"test test_target1/encryptedtarget --verbose --no-progress --decrypt --encrypt --debug --no-snapshot".split( "test test_target1/encryptedtarget --verbose --no-progress --decrypt --encrypt --debug --no-snapshot --exclude-received".split(
" ")).run()) " ")).run())
r = shelltest("zfs get -r -t filesystem encryptionroot test_target1") r = shelltest("zfs get -r -t filesystem encryptionroot test_target1")

View File

@ -26,9 +26,9 @@ class TestExecuteNode(unittest2.TestCase):
with self.subTest("multiline tabsplit"): with self.subTest("multiline tabsplit"):
self.assertEqual(node.run(["echo","l1c1\tl1c2\nl2c1\tl2c2"], tab_split=True), [['l1c1', 'l1c2'], ['l2c1', 'l2c2']]) self.assertEqual(node.run(["echo","l1c1\tl1c2\nl2c1\tl2c2"], tab_split=True), [['l1c1', 'l1c2'], ['l2c1', 'l2c2']])
#escaping test (shouldnt be a problem locally, single quotes can be a problem remote via ssh) #escaping test
with self.subTest("escape test"): with self.subTest("escape test"):
s="><`'\"@&$()$bla\\/.*!#test _+-={}[]|" s="><`'\"@&$()$bla\\/.* !#test _+-={}[]|${bla} $bla"
self.assertEqual(node.run(["echo",s]), [s]) self.assertEqual(node.run(["echo",s]), [s])
#return std err as well, trigger stderr by listing something non existing #return std err as well, trigger stderr by listing something non existing
@ -51,6 +51,15 @@ class TestExecuteNode(unittest2.TestCase):
with self.subTest("stdin process with inp=None (shouldn't hang)"): with self.subTest("stdin process with inp=None (shouldn't hang)"):
self.assertEqual(node.run(["cat"]), []) self.assertEqual(node.run(["cat"]), [])
# let the system do the piping with an unescaped |:
with self.subTest("system piping test"):
#first make sure the actual | character is still properly escaped:
self.assertEqual(node.run(["echo","|"]), ["|"])
#now pipe
self.assertEqual(node.run(["echo", "abc", node.PIPE, "tr", "a", "A" ]), ["Abc"])
def test_basics_local(self): def test_basics_local(self):
node=ExecuteNode(debug_output=True) node=ExecuteNode(debug_output=True)
self.basics(node) self.basics(node)

View File

@ -0,0 +1,49 @@
import zfs_autobackup.compressors
from basetest import *
import time
class TestSendRecvPipes(unittest2.TestCase):
"""test input/output pipes for zfs send and recv"""
def setUp(self):
prepare_zpools()
self.longMessage=True
def test_send_basics(self):
"""send basics (remote/local send pipe)"""
with self.subTest("local local pipe"):
with patch('time.strftime', return_value="20101111000000"):
self.assertFalse(ZfsAutobackup(["test", "test_target1", "--exclude-received", "--no-holds", "--no-progress", "--send-pipe=dd bs=1M", "--recv-pipe=dd bs=2M"]).run())
shelltest("zfs destroy -r test_target1/test_source1/fs1/sub")
with self.subTest("remote local pipe"):
with patch('time.strftime', return_value="20101111000000"):
self.assertFalse(ZfsAutobackup(["test", "test_target1", "--exclude-received", "--no-holds", "--no-progress", "--ssh-source=localhost", "--send-pipe=dd bs=1M", "--recv-pipe=dd bs=2M"]).run())
shelltest("zfs destroy -r test_target1/test_source1/fs1/sub")
with self.subTest("local remote pipe"):
with patch('time.strftime', return_value="20101111000000"):
self.assertFalse(ZfsAutobackup(["test", "test_target1", "--exclude-received", "--no-holds", "--no-progress", "--ssh-target=localhost", "--send-pipe=dd bs=1M", "--recv-pipe=dd bs=2M"]).run())
shelltest("zfs destroy -r test_target1/test_source1/fs1/sub")
with self.subTest("remote remote pipe"):
with patch('time.strftime', return_value="20101111000000"):
self.assertFalse(ZfsAutobackup(["test", "test_target1", "--exclude-received", "--no-holds", "--no-progress", "--ssh-source=localhost", "--ssh-target=localhost", "--send-pipe=dd bs=1M", "--recv-pipe=dd bs=2M"]).run())
def test_compress(self):
"""send basics (remote/local send pipe)"""
for compress in zfs_autobackup.compressors.COMPRESS_CMDS.keys():
with self.subTest("compress "+compress):
with patch('time.strftime', return_value="20101111000000"):
self.assertFalse(ZfsAutobackup(["test", "test_target1", "--exclude-received", "--no-holds", "--no-progress", "--compress="+compress]).run())
shelltest("zfs destroy -r test_target1/test_source1/fs1/sub")

View File

@ -590,10 +590,10 @@ test_target1/test_source2/fs2/sub@test-20101111000003
#test all ssh directions #test all ssh directions
with patch('time.strftime', return_value="20101111000000"): with patch('time.strftime', return_value="20101111000000"):
self.assertFalse(ZfsAutobackup("test test_target1 --no-progress --verbose --allow-empty --ssh-source localhost".split(" ")).run()) self.assertFalse(ZfsAutobackup("test test_target1 --no-progress --verbose --allow-empty --ssh-source localhost --exclude-received".split(" ")).run())
with patch('time.strftime', return_value="20101111000001"): with patch('time.strftime', return_value="20101111000001"):
self.assertFalse(ZfsAutobackup("test test_target1 --no-progress --verbose --allow-empty --ssh-target localhost".split(" ")).run()) self.assertFalse(ZfsAutobackup("test test_target1 --no-progress --verbose --allow-empty --ssh-target localhost --exclude-received".split(" ")).run())
with patch('time.strftime', return_value="20101111000002"): with patch('time.strftime', return_value="20101111000002"):
self.assertFalse(ZfsAutobackup("test test_target1 --no-progress --verbose --allow-empty --ssh-source localhost --ssh-target localhost".split(" ")).run()) self.assertFalse(ZfsAutobackup("test test_target1 --no-progress --verbose --allow-empty --ssh-source localhost --ssh-target localhost".split(" ")).run())
@ -890,7 +890,7 @@ test_target1/test_source2/fs2/sub@test-20101111000003
n=ZfsNode("test",l) n=ZfsNode("test",l)
d=ZfsDataset(n,"test_source1@test") d=ZfsDataset(n,"test_source1@test")
sp=d.send_pipe([], prev_snapshot=None, resume_token=None, show_progress=True, raw=False, output_pipes=[], send_properties=True, write_embedded=True) sp=d.send_pipe([], prev_snapshot=None, resume_token=None, show_progress=True, raw=False, send_pipes=[], send_properties=True, write_embedded=True)
with OutputIO() as buf: with OutputIO() as buf:

View File

@ -2,6 +2,7 @@ from basetest import *
import time import time
class TestZfsAutobackup31(unittest2.TestCase): class TestZfsAutobackup31(unittest2.TestCase):
"""various new 3.1 features"""
def setUp(self): def setUp(self):
prepare_zpools() prepare_zpools()
@ -47,3 +48,27 @@ test_target1/test_source2/fs2/sub@test-20101111000001
""") """)
def test_re_replication(self):
"""test re-replication of something thats already a backup (new in v3.1-beta5)"""
shelltest("zfs create test_target1/a")
shelltest("zfs create test_target1/b")
with patch('time.strftime', return_value="20101111000000"):
self.assertFalse(ZfsAutobackup("test test_target1/a --no-progress --verbose --debug".split(" ")).run())
with patch('time.strftime', return_value="20101111000001"):
self.assertFalse(ZfsAutobackup("test test_target1/b --no-progress --verbose".split(" ")).run())
r=shelltest("zfs list -H -o name -r -t snapshot test_target1")
#NOTE: it wont backup test_target1/a/test_source2/fs2/sub to test_target1/b since it doesnt have the zfs_autobackup property anymore.
self.assertMultiLineEqual(r,"""
test_target1/a/test_source1/fs1@test-20101111000000
test_target1/a/test_source1/fs1/sub@test-20101111000000
test_target1/a/test_source2/fs2/sub@test-20101111000000
test_target1/b/test_source1/fs1@test-20101111000000
test_target1/b/test_source1/fs1/sub@test-20101111000000
test_target1/b/test_source2/fs2/sub@test-20101111000000
test_target1/b/test_target1/a/test_source1/fs1@test-20101111000000
test_target1/b/test_target1/a/test_source1/fs1/sub@test-20101111000000
""")

View File

@ -16,7 +16,7 @@ class TestZfsNode(unittest2.TestCase):
node=ZfsNode("test", logger, description=description) node=ZfsNode("test", logger, description=description)
with self.subTest("first snapshot"): with self.subTest("first snapshot"):
node.consistent_snapshot(node.selected_datasets, "test-1",100000) node.consistent_snapshot(node.selected_datasets(exclude_paths=[], exclude_received=False), "test-1",100000)
r=shelltest("zfs list -H -o name -r -t all "+TEST_POOLS) r=shelltest("zfs list -H -o name -r -t all "+TEST_POOLS)
self.assertEqual(r,""" self.assertEqual(r,"""
test_source1 test_source1
@ -35,7 +35,7 @@ test_target1
with self.subTest("second snapshot, no changes, no snapshot"): with self.subTest("second snapshot, no changes, no snapshot"):
node.consistent_snapshot(node.selected_datasets, "test-2",1) node.consistent_snapshot(node.selected_datasets(exclude_paths=[], exclude_received=False), "test-2",1)
r=shelltest("zfs list -H -o name -r -t all "+TEST_POOLS) r=shelltest("zfs list -H -o name -r -t all "+TEST_POOLS)
self.assertEqual(r,""" self.assertEqual(r,"""
test_source1 test_source1
@ -53,7 +53,7 @@ test_target1
""") """)
with self.subTest("second snapshot, no changes, empty snapshot"): with self.subTest("second snapshot, no changes, empty snapshot"):
node.consistent_snapshot(node.selected_datasets, "test-2",0) node.consistent_snapshot(node.selected_datasets(exclude_paths=[], exclude_received=False), "test-2",0)
r=shelltest("zfs list -H -o name -r -t all "+TEST_POOLS) r=shelltest("zfs list -H -o name -r -t all "+TEST_POOLS)
self.assertEqual(r,""" self.assertEqual(r,"""
test_source1 test_source1
@ -78,7 +78,7 @@ test_target1
logger=LogStub() logger=LogStub()
description="[Source]" description="[Source]"
node=ZfsNode("test", logger, description=description) node=ZfsNode("test", logger, description=description)
s=pformat(node.selected_datasets) s=pformat(node.selected_datasets(exclude_paths=[], exclude_received=False))
print(s) print(s)
#basics #basics

View File

@ -2,6 +2,49 @@ import subprocess
import os import os
import select import select
try:
from shlex import quote as cmd_quote
except ImportError:
from pipes import quote as cmd_quote
class CmdItem:
"""one command item, to be added to a CmdPipe"""
def __init__(self, cmd, readonly=False, stderr_handler=None, exit_handler=None, shell=False):
"""create item. caller has to make sure cmd is properly escaped when using shell.
:type cmd: list of str
"""
self.cmd = cmd
self.readonly = readonly
self.stderr_handler = stderr_handler
self.exit_handler = exit_handler
self.shell = shell
self.process = None
def __str__(self):
"""return copy-pastable version of command."""
if self.shell:
# its already copy pastable for a shell:
return " ".join(self.cmd)
else:
# make it copy-pastable, will make a mess of quotes sometimes, but is correct
return " ".join(map(cmd_quote, self.cmd))
def create(self, stdin):
"""actually create the subprocess (called by CmdPipe)"""
# make sure the command gets all the data in utf8 format:
# (this is necessary if LC_ALL=en_US.utf8 is not set in the environment)
encoded_cmd = []
for arg in self.cmd:
encoded_cmd.append(arg.encode('utf-8'))
self.process = subprocess.Popen(encoded_cmd, env=os.environ, stdout=subprocess.PIPE, stdin=stdin,
stderr=subprocess.PIPE, shell=self.shell)
class CmdPipe: class CmdPipe:
"""a pipe of one or more commands. also takes care of utf-8 encoding/decoding and line based parsing""" """a pipe of one or more commands. also takes care of utf-8 encoding/decoding and line based parsing"""
@ -17,42 +60,35 @@ class CmdPipe:
self.readonly = readonly self.readonly = readonly
self._should_execute = True self._should_execute = True
def add(self, cmd, readonly=False, stderr_handler=None, exit_handler=None): def add(self, cmd_item):
"""adds a command to pipe""" """adds a CmdItem to pipe.
:type cmd_item: CmdItem
"""
self.items.append({ self.items.append(cmd_item)
'cmd': cmd,
'stderr_handler': stderr_handler,
'exit_handler': exit_handler
})
if not readonly and self.readonly: if not cmd_item.readonly and self.readonly:
self._should_execute = False self._should_execute = False
def __str__(self): def __str__(self):
"""transform into oneliner for debugging and testing """ """transform whole pipe into oneliner for debugging and testing. this should generate a copy-pastable string for in a console """
#just one command?
if len(self.items)==1:
return " ".join(self.items[0]['cmd'])
#an actual pipe
ret = "" ret = ""
for item in self.items: for item in self.items:
if ret: if ret:
ret = ret + " | " ret = ret + " | "
ret = ret + "(" + " ".join(item['cmd']) + ")" ret = ret + "({})".format(item) # this will do proper escaping to make it copypastable
return ret return ret
def should_execute(self): def should_execute(self):
return(self._should_execute) return self._should_execute
def execute(self, stdout_handler): def execute(self, stdout_handler):
"""run the pipe. returns True if it executed, and false if it skipped due to readonly conditions""" """run the pipe. returns True all exit handlers returned true"""
if not self._should_execute: if not self._should_execute:
return False return True
# first process should have actual user input as stdin: # first process should have actual user input as stdin:
selectors = [] selectors = []
@ -62,29 +98,21 @@ class CmdPipe:
stdin = subprocess.PIPE stdin = subprocess.PIPE
for item in self.items: for item in self.items:
# make sure the command gets all the data in utf8 format: item.create(stdin)
# (this is necessary if LC_ALL=en_US.utf8 is not set in the environment) selectors.append(item.process.stderr)
encoded_cmd = []
for arg in item['cmd']:
encoded_cmd.append(arg.encode('utf-8'))
item['process'] = subprocess.Popen(encoded_cmd, env=os.environ, stdout=subprocess.PIPE, stdin=stdin,
stderr=subprocess.PIPE)
selectors.append(item['process'].stderr)
if last_stdout is None: if last_stdout is None:
# we're the first process in the pipe, do we have some input? # we're the first process in the pipe, do we have some input?
if self.inp is not None: if self.inp is not None:
# TODO: make streaming to support big inputs? # TODO: make streaming to support big inputs?
item['process'].stdin.write(self.inp.encode('utf-8')) item.process.stdin.write(self.inp.encode('utf-8'))
item['process'].stdin.close() item.process.stdin.close()
else: else:
#last stdout was piped to this stdin already, so close it because we dont need it anymore # last stdout was piped to this stdin already, so close it because we dont need it anymore
last_stdout.close() last_stdout.close()
last_stdout = item['process'].stdout last_stdout = item.process.stdout
stdin=last_stdout stdin = last_stdout
# monitor last stdout as well # monitor last stdout as well
selectors.append(last_stdout) selectors.append(last_stdout)
@ -104,29 +132,29 @@ class CmdPipe:
eof_count = eof_count + 1 eof_count = eof_count + 1
for item in self.items: for item in self.items:
if item['process'].stderr in read_ready: if item.process.stderr in read_ready:
line = item['process'].stderr.readline().decode('utf-8').rstrip() line = item.process.stderr.readline().decode('utf-8').rstrip()
if line != "": if line != "":
item['stderr_handler'](line) item.stderr_handler(line)
else: else:
eof_count = eof_count + 1 eof_count = eof_count + 1
if item['process'].poll() is not None: if item.process.poll() is not None:
done_count = done_count + 1 done_count = done_count + 1
# all filehandles are eof and all processes are done (poll() is not None) # all filehandles are eof and all processes are done (poll() is not None)
if eof_count == len(selectors) and done_count == len(self.items): if eof_count == len(selectors) and done_count == len(self.items):
break break
#close filehandles # close filehandles
last_stdout.close() last_stdout.close()
for item in self.items: for item in self.items:
item['process'].stderr.close() item.process.stderr.close()
#call exit handlers # call exit handlers
success = True
for item in self.items: for item in self.items:
if item['exit_handler'] is not None: if item.exit_handler is not None:
item['exit_handler'](item['process'].returncode) success=item.exit_handler(item.process.returncode) and success
return success
return True

View File

@ -1,16 +1,24 @@
import os import os
import select import select
import subprocess import subprocess
from zfs_autobackup.CmdPipe import CmdPipe, CmdItem
from zfs_autobackup.CmdPipe import CmdPipe
from zfs_autobackup.LogStub import LogStub from zfs_autobackup.LogStub import LogStub
try:
from shlex import quote as cmd_quote
except ImportError:
from pipes import quote as cmd_quote
class ExecuteError(Exception): class ExecuteError(Exception):
pass pass
class ExecuteNode(LogStub): class ExecuteNode(LogStub):
"""an endpoint to execute local or remote commands via ssh""" """an endpoint to execute local or remote commands via ssh"""
PIPE=1
def __init__(self, ssh_config=None, ssh_to=None, readonly=False, debug_output=False): def __init__(self, ssh_config=None, ssh_to=None, readonly=False, debug_output=False):
"""ssh_config: custom ssh config """ssh_config: custom ssh config
ssh_to: server you want to ssh to. none means local ssh_to: server you want to ssh to. none means local
@ -41,48 +49,43 @@ class ExecuteNode(LogStub):
else: else:
self.error("STDERR > " + line.rstrip()) self.error("STDERR > " + line.rstrip())
# def _parse_stderr_pipe(self, line, hide_errors): def _quote(self, cmd):
# """parse stderr from pipe input process. can be overridden in subclass""" """return quoted version of command. if it has value PIPE it will add an actual | """
# if hide_errors: if cmd==self.PIPE:
# self.debug("STDERR|> " + line.rstrip()) return('|')
# else: else:
# self.error("STDERR|> " + line.rstrip()) return(cmd_quote(cmd))
def _remote_cmd(self, cmd): def _shell_cmd(self, cmd):
"""transforms cmd in correct form for remote over ssh, if needed""" """prefix specified ssh shell to command and escape shell characters"""
# use ssh? ret=[]
if self.ssh_to is not None:
encoded_cmd = [] #add remote shell
encoded_cmd.append("ssh") if not self.is_local():
ret=["ssh"]
if self.ssh_config is not None: if self.ssh_config is not None:
encoded_cmd.extend(["-F", self.ssh_config]) ret.extend(["-F", self.ssh_config])
encoded_cmd.append(self.ssh_to) ret.append(self.ssh_to)
for arg in cmd: ret.append(" ".join(map(self._quote, cmd)))
# add single quotes for remote commands to support spaces and other weird stuff (remote commands are
# executed in a shell) and escape existing single quotes (bash needs ' to end the quoted string,
# then a \' for the actual quote and then another ' to start a new quoted string) (and then python
# needs the double \ to get a single \)
encoded_cmd.append(("'" + arg.replace("'", "'\\''") + "'"))
return encoded_cmd
else:
return(cmd)
return ret
def is_local(self): def is_local(self):
return self.ssh_to is None return self.ssh_to is None
def run(self, cmd, inp=None, tab_split=False, valid_exitcodes=None, readonly=False, hide_errors=False, def run(self, cmd, inp=None, tab_split=False, valid_exitcodes=None, readonly=False, hide_errors=False,
return_stderr=False, pipe=False): return_stderr=False, pipe=False):
"""run a command on the node , checks output and parses/handle output and returns it """run a command on the node , checks output and parses/handle output and returns it
Either uses a local shell (sh -c) or remote shell (ssh) to execute the command. Therefore the command can have stuff like actual pipes in it, if you dont want to use pipe=True to pipe stuff.
:param cmd: the actual command, should be a list, where the first item is the command :param cmd: the actual command, should be a list, where the first item is the command
and the rest are parameters. and the rest are parameters. use ExecuteNode.PIPE to add an unescaped |
(if you want to use system piping instead of python piping)
:param pipe: return CmdPipe instead of executing it. :param pipe: return CmdPipe instead of executing it.
:param inp: Can be None, a string or a CmdPipe that was previously returned. :param inp: Can be None, a string or a CmdPipe that was previously returned.
:param tab_split: split tabbed files in output into a list :param tab_split: split tabbed files in output into a list
@ -96,13 +99,14 @@ class ExecuteNode(LogStub):
# create new pipe? # create new pipe?
if not isinstance(inp, CmdPipe): if not isinstance(inp, CmdPipe):
p = CmdPipe(self.readonly, inp) cmd_pipe = CmdPipe(self.readonly, inp)
else: else:
# add stuff to existing pipe # add stuff to existing pipe
p = inp cmd_pipe = inp
# stderr parser # stderr parser
error_lines = [] error_lines = []
def stderr_handler(line): def stderr_handler(line):
if tab_split: if tab_split:
error_lines.append(line.rstrip().split('\t')) error_lines.append(line.rstrip().split('\t'))
@ -119,18 +123,22 @@ class ExecuteNode(LogStub):
self.debug("EXIT > {}".format(exit_code)) self.debug("EXIT > {}".format(exit_code))
if (valid_exitcodes != []) and (exit_code not in valid_exitcodes): if (valid_exitcodes != []) and (exit_code not in valid_exitcodes):
raise (ExecuteError("Command '{}' return exit code '{}' (valid codes: {})".format(" ".join(cmd), exit_code, valid_exitcodes))) self.error("Command \"{}\" returned exit code {} (valid codes: {})".format(cmd_item, exit_code, valid_exitcodes))
return False
# add command to pipe return True
encoded_cmd = self._remote_cmd(cmd)
p.add(cmd=encoded_cmd, readonly=readonly, stderr_handler=stderr_handler, exit_handler=exit_handler) # add shell command and handlers to pipe
cmd_item=CmdItem(cmd=self._shell_cmd(cmd), readonly=readonly, stderr_handler=stderr_handler, exit_handler=exit_handler, shell=self.is_local())
cmd_pipe.add(cmd_item)
# return pipe instead of executing? # return pipe instead of executing?
if pipe: if pipe:
return p return cmd_pipe
# stdout parser # stdout parser
output_lines = [] output_lines = []
def stdout_handler(line): def stdout_handler(line):
if tab_split: if tab_split:
output_lines.append(line.rstrip().split('\t')) output_lines.append(line.rstrip().split('\t'))
@ -138,13 +146,14 @@ class ExecuteNode(LogStub):
output_lines.append(line.rstrip()) output_lines.append(line.rstrip())
self._parse_stdout(line) self._parse_stdout(line)
if p.should_execute(): if cmd_pipe.should_execute():
self.debug("CMD > {}".format(p)) self.debug("CMD > {}".format(cmd_pipe))
else: else:
self.debug("CMDSKIP> {}".format(p)) self.debug("CMDSKIP> {}".format(cmd_pipe))
# execute and calls handlers in CmdPipe # execute and calls handlers in CmdPipe
p.execute(stdout_handler=stdout_handler) if not cmd_pipe.execute(stdout_handler=stdout_handler):
raise(ExecuteError("Last command returned error"))
if return_stderr: if return_stderr:
return output_lines, error_lines return output_lines, error_lines

View File

@ -31,6 +31,13 @@ class LogConsole:
print("! " + txt, file=sys.stderr) print("! " + txt, file=sys.stderr)
sys.stderr.flush() sys.stderr.flush()
def warning(self, txt):
if self.colorama:
print(colorama.Fore.YELLOW + colorama.Style.BRIGHT + " NOTE: " + txt + colorama.Style.RESET_ALL)
else:
print(" NOTE: " + txt)
sys.stdout.flush()
def verbose(self, txt): def verbose(self, txt):
if self.show_verbose: if self.show_verbose:
if self.colorama: if self.colorama:
@ -46,3 +53,14 @@ class LogConsole:
else: else:
print("# " + txt) print("# " + txt)
sys.stdout.flush() sys.stdout.flush()
def progress(self, txt):
"""print progress output to stderr (stays on same line)"""
self.clear_progress()
print(">>> {}\r".format(txt), end='', file=sys.stderr)
sys.stderr.flush()
def clear_progress(self):
import colorama
print(colorama.ansi.clear_line(), end='', file=sys.stderr)
sys.stderr.flush()

View File

@ -11,5 +11,8 @@ class LogStub:
def verbose(self, txt): def verbose(self, txt):
print("VERBOSE: " + txt) print("VERBOSE: " + txt)
def warning(self, txt):
print("WARNING: " + txt)
def error(self, txt): def error(self, txt):
print("ERROR : " + txt) print("ERROR : " + txt)

View File

@ -2,6 +2,8 @@ import argparse
import sys import sys
import time import time
from zfs_autobackup import compressors
from zfs_autobackup.ExecuteNode import ExecuteNode
from zfs_autobackup.Thinner import Thinner from zfs_autobackup.Thinner import Thinner
from zfs_autobackup.ZfsDataset import ZfsDataset from zfs_autobackup.ZfsDataset import ZfsDataset
from zfs_autobackup.LogConsole import LogConsole from zfs_autobackup.LogConsole import LogConsole
@ -9,11 +11,13 @@ from zfs_autobackup.ZfsNode import ZfsNode
from zfs_autobackup.ThinnerRule import ThinnerRule from zfs_autobackup.ThinnerRule import ThinnerRule
class ZfsAutobackup: class ZfsAutobackup:
"""main class""" """main class"""
VERSION = "3.1-beta4" VERSION = "3.1-beta6"
HEADER = "zfs-autobackup v{} - Copyright 2020 E.H.Eefting (edwin@datux.nl)".format(VERSION) HEADER = "zfs-autobackup v{} - (c)2021 E.H.Eefting (edwin@datux.nl)".format(VERSION)
def __init__(self, argv, print_arguments=True): def __init__(self, argv, print_arguments=True):
@ -59,7 +63,6 @@ class ZfsAutobackup:
help='Ignore datasets that seem to be replicated some other way. (No changes since ' help='Ignore datasets that seem to be replicated some other way. (No changes since '
'lastest snapshot. Useful for proxmox HA replication)') 'lastest snapshot. Useful for proxmox HA replication)')
parser.add_argument('--resume', action='store_true', help=argparse.SUPPRESS)
parser.add_argument('--strip-path', metavar='N', default=0, type=int, parser.add_argument('--strip-path', metavar='N', default=0, type=int,
help='Number of directories to strip from target path (use 1 when cloning zones between 2 ' help='Number of directories to strip from target path (use 1 when cloning zones between 2 '
'SmartOS machines)') 'SmartOS machines)')
@ -89,8 +92,6 @@ class ZfsAutobackup:
parser.add_argument('--ignore-transfer-errors', action='store_true', parser.add_argument('--ignore-transfer-errors', action='store_true',
help='Ignore transfer errors (still checks if received filesystem exists. useful for ' help='Ignore transfer errors (still checks if received filesystem exists. useful for '
'acltype errors)') 'acltype errors)')
parser.add_argument('--raw', action='store_true',
help=argparse.SUPPRESS)
parser.add_argument('--decrypt', action='store_true', parser.add_argument('--decrypt', action='store_true',
help='Decrypt data before sending it over.') help='Decrypt data before sending it over.')
@ -108,13 +109,24 @@ class ZfsAutobackup:
help='Show zfs commands and their output/exit codes. (noisy)') help='Show zfs commands and their output/exit codes. (noisy)')
parser.add_argument('--progress', action='store_true', parser.add_argument('--progress', action='store_true',
help='show zfs progress output. Enabled automaticly on ttys. (use --no-progress to disable)') help='show zfs progress output. Enabled automaticly on ttys. (use --no-progress to disable)')
parser.add_argument('--no-progress', action='store_true', help=argparse.SUPPRESS) # needed to workaround a zfs recv -v bug parser.add_argument('--no-progress', action='store_true',
help=argparse.SUPPRESS) # needed to workaround a zfs recv -v bug
parser.add_argument('--resume', action='store_true', help=argparse.SUPPRESS)
parser.add_argument('--raw', action='store_true', help=argparse.SUPPRESS)
parser.add_argument('--exclude-received', action='store_true',
help=argparse.SUPPRESS) # probably never needed anymore
#these things all do stuff by piping zfs send/recv IO
parser.add_argument('--send-pipe', metavar="COMMAND", default=[], action='append', parser.add_argument('--send-pipe', metavar="COMMAND", default=[], action='append',
help='pipe zfs send output through COMMAND') help='pipe zfs send output through COMMAND (can be used multiple times)')
parser.add_argument('--recv-pipe', metavar="COMMAND", default=[], action='append', parser.add_argument('--recv-pipe', metavar="COMMAND", default=[], action='append',
help='pipe zfs recv input through COMMAND') help='pipe zfs recv input through COMMAND (can be used multiple times)')
parser.add_argument('--compress', metavar='TYPE', default=None, choices=compressors.choices(), help='Use compression during transfer, zstd-fast recommended. ({})'.format(", ".join(compressors.choices())))
parser.add_argument('--rate', metavar='DATARATE', default=None, help='Limit data transfer rate (e.g. 128K. requires mbuffer.)')
parser.add_argument('--buffer', metavar='SIZE', default=None, help='Add zfs send and recv buffers to smooth out IO bursts. (e.g. 128M. requires mbuffer)')
# note args is the only global variable we use, since its a global readonly setting anyway # note args is the only global variable we use, since its a global readonly setting anyway
args = parser.parse_args(argv) args = parser.parse_args(argv)
@ -138,20 +150,28 @@ class ZfsAutobackup:
args.rollback = True args.rollback = True
self.log = LogConsole(show_debug=self.args.debug, show_verbose=self.args.verbose, color=sys.stdout.isatty()) self.log = LogConsole(show_debug=self.args.debug, show_verbose=self.args.verbose, color=sys.stdout.isatty())
self.verbose(self.HEADER)
if args.resume: if args.resume:
self.verbose("NOTE: The --resume option isn't needed anymore (its autodetected now)") self.warning("The --resume option isn't needed anymore (its autodetected now)")
if args.raw: if args.raw:
self.verbose("NOTE: The --raw option isn't needed anymore (its autodetected now). Use --decrypt to explicitly send data decrypted.") self.warning(
"The --raw option isn't needed anymore (its autodetected now). Also see --encrypt and --decrypt.")
if args.target_path is not None and args.target_path[0] == "/": if args.target_path is not None and args.target_path[0] == "/":
self.log.error("Target should not start with a /") self.log.error("Target should not start with a /")
sys.exit(255) sys.exit(255)
if args.compress and not args.ssh_source and not args.ssh_target:
self.warning("Using compression, but transfer is local.")
def verbose(self, txt): def verbose(self, txt):
self.log.verbose(txt) self.log.verbose(txt)
def warning(self, txt):
self.log.warning(txt)
def error(self, txt): def error(self, txt):
self.log.error(txt) self.log.error(txt)
@ -162,31 +182,54 @@ class ZfsAutobackup:
self.log.verbose("") self.log.verbose("")
self.log.verbose("#### " + title) self.log.verbose("#### " + title)
def progress(self, txt):
self.log.progress(txt)
def clear_progress(self):
self.log.clear_progress()
# NOTE: this method also uses self.args. args that need extra processing are passed as function parameters: # NOTE: this method also uses self.args. args that need extra processing are passed as function parameters:
def thin_missing_targets(self, target_dataset, used_target_datasets): def thin_missing_targets(self, target_dataset, used_target_datasets):
"""thin target datasets that are missing on the source.""" """thin target datasets that are missing on the source."""
self.debug("Thinning obsolete datasets") self.debug("Thinning obsolete datasets")
missing_datasets = [dataset for dataset in target_dataset.recursive_datasets if
dataset not in used_target_datasets]
count = 0
for dataset in missing_datasets:
count = count + 1
if self.args.progress:
self.progress("Analysing missing {}/{}".format(count, len(missing_datasets)))
for dataset in target_dataset.recursive_datasets:
try: try:
if dataset not in used_target_datasets:
dataset.debug("Missing on source, thinning") dataset.debug("Missing on source, thinning")
dataset.thin() dataset.thin()
except Exception as e: except Exception as e:
dataset.error("Error during thinning of missing datasets ({})".format(str(e))) dataset.error("Error during thinning of missing datasets ({})".format(str(e)))
if self.args.progress:
self.clear_progress()
# NOTE: this method also uses self.args. args that need extra processing are passed as function parameters: # NOTE: this method also uses self.args. args that need extra processing are passed as function parameters:
def destroy_missing_targets(self, target_dataset, used_target_datasets): def destroy_missing_targets(self, target_dataset, used_target_datasets):
"""destroy target datasets that are missing on the source and that meet the requirements""" """destroy target datasets that are missing on the source and that meet the requirements"""
self.debug("Destroying obsolete datasets") self.debug("Destroying obsolete datasets")
for dataset in target_dataset.recursive_datasets: missing_datasets = [dataset for dataset in target_dataset.recursive_datasets if
try: dataset not in used_target_datasets]
if dataset not in used_target_datasets:
count = 0
for dataset in missing_datasets:
count = count + 1
if self.args.progress:
self.progress("Analysing destroy missing {}/{}".format(count, len(missing_datasets)))
try:
# cant do anything without our own snapshots # cant do anything without our own snapshots
if not dataset.our_snapshots: if not dataset.our_snapshots:
if dataset.datasets: if dataset.datasets:
@ -229,6 +272,59 @@ class ZfsAutobackup:
except Exception as e: except Exception as e:
dataset.error("Error during --destroy-missing: {}".format(str(e))) dataset.error("Error during --destroy-missing: {}".format(str(e)))
if self.args.progress:
self.clear_progress()
def get_send_pipes(self, logger):
"""determine the zfs send pipe"""
ret=[]
# IO buffer
if self.args.buffer:
logger("zfs send buffer : {}".format(self.args.buffer))
ret.extend([ ExecuteNode.PIPE, "mbuffer", "-q", "-s128k", "-m"+self.args.buffer ])
# custom pipes
for send_pipe in self.args.send_pipe:
ret.append(ExecuteNode.PIPE)
ret.extend(send_pipe.split(" "))
logger("zfs send custom pipe : {}".format(send_pipe))
# compression
if self.args.compress!=None:
ret.append(ExecuteNode.PIPE)
cmd=compressors.compress_cmd(self.args.compress)
ret.extend(cmd)
logger("zfs send compression : {}".format(" ".join(cmd)))
# transfer rate
if self.args.rate:
logger("zfs send transfer rate : {}".format(self.args.rate))
ret.extend([ ExecuteNode.PIPE, "mbuffer", "-q", "-s128k", "-m16M", "-R"+self.args.rate ])
return ret
def get_recv_pipes(self, logger):
ret=[]
# decompression
if self.args.compress!=None:
cmd=compressors.decompress_cmd(self.args.compress)
ret.extend(cmd)
ret.append(ExecuteNode.PIPE)
logger("zfs recv decompression : {}".format(" ".join(cmd)))
# custom pipes
for recv_pipe in self.args.recv_pipe:
ret.extend(recv_pipe.split(" "))
ret.append(ExecuteNode.PIPE)
logger("zfs recv custom pipe : {}".format(recv_pipe))
return ret
# NOTE: this method also uses self.args. args that need extra processing are passed as function parameters: # NOTE: this method also uses self.args. args that need extra processing are passed as function parameters:
def sync_datasets(self, source_node, source_datasets, target_node): def sync_datasets(self, source_node, source_datasets, target_node):
"""Sync datasets, or thin-only on both sides """Sync datasets, or thin-only on both sides
@ -237,10 +333,19 @@ class ZfsAutobackup:
:type source_node: ZfsNode :type source_node: ZfsNode
""" """
send_pipes=self.get_send_pipes(source_node.verbose)
recv_pipes=self.get_recv_pipes(target_node.verbose)
fail_count = 0 fail_count = 0
count = 0
target_datasets = [] target_datasets = []
for source_dataset in source_datasets: for source_dataset in source_datasets:
# stats
if self.args.progress:
count = count + 1
self.progress("Analysing dataset {}/{} ({} failed)".format(count, len(source_datasets), fail_count))
try: try:
# determine corresponding target_dataset # determine corresponding target_dataset
target_name = self.args.target_path + "/" + source_dataset.lstrip_path(self.args.strip_path) target_name = self.args.target_path + "/" + source_dataset.lstrip_path(self.args.strip_path)
@ -268,14 +373,19 @@ class ZfsAutobackup:
also_other_snapshots=self.args.other_snapshots, also_other_snapshots=self.args.other_snapshots,
no_send=self.args.no_send, no_send=self.args.no_send,
destroy_incompatible=self.args.destroy_incompatible, destroy_incompatible=self.args.destroy_incompatible,
output_pipes=self.args.send_pipe, input_pipes=self.args.recv_pipe, decrypt=self.args.decrypt, encrypt=self.args.encrypt) send_pipes=send_pipes, recv_pipes=recv_pipes,
decrypt=self.args.decrypt, encrypt=self.args.encrypt, )
except Exception as e: except Exception as e:
fail_count = fail_count + 1 fail_count = fail_count + 1
source_dataset.error("FAILED: " + str(e)) source_dataset.error("FAILED: " + str(e))
if self.args.debug: if self.args.debug:
raise raise
if self.args.progress:
self.clear_progress()
target_path_dataset = ZfsDataset(target_node, self.args.target_path) target_path_dataset = ZfsDataset(target_node, self.args.target_path)
if not self.args.no_thinning:
self.thin_missing_targets(target_dataset=target_path_dataset, used_target_datasets=target_datasets) self.thin_missing_targets(target_dataset=target_path_dataset, used_target_datasets=target_datasets)
if self.args.destroy_missing is not None: if self.args.destroy_missing is not None:
@ -285,7 +395,6 @@ class ZfsAutobackup:
def thin_source(self, source_datasets): def thin_source(self, source_datasets):
if not self.args.no_thinning:
self.set_title("Thinning source") self.set_title("Thinning source")
for source_dataset in source_datasets: for source_dataset in source_datasets:
@ -332,16 +441,16 @@ class ZfsAutobackup:
def run(self): def run(self):
try: try:
self.verbose(self.HEADER)
if self.args.test: if self.args.test:
self.verbose("TEST MODE - SIMULATING WITHOUT MAKING ANY CHANGES") self.warning("TEST MODE - SIMULATING WITHOUT MAKING ANY CHANGES")
################ create source zfsNode
self.set_title("Source settings") self.set_title("Source settings")
description = "[Source]" description = "[Source]"
if self.args.no_thinning: if self.args.no_thinning:
source_thinner=None source_thinner = None
else: else:
source_thinner = Thinner(self.args.keep_source) source_thinner = Thinner(self.args.keep_source)
source_node = ZfsNode(self.args.backup_name, self, ssh_config=self.args.ssh_config, source_node = ZfsNode(self.args.backup_name, self, ssh_config=self.args.ssh_config,
@ -352,8 +461,24 @@ class ZfsAutobackup:
"'autobackup:{}=child')".format( "'autobackup:{}=child')".format(
self.args.backup_name, self.args.backup_name)) self.args.backup_name, self.args.backup_name))
################# select source datasets
self.set_title("Selecting") self.set_title("Selecting")
selected_source_datasets = source_node.selected_datasets
#Note: Before version v3.1-beta5, we always used exclude_received. This was a problem if you wanto to replicate an existing backup to another host and use the same backupname/snapshots.
exclude_paths = []
exclude_received=self.args.exclude_received
if self.args.ssh_source == self.args.ssh_target:
if self.args.target_path:
# target and source are the same, make sure to exclude target_path
self.warning("Source and target are on the same host, excluding target-path from selection.")
exclude_paths.append(self.args.target_path)
else:
self.warning("Source and target are on the same host, excluding received datasets from selection.")
exclude_received=True
selected_source_datasets = source_node.selected_datasets(exclude_received=exclude_received,
exclude_paths=exclude_paths)
if not selected_source_datasets: if not selected_source_datasets:
self.error( self.error(
"No source filesystems selected, please do a 'zfs set autobackup:{0}=true' on the source datasets " "No source filesystems selected, please do a 'zfs set autobackup:{0}=true' on the source datasets "
@ -364,18 +489,20 @@ class ZfsAutobackup:
# filter out already replicated stuff? # filter out already replicated stuff?
source_datasets = self.filter_replicated(selected_source_datasets) source_datasets = self.filter_replicated(selected_source_datasets)
################# snapshotting
if not self.args.no_snapshot: if not self.args.no_snapshot:
self.set_title("Snapshotting") self.set_title("Snapshotting")
source_node.consistent_snapshot(source_datasets, source_node.new_snapshotname(), source_node.consistent_snapshot(source_datasets, source_node.new_snapshotname(),
min_changed_bytes=self.args.min_change) min_changed_bytes=self.args.min_change)
################# sync
# if target is specified, we sync the datasets, otherwise we just thin the source. (e.g. snapshot mode) # if target is specified, we sync the datasets, otherwise we just thin the source. (e.g. snapshot mode)
if self.args.target_path: if self.args.target_path:
# create target_node # create target_node
self.set_title("Target settings") self.set_title("Target settings")
if self.args.no_thinning: if self.args.no_thinning:
target_thinner=None target_thinner = None
else: else:
target_thinner = Thinner(self.args.keep_target) target_thinner = Thinner(self.args.keep_target)
target_node = ZfsNode(self.args.backup_name, self, ssh_config=self.args.ssh_config, target_node = ZfsNode(self.args.backup_name, self, ssh_config=self.args.ssh_config,
@ -390,7 +517,7 @@ class ZfsAutobackup:
# check if exists, to prevent vague errors # check if exists, to prevent vague errors
target_dataset = ZfsDataset(target_node, self.args.target_path) target_dataset = ZfsDataset(target_node, self.args.target_path)
if not target_dataset.exists: if not target_dataset.exists:
raise(Exception( raise (Exception(
"Target path '{}' does not exist. Please create this dataset first.".format(target_dataset))) "Target path '{}' does not exist. Please create this dataset first.".format(target_dataset)))
# do the actual sync # do the actual sync
@ -400,8 +527,9 @@ class ZfsAutobackup:
source_datasets=source_datasets, source_datasets=source_datasets,
target_node=target_node) target_node=target_node)
#no target specified, run in snapshot-only mode # no target specified, run in snapshot-only mode
else: else:
if not self.args.no_thinning:
self.thin_source(source_datasets) self.thin_source(source_datasets)
fail_count = 0 fail_count = 0
@ -419,7 +547,7 @@ class ZfsAutobackup:
if self.args.test: if self.args.test:
self.verbose("") self.verbose("")
self.verbose("TEST MODE - DID NOT MAKE ANY CHANGES!") self.warning("TEST MODE - DID NOT MAKE ANY CHANGES!")
return fail_count return fail_count

View File

@ -1,5 +1,4 @@
import re import re
import subprocess
import time import time
from zfs_autobackup.CachedProperty import CachedProperty from zfs_autobackup.CachedProperty import CachedProperty
@ -113,15 +112,16 @@ class ZfsDataset:
"""true if this dataset is a snapshot""" """true if this dataset is a snapshot"""
return self.name.find("@") != -1 return self.name.find("@") != -1
def is_selected(self, value, source, inherited, ignore_received): def is_selected(self, value, source, inherited, exclude_received, exclude_paths):
"""determine if dataset should be selected for backup (called from """determine if dataset should be selected for backup (called from
ZfsNode) ZfsNode)
Args: Args:
:type exclude_paths: list of str
:type value: str :type value: str
:type source: str :type source: str
:type inherited: bool :type inherited: bool
:type ignore_received: bool :type exclude_received: bool
""" """
# sanity checks # sanity checks
@ -129,22 +129,30 @@ class ZfsDataset:
# probably a program error in zfs-autobackup or new feature in zfs # probably a program error in zfs-autobackup or new feature in zfs
raise (Exception( raise (Exception(
"{} autobackup-property has illegal source: '{}' (possible BUG)".format(self.name, source))) "{} autobackup-property has illegal source: '{}' (possible BUG)".format(self.name, source)))
if value not in ["false", "true", "child", "-"]: if value not in ["false", "true", "child", "-"]:
# user error # user error
raise (Exception( raise (Exception(
"{} autobackup-property has illegal value: '{}'".format(self.name, value))) "{} autobackup-property has illegal value: '{}'".format(self.name, value)))
# our path starts with one of the excluded paths?
for exclude_path in exclude_paths:
if self.name.startswith(exclude_path):
# too noisy for verbose
self.debug("Excluded (in exclude list)")
return False
# now determine if its actually selected # now determine if its actually selected
if value == "false": if value == "false":
self.verbose("Ignored (disabled)") self.verbose("Excluded (disabled)")
return False return False
elif value == "true" or (value == "child" and inherited): elif value == "true" or (value == "child" and inherited):
if source == "local": if source == "local":
self.verbose("Selected") self.verbose("Selected")
return True return True
elif source == "received": elif source == "received":
if ignore_received: if exclude_received:
self.verbose("Ignored (local backup)") self.verbose("Excluded (dataset already received)")
return False return False
else: else:
self.verbose("Selected") self.verbose("Selected")
@ -495,14 +503,15 @@ class ZfsDataset:
return self.from_names(names[1:]) return self.from_names(names[1:])
def send_pipe(self, features, prev_snapshot, resume_token, show_progress, raw, send_properties, write_embedded, output_pipes): def send_pipe(self, features, prev_snapshot, resume_token, show_progress, raw, send_properties, write_embedded, send_pipes):
"""returns a pipe with zfs send output for this snapshot """returns a pipe with zfs send output for this snapshot
resume_token: resume sending from this token. (in that case we don't resume_token: resume sending from this token. (in that case we don't
need to know snapshot names) need to know snapshot names)
Args: Args:
:type output_pipes: list of str :param send_pipes: output cmd array that will be added to actual zfs send command. (e.g. mbuffer or compression program)
:type send_pipes: list of str
:type features: list of str :type features: list of str
:type prev_snapshot: ZfsDataset :type prev_snapshot: ZfsDataset
:type resume_token: str :type resume_token: str
@ -548,24 +557,13 @@ class ZfsDataset:
cmd.append(self.name) cmd.append(self.name)
# #add custom output pipes? cmd.extend(send_pipes)
# if output_pipes:
# #local so do our own piping
# if self.zfs_node.is_local():
# output_pipe = self.zfs_node.run(cmd)
# for pipe_cmd in output_pipes:
# output_pipe=self.zfs_node.run(pipe_cmd, inp=output_pipe, )
# return output_pipe
# #remote, so add with actual | and let remote shell handle it
# else:
# for pipe_cmd in output_pipes:
# cmd.append("|")
# cmd.extend(pipe_cmd)
return self.zfs_node.run(cmd, pipe=True, readonly=True) output_pipe = self.zfs_node.run(cmd, pipe=True, readonly=True)
return output_pipe
def recv_pipe(self, pipe, features, filter_properties=None, set_properties=None, ignore_exit_code=False): def recv_pipe(self, pipe, features, recv_pipes, filter_properties=None, set_properties=None, ignore_exit_code=False):
"""starts a zfs recv for this snapshot and uses pipe as input """starts a zfs recv for this snapshot and uses pipe as input
note: you can use it both on a snapshot or filesystem object. The note: you can use it both on a snapshot or filesystem object. The
@ -573,6 +571,7 @@ class ZfsDataset:
differently. differently.
Args: Args:
:param recv_pipes: input cmd array that will be prepended to actual zfs recv command. (e.g. mbuffer or decompression program)
:type pipe: subprocess.pOpen :type pipe: subprocess.pOpen
:type features: list of str :type features: list of str
:type filter_properties: list of str :type filter_properties: list of str
@ -589,6 +588,8 @@ class ZfsDataset:
# build target command # build target command
cmd = [] cmd = []
cmd.extend(recv_pipes)
cmd.extend(["zfs", "recv"]) cmd.extend(["zfs", "recv"])
# don't mount filesystem that is received # don't mount filesystem that is received
@ -633,15 +634,15 @@ class ZfsDataset:
def transfer_snapshot(self, target_snapshot, features, prev_snapshot, show_progress, def transfer_snapshot(self, target_snapshot, features, prev_snapshot, show_progress,
filter_properties, set_properties, ignore_recv_exit_code, resume_token, filter_properties, set_properties, ignore_recv_exit_code, resume_token,
raw, send_properties, write_embedded, output_pipes, input_pipes): raw, send_properties, write_embedded, send_pipes, recv_pipes):
"""transfer this snapshot to target_snapshot. specify prev_snapshot for """transfer this snapshot to target_snapshot. specify prev_snapshot for
incremental transfer incremental transfer
connects a send_pipe() to recv_pipe() connects a send_pipe() to recv_pipe()
Args: Args:
:type output_pipes: list of str :type send_pipes: list of str
:type input_pipes: list of str :type recv_pipes: list of str
:type target_snapshot: ZfsDataset :type target_snapshot: ZfsDataset
:type features: list of str :type features: list of str
:type prev_snapshot: ZfsDataset :type prev_snapshot: ZfsDataset
@ -672,9 +673,9 @@ class ZfsDataset:
# do it # do it
pipe = self.send_pipe(features=features, show_progress=show_progress, prev_snapshot=prev_snapshot, pipe = self.send_pipe(features=features, show_progress=show_progress, prev_snapshot=prev_snapshot,
resume_token=resume_token, raw=raw, send_properties=send_properties, write_embedded=write_embedded, output_pipes=output_pipes) resume_token=resume_token, raw=raw, send_properties=send_properties, write_embedded=write_embedded, send_pipes=send_pipes)
target_snapshot.recv_pipe(pipe, features=features, filter_properties=filter_properties, target_snapshot.recv_pipe(pipe, features=features, filter_properties=filter_properties,
set_properties=set_properties, ignore_exit_code=ignore_recv_exit_code) set_properties=set_properties, ignore_exit_code=ignore_recv_exit_code, recv_pipes=recv_pipes)
def abort_resume(self): def abort_resume(self):
"""abort current resume state""" """abort current resume state"""
@ -962,13 +963,13 @@ class ZfsDataset:
def sync_snapshots(self, target_dataset, features, show_progress, filter_properties, set_properties, def sync_snapshots(self, target_dataset, features, show_progress, filter_properties, set_properties,
ignore_recv_exit_code, holds, rollback, decrypt, encrypt, also_other_snapshots, ignore_recv_exit_code, holds, rollback, decrypt, encrypt, also_other_snapshots,
no_send, destroy_incompatible, output_pipes, input_pipes): no_send, destroy_incompatible, send_pipes, recv_pipes):
"""sync this dataset's snapshots to target_dataset, while also thinning """sync this dataset's snapshots to target_dataset, while also thinning
out old snapshots along the way. out old snapshots along the way.
Args: Args:
:type output_pipes: list of str :type send_pipes: list of str
:type input_pipes: list of str :type recv_pipes: list of str
:type target_dataset: ZfsDataset :type target_dataset: ZfsDataset
:type features: list of str :type features: list of str
:type show_progress: bool :type show_progress: bool
@ -977,7 +978,6 @@ class ZfsDataset:
:type ignore_recv_exit_code: bool :type ignore_recv_exit_code: bool
:type holds: bool :type holds: bool
:type rollback: bool :type rollback: bool
:type raw: bool
:type decrypt: bool :type decrypt: bool
:type also_other_snapshots: bool :type also_other_snapshots: bool
:type no_send: bool :type no_send: bool
@ -1046,7 +1046,7 @@ class ZfsDataset:
filter_properties=active_filter_properties, filter_properties=active_filter_properties,
set_properties=active_set_properties, set_properties=active_set_properties,
ignore_recv_exit_code=ignore_recv_exit_code, ignore_recv_exit_code=ignore_recv_exit_code,
resume_token=resume_token, write_embedded=write_embedded,raw=raw, send_properties=send_properties, output_pipes=output_pipes, input_pipes=input_pipes) resume_token=resume_token, write_embedded=write_embedded, raw=raw, send_properties=send_properties, send_pipes=send_pipes, recv_pipes=recv_pipes)
resume_token = None resume_token = None

View File

@ -120,7 +120,7 @@ class ZfsNode(ExecuteNode):
self._progress_total_bytes = int(progress_fields[2]) self._progress_total_bytes = int(progress_fields[2])
elif progress_fields[0] == 'incremental': elif progress_fields[0] == 'incremental':
self._progress_total_bytes = int(progress_fields[3]) self._progress_total_bytes = int(progress_fields[3])
else: elif progress_fields[1].isnumeric():
bytes_ = int(progress_fields[1]) bytes_ = int(progress_fields[1])
if self._progress_total_bytes: if self._progress_total_bytes:
percentage = min(100, int(bytes_ * 100 / self._progress_total_bytes)) percentage = min(100, int(bytes_ * 100 / self._progress_total_bytes))
@ -128,9 +128,8 @@ class ZfsNode(ExecuteNode):
bytes_left = self._progress_total_bytes - bytes_ bytes_left = self._progress_total_bytes - bytes_
minutes_left = int((bytes_left / (bytes_ / (time.time() - self._progress_start_time))) / 60) minutes_left = int((bytes_left / (bytes_ / (time.time() - self._progress_start_time))) / 60)
print(">>> {}% {}MB/s (total {}MB, {} minutes left) \r".format(percentage, speed, int( self.logger.progress("Transfer {}% {}MB/s (total {}MB, {} minutes left)".format(percentage, speed, int(
self._progress_total_bytes / (1024 * 1024)), minutes_left), end='', file=sys.stderr) self._progress_total_bytes / (1024 * 1024)), minutes_left))
sys.stderr.flush()
return return
@ -152,6 +151,9 @@ class ZfsNode(ExecuteNode):
def error(self, txt): def error(self, txt):
self.logger.error("{} {}".format(self.description, txt)) self.logger.error("{} {}".format(self.description, txt))
def warning(self, txt):
self.logger.warning("{} {}".format(self.description, txt))
def debug(self, txt): def debug(self, txt):
self.logger.debug("{} {}".format(self.description, txt)) self.logger.debug("{} {}".format(self.description, txt))
@ -198,8 +200,7 @@ class ZfsNode(ExecuteNode):
self.verbose("Creating snapshots {} in pool {}".format(snapshot_name, pool_name)) self.verbose("Creating snapshots {} in pool {}".format(snapshot_name, pool_name))
self.run(cmd, readonly=False) self.run(cmd, readonly=False)
@CachedProperty def selected_datasets(self, exclude_received, exclude_paths):
def selected_datasets(self, ignore_received=True):
"""determine filesystems that should be backupped by looking at the special autobackup-property, systemwide """determine filesystems that should be backupped by looking at the special autobackup-property, systemwide
returns: list of ZfsDataset returns: list of ZfsDataset
@ -234,7 +235,7 @@ class ZfsNode(ExecuteNode):
source = raw_source source = raw_source
# determine it # determine it
if dataset.is_selected(value=value, source=source, inherited=inherited, ignore_received=ignore_received): if dataset.is_selected(value=value, source=source, inherited=inherited, exclude_received=exclude_received, exclude_paths=exclude_paths):
selected_filesystems.append(dataset) selected_filesystems.append(dataset)
return selected_filesystems return selected_filesystems

View File

@ -0,0 +1,69 @@
# Adopted from Syncoid :)
# this software is licensed for use under the Free Software Foundation's GPL v3.0 license, as retrieved
# from http://www.gnu.org/licenses/gpl-3.0.html on 2014-11-17. A copy should also be available in this
# project's Git repository at https://github.com/jimsalterjrs/sanoid/blob/master/LICENSE.
# Table of supported compression methods.
# For each method:
#   'cmd'  / 'args'  -- program and arguments used to compress the send stream
#   'dcmd' / 'dargs' -- program and arguments used to decompress on the receiving side
COMPRESS_CMDS = {
    'gzip':      {'cmd': 'gzip',   'args': ['-3'],  'dcmd': 'zcat',   'dargs': []},
    'pigz-fast': {'cmd': 'pigz',   'args': ['-3'],  'dcmd': 'pigz',   'dargs': ['-dc']},
    'pigz-slow': {'cmd': 'pigz',   'args': ['-9'],  'dcmd': 'pigz',   'dargs': ['-dc']},
    'zstd-fast': {'cmd': 'zstdmt', 'args': ['-3'],  'dcmd': 'zstdmt', 'dargs': ['-dc']},
    'zstd-slow': {'cmd': 'zstdmt', 'args': ['-19'], 'dcmd': 'zstdmt', 'dargs': ['-dc']},
    'xz':        {'cmd': 'xz',     'args': [],      'dcmd': 'xz',     'dargs': ['-d']},
    'lzo':       {'cmd': 'lzop',   'args': [],      'dcmd': 'lzop',   'dargs': ['-dfc']},
    'lz4':       {'cmd': 'lz4',    'args': [],      'dcmd': 'lz4',    'dargs': ['-dc']},
}
def compress_cmd(compressor):
    """Return the compression command line (program + arguments) for *compressor*.

    Raises KeyError if *compressor* is not a key of COMPRESS_CMDS.
    """
    spec = COMPRESS_CMDS[compressor]
    return [spec['cmd']] + spec['args']
def decompress_cmd(compressor):
    """Return the decompression command line (program + arguments) for *compressor*.

    Raises KeyError if *compressor* is not a key of COMPRESS_CMDS.
    """
    spec = COMPRESS_CMDS[compressor]
    return [spec['dcmd']] + spec['dargs']
def choices():
    """Return the names of all supported compression methods.

    Intended for use as the ``choices=`` argument of an argparse option.
    Returns a list snapshot rather than the live ``dict_keys`` view, so
    callers cannot accidentally observe or depend on later mutation of
    COMPRESS_CMDS.
    """
    return list(COMPRESS_CMDS)