separated piping
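This change separates pipe creation from command execution in the test helper's nodes: instead of asking run() for a pipe with pipe=True, callers now build the producing end with get_pipe() and hand its result to the consuming node's run() via inp=. A minimal sketch of the new call pattern, reusing the nodea/nodeb pair and the dd/md5sum commands from the tests below (everything else, including how the nodes are constructed, is illustrative and not part of this diff):

    # Old pattern: run() doubled as the pipe builder.
    # output = nodea.run(["dd", "if=/dev/zero", "count=1000"], pipe=True)

    # New pattern: get_pipe() only sets up the producing side; the
    # downstream run(..., inp=output) consumes it, and a failure on
    # either end surfaces as subprocess.CalledProcessError.
    output = nodea.get_pipe(["dd", "if=/dev/zero", "count=1000"])
    checksum = nodeb.run(["md5sum"], inp=output)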
@@ -73,39 +73,39 @@ class TestExecuteNode(unittest2.TestCase):

     def pipe(self, nodea, nodeb):

         with self.subTest("pipe data"):
-            output=nodea.run(["dd", "if=/dev/zero", "count=1000"], pipe=True)
+            output=nodea.get_pipe(["dd", "if=/dev/zero", "count=1000"])
             self.assertEqual(nodeb.run(["md5sum"], inp=output), ["816df6f64deba63b029ca19d880ee10a  -"])

         with self.subTest("exit code both ends of pipe ok"):
-            output=nodea.run(["true"], pipe=True)
+            output=nodea.get_pipe(["true"])
             nodeb.run(["true"], inp=output)

         with self.subTest("error on pipe input side"):
             with self.assertRaises(subprocess.CalledProcessError):
-                output=nodea.run(["false"], pipe=True)
+                output=nodea.get_pipe(["false"])
                 nodeb.run(["true"], inp=output)

         with self.subTest("error on pipe output side"):
             with self.assertRaises(subprocess.CalledProcessError):
-                output=nodea.run(["true"], pipe=True)
+                output=nodea.get_pipe(["true"])
                 nodeb.run(["false"], inp=output)

         with self.subTest("error on both sides of pipe"):
             with self.assertRaises(subprocess.CalledProcessError):
-                output=nodea.run(["false"], pipe=True)
+                output=nodea.get_pipe(["false"])
                 nodeb.run(["false"], inp=output)

         with self.subTest("check stderr on pipe output side"):
-            output=nodea.run(["true"], pipe=True)
+            output=nodea.get_pipe(["true"])
             (stdout, stderr)=nodeb.run(["ls", "nonexistingfile"], inp=output, return_stderr=True, valid_exitcodes=[0,2])
             self.assertEqual(stdout,[])
             self.assertRegex(stderr[0], "nonexistingfile")

         with self.subTest("check stderr on pipe input side (should be only printed)"):
-            output=nodea.run(["ls", "nonexistingfile"], pipe=True)
+            output=nodea.get_pipe(["ls", "nonexistingfile"])
             (stdout, stderr)=nodeb.run(["true"], inp=output, return_stderr=True, valid_exitcodes=[0,2])
             self.assertEqual(stdout,[])
-            self.assertEqual(stderr,[] )
+            self.assertEqual(stderr,[])

@@ -38,7 +38,7 @@ class TestZfsScaling(unittest2.TestCase):


         #this triggers if you make a change with an impact of more than O(snapshot_count/2)
-        expected_runs=343
+        expected_runs=240
         print("ACTUAL RUNS: {}".format(run_counter))
         self.assertLess(abs(run_counter-expected_runs), snapshot_count/2)

@@ -77,7 +77,7 @@ class TestZfsScaling(unittest2.TestCase):


         #this triggers if you make a change with an impact of more than O(snapshot_count/2)
-        expected_runs=743
+        expected_runs=640
         print("ACTUAL RUNS: {}".format(run_counter))
         self.assertLess(abs(run_counter-expected_runs), dataset_count/2)

@@ -90,6 +90,6 @@ class TestZfsScaling(unittest2.TestCase):


         #this triggers if you make a change with a performance impact of more than O(snapshot_count/2)
-        expected_runs=947
+        expected_runs=844
         print("ACTUAL RUNS: {}".format(run_counter))
         self.assertLess(abs(run_counter-expected_runs), dataset_count/2)
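Note on the re-baselined counters above: each scaling test asserts only that the measured command count stays within snapshot_count/2 (or dataset_count/2) of the baseline, and all three baselines drop by exactly 103 runs here (343→240, 743→640, 947→844), evidently more than that tolerance allows. A toy restatement of the tolerance check, with made-up counts (only the 240 baseline and the /2 bound come from this diff):

    # Hypothetical standalone illustration of the test's tolerance logic.
    snapshot_count = 100                  # made-up scale, not from the diff
    expected_runs = 240                   # re-baselined value from the diff
    run_counter = 251                     # pretend measurement
    assert abs(run_counter - expected_runs) < snapshot_count / 2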