zfs-check broken pipe handling tests

This commit is contained in:
Edwin Eefting 2022-02-21 12:31:19 +01:00
parent ed61f03b4b
commit 3e6a327647
5 changed files with 25 additions and 8 deletions

View File

@ -107,7 +107,21 @@ dir/testfile 0 2e863f1fcccd6642e4e28453eba10d2d3f74d798
""", buf.getvalue()) """, buf.getvalue())
# def test_brokenpipe_cleanup_filesystem(self): def test_brokenpipe_cleanup_filesystem(self):
# """test if stuff is cleaned up correctly, in debugging mode, when a pipe breaks. """ """test if stuff is cleaned up correctly, in debugging mode, when a pipe breaks. """
prepare_zpools()
shelltest("cp tests/data/whole /test_source1/testfile")
shelltest("zfs snapshot test_source1@test")
#breaks pipe when grep exits:
#important to use --debug, since that generates extra output which would be problematic if we didn't do correct SIGPIPE handling
shelltest("python -m zfs_autobackup.ZfsCheck test_source1@test --debug | grep -m1 Hashing")
#should NOT be mounted anymore if cleanup went ok:
self.assertNotRegex(shelltest("mount"), "test_source1@test")

View File

@ -2,7 +2,7 @@ import time
import argparse import argparse
from signal import signal, SIGPIPE from signal import signal, SIGPIPE
from .util import output_redir from .util import output_redir, sigpipe_handler
from .ZfsAuto import ZfsAuto from .ZfsAuto import ZfsAuto
@ -492,7 +492,7 @@ class ZfsAutobackup(ZfsAuto):
def cli(): def cli():
import sys import sys
signal(SIGPIPE, output_redir) signal(SIGPIPE, sigpipe_handler)
sys.exit(ZfsAutobackup(sys.argv[1:], False).run()) sys.exit(ZfsAutobackup(sys.argv[1:], False).run())

View File

@ -1,6 +1,6 @@
# from util import activate_volume_snapshot, create_mountpoints, cleanup_mountpoint # from util import activate_volume_snapshot, create_mountpoints, cleanup_mountpoint
from signal import signal, SIGPIPE from signal import signal, SIGPIPE
from .util import output_redir from .util import output_redir, sigpipe_handler
from .ZfsAuto import ZfsAuto from .ZfsAuto import ZfsAuto
from .ZfsNode import ZfsNode from .ZfsNode import ZfsNode
@ -305,7 +305,7 @@ class ZfsAutoverify(ZfsAuto):
def cli(): def cli():
import sys import sys
signal(SIGPIPE, output_redir) signal(SIGPIPE, sigpipe_handler)
sys.exit(ZfsAutoverify(sys.argv[1:], False).run()) sys.exit(ZfsAutoverify(sys.argv[1:], False).run())

View File

@ -147,8 +147,7 @@ class ZfsCheck(CliBase):
def cli(): def cli():
import sys import sys
signal(SIGPIPE, sigpipe_handler)
signal(SIGPIPE, output_redir)
sys.exit(ZfsCheck(sys.argv[1:], False).run()) sys.exit(ZfsCheck(sys.argv[1:], False).run())

View File

@ -21,6 +21,7 @@ import sys
import time import time
def block_hash(fname, count=10000, bs=4096): def block_hash(fname, count=10000, bs=4096):
"""This function was created to checksum huge files and blockdevices (TB's) """This function was created to checksum huge files and blockdevices (TB's)
Instead of one sha1sum of the whole file, it generates sha1sums of chunks of the file. Instead of one sha1sum of the whole file, it generates sha1sums of chunks of the file.
@ -100,3 +101,6 @@ def output_redir():
os.dup2(devnull, sys.stdout.fileno()) os.dup2(devnull, sys.stdout.fileno())
os.dup2(devnull, sys.stderr.fileno()) os.dup2(devnull, sys.stderr.fileno())
def sigpipe_handler(sig, stack):
#redir output so we don't get more SIGPIPEs during cleanup. (which may try to write to stdout)
output_redir()