reformatting. changed version number

Your Name 2024-09-24 18:22:17 +02:00
parent 5f2e686a1b
commit 07542365ac
18 changed files with 268 additions and 276 deletions

View File

@ -19,15 +19,14 @@ class BlockHasher():
def __init__(self, count=10000, bs=4096, hash_class=hashlib.sha1, skip=0):
self.count = count
self.bs = bs
self.chunk_size=bs*count
self.chunk_size = bs * count
self.hash_class = hash_class
# self.coverage=coverage
self.skip=skip
self._skip_count=0
self.stats_total_bytes=0
self.skip = skip
self._skip_count = 0
self.stats_total_bytes = 0
def _seek_next_chunk(self, fh, fsize):
"""seek fh to next chunk and update skip counter.
@ -37,8 +36,8 @@ class BlockHasher():
"""
#ignore rempty files
if fsize==0:
# ignore rempty files
if fsize == 0:
return False
# need to skip chunks?
@ -53,7 +52,7 @@ class BlockHasher():
# seek to next chunk, reset skip count
fh.seek(self.chunk_size * self._skip_count, os.SEEK_CUR)
self._skip_count = self.skip
return fh.tell()//self.chunk_size
return fh.tell() // self.chunk_size
else:
# should read this chunk, reset skip count
self._skip_count = self.skip
@ -67,24 +66,23 @@ class BlockHasher():
yields nothing for empty files.
"""
with open(fname, "rb") as fh:
fh.seek(0, os.SEEK_END)
fsize=fh.tell()
fsize = fh.tell()
fh.seek(0)
while fh.tell()<fsize:
chunk_nr=self._seek_next_chunk(fh, fsize)
while fh.tell() < fsize:
chunk_nr = self._seek_next_chunk(fh, fsize)
if chunk_nr is False:
return
#read chunk
# read chunk
hash = self.hash_class()
block_nr = 0
while block_nr != self.count:
block=fh.read(self.bs)
if block==b"":
block = fh.read(self.bs)
if block == b"":
break
hash.update(block)
block_nr = block_nr + 1
@ -121,7 +119,7 @@ class BlockHasher():
yield (chunk_nr, hexdigest, hash.hexdigest())
except Exception as e:
yield ( chunk_nr , hexdigest, 'ERROR: '+str(e))
yield (chunk_nr, hexdigest, 'ERROR: ' + str(e))
except Exception as e:
yield ( '-', '-', 'ERROR: '+ str(e))
yield ('-', '-', 'ERROR: ' + str(e))
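
For orientation, here is a minimal self-contained sketch of the chunked-hash scheme this BlockHasher diff touches, using the defaults from the __init__ signature above (bs=4096, count=10000, hashlib.sha1). The skip/_skip_count sampling and the compare() path are left out, and chunk_hashes is an illustrative name, not the real API:

import hashlib

def chunk_hashes(path, bs=4096, count=10000, hash_class=hashlib.sha1):
    # One chunk covers bs * count bytes (the chunk_size computed above);
    # yields (chunk_nr, hexdigest) per chunk and nothing for empty files.
    with open(path, "rb") as fh:
        chunk_nr = 0
        while True:
            hasher = hash_class()
            read = 0
            for _ in range(count):
                block = fh.read(bs)
                if block == b"":
                    break
                hasher.update(block)
                read += len(block)
            if read == 0:
                return
            yield (chunk_nr, hasher.hexdigest())
            chunk_nr += 1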

View File

@ -36,4 +36,4 @@ class CachedProperty(object):
if hasattr(obj, '_cached_properties') and propname in obj._cached_properties:
return True
else:
return False
return False

View File

@ -10,12 +10,12 @@ class CliBase(object):
Overridden in subclasses that add stuff for the specific programs."""
# also used by setup.py
VERSION = "3.3-beta.3"
VERSION = "3.4-beta.1"
HEADER = "{} v{} - (c)2022 E.H.Eefting (edwin@datux.nl)".format(os.path.basename(sys.argv[0]), VERSION)
def __init__(self, argv, print_arguments=True):
self.parser=self.get_parser()
self.parser = self.get_parser()
self.args = self.parse_args(argv)
# helps with investigating failed regression tests:
@ -66,25 +66,24 @@ class CliBase(object):
epilog='Full manual at: https://github.com/psy0rz/zfs_autobackup')
# Basic options
group=parser.add_argument_group("Common options")
group = parser.add_argument_group("Common options")
group.add_argument('--help', '-h', action='store_true', help='show help')
group.add_argument('--test', '--dry-run', '-n', action='store_true',
help='Dry run, dont change anything, just show what would be done (still does all read-only '
'operations)')
help='Dry run, dont change anything, just show what would be done (still does all read-only '
'operations)')
group.add_argument('--verbose', '-v', action='store_true', help='verbose output')
group.add_argument('--debug', '-d', action='store_true',
help='Show zfs commands that are executed, stops after an exception.')
help='Show zfs commands that are executed, stops after an exception.')
group.add_argument('--debug-output', action='store_true',
help='Show zfs commands and their output/exit codes. (noisy)')
help='Show zfs commands and their output/exit codes. (noisy)')
group.add_argument('--progress', action='store_true',
help='show zfs progress output. Enabled automaticly on ttys. (use --no-progress to disable)')
help='show zfs progress output. Enabled automaticly on ttys. (use --no-progress to disable)')
group.add_argument('--no-progress', action='store_true',
help=argparse.SUPPRESS) # needed to workaround a zfs recv -v bug
help=argparse.SUPPRESS) # needed to workaround a zfs recv -v bug
group.add_argument('--utc', action='store_true',
help='Use UTC instead of local time when dealing with timestamps for both formatting and parsing. To snapshot in an ISO 8601 compliant time format you may for example specify --snapshot-format "{}-%%Y-%%m-%%dT%%H:%%M:%%SZ". Changing this parameter after-the-fact (existing snapshots) will cause their timestamps to be interpreted as a different time than before.')
help='Use UTC instead of local time when dealing with timestamps for both formatting and parsing. To snapshot in an ISO 8601 compliant time format you may for example specify --snapshot-format "{}-%%Y-%%m-%%dT%%H:%%M:%%SZ". Changing this parameter after-the-fact (existing snapshots) will cause their timestamps to be interpreted as a different time than before.')
group.add_argument('--version', action='store_true',
help='Show version.')
help='Show version.')
return parser

View File

@ -41,7 +41,7 @@ class CmdItem:
self.exit_handler = exit_handler
self.shell = shell
self.process = None
self.next = None #next item in pipe, set by CmdPipe
self.next = None # next item in pipe, set by CmdPipe
def __str__(self):
"""return copy-pastable version of command."""
@ -126,7 +126,7 @@ class CmdPipe:
success = True
for item in self.items:
if item.exit_handler is not None:
success=item.exit_handler(item.process.returncode) and success
success = item.exit_handler(item.process.returncode) and success
return success
@ -159,7 +159,6 @@ class CmdPipe:
else:
eof_count = eof_count + 1
if item.process.poll() is not None:
done_count = done_count + 1
@ -167,8 +166,6 @@ class CmdPipe:
if eof_count == len(selectors) and done_count == len(self.items):
break
def __create(self):
"""create actual processes, do piping and return selectors."""

View File

@ -17,7 +17,7 @@ class ExecuteError(Exception):
class ExecuteNode(LogStub):
"""an endpoint to execute local or remote commands via ssh"""
PIPE=1
PIPE = 1
def __init__(self, ssh_config=None, ssh_to=None, readonly=False, debug_output=False):
"""ssh_config: custom ssh config
@ -51,35 +51,35 @@ class ExecuteNode(LogStub):
def _quote(self, cmd):
"""return quoted version of command. if it has value PIPE it will add an actual | """
if cmd==self.PIPE:
return('|')
if cmd == self.PIPE:
return ('|')
else:
return cmd_quote(cmd)
def _shell_cmd(self, cmd, cwd):
"""prefix specified ssh shell to command and escape shell characters"""
ret=[]
ret = []
#add remote shell
# add remote shell
if not self.is_local():
#note: dont escape this part (executed directly without shell)
ret=["ssh"]
# note: dont escape this part (executed directly without shell)
ret = ["ssh"]
if self.ssh_config is not None:
ret.extend(["-F", self.ssh_config])
ret.append(self.ssh_to)
#note: DO escape from here, executed in either local or remote shell.
# note: DO escape from here, executed in either local or remote shell.
shell_str=""
shell_str = ""
#add cwd change?
# add cwd change?
if cwd is not None:
shell_str=shell_str + "cd " + self._quote(cwd) + "; "
shell_str = shell_str + "cd " + self._quote(cwd) + "; "
shell_str=shell_str + " ".join(map(self._quote, cmd))
shell_str = shell_str + " ".join(map(self._quote, cmd))
ret.append(shell_str)
@ -121,7 +121,7 @@ class ExecuteNode(LogStub):
# stderr parser
error_lines = []
returned_exit_code=None
returned_exit_code = None
def stderr_handler(line):
if tab_split:
@ -139,7 +139,8 @@ class ExecuteNode(LogStub):
self.debug("EXIT > {}".format(exit_code))
if (valid_exitcodes != []) and (exit_code not in valid_exitcodes):
self.error("Command \"{}\" returned exit code {} (valid codes: {})".format(cmd_item, exit_code, valid_exitcodes))
self.error("Command \"{}\" returned exit code {} (valid codes: {})".format(cmd_item, exit_code,
valid_exitcodes))
return False
return True
@ -149,7 +150,7 @@ class ExecuteNode(LogStub):
if pipe:
# dont specify output handler, so it will get piped to next process
stdout_handler=None
stdout_handler = None
else:
# handle output manually, dont pipe it
def stdout_handler(line):
@ -160,7 +161,8 @@ class ExecuteNode(LogStub):
self._parse_stdout(line)
# add shell command and handlers to pipe
cmd_item=CmdItem(cmd=self._shell_cmd(cmd, cwd), readonly=readonly, stderr_handler=stderr_handler, exit_handler=exit_handler, shell=self.is_local(), stdout_handler=stdout_handler)
cmd_item = CmdItem(cmd=self._shell_cmd(cmd, cwd), readonly=readonly, stderr_handler=stderr_handler,
exit_handler=exit_handler, shell=self.is_local(), stdout_handler=stdout_handler)
cmd_pipe.add(cmd_item)
# return CmdPipe instead of executing?
@ -174,7 +176,7 @@ class ExecuteNode(LogStub):
# execute and calls handlers in CmdPipe
if not cmd_pipe.execute():
raise(ExecuteError("Last command returned error"))
raise (ExecuteError("Last command returned error"))
if return_all:
return output_lines, error_lines, cmd_item.process and cmd_item.process.returncode
@ -183,7 +185,8 @@ class ExecuteNode(LogStub):
else:
return output_lines
def script(self, lines, inp=None, stdout_handler=None, stderr_handler=None, exit_handler=None, valid_exitcodes=None, readonly=False, hide_errors=False, pipe=False):
def script(self, lines, inp=None, stdout_handler=None, stderr_handler=None, exit_handler=None, valid_exitcodes=None,
readonly=False, hide_errors=False, pipe=False):
"""Run a multiline script on the node.
This is much more low level than run() and allows for finer grained control.
@ -212,14 +215,14 @@ class ExecuteNode(LogStub):
# add stuff to existing pipe
cmd_pipe = inp
internal_stdout_handler=None
internal_stdout_handler = None
if stdout_handler is not None:
if self.debug_output:
def internal_stdout_handler(line):
self.debug("STDOUT > " + line.rstrip())
stdout_handler(line)
else:
internal_stdout_handler=stdout_handler
internal_stdout_handler = stdout_handler
def internal_stderr_handler(line):
self._parse_stderr(line, hide_errors)
@ -243,12 +246,12 @@ class ExecuteNode(LogStub):
return True
#build command
cmd=[]
# build command
cmd = []
#add remote shell
# add remote shell
if not self.is_local():
#note: dont escape this part (executed directly without shell)
# note: dont escape this part (executed directly without shell)
cmd.append("ssh")
if self.ssh_config is not None:
@ -260,7 +263,9 @@ class ExecuteNode(LogStub):
cmd.append("\n".join(lines))
# add shell command and handlers to pipe
cmd_item=CmdItem(cmd=cmd, readonly=readonly, stderr_handler=internal_stderr_handler, exit_handler=internal_exit_handler, stdout_handler=internal_stdout_handler, shell=self.is_local())
cmd_item = CmdItem(cmd=cmd, readonly=readonly, stderr_handler=internal_stderr_handler,
exit_handler=internal_exit_handler, stdout_handler=internal_stdout_handler,
shell=self.is_local())
cmd_pipe.add(cmd_item)
self.debug("SCRIPT > {}".format(cmd_pipe))

View File

@ -3,6 +3,7 @@ from __future__ import print_function
import sys
class LogConsole:
"""Log-class that outputs to console, adding colors if needed"""
@ -10,11 +11,11 @@ class LogConsole:
self.last_log = ""
self.show_debug = show_debug
self.show_verbose = show_verbose
self._progress_uncleared=False
self._progress_uncleared = False
if color:
# try to use color, failback if colorama not available
self.colorama=False
self.colorama = False
try:
import colorama
global colorama
@ -23,7 +24,7 @@ class LogConsole:
pass
else:
self.colorama=False
self.colorama = False
def error(self, txt):
self.clear_progress()
@ -62,7 +63,7 @@ class LogConsole:
def progress(self, txt):
"""print progress output to stderr (stays on same line)"""
self.clear_progress()
self._progress_uncleared=True
self._progress_uncleared = True
print(">>> {}\r".format(txt), end='', file=sys.stderr)
sys.stderr.flush()
@ -71,4 +72,4 @@ class LogConsole:
import colorama
print(colorama.ansi.clear_line(), end='', file=sys.stderr)
# sys.stderr.flush()
self._progress_uncleared=False
self._progress_uncleared = False

View File

@ -1,5 +1,5 @@
#Used for baseclasses that dont implement their own logging (Like ExecuteNode)
#Usually logging is implemented in subclasses (Like ZfsNode thats a subclass of ExecuteNode), but for regression testing its nice to have these stubs.
# Used for baseclasses that dont implement their own logging (Like ExecuteNode)
# Usually logging is implemented in subclasses (Like ZfsNode thats a subclass of ExecuteNode), but for regression testing its nice to have these stubs.
class LogStub:
"""Just a stub, usually overriden in subclasses."""
@ -15,4 +15,4 @@ class LogStub:
print("WARNING: " + txt)
def error(self, txt):
print("ERROR : " + txt)
print("ERROR : " + txt)

View File

@ -1,4 +1,3 @@
from .ThinnerRule import ThinnerRule
@ -48,7 +47,6 @@ class Thinner:
now: if specified, use this time as current time
"""
# always keep a number of the last objets?
if self.always_keep:
# all of them
@ -71,7 +69,7 @@ class Thinner:
# traverse objects
for thisobject in objects:
#ignore stuff without timestamp, always keep those.
# ignore stuff without timestamp, always keep those.
if thisobject.timestamp is None:
keeps.append(thisobject)
else:
@ -96,4 +94,4 @@ class Thinner:
else:
removes.append(thisobject)
return keeps, removes
return keeps, removes

View File

@ -68,4 +68,4 @@ class ThinnerRule:
def __str__(self):
"""get schedule as a schedule string"""
return self.rule_str
return self.rule_str

View File

@ -14,7 +14,7 @@ class TreeHasher():
:type block_hasher: BlockHasher
"""
self.block_hasher=block_hasher
self.block_hasher = block_hasher
def generate(self, start_path):
"""Use BlockHasher on every file in a tree, yielding the results
@ -28,12 +28,11 @@ class TreeHasher():
for (dirpath, dirnames, filenames) in os.walk(start_path, onerror=walkerror):
for f in filenames:
file_path=os.path.join(dirpath, f)
file_path = os.path.join(dirpath, f)
if (not os.path.islink(file_path)) and os.path.isfile(file_path):
for (chunk_nr, hash) in self.block_hasher.generate(file_path):
yield ( os.path.relpath(file_path,start_path), chunk_nr, hash )
yield (os.path.relpath(file_path, start_path), chunk_nr, hash)
def compare(self, start_path, generator):
"""reads from generator and compares blocks
@ -43,18 +42,14 @@ class TreeHasher():
"""
count=0
def filter_file_name( file_name, chunk_nr, hexdigest):
return ( chunk_nr, hexdigest )
count = 0
def filter_file_name(file_name, chunk_nr, hexdigest):
return (chunk_nr, hexdigest)
for file_name, group_generator in itertools.groupby(generator, lambda x: x[0]):
count=count+1
block_generator=itertools.starmap(filter_file_name, group_generator)
for ( chunk_nr, compare_hexdigest, actual_hexdigest) in self.block_hasher.compare(os.path.join(start_path,file_name), block_generator):
yield ( file_name, chunk_nr, compare_hexdigest, actual_hexdigest )
count = count + 1
block_generator = itertools.starmap(filter_file_name, group_generator)
for (chunk_nr, compare_hexdigest, actual_hexdigest) in self.block_hasher.compare(
os.path.join(start_path, file_name), block_generator):
yield (file_name, chunk_nr, compare_hexdigest, actual_hexdigest)
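
For readers unfamiliar with the itertools.groupby idiom that compare() relies on, a minimal sketch of the per-file regrouping (group_by_file is illustrative; the real code feeds each group straight into BlockHasher.compare):

import itertools

def group_by_file(rows):
    # rows are (file_name, chunk_nr, hexdigest) tuples in file order, i.e. what
    # generate() yields; groupby splits them back into one stream per file.
    for file_name, group in itertools.groupby(rows, lambda row: row[0]):
        yield file_name, [(chunk_nr, digest) for _, chunk_nr, digest in group]

# list(group_by_file([("a.txt", 0, "d1"), ("a.txt", 1, "d2"), ("b.img", 0, "d3")]))
# -> [('a.txt', [(0, 'd1'), (1, 'd2')]), ('b.img', [(0, 'd3')])]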

View File

@ -49,13 +49,14 @@ class ZfsAuto(CliBase):
self.exclude_paths.append(args.target_path)
else:
if not args.exclude_received and not args.include_received:
self.verbose("NOTE: Source and target are on the same host, adding --exclude-received to commandline. (use --include-received to overrule)")
self.verbose(
"NOTE: Source and target are on the same host, adding --exclude-received to commandline. (use --include-received to overrule)")
args.exclude_received = True
if args.test:
self.warning("TEST MODE - SIMULATING WITHOUT MAKING ANY CHANGES")
#format all the names
# format all the names
self.property_name = args.property_format.format(args.backup_name)
self.snapshot_time_format = args.snapshot_format.format(args.backup_name)
self.hold_name = args.hold_format.format(args.backup_name)
@ -63,7 +64,8 @@ class ZfsAuto(CliBase):
dt = datetime_now(args.utc)
self.verbose("")
self.verbose("Current time {} : {}".format(args.utc and "UTC" or " ", dt.strftime("%Y-%m-%d %H:%M:%S")))
self.verbose(
"Current time {} : {}".format(args.utc and "UTC" or " ", dt.strftime("%Y-%m-%d %H:%M:%S")))
self.verbose("Selecting dataset property : {}".format(self.property_name))
self.verbose("Snapshot format : {}".format(self.snapshot_time_format))
@ -75,43 +77,40 @@ class ZfsAuto(CliBase):
parser = super(ZfsAuto, self).get_parser()
#positional arguments
# positional arguments
parser.add_argument('backup_name', metavar='BACKUP-NAME', default=None, nargs='?',
help='Name of the backup to select')
parser.add_argument('target_path', metavar='TARGET-PATH', default=None, nargs='?',
help='Target ZFS filesystem (optional)')
# SSH options
group=parser.add_argument_group("SSH options")
group = parser.add_argument_group("SSH options")
group.add_argument('--ssh-config', metavar='CONFIG-FILE', default=None, help='Custom ssh client config')
group.add_argument('--ssh-source', metavar='USER@HOST', default=None,
help='Source host to pull backup from.')
help='Source host to pull backup from.')
group.add_argument('--ssh-target', metavar='USER@HOST', default=None,
help='Target host to push backup to.')
help='Target host to push backup to.')
group=parser.add_argument_group("String formatting options")
group = parser.add_argument_group("String formatting options")
group.add_argument('--property-format', metavar='FORMAT', default="autobackup:{}",
help='Dataset selection string format. Default: %(default)s')
help='Dataset selection string format. Default: %(default)s')
group.add_argument('--snapshot-format', metavar='FORMAT', default="{}-%Y%m%d%H%M%S",
help='ZFS Snapshot string format. Default: %(default)s')
help='ZFS Snapshot string format. Default: %(default)s')
group.add_argument('--hold-format', metavar='FORMAT', default="zfs_autobackup:{}",
help='ZFS hold string format. Default: %(default)s')
help='ZFS hold string format. Default: %(default)s')
group.add_argument('--strip-path', metavar='N', default=0, type=int,
help='Number of directories to strip from target path.')
group=parser.add_argument_group("Selection options")
group = parser.add_argument_group("Selection options")
group.add_argument('--ignore-replicated', action='store_true', help=argparse.SUPPRESS)
group.add_argument('--exclude-unchanged', metavar='BYTES', default=0, type=int,
help='Exclude datasets that have less than BYTES data changed since any last snapshot. (Use with proxmox HA replication)')
help='Exclude datasets that have less than BYTES data changed since any last snapshot. (Use with proxmox HA replication)')
group.add_argument('--exclude-received', action='store_true',
help='Exclude datasets that have the origin of their autobackup: property as "received". '
'This can avoid recursive replication between two backup partners.')
help='Exclude datasets that have the origin of their autobackup: property as "received". '
'This can avoid recursive replication between two backup partners.')
group.add_argument('--include-received', action='store_true',
help=argparse.SUPPRESS)
help=argparse.SUPPRESS)
def regex_argument_type(input_line):
"""Parses regex arguments into re.Pattern objects"""
@ -119,7 +118,9 @@ class ZfsAuto(CliBase):
return re.compile(input_line)
except:
raise ValueError("Could not parse argument '{}' as a regular expression".format(input_line))
group.add_argument('--exclude-snapshot-pattern', action='append', default=[], type=regex_argument_type, help="Regular expression to match snapshots that will be ignored.")
group.add_argument('--exclude-snapshot-pattern', action='append', default=[], type=regex_argument_type,
help="Regular expression to match snapshots that will be ignored.")
return parser
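
A worked example of the string-formatting options defined above, using the default formats and a hypothetical backup name "offsite1" (format_names is an illustrative helper; the real expansion happens in parse_args):

from datetime import datetime

def format_names(backup_name, utc=False,
                 property_format="autobackup:{}",
                 snapshot_format="{}-%Y%m%d%H%M%S",
                 hold_format="zfs_autobackup:{}"):
    dt = datetime.utcnow() if utc else datetime.now()
    return {
        "property": property_format.format(backup_name),
        "snapshot": dt.strftime(snapshot_format.format(backup_name)),
        "hold": hold_format.format(backup_name),
    }

# format_names("offsite1") gives, for example:
# {'property': 'autobackup:offsite1',
#  'snapshot': 'offsite1-20240924182217',
#  'hold': 'zfs_autobackup:offsite1'}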

View File

@ -1,4 +1,3 @@
import argparse
from signal import signal, SIGPIPE
from .util import output_redir, sigpipe_handler, datetime_now
@ -12,6 +11,7 @@ from .ZfsDataset import ZfsDataset
from .ZfsNode import ZfsNode
from .ThinnerRule import ThinnerRule
class ZfsAutobackup(ZfsAuto):
"""The main zfs-autobackup class. Start here, at run() :)"""
@ -73,7 +73,6 @@ class ZfsAutobackup(ZfsAuto):
group.add_argument('--no-guid-check', action='store_true',
help='Dont check guid of common snapshots. (faster)')
group = parser.add_argument_group("Transfer options")
group.add_argument('--no-send', action='store_true',
help='Don\'t transfer snapshots (useful for cleanups, or if you want a separate send-cronjob)')
@ -324,8 +323,8 @@ class ZfsAutobackup(ZfsAuto):
def make_target_name(self, source_dataset):
"""make target_name from a source_dataset"""
stripped=source_dataset.lstrip_path(self.args.strip_path)
if stripped!="":
stripped = source_dataset.lstrip_path(self.args.strip_path)
if stripped != "":
return self.args.target_path + "/" + stripped
else:
return self.args.target_path
@ -334,16 +333,20 @@ class ZfsAutobackup(ZfsAuto):
"""check all target names for collesions etc due to strip-options"""
self.debug("Checking target names:")
target_datasets={}
target_datasets = {}
for source_dataset in source_datasets:
target_name = self.make_target_name(source_dataset)
source_dataset.debug("-> {}".format(target_name))
if target_name in target_datasets:
raise Exception("Target collision: Target path {} encountered twice, due to: {} and {}".format(target_name, source_dataset, target_datasets[target_name]))
raise Exception(
"Target collision: Target path {} encountered twice, due to: {} and {}".format(target_name,
source_dataset,
target_datasets[
target_name]))
target_datasets[target_name]=source_dataset
target_datasets[target_name] = source_dataset
# NOTE: this method also uses self.args. args that need extra processing are passed as function parameters:
def sync_datasets(self, source_node, source_datasets, target_node):
@ -397,7 +400,8 @@ class ZfsAutobackup(ZfsAuto):
destroy_incompatible=self.args.destroy_incompatible,
send_pipes=send_pipes, recv_pipes=recv_pipes,
decrypt=self.args.decrypt, encrypt=self.args.encrypt,
zfs_compressed=self.args.zfs_compressed, force=self.args.force, guid_check=not self.args.no_guid_check)
zfs_compressed=self.args.zfs_compressed, force=self.args.force,
guid_check=not self.args.no_guid_check)
except Exception as e:
fail_count = fail_count + 1
@ -406,7 +410,6 @@ class ZfsAutobackup(ZfsAuto):
self.verbose("Debug mode, aborting on first error")
raise
target_path_dataset = target_node.get_dataset(self.args.target_path)
if not self.args.no_thinning:
self.thin_missing_targets(target_dataset=target_path_dataset, used_target_datasets=target_datasets)
@ -477,10 +480,10 @@ class ZfsAutobackup(ZfsAuto):
################# select source datasets
self.set_title("Selecting")
( source_datasets, excluded_datasets) = source_node.selected_datasets(property_name=self.property_name,
exclude_received=self.args.exclude_received,
exclude_paths=self.exclude_paths,
exclude_unchanged=self.args.exclude_unchanged)
(source_datasets, excluded_datasets) = source_node.selected_datasets(property_name=self.property_name,
exclude_received=self.args.exclude_received,
exclude_paths=self.exclude_paths,
exclude_unchanged=self.args.exclude_unchanged)
if not source_datasets and not excluded_datasets:
self.print_error_sources()
return 255
@ -572,7 +575,7 @@ def cli():
signal(SIGPIPE, sigpipe_handler)
failed_datasets=ZfsAutobackup(sys.argv[1:], False).run()
failed_datasets = ZfsAutobackup(sys.argv[1:], False).run()
sys.exit(min(failed_datasets, 255))
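
A sketch of the source-to-target mapping that check_target_names above guards against collisions, assuming lstrip_path() simply drops the first N components of the dataset name (as the --strip-path help suggests); make_target_name here is an illustrative stand-in:

def make_target_name(source_dataset, target_path, strip_path=0):
    stripped = "/".join(source_dataset.split("/")[strip_path:])
    return target_path + "/" + stripped if stripped != "" else target_path

# make_target_name("rpool/data/vm-100-disk-0", "backup/pve", strip_path=1)
# -> 'backup/pve/data/vm-100-disk-0'
# Two sources that differ only in the stripped components map to the same
# target path, which is exactly the collision the check raises on.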

View File

@ -76,14 +76,14 @@ def verify_filesystem(source_snapshot, source_mnt, target_snapshot, target_mnt,
source_snapshot.mount(source_mnt)
target_snapshot.mount(target_mnt)
if method=='rsync':
if method == 'rsync':
compare_trees_rsync(source_snapshot.zfs_node, source_mnt, target_snapshot.zfs_node, target_mnt)
# elif method == 'tar':
# compare_trees_tar(source_snapshot.zfs_node, source_mnt, target_snapshot.zfs_node, target_mnt)
elif method == 'find':
compare_trees_find(source_snapshot.zfs_node, source_mnt, target_snapshot.zfs_node, target_mnt)
else:
raise(Exception("program errror, unknown method"))
raise (Exception("program errror, unknown method"))
finally:
source_snapshot.unmount(source_mnt)
@ -109,7 +109,6 @@ def verify_filesystem(source_snapshot, source_mnt, target_snapshot, target_mnt,
# return hashed
# def deacitvate_volume_snapshot(snapshot):
# clone_name=get_tmp_clone_name(snapshot)
# clone=snapshot.zfs_node.get_dataset(clone_name)
@ -119,13 +118,13 @@ def verify_volume(source_dataset, source_snapshot, target_dataset, target_snapsh
"""compare the contents of two zfs volume snapshots"""
# try:
source_dev= activate_volume_snapshot(source_snapshot)
target_dev= activate_volume_snapshot(target_snapshot)
source_dev = activate_volume_snapshot(source_snapshot)
target_dev = activate_volume_snapshot(target_snapshot)
source_hash= hash_dev(source_snapshot.zfs_node, source_dev)
target_hash= hash_dev(target_snapshot.zfs_node, target_dev)
source_hash = hash_dev(source_snapshot.zfs_node, source_dev)
target_hash = hash_dev(target_snapshot.zfs_node, target_dev)
if source_hash!=target_hash:
if source_hash != target_hash:
raise Exception("md5hash difference: {} != {}".format(source_hash, target_hash))
# finally:
@ -150,7 +149,7 @@ class ZfsAutoverify(ZfsAuto):
def parse_args(self, argv):
"""do extra checks on common args"""
args=super(ZfsAutoverify, self).parse_args(argv)
args = super(ZfsAutoverify, self).parse_args(argv)
if args.target_path == None:
self.log.error("Please specify TARGET-PATH")
@ -161,17 +160,17 @@ class ZfsAutoverify(ZfsAuto):
def get_parser(self):
"""extend common parser with extra stuff needed for zfs-autobackup"""
parser=super(ZfsAutoverify, self).get_parser()
parser = super(ZfsAutoverify, self).get_parser()
group=parser.add_argument_group("Verify options")
group = parser.add_argument_group("Verify options")
group.add_argument('--fs-compare', metavar='METHOD', default="find", choices=["find", "rsync"],
help='Compare method to use for filesystems. (find, rsync) Default: %(default)s ')
help='Compare method to use for filesystems. (find, rsync) Default: %(default)s ')
return parser
def verify_datasets(self, source_mnt, source_datasets, target_node, target_mnt):
fail_count=0
fail_count = 0
count = 0
for source_dataset in source_datasets:
@ -190,16 +189,17 @@ class ZfsAutoverify(ZfsAuto):
target_snapshot = target_dataset.find_snapshot(source_snapshot)
if source_snapshot is None or target_snapshot is None:
raise(Exception("Cant find common snapshot"))
raise (Exception("Cant find common snapshot"))
target_snapshot.verbose("Verifying...")
if source_dataset.properties['type']=="filesystem":
if source_dataset.properties['type'] == "filesystem":
verify_filesystem(source_snapshot, source_mnt, target_snapshot, target_mnt, self.args.fs_compare)
elif source_dataset.properties['type']=="volume":
elif source_dataset.properties['type'] == "volume":
verify_volume(source_dataset, source_snapshot, target_dataset, target_snapshot)
else:
raise(Exception("{} has unknown type {}".format(source_dataset, source_dataset.properties['type'])))
raise (
Exception("{} has unknown type {}".format(source_dataset, source_dataset.properties['type'])))
except Exception as e:
@ -219,11 +219,10 @@ class ZfsAutoverify(ZfsAuto):
def run(self):
source_node=None
source_mnt=None
target_node=None
target_mnt=None
source_node = None
source_mnt = None
target_node = None
target_mnt = None
try:
@ -240,10 +239,10 @@ class ZfsAutoverify(ZfsAuto):
################# select source datasets
self.set_title("Selecting")
( source_datasets, excluded_datasets) = source_node.selected_datasets(property_name=self.property_name,
exclude_received=self.args.exclude_received,
exclude_paths=self.exclude_paths,
exclude_unchanged=self.args.exclude_unchanged)
(source_datasets, excluded_datasets) = source_node.selected_datasets(property_name=self.property_name,
exclude_received=self.args.exclude_received,
exclude_paths=self.exclude_paths,
exclude_unchanged=self.args.exclude_unchanged)
if not source_datasets and not excluded_datasets:
self.print_error_sources()
return 255
@ -260,7 +259,7 @@ class ZfsAutoverify(ZfsAuto):
self.set_title("Verifying")
source_mnt, target_mnt= create_mountpoints(source_node, target_node)
source_mnt, target_mnt = create_mountpoints(source_node, target_node)
fail_count = self.verify_datasets(
source_mnt=source_mnt,
@ -302,15 +301,13 @@ class ZfsAutoverify(ZfsAuto):
cleanup_mountpoint(target_node, target_mnt)
def cli():
import sys
raise(Exception("This program is incomplete, dont use it yet."))
raise (Exception("This program is incomplete, dont use it yet."))
signal(SIGPIPE, sigpipe_handler)
failed = ZfsAutoverify(sys.argv[1:], False).run()
sys.exit(min(failed,255))
sys.exit(min(failed, 255))
if __name__ == "__main__":

View File

@ -27,14 +27,16 @@ class ZfsCheck(CliBase):
parser = super(ZfsCheck, self).get_parser()
# positional arguments
parser.add_argument('target', metavar='TARGET', default=None, nargs='?', help='Target to checkum. (can be blockdevice, directory or ZFS snapshot)')
parser.add_argument('target', metavar='TARGET', default=None, nargs='?',
help='Target to checkum. (can be blockdevice, directory or ZFS snapshot)')
group = parser.add_argument_group('Checker options')
group.add_argument('--block-size', metavar="BYTES", default=4096, help="Read block-size, default %(default)s",
type=int)
group.add_argument('--count', metavar="COUNT", default=int((100 * (1024 ** 2)) / 4096),
help="Hash chunks of COUNT blocks. Default %(default)s . (CHUNK size is BYTES * COUNT) ", type=int) # 100MiB
help="Hash chunks of COUNT blocks. Default %(default)s . (CHUNK size is BYTES * COUNT) ",
type=int) # 100MiB
group.add_argument('--check', '-c', metavar="FILE", default=None, const=True, nargs='?',
help="Read hashes from STDIN (or FILE) and compare them")
@ -57,11 +59,10 @@ class ZfsCheck(CliBase):
self.verbose("Target : {}".format(args.target))
self.verbose("Block size : {} bytes".format(args.block_size))
self.verbose("Block count : {}".format(args.count))
self.verbose("Effective chunk size : {} bytes".format(args.count*args.block_size))
self.verbose("Skip chunk count : {} (checks {:.2f}% of data)".format(args.skip, 100/(1+args.skip)))
self.verbose("Effective chunk size : {} bytes".format(args.count * args.block_size))
self.verbose("Skip chunk count : {} (checks {:.2f}% of data)".format(args.skip, 100 / (1 + args.skip)))
self.verbose("")
return args
def prepare_zfs_filesystem(self, snapshot):
@ -106,7 +107,9 @@ class ZfsCheck(CliBase):
time.sleep(1)
raise (Exception("Timeout while waiting for /dev entry to appear. (looking in: {}). Hint: did you forget to load the encryption key?".format(locations)))
raise (Exception(
"Timeout while waiting for /dev entry to appear. (looking in: {}). Hint: did you forget to load the encryption key?".format(
locations)))
def cleanup_zfs_volume(self, snapshot):
"""destroys temporary volume snapshot"""
@ -144,34 +147,34 @@ class ZfsCheck(CliBase):
"""parse input lines and yield items to use in compare functions"""
if self.args.check is True:
input_fh=sys.stdin
input_fh = sys.stdin
else:
input_fh=open(self.args.check, 'r')
input_fh = open(self.args.check, 'r')
last_progress_time = time.time()
progress_checked = 0
progress_skipped = 0
line=input_fh.readline()
skip=0
line = input_fh.readline()
skip = 0
while line:
i=line.rstrip().split("\t")
#ignores lines without tabs
if (len(i)>1):
i = line.rstrip().split("\t")
# ignores lines without tabs
if (len(i) > 1):
if skip==0:
progress_checked=progress_checked+1
if skip == 0:
progress_checked = progress_checked + 1
yield i
skip=self.args.skip
skip = self.args.skip
else:
skip=skip-1
progress_skipped=progress_skipped+1
skip = skip - 1
progress_skipped = progress_skipped + 1
if self.args.progress and time.time() - last_progress_time > 1:
last_progress_time = time.time()
self.progress("Checked {} hashes (skipped {})".format(progress_checked, progress_skipped))
line=input_fh.readline()
line = input_fh.readline()
self.verbose("Checked {} hashes (skipped {})".format(progress_checked, progress_skipped))
@ -224,7 +227,7 @@ class ZfsCheck(CliBase):
if "@" in self.args.target:
# zfs snapshot
snapshot=self.node.get_dataset(self.args.target)
snapshot = self.node.get_dataset(self.args.target)
if not snapshot.exists:
raise Exception("ZFS snapshot {} does not exist!".format(snapshot))
dataset_type = snapshot.parent.properties['type']
@ -240,7 +243,7 @@ class ZfsCheck(CliBase):
def cleanup_target(self):
if "@" in self.args.target:
# zfs snapshot
snapshot=self.node.get_dataset(self.args.target)
snapshot = self.node.get_dataset(self.args.target)
if not snapshot.exists:
return
@ -253,28 +256,28 @@ class ZfsCheck(CliBase):
def run(self):
compare_generator=None
hash_generator=None
compare_generator = None
hash_generator = None
try:
prepared_target=self.prepare_target()
is_dir=os.path.isdir(prepared_target)
prepared_target = self.prepare_target()
is_dir = os.path.isdir(prepared_target)
#run as compare
# run as compare
if self.args.check is not None:
input_generator=self.generate_input()
input_generator = self.generate_input()
if is_dir:
compare_generator = self.generate_tree_compare(prepared_target, input_generator)
else:
compare_generator=self.generate_file_compare(prepared_target, input_generator)
errors=self.print_errors(compare_generator)
#run as generator
compare_generator = self.generate_file_compare(prepared_target, input_generator)
errors = self.print_errors(compare_generator)
# run as generator
else:
if is_dir:
hash_generator = self.generate_tree_hashes(prepared_target)
else:
hash_generator=self.generate_file_hashes(prepared_target)
hash_generator = self.generate_file_hashes(prepared_target)
errors=self.print_hashes(hash_generator)
errors = self.print_hashes(hash_generator)
except Exception as e:
self.error("Exception: " + str(e))
@ -286,10 +289,10 @@ class ZfsCheck(CliBase):
return 255
finally:
#important to call check_output so that cleanup still functions in case of a broken pipe:
# important to call check_output so that cleanup still functions in case of a broken pipe:
# util.check_output()
#close generators, to make sure files are not in use anymore when cleaning up
# close generators, to make sure files are not in use anymore when cleaning up
if hash_generator is not None:
hash_generator.close()
if compare_generator is not None:
@ -302,8 +305,8 @@ class ZfsCheck(CliBase):
def cli():
import sys
signal(SIGPIPE, sigpipe_handler)
failed=ZfsCheck(sys.argv[1:], False).run()
sys.exit(min(failed,255))
failed = ZfsCheck(sys.argv[1:], False).run()
sys.exit(min(failed, 255))
if __name__ == "__main__":
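
The verbose output in parse_args above boils down to two numbers; a small illustrative helper (check_plan is not part of the code) reproduces them from the parser defaults:

def check_plan(block_size=4096, count=int((100 * (1024 ** 2)) / 4096), skip=0):
    chunk_size = block_size * count   # bytes hashed per chunk
    coverage = 100 / (1 + skip)       # percentage of chunks actually checked
    return chunk_size, coverage

# check_plan()       -> (104857600, 100.0)  100 MiB chunks, every chunk checked
# check_plan(skip=4) -> (104857600, 20.0)   only every 5th chunk, 20% of the data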

View File

@ -1,4 +1,3 @@
import re
from datetime import datetime
import sys
@ -29,28 +28,27 @@ class ZfsDataset:
self.zfs_node = zfs_node
self.name = name # full name
#caching
self.__snapshots=None #type: None|list[ZfsDataset]
self.__written_since_ours=None #type: None|int
self.__exists_check=None #type: None|bool
self.__properties=None #type: None|dict[str,str]
self.__recursive_datasets=None #type: None|list[ZfsDataset]
self.__datasets=None #type: None|list[ZfsDataset]
# caching
self.__snapshots = None # type: None|list[ZfsDataset]
self.__written_since_ours = None # type: None|int
self.__exists_check = None # type: None|bool
self.__properties = None # type: None|dict[str,str]
self.__recursive_datasets = None # type: None|list[ZfsDataset]
self.__datasets = None # type: None|list[ZfsDataset]
self.invalidate_cache()
self.force_exists = force_exists
def invalidate_cache(self):
"""clear caches"""
# CachedProperty.clear(self)
self.force_exists = None
self.__snapshots=None
self.__written_since_ours=None
self.__exists_check=None
self.__properties=None
self.__recursive_datasets=None
self.__datasets=None
self.__snapshots = None
self.__written_since_ours = None
self.__exists_check = None
self.__properties = None
self.__recursive_datasets = None
self.__datasets = None
def __repr__(self):
return "{}: {}".format(self.zfs_node, self.name)
@ -93,7 +91,6 @@ class ZfsDataset:
"""
self.zfs_node.debug("{}: {}".format(self.name, txt))
def split_path(self):
"""return the path elements as an array"""
return self.name.split("/")
@ -147,14 +144,11 @@ class ZfsDataset:
if not self.is_snapshot:
return False
for pattern in self.zfs_node.exclude_snapshot_patterns:
if pattern.search(self.name) is not None:
self.debug("Excluded (path matches snapshot exclude pattern)")
return True
def is_selected(self, value, source, inherited, exclude_received, exclude_paths, exclude_unchanged):
"""determine if dataset should be selected for backup (called from
ZfsNode)
@ -290,9 +284,9 @@ class ZfsDataset:
if self.__exists_check is None:
self.debug("Checking if dataset exists")
self.__exists_check=(len(self.zfs_node.run(tab_split=True, cmd=["zfs", "list", self.name], readonly=True,
valid_exitcodes=[0, 1],
hide_errors=True)) > 0)
self.__exists_check = (len(self.zfs_node.run(tab_split=True, cmd=["zfs", "list", self.name], readonly=True,
valid_exitcodes=[0, 1],
hide_errors=True)) > 0)
return self.__exists_check
@ -454,7 +448,6 @@ class ZfsDataset:
seconds = time.mktime(dt.timetuple())
return seconds
# def add_virtual_snapshot(self, snapshot):
# """pretend a snapshot exists (usefull in test mode)"""
#
@ -474,18 +467,15 @@ class ZfsDataset:
:rtype: list[ZfsDataset]
"""
#cached?
# cached?
if self.__snapshots is None:
self.debug("Getting snapshots")
cmd = [
"zfs", "list", "-d", "1", "-r", "-t", "snapshot", "-H", "-o", "name", self.name
]
self.__snapshots=self.zfs_node.get_datasets(self.zfs_node.run(cmd=cmd, readonly=True), force_exists=True)
self.__snapshots = self.zfs_node.get_datasets(self.zfs_node.run(cmd=cmd, readonly=True), force_exists=True)
return self.__snapshots
@ -517,7 +507,7 @@ class ZfsDataset:
"""
for snapshot in snapshots:
if snapshot.snapshot_name==self.snapshot_name:
if snapshot.snapshot_name == self.snapshot_name:
return snapshot
return None
@ -571,7 +561,6 @@ class ZfsDataset:
"""get number of bytes written since our last snapshot"""
if self.__written_since_ours is None:
latest_snapshot = self.our_snapshots[-1]
self.debug("Getting bytes written since our last snapshot")
@ -579,7 +568,7 @@ class ZfsDataset:
output = self.zfs_node.run(readonly=True, tab_split=False, cmd=cmd, valid_exitcodes=[0])
self.__written_since_ours=int(output[0])
self.__written_since_ours = int(output[0])
return self.__written_since_ours
@ -612,14 +601,13 @@ class ZfsDataset:
"""
if self.__recursive_datasets is None:
self.debug("Getting all recursive datasets under us")
names = self.zfs_node.run(tab_split=False, readonly=True, valid_exitcodes=[0], cmd=[
"zfs", "list", "-r", "-t", types, "-o", "name", "-H", self.name
])
self.__recursive_datasets=self.zfs_node.get_datasets(names[1:], force_exists=True)
self.__recursive_datasets = self.zfs_node.get_datasets(names[1:], force_exists=True)
return self.__recursive_datasets
@ -632,14 +620,13 @@ class ZfsDataset:
"""
if self.__datasets is None:
self.debug("Getting all datasets under us")
names = self.zfs_node.run(tab_split=False, readonly=True, valid_exitcodes=[0], cmd=[
"zfs", "list", "-r", "-t", types, "-o", "name", "-H", "-d", "1", self.name
])
self.__datasets=self.zfs_node.get_datasets(names[1:], force_exists=True)
self.__datasets = self.zfs_node.get_datasets(names[1:], force_exists=True)
return self.__datasets
@ -804,7 +791,7 @@ class ZfsDataset:
if self.properties['encryption'] != 'off' and self.properties['keystatus'] == 'unavailable':
return
self.zfs_node.run(["zfs", "mount", self.name], valid_exitcodes=[0,1])
self.zfs_node.run(["zfs", "mount", self.name], valid_exitcodes=[0, 1])
def transfer_snapshot(self, target_snapshot, features, prev_snapshot, show_progress,
filter_properties, set_properties, ignore_recv_exit_code, resume_token,
@ -960,7 +947,6 @@ class ZfsDataset:
# target_dataset.error("Cant find common snapshot with source.")
raise (Exception("Cant find common snapshot with target."))
def find_incompatible_snapshots(self, common_snapshot, raw):
"""returns a list[snapshots] that is incompatible for a zfs recv onto
the common_snapshot. all direct followup snapshots with written=0 are
@ -1006,7 +992,6 @@ class ZfsDataset:
return allowed_filter_properties, allowed_set_properties
def _pre_clean(self, source_common_snapshot, target_dataset, source_obsoletes, target_obsoletes, target_transfers):
"""cleanup old stuff before starting snapshot syncing
@ -1021,7 +1006,7 @@ class ZfsDataset:
# on source: delete all obsoletes that are not in target_transfers (except common snapshot)
for source_snapshot in self.snapshots:
if (source_snapshot in source_obsoletes
and source_common_snapshot!=source_snapshot
and source_common_snapshot != source_snapshot
and source_snapshot.find_snapshot_in_list(target_transfers) is None):
source_snapshot.destroy()
@ -1029,7 +1014,8 @@ class ZfsDataset:
if target_dataset.exists:
for target_snapshot in target_dataset.snapshots:
if (target_snapshot in target_obsoletes) \
and (not source_common_snapshot or (target_snapshot.snapshot_name != source_common_snapshot.snapshot_name)):
and (not source_common_snapshot or (
target_snapshot.snapshot_name != source_common_snapshot.snapshot_name)):
if target_snapshot.exists:
target_snapshot.destroy()
@ -1089,36 +1075,40 @@ class ZfsDataset:
# start with snapshots that already exist, minus imcompatibles
if target_dataset.exists:
possible_target_snapshots = [snapshot for snapshot in target_dataset.snapshots if snapshot not in incompatible_target_snapshots]
possible_target_snapshots = [snapshot for snapshot in target_dataset.snapshots if
snapshot not in incompatible_target_snapshots]
else:
possible_target_snapshots = []
# add all snapshots from the source, starting after the common snapshot if it exists
if source_common_snapshot:
source_snapshot=self.find_next_snapshot(source_common_snapshot )
source_snapshot = self.find_next_snapshot(source_common_snapshot)
else:
if self.snapshots:
source_snapshot=self.snapshots[0]
source_snapshot = self.snapshots[0]
else:
source_snapshot=None
source_snapshot = None
while source_snapshot:
# we want it?
if (also_other_snapshots or source_snapshot.is_ours()) and not source_snapshot.is_excluded:
# create virtual target snapshot
target_snapshot=target_dataset.zfs_node.get_dataset(target_dataset.filesystem_name + "@" + source_snapshot.snapshot_name, force_exists=False)
target_snapshot = target_dataset.zfs_node.get_dataset(
target_dataset.filesystem_name + "@" + source_snapshot.snapshot_name, force_exists=False)
possible_target_snapshots.append(target_snapshot)
source_snapshot = self.find_next_snapshot(source_snapshot)
### 3: Let the thinner decide what it wants by looking at all the possible target_snaphots at once
if possible_target_snapshots:
(target_keeps, target_obsoletes)=target_dataset.zfs_node.thin_list(possible_target_snapshots, keep_snapshots=[possible_target_snapshots[-1]])
(target_keeps, target_obsoletes) = target_dataset.zfs_node.thin_list(possible_target_snapshots,
keep_snapshots=[
possible_target_snapshots[-1]])
else:
target_keeps = []
target_obsoletes = []
### 4: Look at what the thinner wants and create a list of snapshots we still need to transfer
target_transfers=[]
target_transfers = []
for target_keep in target_keeps:
if not target_keep.exists:
target_transfers.append(target_keep)
@ -1188,7 +1178,7 @@ class ZfsDataset:
# keep data encrypted by sending it raw (including properties)
raw = True
(source_common_snapshot, source_obsoletes, target_obsoletes, target_transfers,
(source_common_snapshot, source_obsoletes, target_obsoletes, target_transfers,
incompatible_target_snapshots) = \
self._plan_sync(target_dataset=target_dataset, also_other_snapshots=also_other_snapshots,
guid_check=guid_check, raw=raw)
@ -1203,7 +1193,7 @@ class ZfsDataset:
target_dataset.handle_incompatible_snapshots(incompatible_target_snapshots, destroy_incompatible)
# now actually transfer the snapshots, if we want
if no_send or len(target_transfers)==0:
if no_send or len(target_transfers) == 0:
return
# check if we can resume
@ -1221,11 +1211,11 @@ class ZfsDataset:
# now actually transfer the snapshots
do_rollback = rollback
prev_source_snapshot=source_common_snapshot
prev_target_snapshot=target_dataset.find_snapshot(source_common_snapshot)
prev_source_snapshot = source_common_snapshot
prev_target_snapshot = target_dataset.find_snapshot(source_common_snapshot)
for target_snapshot in target_transfers:
source_snapshot=self.find_snapshot(target_snapshot)
source_snapshot = self.find_snapshot(target_snapshot)
# do the rollback, one time at first transfer
if do_rollback:
@ -1312,8 +1302,8 @@ class ZfsDataset:
self.zfs_node.run(cmd=cmd, valid_exitcodes=[0])
#invalidate cache
self.__properties=None
# invalidate cache
self.__properties = None
def inherit(self, prop):
"""inherit zfs property"""
@ -1326,5 +1316,5 @@ class ZfsDataset:
self.zfs_node.run(cmd=cmd, valid_exitcodes=[0])
#invalidate cache
self.__properties=None
# invalidate cache
self.__properties = None
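
The __snapshots/__properties attributes above all follow the same lazy-cache idiom: compute on first use, reuse afterwards, and reset everything in invalidate_cache(). A simplified illustration (LazyExample is not part of the code):

class LazyExample:
    def __init__(self):
        self._exists_check = None

    @property
    def exists(self):
        # compute once, then serve from cache
        if self._exists_check is None:
            self._exists_check = self._expensive_zfs_list()
        return self._exists_check

    def invalidate_cache(self):
        self._exists_check = None

    def _expensive_zfs_list(self):
        return True  # placeholder for the real "zfs list <name>" call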

View File

@ -60,4 +60,4 @@ class ZfsPool():
if value == 'enabled' or value == 'active':
ret.append(feature)
return ret
return ret

View File

@ -7,69 +7,72 @@
COMPRESS_CMDS = {
'gzip': {
'cmd': 'gzip',
'args': [ '-3' ],
'args': ['-3'],
'dcmd': 'zcat',
'dargs': [],
},
'pigz-fast': {
'cmd': 'pigz',
'args': [ '-3' ],
'args': ['-3'],
'dcmd': 'pigz',
'dargs': [ '-dc' ],
'dargs': ['-dc'],
},
'pigz-slow': {
'cmd': 'pigz',
'args': [ '-9' ],
'args': ['-9'],
'dcmd': 'pigz',
'dargs': [ '-dc' ],
'dargs': ['-dc'],
},
'zstd-fast': {
'cmd': 'zstdmt',
'args': [ '-3' ],
'args': ['-3'],
'dcmd': 'zstdmt',
'dargs': [ '-dc' ],
'dargs': ['-dc'],
},
'zstd-slow': {
'cmd': 'zstdmt',
'args': [ '-19' ],
'args': ['-19'],
'dcmd': 'zstdmt',
'dargs': [ '-dc' ],
'dargs': ['-dc'],
},
'zstd-adapt': {
'cmd': 'zstdmt',
'args': [ '--adapt' ],
'args': ['--adapt'],
'dcmd': 'zstdmt',
'dargs': [ '-dc' ],
'dargs': ['-dc'],
},
'xz': {
'cmd': 'xz',
'args': [],
'dcmd': 'xz',
'dargs': [ '-d' ],
'dargs': ['-d'],
},
'lzo': {
'cmd': 'lzop',
'args': [],
'dcmd': 'lzop',
'dargs': [ '-dfc' ],
'dargs': ['-dfc'],
},
'lz4': {
'cmd': 'lz4',
'args': [],
'dcmd': 'lz4',
'dargs': [ '-dc' ],
'dargs': ['-dc'],
},
}
def compress_cmd(compressor):
ret=[ COMPRESS_CMDS[compressor]['cmd'] ]
ret.extend( COMPRESS_CMDS[compressor]['args'])
ret = [COMPRESS_CMDS[compressor]['cmd']]
ret.extend(COMPRESS_CMDS[compressor]['args'])
return ret
def decompress_cmd(compressor):
ret= [ COMPRESS_CMDS[compressor]['dcmd'] ]
ret = [COMPRESS_CMDS[compressor]['dcmd']]
ret.extend(COMPRESS_CMDS[compressor]['dargs'])
return ret
def choices():
return COMPRESS_CMDS.keys()
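
Usage sketch of the two helpers above, with output for the 'zstd-fast' entry of COMPRESS_CMDS (the import path is assumed from the package layout):

from zfs_autobackup.compressors import compress_cmd, decompress_cmd

print(compress_cmd('zstd-fast'))    # ['zstdmt', '-3']
print(decompress_cmd('zstd-fast'))  # ['zstdmt', '-dc']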

View File

@ -1,4 +1,3 @@
# NOTE: surprisingly sha1 in via python3 is faster than the native sha1sum utility, even in the way we use below!
import os
import platform
@ -9,19 +8,18 @@ from datetime import datetime
def tmp_name(suffix=""):
"""create temporary name unique to this process and node. always retruns the same result during the same execution"""
#we could use uuids but those are ugly and confusing
name="{}-{}-{}".format(
os.path.basename(sys.argv[0]).replace(" ","_"),
# we could use uuids but those are ugly and confusing
name = "{}-{}-{}".format(
os.path.basename(sys.argv[0]).replace(" ", "_"),
platform.node(),
os.getpid())
name=name+suffix
name = name + suffix
return name
def get_tmp_clone_name(snapshot):
pool=snapshot.zfs_node.get_pool(snapshot)
return pool.name+"/"+tmp_name()
pool = snapshot.zfs_node.get_pool(snapshot)
return pool.name + "/" + tmp_name()
def output_redir():
@ -33,10 +31,12 @@ def output_redir():
os.dup2(devnull, sys.stdout.fileno())
os.dup2(devnull, sys.stderr.fileno())
def sigpipe_handler(sig, stack):
#redir output so we dont get more SIGPIPES during cleanup. (which my try to write to stdout)
# redir output so we dont get more SIGPIPES during cleanup. (which my try to write to stdout)
output_redir()
#deb('redir')
# deb('redir')
# def check_output():
# """make sure stdout still functions. if its broken, this will trigger a SIGPIPE which will be handled by the sigpipe_handler."""
@ -55,9 +55,11 @@ def sigpipe_handler(sig, stack):
# This function will be mocked during unit testing.
datetime_now_mock=None
datetime_now_mock = None
def datetime_now(utc):
if datetime_now_mock is None:
return( datetime.utcnow() if utc else datetime.now())
return (datetime.utcnow() if utc else datetime.now())
else:
return datetime_now_mock
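
A short sketch of how the datetime_now_mock hook above is meant to be used (the import path is assumed; this mirrors the "mocked during unit testing" comment):

from datetime import datetime
import zfs_autobackup.util as util

util.datetime_now_mock = datetime(2024, 9, 24, 18, 22, 17)  # freeze "now"
print(util.datetime_now(utc=False))  # 2024-09-24 18:22:17
print(util.datetime_now(utc=True))   # same frozen value while the mock is set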