reformatting. changed version number

Your Name 2024-09-24 18:22:17 +02:00
parent 5f2e686a1b
commit 07542365ac
18 changed files with 268 additions and 276 deletions

View File

@@ -28,7 +28,6 @@ class BlockHasher():
self.stats_total_bytes = 0
def _seek_next_chunk(self, fh, fsize):
"""seek fh to next chunk and update skip counter.
returns chunk_nr
@@ -67,7 +66,6 @@ class BlockHasher():
yields nothing for empty files.
"""
with open(fname, "rb") as fh:
fh.seek(0, os.SEEK_END)

View File

@@ -10,7 +10,7 @@ class CliBase(object):
Overridden in subclasses that add stuff for the specific programs."""
# also used by setup.py
-VERSION = "3.3-beta.3"
+VERSION = "3.4-beta.1"
HEADER = "{} v{} - (c)2022 E.H.Eefting (edwin@datux.nl)".format(os.path.basename(sys.argv[0]), VERSION)
def __init__(self, argv, print_arguments=True):
@@ -85,7 +85,6 @@ class CliBase(object):
group.add_argument('--version', action='store_true',
help='Show version.')
return parser
def verbose(self, txt):

View File

@@ -159,7 +159,6 @@ class CmdPipe:
else:
eof_count = eof_count + 1
if item.process.poll() is not None:
done_count = done_count + 1
@@ -167,8 +166,6 @@ class CmdPipe:
if eof_count == len(selectors) and done_count == len(self.items):
break
def __create(self):
"""create actual processes, do piping and return selectors."""

View File

@@ -139,7 +139,8 @@ class ExecuteNode(LogStub):
self.debug("EXIT > {}".format(exit_code))
if (valid_exitcodes != []) and (exit_code not in valid_exitcodes):
-self.error("Command \"{}\" returned exit code {} (valid codes: {})".format(cmd_item, exit_code, valid_exitcodes))
+self.error("Command \"{}\" returned exit code {} (valid codes: {})".format(cmd_item, exit_code,
+    valid_exitcodes))
return False
return True
@@ -160,7 +161,8 @@ class ExecuteNode(LogStub):
self._parse_stdout(line)
# add shell command and handlers to pipe
-cmd_item=CmdItem(cmd=self._shell_cmd(cmd, cwd), readonly=readonly, stderr_handler=stderr_handler, exit_handler=exit_handler, shell=self.is_local(), stdout_handler=stdout_handler)
+cmd_item = CmdItem(cmd=self._shell_cmd(cmd, cwd), readonly=readonly, stderr_handler=stderr_handler,
+    exit_handler=exit_handler, shell=self.is_local(), stdout_handler=stdout_handler)
cmd_pipe.add(cmd_item)
# return CmdPipe instead of executing?
@@ -183,7 +185,8 @@ class ExecuteNode(LogStub):
else:
return output_lines
-def script(self, lines, inp=None, stdout_handler=None, stderr_handler=None, exit_handler=None, valid_exitcodes=None, readonly=False, hide_errors=False, pipe=False):
+def script(self, lines, inp=None, stdout_handler=None, stderr_handler=None, exit_handler=None, valid_exitcodes=None,
+    readonly=False, hide_errors=False, pipe=False):
"""Run a multiline script on the node.
This is much more low level than run() and allows for finer grained control.
@@ -260,7 +263,9 @@ class ExecuteNode(LogStub):
cmd.append("\n".join(lines))
# add shell command and handlers to pipe
-cmd_item=CmdItem(cmd=cmd, readonly=readonly, stderr_handler=internal_stderr_handler, exit_handler=internal_exit_handler, stdout_handler=internal_stdout_handler, shell=self.is_local())
+cmd_item = CmdItem(cmd=cmd, readonly=readonly, stderr_handler=internal_stderr_handler,
+    exit_handler=internal_exit_handler, stdout_handler=internal_stdout_handler,
+    shell=self.is_local())
cmd_pipe.add(cmd_item)
self.debug("SCRIPT > {}".format(cmd_pipe))

View File

@@ -3,6 +3,7 @@ from __future__ import print_function
import sys
class LogConsole:
"""Log-class that outputs to console, adding colors if needed"""

View File

@@ -1,4 +1,3 @@
from .ThinnerRule import ThinnerRule
@@ -48,7 +47,6 @@ class Thinner:
now: if specified, use this time as current time
"""
# always keep a number of the last objets?
if self.always_keep:
# all of them

View File

@@ -34,7 +34,6 @@ class TreeHasher():
for (chunk_nr, hash) in self.block_hasher.generate(file_path):
yield (os.path.relpath(file_path, start_path), chunk_nr, hash)
def compare(self, start_path, generator):
"""reads from generator and compares blocks
@@ -48,13 +47,9 @@ class TreeHasher():
def filter_file_name(file_name, chunk_nr, hexdigest):
return (chunk_nr, hexdigest)
for file_name, group_generator in itertools.groupby(generator, lambda x: x[0]):
count = count + 1
block_generator = itertools.starmap(filter_file_name, group_generator)
-for ( chunk_nr, compare_hexdigest, actual_hexdigest) in self.block_hasher.compare(os.path.join(start_path,file_name), block_generator):
+for (chunk_nr, compare_hexdigest, actual_hexdigest) in self.block_hasher.compare(
+    os.path.join(start_path, file_name), block_generator):
yield (file_name, chunk_nr, compare_hexdigest, actual_hexdigest)
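
For context, the groupby/starmap idiom being rewrapped in this hunk reads fine in isolation; a minimal, self-contained sketch with made-up sample records (the file names and hash strings below are illustrative only, not from the commit):

    import itertools

    # Tuples of (file_name, chunk_nr, hexdigest), already ordered per file as the
    # tree hasher emits them; groupby() relies on that ordering.
    records = [
        ("a.txt", 0, "aaaa"),
        ("a.txt", 1, "bbbb"),
        ("b.txt", 0, "cccc"),
    ]

    def filter_file_name(file_name, chunk_nr, hexdigest):
        # strip the file name so a per-file comparator only sees (chunk_nr, hexdigest)
        return (chunk_nr, hexdigest)

    for file_name, group_generator in itertools.groupby(records, lambda x: x[0]):
        block_generator = itertools.starmap(filter_file_name, group_generator)
        print(file_name, list(block_generator))
    # a.txt [(0, 'aaaa'), (1, 'bbbb')]
    # b.txt [(0, 'cccc')]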

View File

@@ -49,7 +49,8 @@ class ZfsAuto(CliBase):
self.exclude_paths.append(args.target_path)
else:
if not args.exclude_received and not args.include_received:
-self.verbose("NOTE: Source and target are on the same host, adding --exclude-received to commandline. (use --include-received to overrule)")
+self.verbose(
+    "NOTE: Source and target are on the same host, adding --exclude-received to commandline. (use --include-received to overrule)")
args.exclude_received = True
if args.test:
@@ -63,7 +64,8 @@ class ZfsAuto(CliBase):
dt = datetime_now(args.utc)
self.verbose("")
-self.verbose("Current time {} : {}".format(args.utc and "UTC" or " ", dt.strftime("%Y-%m-%d %H:%M:%S")))
+self.verbose(
+    "Current time {} : {}".format(args.utc and "UTC" or " ", dt.strftime("%Y-%m-%d %H:%M:%S")))
self.verbose("Selecting dataset property : {}".format(self.property_name))
self.verbose("Snapshot format : {}".format(self.snapshot_time_format))
@@ -82,8 +84,6 @@ class ZfsAuto(CliBase):
parser.add_argument('target_path', metavar='TARGET-PATH', default=None, nargs='?',
help='Target ZFS filesystem (optional)')
# SSH options
group = parser.add_argument_group("SSH options")
group.add_argument('--ssh-config', metavar='CONFIG-FILE', default=None, help='Custom ssh client config')
@@ -112,14 +112,15 @@ class ZfsAuto(CliBase):
group.add_argument('--include-received', action='store_true',
help=argparse.SUPPRESS)
def regex_argument_type(input_line):
"""Parses regex arguments into re.Pattern objects"""
try:
return re.compile(input_line)
except:
raise ValueError("Could not parse argument '{}' as a regular expression".format(input_line))
-group.add_argument('--exclude-snapshot-pattern', action='append', default=[], type=regex_argument_type, help="Regular expression to match snapshots that will be ignored.")
+group.add_argument('--exclude-snapshot-pattern', action='append', default=[], type=regex_argument_type,
+    help="Regular expression to match snapshots that will be ignored.")
return parser
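
As an aside, the regex_argument_type converter shown unchanged in this hunk is plain argparse usage; a minimal standalone sketch follows (the pattern string and parse_args call are illustrative, and the bare except is narrowed to re.error here for clarity):

    import argparse
    import re

    def regex_argument_type(input_line):
        """Parses regex arguments into re.Pattern objects."""
        try:
            return re.compile(input_line)
        except re.error:
            raise ValueError("Could not parse argument '{}' as a regular expression".format(input_line))

    parser = argparse.ArgumentParser()
    parser.add_argument('--exclude-snapshot-pattern', action='append', default=[], type=regex_argument_type,
                        help="Regular expression to match snapshots that will be ignored.")

    args = parser.parse_args(['--exclude-snapshot-pattern', '.*_manual'])
    print(args.exclude_snapshot_pattern[0].search("pool/data@2024_manual"))  # prints a match object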

View File

@@ -1,4 +1,3 @@
import argparse
from signal import signal, SIGPIPE
from .util import output_redir, sigpipe_handler, datetime_now
@@ -12,6 +11,7 @@ from .ZfsDataset import ZfsDataset
from .ZfsNode import ZfsNode
from .ThinnerRule import ThinnerRule
class ZfsAutobackup(ZfsAuto):
"""The main zfs-autobackup class. Start here, at run() :)"""
@@ -73,7 +73,6 @@ class ZfsAutobackup(ZfsAuto):
group.add_argument('--no-guid-check', action='store_true',
help='Dont check guid of common snapshots. (faster)')
group = parser.add_argument_group("Transfer options")
group.add_argument('--no-send', action='store_true',
help='Don\'t transfer snapshots (useful for cleanups, or if you want a separate send-cronjob)')
@@ -341,7 +340,11 @@ class ZfsAutobackup(ZfsAuto):
source_dataset.debug("-> {}".format(target_name))
if target_name in target_datasets:
-raise Exception("Target collision: Target path {} encountered twice, due to: {} and {}".format(target_name, source_dataset, target_datasets[target_name]))
+raise Exception(
+    "Target collision: Target path {} encountered twice, due to: {} and {}".format(target_name,
+        source_dataset,
+        target_datasets[
+            target_name]))
target_datasets[target_name] = source_dataset
@@ -397,7 +400,8 @@ class ZfsAutobackup(ZfsAuto):
destroy_incompatible=self.args.destroy_incompatible,
send_pipes=send_pipes, recv_pipes=recv_pipes,
decrypt=self.args.decrypt, encrypt=self.args.encrypt,
-zfs_compressed=self.args.zfs_compressed, force=self.args.force, guid_check=not self.args.no_guid_check)
+zfs_compressed=self.args.zfs_compressed, force=self.args.force,
+    guid_check=not self.args.no_guid_check)
except Exception as e:
fail_count = fail_count + 1
@@ -406,7 +410,6 @@ class ZfsAutobackup(ZfsAuto):
self.verbose("Debug mode, aborting on first error")
raise
target_path_dataset = target_node.get_dataset(self.args.target_path)
if not self.args.no_thinning:
self.thin_missing_targets(target_dataset=target_path_dataset, used_target_datasets=target_datasets)
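
The collision check rewrapped above is just a dict lookup before insertion; a self-contained sketch with hypothetical dataset names (not taken from the commit):

    # target_datasets maps each computed target path to the source dataset that claimed it.
    target_datasets = {}

    def register(target_name, source_dataset):
        if target_name in target_datasets:
            raise Exception(
                "Target collision: Target path {} encountered twice, due to: {} and {}".format(
                    target_name, source_dataset, target_datasets[target_name]))
        target_datasets[target_name] = source_dataset

    register("backup/data", "pool/data")
    register("backup/home", "pool/home")
    # register("backup/data", "otherpool/data")  # would raise the collision exception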

View File

@@ -109,7 +109,6 @@ def verify_filesystem(source_snapshot, source_mnt, target_snapshot, target_mnt,
# return hashed
# def deacitvate_volume_snapshot(snapshot):
# clone_name=get_tmp_clone_name(snapshot)
# clone=snapshot.zfs_node.get_dataset(clone_name)
@@ -199,7 +198,8 @@ class ZfsAutoverify(ZfsAuto):
elif source_dataset.properties['type'] == "volume":
verify_volume(source_dataset, source_snapshot, target_dataset, target_snapshot)
else:
-raise(Exception("{} has unknown type {}".format(source_dataset, source_dataset.properties['type'])))
+raise (
+    Exception("{} has unknown type {}".format(source_dataset, source_dataset.properties['type'])))
except Exception as e:
@@ -224,7 +224,6 @@ class ZfsAutoverify(ZfsAuto):
target_node = None
target_mnt = None
try:
################ create source zfsNode
@@ -302,8 +301,6 @@ class ZfsAutoverify(ZfsAuto):
cleanup_mountpoint(target_node, target_mnt)
def cli():
import sys

View File

@@ -27,14 +27,16 @@ class ZfsCheck(CliBase):
parser = super(ZfsCheck, self).get_parser()
# positional arguments
-parser.add_argument('target', metavar='TARGET', default=None, nargs='?', help='Target to checkum. (can be blockdevice, directory or ZFS snapshot)')
+parser.add_argument('target', metavar='TARGET', default=None, nargs='?',
+    help='Target to checkum. (can be blockdevice, directory or ZFS snapshot)')
group = parser.add_argument_group('Checker options')
group.add_argument('--block-size', metavar="BYTES", default=4096, help="Read block-size, default %(default)s",
type=int)
group.add_argument('--count', metavar="COUNT", default=int((100 * (1024 ** 2)) / 4096),
-help="Hash chunks of COUNT blocks. Default %(default)s . (CHUNK size is BYTES * COUNT) ", type=int) # 100MiB
+help="Hash chunks of COUNT blocks. Default %(default)s . (CHUNK size is BYTES * COUNT) ",
+    type=int) # 100MiB
group.add_argument('--check', '-c', metavar="FILE", default=None, const=True, nargs='?',
help="Read hashes from STDIN (or FILE) and compare them")
@@ -61,7 +63,6 @@ class ZfsCheck(CliBase):
self.verbose("Skip chunk count : {} (checks {:.2f}% of data)".format(args.skip, 100 / (1 + args.skip)))
self.verbose("")
return args
def prepare_zfs_filesystem(self, snapshot):
@@ -106,7 +107,9 @@ class ZfsCheck(CliBase):
time.sleep(1)
-raise (Exception("Timeout while waiting for /dev entry to appear. (looking in: {}). Hint: did you forget to load the encryption key?".format(locations)))
+raise (Exception(
+    "Timeout while waiting for /dev entry to appear. (looking in: {}). Hint: did you forget to load the encryption key?".format(
+        locations)))
def cleanup_zfs_volume(self, snapshot):
"""destroys temporary volume snapshot"""

View File

@@ -1,4 +1,3 @@
import re
from datetime import datetime
import sys
@@ -40,7 +39,6 @@ class ZfsDataset:
self.invalidate_cache()
self.force_exists = force_exists
def invalidate_cache(self):
"""clear caches"""
# CachedProperty.clear(self)
@@ -93,7 +91,6 @@ class ZfsDataset:
"""
self.zfs_node.debug("{}: {}".format(self.name, txt))
def split_path(self):
"""return the path elements as an array"""
return self.name.split("/")
@@ -147,14 +144,11 @@ class ZfsDataset:
if not self.is_snapshot:
return False
for pattern in self.zfs_node.exclude_snapshot_patterns:
if pattern.search(self.name) is not None:
self.debug("Excluded (path matches snapshot exclude pattern)")
return True
def is_selected(self, value, source, inherited, exclude_received, exclude_paths, exclude_unchanged):
"""determine if dataset should be selected for backup (called from
ZfsNode)
@@ -454,7 +448,6 @@ class ZfsDataset:
seconds = time.mktime(dt.timetuple())
return seconds
# def add_virtual_snapshot(self, snapshot):
# """pretend a snapshot exists (usefull in test mode)"""
#
@@ -476,8 +469,6 @@ class ZfsDataset:
# cached?
if self.__snapshots is None:
self.debug("Getting snapshots")
cmd = [
@@ -486,7 +477,6 @@ class ZfsDataset:
self.__snapshots = self.zfs_node.get_datasets(self.zfs_node.run(cmd=cmd, readonly=True), force_exists=True)
return self.__snapshots
def cache_snapshot(self, snapshot):
@@ -571,7 +561,6 @@ class ZfsDataset:
"""get number of bytes written since our last snapshot"""
if self.__written_since_ours is None:
latest_snapshot = self.our_snapshots[-1]
self.debug("Getting bytes written since our last snapshot")
@@ -612,7 +601,6 @@ class ZfsDataset:
"""
if self.__recursive_datasets is None:
self.debug("Getting all recursive datasets under us")
names = self.zfs_node.run(tab_split=False, readonly=True, valid_exitcodes=[0], cmd=[
@@ -632,7 +620,6 @@ class ZfsDataset:
"""
if self.__datasets is None:
self.debug("Getting all datasets under us")
names = self.zfs_node.run(tab_split=False, readonly=True, valid_exitcodes=[0], cmd=[
@@ -960,7 +947,6 @@ class ZfsDataset:
# target_dataset.error("Cant find common snapshot with source.")
raise (Exception("Cant find common snapshot with target."))
def find_incompatible_snapshots(self, common_snapshot, raw):
"""returns a list[snapshots] that is incompatible for a zfs recv onto
the common_snapshot. all direct followup snapshots with written=0 are
@@ -1006,7 +992,6 @@ class ZfsDataset:
return allowed_filter_properties, allowed_set_properties
def _pre_clean(self, source_common_snapshot, target_dataset, source_obsoletes, target_obsoletes, target_transfers):
"""cleanup old stuff before starting snapshot syncing
@@ -1029,7 +1014,8 @@ class ZfsDataset:
if target_dataset.exists:
for target_snapshot in target_dataset.snapshots:
if (target_snapshot in target_obsoletes) \
-and (not source_common_snapshot or (target_snapshot.snapshot_name != source_common_snapshot.snapshot_name)):
+and (not source_common_snapshot or (
+    target_snapshot.snapshot_name != source_common_snapshot.snapshot_name)):
if target_snapshot.exists:
target_snapshot.destroy()
@@ -1089,7 +1075,8 @@ class ZfsDataset:
# start with snapshots that already exist, minus imcompatibles
if target_dataset.exists:
-possible_target_snapshots = [snapshot for snapshot in target_dataset.snapshots if snapshot not in incompatible_target_snapshots]
+possible_target_snapshots = [snapshot for snapshot in target_dataset.snapshots if
+    snapshot not in incompatible_target_snapshots]
else:
possible_target_snapshots = []
@@ -1106,13 +1093,16 @@ class ZfsDataset:
# we want it?
if (also_other_snapshots or source_snapshot.is_ours()) and not source_snapshot.is_excluded:
# create virtual target snapshot
-target_snapshot=target_dataset.zfs_node.get_dataset(target_dataset.filesystem_name + "@" + source_snapshot.snapshot_name, force_exists=False)
+target_snapshot = target_dataset.zfs_node.get_dataset(
+    target_dataset.filesystem_name + "@" + source_snapshot.snapshot_name, force_exists=False)
possible_target_snapshots.append(target_snapshot)
source_snapshot = self.find_next_snapshot(source_snapshot)
### 3: Let the thinner decide what it wants by looking at all the possible target_snaphots at once
if possible_target_snapshots:
-(target_keeps, target_obsoletes)=target_dataset.zfs_node.thin_list(possible_target_snapshots, keep_snapshots=[possible_target_snapshots[-1]])
+(target_keeps, target_obsoletes) = target_dataset.zfs_node.thin_list(possible_target_snapshots,
+    keep_snapshots=[
+        possible_target_snapshots[-1]])
else:
target_keeps = []
target_obsoletes = []

View File

@@ -61,15 +61,18 @@ COMPRESS_CMDS = {
},
}
def compress_cmd(compressor):
ret = [COMPRESS_CMDS[compressor]['cmd']]
ret.extend(COMPRESS_CMDS[compressor]['args'])
return ret
def decompress_cmd(compressor):
ret = [COMPRESS_CMDS[compressor]['dcmd']]
ret.extend(COMPRESS_CMDS[compressor]['dargs'])
return ret
def choices():
return COMPRESS_CMDS.keys()
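
For readers skimming the diff: compress_cmd()/decompress_cmd() simply turn a table entry into an argv list. A hedged sketch follows; the real COMPRESS_CMDS table sits above this hunk and is not shown, so the 'zstd' entry below is an assumed example, not the project's actual values:

    # Illustrative table entry (assumed values); the helpers mirror the hunk above.
    COMPRESS_CMDS = {
        'zstd': {
            'cmd': 'zstd',     # compressor executable (assumed example)
            'args': ['-3'],    # compression arguments (assumed example)
            'dcmd': 'zstd',    # decompressor executable (assumed example)
            'dargs': ['-d'],   # decompression arguments (assumed example)
        },
    }

    def compress_cmd(compressor):
        ret = [COMPRESS_CMDS[compressor]['cmd']]
        ret.extend(COMPRESS_CMDS[compressor]['args'])
        return ret

    def decompress_cmd(compressor):
        ret = [COMPRESS_CMDS[compressor]['dcmd']]
        ret.extend(COMPRESS_CMDS[compressor]['dargs'])
        return ret

    print(compress_cmd('zstd'))    # ['zstd', '-3']
    print(decompress_cmd('zstd'))  # ['zstd', '-d']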

View File

@@ -1,4 +1,3 @@
# NOTE: surprisingly sha1 in via python3 is faster than the native sha1sum utility, even in the way we use below!
import os
import platform
@@ -23,7 +22,6 @@ def get_tmp_clone_name(snapshot):
return pool.name + "/" + tmp_name()
def output_redir():
"""use this after a BrokenPipeError to prevent further exceptions.
Redirects stdout/err to /dev/null
@@ -33,11 +31,13 @@ def output_redir():
os.dup2(devnull, sys.stdout.fileno())
os.dup2(devnull, sys.stderr.fileno())
def sigpipe_handler(sig, stack):
# redir output so we dont get more SIGPIPES during cleanup. (which my try to write to stdout)
output_redir()
# deb('redir')
# def check_output():
# """make sure stdout still functions. if its broken, this will trigger a SIGPIPE which will be handled by the sigpipe_handler."""
# try:
@@ -56,6 +56,8 @@ def sigpipe_handler(sig, stack):
datetime_now_mock = None
def datetime_now(utc):
if datetime_now_mock is None:
return (datetime.utcnow() if utc else datetime.now())
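
Finally, the datetime_now() helper visible in the last hunk supports a module-level test hook. A sketch of how that plausibly fits together; the else-branch and the test usage are assumptions, since they are outside the shown hunk:

    from datetime import datetime

    # Module-level hook: tests can set this to pin "now" to a fixed value.
    datetime_now_mock = None

    def datetime_now(utc):
        if datetime_now_mock is None:
            return (datetime.utcnow() if utc else datetime.now())
        # Assumed behaviour (not shown in the diff): return the pinned value instead.
        return datetime_now_mock

    # Hypothetical test usage:
    datetime_now_mock = datetime(2024, 9, 24, 18, 22, 17)
    assert datetime_now(False) == datetime(2024, 9, 24, 18, 22, 17)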