reformatting. changed version number

Your Name 2024-09-24 18:22:17 +02:00
parent 5f2e686a1b
commit 07542365ac
18 changed files with 268 additions and 276 deletions

View File

@ -28,7 +28,6 @@ class BlockHasher():
self.stats_total_bytes = 0
def _seek_next_chunk(self, fh, fsize):
"""seek fh to next chunk and update skip counter.
returns chunk_nr
@ -67,7 +66,6 @@ class BlockHasher():
yields nothing for empty files.
"""
with open(fname, "rb") as fh:
fh.seek(0, os.SEEK_END)
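
Note: the hunk above shows BlockHasher seeking to the end of the file to learn its size before generating per-chunk hashes. A minimal sketch of that chunked-hashing idea, not this class's exact implementation (the chunk size and sha1 algorithm here are assumptions):

```python
import hashlib
import os

def chunked_hashes(fname, chunk_size=4096 * 100):
    """Yield (chunk_nr, hexdigest) per chunk of a file; empty files yield nothing."""
    with open(fname, "rb") as fh:
        fh.seek(0, os.SEEK_END)      # determine file size, as in the hunk above
        fsize = fh.tell()
        fh.seek(0)
        chunk_nr = 0
        while fh.tell() < fsize:
            hasher = hashlib.sha1()  # assumed hash algorithm
            hasher.update(fh.read(chunk_size))
            yield (chunk_nr, hasher.hexdigest())
            chunk_nr += 1
```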

View File

@ -10,7 +10,7 @@ class CliBase(object):
Overridden in subclasses that add stuff for the specific programs."""
# also used by setup.py
VERSION = "3.3-beta.3"
VERSION = "3.4-beta.1"
HEADER = "{} v{} - (c)2022 E.H.Eefting (edwin@datux.nl)".format(os.path.basename(sys.argv[0]), VERSION)
def __init__(self, argv, print_arguments=True):
@ -85,7 +85,6 @@ class CliBase(object):
group.add_argument('--version', action='store_true',
help='Show version.')
return parser
def verbose(self, txt):
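
Note: the version bump lives in CliBase because, per the comment, the constant is also used by setup.py. A hypothetical sketch of that single-source-of-version pattern (illustrative only, not necessarily how this repository's setup.py reads it):

```python
# hypothetical setup.py excerpt: reuse the one VERSION constant
from setuptools import setup
from zfs_autobackup.CliBase import CliBase

setup(
    name="zfs-autobackup",
    version=CliBase.VERSION,  # "3.4-beta.1" after this commit
)
```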

View File

@ -159,7 +159,6 @@ class CmdPipe:
else:
eof_count = eof_count + 1
if item.process.poll() is not None:
done_count = done_count + 1
@ -167,8 +166,6 @@ class CmdPipe:
if eof_count == len(selectors) and done_count == len(self.items):
break
def __create(self):
"""create actual processes, do piping and return selectors."""

View File

@ -139,7 +139,8 @@ class ExecuteNode(LogStub):
self.debug("EXIT > {}".format(exit_code))
if (valid_exitcodes != []) and (exit_code not in valid_exitcodes):
self.error("Command \"{}\" returned exit code {} (valid codes: {})".format(cmd_item, exit_code, valid_exitcodes))
self.error("Command \"{}\" returned exit code {} (valid codes: {})".format(cmd_item, exit_code,
valid_exitcodes))
return False
return True
@ -160,7 +161,8 @@ class ExecuteNode(LogStub):
self._parse_stdout(line)
# add shell command and handlers to pipe
cmd_item=CmdItem(cmd=self._shell_cmd(cmd, cwd), readonly=readonly, stderr_handler=stderr_handler, exit_handler=exit_handler, shell=self.is_local(), stdout_handler=stdout_handler)
cmd_item = CmdItem(cmd=self._shell_cmd(cmd, cwd), readonly=readonly, stderr_handler=stderr_handler,
exit_handler=exit_handler, shell=self.is_local(), stdout_handler=stdout_handler)
cmd_pipe.add(cmd_item)
# return CmdPipe instead of executing?
@ -183,7 +185,8 @@ class ExecuteNode(LogStub):
else:
return output_lines
def script(self, lines, inp=None, stdout_handler=None, stderr_handler=None, exit_handler=None, valid_exitcodes=None, readonly=False, hide_errors=False, pipe=False):
def script(self, lines, inp=None, stdout_handler=None, stderr_handler=None, exit_handler=None, valid_exitcodes=None,
readonly=False, hide_errors=False, pipe=False):
"""Run a multiline script on the node.
This is much more low level than run() and allows for finer grained control.
@ -260,7 +263,9 @@ class ExecuteNode(LogStub):
cmd.append("\n".join(lines))
# add shell command and handlers to pipe
cmd_item=CmdItem(cmd=cmd, readonly=readonly, stderr_handler=internal_stderr_handler, exit_handler=internal_exit_handler, stdout_handler=internal_stdout_handler, shell=self.is_local())
cmd_item = CmdItem(cmd=cmd, readonly=readonly, stderr_handler=internal_stderr_handler,
exit_handler=internal_exit_handler, stdout_handler=internal_stdout_handler,
shell=self.is_local())
cmd_pipe.add(cmd_item)
self.debug("SCRIPT > {}".format(cmd_pipe))

View File

@ -3,6 +3,7 @@ from __future__ import print_function
import sys
class LogConsole:
"""Log-class that outputs to console, adding colors if needed"""

View File

@ -1,4 +1,3 @@
from .ThinnerRule import ThinnerRule
@ -48,7 +47,6 @@ class Thinner:
now: if specified, use this time as current time
"""
# always keep a number of the last objets?
if self.always_keep:
# all of them

View File

@ -34,7 +34,6 @@ class TreeHasher():
for (chunk_nr, hash) in self.block_hasher.generate(file_path):
yield (os.path.relpath(file_path, start_path), chunk_nr, hash)
def compare(self, start_path, generator):
"""reads from generator and compares blocks
@ -48,13 +47,9 @@ class TreeHasher():
def filter_file_name(file_name, chunk_nr, hexdigest):
return (chunk_nr, hexdigest)
for file_name, group_generator in itertools.groupby(generator, lambda x: x[0]):
count = count + 1
block_generator = itertools.starmap(filter_file_name, group_generator)
for ( chunk_nr, compare_hexdigest, actual_hexdigest) in self.block_hasher.compare(os.path.join(start_path,file_name), block_generator):
for (chunk_nr, compare_hexdigest, actual_hexdigest) in self.block_hasher.compare(
os.path.join(start_path, file_name), block_generator):
yield (file_name, chunk_nr, compare_hexdigest, actual_hexdigest)
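
Note: TreeHasher.compare() above groups the incoming (file_name, chunk_nr, hash) stream per file with itertools.groupby and strips the file name with starmap before handing each group to the block hasher. The grouping pattern in isolation (the sample records are made up):

```python
import itertools

# stream of (file_name, chunk_nr, hexdigest), sorted by file name as groupby requires
records = [
    ("a.txt", 0, "aaa"),
    ("a.txt", 1, "bbb"),
    ("b.txt", 0, "ccc"),
]

def drop_file_name(file_name, chunk_nr, hexdigest):
    return (chunk_nr, hexdigest)

for file_name, group in itertools.groupby(records, lambda rec: rec[0]):
    chunks = itertools.starmap(drop_file_name, group)
    print(file_name, list(chunks))
# a.txt [(0, 'aaa'), (1, 'bbb')]
# b.txt [(0, 'ccc')]
```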

View File

@ -49,7 +49,8 @@ class ZfsAuto(CliBase):
self.exclude_paths.append(args.target_path)
else:
if not args.exclude_received and not args.include_received:
self.verbose("NOTE: Source and target are on the same host, adding --exclude-received to commandline. (use --include-received to overrule)")
self.verbose(
"NOTE: Source and target are on the same host, adding --exclude-received to commandline. (use --include-received to overrule)")
args.exclude_received = True
if args.test:
@ -63,7 +64,8 @@ class ZfsAuto(CliBase):
dt = datetime_now(args.utc)
self.verbose("")
self.verbose("Current time {} : {}".format(args.utc and "UTC" or " ", dt.strftime("%Y-%m-%d %H:%M:%S")))
self.verbose(
"Current time {} : {}".format(args.utc and "UTC" or " ", dt.strftime("%Y-%m-%d %H:%M:%S")))
self.verbose("Selecting dataset property : {}".format(self.property_name))
self.verbose("Snapshot format : {}".format(self.snapshot_time_format))
@ -82,8 +84,6 @@ class ZfsAuto(CliBase):
parser.add_argument('target_path', metavar='TARGET-PATH', default=None, nargs='?',
help='Target ZFS filesystem (optional)')
# SSH options
group = parser.add_argument_group("SSH options")
group.add_argument('--ssh-config', metavar='CONFIG-FILE', default=None, help='Custom ssh client config')
@ -112,14 +112,15 @@ class ZfsAuto(CliBase):
group.add_argument('--include-received', action='store_true',
help=argparse.SUPPRESS)
def regex_argument_type(input_line):
"""Parses regex arguments into re.Pattern objects"""
try:
return re.compile(input_line)
except:
raise ValueError("Could not parse argument '{}' as a regular expression".format(input_line))
group.add_argument('--exclude-snapshot-pattern', action='append', default=[], type=regex_argument_type, help="Regular expression to match snapshots that will be ignored.")
group.add_argument('--exclude-snapshot-pattern', action='append', default=[], type=regex_argument_type,
help="Regular expression to match snapshots that will be ignored.")
return parser
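
Note: regex_argument_type above compiles --exclude-snapshot-pattern values at parse time, so an invalid regex is rejected by argparse before the program does any work. A minimal standalone sketch of that type= pattern (the sample pattern is illustrative):

```python
import argparse
import re

def regex_argument_type(input_line):
    """Parse a command-line value into a compiled re.Pattern, or fail argument parsing."""
    try:
        return re.compile(input_line)
    except re.error:
        raise ValueError("Could not parse argument '{}' as a regular expression".format(input_line))

parser = argparse.ArgumentParser()
parser.add_argument('--exclude-snapshot-pattern', action='append', default=[],
                    type=regex_argument_type)

args = parser.parse_args(['--exclude-snapshot-pattern', '^manual-.*'])
print(args.exclude_snapshot_pattern[0].search("manual-2024"))   # matches
# parser.parse_args(['--exclude-snapshot-pattern', '(['])        # would exit with a usage error
```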

View File

@ -1,4 +1,3 @@
import argparse
from signal import signal, SIGPIPE
from .util import output_redir, sigpipe_handler, datetime_now
@ -12,6 +11,7 @@ from .ZfsDataset import ZfsDataset
from .ZfsNode import ZfsNode
from .ThinnerRule import ThinnerRule
class ZfsAutobackup(ZfsAuto):
"""The main zfs-autobackup class. Start here, at run() :)"""
@ -73,7 +73,6 @@ class ZfsAutobackup(ZfsAuto):
group.add_argument('--no-guid-check', action='store_true',
help='Dont check guid of common snapshots. (faster)')
group = parser.add_argument_group("Transfer options")
group.add_argument('--no-send', action='store_true',
help='Don\'t transfer snapshots (useful for cleanups, or if you want a separate send-cronjob)')
@ -341,7 +340,11 @@ class ZfsAutobackup(ZfsAuto):
source_dataset.debug("-> {}".format(target_name))
if target_name in target_datasets:
raise Exception("Target collision: Target path {} encountered twice, due to: {} and {}".format(target_name, source_dataset, target_datasets[target_name]))
raise Exception(
"Target collision: Target path {} encountered twice, due to: {} and {}".format(target_name,
source_dataset,
target_datasets[
target_name]))
target_datasets[target_name] = source_dataset
@ -397,7 +400,8 @@ class ZfsAutobackup(ZfsAuto):
destroy_incompatible=self.args.destroy_incompatible,
send_pipes=send_pipes, recv_pipes=recv_pipes,
decrypt=self.args.decrypt, encrypt=self.args.encrypt,
zfs_compressed=self.args.zfs_compressed, force=self.args.force, guid_check=not self.args.no_guid_check)
zfs_compressed=self.args.zfs_compressed, force=self.args.force,
guid_check=not self.args.no_guid_check)
except Exception as e:
fail_count = fail_count + 1
@ -406,7 +410,6 @@ class ZfsAutobackup(ZfsAuto):
self.verbose("Debug mode, aborting on first error")
raise
target_path_dataset = target_node.get_dataset(self.args.target_path)
if not self.args.no_thinning:
self.thin_missing_targets(target_dataset=target_path_dataset, used_target_datasets=target_datasets)
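
Note: the reflowed collision check above keeps a dict of already-claimed target paths so two source datasets can never map onto the same target. The idea in isolation (dataset names are made up):

```python
target_datasets = {}

def claim_target(target_name, source_dataset):
    """Register a target path, refusing duplicates coming from different sources."""
    if target_name in target_datasets:
        raise Exception("Target collision: Target path {} encountered twice, due to: {} and {}".format(
            target_name, source_dataset, target_datasets[target_name]))
    target_datasets[target_name] = source_dataset

claim_target("backup/data1", "pool/data1")
claim_target("backup/data2", "pool/data2")
# claim_target("backup/data1", "otherpool/data1")  # would raise the collision error above
```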

View File

@ -109,7 +109,6 @@ def verify_filesystem(source_snapshot, source_mnt, target_snapshot, target_mnt,
# return hashed
# def deacitvate_volume_snapshot(snapshot):
# clone_name=get_tmp_clone_name(snapshot)
# clone=snapshot.zfs_node.get_dataset(clone_name)
@ -199,7 +198,8 @@ class ZfsAutoverify(ZfsAuto):
elif source_dataset.properties['type'] == "volume":
verify_volume(source_dataset, source_snapshot, target_dataset, target_snapshot)
else:
raise(Exception("{} has unknown type {}".format(source_dataset, source_dataset.properties['type'])))
raise (
Exception("{} has unknown type {}".format(source_dataset, source_dataset.properties['type'])))
except Exception as e:
@ -224,7 +224,6 @@ class ZfsAutoverify(ZfsAuto):
target_node = None
target_mnt = None
try:
################ create source zfsNode
@ -302,8 +301,6 @@ class ZfsAutoverify(ZfsAuto):
cleanup_mountpoint(target_node, target_mnt)
def cli():
import sys

View File

@ -27,14 +27,16 @@ class ZfsCheck(CliBase):
parser = super(ZfsCheck, self).get_parser()
# positional arguments
parser.add_argument('target', metavar='TARGET', default=None, nargs='?', help='Target to checkum. (can be blockdevice, directory or ZFS snapshot)')
parser.add_argument('target', metavar='TARGET', default=None, nargs='?',
help='Target to checkum. (can be blockdevice, directory or ZFS snapshot)')
group = parser.add_argument_group('Checker options')
group.add_argument('--block-size', metavar="BYTES", default=4096, help="Read block-size, default %(default)s",
type=int)
group.add_argument('--count', metavar="COUNT", default=int((100 * (1024 ** 2)) / 4096),
help="Hash chunks of COUNT blocks. Default %(default)s . (CHUNK size is BYTES * COUNT) ", type=int) # 100MiB
help="Hash chunks of COUNT blocks. Default %(default)s . (CHUNK size is BYTES * COUNT) ",
type=int) # 100MiB
group.add_argument('--check', '-c', metavar="FILE", default=None, const=True, nargs='?',
help="Read hashes from STDIN (or FILE) and compare them")
@ -61,7 +63,6 @@ class ZfsCheck(CliBase):
self.verbose("Skip chunk count : {} (checks {:.2f}% of data)".format(args.skip, 100 / (1 + args.skip)))
self.verbose("")
return args
def prepare_zfs_filesystem(self, snapshot):
@ -106,7 +107,9 @@ class ZfsCheck(CliBase):
time.sleep(1)
raise (Exception("Timeout while waiting for /dev entry to appear. (looking in: {}). Hint: did you forget to load the encryption key?".format(locations)))
raise (Exception(
"Timeout while waiting for /dev entry to appear. (looking in: {}). Hint: did you forget to load the encryption key?".format(
locations)))
def cleanup_zfs_volume(self, snapshot):
"""destroys temporary volume snapshot"""

View File

@ -1,4 +1,3 @@
import re
from datetime import datetime
import sys
@ -40,7 +39,6 @@ class ZfsDataset:
self.invalidate_cache()
self.force_exists = force_exists
def invalidate_cache(self):
"""clear caches"""
# CachedProperty.clear(self)
@ -93,7 +91,6 @@ class ZfsDataset:
"""
self.zfs_node.debug("{}: {}".format(self.name, txt))
def split_path(self):
"""return the path elements as an array"""
return self.name.split("/")
@ -147,14 +144,11 @@ class ZfsDataset:
if not self.is_snapshot:
return False
for pattern in self.zfs_node.exclude_snapshot_patterns:
if pattern.search(self.name) is not None:
self.debug("Excluded (path matches snapshot exclude pattern)")
return True
def is_selected(self, value, source, inherited, exclude_received, exclude_paths, exclude_unchanged):
"""determine if dataset should be selected for backup (called from
ZfsNode)
@ -454,7 +448,6 @@ class ZfsDataset:
seconds = time.mktime(dt.timetuple())
return seconds
# def add_virtual_snapshot(self, snapshot):
# """pretend a snapshot exists (usefull in test mode)"""
#
@ -476,8 +469,6 @@ class ZfsDataset:
# cached?
if self.__snapshots is None:
self.debug("Getting snapshots")
cmd = [
@ -486,7 +477,6 @@ class ZfsDataset:
self.__snapshots = self.zfs_node.get_datasets(self.zfs_node.run(cmd=cmd, readonly=True), force_exists=True)
return self.__snapshots
def cache_snapshot(self, snapshot):
@ -571,7 +561,6 @@ class ZfsDataset:
"""get number of bytes written since our last snapshot"""
if self.__written_since_ours is None:
latest_snapshot = self.our_snapshots[-1]
self.debug("Getting bytes written since our last snapshot")
@ -612,7 +601,6 @@ class ZfsDataset:
"""
if self.__recursive_datasets is None:
self.debug("Getting all recursive datasets under us")
names = self.zfs_node.run(tab_split=False, readonly=True, valid_exitcodes=[0], cmd=[
@ -632,7 +620,6 @@ class ZfsDataset:
"""
if self.__datasets is None:
self.debug("Getting all datasets under us")
names = self.zfs_node.run(tab_split=False, readonly=True, valid_exitcodes=[0], cmd=[
@ -960,7 +947,6 @@ class ZfsDataset:
# target_dataset.error("Cant find common snapshot with source.")
raise (Exception("Cant find common snapshot with target."))
def find_incompatible_snapshots(self, common_snapshot, raw):
"""returns a list[snapshots] that is incompatible for a zfs recv onto
the common_snapshot. all direct followup snapshots with written=0 are
@ -1006,7 +992,6 @@ class ZfsDataset:
return allowed_filter_properties, allowed_set_properties
def _pre_clean(self, source_common_snapshot, target_dataset, source_obsoletes, target_obsoletes, target_transfers):
"""cleanup old stuff before starting snapshot syncing
@ -1029,7 +1014,8 @@ class ZfsDataset:
if target_dataset.exists:
for target_snapshot in target_dataset.snapshots:
if (target_snapshot in target_obsoletes) \
and (not source_common_snapshot or (target_snapshot.snapshot_name != source_common_snapshot.snapshot_name)):
and (not source_common_snapshot or (
target_snapshot.snapshot_name != source_common_snapshot.snapshot_name)):
if target_snapshot.exists:
target_snapshot.destroy()
@ -1089,7 +1075,8 @@ class ZfsDataset:
# start with snapshots that already exist, minus imcompatibles
if target_dataset.exists:
possible_target_snapshots = [snapshot for snapshot in target_dataset.snapshots if snapshot not in incompatible_target_snapshots]
possible_target_snapshots = [snapshot for snapshot in target_dataset.snapshots if
snapshot not in incompatible_target_snapshots]
else:
possible_target_snapshots = []
@ -1106,13 +1093,16 @@ class ZfsDataset:
# we want it?
if (also_other_snapshots or source_snapshot.is_ours()) and not source_snapshot.is_excluded:
# create virtual target snapshot
target_snapshot=target_dataset.zfs_node.get_dataset(target_dataset.filesystem_name + "@" + source_snapshot.snapshot_name, force_exists=False)
target_snapshot = target_dataset.zfs_node.get_dataset(
target_dataset.filesystem_name + "@" + source_snapshot.snapshot_name, force_exists=False)
possible_target_snapshots.append(target_snapshot)
source_snapshot = self.find_next_snapshot(source_snapshot)
### 3: Let the thinner decide what it wants by looking at all the possible target_snaphots at once
if possible_target_snapshots:
(target_keeps, target_obsoletes)=target_dataset.zfs_node.thin_list(possible_target_snapshots, keep_snapshots=[possible_target_snapshots[-1]])
(target_keeps, target_obsoletes) = target_dataset.zfs_node.thin_list(possible_target_snapshots,
keep_snapshots=[
possible_target_snapshots[-1]])
else:
target_keeps = []
target_obsoletes = []
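
Note: the reflowed block above first builds the list of possible target snapshots (existing ones minus incompatibles, plus "virtual" not-yet-transferred ones) and then hands the whole list to the thinner in a single call, always pinning the newest snapshot via keep_snapshots. A stripped-down sketch of that decide-in-one-pass shape, with a toy thinner standing in for zfs_node.thin_list():

```python
def thin_list(snapshots, keep_snapshots=(), keep_last=3):
    """Toy thinner: keep the last N snapshots plus anything explicitly pinned."""
    keeps = [s for s in snapshots if s in keep_snapshots or s in snapshots[-keep_last:]]
    obsoletes = [s for s in snapshots if s not in keeps]
    return keeps, obsoletes

possible_target_snapshots = ["2024-01", "2024-02", "2024-03", "2024-04", "2024-05"]
if possible_target_snapshots:
    target_keeps, target_obsoletes = thin_list(possible_target_snapshots,
                                               keep_snapshots=[possible_target_snapshots[-1]])
else:
    target_keeps, target_obsoletes = [], []

print(target_keeps)      # ['2024-03', '2024-04', '2024-05']
print(target_obsoletes)  # ['2024-01', '2024-02']
```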

View File

@ -61,15 +61,18 @@ COMPRESS_CMDS = {
},
}
def compress_cmd(compressor):
ret = [COMPRESS_CMDS[compressor]['cmd']]
ret.extend(COMPRESS_CMDS[compressor]['args'])
return ret
def decompress_cmd(compressor):
ret = [COMPRESS_CMDS[compressor]['dcmd']]
ret.extend(COMPRESS_CMDS[compressor]['dargs'])
return ret
def choices():
return COMPRESS_CMDS.keys()
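
Note: compress_cmd() and decompress_cmd() above assemble argv lists from a COMPRESS_CMDS table whose entries carry 'cmd'/'args' and 'dcmd'/'dargs' keys. A minimal reconstruction of that shape with one hypothetical entry (the command name and flags are illustrative, not copied from the real table):

```python
COMPRESS_CMDS = {
    'zstd-fast': {
        'cmd': 'zstdmt',
        'args': ['-3'],
        'dcmd': 'zstdmt',
        'dargs': ['-d'],
    },
}

def compress_cmd(compressor):
    ret = [COMPRESS_CMDS[compressor]['cmd']]
    ret.extend(COMPRESS_CMDS[compressor]['args'])
    return ret

def decompress_cmd(compressor):
    ret = [COMPRESS_CMDS[compressor]['dcmd']]
    ret.extend(COMPRESS_CMDS[compressor]['dargs'])
    return ret

print(compress_cmd('zstd-fast'))    # ['zstdmt', '-3']
print(decompress_cmd('zstd-fast'))  # ['zstdmt', '-d']
```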

View File

@ -1,4 +1,3 @@
# NOTE: surprisingly sha1 in via python3 is faster than the native sha1sum utility, even in the way we use below!
import os
import platform
@ -23,7 +22,6 @@ def get_tmp_clone_name(snapshot):
return pool.name + "/" + tmp_name()
def output_redir():
"""use this after a BrokenPipeError to prevent further exceptions.
Redirects stdout/err to /dev/null
@ -33,11 +31,13 @@ def output_redir():
os.dup2(devnull, sys.stdout.fileno())
os.dup2(devnull, sys.stderr.fileno())
def sigpipe_handler(sig, stack):
# redir output so we dont get more SIGPIPES during cleanup. (which my try to write to stdout)
output_redir()
# deb('redir')
# def check_output():
# """make sure stdout still functions. if its broken, this will trigger a SIGPIPE which will be handled by the sigpipe_handler."""
# try:
@ -56,6 +56,8 @@ def sigpipe_handler(sig, stack):
datetime_now_mock = None
def datetime_now(utc):
if datetime_now_mock is None:
return (datetime.utcnow() if utc else datetime.now())
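
Note: datetime_now() above consults a module-level datetime_now_mock so tests can freeze "now" without patching datetime itself. A self-contained sketch of that pattern (assuming the mock simply replaces the returned value, which is what the guard shown implies):

```python
from datetime import datetime

datetime_now_mock = None

def datetime_now(utc):
    """Return the current time, unless a test has set datetime_now_mock."""
    if datetime_now_mock is None:
        return datetime.utcnow() if utc else datetime.now()
    return datetime_now_mock

print(datetime_now(utc=True))                          # real clock

datetime_now_mock = datetime(2024, 9, 24, 18, 22, 17)  # frozen time, e.g. in a test
print(datetime_now(utc=False))                         # 2024-09-24 18:22:17
```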