Also changed internal logic: the Thinner is no longer actually created when --no-thinning is active.

When using --no-thinning --no-snapshot --no-send, a run still does useful work, such as checking common snapshots and showing incompatible snapshots.
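The pattern behind the change: instead of checking the flag at every call site, the Thinner is simply not constructed, and a ZfsNode without a thinner keeps everything. A minimal sketch of the idea, not part of the diff (`args` stands in for the parsed argparse namespace, `logger` for the ZfsAutobackup instance):

    # construct the thinner only when thinning is wanted
    if args.no_thinning:
        source_thinner = None
    else:
        source_thinner = Thinner(args.keep_source)

    # ZfsNode now accepts thinner=None and will then never
    # mark any snapshot for destruction
    node = ZfsNode(args.backup_name, logger, thinner=source_thinner)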
Edwin Eefting 2021-04-18 14:30:23 +02:00
parent 352d5e6094
commit ec1d3ff93e
4 changed files with 61 additions and 49 deletions

View File

@@ -7,8 +7,9 @@ class Thinner:
     """progressive thinner (universal, used for cleaning up snapshots)"""

     def __init__(self, schedule_str=""):
-        """schedule_str: comma separated list of ThinnerRules. A plain number specifies how many snapshots to always
-        keep.
+        """
+        Args:
+            schedule_str: comma separated list of ThinnerRules. A plain number specifies how many snapshots to always keep.
         """

         self.rules = []
@@ -37,11 +38,15 @@ class Thinner:
         return ret

     def thin(self, objects, keep_objects=None, now=None):
-        """thin list of objects with current schedule rules. objects: list of objects to thin. every object should
-        have timestamp attribute. keep_objects: objects to always keep (these should also be in normal objects list,
-        so we can use them to perhaps delete other obsolete objects)
+        """thin list of objects with current schedule rules. objects: list of
+        objects to thin. every object should have a timestamp attribute.

         return( keeps, removes )

+        Args:
+            objects: list of objects to check (should have a timestamp attribute)
+            keep_objects: objects to always keep (if they are also in the normal objects list)
+            now: if specified, use this time as the current time
         """
         if not keep_objects:
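For context on the API being documented here: Thinner.thin() only requires that the objects carry a timestamp attribute, and it returns the ( keeps, removes ) pair. A hedged usage sketch; FakeSnapshot is an illustrative stand-in, not a class from this repo:

    import time

    class FakeSnapshot:
        # any object with a timestamp attribute will do
        def __init__(self, age_secs):
            self.timestamp = time.time() - age_secs

    # one fake snapshot every 6 hours over the last 30 days (newest first)
    snapshots = [FakeSnapshot(age) for age in range(0, 30 * 24 * 3600, 6 * 3600)]

    # schedule "10" is a plain number: always keep the 10 newest snapshots
    thinner = Thinner("10")
    keeps, removes = thinner.thin(snapshots, keep_objects=[snapshots[0]])
    # removes lists everything the schedule considers obsolete;
    # keep_objects never end up in the removes list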

View File

@@ -259,7 +259,7 @@ class ZfsAutobackup:
                     raw=self.args.raw, also_other_snapshots=self.args.other_snapshots,
                     no_send=self.args.no_send,
                     destroy_incompatible=self.args.destroy_incompatible,
-                    no_thinning=self.args.no_thinning, output_pipes=self.args.send_pipe, input_pipes=self.args.recv_pipe)
+                    output_pipes=self.args.send_pipe, input_pipes=self.args.recv_pipe)
             except Exception as e:
                 fail_count = fail_count + 1
                 source_dataset.error("FAILED: " + str(e))
@@ -267,8 +267,7 @@ class ZfsAutobackup:
                     raise
             target_path_dataset = ZfsDataset(target_node, self.args.target_path)
-            if not self.args.no_thinning:
-                self.thin_missing_targets(target_dataset=target_path_dataset, used_target_datasets=target_datasets)
+            self.thin_missing_targets(target_dataset=target_path_dataset, used_target_datasets=target_datasets)
             if self.args.destroy_missing is not None:
                 self.destroy_missing_targets(target_dataset=target_path_dataset, used_target_datasets=target_datasets)
@@ -277,10 +276,11 @@ class ZfsAutobackup:
     def thin_source(self, source_datasets):
-        self.set_title("Thinning source")
-        for source_dataset in source_datasets:
-            source_dataset.thin(skip_holds=True)
+        if not self.args.no_thinning:
+            self.set_title("Thinning source")
+            for source_dataset in source_datasets:
+                source_dataset.thin(skip_holds=True)

     def filter_replicated(self, datasets):
         if not self.args.ignore_replicated:
@@ -331,7 +331,10 @@ class ZfsAutobackup:
         self.set_title("Source settings")
         description = "[Source]"
-        source_thinner = Thinner(self.args.keep_source)
+        if self.args.no_thinning:
+            source_thinner = None
+        else:
+            source_thinner = Thinner(self.args.keep_source)
         source_node = ZfsNode(self.args.backup_name, self, ssh_config=self.args.ssh_config,
                               ssh_to=self.args.ssh_source, readonly=self.args.test,
                               debug_output=self.args.debug_output, description=description, thinner=source_thinner)
@@ -362,7 +365,10 @@ class ZfsAutobackup:
         # create target_node
         self.set_title("Target settings")
-        target_thinner = Thinner(self.args.keep_target)
+        if self.args.no_thinning:
+            target_thinner = None
+        else:
+            target_thinner = Thinner(self.args.keep_target)
         target_node = ZfsNode(self.args.backup_name, self, ssh_config=self.args.ssh_config,
                               ssh_to=self.args.ssh_target,
                               readonly=self.args.test, debug_output=self.args.debug_output,
@@ -370,10 +376,7 @@ class ZfsAutobackup:
                               thinner=target_thinner)
         target_node.verbose("Receive datasets under: {}".format(self.args.target_path))
-        if self.args.no_send:
-            self.set_title("Thinning source and target")
-        else:
-            self.set_title("Sending and thinning")
+        self.set_title("Synchronising")

         # check if exists, to prevent vague errors
         target_dataset = ZfsDataset(target_node, self.args.target_path)
@@ -382,6 +385,7 @@ class ZfsAutobackup:
                 "Target path '{}' does not exist. Please create this dataset first.".format(target_dataset)))
         # do the actual sync
+        # NOTE: even with no_send, no_thinning and no_snapshot this still does something useful: it checks the common snapshots and shows incompatible snapshots
         fail_count = self.sync_datasets(
             source_node=source_node,
             source_datasets=source_datasets,
@@ -389,8 +393,7 @@ class ZfsAutobackup:
         # no target specified, run in snapshot-only mode
         else:
-            if not self.args.no_thinning:
-                self.thin_source(source_datasets)
+            self.thin_source(source_datasets)
             fail_count = 0

         if not fail_count:
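Net effect on the call sites in this file: thin_source() can now be invoked unconditionally, since the flag check moved inside the method. A sketch of the before/after shape, paraphrased from the hunks above:

    # before: every caller had to remember the flag
    if not self.args.no_thinning:
        self.thin_source(source_datasets)

    # after: callers just call it; thin_source() checks the flag itself
    self.thin_source(source_datasets)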

View File

@@ -720,13 +720,13 @@ class ZfsDataset:
     def thin_list(self, keeps=None, ignores=None):
         """determines list of snapshots that should be kept or deleted based on
-        the thinning schedule. cull the herd! keep: list of snapshots to always
-        keep (usually the last) ignores: snapshots to completely ignore (usually
-        incompatible target snapshots that are going to be destroyed anyway)
+        the thinning schedule. cull the herd!

         returns: ( keeps, obsoletes )

+        Args:
+            :param keeps: list of snapshots to always keep (usually the last)
+            :param ignores: snapshots to completely ignore (usually incompatible target snapshots that are going to be destroyed anyway)
+            :type keeps: list of ZfsDataset
+            :type ignores: list of ZfsDataset
         """
@@ -738,7 +738,7 @@ class ZfsDataset:
         snapshots = [snapshot for snapshot in self.our_snapshots if snapshot not in ignores]
-        return self.zfs_node.thinner.thin(snapshots, keep_objects=keeps)
+        return self.zfs_node.thin(snapshots, keep_objects=keeps)

     def thin(self, skip_holds=False):
         """destroys snapshots according to thin_list, except last snapshot
@@ -965,7 +965,7 @@ class ZfsDataset:
     def sync_snapshots(self, target_dataset, features, show_progress, filter_properties, set_properties,
                        ignore_recv_exit_code, holds, rollback, raw, also_other_snapshots,
-                       no_send, destroy_incompatible, no_thinning, output_pipes, input_pipes):
+                       no_send, destroy_incompatible, output_pipes, input_pipes):
         """sync this dataset's snapshots to target_dataset, while also thinning
         out old snapshots along the way.
@@ -984,7 +984,6 @@ class ZfsDataset:
         :type also_other_snapshots: bool
         :type no_send: bool
         :type destroy_incompatible: bool
-        :type no_thinning: bool
         """
         (common_snapshot, start_snapshot, source_obsoletes, target_obsoletes, target_keeps,
@@ -993,10 +992,12 @@ class ZfsDataset:
         # NOTE: we do this because we don't want filesystems to fill up when backups keep failing.
         # Also useful with no_send to still clean up stuff.
-        if not no_thinning:
-            self._pre_clean(
-                common_snapshot=common_snapshot, target_dataset=target_dataset,
-                target_keeps=target_keeps, target_obsoletes=target_obsoletes, source_obsoletes=source_obsoletes)
+        self._pre_clean(
+            common_snapshot=common_snapshot, target_dataset=target_dataset,
+            target_keeps=target_keeps, target_obsoletes=target_obsoletes, source_obsoletes=source_obsoletes)
+
+        # handle incompatible stuff on target
+        target_dataset.handle_incompatible_snapshots(incompatible_target_snapshots, destroy_incompatible)

         # now actually transfer the snapshots, if we want
         if no_send:
@@ -1005,9 +1006,6 @@ class ZfsDataset:
         # check if we can resume
         resume_token = self._validate_resume_token(target_dataset, start_snapshot)
-        # handle incompatible stuff on target
-        target_dataset.handle_incompatible_snapshots(incompatible_target_snapshots, destroy_incompatible)

         # rollback target to latest?
         if rollback:
             target_dataset.rollback()
@@ -1042,16 +1040,15 @@ class ZfsDataset:
                    prev_source_snapshot.release()
                    target_dataset.find_snapshot(prev_source_snapshot).release()
-                if not no_thinning:
-                    # we may now destroy the previous source snapshot if it's obsolete
-                    if prev_source_snapshot in source_obsoletes:
-                        prev_source_snapshot.destroy()
-                    # destroy the previous target snapshot if obsolete (usually this is only the common_snapshot,
-                    # the rest was already destroyed or will not be sent)
-                    prev_target_snapshot = target_dataset.find_snapshot(prev_source_snapshot)
-                    if prev_target_snapshot in target_obsoletes:
-                        prev_target_snapshot.destroy()
+                # we may now destroy the previous source snapshot if it's obsolete
+                if prev_source_snapshot in source_obsoletes:
+                    prev_source_snapshot.destroy()
+                # destroy the previous target snapshot if obsolete (usually this is only the common_snapshot,
+                # the rest was already destroyed or will not be sent)
+                prev_target_snapshot = target_dataset.find_snapshot(prev_source_snapshot)
+                if prev_target_snapshot in target_obsoletes:
+                    prev_target_snapshot.destroy()
                prev_source_snapshot = source_snapshot
            else:
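ZfsDataset now stops reaching into zfs_node.thinner directly and goes through the new zfs_node.thin() wrapper, so the thinner-is-None case is handled in exactly one place. A hedged sketch of the resulting call chain (simplified, names as in the diff):

    # ZfsDataset.thin_list -> ZfsNode.thin -> Thinner.thin (or a no-op)
    keeps, obsoletes = dataset.zfs_node.thin(snapshots, keep_objects=keeps)
    # with --no-thinning: obsoletes == [], so nothing is ever destroyed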

View File

@@ -16,7 +16,7 @@ class ZfsNode(ExecuteNode):
     """a node that contains zfs datasets. implements global (systemwide/pool wide) zfs commands"""

     def __init__(self, backup_name, logger, ssh_config=None, ssh_to=None, readonly=False, description="",
-                 debug_output=False, thinner=Thinner()):
+                 debug_output=False, thinner=None):
         self.backup_name = backup_name
         self.description = description
@@ -30,14 +30,15 @@ class ZfsNode(ExecuteNode):
         else:
             self.verbose("Datasets are local")
-        rules = thinner.human_rules()
-        if rules:
-            for rule in rules:
-                self.verbose(rule)
-        else:
-            self.verbose("Keep no old snapshots")
+        if thinner is not None:
+            rules = thinner.human_rules()
+            if rules:
+                for rule in rules:
+                    self.verbose(rule)
+            else:
+                self.verbose("Keep no old snapshots")

-        self.thinner = thinner
+        self.__thinner = thinner

         # list of ZfsPools
         self.__pools = {}
@@ -47,6 +48,12 @@ class ZfsNode(ExecuteNode):
         ExecuteNode.__init__(self, ssh_config=ssh_config, ssh_to=ssh_to, readonly=readonly, debug_output=debug_output)

+    def thin(self, objects, keep_objects):
+        if self.__thinner is not None:
+            return self.__thinner.thin(objects, keep_objects)
+        else:
+            return (keep_objects, [])

     @CachedProperty
     def supported_send_options(self):
         """list of supported options, for optimizing sends"""