Mirror of https://github.com/psy0rz/zfs_autobackup.git (synced 2025-06-09 01:52:07 +03:00)
Some more refactoring: split off smaller, cleaner functions. Started work on --no-thinning.
parent 36e134eb75
commit d3ce222921
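For orientation, below is a minimal, runnable mock -- not the project's real classes, just hypothetical names condensed from the hunks that follow -- illustrating the control flow this commit works towards: the per-helper split and how --no-thinning and --destroy-missing gate the cleanup steps.

# Hypothetical mock of the refactored flow (simplified; real methods live in ZfsAutobackup):
class FlowDemo:
    def __init__(self, no_thinning=False, destroy_missing=None):
        self.no_thinning = no_thinning          # new --no-thinning flag
        self.destroy_missing = destroy_missing  # existing --destroy-missing policy

    def thin_missing_targets(self):
        print("thinning target datasets that are missing on the source")

    def destroy_missing_targets(self):
        print("destroying obsolete target datasets")

    def sync_datasets(self):
        print("sending snapshots to the target")
        if not self.no_thinning:
            self.thin_missing_targets()
        if self.destroy_missing is not None:
            self.destroy_missing_targets()

FlowDemo(no_thinning=True).sync_datasets()       # send only, keep all snapshots
FlowDemo(destroy_missing="30d").sync_datasets()  # send, thin, and destroy stale datasets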
@@ -96,7 +96,7 @@ class TestZfsNode(unittest2.TestCase):
         #now tries to destroy our own last snapshot (before the final destroy of the dataset)
         self.assertIn("fs1@test-20101111000000: Destroying", buf.getvalue())
         #but cant finish because still in use:
-        self.assertIn("fs1: Error during destoy missing", buf.getvalue())
+        self.assertIn("fs1: Error during --destroy-missing", buf.getvalue())

         shelltest("zfs destroy test_target1/clone1")

@@ -12,7 +12,7 @@ from zfs_autobackup.ThinnerRule import ThinnerRule
 class ZfsAutobackup:
     """main class"""

-    VERSION = "3.0.1-beta8"
+    VERSION = "3.1-beta1"
     HEADER = "zfs-autobackup v{} - Copyright 2020 E.H.Eefting (edwin@datux.nl)".format(VERSION)

     def __init__(self, argv, print_arguments=True):
@@ -104,6 +104,8 @@ class ZfsAutobackup:
                             help='show zfs progress output. Enabled automaticly on ttys. (use --no-progress to disable)')
         parser.add_argument('--no-progress', action='store_true', help=argparse.SUPPRESS)  # needed to workaround a zfs recv -v bug

+        parser.add_argument('--no-thinning', action='store_true', help="Do not destroy any snapshots.")
+
         # note args is the only global variable we use, since its a global readonly setting anyway
         args = parser.parse_args(argv)

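A quick illustration of the new option (a standalone sketch, not part of the diff): argparse's store_true action makes args.no_thinning default to False and flip to True only when the flag is passed; the later hunks then guard every thinning call with it.

import argparse

# Standalone sketch: the add_argument line is copied from the hunk above to show
# how the parsed flag behaves; the parser here is not the project's real parser.
parser = argparse.ArgumentParser()
parser.add_argument('--no-thinning', action='store_true', help="Do not destroy any snapshots.")

print(parser.parse_args(['--no-thinning']).no_thinning)  # True  -> skip destroying snapshots
print(parser.parse_args([]).no_thinning)                 # False -> thin as before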
@@ -147,92 +149,9 @@ class ZfsAutobackup:
         self.log.verbose("")
         self.log.verbose("#### " + title)

-    # sync datasets, or thin-only on both sides
-    # target is needed for this.
-    def sync_datasets(self, source_node, source_datasets):
-
-        description = "[Target]"
-
-        self.set_title("Target settings")
-
-        target_thinner = Thinner(self.args.keep_target)
-        target_node = ZfsNode(self.args.backup_name, self, ssh_config=self.args.ssh_config, ssh_to=self.args.ssh_target,
-                              readonly=self.args.test, debug_output=self.args.debug_output, description=description,
-                              thinner=target_thinner)
-        target_node.verbose("Receive datasets under: {}".format(self.args.target_path))
-
-        if self.args.no_send:
-            self.set_title("Thinning source and target")
-        else:
-            self.set_title("Sending and thinning")
-
-        # check if exists, to prevent vague errors
-        target_dataset = ZfsDataset(target_node, self.args.target_path)
-        if not target_dataset.exists:
-            self.error("Target path '{}' does not exist. Please create this dataset first.".format(target_dataset))
-            return 255
-
-        if self.args.filter_properties:
-            filter_properties = self.args.filter_properties.split(",")
-        else:
-            filter_properties = []
-
-        if self.args.set_properties:
-            set_properties = self.args.set_properties.split(",")
-        else:
-            set_properties = []
-
-        if self.args.clear_refreservation:
-            filter_properties.append("refreservation")
-
-        if self.args.clear_mountpoint:
-            set_properties.append("canmount=noauto")
-
-        # sync datasets
-        fail_count = 0
-        target_datasets = []
-        for source_dataset in source_datasets:
-
-            try:
-                # determine corresponding target_dataset
-                target_name = self.args.target_path + "/" + source_dataset.lstrip_path(self.args.strip_path)
-                target_dataset = ZfsDataset(target_node, target_name)
-                target_datasets.append(target_dataset)
-
-                # ensure parents exists
-                # TODO: this isnt perfect yet, in some cases it can create parents when it shouldn't.
-                if not self.args.no_send \
-                        and target_dataset.parent not in target_datasets \
-                        and not target_dataset.parent.exists:
-                    target_dataset.parent.create_filesystem(parents=True)
-
-                # determine common zpool features
-                source_features = source_node.get_zfs_pool(source_dataset.split_path()[0]).features
-                target_features = target_node.get_zfs_pool(target_dataset.split_path()[0]).features
-                common_features = source_features and target_features
-                # source_dataset.debug("Common features: {}".format(common_features))
-
-                source_dataset.sync_snapshots(target_dataset, show_progress=self.args.progress,
-                                              features=common_features, filter_properties=filter_properties,
-                                              set_properties=set_properties,
-                                              ignore_recv_exit_code=self.args.ignore_transfer_errors,
-                                              holds=not self.args.no_holds, rollback=self.args.rollback,
-                                              raw=self.args.raw, other_snapshots=self.args.other_snapshots,
-                                              no_send=self.args.no_send,
-                                              destroy_incompatible=self.args.destroy_incompatible)
-            except Exception as e:
-                fail_count = fail_count + 1
-                source_dataset.error("FAILED: " + str(e))
-                if self.args.debug:
-                    raise
-
-        # if not self.args.no_thinning:
-        self.thin_missing_targets(ZfsDataset(target_node, self.args.target_path), target_datasets)
-
-        return fail_count
-
+    # NOTE: this method also uses self.args. args that need extra processing are passed as function parameters:
     def thin_missing_targets(self, target_dataset, used_target_datasets):
-        """thin/destroy target datasets that are missing on the source."""
+        """thin target datasets that are missing on the source."""

         self.debug("Thinning obsolete datasets")

@@ -242,12 +161,23 @@ class ZfsAutobackup:
                     dataset.debug("Missing on source, thinning")
                     dataset.thin()

-                    # destroy_missing enabled?
-                    if self.args.destroy_missing is not None:
+            except Exception as e:
+                dataset.error("Error during thinning of missing datasets ({})".format(str(e)))
+
+    # NOTE: this method also uses self.args. args that need extra processing are passed as function parameters:
+    def destroy_missing_targets(self, target_dataset, used_target_datasets):
+        """destroy target datasets that are missing on the source and that meet the requirements"""
+
+        self.debug("Destroying obsolete datasets")
+
+        for dataset in target_dataset.recursive_datasets:
+            try:
+                if dataset not in used_target_datasets:

                     # cant do anything without our own snapshots
                     if not dataset.our_snapshots:
                         if dataset.datasets:
+                            # its not a leaf, just ignore
                             dataset.debug("Destroy missing: ignoring")
                         else:
                             dataset.verbose(
@@ -284,7 +214,56 @@ class ZfsAutobackup:
                                 dataset.destroy(fail_exception=True)

             except Exception as e:
-                dataset.error("Error during destoy missing ({})".format(str(e)))
+                dataset.error("Error during --destroy-missing: {}".format(str(e)))

+    # NOTE: this method also uses self.args. args that need extra processing are passed as function parameters:
+    def sync_datasets(self, source_node, source_datasets, target_node, filter_properties, set_properties):
+        """Sync datasets, or thin-only on both sides"""
+
+        fail_count = 0
+        target_datasets = []
+        for source_dataset in source_datasets:
+
+            try:
+                # determine corresponding target_dataset
+                target_name = self.args.target_path + "/" + source_dataset.lstrip_path(self.args.strip_path)
+                target_dataset = ZfsDataset(target_node, target_name)
+                target_datasets.append(target_dataset)
+
+                # ensure parents exists
+                # TODO: this isnt perfect yet, in some cases it can create parents when it shouldn't.
+                if not self.args.no_send \
+                        and target_dataset.parent not in target_datasets \
+                        and not target_dataset.parent.exists:
+                    target_dataset.parent.create_filesystem(parents=True)
+
+                # determine common zpool features (cached, so no problem we call it often)
+                source_features = source_node.get_zfs_pool(source_dataset.split_path()[0]).features
+                target_features = target_node.get_zfs_pool(target_dataset.split_path()[0]).features
+                common_features = source_features and target_features
+                # source_dataset.debug("Common features: {}".format(common_features))
+
+                source_dataset.sync_snapshots(target_dataset, show_progress=self.args.progress,
+                                              features=common_features, filter_properties=filter_properties,
+                                              set_properties=set_properties,
+                                              ignore_recv_exit_code=self.args.ignore_transfer_errors,
+                                              holds=not self.args.no_holds, rollback=self.args.rollback,
+                                              raw=self.args.raw, other_snapshots=self.args.other_snapshots,
+                                              no_send=self.args.no_send,
+                                              destroy_incompatible=self.args.destroy_incompatible)
+            except Exception as e:
+                fail_count = fail_count + 1
+                source_dataset.error("FAILED: " + str(e))
+                if self.args.debug:
+                    raise
+
+        if not self.args.no_thinning:
+            self.thin_missing_targets(target_dataset=ZfsDataset(target_node, self.args.target_path), used_target_datasets=target_datasets)
+
+        if self.args.destroy_missing is not None:
+            self.destroy_missing_targets(target_dataset=ZfsDataset(target_node, self.args.target_path), used_target_datasets=target_datasets)
+
+        return fail_count
+
     def thin_source(self, source_datasets):

@@ -342,8 +321,54 @@ class ZfsAutobackup:

         # if target is specified, we sync the datasets, otherwise we just thin the source. (e.g. snapshot mode)
         if self.args.target_path:
-            fail_count = self.sync_datasets(source_node, source_datasets)
+
+            # create target_node
+            self.set_title("Target settings")
+            target_thinner = Thinner(self.args.keep_target)
+            target_node = ZfsNode(self.args.backup_name, self, ssh_config=self.args.ssh_config,
+                                  ssh_to=self.args.ssh_target,
+                                  readonly=self.args.test, debug_output=self.args.debug_output,
+                                  description="[Target]",
+                                  thinner=target_thinner)
+            target_node.verbose("Receive datasets under: {}".format(self.args.target_path))
+
+            # determine filter- and set properties lists
+            if self.args.filter_properties:
+                filter_properties = self.args.filter_properties.split(",")
+            else:
+                filter_properties = []
+
+            if self.args.set_properties:
+                set_properties = self.args.set_properties.split(",")
+            else:
+                set_properties = []
+
+            if self.args.clear_refreservation:
+                filter_properties.append("refreservation")
+
+            if self.args.clear_mountpoint:
+                set_properties.append("canmount=noauto")
+
+            if self.args.no_send:
+                self.set_title("Thinning source and target")
+            else:
+                self.set_title("Sending and thinning")
+
+            # check if exists, to prevent vague errors
+            target_dataset = ZfsDataset(target_node, self.args.target_path)
+            if not target_dataset.exists:
+                raise(Exception(
+                    "Target path '{}' does not exist. Please create this dataset first.".format(target_dataset)))
+
+            # do the actual sync
+            fail_count = self.sync_datasets(
+                source_node=source_node,
+                source_datasets=source_datasets,
+                target_node=target_node,
+                filter_properties=filter_properties, set_properties=set_properties)
+
         else:
-            self.thin_source(source_datasets)
+            if not self.args.no_thinning:
+                self.thin_source(source_datasets)
             fail_count = 0

@@ -277,7 +277,6 @@ class ZfsDataset:
     def snapshots(self):
         """get all snapshots of this dataset"""

-
         if not self.exists:
             return []

@@ -414,7 +413,7 @@ class ZfsDataset:

         # progress output
         if show_progress:
-            cmd.append("-v")
+            # cmd.append("-v")
             cmd.append("-P")

         # resume a previous send? (don't need more parameters in that case)
@@ -431,9 +430,6 @@ class ZfsDataset:

         cmd.append(self.name)

-        # if args.buffer and args.ssh_source!="local":
-        #     cmd.append("|mbuffer -m {}".format(args.buffer))
-
         # NOTE: this doesn't start the send yet, it only returns a subprocess.Pipe
         return self.zfs_node.run(cmd, pipe=True)

@@ -495,9 +491,6 @@ class ZfsDataset:
             self.error("error during transfer")
             raise (Exception("Target doesn't exist after transfer, something went wrong."))

-        # if args.buffer and args.ssh_target!="local":
-        #     cmd.append("|mbuffer -m {}".format(args.buffer))
-
     def transfer_snapshot(self, target_snapshot, features, prev_snapshot=None, show_progress=False,
                           filter_properties=None, set_properties=None, ignore_recv_exit_code=False, resume_token=None,
                           raw=False):