Initial ZFS clones support

Repository: https://github.com/psy0rz/zfs_autobackup.git
Commit: e11c332808 (parent: 07cb7cfad4)
@@ -372,6 +372,9 @@ class ZfsAutobackup:
         :type source_node: ZfsNode
         """
 
+        def make_target_name(source_dataset):
+            return self.args.target_path + "/" + source_dataset.lstrip_path(self.args.strip_path)
+
         send_pipes = self.get_send_pipes(source_node.verbose)
         recv_pipes = self.get_recv_pipes(target_node.verbose)
 
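The new make_target_name helper centralizes the source-to-target name mapping that was previously inlined below, so the clone handling added to sync_snapshots can reuse it. A minimal standalone sketch of what it computes, assuming lstrip_path(n) drops the first n components of the dataset path (all names here are hypothetical):

```python
# Standalone sketch, not the real ZfsDataset API.
def lstrip_path(path, count):
    # drop the first `count` components of a dataset path
    return "/".join(path.split("/")[count:])

target_path = "backup/host1"            # hypothetical --target-path
source_dataset = "rpool/data/projects"  # hypothetical selected dataset
# with --strip-path 1: backup/host1/data/projects
print(target_path + "/" + lstrip_path(source_dataset, 1))
```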
@@ -387,7 +390,7 @@
 
             try:
                 # determine corresponding target_dataset
-                target_name = self.args.target_path + "/" + source_dataset.lstrip_path(self.args.strip_path)
+                target_name = make_target_name(source_dataset)
                 target_dataset = ZfsDataset(target_node, target_name)
                 target_datasets.append(target_dataset)
 
@@ -414,7 +417,8 @@
                     destroy_incompatible=self.args.destroy_incompatible,
                     send_pipes=send_pipes, recv_pipes=recv_pipes,
                     decrypt=self.args.decrypt, encrypt=self.args.encrypt,
-                    zfs_compressed=self.args.zfs_compressed)
+                    zfs_compressed=self.args.zfs_compressed,
+                    make_target_name=make_target_name)
             except Exception as e:
                 fail_count = fail_count + 1
                 source_dataset.error("FAILED: " + str(e))
@@ -223,7 +223,12 @@ class ZfsDataset:
         if self.is_snapshot:
             raise (Exception("Please call this on a dataset."))
 
-        index = self.find_snapshot_index(snapshot)
+        if snapshot.name == self.properties.get("origin"):
+            # Special case: the start snapshot lives on another filesystem (we are a clone)
+            index = -1
+        else:
+            index = self.find_snapshot_index(snapshot)
+
         while index is not None and index < len(self.snapshots) - 1:
             index = index + 1
             if also_other_snapshots or self.snapshots[index].is_ours():
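The sentinel value -1 works because the loop below increments the index before reading it: when the start snapshot is the clone's foreign origin snapshot, every snapshot of the clone itself still has to be considered. A standalone sketch of that increment-then-read behavior:

```python
# Standalone sketch of the loop above, with plain strings instead of snapshots.
snapshots = ["clone@a", "clone@b", "clone@c"]
index = -1  # start snapshot lives on the origin filesystem
visited = []
while index is not None and index < len(snapshots) - 1:
    index = index + 1
    visited.append(snapshots[index])
print(visited)  # ['clone@a', 'clone@b', 'clone@c']
```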
@@ -567,7 +572,10 @@
 
         # incremental?
         if prev_snapshot:
-            cmd.extend(["-i", "@" + prev_snapshot.snapshot_name])
+            if self.filesystem_name == prev_snapshot.filesystem_name:
+                cmd.extend(["-i", "@" + prev_snapshot.snapshot_name])
+            else:
+                cmd.extend(["-i", prev_snapshot.name])
 
         cmd.append(self.name)
 
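The short "@snapshot" form of -i only names a snapshot of the dataset being sent; when the previous snapshot belongs to the clone's origin filesystem, zfs send needs its full name. A standalone sketch of the two command shapes this branch produces (hypothetical dataset names):

```python
def send_cmd(name, filesystem_name, prev_name, prev_filesystem_name, prev_snapshot_name):
    # mirrors the branch above: short form within one filesystem,
    # full origin snapshot name across the clone boundary
    cmd = ["zfs", "send"]
    if filesystem_name == prev_filesystem_name:
        cmd.extend(["-i", "@" + prev_snapshot_name])
    else:
        cmd.extend(["-i", prev_name])
    cmd.append(name)
    return cmd

# ['zfs', 'send', '-i', '@prev', 'rpool/data@new']
print(send_cmd("rpool/data@new", "rpool/data", "rpool/data@prev", "rpool/data", "prev"))
# ['zfs', 'send', '-i', 'rpool/base@snap1', 'rpool/clone@first']
print(send_cmd("rpool/clone@first", "rpool/clone", "rpool/base@snap1", "rpool/base", "snap1"))
```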
@@ -776,6 +784,10 @@
         """
         if not target_dataset.snapshots:
             # target has nothing yet
+            origin = self.properties.get("origin")
+            if origin:
+                # We are a clone. The origin has an earlier creation time and thus must already have been synced.
+                return ZfsDataset(self.zfs_node, origin)
             return None
         else:
             # snapshot=self.find_snapshot(target_dataset.snapshots[-1].snapshot_name)
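The "origin" property is what ties a clone to the snapshot it was created from; for an ordinary filesystem zfs reports "-". A quick standalone way to inspect it (hypothetical dataset name):

```python
import subprocess

# Prints e.g. "rpool/base@snap1" for a clone, or "-" for a plain filesystem.
out = subprocess.check_output(
    ["zfs", "get", "-H", "-o", "value", "origin", "rpool/clone"]
)
print(out.decode().strip())
```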
@@ -986,7 +998,7 @@
 
     def sync_snapshots(self, target_dataset, features, show_progress, filter_properties, set_properties,
                        ignore_recv_exit_code, holds, rollback, decrypt, encrypt, also_other_snapshots,
-                       no_send, destroy_incompatible, send_pipes, recv_pipes, zfs_compressed):
+                       no_send, destroy_incompatible, send_pipes, recv_pipes, zfs_compressed, make_target_name):
         """sync this dataset's snapshots to target_dataset, while also thinning
         out old snapshots along the way.
 
@@ -1011,6 +1023,14 @@
         incompatible_target_snapshots) = \
             self._plan_sync(target_dataset=target_dataset, also_other_snapshots=also_other_snapshots)
 
+        if not target_dataset.exists and common_snapshot and common_snapshot.filesystem_name != target_dataset.filesystem_name:
+            target_origin = ZfsDataset(target_dataset.zfs_node, make_target_name(common_snapshot))
+            if not target_origin.exists:
+                raise Exception("Origin {} for clone {} does not exist on target.{}"
+                                .format(target_origin.name, target_dataset.name,
+                                        ("" if also_other_snapshots
+                                         else " You may want to retransfer {} with --other-snapshots.".format(common_snapshot.filesystem_name))))
+
         # NOTE: we do this because we don't want filesystems to fill up when backups keep failing.
         # Also useful with no_send to still clean up stuff.
         self._pre_clean(
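A clone stream can only be received on top of its origin snapshot, so if the origin filesystem was never transferred (for instance, its snapshot was skipped because --other-snapshots was not given), the receive is doomed; this check fails early with an actionable message instead. A standalone sketch of the condition, with hypothetical names:

```python
# Hypothetical state: the target has no clone yet, and the common snapshot
# lives on a different (origin) filesystem that was never sent over.
target_exists = False
common_snapshot_fs = "rpool/base"    # origin filesystem of the clone
target_fs = "rpool/clone"
target_origin = "backup/host1/base"  # make_target_name() of the origin snapshot
target_origin_exists = False

if not target_exists and common_snapshot_fs != target_fs and not target_origin_exists:
    raise Exception("Origin {} for clone {} does not exist on target.".format(
        target_origin, target_fs))
```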
@@ -1,5 +1,6 @@
 # python 2 compatibility
 from __future__ import print_function
+from operator import attrgetter
 import re
 import shlex
 import subprocess
@@ -219,15 +220,15 @@ class ZfsNode(ExecuteNode):
     def selected_datasets(self, property_name, exclude_received, exclude_paths, exclude_unchanged, min_change):
         """determine filesystems that should be backed up by looking at the special autobackup-property, systemwide
 
-        returns: list of ZfsDataset
+        returns: list of ZfsDataset sorted by creation time
         """
 
         self.debug("Getting selected datasets")
 
         # get all source filesystems that have the backup property
         lines = self.run(tab_split=True, readonly=True, cmd=[
-            "zfs", "get", "-t", "volume,filesystem", "-o", "name,value,source", "-H",
-            property_name
+            "zfs", "get", "-t", "volume,filesystem", "-Hp",
+            "{},creation".format(property_name)
        ])
 
        # The return list of selected ZfsDatasets:
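With -H and no -o column list, zfs get prints four tab-separated columns (name, property, value, source), and -p renders creation as a raw unix timestamp; each dataset therefore now yields one line per requested property. A sketch of what the tab-split result looks like (hypothetical datasets and property name):

```python
# Hypothetical tab-split output of:
#   zfs get -t volume,filesystem -Hp autobackup:offsite,creation
lines = [
    ("rpool/data", "autobackup:offsite", "true", "local"),
    ("rpool/data", "creation", "1633024800", "-"),
    ("rpool/data/projects", "autobackup:offsite", "true", "inherited from rpool/data"),
    ("rpool/data/projects", "creation", "1633111200", "-"),
]
for (name, prop_name, value, raw_source) in lines:
    print(name, prop_name, value, raw_source)
```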
@@ -237,23 +238,27 @@ class ZfsNode(ExecuteNode):
         sources = {}
 
         for line in lines:
-            (name, value, raw_source) = line
-            dataset = ZfsDataset(self, name)
+            (name, prop_name, value, raw_source) = line
+            if prop_name == property_name:
+                dataset = ZfsDataset(self, name)
 
-            # "resolve" inherited sources
-            sources[name] = raw_source
-            if raw_source.find("inherited from ") == 0:
-                inherited = True
-                inherited_from = re.sub("^inherited from ", "", raw_source)
-                source = sources[inherited_from]
-            else:
-                inherited = False
-                source = raw_source
+                # "resolve" inherited sources
+                sources[name] = raw_source
+                if raw_source.find("inherited from ") == 0:
+                    inherited = True
+                    inherited_from = re.sub("^inherited from ", "", raw_source)
+                    source = sources[inherited_from]
+                else:
+                    inherited = False
+                    source = raw_source
 
-            # determine it
-            if dataset.is_selected(value=value, source=source, inherited=inherited, exclude_received=exclude_received,
-                                   exclude_paths=exclude_paths, exclude_unchanged=exclude_unchanged,
-                                   min_change=min_change):
-                selected_filesystems.append(dataset)
-
-        return selected_filesystems
+                # determine it
+                if dataset.is_selected(value=value, source=source, inherited=inherited, exclude_received=exclude_received,
+                                       exclude_paths=exclude_paths, exclude_unchanged=exclude_unchanged,
+                                       min_change=min_change):
+                    selected_filesystems.append(dataset)
+            elif prop_name == "creation":
+                # creation date for the dataset we just selected
+                if selected_filesystems and selected_filesystems[-1].name == name:
+                    selected_filesystems[-1].creation = int(value)
+
+        return sorted(selected_filesystems, key=attrgetter("creation"))
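Sorting by creation time is what makes the clone support hold together end to end: an origin is necessarily older than any clone made from it, so processing datasets oldest-first ensures an origin is synced before its clones, matching the comment in the ZfsDataset change above. A minimal sketch with a stand-in class:

```python
from operator import attrgetter

class Dataset(object):
    # stand-in for ZfsDataset: just a name and a creation timestamp
    def __init__(self, name, creation):
        self.name = name
        self.creation = creation

datasets = [Dataset("rpool/clone", 1700000200), Dataset("rpool/base", 1600000100)]
for d in sorted(datasets, key=attrgetter("creation")):
    print(d.name)  # rpool/base first, then rpool/clone
```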