From 883984fda30c0ba784f8a90c1559e9f61fa6b99f Mon Sep 17 00:00:00 2001
From: Edwin Eefting
Date: Tue, 4 Jan 2022 22:48:25 +0100
Subject: [PATCH] Revert "Initial ZFS clones support"

Whoops, accidentally committed this; it still needs review/changes before
committing.

This reverts commit e11c33280885b60fab7be648ee4046be1ca346d6.
---
 zfs_autobackup/ZfsAutobackup.py |  8 ++----
 zfs_autobackup/ZfsDataset.py    | 26 +++---------------
 zfs_autobackup/ZfsNode.py       | 47 +++++++++++++++------------------
 3 files changed, 26 insertions(+), 55 deletions(-)

diff --git a/zfs_autobackup/ZfsAutobackup.py b/zfs_autobackup/ZfsAutobackup.py
index aebea91..a347d45 100644
--- a/zfs_autobackup/ZfsAutobackup.py
+++ b/zfs_autobackup/ZfsAutobackup.py
@@ -372,9 +372,6 @@ class ZfsAutobackup:
         :type source_node: ZfsNode
         """
 
-        def make_target_name(source_dataset):
-            return self.args.target_path + "/" + source_dataset.lstrip_path(self.args.strip_path)
-
         send_pipes = self.get_send_pipes(source_node.verbose)
         recv_pipes = self.get_recv_pipes(target_node.verbose)
 
@@ -390,7 +387,7 @@ class ZfsAutobackup:
 
             try:
                 # determine corresponding target_dataset
-                target_name = make_target_name(source_dataset)
+                target_name = self.args.target_path + "/" + source_dataset.lstrip_path(self.args.strip_path)
                 target_dataset = ZfsDataset(target_node, target_name)
                 target_datasets.append(target_dataset)
@@ -417,8 +414,7 @@
                                               destroy_incompatible=self.args.destroy_incompatible,
                                               send_pipes=send_pipes, recv_pipes=recv_pipes,
                                               decrypt=self.args.decrypt, encrypt=self.args.encrypt,
-                                              zfs_compressed=self.args.zfs_compressed,
-                                              make_target_name=make_target_name)
+                                              zfs_compressed=self.args.zfs_compressed)
             except Exception as e:
                 fail_count = fail_count + 1
                 source_dataset.error("FAILED: " + str(e))
diff --git a/zfs_autobackup/ZfsDataset.py b/zfs_autobackup/ZfsDataset.py
index 38eb61a..da56b54 100644
--- a/zfs_autobackup/ZfsDataset.py
+++ b/zfs_autobackup/ZfsDataset.py
@@ -223,12 +223,7 @@ class ZfsDataset:
         if self.is_snapshot:
             raise (Exception("Please call this on a dataset."))
 
-        if snapshot.name == self.properties.get("origin"):
-            # Special case when start snapshot filesystem is other
-            index = -1
-        else:
-            index = self.find_snapshot_index(snapshot)
-
+        index = self.find_snapshot_index(snapshot)
         while index is not None and index < len(self.snapshots) - 1:
             index = index + 1
             if also_other_snapshots or self.snapshots[index].is_ours():
@@ -572,10 +567,7 @@ class ZfsDataset:
 
         # incremental?
         if prev_snapshot:
-            if self.filesystem_name == prev_snapshot.filesystem_name:
-                cmd.extend(["-i", "@" + prev_snapshot.snapshot_name])
-            else:
-                cmd.extend(["-i", prev_snapshot.name])
+            cmd.extend(["-i", "@" + prev_snapshot.snapshot_name])
 
         cmd.append(self.name)
@@ -784,10 +776,6 @@ class ZfsDataset:
         """
 
         if not target_dataset.snapshots:
             # target has nothing yet
-            origin = self.properties.get("origin")
-            if origin:
-                # We are a clone. The origin has earlier creation time and thus must have been already synced.
-                return ZfsDataset(self.zfs_node, origin)
             return None
         else:
             # snapshot=self.find_snapshot(target_dataset.snapshots[-1].snapshot_name)
@@ -998,7 +986,7 @@ class ZfsDataset:
     def sync_snapshots(self, target_dataset, features, show_progress, filter_properties, set_properties,
                        ignore_recv_exit_code, holds, rollback, decrypt, encrypt, also_other_snapshots,
-                       no_send, destroy_incompatible, send_pipes, recv_pipes, zfs_compressed, make_target_name):
+                       no_send, destroy_incompatible, send_pipes, recv_pipes, zfs_compressed):
         """sync this dataset's snapshots to target_dataset, while also thinning out old snapshots
         along the way.
@@ -1023,14 +1011,6 @@ class ZfsDataset:
          incompatible_target_snapshots) = \
             self._plan_sync(target_dataset=target_dataset, also_other_snapshots=also_other_snapshots)
 
-        if not target_dataset.exists and common_snapshot and common_snapshot.filesystem_name != target_dataset.filesystem_name:
-            target_origin = ZfsDataset(target_dataset.zfs_node, make_target_name(common_snapshot))
-            if not target_origin.exists:
-                raise Exception("Origin {} for clone {} does not exist on target.{}"
-                                .format(target_origin.name, target_dataset.name,
-                                        ("" if also_other_snapshots
-                                         else " You may want to retransfer {} with --other-snapshots.".format(common_snapshot.filesystem_name))))
-
         # NOTE: we do this because we dont want filesystems to fillup when backups keep failing.
         # Also usefull with no_send to still cleanup stuff.
         self._pre_clean(
diff --git a/zfs_autobackup/ZfsNode.py b/zfs_autobackup/ZfsNode.py
index e5672bf..1461ffb 100644
--- a/zfs_autobackup/ZfsNode.py
+++ b/zfs_autobackup/ZfsNode.py
@@ -1,6 +1,5 @@
 # python 2 compatibility
 from __future__ import print_function
-from operator import attrgetter
 import re
 import shlex
 import subprocess
@@ -220,15 +219,15 @@ class ZfsNode(ExecuteNode):
     def selected_datasets(self, property_name, exclude_received, exclude_paths, exclude_unchanged, min_change):
         """determine filesystems that should be backed up by looking at the special autobackup-property, systemwide
 
-        returns: list of ZfsDataset sorted by creation time
+        returns: list of ZfsDataset
         """
 
         self.debug("Getting selected datasets")
 
         # get all source filesystems that have the backup property
         lines = self.run(tab_split=True, readonly=True, cmd=[
-            "zfs", "get", "-t", "volume,filesystem", "-Hp",
-            "{},creation".format(property_name)
+            "zfs", "get", "-t", "volume,filesystem", "-o", "name,value,source", "-H",
+            property_name
         ])
 
         # The returnlist of selected ZfsDataset's:
@@ -238,27 +237,23 @@ class ZfsNode(ExecuteNode):
         sources = {}
 
         for line in lines:
-            (name, prop_name, value, raw_source) = line
-            if prop_name == property_name:
-                dataset = ZfsDataset(self, name)
+            (name, value, raw_source) = line
+            dataset = ZfsDataset(self, name)
 
-                # "resolve" inherited sources
-                sources[name] = raw_source
-                if raw_source.find("inherited from ") == 0:
-                    inherited = True
-                    inherited_from = re.sub("^inherited from ", "", raw_source)
-                    source = sources[inherited_from]
-                else:
-                    inherited = False
-                    source = raw_source
+            # "resolve" inherited sources
+            sources[name] = raw_source
+            if raw_source.find("inherited from ") == 0:
+                inherited = True
+                inherited_from = re.sub("^inherited from ", "", raw_source)
+                source = sources[inherited_from]
+            else:
+                inherited = False
+                source = raw_source
 
-                # determine it
-                if dataset.is_selected(value=value, source=source, inherited=inherited, exclude_received=exclude_received,
-                                       exclude_paths=exclude_paths, exclude_unchanged=exclude_unchanged,
-                                       min_change=min_change):
-                    selected_filesystems.append(dataset)
-            elif prop_name == "creation":
-                # creation date for the last dataset
-                if selected_filesystems and selected_filesystems[-1].name == name:
-                    selected_filesystems[-1].creation = int(value)
-        return sorted(selected_filesystems, key=attrgetter("creation"))
+            # determine it
+            if dataset.is_selected(value=value, source=source, inherited=inherited, exclude_received=exclude_received,
+                                   exclude_paths=exclude_paths, exclude_unchanged=exclude_unchanged,
+                                   min_change=min_change):
+                selected_filesystems.append(dataset)
+
+        return selected_filesystems