more consistent creation of ZfsDataset and ZfsPool via ZfsNode.get_dataset() and ZfsNode.get_pool()

Edwin Eefting 2022-01-24 16:29:32 +01:00
parent c0086f8953
commit 302a9ecd86
3 changed files with 41 additions and 32 deletions
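
The pattern behind the change: instead of constructing ZfsDataset and ZfsPool objects directly at every call site, ZfsNode now acts as a caching factory that memoizes one instance per name in a dict via setdefault(). A minimal sketch of the idea, using simplified stand-in classes rather than the real zfs_autobackup ones:

    class Dataset:
        # stand-in for ZfsDataset: identity is the dataset name,
        # and expensive state (zfs properties) would be cached on it
        def __init__(self, node, name):
            self.node = node
            self.name = name

    class Node:
        # stand-in for ZfsNode: hands out one shared Dataset per name
        def __init__(self):
            self.__datasets = {}

        def get_dataset(self, name):
            # setdefault returns the cached instance when the name is
            # already known, otherwise stores and returns the new one
            return self.__datasets.setdefault(name, Dataset(self, name))

    node = Node()
    assert node.get_dataset("rpool/data") is node.get_dataset("rpool/data")

One caveat of setdefault() is that the candidate object is constructed even on a cache hit; that is acceptable here because construction is cheap and the payoff is that cached state lives on a single shared object.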

zfs_autobackup/ZfsAutobackup.py

@@ -299,7 +299,7 @@ class ZfsAutobackup(ZfsAuto):
             try:
                 # determine corresponding target_dataset
                 target_name = self.make_target_name(source_dataset)
-                target_dataset = ZfsDataset(target_node, target_name)
+                target_dataset = target_node.get_dataset(target_name)
                 target_datasets.append(target_dataset)
 
                 # ensure parents exists
@@ -310,8 +310,8 @@ class ZfsAutobackup(ZfsAuto):
                     target_dataset.parent.create_filesystem(parents=True)
 
                 # determine common zpool features (cached, so no problem we call it often)
-                source_features = source_node.get_zfs_pool(source_dataset.split_path()[0]).features
-                target_features = target_node.get_zfs_pool(target_dataset.split_path()[0]).features
+                source_features = source_node.get_pool(source_dataset).features
+                target_features = target_node.get_pool(target_dataset).features
                 common_features = source_features and target_features
 
                 # sync the snapshots of this dataset
@@ -336,7 +336,7 @@ class ZfsAutobackup(ZfsAuto):
         if self.args.progress:
             self.clear_progress()
 
-        target_path_dataset = ZfsDataset(target_node, self.args.target_path)
+        target_path_dataset = target_node.get_dataset(self.args.target_path)
 
         if not self.args.no_thinning:
             self.thin_missing_targets(target_dataset=target_path_dataset, used_target_datasets=target_datasets)
@@ -434,7 +434,7 @@ class ZfsAutobackup(ZfsAuto):
             self.set_title("Synchronising")
 
             # check if exists, to prevent vague errors
-            target_dataset = ZfsDataset(target_node, self.args.target_path)
+            target_dataset = target_node.get_dataset(self.args.target_path)
             if not target_dataset.exists:
                 raise (Exception(
                     "Target path '{}' does not exist. Please create this dataset first.".format(target_dataset)))

zfs_autobackup/ZfsDataset.py

@@ -186,9 +186,9 @@ class ZfsDataset:
         we cache this so everything in the parent that is cached also stays.
         """
         if self.is_snapshot:
-            return ZfsDataset(self.zfs_node, self.filesystem_name)
+            return self.zfs_node.get_dataset(self.filesystem_name)
         else:
-            return ZfsDataset(self.zfs_node, self.rstrip_path(1))
+            return self.zfs_node.get_dataset(self.rstrip_path(1))
 
     # NOTE: unused for now
     # def find_prev_snapshot(self, snapshot, also_other_snapshots=False):
@@ -370,7 +370,7 @@ class ZfsDataset:
         """
         ret = []
         for name in names:
-            ret.append(ZfsDataset(self.zfs_node, name))
+            ret.append(self.zfs_node.get_dataset(name))
 
         return ret
@@ -724,7 +724,7 @@ class ZfsDataset:
             matches = re.findall("toname = .*@(.*)", line)
             if matches:
                 snapshot_name = matches[0]
-                snapshot = ZfsDataset(self.zfs_node, self.filesystem_name + "@" + snapshot_name)
+                snapshot = self.zfs_node.get_dataset(self.filesystem_name + "@" + snapshot_name)
                 snapshot.debug("resume token belongs to this snapshot")
                 return snapshot
@@ -867,9 +867,7 @@ class ZfsDataset:
         while snapshot:
             # create virtual target snapshot
             # NOTE: with force_exists we're telling the dataset it doesn't exist yet (e.g. it's virtual)
-            virtual_snapshot = ZfsDataset(self.zfs_node,
-                                          self.filesystem_name + "@" + snapshot.snapshot_name,
-                                          force_exists=False)
+            virtual_snapshot = self.zfs_node.get_dataset(self.filesystem_name + "@" + snapshot.snapshot_name, force_exists=False)
             self.snapshots.append(virtual_snapshot)
             snapshot = source_dataset.find_next_snapshot(snapshot, also_other_snapshots)
@@ -1120,19 +1118,18 @@ class ZfsDataset:
         self.zfs_node.run(cmd=cmd, valid_exitcodes=[0])
 
-    # unused/untested for now
-    # def clone(self, name):
-    #     """clones this snapshot and returns ZfsDataset of the clone"""
-    #
-    #     self.debug("Cloning to {}".format(name))
-    #
-    #     cmd = [
-    #         "zfs", "clone", self.name, name
-    #     ]
-    #
-    #     self.zfs_node.run(cmd=cmd, valid_exitcodes=[0])
-    #
-    #     return ZfsDataset(self.zfs_node, name, force_exists=True)
+    def clone(self, name):
+        """clones this snapshot and returns ZfsDataset of the clone"""
+
+        self.debug("Cloning to {}".format(name))
+
+        cmd = [
+            "zfs", "clone", self.name, name
+        ]
+
+        self.zfs_node.run(cmd=cmd, valid_exitcodes=[0])
+
+        return self.zfs_node.get_dataset(name, force_exists=True)
 
     def set(self, prop, value):
         """set a zfs property"""

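Besides the mechanical replacements above, this file also re-enables the previously commented-out clone() method, which now returns the cached dataset object. A usage sketch with made-up names, assuming node is a connected ZfsNode:

    snapshot = node.get_dataset("tank/data@migrate")
    new_clone = snapshot.clone("tank/data-work")  # runs: zfs clone tank/data@migrate tank/data-work
    # new_clone is the cached ZfsDataset for "tank/data-work", registered with force_exists=True
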
zfs_autobackup/ZfsNode.py

@@ -48,6 +48,7 @@ class ZfsNode(ExecuteNode):
 
         # list of ZfsPools
         self.__pools = {}
+        self.__datasets = {}
 
         self._progress_total_bytes = 0
         self._progress_start_time = time.time()
@@ -93,12 +94,23 @@ class ZfsNode(ExecuteNode):
         return True
 
-    # TODO: also create a get_zfs_dataset() function that stores all the objects in a dict. This should optimize
-    # caching a bit and is more consistent.
-    def get_zfs_pool(self, name):
-        """get a ZfsPool() object from specified name. stores objects internally to enable caching"""
-
-        return self.__pools.setdefault(name, ZfsPool(self, name))
+    def get_pool(self, dataset):
+        """get a ZfsPool() object from dataset. stores objects internally to enable caching"""
+
+        if not isinstance(dataset, ZfsDataset):
+            raise (Exception("{} is not a ZfsDataset".format(dataset)))
+
+        zpool_name = dataset.name.split("/")[0]
+
+        return self.__pools.setdefault(zpool_name, ZfsPool(self, zpool_name))
+
+    def get_dataset(self, name, force_exists=None):
+        """get a ZfsDataset() object from name. stores objects internally to enable caching"""
+
+        if not isinstance(name, str):
+            raise (Exception("{} is not a str".format(name)))
+
+        return self.__datasets.setdefault(name, ZfsDataset(self, name))
 
     def reset_progress(self):
         """reset progress output counters"""
@@ -179,7 +191,7 @@ class ZfsNode(ExecuteNode):
                 continue
 
             # force_exists, since we're making it
-            snapshot = ZfsDataset(dataset.zfs_node, dataset.name + "@" + snapshot_name, force_exists=True)
+            snapshot = self.get_dataset(dataset.name + "@" + snapshot_name, force_exists=True)
 
             pool = dataset.split_path()[0]
             if pool not in pools:
@@ -239,7 +251,7 @@ class ZfsNode(ExecuteNode):
         for line in lines:
             (name, value, raw_source) = line
-            dataset = ZfsDataset(self, name)
+            dataset = self.get_dataset(name, force_exists=True)
 
             # "resolve" inherited sources
             sources[name] = raw_source
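
With the new helpers, callers no longer split dataset paths themselves: get_pool() derives the zpool name from the first path component of the dataset's name, where previously they called get_zfs_pool(source_dataset.split_path()[0]). An illustrative call, again assuming a ZfsNode named node:

    ds = node.get_dataset("tank/backups/host1")
    pool = node.get_pool(ds)  # caches and returns the ZfsPool for "tank"

One subtlety visible in the diff: when a name is already cached, get_dataset() returns the stored object unchanged, so the force_exists argument only takes effect for instances constructed on the first lookup.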