Mirror of https://github.com/psy0rz/zfs_autobackup.git
more consistent creation of ZfsDataset and ZfsPool via ZfsNode.get_dataset() and ZfsNode.get_pool()
commit 302a9ecd86
parent c0086f8953
@@ -299,7 +299,7 @@ class ZfsAutobackup(ZfsAuto):
             try:
                 # determine corresponding target_dataset
                 target_name = self.make_target_name(source_dataset)
-                target_dataset = ZfsDataset(target_node, target_name)
+                target_dataset = target_node.get_dataset(target_name)
                 target_datasets.append(target_dataset)
 
                 # ensure parents exists
@@ -310,8 +310,8 @@ class ZfsAutobackup(ZfsAuto):
                     target_dataset.parent.create_filesystem(parents=True)
 
                 # determine common zpool features (cached, so no problem we call it often)
-                source_features = source_node.get_zfs_pool(source_dataset.split_path()[0]).features
-                target_features = target_node.get_zfs_pool(target_dataset.split_path()[0]).features
+                source_features = source_node.get_pool(source_dataset).features
+                target_features = target_node.get_pool(target_dataset).features
                 common_features = source_features and target_features
 
                 # sync the snapshots of this dataset
@@ -336,7 +336,7 @@ class ZfsAutobackup(ZfsAuto):
         if self.args.progress:
             self.clear_progress()
 
-        target_path_dataset = ZfsDataset(target_node, self.args.target_path)
+        target_path_dataset = target_node.get_dataset(self.args.target_path)
         if not self.args.no_thinning:
             self.thin_missing_targets(target_dataset=target_path_dataset, used_target_datasets=target_datasets)
 
@@ -434,7 +434,7 @@ class ZfsAutobackup(ZfsAuto):
             self.set_title("Synchronising")
 
             # check if exists, to prevent vague errors
-            target_dataset = ZfsDataset(target_node, self.args.target_path)
+            target_dataset = target_node.get_dataset(self.args.target_path)
             if not target_dataset.exists:
                 raise (Exception(
                     "Target path '{}' does not exist. Please create this dataset first.".format(target_dataset)))
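
Why the calls above now go through the node: ZfsDataset objects cache expensive state per instance (existence checks, snapshot lists), so handing every caller the same object per name lets that cache be shared. A minimal sketch of the effect, using invented stand-in names rather than the real zfs_autobackup classes:

    import functools

    class CachedDataset:
        """Toy stand-in for ZfsDataset: pretend .exists costs one ssh/zfs roundtrip."""
        calls = 0

        def __init__(self, name):
            self.name = name

        @functools.cached_property
        def exists(self):
            CachedDataset.calls += 1  # imagine an ssh + `zfs list` roundtrip here
            return True

    # a fresh object per lookup re-pays the roundtrip every time
    for _ in range(3):
        CachedDataset("rpool/data").exists
    print(CachedDataset.calls)  # 3

    # one cached object per name pays it once
    CachedDataset.calls = 0
    cache = {}
    for _ in range(3):
        cache.setdefault("rpool/data", CachedDataset("rpool/data")).exists
    print(CachedDataset.calls)  # 1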
@@ -186,9 +186,9 @@ class ZfsDataset:
         we cache this so everything in the parent that is cached also stays.
         """
         if self.is_snapshot:
-            return ZfsDataset(self.zfs_node, self.filesystem_name)
+            return self.zfs_node.get_dataset(self.filesystem_name)
         else:
-            return ZfsDataset(self.zfs_node, self.rstrip_path(1))
+            return self.zfs_node.get_dataset(self.rstrip_path(1))
 
     # NOTE: unused for now
     # def find_prev_snapshot(self, snapshot, also_other_snapshots=False):
@@ -370,7 +370,7 @@ class ZfsDataset:
         """
         ret = []
         for name in names:
-            ret.append(ZfsDataset(self.zfs_node, name))
+            ret.append(self.zfs_node.get_dataset(name))
 
         return ret
 
@@ -724,7 +724,7 @@ class ZfsDataset:
             matches = re.findall("toname = .*@(.*)", line)
             if matches:
                 snapshot_name = matches[0]
-                snapshot = ZfsDataset(self.zfs_node, self.filesystem_name + "@" + snapshot_name)
+                snapshot = self.zfs_node.get_dataset(self.filesystem_name + "@" + snapshot_name)
                 snapshot.debug("resume token belongs to this snapshot")
                 return snapshot
 
@@ -867,9 +867,7 @@ class ZfsDataset:
         while snapshot:
             # create virtual target snapsho
             # NOTE: with force_exist we're telling the dataset it doesnt exist yet. (e.g. its virtual)
-            virtual_snapshot = ZfsDataset(self.zfs_node,
-                                          self.filesystem_name + "@" + snapshot.snapshot_name,
-                                          force_exists=False)
+            virtual_snapshot = self.zfs_node.get_dataset(self.filesystem_name + "@" + snapshot.snapshot_name, force_exists=False)
             self.snapshots.append(virtual_snapshot)
             snapshot = source_dataset.find_next_snapshot(snapshot, also_other_snapshots)
 
@@ -1120,19 +1118,18 @@ class ZfsDataset:
 
         self.zfs_node.run(cmd=cmd, valid_exitcodes=[0])
 
-    # unused/untested for now
-    # def clone(self, name):
-    #     """clones this snapshot and returns ZfsDataset of the clone"""
-    #
-    #     self.debug("Cloning to {}".format(name))
-    #
-    #     cmd = [
-    #         "zfs", "clone", self.name, name
-    #     ]
-    #
-    #     self.zfs_node.run(cmd=cmd, valid_exitcodes=[0])
-    #
-    #     return ZfsDataset(self.zfs_node, name, force_exists=True)
+    def clone(self, name):
+        """clones this snapshot and returns ZfsDataset of the clone"""
+
+        self.debug("Cloning to {}".format(name))
+
+        cmd = [
+            "zfs", "clone", self.name, name
+        ]
+
+        self.zfs_node.run(cmd=cmd, valid_exitcodes=[0])
+
+        return self.zfs_node.get_dataset(name, force_exists=True)
 
     def set(self, prop, value):
         """set a zfs property"""
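
A note on force_exists, which appears in the @@ -867 hunk and in the re-enabled clone(): per the comment in that hunk, it pre-seeds the object's existence state so no zfs roundtrip is needed for datasets the code already knows about (virtual snapshots that do not exist yet, or a clone that was just created). A rough sketch of the idea, with invented names, not the real ZfsDataset internals:

    class Dataset:
        """Toy model of the force_exists flag; the real ZfsDataset is more involved."""

        def __init__(self, name, force_exists=None):
            self.name = name
            self.force_exists = force_exists

        @property
        def exists(self):
            if self.force_exists is not None:
                return self.force_exists  # trust the caller, skip the check
            return self._check_on_disk()  # otherwise really ask zfs

        def _check_on_disk(self):
            print("would run: zfs list {}".format(self.name))
            return False

    virtual = Dataset("rpool/data@planned-snapshot", force_exists=False)
    print(virtual.exists)  # False, without any zfs roundtrip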
@@ -48,6 +48,7 @@ class ZfsNode(ExecuteNode):
 
         # list of ZfsPools
         self.__pools = {}
+        self.__datasets = {}
 
         self._progress_total_bytes = 0
         self._progress_start_time = time.time()
@@ -93,12 +94,23 @@ class ZfsNode(ExecuteNode):
 
         return True
 
-    # TODO: also create a get_zfs_dataset() function that stores all the objects in a dict. This should optimize
-    # caching a bit and is more consistent.
-    def get_zfs_pool(self, name):
-        """get a ZfsPool() object from specified name. stores objects internally to enable caching"""
-
-        return self.__pools.setdefault(name, ZfsPool(self, name))
+    def get_pool(self, dataset):
+        """get a ZfsPool() object from dataset. stores objects internally to enable caching"""
+
+        if not isinstance(dataset, ZfsDataset):
+            raise (Exception("{} is not a ZfsDataset".format(dataset)))
+
+        zpool_name = dataset.name.split("/")[0]
+
+        return self.__pools.setdefault(zpool_name, ZfsPool(self, zpool_name))
+
+    def get_dataset(self, name, force_exists=None):
+        """get a ZfsDataset() object from name. stores objects internally to enable caching"""
+
+        if not isinstance(name, str):
+            raise (Exception("{} is not a str".format(name)))
+
+        return self.__datasets.setdefault(name, ZfsDataset(self, name))
 
     def reset_progress(self):
         """reset progress output counters"""
@@ -179,7 +191,7 @@ class ZfsNode(ExecuteNode):
                 continue
 
             # force_exist, since we're making it
-            snapshot = ZfsDataset(dataset.zfs_node, dataset.name + "@" + snapshot_name, force_exists=True)
+            snapshot = self.get_dataset(dataset.name + "@" + snapshot_name, force_exists=True)
 
             pool = dataset.split_path()[0]
             if pool not in pools:
@@ -239,7 +251,7 @@ class ZfsNode(ExecuteNode):
 
         for line in lines:
             (name, value, raw_source) = line
-            dataset = ZfsDataset(self, name)
+            dataset = self.get_dataset(name, force_exists=True)
 
             # "resolve" inherited sources
             sources[name] = raw_source
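
The two factories added above are plain dict caches built on dict.setdefault. One Python detail worth knowing: setdefault evaluates its second argument eagerly, so a throwaway ZfsPool/ZfsDataset is constructed even on a cache hit. That is harmless here since construction is cheap and side-effect free, and callers always receive the cached instance. A self-contained sketch of the pattern (class names invented):

    class Pool:
        """Toy stand-in for ZfsPool."""
        instances = 0

        def __init__(self, name):
            Pool.instances += 1
            self.name = name

    class Node:
        """Toy stand-in for ZfsNode's caching factories."""

        def __init__(self):
            self._pools = {}

        def get_pool(self, dataset_name):
            # pool name is everything before the first "/", as in the hunk above
            zpool_name = dataset_name.split("/")[0]
            # NOTE: Pool(...) is evaluated even when zpool_name is already cached
            return self._pools.setdefault(zpool_name, Pool(zpool_name))

    node = Node()
    a = node.get_pool("rpool/data/home")
    b = node.get_pool("rpool/srv")
    print(a is b)          # True: both resolve to the one cached "rpool" pool
    print(Pool.instances)  # 2: the second call built a Pool that was discarded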