reduce number of dataset exist-checks

This commit is contained in:
Edwin Eefting 2023-09-26 16:52:48 +02:00
parent 090a2d1343
commit d90ea7edd2
No known key found for this signature in database
GPG Key ID: 0F3C35D8E9887737
2 changed files with 14 additions and 12 deletions

View File

@@ -38,8 +38,9 @@ class TestZfsScaling(unittest2.TestCase):
#this triggers if you make a change with an impact of more than O(snapshot_count/2)
expected_runs=343
print("ACTUAL RUNS: {}".format(run_counter))
expected_runs=336
print("EXPECTED RUNS: {}".format(expected_runs))
print("ACTUAL RUNS : {}".format(run_counter))
self.assertLess(abs(run_counter-expected_runs), snapshot_count/2)
@@ -51,8 +52,9 @@ class TestZfsScaling(unittest2.TestCase):
#this triggers if you make a change with a performance impact of more than O(snapshot_count/2)
expected_runs=47
print("ACTUAL RUNS: {}".format(run_counter))
expected_runs=42
print("EXPECTED RUNS: {}".format(expected_runs))
print("ACTUAL RUNS : {}".format(run_counter))
self.assertLess(abs(run_counter-expected_runs), snapshot_count/2)
def test_manydatasets(self):
@@ -77,7 +79,7 @@ class TestZfsScaling(unittest2.TestCase):
self.assertFalse(ZfsAutobackup("test test_target1 --no-progress --verbose --no-holds --allow-empty".split(" ")).run())
#this triggers if you make a change with an impact of more than O(snapshot_count/2)
#this triggers if you make a change with an impact of more than O(snapshot_count/2)`
expected_runs=636
print("EXPECTED RUNS: {}".format(expected_runs))
print("ACTUAL RUNS: {}".format(run_counter))

View File

@@ -263,10 +263,10 @@ class ZfsDataset:
"""
if self.force_exists is not None:
self.debug("Checking if filesystem exists: was forced to {}".format(self.force_exists))
self.debug("Checking if dataset exists: was forced to {}".format(self.force_exists))
return self.force_exists
else:
self.debug("Checking if filesystem exists")
self.debug("Checking if dataset exists")
return (self.zfs_node.run(tab_split=True, cmd=["zfs", "list", self.name], readonly=True, valid_exitcodes=[0, 1],
hide_errors=True) and True)
@@ -408,7 +408,7 @@ class ZfsDataset:
seconds = time.mktime(dt.timetuple())
return seconds
def from_names(self, names):
def from_names(self, names, force_exists=None):
"""convert a list of names to a list ZfsDatasets for this zfs_node
Args:
@@ -416,7 +416,7 @@
"""
ret = []
for name in names:
ret.append(self.zfs_node.get_dataset(name))
ret.append(self.zfs_node.get_dataset(name, force_exists))
return ret
@@ -446,7 +446,7 @@
"zfs", "list", "-d", "1", "-r", "-t", "snapshot", "-H", "-o", "name", self.name
]
return self.from_names(self.zfs_node.run(cmd=cmd, readonly=True))
return self.from_names(self.zfs_node.run(cmd=cmd, readonly=True), force_exists=True)
@property
def our_snapshots(self):
@@ -545,7 +545,7 @@
"zfs", "list", "-r", "-t", types, "-o", "name", "-H", self.name
])
return self.from_names(names[1:])
return self.from_names(names[1:], force_exists=True)
@CachedProperty
def datasets(self, types="filesystem,volume"):
@@ -561,7 +561,7 @@
"zfs", "list", "-r", "-t", types, "-o", "name", "-H", "-d", "1", self.name
])
return self.from_names(names[1:])
return self.from_names(names[1:], force_exists=True)
def send_pipe(self, features, prev_snapshot, resume_token, show_progress, raw, send_properties, write_embedded, send_pipes, zfs_compressed):
"""returns a pipe with zfs send output for this snapshot