fix #112: major change in mounting behaviour

Edwin Eefting 2023-09-27 00:52:06 +02:00
parent ea8beee7c8
commit 771127d34a
4 changed files with 81 additions and 20 deletions

View File

@@ -38,7 +38,7 @@ class TestZfsScaling(unittest2.TestCase):
        #this triggers if you make a change with an impact of more than O(snapshot_count/2)
-        expected_runs=335
+        expected_runs=342
        print("EXPECTED RUNS: {}".format(expected_runs))
        print("ACTUAL RUNS : {}".format(run_counter))
        self.assertLess(abs(run_counter-expected_runs), snapshot_count/2)
@@ -80,7 +80,7 @@ class TestZfsScaling(unittest2.TestCase):
        #this triggers if you make a change with an impact of more than O(snapshot_count/2)
-        expected_runs=635
+        expected_runs=842
        print("EXPECTED RUNS: {}".format(expected_runs))
        print("ACTUAL RUNS: {}".format(run_counter))
        self.assertLess(abs(run_counter-expected_runs), dataset_count/2)

View File

@@ -493,13 +493,13 @@ test_source2/fs2/sub@test-20101111000000 canmount - -
test_source2/fs3 canmount on default
test_source2/fs3/sub canmount on default
test_target1 canmount on default
-test_target1/test_source1 canmount on default
+test_target1/test_source1 canmount off local
test_target1/test_source1/fs1 canmount noauto local
test_target1/test_source1/fs1@test-20101111000000 canmount - -
test_target1/test_source1/fs1/sub canmount noauto local
test_target1/test_source1/fs1/sub@test-20101111000000 canmount - -
-test_target1/test_source2 canmount on default
-test_target1/test_source2/fs2 canmount on default
+test_target1/test_source2 canmount off local
+test_target1/test_source2/fs2 canmount off local
test_target1/test_source2/fs2/sub canmount noauto local
test_target1/test_source2/fs2/sub@test-20101111000000 canmount - -
""")
@@ -512,7 +512,6 @@ test_target1/test_source2/fs2/sub@test-20101111000000 canmount - -
        self.assertFalse(ZfsAutobackup("test test_target1 --no-progress --verbose".split(" ")).run())

        #make change
-        r=shelltest("zfs mount test_target1/test_source1/fs1")
        r=shelltest("touch /test_target1/test_source1/fs1/change.txt")

        with patch('time.strftime', return_value="test-20101111000001"):
@@ -539,7 +538,6 @@ test_target1/test_source2/fs2/sub@test-20101111000000 canmount - -
        self.assertFalse(ZfsAutobackup("test test_target1 --no-progress --verbose --allow-empty".split(" ")).run())

        #add incompatible snapshot by changing and snapshotting
-        r=shelltest("zfs mount test_target1/test_source1/fs1")
        r=shelltest("touch /test_target1/test_source1/fs1/change.txt")
        r=shelltest("zfs snapshot test_target1/test_source1/fs1@incompatible1")

View File

@@ -106,3 +106,34 @@ test_target1/test_source2/fs2/sub@test-20101111000001
""")
+
+    #check consistent mounting behaviour, see issue #112
+    def test_mount_consitency_mounted(self):
+        """only filesystems that have canmount=on with a mountpoint should be mounted."""
+
+        shelltest("zfs create -V 10M test_source1/fs1/subvol")
+
+        with patch('time.strftime', return_value="test-20101111000000"):
+            self.assertFalse(ZfsAutobackup("test test_target1 --no-progress --verbose --allow-empty".split(" ")).run())
+
+        r=shelltest("zfs mount |grep -o /test_target1.*")
+        self.assertMultiLineEqual(r,"""
+/test_target1
+/test_target1/test_source1/fs1
+/test_target1/test_source1/fs1/sub
+/test_target1/test_source2/fs2/sub
+""")
+
+    def test_mount_consitency_unmounted(self):
+        """only test_target1 should be mounted in this test"""
+
+        shelltest("zfs create -V 10M test_source1/fs1/subvol")
+
+        with patch('time.strftime', return_value="test-20101111000000"):
+            self.assertFalse(ZfsAutobackup("test test_target1 --no-progress --verbose --allow-empty --clear-mountpoint".split(" ")).run())
+
+        r=shelltest("zfs mount |grep -o /test_target1.*")
+        self.assertMultiLineEqual(r,"""
+/test_target1
+""")

View File

@@ -206,6 +206,7 @@ class ZfsDataset:
        we cache this so everything in the parent that is cached also stays.
        returns None if there is no parent.
        :rtype: ZfsDataset | None
        """
        if self.is_snapshot:
            return self.zfs_node.get_dataset(self.filesystem_name)
@@ -271,18 +272,24 @@ class ZfsDataset:
        return (self.zfs_node.run(tab_split=True, cmd=["zfs", "list", self.name], readonly=True, valid_exitcodes=[0, 1],
                                  hide_errors=True) and True)

-    def create_filesystem(self, parents=False):
+    def create_filesystem(self, parents=False, unmountable=True):
        """create a filesystem

        Args:
            :type parents: bool
+            :type unmountable: bool
        """

-        if parents:
-            self.verbose("Creating filesystem and parents")
-            self.zfs_node.run(["zfs", "create", "-p", self.name])
-        else:
-            self.verbose("Creating filesystem")
-            self.zfs_node.run(["zfs", "create", self.name])
+        #recurse up
+        if parents and self.parent and not self.parent.exists:
+            self.parent.create_filesystem(parents, unmountable)
+
+        cmd = ["zfs", "create"]
+
+        if unmountable:
+            cmd.extend(["-o", "canmount=off"])
+
+        cmd.append(self.name)
+
+        self.zfs_node.run(cmd)

        self.force_exists = True
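
Why the recursion replaced "zfs create -p": with -p, ZFS creates the missing parents with default properties, so they would come up mountable, while -o canmount=off would only apply to the leaf. Recursing up lets every placeholder dataset be created unmountable. A hedged sketch of the resulting commands (pool and paths hypothetical):

        # ds = node.get_dataset("pool/target/data"), with pool/target missing:
        # ds.create_filesystem(parents=True) now issues one command per dataset:
        #   zfs create -o canmount=off pool/target
        #   zfs create -o canmount=off pool/target/data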
@@ -325,9 +332,6 @@ class ZfsDataset:
            "zfs", "get", "-H", "-o", "property,value", "-p", "all", self.name
        ]

-        if not self.exists:
-            return {}

        self.debug("Getting zfs properties")

        ret = {}
@@ -335,6 +339,7 @@
            if len(pair) == 2:
                ret[pair[0]] = pair[1]

        return ret

    def is_changed(self, min_changed_bytes=1):
@@ -655,7 +660,7 @@ class ZfsDataset:
        cmd.extend(["zfs", "recv"])

-        # don't mount filesystem that is received
+        # don't let zfs recv mount everything that's received (even with canmount=noauto!)
        cmd.append("-u")

        for property_ in filter_properties:
@@ -685,7 +690,7 @@ class ZfsDataset:
        # self.zfs_node.reset_progress()
        self.zfs_node.run(cmd, inp=pipe, valid_exitcodes=valid_exitcodes)

-        # invalidate cache, but we at least know we exist now
+        # invalidate cache
        self.invalidate()

        # in test mode we assume everything was ok and it exists
@@ -698,6 +703,27 @@ class ZfsDataset:
            self.error("error during transfer")
            raise (Exception("Target doesn't exist after transfer, something went wrong."))

+    def automount(self):
+        """mount the dataset as if one did a zfs mount -a, but only for this dataset"""
+
+        self.debug("Auto mounting")
+
+        if self.properties['type']!="filesystem":
+            return
+        if self.properties['canmount']!='on':
+            return
+        if self.properties['mountpoint']=='legacy':
+            return
+        if self.properties['mountpoint']=='none':
+            return
+
+        self.zfs_node.run(["zfs", "mount", self.name])
+
    def transfer_snapshot(self, target_snapshot, features, prev_snapshot, show_progress,
                          filter_properties, set_properties, ignore_recv_exit_code, resume_token,
                          raw, send_properties, write_embedded, send_pipes, recv_pipes, zfs_compressed, force):
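
automount() deliberately mirrors the filter that zfs mount -a applies, but for one dataset instead of the whole pool. The same predicate as a standalone sketch (helper name hypothetical; props is the dataset's property dict as used above):

        def would_automount(props):
            # volumes and snapshots have nothing to mount
            if props['type'] != "filesystem":
                return False
            # canmount=noauto and canmount=off are skipped, exactly like zfs mount -a
            if props['canmount'] != 'on':
                return False
            # legacy mounts belong to /etc/fstab; "none" means no mountpoint at all
            if props['mountpoint'] in ('legacy', 'none'):
                return False
            return True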
@@ -743,6 +769,12 @@ class ZfsDataset:
            target_snapshot.recv_pipe(pipe, features=features, filter_properties=filter_properties,
                                      set_properties=set_properties, ignore_exit_code=ignore_recv_exit_code, recv_pipes=recv_pipes, force=force)

+        #try to automount it, if it's the initial transfer
+        if not prev_snapshot:
+            target_snapshot.parent.force_exists=True
+            target_snapshot.parent.automount()
+
    def abort_resume(self):
        """abort current resume state"""
        self.debug("Aborting resume")
@@ -974,7 +1006,7 @@ class ZfsDataset:
        :type start_snapshot: ZfsDataset
        """

-        if 'receive_resume_token' in target_dataset.properties:
+        if target_dataset.exists and 'receive_resume_token' in target_dataset.properties:
            if start_snapshot==None:
                target_dataset.verbose("Aborting resume, it's obsolete.")
                target_dataset.abort_resume()
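
This guard pairs with the properties change above: since properties no longer returns {} for nonexistent datasets, probing a missing target for receive_resume_token would shell out to zfs get and fail, so existence is checked first. A minimal illustration of the failure mode being avoided (names hypothetical):

        missing = node.get_dataset("pool/target/not_yet_created")
        missing.properties  # now always runs "zfs get ... pool/target/not_yet_created", which errors for a missing dataset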