merged v3.1.2-rc2

This commit is contained in:
Edwin Eefting 2022-02-23 20:43:49 +01:00
commit 44c6896ddd
3 changed files with 60 additions and 36 deletions

View File

@ -420,33 +420,13 @@ test_target1/fs2/sub
test_target1/fs2/sub@test-20101111000000 test_target1/fs2/sub@test-20101111000000
""") """)
# def test_strippath_toomuch(self): def test_strippath_collision(self):
# with patch('time.strftime', return_value="test-20101111000000"): with self.assertRaisesRegexp(Exception,"collision"):
# self.assertFalse( ZfsAutobackup("test test_target1 --verbose --strip-path=2 --no-progress --debug".split(" ")).run()
# ZfsAutobackup("test test_target1 --verbose --strip-path=2 --no-progress".split(" ")).run())
# def test_strippath_toomuch(self):
# r = shelltest("zfs list -H -o name -r -t all " + TEST_POOLS) with self.assertRaisesRegexp(Exception,"too much"):
# self.assertMultiLineEqual(r, """ ZfsAutobackup("test test_target1 --verbose --strip-path=3 --no-progress --debug".split(" ")).run()
# test_source1
# test_source1/fs1
# test_source1/fs1@test-20101111000000
# test_source1/fs1/sub
# test_source1/fs1/sub@test-20101111000000
# test_source2
# test_source2/fs2
# test_source2/fs2/sub
# test_source2/fs2/sub@test-20101111000000
# test_source2/fs3
# test_source2/fs3/sub
# test_target1
# test_target1/fs1
# test_target1/fs1@test-20101111000000
# test_target1/fs1/sub
# test_target1/fs1/sub@test-20101111000000
# test_target1/fs2
# test_target1/fs2/sub
# test_target1/fs2/sub@test-20101111000000
# """)
def test_clearrefres(self): def test_clearrefres(self):

View File

@ -12,7 +12,7 @@ from .Thinner import Thinner
from .ZfsDataset import ZfsDataset from .ZfsDataset import ZfsDataset
from .ZfsNode import ZfsNode from .ZfsNode import ZfsNode
from .ThinnerRule import ThinnerRule from .ThinnerRule import ThinnerRule
import os.path
class ZfsAutobackup(ZfsAuto): class ZfsAutobackup(ZfsAuto):
"""The main zfs-autobackup class. Start here, at run() :)""" """The main zfs-autobackup class. Start here, at run() :)"""
@ -281,6 +281,29 @@ class ZfsAutobackup(ZfsAuto):
return ret return ret
def make_target_name(self, source_dataset):
    """Construct the target dataset name for a source dataset.

    Strips the configured number of leading path components
    (``--strip-path``) from the source dataset name and appends the
    remainder to the configured target path. When stripping consumes the
    whole name, the bare target path is returned.
    """
    remainder = source_dataset.lstrip_path(self.args.strip_path)
    if not remainder:
        return self.args.target_path
    return self.args.target_path + "/" + remainder
def check_target_names(self, source_node, source_datasets, target_node):
    """Check all target names for collisions etc. due to strip-options.

    Raises an Exception when two source datasets would map onto the same
    target path (which can happen with aggressive --strip-path values).
    """
    self.debug("Checking target names:")
    seen = {}
    for dataset in source_datasets:
        name = self.make_target_name(dataset)
        dataset.debug("-> {}".format(name))
        if name in seen:
            raise Exception("Target collision: Target path {} encountered twice, due to: {} and {}".format(name, dataset, seen[name]))
        seen[name] = dataset
# NOTE: this method also uses self.args. args that need extra processing are passed as function parameters: # NOTE: this method also uses self.args. args that need extra processing are passed as function parameters:
def sync_datasets(self, source_node, source_datasets, target_node): def sync_datasets(self, source_node, source_datasets, target_node):
"""Sync datasets, or thin-only on both sides """Sync datasets, or thin-only on both sides
@ -311,6 +334,7 @@ class ZfsAutobackup(ZfsAuto):
# ensure parents exists # ensure parents exists
# TODO: this isnt perfect yet, in some cases it can create parents when it shouldn't. # TODO: this isnt perfect yet, in some cases it can create parents when it shouldn't.
if not self.args.no_send \ if not self.args.no_send \
and target_dataset.parent \
and target_dataset.parent not in target_datasets \ and target_dataset.parent not in target_datasets \
and not target_dataset.parent.exists: and not target_dataset.parent.exists:
target_dataset.parent.create_filesystem(parents=True) target_dataset.parent.create_filesystem(parents=True)
@ -331,7 +355,7 @@ class ZfsAutobackup(ZfsAuto):
destroy_incompatible=self.args.destroy_incompatible, destroy_incompatible=self.args.destroy_incompatible,
send_pipes=send_pipes, recv_pipes=recv_pipes, send_pipes=send_pipes, recv_pipes=recv_pipes,
decrypt=self.args.decrypt, encrypt=self.args.encrypt, decrypt=self.args.decrypt, encrypt=self.args.encrypt,
zfs_compressed=self.args.zfs_compressed) zfs_compressed=self.args.zfs_compressed, force=self.args.force)
except Exception as e: except Exception as e:
# if self.args.progress: # if self.args.progress:
# self.clear_progress() # self.clear_progress()
@ -448,6 +472,9 @@ class ZfsAutobackup(ZfsAuto):
raise (Exception( raise (Exception(
"Target path '{}' does not exist. Please create this dataset first.".format(target_dataset))) "Target path '{}' does not exist. Please create this dataset first.".format(target_dataset)))
# check for collisions due to strip-path
self.check_target_names(source_node, source_datasets, target_node)
# do the actual sync # do the actual sync
# NOTE: even with no_send, no_thinning and no_snapshot it does a useful thing because it checks the common snapshots and shows incompatible snapshots # NOTE: even with no_send, no_thinning and no_snapshot it does a useful thing because it checks the common snapshots and shows incompatible snapshots
fail_count = self.sync_datasets( fail_count = self.sync_datasets(

View File

@ -79,7 +79,11 @@ class ZfsDataset:
Args: Args:
:type count: int :type count: int
""" """
return "/".join(self.split_path()[count:]) components=self.split_path()
if count>len(components):
raise Exception("Trying to strip too much from path ({} items from {})".format(count, self.name))
return "/".join(components[count:])
def rstrip_path(self, count): def rstrip_path(self, count):
"""return name with last count components stripped """return name with last count components stripped
@ -184,11 +188,17 @@ class ZfsDataset:
parent according to path parent according to path
we cache this so everything in the parent that is cached also stays. we cache this so everything in the parent that is cached also stays.
returns None if there is no parent.
""" """
if self.is_snapshot: if self.is_snapshot:
return self.zfs_node.get_dataset(self.filesystem_name) return self.zfs_node.get_dataset(self.filesystem_name)
else: else:
return self.zfs_node.get_dataset(self.rstrip_path(1)) stripped=self.rstrip_path(1)
if stripped:
return self.zfs_node.get_dataset(stripped)
else:
return None
# NOTE: unused for now # NOTE: unused for now
# def find_prev_snapshot(self, snapshot, also_other_snapshots=False): # def find_prev_snapshot(self, snapshot, also_other_snapshots=False):
@ -584,7 +594,7 @@ class ZfsDataset:
return output_pipe return output_pipe
def recv_pipe(self, pipe, features, recv_pipes, filter_properties=None, set_properties=None, ignore_exit_code=False): def recv_pipe(self, pipe, features, recv_pipes, filter_properties=None, set_properties=None, ignore_exit_code=False, force=False):
"""starts a zfs recv for this snapshot and uses pipe as input """starts a zfs recv for this snapshot and uses pipe as input
note: you can call it on either a snapshot or filesystem object. The note: you can call it on either a snapshot or filesystem object. The
@ -625,6 +635,9 @@ class ZfsDataset:
# verbose output # verbose output
cmd.append("-v") cmd.append("-v")
if force:
cmd.append("-F")
if 'extensible_dataset' in features and "-s" in self.zfs_node.supported_recv_options: if 'extensible_dataset' in features and "-s" in self.zfs_node.supported_recv_options:
# support resuming # support resuming
self.debug("Enabled resume support") self.debug("Enabled resume support")
@ -655,7 +668,7 @@ class ZfsDataset:
def transfer_snapshot(self, target_snapshot, features, prev_snapshot, show_progress, def transfer_snapshot(self, target_snapshot, features, prev_snapshot, show_progress,
filter_properties, set_properties, ignore_recv_exit_code, resume_token, filter_properties, set_properties, ignore_recv_exit_code, resume_token,
raw, send_properties, write_embedded, send_pipes, recv_pipes, zfs_compressed): raw, send_properties, write_embedded, send_pipes, recv_pipes, zfs_compressed, force):
"""transfer this snapshot to target_snapshot. specify prev_snapshot for """transfer this snapshot to target_snapshot. specify prev_snapshot for
incremental transfer incremental transfer
@ -696,7 +709,7 @@ class ZfsDataset:
pipe = self.send_pipe(features=features, show_progress=show_progress, prev_snapshot=prev_snapshot, pipe = self.send_pipe(features=features, show_progress=show_progress, prev_snapshot=prev_snapshot,
resume_token=resume_token, raw=raw, send_properties=send_properties, write_embedded=write_embedded, send_pipes=send_pipes, zfs_compressed=zfs_compressed) resume_token=resume_token, raw=raw, send_properties=send_properties, write_embedded=write_embedded, send_pipes=send_pipes, zfs_compressed=zfs_compressed)
target_snapshot.recv_pipe(pipe, features=features, filter_properties=filter_properties, target_snapshot.recv_pipe(pipe, features=features, filter_properties=filter_properties,
set_properties=set_properties, ignore_exit_code=ignore_recv_exit_code, recv_pipes=recv_pipes) set_properties=set_properties, ignore_exit_code=ignore_recv_exit_code, recv_pipes=recv_pipes, force=force)
def abort_resume(self): def abort_resume(self):
"""abort current resume state""" """abort current resume state"""
@ -987,7 +1000,7 @@ class ZfsDataset:
def sync_snapshots(self, target_dataset, features, show_progress, filter_properties, set_properties, def sync_snapshots(self, target_dataset, features, show_progress, filter_properties, set_properties,
ignore_recv_exit_code, holds, rollback, decrypt, encrypt, also_other_snapshots, ignore_recv_exit_code, holds, rollback, decrypt, encrypt, also_other_snapshots,
no_send, destroy_incompatible, send_pipes, recv_pipes, zfs_compressed): no_send, destroy_incompatible, send_pipes, recv_pipes, zfs_compressed, force):
"""sync this dataset's snapshots to target_dataset, while also thinning """sync this dataset's snapshots to target_dataset, while also thinning
out old snapshots along the way. out old snapshots along the way.
@ -1008,6 +1021,8 @@ class ZfsDataset:
:type destroy_incompatible: bool :type destroy_incompatible: bool
""" """
self.verbose("sending to {}".format(target_dataset))
(common_snapshot, start_snapshot, source_obsoletes, target_obsoletes, target_keeps, (common_snapshot, start_snapshot, source_obsoletes, target_obsoletes, target_keeps,
incompatible_target_snapshots) = \ incompatible_target_snapshots) = \
self._plan_sync(target_dataset=target_dataset, also_other_snapshots=also_other_snapshots) self._plan_sync(target_dataset=target_dataset, also_other_snapshots=also_other_snapshots)
@ -1070,7 +1085,9 @@ class ZfsDataset:
filter_properties=active_filter_properties, filter_properties=active_filter_properties,
set_properties=active_set_properties, set_properties=active_set_properties,
ignore_recv_exit_code=ignore_recv_exit_code, ignore_recv_exit_code=ignore_recv_exit_code,
resume_token=resume_token, write_embedded=write_embedded, raw=raw, send_properties=send_properties, send_pipes=send_pipes, recv_pipes=recv_pipes, zfs_compressed=zfs_compressed) resume_token=resume_token, write_embedded=write_embedded, raw=raw,
send_properties=send_properties, send_pipes=send_pipes,
recv_pipes=recv_pipes, zfs_compressed=zfs_compressed, force=force)
resume_token = None resume_token = None