mirror of
https://github.com/psy0rz/zfs_autobackup.git
synced 2025-04-11 22:40:01 +03:00
merged v3.1.2-rc2
This commit is contained in:
commit
44c6896ddd
@ -420,33 +420,13 @@ test_target1/fs2/sub
|
||||
test_target1/fs2/sub@test-20101111000000
|
||||
""")
|
||||
|
||||
# def test_strippath_toomuch(self):
|
||||
# with patch('time.strftime', return_value="test-20101111000000"):
|
||||
# self.assertFalse(
|
||||
# ZfsAutobackup("test test_target1 --verbose --strip-path=2 --no-progress".split(" ")).run())
|
||||
#
|
||||
# r = shelltest("zfs list -H -o name -r -t all " + TEST_POOLS)
|
||||
# self.assertMultiLineEqual(r, """
|
||||
# test_source1
|
||||
# test_source1/fs1
|
||||
# test_source1/fs1@test-20101111000000
|
||||
# test_source1/fs1/sub
|
||||
# test_source1/fs1/sub@test-20101111000000
|
||||
# test_source2
|
||||
# test_source2/fs2
|
||||
# test_source2/fs2/sub
|
||||
# test_source2/fs2/sub@test-20101111000000
|
||||
# test_source2/fs3
|
||||
# test_source2/fs3/sub
|
||||
# test_target1
|
||||
# test_target1/fs1
|
||||
# test_target1/fs1@test-20101111000000
|
||||
# test_target1/fs1/sub
|
||||
# test_target1/fs1/sub@test-20101111000000
|
||||
# test_target1/fs2
|
||||
# test_target1/fs2/sub
|
||||
# test_target1/fs2/sub@test-20101111000000
|
||||
# """)
|
||||
def test_strippath_collision(self):
    """--strip-path=2 must make the run fail: stripping two leading path
    components maps distinct source datasets onto the same target path, and
    the backup aborts with an Exception whose message contains "collision"."""
    # assertRaisesRegexp is the deprecated Python 2 alias; it was removed in
    # Python 3.12 — use assertRaisesRegex instead.
    with self.assertRaisesRegex(Exception, "collision"):
        ZfsAutobackup("test test_target1 --verbose --strip-path=2 --no-progress --debug".split(" ")).run()
|
||||
|
||||
def test_strippath_toomuch(self):
    """--strip-path=3 strips more components than the source paths contain;
    lstrip_path raises an Exception whose message contains "too much"."""
    # assertRaisesRegexp is the deprecated Python 2 alias; it was removed in
    # Python 3.12 — use assertRaisesRegex instead.
    with self.assertRaisesRegex(Exception, "too much"):
        ZfsAutobackup("test test_target1 --verbose --strip-path=3 --no-progress --debug".split(" ")).run()
|
||||
|
||||
def test_clearrefres(self):
|
||||
|
||||
|
@ -12,7 +12,7 @@ from .Thinner import Thinner
|
||||
from .ZfsDataset import ZfsDataset
|
||||
from .ZfsNode import ZfsNode
|
||||
from .ThinnerRule import ThinnerRule
|
||||
|
||||
import os.path
|
||||
|
||||
class ZfsAutobackup(ZfsAuto):
|
||||
"""The main zfs-autobackup class. Start here, at run() :)"""
|
||||
@ -281,6 +281,29 @@ class ZfsAutobackup(ZfsAuto):
|
||||
|
||||
return ret
|
||||
|
||||
def make_target_name(self, source_dataset):
    """Construct the target dataset name for source_dataset.

    Strips self.args.strip_path leading components from the source name and
    joins the remainder onto self.args.target_path. When stripping consumes
    the whole name, the bare target path is returned.
    """
    remainder = source_dataset.lstrip_path(self.args.strip_path)
    if not remainder:
        return self.args.target_path
    return "{}/{}".format(self.args.target_path, remainder)
|
||||
|
||||
def check_target_names(self, source_node, source_datasets, target_node):
    """check all target names for collisions etc due to strip-options

    Raises an Exception when two source datasets map to the same target path.
    """

    self.debug("Checking target names:")

    seen = {}
    for dataset in source_datasets:
        name = self.make_target_name(dataset)
        dataset.debug("-> {}".format(name))

        previous = seen.get(name)
        if previous is not None:
            raise Exception(
                "Target collision: Target path {} encountered twice, due to: {} and {}".format(
                    name, dataset, previous))

        seen[name] = dataset
|
||||
|
||||
# NOTE: this method also uses self.args. args that need extra processing are passed as function parameters:
|
||||
def sync_datasets(self, source_node, source_datasets, target_node):
|
||||
"""Sync datasets, or thin-only on both sides
|
||||
@ -311,6 +334,7 @@ class ZfsAutobackup(ZfsAuto):
|
||||
# ensure parents exists
|
||||
# TODO: this isnt perfect yet, in some cases it can create parents when it shouldn't.
|
||||
if not self.args.no_send \
|
||||
and target_dataset.parent \
|
||||
and target_dataset.parent not in target_datasets \
|
||||
and not target_dataset.parent.exists:
|
||||
target_dataset.parent.create_filesystem(parents=True)
|
||||
@ -331,7 +355,7 @@ class ZfsAutobackup(ZfsAuto):
|
||||
destroy_incompatible=self.args.destroy_incompatible,
|
||||
send_pipes=send_pipes, recv_pipes=recv_pipes,
|
||||
decrypt=self.args.decrypt, encrypt=self.args.encrypt,
|
||||
zfs_compressed=self.args.zfs_compressed)
|
||||
zfs_compressed=self.args.zfs_compressed, force=self.args.force)
|
||||
except Exception as e:
|
||||
# if self.args.progress:
|
||||
# self.clear_progress()
|
||||
@ -448,6 +472,9 @@ class ZfsAutobackup(ZfsAuto):
|
||||
raise (Exception(
|
||||
"Target path '{}' does not exist. Please create this dataset first.".format(target_dataset)))
|
||||
|
||||
# check for collisions due to strip-path
|
||||
self.check_target_names(source_node, source_datasets, target_node)
|
||||
|
||||
# do the actual sync
|
||||
# NOTE: even with no_send, no_thinning and no_snapshot this still does something useful, because it checks the common snapshots and shows incompatible snapshots
|
||||
fail_count = self.sync_datasets(
|
||||
|
@ -79,7 +79,11 @@ class ZfsDataset:
|
||||
Args:
|
||||
:type count: int
|
||||
"""
|
||||
return "/".join(self.split_path()[count:])
|
||||
components=self.split_path()
|
||||
if count>len(components):
|
||||
raise Exception("Trying to strip too much from path ({} items from {})".format(count, self.name))
|
||||
|
||||
return "/".join(components[count:])
|
||||
|
||||
def rstrip_path(self, count):
|
||||
"""return name with last count components stripped
|
||||
@ -184,11 +188,17 @@ class ZfsDataset:
|
||||
parent according to path
|
||||
|
||||
we cache this so everything in the parent that is cached also stays.
|
||||
|
||||
returns None if there is no parent.
|
||||
"""
|
||||
if self.is_snapshot:
|
||||
return self.zfs_node.get_dataset(self.filesystem_name)
|
||||
else:
|
||||
return self.zfs_node.get_dataset(self.rstrip_path(1))
|
||||
stripped=self.rstrip_path(1)
|
||||
if stripped:
|
||||
return self.zfs_node.get_dataset(stripped)
|
||||
else:
|
||||
return None
|
||||
|
||||
# NOTE: unused for now
|
||||
# def find_prev_snapshot(self, snapshot, also_other_snapshots=False):
|
||||
@ -584,7 +594,7 @@ class ZfsDataset:
|
||||
|
||||
return output_pipe
|
||||
|
||||
def recv_pipe(self, pipe, features, recv_pipes, filter_properties=None, set_properties=None, ignore_exit_code=False):
|
||||
def recv_pipe(self, pipe, features, recv_pipes, filter_properties=None, set_properties=None, ignore_exit_code=False, force=False):
|
||||
"""starts a zfs recv for this snapshot and uses pipe as input
|
||||
|
||||
note: you can it both on a snapshot or filesystem object. The
|
||||
@ -625,6 +635,9 @@ class ZfsDataset:
|
||||
# verbose output
|
||||
cmd.append("-v")
|
||||
|
||||
if force:
|
||||
cmd.append("-F")
|
||||
|
||||
if 'extensible_dataset' in features and "-s" in self.zfs_node.supported_recv_options:
|
||||
# support resuming
|
||||
self.debug("Enabled resume support")
|
||||
@ -655,7 +668,7 @@ class ZfsDataset:
|
||||
|
||||
def transfer_snapshot(self, target_snapshot, features, prev_snapshot, show_progress,
|
||||
filter_properties, set_properties, ignore_recv_exit_code, resume_token,
|
||||
raw, send_properties, write_embedded, send_pipes, recv_pipes, zfs_compressed):
|
||||
raw, send_properties, write_embedded, send_pipes, recv_pipes, zfs_compressed, force):
|
||||
"""transfer this snapshot to target_snapshot. specify prev_snapshot for
|
||||
incremental transfer
|
||||
|
||||
@ -696,7 +709,7 @@ class ZfsDataset:
|
||||
pipe = self.send_pipe(features=features, show_progress=show_progress, prev_snapshot=prev_snapshot,
|
||||
resume_token=resume_token, raw=raw, send_properties=send_properties, write_embedded=write_embedded, send_pipes=send_pipes, zfs_compressed=zfs_compressed)
|
||||
target_snapshot.recv_pipe(pipe, features=features, filter_properties=filter_properties,
|
||||
set_properties=set_properties, ignore_exit_code=ignore_recv_exit_code, recv_pipes=recv_pipes)
|
||||
set_properties=set_properties, ignore_exit_code=ignore_recv_exit_code, recv_pipes=recv_pipes, force=force)
|
||||
|
||||
def abort_resume(self):
|
||||
"""abort current resume state"""
|
||||
@ -987,7 +1000,7 @@ class ZfsDataset:
|
||||
|
||||
def sync_snapshots(self, target_dataset, features, show_progress, filter_properties, set_properties,
|
||||
ignore_recv_exit_code, holds, rollback, decrypt, encrypt, also_other_snapshots,
|
||||
no_send, destroy_incompatible, send_pipes, recv_pipes, zfs_compressed):
|
||||
no_send, destroy_incompatible, send_pipes, recv_pipes, zfs_compressed, force):
|
||||
"""sync this dataset's snapshots to target_dataset, while also thinning
|
||||
out old snapshots along the way.
|
||||
|
||||
@ -1008,6 +1021,8 @@ class ZfsDataset:
|
||||
:type destroy_incompatible: bool
|
||||
"""
|
||||
|
||||
self.verbose("sending to {}".format(target_dataset))
|
||||
|
||||
(common_snapshot, start_snapshot, source_obsoletes, target_obsoletes, target_keeps,
|
||||
incompatible_target_snapshots) = \
|
||||
self._plan_sync(target_dataset=target_dataset, also_other_snapshots=also_other_snapshots)
|
||||
@ -1070,7 +1085,9 @@ class ZfsDataset:
|
||||
filter_properties=active_filter_properties,
|
||||
set_properties=active_set_properties,
|
||||
ignore_recv_exit_code=ignore_recv_exit_code,
|
||||
resume_token=resume_token, write_embedded=write_embedded, raw=raw, send_properties=send_properties, send_pipes=send_pipes, recv_pipes=recv_pipes, zfs_compressed=zfs_compressed)
|
||||
resume_token=resume_token, write_embedded=write_embedded, raw=raw,
|
||||
send_properties=send_properties, send_pipes=send_pipes,
|
||||
recv_pipes=recv_pipes, zfs_compressed=zfs_compressed, force=force)
|
||||
|
||||
resume_token = None
|
||||
|
||||
|
Loading…
x
Reference in New Issue
Block a user