forked from third-party-mirrors/zfs_autobackup
completed --no-thinning option. fixes #54
commit a4155f970e
parent 0c9d14bf32

tests/test_zfsautobackup31.py  (new file, +49 lines)
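In short, this commit finishes wiring --no-thinning ("Do not destroy any snapshots.") from the ZfsAutobackup argument parser down into ZfsDataset.sync_snapshots, so a run with the flag still snapshots and sends but skips the thinning destroys. The new test below exercises it roughly like this (a condensed view of the test's own calls, with the time patching and assertions stripped; ZfsAutobackup comes from the test's basetest helpers):

# Condensed from tests/test_zfsautobackup31.py below; not an additional API.
# First run creates the initial snapshots; the second uses keep rules that would
# normally thin aggressively, but --no-thinning must leave every snapshot in place.
ZfsAutobackup("test test_target1 --verbose --allow-empty".split(" ")).run()
ZfsAutobackup("test test_target1 --verbose --allow-empty "
              "--keep-target=0 --keep-source=0 --no-thinning".split(" ")).run()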
@@ -0,0 +1,49 @@
+from basetest import *
+import time
+
+
+class TestZfsAutobackup31(unittest2.TestCase):
+
+    def setUp(self):
+        prepare_zpools()
+        self.longMessage=True
+
+    def test_no_thinning(self):
+
+        with patch('time.strftime', return_value="20101111000000"):
+            self.assertFalse(ZfsAutobackup("test test_target1 --verbose --allow-empty".split(" ")).run())
+
+        with patch('time.strftime', return_value="20101111000001"):
+            self.assertFalse(ZfsAutobackup("test test_target1 --verbose --allow-empty --keep-target=0 --keep-source=0 --no-thinning".split(" ")).run())
+
+        r=shelltest("zfs list -H -o name -r -t all "+TEST_POOLS)
+        self.assertMultiLineEqual(r,"""
+test_source1
+test_source1/fs1
+test_source1/fs1@test-20101111000000
+test_source1/fs1@test-20101111000001
+test_source1/fs1/sub
+test_source1/fs1/sub@test-20101111000000
+test_source1/fs1/sub@test-20101111000001
+test_source2
+test_source2/fs2
+test_source2/fs2/sub
+test_source2/fs2/sub@test-20101111000000
+test_source2/fs2/sub@test-20101111000001
+test_source2/fs3
+test_source2/fs3/sub
+test_target1
+test_target1/test_source1
+test_target1/test_source1/fs1
+test_target1/test_source1/fs1@test-20101111000000
+test_target1/test_source1/fs1@test-20101111000001
+test_target1/test_source1/fs1/sub
+test_target1/test_source1/fs1/sub@test-20101111000000
+test_target1/test_source1/fs1/sub@test-20101111000001
+test_target1/test_source2
+test_target1/test_source2/fs2
+test_target1/test_source2/fs2/sub
+test_target1/test_source2/fs2/sub@test-20101111000000
+test_target1/test_source2/fs2/sub@test-20101111000001
+""")
+
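The expected listing above is the actual check: under --keep-source=0 --keep-target=0 the older test-20101111000000 snapshots would normally be considered obsolete, so their survival on both source and target is what demonstrates that --no-thinning skipped destruction. For contrast, a hypothetical second run without the flag (not part of the test file) would be expected to thin them away:

# Hypothetical contrast, not in the test: same keep rules, thinning left enabled.
ZfsAutobackup("test test_target1 --verbose --allow-empty "
              "--keep-target=0 --keep-source=0".split(" ")).run()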
@@ -115,7 +115,7 @@ test_target1
     def test_supportedrecvoptions(self):
         logger=LogStub()
         description="[Source]"
-        #NOTE: this couldnt hang via ssh if we dont close filehandles properly. (which was a previous bug)
+        #NOTE: this could hang via ssh if we dont close filehandles properly. (which was a previous bug)
         node=ZfsNode("test", logger, description=description, ssh_to='localhost')
         self.assertIsInstance(node.supported_recv_options, list)
 
@@ -47,6 +47,9 @@ class ZfsAutobackup:
                             help='Don\'t create new snapshots (useful for finishing uncompleted backups, or cleanups)')
         parser.add_argument('--no-send', action='store_true',
                             help='Don\'t send snapshots (useful for cleanups, or if you want a serperate send-cronjob)')
+        parser.add_argument('--no-thinning', action='store_true', help="Do not destroy any snapshots.")
+        parser.add_argument('--no-holds', action='store_true',
+                            help='Don\'t hold snapshots. (Faster. Allows you to destroy common snapshot.)')
         parser.add_argument('--min-change', type=int, default=1,
                             help='Number of bytes written after which we consider a dataset changed (default %('
                                  'default)s)')
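Note that this hunk and the two that follow mostly reorder the option definitions: the --no-thinning line added here is the same line removed in the -103,7 hunk below, and --no-holds moves up from the -55,8 hunk below with a slightly expanded help text. Since both use action='store_true' they default to False, so behaviour only changes when the flag is actually passed. A minimal standalone illustration of that argparse behaviour (not project code):

import argparse

# store_true flags default to False, so thinning (and holds) stay enabled
# unless the user opts out explicitly.
parser = argparse.ArgumentParser()
parser.add_argument('--no-thinning', action='store_true', help="Do not destroy any snapshots.")

print(parser.parse_args([]).no_thinning)                 # False -> thinning still runs
print(parser.parse_args(['--no-thinning']).no_thinning)  # True  -> destroys are skipped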
@@ -55,8 +58,6 @@ class ZfsAutobackup:
         parser.add_argument('--ignore-replicated', action='store_true',
                             help='Ignore datasets that seem to be replicated some other way. (No changes since '
                                  'lastest snapshot. Useful for proxmox HA replication)')
-        parser.add_argument('--no-holds', action='store_true',
-                            help='Don\'t hold snapshots. (Faster)')
 
         parser.add_argument('--resume', action='store_true', help=argparse.SUPPRESS)
         parser.add_argument('--strip-path', default=0, type=int,
@@ -103,7 +104,6 @@ class ZfsAutobackup:
                             help='show zfs progress output. Enabled automaticly on ttys. (use --no-progress to disable)')
         parser.add_argument('--no-progress', action='store_true', help=argparse.SUPPRESS) # needed to workaround a zfs recv -v bug
 
-        parser.add_argument('--no-thinning', action='store_true', help="Do not destroy any snapshots.")
 
         # note args is the only global variable we use, since its a global readonly setting anyway
         args = parser.parse_args(argv)
@@ -249,7 +249,8 @@ class ZfsAutobackup:
                             holds=not self.args.no_holds, rollback=self.args.rollback,
                             raw=self.args.raw, also_other_snapshots=self.args.other_snapshots,
                             no_send=self.args.no_send,
-                            destroy_incompatible=self.args.destroy_incompatible)
+                            destroy_incompatible=self.args.destroy_incompatible,
+                            no_thinning=self.args.no_thinning)
                     except Exception as e:
                         fail_count = fail_count + 1
                         source_dataset.error("FAILED: " + str(e))
@@ -744,7 +744,7 @@ class ZfsDataset:
 
     def sync_snapshots(self, target_dataset, features, show_progress, filter_properties, set_properties,
                        ignore_recv_exit_code, holds, rollback, raw, also_other_snapshots,
-                       no_send, destroy_incompatible):
+                       no_send, destroy_incompatible, no_thinning):
         """sync this dataset's snapshots to target_dataset, while also thinning out old snapshots along the way."""
 
         (common_snapshot, start_snapshot, source_obsoletes, target_obsoletes, target_keeps,
@@ -753,9 +753,10 @@ class ZfsDataset:
 
         # NOTE: we do this because we dont want filesystems to fillup when backups keep failing.
         # Also usefull with no_send to still cleanup stuff.
-        self._pre_clean(
-            common_snapshot=common_snapshot, target_dataset=target_dataset,
-            target_keeps=target_keeps, target_obsoletes=target_obsoletes, source_obsoletes=source_obsoletes)
+        if not no_thinning:
+            self._pre_clean(
+                common_snapshot=common_snapshot, target_dataset=target_dataset,
+                target_keeps=target_keeps, target_obsoletes=target_obsoletes, source_obsoletes=source_obsoletes)
 
         # now actually transfer the snapshots, if we want
         if no_send:
@@ -800,15 +801,16 @@
                         prev_source_snapshot.release()
                         target_dataset.find_snapshot(prev_source_snapshot).release()
 
-                    # we may now destroy the previous source snapshot if its obsolete
-                    if prev_source_snapshot in source_obsoletes:
-                        prev_source_snapshot.destroy()
-
-                    # destroy the previous target snapshot if obsolete (usually this is only the common_snapshot,
-                    # the rest was already destroyed or will not be send)
-                    prev_target_snapshot = target_dataset.find_snapshot(prev_source_snapshot)
-                    if prev_target_snapshot in target_obsoletes:
-                        prev_target_snapshot.destroy()
+                    if not no_thinning:
+                        # we may now destroy the previous source snapshot if its obsolete
+                        if prev_source_snapshot in source_obsoletes:
+                            prev_source_snapshot.destroy()
+
+                        # destroy the previous target snapshot if obsolete (usually this is only the common_snapshot,
+                        # the rest was already destroyed or will not be send)
+                        prev_target_snapshot = target_dataset.find_snapshot(prev_source_snapshot)
+                        if prev_target_snapshot in target_obsoletes:
+                            prev_target_snapshot.destroy()
 
                 prev_source_snapshot = source_snapshot
             else:
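Taken together, the two ZfsDataset hunks gate the thinning destroys behind the new argument: the pre-send _pre_clean pass and the per-snapshot obsolete-snapshot destroys only run when no_thinning is false, while the obsolete lists themselves are still computed. A self-contained toy model of that guard (hypothetical FakeSnapshot/thin names, not project code), just to show the intended semantics:

class FakeSnapshot:
    """Stand-in for a ZFS snapshot; only records whether destroy() was called."""
    def __init__(self, name):
        self.name = name
        self.destroyed = False

    def destroy(self):
        self.destroyed = True


def thin(snapshots, obsoletes, no_thinning):
    """Destroy obsolete snapshots unless thinning is disabled."""
    if no_thinning:
        return
    for snapshot in snapshots:
        if snapshot in obsoletes:
            snapshot.destroy()


old = FakeSnapshot("test-20101111000000")
new = FakeSnapshot("test-20101111000001")

thin([old, new], obsoletes=[old], no_thinning=True)
assert not old.destroyed   # --no-thinning: even obsolete snapshots survive

thin([old, new], obsoletes=[old], no_thinning=False)
assert old.destroyed       # default: obsolete snapshots are destroyed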