Mirror of https://github.com/psy0rz/zfs_autobackup.git (synced 2025-04-17 22:52:20 +03:00)
renamed --ignore-replicated to --exclude-unchanged. tidied up and removed separate filter_replicated() step. #93, #95, #96
Commit 578fb1be4b (parent f9b16c050b)
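
In short: the user-facing flag --ignore-replicated becomes --exclude-unchanged, the old spelling survives as a hidden deprecated alias, and the separate filter_replicated() pass is folded into dataset selection itself. For orientation, a minimal sketch of driving the renamed flag; the argv values are illustrative, not taken from this diff, while the ZfsAutobackup(argv, print_arguments=...) entry point appears in the hunks below:

# Sketch only: construct the main class with the new and the old spelling.
from zfs_autobackup.ZfsAutobackup import ZfsAutobackup

# New spelling: skip datasets with no changes since their last snapshot.
ZfsAutobackup(["--exclude-unchanged", "test", "test_target1"], print_arguments=False)

# Old spelling still parses (its help is suppressed); __init__ warns and then
# sets args.exclude_unchanged = True, per the hunk at @@ -177,6 +176,10 @@ below.
ZfsAutobackup(["--ignore-replicated", "test", "test_target1"], print_arguments=False)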
@@ -15,7 +15,7 @@ class TestZfsNode(unittest2.TestCase):
         node = ZfsNode("test", logger, description=description)

         with self.subTest("first snapshot"):
-            node.consistent_snapshot(node.selected_datasets(exclude_paths=[], exclude_received=False), "test-1", 100000)
+            node.consistent_snapshot(node.selected_datasets(exclude_paths=[], exclude_received=False, exclude_unchanged=False, min_change=200000), "test-1", 100000)
             r = shelltest("zfs list -H -o name -r -t all " + TEST_POOLS)
             self.assertEqual(r, """
 test_source1
@@ -33,7 +33,7 @@ test_target1
 """)

         with self.subTest("second snapshot, no changes, no snapshot"):
-            node.consistent_snapshot(node.selected_datasets(exclude_paths=[], exclude_received=False), "test-2", 1)
+            node.consistent_snapshot(node.selected_datasets(exclude_paths=[], exclude_received=False, exclude_unchanged=False, min_change=200000), "test-2", 1)
             r = shelltest("zfs list -H -o name -r -t all " + TEST_POOLS)
             self.assertEqual(r, """
 test_source1
@@ -51,7 +51,7 @@ test_target1
 """)

         with self.subTest("second snapshot, no changes, empty snapshot"):
-            node.consistent_snapshot(node.selected_datasets(exclude_paths=[], exclude_received=False), "test-2", 0)
+            node.consistent_snapshot(node.selected_datasets(exclude_paths=[], exclude_received=False, exclude_unchanged=False, min_change=200000), "test-2", 0)
             r = shelltest("zfs list -H -o name -r -t all " + TEST_POOLS)
             self.assertEqual(r, """
 test_source1
@@ -79,7 +79,7 @@ test_target1
         with self.subTest("Test if all cmds are executed correctly (no failures)"):
             with OutputIO() as buf:
                 with redirect_stdout(buf):
-                    node.consistent_snapshot(node.selected_datasets(exclude_paths=[], exclude_received=False), "test-1",
+                    node.consistent_snapshot(node.selected_datasets(exclude_paths=[], exclude_received=False, exclude_unchanged=False, min_change=1), "test-1",
                                              0,
                                              pre_snapshot_cmds=["echo pre1", "echo pre2"],
                                              post_snapshot_cmds=["echo post1 >&2", "echo post2 >&2"]
@@ -95,7 +95,7 @@ test_target1
             with OutputIO() as buf:
                 with redirect_stdout(buf):
                     with self.assertRaises(ExecuteError):
-                        node.consistent_snapshot(node.selected_datasets(exclude_paths=[], exclude_received=False), "test-1",
+                        node.consistent_snapshot(node.selected_datasets(exclude_paths=[], exclude_received=False, exclude_unchanged=False, min_change=1), "test-1",
                                                  0,
                                                  pre_snapshot_cmds=["echo pre1", "false", "echo pre2"],
                                                  post_snapshot_cmds=["echo post1", "false", "echo post2"]
@@ -112,7 +112,7 @@ test_target1
                 with redirect_stdout(buf):
                     with self.assertRaises(ExecuteError):
                         #same snapshot name as before so it fails
-                        node.consistent_snapshot(node.selected_datasets(exclude_paths=[], exclude_received=False), "test-1",
+                        node.consistent_snapshot(node.selected_datasets(exclude_paths=[], exclude_received=False, exclude_unchanged=False, min_change=1), "test-1",
                                                  0,
                                                  pre_snapshot_cmds=["echo pre1", "echo pre2"],
                                                  post_snapshot_cmds=["echo post1", "echo post2"]
@@ -126,10 +126,19 @@ test_target1

     def test_getselected(self):

+        # should be excluded by property
+        shelltest("zfs create test_source1/fs1/subexcluded")
+        shelltest("zfs set autobackup:test=false test_source1/fs1/subexcluded")
+
+        # should be excluded by being unchanged
+        shelltest("zfs create test_source1/fs1/unchanged")
+        shelltest("zfs snapshot test_source1/fs1/unchanged@somesnapshot")
+
         logger = LogStub()
         description = "[Source]"
         node = ZfsNode("test", logger, description=description)
-        s = pformat(node.selected_datasets(exclude_paths=[], exclude_received=False))
+        s = pformat(node.selected_datasets(exclude_paths=[], exclude_received=False, exclude_unchanged=True, min_change=1))
         print(s)

         # basics
@@ -137,11 +146,6 @@ test_target1
                              (local): test_source1/fs1/sub,
                              (local): test_source2/fs2/sub]""")

-        # caching, so expect same result after changing it
-        subprocess.check_call("zfs set autobackup:test=true test_source2/fs3", shell=True)
-        self.assertEqual(s, """[(local): test_source1/fs1,
-                             (local): test_source1/fs1/sub,
-                             (local): test_source2/fs2/sub]""")
-
     def test_validcommand(self):
         logger = LogStub()
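
The new test fixtures pin down both exclusion paths: a child dataset excluded explicitly via autobackup:test=false, and one excluded because it has a snapshot but no writes since. A condensed sketch of the same scenario, reusing the helpers above (assumes the standard test pool setup; selected_datasets returns ZfsDataset objects with a .name attribute):

# Sketch: a dataset with a snapshot and no writes since it is dropped when
# exclude_unchanged=True and min_change=1.
shelltest("zfs create test_source1/fs1/unchanged")
shelltest("zfs snapshot test_source1/fs1/unchanged@somesnapshot")

node = ZfsNode("test", LogStub(), description="[Source]")
selected = node.selected_datasets(exclude_paths=[], exclude_received=False,
                                  exclude_unchanged=True, min_change=1)
assert not any(d.name == "test_source1/fs1/unchanged" for d in selected)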
@@ -11,12 +11,10 @@ from zfs_autobackup.ZfsNode import ZfsNode
 from zfs_autobackup.ThinnerRule import ThinnerRule


-
-
 class ZfsAutobackup:
     """main class"""

-    VERSION = "3.1"
+    VERSION = "3.1.1-beta1"
     HEADER = "zfs-autobackup v{} - (c)2021 E.H.Eefting (edwin@datux.nl)".format(VERSION)

     def __init__(self, argv, print_arguments=True):
@@ -63,18 +61,16 @@
                                  'default)s)')
         parser.add_argument('--allow-empty', action='store_true',
                             help='If nothing has changed, still create empty snapshots. (same as --min-change=0)')
-        parser.add_argument('--ignore-replicated', action='store_true',
-                            help='Ignore datasets that seem to be replicated some other way. (No changes since '
-                                 'lastest snapshot. Useful for proxmox HA replication)')
-
+        parser.add_argument('--ignore-replicated', action='store_true', help=argparse.SUPPRESS)
+        parser.add_argument('--exclude-unchanged', action='store_true',
+                            help='Exclude datasets that have no changes since any last snapshot. (Useful in combination with proxmox HA replication)')
         parser.add_argument('--exclude-received', action='store_true',
-                            help='Ignore datasets that have the origin of their autobackup: property as "received". '
-                                 'This can avoid recursive replication between two backup partners. You would usually '
-                                 'use --ignore-replicated instead of this option.')
+                            help='Exclude datasets that have the origin of their autobackup: property as "received". '
+                                 'This can avoid recursive replication between two backup partners.')
         parser.add_argument('--strip-path', metavar='N', default=0, type=int,
                             help='Number of directories to strip from target path (use 1 when cloning zones between 2 '
                                  'SmartOS machines)')
         # parser.add_argument('--buffer', default="", help='Use mbuffer with specified size to speedup zfs transfer.
         # (e.g. --buffer 1G) Will also show nice progress output.')

         parser.add_argument('--clear-refreservation', action='store_true',
                             help='Filter "refreservation" property. (recommended, safes space. same as '
@@ -125,16 +121,19 @@
         parser.add_argument('--resume', action='store_true', help=argparse.SUPPRESS)
         parser.add_argument('--raw', action='store_true', help=argparse.SUPPRESS)

-        #these things all do stuff by piping zfs send/recv IO
+        # these things all do stuff by piping zfs send/recv IO
         parser.add_argument('--send-pipe', metavar="COMMAND", default=[], action='append',
                             help='pipe zfs send output through COMMAND (can be used multiple times)')
         parser.add_argument('--recv-pipe', metavar="COMMAND", default=[], action='append',
                             help='pipe zfs recv input through COMMAND (can be used multiple times)')
-        parser.add_argument('--compress', metavar='TYPE', default=None, nargs='?', const='zstd-adapt', choices=compressors.choices(), help='Use compression during transfer, defaults to zstd-adapt if TYPE is not specified. ({})'.format(", ".join(compressors.choices())))
-        parser.add_argument('--rate', metavar='DATARATE', default=None, help='Limit data transfer rate (e.g. 128K. requires mbuffer.)')
-        parser.add_argument('--buffer', metavar='SIZE', default=None, help='Add zfs send and recv buffers to smooth out IO bursts. (e.g. 128M. requires mbuffer)')
-
-
+        parser.add_argument('--compress', metavar='TYPE', default=None, nargs='?', const='zstd-adapt',
+                            choices=compressors.choices(),
+                            help='Use compression during transfer, defaults to zstd-adapt if TYPE is not specified. ({})'.format(
+                                ", ".join(compressors.choices())))
+        parser.add_argument('--rate', metavar='DATARATE', default=None,
+                            help='Limit data transfer rate (e.g. 128K. requires mbuffer.)')
+        parser.add_argument('--buffer', metavar='SIZE', default=None,
+                            help='Add zfs send and recv buffers to smooth out IO bursts. (e.g. 128M. requires mbuffer)')

         # note args is the only global variable we use, since its a global readonly setting anyway
         args = parser.parse_args(argv)
@@ -177,6 +176,10 @@
         if args.compress and args.zfs_compressed:
             self.warning("Using --compress with --zfs-compressed, might be inefficient.")

+        if args.ignore_replicated:
+            self.warning("--ignore-replicated has been renamed, using --exclude-unchanged")
+            args.exclude_unchanged = True
+
     def verbose(self, txt):
         self.log.verbose(txt)

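The rename follows a standard argparse pattern: keep the old flag parseable but hide it with help=argparse.SUPPRESS, then remap it right after parsing. A self-contained sketch of that pattern (generic script, not this codebase):

import argparse

parser = argparse.ArgumentParser()
# New, documented flag.
parser.add_argument('--exclude-unchanged', action='store_true',
                    help='Exclude datasets that have no changes since any last snapshot.')
# Old flag is still accepted but no longer listed in --help.
parser.add_argument('--ignore-replicated', action='store_true', help=argparse.SUPPRESS)

args = parser.parse_args(['--ignore-replicated'])
if args.ignore_replicated:
    print("--ignore-replicated has been renamed, using --exclude-unchanged")
    args.exclude_unchanged = True

assert args.exclude_unchanged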
@@ -289,12 +292,12 @@
     def get_send_pipes(self, logger):
         """determine the zfs send pipe"""

-        ret=[]
+        ret = []

         # IO buffer
         if self.args.buffer:
             logger("zfs send buffer : {}".format(self.args.buffer))
-            ret.extend([ ExecuteNode.PIPE, "mbuffer", "-q", "-s128k", "-m"+self.args.buffer ])
+            ret.extend([ExecuteNode.PIPE, "mbuffer", "-q", "-s128k", "-m" + self.args.buffer])

         # custom pipes
         for send_pipe in self.args.send_pipe:
@@ -303,27 +306,26 @@
             logger("zfs send custom pipe : {}".format(send_pipe))

         # compression
-        if self.args.compress!=None:
+        if self.args.compress != None:
             ret.append(ExecuteNode.PIPE)
-            cmd=compressors.compress_cmd(self.args.compress)
+            cmd = compressors.compress_cmd(self.args.compress)
             ret.extend(cmd)
             logger("zfs send compression : {}".format(" ".join(cmd)))

         # transfer rate
         if self.args.rate:
             logger("zfs send transfer rate : {}".format(self.args.rate))
-            ret.extend([ ExecuteNode.PIPE, "mbuffer", "-q", "-s128k", "-m16M", "-R"+self.args.rate ])
-
+            ret.extend([ExecuteNode.PIPE, "mbuffer", "-q", "-s128k", "-m16M", "-R" + self.args.rate])

         return ret

     def get_recv_pipes(self, logger):

-        ret=[]
+        ret = []

         # decompression
-        if self.args.compress!=None:
-            cmd=compressors.decompress_cmd(self.args.compress)
+        if self.args.compress != None:
+            cmd = compressors.decompress_cmd(self.args.compress)
             ret.extend(cmd)
             ret.append(ExecuteNode.PIPE)
             logger("zfs recv decompression : {}".format(" ".join(cmd)))
@@ -336,10 +338,10 @@

         # IO buffer
         if self.args.buffer:
-            #only add second buffer if its usefull. (e.g. non local transfer or other pipes active)
-            if self.args.ssh_source!=None or self.args.ssh_target!=None or self.args.recv_pipe or self.args.send_pipe or self.args.compress!=None:
+            # only add second buffer if its usefull. (e.g. non local transfer or other pipes active)
+            if self.args.ssh_source != None or self.args.ssh_target != None or self.args.recv_pipe or self.args.send_pipe or self.args.compress != None:
                 logger("zfs recv buffer : {}".format(self.args.buffer))
-                ret.extend(["mbuffer", "-q", "-s128k", "-m"+self.args.buffer, ExecuteNode.PIPE ])
+                ret.extend(["mbuffer", "-q", "-s128k", "-m" + self.args.buffer, ExecuteNode.PIPE])

         return ret

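For context: judging from how ExecuteNode.PIPE is interleaved above, get_send_pipes()/get_recv_pipes() return flat argv lists in which PIPE marks where one command ends and the next begins. A hedged sketch of what the send side collects for --buffer 128M --rate 1M (compression omitted; PIPE is a stand-in here, not the real marker object):

# Sketch: reproduce the send-pipe list for --buffer 128M --rate 1M.
PIPE = "|"  # stand-in for ExecuteNode.PIPE in this sketch

buffer_size, rate = "128M", "1M"
ret = []
ret.extend([PIPE, "mbuffer", "-q", "-s128k", "-m" + buffer_size])    # IO buffer
ret.extend([PIPE, "mbuffer", "-q", "-s128k", "-m16M", "-R" + rate])  # rate limit

# Conceptually: zfs send ... | mbuffer -q -s128k -m128M | mbuffer -q -s128k -m16M -R1M
print(ret)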
@@ -351,8 +353,8 @@
         :type source_node: ZfsNode
         """

-        send_pipes=self.get_send_pipes(source_node.verbose)
-        recv_pipes=self.get_recv_pipes(target_node.verbose)
+        send_pipes = self.get_send_pipes(source_node.verbose)
+        recv_pipes = self.get_recv_pipes(target_node.verbose)

         fail_count = 0
         count = 0
@@ -392,7 +394,8 @@
                                             no_send=self.args.no_send,
                                             destroy_incompatible=self.args.destroy_incompatible,
                                             send_pipes=send_pipes, recv_pipes=recv_pipes,
-                                            decrypt=self.args.decrypt, encrypt=self.args.encrypt, zfs_compressed=self.args.zfs_compressed )
+                                            decrypt=self.args.decrypt, encrypt=self.args.encrypt,
+                                            zfs_compressed=self.args.zfs_compressed)
             except Exception as e:
                 fail_count = fail_count + 1
                 source_dataset.error("FAILED: " + str(e))
@@ -418,20 +421,6 @@
         for source_dataset in source_datasets:
             source_dataset.thin(skip_holds=True)

-    def filter_replicated(self, datasets):
-        if not self.args.ignore_replicated:
-            return datasets
-        else:
-            self.set_title("Filtering already replicated filesystems")
-            ret = []
-            for dataset in datasets:
-                if dataset.is_changed(self.args.min_change):
-                    ret.append(dataset)
-                else:
-                    dataset.verbose("Ignoring, already replicated")
-
-            return ret
-
     def filter_properties_list(self):

         if self.args.filter_properties:
@@ -487,7 +476,7 @@
         # may still need to be used to explicitly exclude a backup with the 'received' source to avoid accidental
         # recursive replication of a zvol that is currently being received in another session (as it will have changes).
         exclude_paths = []
-        exclude_received=self.args.exclude_received
+        exclude_received = self.args.exclude_received
         if self.args.ssh_source == self.args.ssh_target:
             if self.args.target_path:
                 # target and source are the same, make sure to exclude target_path
@@ -495,21 +484,19 @@
                 exclude_paths.append(self.args.target_path)
             else:
                 self.warning("Source and target are on the same host, excluding received datasets from selection.")
-                exclude_received=True
+                exclude_received = True

-        selected_source_datasets = source_node.selected_datasets(exclude_received=exclude_received,
-                                                                 exclude_paths=exclude_paths)
-        if not selected_source_datasets:
+        source_datasets = source_node.selected_datasets(exclude_received=exclude_received,
+                                                        exclude_paths=exclude_paths,
+                                                        exclude_unchanged=self.args.exclude_unchanged,
+                                                        min_change=self.args.min_change)
+        if not source_datasets:
             self.error(
                 "No source filesystems selected, please do a 'zfs set autobackup:{0}=true' on the source datasets "
                 "you want to select.".format(
                     self.args.backup_name))
             return 255

-        # filter out already replicated stuff?
-        source_datasets = self.filter_replicated(selected_source_datasets)
-
         ################# snapshotting
         if not self.args.no_snapshot:
             self.set_title("Snapshotting")
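Net effect of these hunks: the dedicated "Filtering already replicated filesystems" pass is gone, and the same is_changed(min_change) test now runs per dataset during selection, so skipped datasets report "Excluded (unchanged since last snapshot)" instead of "Ignoring, already replicated". Schematically (before-and-after call shape, condensed from the hunks above):

# Before: select first, then filter in a second pass.
# selected = source_node.selected_datasets(exclude_received=..., exclude_paths=...)
# source_datasets = self.filter_replicated(selected)

# After: one pass; unchanged datasets never enter source_datasets.
source_datasets = source_node.selected_datasets(exclude_received=exclude_received,
                                                exclude_paths=exclude_paths,
                                                exclude_unchanged=self.args.exclude_unchanged,
                                                min_change=self.args.min_change)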
@@ -112,7 +112,7 @@ class ZfsDataset:
         """true if this dataset is a snapshot"""
         return self.name.find("@") != -1

-    def is_selected(self, value, source, inherited, exclude_received, exclude_paths):
+    def is_selected(self, value, source, inherited, exclude_received, exclude_paths, exclude_unchanged, min_change):
         """determine if dataset should be selected for backup (called from
         ZfsNode)

@@ -122,6 +122,12 @@
         :type source: str
         :type inherited: bool
         :type exclude_received: bool
+        :type exclude_unchanged: bool
+        :type min_change: bool

         :param value: Value of the zfs property ("false"/"true"/"child"/"-")
         :param source: Source of the zfs property ("local"/"received", "-")
         :param inherited: True of the value/source was inherited from a higher dataset.
         """

         # sanity checks
@@ -135,28 +141,40 @@
             raise (Exception(
                 "{} autobackup-property has illegal value: '{}'".format(self.name, value)))

+        # non specified, ignore
+        if value == "-":
+            return False
+
+        # only select childs of this dataset, ignore
+        if value == "child" and not inherited:
+            return False
+
+        # manually excluded by property
+        if value == "false":
+            self.verbose("Excluded")
+            return False
+
+        # from here on the dataset is selected by property, now do additional exclusion checks
+
         # our path starts with one of the excluded paths?
         for exclude_path in exclude_paths:
             if self.name.startswith(exclude_path):
                 # too noisy for verbose
-                self.debug("Excluded (in exclude list)")
+                self.debug("Excluded (path in exclude list)")
                 return False

-        # now determine if its actually selected
-        if value == "false":
-            self.verbose("Excluded (disabled)")
-            return False
-        elif value == "true" or (value == "child" and inherited):
-            if source == "local":
-                self.verbose("Selected")
-                return True
-            elif source == "received":
-                if exclude_received:
-                    self.verbose("Excluded (dataset already received)")
-                    return False
-                else:
-                    self.verbose("Selected")
-                    return True
+        if source == "received":
+            if exclude_received:
+                self.verbose("Excluded (dataset already received)")
+                return False
+
+        if exclude_unchanged and not self.is_changed(min_change):
+            self.verbose("Excluded (unchanged since last snapshot)")
+            return False
+
+        self.verbose("Selected")
+        return True

     @CachedProperty
     def parent(self):
@@ -296,6 +314,7 @@ class ZfsDataset:
         if min_changed_bytes == 0:
             return True

+
         if int(self.properties['written']) < min_changed_bytes:
             return False
         else:
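The reworked is_selected() is now a flat chain of early returns, and the unchanged-check piggybacks on is_changed(), which per the last hunk compares the dataset's 'written' property against the threshold (min_changed_bytes == 0 always counts as changed). A standalone sketch of the same decision order (a free function for illustration; written_bytes stands in for int(self.properties['written'])):

def is_selected_sketch(value, source, inherited, exclude_received, exclude_paths,
                       exclude_unchanged, min_change, name, written_bytes):
    if value == "-":                        # property not specified anywhere
        return False
    if value == "child" and not inherited:  # "child": only select children
        return False
    if value == "false":                    # manually excluded by property
        return False
    # selected by property; now the additional exclusion checks, in order:
    if any(name.startswith(path) for path in exclude_paths):
        return False
    if source == "received" and exclude_received:
        return False
    changed = (min_change == 0) or (written_bytes >= min_change)  # is_changed()
    if exclude_unchanged and not changed:
        return False
    return True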
@@ -214,9 +214,7 @@ class ZfsNode(ExecuteNode):
             except Exception as e:
                 pass

-
-
-    def selected_datasets(self, exclude_received, exclude_paths):
+    def selected_datasets(self, exclude_received, exclude_paths, exclude_unchanged, min_change):
         """determine filesystems that should be backed up by looking at the special autobackup-property, systemwide

         returns: list of ZfsDataset
@@ -251,7 +249,7 @@
             source = raw_source

             # determine it
-            if dataset.is_selected(value=value, source=source, inherited=inherited, exclude_received=exclude_received, exclude_paths=exclude_paths):
+            if dataset.is_selected(value=value, source=source, inherited=inherited, exclude_received=exclude_received, exclude_paths=exclude_paths, exclude_unchanged=exclude_unchanged, min_change=min_change):
                 selected_filesystems.append(dataset)

         return selected_filesystems