Mirror of https://github.com/psy0rz/zfs_autobackup.git
added tag support (needed for bookmarks). implements #151
commit ec9ca29620 (parent 88a4e52763)
@@ -57,6 +57,7 @@ class ZfsAuto(CliBase):
         self.property_name = args.property_format.format(args.backup_name)
         self.snapshot_time_format = args.snapshot_format.format(args.backup_name)
         self.hold_name = args.hold_format.format(args.backup_name)
+        self.tag_seperator = args.tag_seperator

         dt = datetime_now(args.utc)

@@ -68,6 +69,27 @@ class ZfsAuto(CliBase):
         self.verbose("Snapshot format : {}".format(self.snapshot_time_format))
         self.verbose("Timezone : {}".format("UTC" if args.utc else "Local"))

+        seperator_test = datetime_now(False).strftime(self.snapshot_time_format)
+
+        # according to man 8 zfs:
+        valid_tags = "_.: -"
+        if self.tag_seperator not in valid_tags or self.tag_seperator == '':
+            self.log.error("Invalid tag seperator. Allowed: '{}'".format(valid_tags))
+            sys.exit(255)
+
+        if self.tag_seperator in seperator_test:
+            self.log.error("Tag seperator '{}' may not be used in snapshot format: {}".format(self.tag_seperator,
+                                                                                              self.snapshot_time_format))
+            sys.exit(255)
+
+        if args.tag and self.tag_seperator in args.tag:
+            self.log.error(
+                "Tag '{}' may not contain tag seperator '{}'".format(args.tag, self.tag_seperator))
+            sys.exit(255)
+
+        if args.tag:
+            self.verbose("Tag : {}".format(self.tag_seperator + args.tag))
+
         return args

     def get_parser(self):
@@ -98,13 +120,18 @@ class ZfsAuto(CliBase):
                            help='ZFS hold string format. Default: %(default)s')
         group.add_argument('--strip-path', metavar='N', default=0, type=int,
                            help='Number of directories to strip from target path.')
+        group.add_argument('--tag-seperator', metavar='CHAR', default="_",
+                           help="Tag seperator for snapshots and bookmarks. Default: %(default)s")
+        group.add_argument('--tag', metavar='TAG', default=None,
+                           help='Backup tag to add to snapshots names. (For administrative purposes)')
+
         group = parser.add_argument_group("Selection options")
         group.add_argument('--ignore-replicated', action='store_true', help=argparse.SUPPRESS)
         group.add_argument('--exclude-unchanged', metavar='BYTES', default=0, type=int,
                            help='Exclude datasets that have less than BYTES data changed since any last snapshot. (Use with proxmox HA replication)')
         group.add_argument('--exclude-received', action='store_true',
-                           help='Exclude datasets that have the origin of their autobackup: property as "received".' , )
+                           help='Exclude datasets that have the origin of their autobackup: property as "received".', )

         # group.add_argument('--include-received', action='store_true',
         #                    help=argparse.SUPPRESS)

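For clarity, here is a minimal standalone sketch of the tag-seperator checks added in the ZfsAuto hunks above. Only the rules come from the diff (the seperator must be one of the characters zfs allows in snapshot names, must not be empty, must not occur in the rendered snapshot format, and must not occur in the tag itself); the format string and tag values below are made-up examples, not the tool's defaults.

# Sketch only, not part of the commit.
from datetime import datetime

def check_tag_config(tag_seperator, snapshot_time_format, tag=None):
    valid_tags = "_.: -"  # characters allowed in snapshot names, according to man 8 zfs
    if tag_seperator == '' or tag_seperator not in valid_tags:
        raise ValueError("Invalid tag seperator. Allowed: '{}'".format(valid_tags))
    # render the time format once; the seperator may not collide with it
    seperator_test = datetime.now().strftime(snapshot_time_format)
    if tag_seperator in seperator_test:
        raise ValueError("Tag seperator '{}' may not be used in snapshot format: {}".format(
            tag_seperator, snapshot_time_format))
    if tag and tag_seperator in tag:
        raise ValueError("Tag '{}' may not contain tag seperator '{}'".format(tag, tag_seperator))

check_tag_config("_", "offsite1-%Y%m%d%H%M%S", "weekly")    # passes
# check_tag_config("-", "offsite1-%Y%m%d%H%M%S", "weekly")  # raises: '-' occurs in the rendered format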
@@ -48,9 +48,8 @@ class ZfsAutobackup(ZfsAuto):
             self.warning("Using --compress with --zfs-compressed, might be inefficient.")

         if args.decrypt:
-            self.warning("Properties will not be sent over for datasets that will be decrypted. (zfs bug https://github.com/openzfs/zfs/issues/16275)")
-
-
+            self.warning(
+                "Properties will not be sent over for datasets that will be decrypted. (zfs bug https://github.com/openzfs/zfs/issues/16275)")

         return args

@@ -127,8 +126,8 @@ class ZfsAutobackup(ZfsAuto):
                            help='Limit data transfer rate in Bytes/sec (e.g. 128K. requires mbuffer.)')
         group.add_argument('--buffer', metavar='SIZE', default=None,
                            help='Add zfs send and recv buffers to smooth out IO bursts. (e.g. 128M. requires mbuffer)')
-        parser.add_argument('--buffer-chunk-size', metavar="BUFFERCHUNKSIZE", default=None,
-                            help='Tune chunk size when mbuffer is used. (requires mbuffer.)')
+        group.add_argument('--buffer-chunk-size', metavar="BUFFERCHUNKSIZE", default=None,
+                           help='Tune chunk size when mbuffer is used. (requires mbuffer.)')
         group.add_argument('--send-pipe', metavar="COMMAND", default=[], action='append',
                            help='pipe zfs send output through COMMAND (can be used multiple times)')
         group.add_argument('--recv-pipe', metavar="COMMAND", default=[], action='append',
@@ -399,14 +398,14 @@ class ZfsAutobackup(ZfsAuto):
                 common_features = source_features and target_features

                 if self.args.no_bookmarks:
-                    use_bookmarks=False
+                    use_bookmarks = False
                 else:
                     # NOTE: bookmark_written seems to be needed. (only 'bookmarks' was not enough on ubuntu 20)
                     if not 'bookmark_written' in common_features:
                         source_dataset.warning("Disabling bookmarks, not supported on both pools.")
-                        use_bookmarks=False
+                        use_bookmarks = False
                     else:
-                        use_bookmarks=True
+                        use_bookmarks = True

                 # sync the snapshots of this dataset
                 source_dataset.sync_snapshots(target_dataset, show_progress=self.args.progress,
@@ -455,7 +454,6 @@ class ZfsAutobackup(ZfsAuto):
         if self.args.clear_refreservation:
             filter_properties.append("refreservation")

-
         return filter_properties

     def set_properties_list(self):
@@ -496,7 +494,8 @@ class ZfsAutobackup(ZfsAuto):
                               ssh_config=self.args.ssh_config,
                               ssh_to=self.args.ssh_source, readonly=self.args.test,
                               debug_output=self.args.debug_output, description=description, thinner=source_thinner,
-                              exclude_snapshot_patterns=self.args.exclude_snapshot_pattern)
+                              exclude_snapshot_patterns=self.args.exclude_snapshot_pattern,
+                              tag_seperator=self.tag_seperator)

         ################# select source datasets
         self.set_title("Selecting")
@@ -512,6 +511,9 @@ class ZfsAutobackup(ZfsAuto):
         if not self.args.no_snapshot:
             self.set_title("Snapshotting")
             snapshot_name = datetime_now(self.args.utc).strftime(self.snapshot_time_format)
+            if self.args.tag:
+                snapshot_name = snapshot_name + self.tag_seperator + self.args.tag
+
             source_node.consistent_snapshot(source_datasets, snapshot_name,
                                             min_changed_bytes=self.args.min_change,
                                             pre_snapshot_cmds=self.args.pre_snapshot_cmd,
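The Snapshotting hunk above is where the tag actually ends up in a snapshot name: the strftime result gets the seperator and tag appended. A small standalone illustration follows; the format string, date and tag are assumed examples, only the seperator default ("_") comes from the --tag-seperator option above.

from datetime import datetime

snapshot_time_format = "offsite1-%Y%m%d%H%M%S"   # assumed example format
tag_seperator = "_"                              # default of --tag-seperator
tag = "siteA"                                    # hypothetical --tag value

snapshot_name = datetime(2025, 1, 1, 12, 0, 0).strftime(snapshot_time_format)
if tag:
    snapshot_name = snapshot_name + tag_seperator + tag

print(snapshot_name)  # offsite1-20250101120000_siteA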
@@ -534,7 +536,8 @@ class ZfsAutobackup(ZfsAuto):
                                   ssh_to=self.args.ssh_target,
                                   readonly=self.args.test, debug_output=self.args.debug_output,
                                   description="[Target]",
-                                  thinner=target_thinner)
+                                  exclude_snapshot_patterns=self.args.exclude_snapshot_pattern,
+                                  thinner=target_thinner, tag_seperator=self.tag_seperator)
             target_node.verbose("Receive datasets under: {}".format(self.args.target_path))

             self.set_title("Synchronising")
@@ -6,6 +6,8 @@ import time
 from .ExecuteNode import ExecuteError


+# NOTE: get/create instances via zfs_node.get_dataset(). This is to make sure there is only one ZfsDataset object per actual dataset.
+
 class ZfsDataset:
     """a zfs dataset (filesystem/volume/snapshot/clone) Note that a dataset
     doesn't have to actually exist (yet/anymore) Also most properties are cached
@@ -26,7 +28,9 @@ class ZfsDataset:
         :type force_exists: bool
         """
         self.zfs_node = zfs_node
-        self.name = name  # full name
+        self.name = name  # full actual name of dataset
+
+        self.force_exists = force_exists

         # caching
         # self.__snapshots = None  # type: None|list[ZfsDataset]
@@ -36,9 +40,7 @@ class ZfsDataset:
         self.__recursive_datasets = None  # type: None|list[ZfsDataset]
         self.__datasets = None  # type: None|list[ZfsDataset]
         # self.__bookmarks = None  # type: None|list[ZfsDataset]
-        self.__snapshots_bookmarks = None  #type: None|list[ZfsDataset]
-
-        self.force_exists = force_exists
+        self.__snapshots_bookmarks = None  # type: None|list[ZfsDataset]

     def invalidate_cache(self):
         """clear caches"""
@@ -141,20 +143,29 @@ class ZfsDataset:

         raise (Exception("This is not a snapshot or bookmark"))

+    @property
+    def tagless_suffix(self):
+        """snapshot or bookmark part of the name, but without the tag."""
+
+        suffix = self.suffix
+        if self.zfs_node.tag_seperator in suffix:
+            return suffix.split(self.zfs_node.tag_seperator)[0]
+        else:
+            return suffix
+
     @property
     def typed_suffix(self):
         """suffix with @ or # in front of it"""

         if self.is_snapshot:
             (filesystem, snapshot_name) = self.name.split("@")
-            return "@"+snapshot_name
+            return "@" + snapshot_name
         elif self.is_bookmark:
             (filesystem, bookmark_name) = self.name.split("#")
-            return "#"+bookmark_name
+            return "#" + bookmark_name

         raise (Exception("This is not a snapshot or bookmark"))

     @property
     def is_snapshot(self):
         """true if this dataset is a snapshot"""
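The new tagless_suffix property above is what keeps tagged snapshots recognisable as "ours": the part after the seperator is stripped before the timestamp is parsed (see the timestamp() change further down). A standalone illustration with hypothetical names and an assumed format string:

from datetime import datetime

tag_seperator = "_"
snapshot_time_format = "offsite1-%Y%m%d%H%M%S"   # assumed example format

name = "rpool/data@offsite1-20250101120000_siteA"
suffix = name.split("@")[1]                      # suffix property: "offsite1-20250101120000_siteA"
tagless = suffix.split(tag_seperator)[0] if tag_seperator in suffix else suffix

print(tagless)                                           # offsite1-20250101120000
print(datetime.strptime(tagless, snapshot_time_format))  # 2025-01-01 12:00:00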
@@ -170,15 +181,14 @@ class ZfsDataset:

         return not (self.is_snapshot or self.is_bookmark)

-
     @property
-    def is_excluded(self):
+    def is_snapshot_excluded(self):
         """true if this dataset is a snapshot and matches the exclude pattern"""
         if not self.is_snapshot:
             return False

         for pattern in self.zfs_node.exclude_snapshot_patterns:
-            if pattern.search(self.name) is not None:
+            if pattern.search(self.suffix) is not None:
                 self.debug("Excluded (path matches snapshot exclude pattern)")
                 return True

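The rename to is_snapshot_excluded comes with a behaviour fix: the exclude pattern is now matched against the snapshot suffix instead of the full name, so a pattern can no longer hit the dataset path by accident. A hypothetical example (names and pattern are made up):

import re

pattern = re.compile("sync")                # an exclude pattern, as compiled by the tool
name = "rpool/sync-share@manual-20250101"   # "sync" only occurs in the dataset path
suffix = name.split("@")[1]

print(pattern.search(name) is not None)     # True  -> old code would exclude this snapshot
print(pattern.search(suffix) is not None)   # False -> new code keeps it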
@@ -273,7 +283,6 @@ class ZfsDataset:
         else:
             return None

-
     def find_next_snapshot(self, snapshot_bookmark):
         """find next snapshot in this dataset, according to snapshot or bookmark. None if it doesn't exist

@@ -284,17 +293,16 @@ class ZfsDataset:
         if not self.is_dataset:
             raise (Exception("Please call this on a dataset."))

-        found=False
+        found = False
         for snapshot in self.snapshots_bookmarks:
             if snapshot == snapshot_bookmark:
-                found=True
+                found = True
             else:
-                if found==True and snapshot.is_snapshot:
+                if found == True and snapshot.is_snapshot:
                     return snapshot

         return None

-
     @property
     def exists_check(self):
         """check on disk if it exists"""
@@ -441,11 +449,13 @@ class ZfsDataset:
         """get timestamp from snapshot name. Only works for our own snapshots
         with the correct format. Snapshots that are not ours always return None

+        Note that the tag-part in the name is ignored, so snapsnots are ours regardless of their tag.
+
         :rtype: int|None
         """

         try:
-            dt = datetime.strptime(self.suffix, self.zfs_node.snapshot_time_format)
+            dt = datetime.strptime(self.tagless_suffix, self.zfs_node.snapshot_time_format)
         except ValueError:
             return None

@@ -476,14 +486,15 @@ class ZfsDataset:
         self.debug("Getting snapshots and bookmarks")

         cmd = [
-            "zfs", "list", "-d", "1", "-r", "-t", "snapshot,bookmark", "-H", "-o", "name", "-s", "createtxg", self.name
+            "zfs", "list", "-d", "1", "-r", "-t", "snapshot,bookmark", "-H", "-o", "name", "-s", "createtxg",
+            self.name
         ]

-        self.__snapshots_bookmarks = self.zfs_node.get_datasets(self.zfs_node.run(cmd=cmd, readonly=True), force_exists=True)
+        self.__snapshots_bookmarks = self.zfs_node.get_datasets(self.zfs_node.run(cmd=cmd, readonly=True),
+                                                                force_exists=True)

         return self.__snapshots_bookmarks

-
     @property
     def snapshots(self):
         """get all snapshots of this dataset
@@ -579,7 +590,6 @@ class ZfsDataset:

         return None

-
     def find_snapshot_index(self, snapshot):
         """find snapshot index by snapshot (can be a snapshot_name or
         ZfsDataset)
@@ -640,17 +650,17 @@ class ZfsDataset:
         """Bookmark this snapshot, and return the bookmark"""

         if not self.is_snapshot:
-            raise(Exception("Can only bookmark a snapshot!"))
+            raise (Exception("Can only bookmark a snapshot!"))

         self.debug("Bookmarking")

         cmd = [
-            "zfs", "bookmark", self.name, "#"+self.suffix
+            "zfs", "bookmark", self.name, "#" + self.suffix
         ]

         self.zfs_node.run(cmd=cmd)

-        bookmark=self.zfs_node.get_dataset( self.name+'#'+self.suffix,force_exists=True)
+        bookmark = self.zfs_node.get_dataset(self.name + '#' + self.suffix, force_exists=True)
         self.cache_snapshot_bookmark(bookmark)
         return bookmark

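For reference, this is the shape of the command the bookmark() method above ends up running, shown here with a hypothetical snapshot name. The bookmark reuses the snapshot's suffix, so it can later serve as the base of an incremental send (see the "-i prev_snapshot.typed_suffix" usage further down).

snapshot = "rpool/data@offsite1-20250101120000_siteA"   # hypothetical snapshot
dataset, suffix = snapshot.split("@")

# same form as the cmd list in the hunk above
cmd = ["zfs", "bookmark", snapshot, "#" + suffix]
print(" ".join(cmd))

bookmark_name = dataset + "#" + suffix   # rpool/data#offsite1-20250101120000_siteA
print(bookmark_name)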
@@ -665,7 +675,6 @@ class ZfsDataset:

         return ret

-
     @property
     def recursive_datasets(self, types="filesystem,volume"):
         """get all (non-snapshot) datasets recursively under us
@@ -757,7 +766,6 @@ class ZfsDataset:

         # incremental?
         if prev_snapshot:
-
             cmd.extend(["-i", prev_snapshot.typed_suffix])

         cmd.append(self.name)
@@ -1014,7 +1022,7 @@ class ZfsDataset:
         else:
             for target_snapshot in reversed(target_dataset.snapshots):

-                #Source bookmark?
+                # Source bookmark?
                 source_bookmark = self.find_bookmark(target_snapshot)
                 if source_bookmark:
                     if guid_check and source_bookmark.properties['guid'] != target_snapshot.properties['guid']:
@@ -1023,7 +1031,7 @@ class ZfsDataset:
                     source_bookmark.debug("Common bookmark")
                     return source_bookmark

-                #Source snapshot?
+                # Source snapshot?
                 source_snapshot = self.find_snapshot(target_snapshot)
                 if source_snapshot:
                     if guid_check and source_snapshot.properties['guid'] != target_snapshot.properties['guid']:
@@ -1178,7 +1186,7 @@ class ZfsDataset:

         while source_snapshot:
             # we want it?
-            if (also_other_snapshots or source_snapshot.is_ours()) and not source_snapshot.is_excluded:
+            if (also_other_snapshots or source_snapshot.is_ours()) and not source_snapshot.is_snapshot_excluded:
                 # create virtual target snapshot
                 target_snapshot = target_dataset.zfs_node.get_dataset(
                     target_dataset.filesystem_name + source_snapshot.typed_suffix, force_exists=False)
@@ -1228,7 +1236,8 @@ class ZfsDataset:

     def sync_snapshots(self, target_dataset, features, show_progress, filter_properties, set_properties,
                        ignore_recv_exit_code, holds, rollback, decrypt, encrypt, also_other_snapshots,
-                       no_send, destroy_incompatible, send_pipes, recv_pipes, zfs_compressed, force, guid_check, use_bookmarks):
+                       no_send, destroy_incompatible, send_pipes, recv_pipes, zfs_compressed, force, guid_check,
+                       use_bookmarks):
         """sync this dataset's snapshots to target_dataset, while also thinning
         out old snapshots along the way.

@@ -1333,12 +1342,12 @@ class ZfsDataset:
             if prev_target_snapshot:
                 prev_target_snapshot.release()

-            #bookmark common snapshot on source, or use holds if bookmarks are not enabled.
+            # bookmark common snapshot on source, or use holds if bookmarks are not enabled.
             if use_bookmarks:
-                source_bookmark=source_snapshot.bookmark()
-                #note: destroy source_snapshot when obsolete at this point?
+                source_bookmark = source_snapshot.bookmark()
+                # note: destroy source_snapshot when obsolete at this point?
             else:
-                source_bookmark=None
+                source_bookmark = None
                 if holds:
                     source_snapshot.hold()

@@ -1346,9 +1355,10 @@ class ZfsDataset:
                 prev_source_snapshot_bookmark.release()

             # we may now destroy the previous source snapshot if its obsolete or an bookmark
-            #FIXME: met bookmarks kan de huidige snapshot na send ook meteen weg
-            #FIXME: klopt niet, nu haalt ie altijd bookmark weg? wat als we naar andere target willen senden (zoals in test_encryption.py)
-            if prev_source_snapshot_bookmark and (prev_source_snapshot_bookmark in source_obsoletes or prev_source_snapshot_bookmark.is_bookmark):
+            # FIXME: met bookmarks kan de huidige snapshot na send ook meteen weg
+            # FIXME: klopt niet, nu haalt ie altijd bookmark weg? wat als we naar andere target willen senden (zoals in test_encryption.py)
+            if prev_source_snapshot_bookmark and (
+                    prev_source_snapshot_bookmark in source_obsoletes or prev_source_snapshot_bookmark.is_bookmark):
                 prev_source_snapshot_bookmark.destroy()

             # destroy the previous target snapshot if obsolete (usually this is only the common_snapshot,
@@ -1356,7 +1366,7 @@ class ZfsDataset:
             if prev_target_snapshot in target_obsoletes:
                 prev_target_snapshot.destroy()

-            #we always try to use the bookmark during incremental send
+            # we always try to use the bookmark during incremental send
             if source_bookmark:
                 prev_source_snapshot_bookmark = source_bookmark
             else:
@@ -1364,7 +1374,6 @@ class ZfsDataset:

             prev_target_snapshot = target_snapshot

-
     def mount(self, mount_point):

         self.debug("Mounting")
@@ -17,13 +17,18 @@ from .util import datetime_now
 class ZfsNode(ExecuteNode):
     """a node that contains zfs datasets. implements global (systemwide/pool wide) zfs commands"""

-    def __init__(self, logger, utc=False, snapshot_time_format="", hold_name="", ssh_config=None, ssh_to=None,
-                 readonly=False,
-                 description="",
-                 debug_output=False, thinner=None, exclude_snapshot_patterns=[]):
+    # def __init__(self, logger, utc=False, snapshot_time_format="", hold_name="", ssh_config=None, ssh_to=None,
+    #              readonly=False,
+    #              description="",
+    #              debug_output=False, thinner=None, exclude_snapshot_patterns=None, tag_seperator='~'):
+    def __init__(self, logger, utc, snapshot_time_format, hold_name, ssh_config, ssh_to,
+                 readonly,
+                 description,
+                 debug_output, thinner, exclude_snapshot_patterns, tag_seperator):

         self.utc = utc
         self.snapshot_time_format = snapshot_time_format
+        self.tag_seperator = tag_seperator
         self.hold_name = hold_name

         self.description = description