Added tag support (needed for bookmarks). Implements #151.

This commit is contained in:
Edwin Eefting 2024-10-02 16:17:56 +02:00
parent 88a4e52763
commit ec9ca29620
No known key found for this signature in database
GPG Key ID: F059440DED3FB5B8
4 changed files with 98 additions and 54 deletions

View File

@ -57,6 +57,7 @@ class ZfsAuto(CliBase):
self.property_name = args.property_format.format(args.backup_name)
self.snapshot_time_format = args.snapshot_format.format(args.backup_name)
self.hold_name = args.hold_format.format(args.backup_name)
self.tag_seperator = args.tag_seperator
dt = datetime_now(args.utc)
@ -68,6 +69,27 @@ class ZfsAuto(CliBase):
self.verbose("Snapshot format : {}".format(self.snapshot_time_format))
self.verbose("Timezone : {}".format("UTC" if args.utc else "Local"))
seperator_test = datetime_now(False).strftime(self.snapshot_time_format)
# according to man 8 zfs:
valid_tags = "_.: -"
if self.tag_seperator not in valid_tags or self.tag_seperator == '':
self.log.error("Invalid tag seperator. Allowed: '{}'".format(valid_tags))
sys.exit(255)
if self.tag_seperator in seperator_test:
self.log.error("Tag seperator '{}' may not be used in snapshot format: {}".format(self.tag_seperator,
self.snapshot_time_format))
sys.exit(255)
if args.tag and self.tag_seperator in args.tag:
self.log.error(
"Tag '{}' may not contain tag seperator '{}'".format(args.tag, self.tag_seperator))
sys.exit(255)
if args.tag:
self.verbose("Tag : {}".format(self.tag_seperator + args.tag))
return args
def get_parser(self):
@ -98,6 +120,10 @@ class ZfsAuto(CliBase):
help='ZFS hold string format. Default: %(default)s')
group.add_argument('--strip-path', metavar='N', default=0, type=int,
help='Number of directories to strip from target path.')
group.add_argument('--tag-seperator', metavar='CHAR', default="_",
help="Tag seperator for snapshots and bookmarks. Default: %(default)s")
group.add_argument('--tag', metavar='TAG', default=None,
help='Backup tag to add to snapshots names. (For administrative purposes)')
group = parser.add_argument_group("Selection options")
group.add_argument('--ignore-replicated', action='store_true', help=argparse.SUPPRESS)
@ -105,6 +131,7 @@ class ZfsAuto(CliBase):
help='Exclude datasets that have less than BYTES data changed since any last snapshot. (Use with proxmox HA replication)')
group.add_argument('--exclude-received', action='store_true',
help='Exclude datasets that have the origin of their autobackup: property as "received".', )
# group.add_argument('--include-received', action='store_true',
# help=argparse.SUPPRESS)

View File

@ -48,9 +48,8 @@ class ZfsAutobackup(ZfsAuto):
self.warning("Using --compress with --zfs-compressed, might be inefficient.")
if args.decrypt:
self.warning("Properties will not be sent over for datasets that will be decrypted. (zfs bug https://github.com/openzfs/zfs/issues/16275)")
self.warning(
"Properties will not be sent over for datasets that will be decrypted. (zfs bug https://github.com/openzfs/zfs/issues/16275)")
return args
@ -127,7 +126,7 @@ class ZfsAutobackup(ZfsAuto):
help='Limit data transfer rate in Bytes/sec (e.g. 128K. requires mbuffer.)')
group.add_argument('--buffer', metavar='SIZE', default=None,
help='Add zfs send and recv buffers to smooth out IO bursts. (e.g. 128M. requires mbuffer)')
parser.add_argument('--buffer-chunk-size', metavar="BUFFERCHUNKSIZE", default=None,
group.add_argument('--buffer-chunk-size', metavar="BUFFERCHUNKSIZE", default=None,
help='Tune chunk size when mbuffer is used. (requires mbuffer.)')
group.add_argument('--send-pipe', metavar="COMMAND", default=[], action='append',
help='pipe zfs send output through COMMAND (can be used multiple times)')
@ -455,7 +454,6 @@ class ZfsAutobackup(ZfsAuto):
if self.args.clear_refreservation:
filter_properties.append("refreservation")
return filter_properties
def set_properties_list(self):
@ -496,7 +494,8 @@ class ZfsAutobackup(ZfsAuto):
ssh_config=self.args.ssh_config,
ssh_to=self.args.ssh_source, readonly=self.args.test,
debug_output=self.args.debug_output, description=description, thinner=source_thinner,
exclude_snapshot_patterns=self.args.exclude_snapshot_pattern)
exclude_snapshot_patterns=self.args.exclude_snapshot_pattern,
tag_seperator=self.tag_seperator)
################# select source datasets
self.set_title("Selecting")
@ -512,6 +511,9 @@ class ZfsAutobackup(ZfsAuto):
if not self.args.no_snapshot:
self.set_title("Snapshotting")
snapshot_name = datetime_now(self.args.utc).strftime(self.snapshot_time_format)
if self.args.tag:
snapshot_name = snapshot_name + self.tag_seperator + self.args.tag
source_node.consistent_snapshot(source_datasets, snapshot_name,
min_changed_bytes=self.args.min_change,
pre_snapshot_cmds=self.args.pre_snapshot_cmd,
@ -534,7 +536,8 @@ class ZfsAutobackup(ZfsAuto):
ssh_to=self.args.ssh_target,
readonly=self.args.test, debug_output=self.args.debug_output,
description="[Target]",
thinner=target_thinner)
exclude_snapshot_patterns=self.args.exclude_snapshot_pattern,
thinner=target_thinner, tag_seperator=self.tag_seperator)
target_node.verbose("Receive datasets under: {}".format(self.args.target_path))
self.set_title("Synchronising")

View File

@ -6,6 +6,8 @@ import time
from .ExecuteNode import ExecuteError
# NOTE: get/create instances via zfs_node.get_dataset(). This is to make sure there is only one ZfsDataset object per actual dataset.
class ZfsDataset:
"""a zfs dataset (filesystem/volume/snapshot/clone) Note that a dataset
doesn't have to actually exist (yet/anymore) Also most properties are cached
@ -26,7 +28,9 @@ class ZfsDataset:
:type force_exists: bool
"""
self.zfs_node = zfs_node
self.name = name # full name
self.name = name # full actual name of dataset
self.force_exists = force_exists
# caching
# self.__snapshots = None # type: None|list[ZfsDataset]
@ -38,8 +42,6 @@ class ZfsDataset:
# self.__bookmarks = None # type: None|list[ZfsDataset]
self.__snapshots_bookmarks = None # type: None|list[ZfsDataset]
self.force_exists = force_exists
def invalidate_cache(self):
"""clear caches"""
self.force_exists = None
@ -141,6 +143,16 @@ class ZfsDataset:
raise (Exception("This is not a snapshot or bookmark"))
@property
def tagless_suffix(self):
    """Snapshot or bookmark part of the name, with any trailing tag stripped.

    Everything before the first occurrence of the node's tag separator is
    the real suffix; when no separator is present the suffix is returned
    unchanged.
    """
    base, _, _ = self.suffix.partition(self.zfs_node.tag_seperator)
    return base
@property
def typed_suffix(self):
"""suffix with @ or # in front of it"""
@ -154,7 +166,6 @@ class ZfsDataset:
raise (Exception("This is not a snapshot or bookmark"))
@property
def is_snapshot(self):
"""true if this dataset is a snapshot"""
@ -170,15 +181,14 @@ class ZfsDataset:
return not (self.is_snapshot or self.is_bookmark)
@property
def is_excluded(self):
def is_snapshot_excluded(self):
"""true if this dataset is a snapshot and matches the exclude pattern"""
if not self.is_snapshot:
return False
for pattern in self.zfs_node.exclude_snapshot_patterns:
if pattern.search(self.name) is not None:
if pattern.search(self.suffix) is not None:
self.debug("Excluded (path matches snapshot exclude pattern)")
return True
@ -273,7 +283,6 @@ class ZfsDataset:
else:
return None
def find_next_snapshot(self, snapshot_bookmark):
"""find next snapshot in this dataset, according to snapshot or bookmark. None if it doesn't exist
@ -294,7 +303,6 @@ class ZfsDataset:
return None
@property
def exists_check(self):
"""check on disk if it exists"""
@ -441,11 +449,13 @@ class ZfsDataset:
"""get timestamp from snapshot name. Only works for our own snapshots
with the correct format. Snapshots that are not ours always return None
Note that the tag-part in the name is ignored, so snapshots are ours regardless of their tag.
:rtype: int|None
"""
try:
dt = datetime.strptime(self.suffix, self.zfs_node.snapshot_time_format)
dt = datetime.strptime(self.tagless_suffix, self.zfs_node.snapshot_time_format)
except ValueError:
return None
@ -476,14 +486,15 @@ class ZfsDataset:
self.debug("Getting snapshots and bookmarks")
cmd = [
"zfs", "list", "-d", "1", "-r", "-t", "snapshot,bookmark", "-H", "-o", "name", "-s", "createtxg", self.name
"zfs", "list", "-d", "1", "-r", "-t", "snapshot,bookmark", "-H", "-o", "name", "-s", "createtxg",
self.name
]
self.__snapshots_bookmarks = self.zfs_node.get_datasets(self.zfs_node.run(cmd=cmd, readonly=True), force_exists=True)
self.__snapshots_bookmarks = self.zfs_node.get_datasets(self.zfs_node.run(cmd=cmd, readonly=True),
force_exists=True)
return self.__snapshots_bookmarks
@property
def snapshots(self):
"""get all snapshots of this dataset
@ -579,7 +590,6 @@ class ZfsDataset:
return None
def find_snapshot_index(self, snapshot):
"""find snapshot index by snapshot (can be a snapshot_name or
ZfsDataset)
@ -665,7 +675,6 @@ class ZfsDataset:
return ret
@property
def recursive_datasets(self, types="filesystem,volume"):
"""get all (non-snapshot) datasets recursively under us
@ -757,7 +766,6 @@ class ZfsDataset:
# incremental?
if prev_snapshot:
cmd.extend(["-i", prev_snapshot.typed_suffix])
cmd.append(self.name)
@ -1178,7 +1186,7 @@ class ZfsDataset:
while source_snapshot:
# we want it?
if (also_other_snapshots or source_snapshot.is_ours()) and not source_snapshot.is_excluded:
if (also_other_snapshots or source_snapshot.is_ours()) and not source_snapshot.is_snapshot_excluded:
# create virtual target snapshot
target_snapshot = target_dataset.zfs_node.get_dataset(
target_dataset.filesystem_name + source_snapshot.typed_suffix, force_exists=False)
@ -1228,7 +1236,8 @@ class ZfsDataset:
def sync_snapshots(self, target_dataset, features, show_progress, filter_properties, set_properties,
ignore_recv_exit_code, holds, rollback, decrypt, encrypt, also_other_snapshots,
no_send, destroy_incompatible, send_pipes, recv_pipes, zfs_compressed, force, guid_check, use_bookmarks):
no_send, destroy_incompatible, send_pipes, recv_pipes, zfs_compressed, force, guid_check,
use_bookmarks):
"""sync this dataset's snapshots to target_dataset, while also thinning
out old snapshots along the way.
@ -1348,7 +1357,8 @@ class ZfsDataset:
# we may now destroy the previous source snapshot if it's obsolete or a bookmark
# FIXME: with bookmarks, the current snapshot could also be removed immediately after sending
# FIXME: this seems wrong — it now always removes the bookmark? what if we want to send to a different target (as in test_encryption.py)
if prev_source_snapshot_bookmark and (prev_source_snapshot_bookmark in source_obsoletes or prev_source_snapshot_bookmark.is_bookmark):
if prev_source_snapshot_bookmark and (
prev_source_snapshot_bookmark in source_obsoletes or prev_source_snapshot_bookmark.is_bookmark):
prev_source_snapshot_bookmark.destroy()
# destroy the previous target snapshot if obsolete (usually this is only the common_snapshot,
@ -1364,7 +1374,6 @@ class ZfsDataset:
prev_target_snapshot = target_snapshot
def mount(self, mount_point):
self.debug("Mounting")

View File

@ -17,13 +17,18 @@ from .util import datetime_now
class ZfsNode(ExecuteNode):
"""a node that contains zfs datasets. implements global (systemwide/pool wide) zfs commands"""
def __init__(self, logger, utc=False, snapshot_time_format="", hold_name="", ssh_config=None, ssh_to=None,
readonly=False,
description="",
debug_output=False, thinner=None, exclude_snapshot_patterns=[]):
# def __init__(self, logger, utc=False, snapshot_time_format="", hold_name="", ssh_config=None, ssh_to=None,
# readonly=False,
# description="",
# debug_output=False, thinner=None, exclude_snapshot_patterns=None, tag_seperator='~'):
def __init__(self, logger, utc, snapshot_time_format, hold_name, ssh_config, ssh_to,
readonly,
description,
debug_output, thinner, exclude_snapshot_patterns, tag_seperator):
self.utc = utc
self.snapshot_time_format = snapshot_time_format
self.tag_seperator = tag_seperator
self.hold_name = hold_name
self.description = description