forked from third-party-mirrors/zfs_autobackup

commit 673db7c014 (parent 637963c046)

    wip
@@ -168,16 +168,20 @@ class Thinner:
         return(ret)
 
-    def run(self,objects, now=None):
+    def thin(self,objects, keep_objects=[], now=None):
         """thin list of objects with current schedule rules.
 
-        object should have timestamp-attribute with unix timestamp
+        objects: list of objects to thin. Every object should have a timestamp attribute.
+
+        keep_objects: objects to always keep (these should also be in the normal objects list, so we can use them to decide which other objects are obsolete)
 
         return( keeps, removes )
         """
 
+        #keep everything
         if len(objects)<=self.always_keep:
             return( (objects, []) )
 
+        #determine time blocks
         time_blocks={}
         for rule in self.rules:
             time_blocks[rule.period]={}
@@ -195,6 +199,7 @@ class Thinner:
             age=now-timestamp
 
             # store in the correct time blocks, per period-size, if not too old yet
+            # i.e.: look if there is ANY time block that wants to keep this object
             keep=False
             for rule in self.rules:
                 if age<=rule.ttl:
@@ -203,7 +208,8 @@ class Thinner:
                         time_blocks[rule.period][block_nr]=True
                         keep=True
 
-            if keep:
+            #keep it according to schedule, or keep it because it is in the keep_objects list
+            if keep or object in keep_objects:
                 keeps.append(object)
             else:
                 removes.append(object)
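To make the new keep_objects behaviour concrete, here is a minimal runnable sketch of the decision the three hunks above implement. Rule and thin are simplified stand-ins for the Thinner internals (the real class parses its schedule into rules), and the numbers in the demo are made up:

    import time

    class Rule:
        def __init__(self, period, ttl):
            self.period = period  # time-block size in seconds
            self.ttl = ttl        # keep one object per block until this age

    def thin(objects, rules, keep_objects=[], always_keep=1, now=None):
        """Return (keeps, removes); every object needs a timestamp attribute."""
        if len(objects) <= always_keep:
            return (objects, [])
        now = now if now is not None else int(time.time())
        time_blocks = {rule.period: {} for rule in rules}
        keeps, removes = [], []
        for obj in sorted(objects, key=lambda o: o.timestamp):
            age = now - obj.timestamp
            keep = False
            for rule in rules:
                if age <= rule.ttl:
                    block_nr = obj.timestamp // rule.period
                    if block_nr not in time_blocks[rule.period]:
                        time_blocks[rule.period][block_nr] = True
                        keep = True
            # keep by schedule, or because the caller insists via keep_objects
            if keep or obj in keep_objects:
                keeps.append(obj)
            else:
                removes.append(obj)
        return (keeps, removes)

    class Obj:
        def __init__(self, timestamp):
            self.timestamp = timestamp

    objs = [Obj(t) for t in range(0, 3600*24, 3600)]  # one snapshot per hour
    keeps, removes = thin(objs, [Rule(3600*4, 3600*24)], keep_objects=[objs[5]], now=3600*24)
    assert objs[5] in keeps  # forced keep wins even when the schedule would drop it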
@@ -450,8 +456,10 @@ class ZfsDataset():
 
     """
 
-    def __init__(self, zfs_node, name):
-        """name: full path of the zfs dataset"""
+    def __init__(self, zfs_node, name, exists=None):
+        """name: full path of the zfs dataset
+        exists: specify if you already know whether the dataset exists, for performance reasons (otherwise it will have to check with zfs list when needed)
+        """
         self.zfs_node=zfs_node
         self.name=name #full name
 
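Hypothetical usage of the new exists hint (the node and dataset names below are made up): when snapshot objects are pre-created and known not to exist yet, the zfs list round-trip can be skipped later.

    snapshot = ZfsDataset(target_node, "rpool/data@autobackup-test", exists=False)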
@@ -524,8 +532,13 @@ class ZfsDataset():
 
 
     @cached_property
-    def exists(self):
-        """check if dataset exists"""
+    def exists(self, force=None):
+        """check if dataset exists.
+        Use force to force a specific value to be cached, if you already know it. Useful for performance reasons."""
+
+        if force!=None:
+            return(force)
+
         self.debug("Checking if filesystem exists")
         return(self.zfs_node.run(tab_split=True, cmd=[ "zfs", "list", self.name], readonly=True, valid_exitcodes=[ 0,1 ], hide_errors=True) and True)
 
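Since a property cannot receive a force argument through normal attribute access, this hunk presumably relies on the project's own cached_property helper. A minimal sketch of the same caching idea with a plain method, simplified to checking the exit code of zfs list (all names here are illustrative):

    import subprocess

    class DatasetSketch:
        def __init__(self, name, exists=None):
            self.name = name
            self._exists = exists  # pre-seed the cache if the caller already knows

        def exists(self, force=None):
            if force is not None:
                self._exists = force  # cache a value the caller already knows
            if self._exists is None:
                # ask zfs; exit code 0 means the dataset exists, 1 means it does not
                result = subprocess.run(["zfs", "list", self.name], capture_output=True)
                self._exists = (result.returncode == 0)
            return self._exists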
@@ -772,9 +785,21 @@ class ZfsDataset():
                 target_dataset.snapshots.append(ZfsDataset(target_dataset.zfs_node, target_dataset.name+"@"+self.snapshot_name))
 
 
+    def thin(self, keep=[]):
+        """determines the list of snapshots that should be kept or deleted, based on the thinning schedule.
+        keep: list of snapshots to always keep
+        """
+        return(self.zfs_node.thinner.thin(self.our_snapshots, keep_objects=keep))
+
+
     def sync_snapshots(self, target_dataset, show_progress=False):
         """sync our snapshots to target_dataset"""
 
+        #determine the snapshots that are obsolete, so we can skip or clean some snapshots
+        (source_keeps, source_obsoletes)=self.thin()
+        #XXX: pre-create target snapshot list with exists=False so the thinner can "plan ahead" what the target eventually wants
+        (target_keeps, target_obsoletes)=self.thin()
 
         # initial transfer
         resume_token=None
         if not target_dataset.exists:
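The second self.thin() call is the placeholder the XXX comment refers to. A hedged sketch of what the plan-ahead could look like, reusing the classes from this diff (the helper name plan_target_thinning is invented):

    def plan_target_thinning(source_dataset, target_dataset):
        # pre-create the snapshot list the target would have after sync,
        # marked exists=False, and run the node's thinner over it
        planned = []
        for snapshot in source_dataset.our_snapshots:
            planned.append(ZfsDataset(target_dataset.zfs_node,
                                      target_dataset.name + "@" + snapshot.snapshot_name,
                                      exists=False))
        return target_dataset.zfs_node.thinner.thin(planned)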
@@ -792,6 +817,7 @@ class ZfsDataset():
             self.our_snapshots[0].transfer_snapshot(target_dataset, show_progress=show_progress, resume_token=resume_token)
             resume_token=None
 
+        #increments
         self.debug("Sync snapshots: Incremental transfer")
         latest_common_snapshot=None
         for source_snapshot in self.our_snapshots:
@@ -801,11 +827,14 @@ class ZfsDataset():
                 latest_common_snapshot=source_snapshot
             else:
                 if latest_common_snapshot:
+                    # do we still want it on target?
                     #transfer it
                     source_snapshot.transfer_snapshot(target_dataset, latest_common_snapshot, show_progress=True, resume_token=resume_token)
                     resume_token=None
                     latest_common_snapshot=source_snapshot
 
+        #
+
         if not latest_common_snapshot:
             raise(Exception("Can't find a common snapshot. (hint: zfs destroy {})".format(target_dataset)))
 
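The loop above tracks the newest snapshot present on both sides before transferring increments. A standalone sketch of that search, using plain name lists instead of ZfsDataset objects:

    def latest_common(source_snapshots, target_snapshots):
        """Return the newest snapshot name present on both sides, or None."""
        target_set = set(target_snapshots)
        common = None
        for name in source_snapshots:  # ordered oldest to newest
            if name in target_set:
                common = name
        return common

    assert latest_common(["a", "b", "c"], ["a", "b"]) == "b"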
@@ -830,6 +859,8 @@ class ZfsNode(ExecuteNode):
         else:
             self.verbose("Keep all snapshots forever.")
 
+            self.thinner=Thinner()
+
         ExecuteNode.__init__(self, ssh_to=ssh_to, readonly=readonly, debug_output=debug_output)
 
     def verbose(self,txt):
Loading…
x
Reference in New Issue
Block a user