Mirror of https://github.com/psy0rz/zfs_autobackup.git
Testing snapshot scalability. Optimized performance by making --no-holds also skip holds on the target (this also matches the expected behaviour).
This commit is contained in:
parent 0649f42d66
commit 7493a0bc55

Files changed:
  README.md       | 25
  test_scaling.py | 57 (new file)
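For context: a ZFS hold pins a snapshot so that `zfs destroy` refuses to remove it, and each hold shows up in the snapshot's `userrefs` property. Every hold and release is a separate `zfs` invocation (possibly over SSH), which is the per-snapshot cost this commit now also avoids on the target when --no-holds is given. A minimal sketch of that cost, assuming only the stock `zfs hold`/`zfs release` CLI (the tag name here is illustrative, not necessarily the one zfs-autobackup uses):

```python
# Hedged sketch: one subprocess per hold/release, per snapshot, per side.
import subprocess

TAG = "zfs_autobackup:example"  # illustrative tag, not the tool's real one

def hold(snapshot):
    # Pins the snapshot; its userrefs property increases by one.
    subprocess.run(["zfs", "hold", TAG, snapshot], check=True)

def release(snapshot):
    # Drops the hold again; userrefs decreases by one.
    subprocess.run(["zfs", "release", TAG, snapshot], check=True)
```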
README.md
@@ -3,27 +3,6 @@
 [](https://github.com/psy0rz/zfs_autobackup/actions?query=workflow%3A%22Regression+tests%22) [](https://coveralls.io/github/psy0rz/zfs_autobackup) [](https://pypi.org/project/zfs-autobackup/)
 
-## New in v3
-
-* Complete rewrite, cleaner object oriented code.
-* Python 3 and 2 support.
-* Automated regression test against real ZFS environment.
-* Installable via [pip](https://pypi.org/project/zfs-autobackup/).
-* Backwards compatible with your current backups and parameters.
-* Progressive thinning (via a destroy schedule. default schedule should be fine for most people)
-* Cleaner output, with optional color support (pip install colorama).
-* Clear distinction between local and remote output.
-* Summary at the beginning, displaying what will happen and the current thinning-schedule.
-* More efficient destroying/skipping snapshots on the fly. (no more space issues if your backup is way behind)
-* Progress indicator (--progress)
-* Better property management (--set-properties and --filter-properties)
-* Better resume handling, automatically abort invalid resumes.
-* More robust error handling.
-* Prepared for future enhancements.
-* Supports raw backups for encryption.
-* Custom SSH client config.
-
 ## Introduction
 
 This is a tool I wrote to make replicating ZFS datasets easy and reliable.
@@ -396,6 +375,10 @@ Snapshots on the source that still have to be send to the target wont be destroy
 * Use ```--clear-refreservation``` to save space on your backup server.
 * Use ```--clear-mountpoint``` to prevent the target server from mounting the backupped filesystem in the wrong place during a reboot.
 
+### Performance tips
+
+* --no-holds and --allow-empty improve performance a lot if you deal with large amounts of datasets or snapshots.
+
 ### Speeding up SSH
 
 You can make your ssh connections persistent and greatly speed up zfs-autobackup:
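The new performance tip pairs naturally with a concrete invocation. A hedged sketch, driving the Python entry point the same way the regression tests in this commit do (the "test"/"test_target1" names mirror the test fixtures; real deployments would use their own backup name and target dataset):

```python
# Run zfs-autobackup from Python, as test_scaling.py does below.
# Equivalent CLI: zfs-autobackup --verbose --no-holds --allow-empty test test_target1
from bin.zfs_autobackup import ZfsAutobackup

args = "test test_target1 --verbose --no-holds --allow-empty"
ZfsAutobackup(args.split(" ")).run()  # returns falsy on success, per the tests
```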
@@ -1182,7 +1182,7 @@ class ZfsDataset:
         return allowed_filter_properties, allowed_set_properties
 
     def sync_snapshots(self, target_dataset, features, show_progress=False, filter_properties=None, set_properties=None,
-                       ignore_recv_exit_code=False, source_holds=True, rollback=False, raw=False, other_snapshots=False,
+                       ignore_recv_exit_code=False, holds=True, rollback=False, raw=False, other_snapshots=False,
                        no_send=False, destroy_incompatible=False):
         """sync this dataset's snapshots to target_dataset, while also thinning out old snapshots along the way."""
 
@@ -1293,11 +1293,12 @@ class ZfsDataset:
                 resume_token = None
 
             # hold the new common snapshots and release the previous ones
-            target_snapshot.hold()
-            if source_holds:
-                source_snapshot.hold()
+            if holds:
+                target_snapshot.hold()
+                source_snapshot.hold()
 
             if prev_source_snapshot:
-                if source_holds:
+                if holds:
                     prev_source_snapshot.release()
-                target_dataset.find_snapshot(prev_source_snapshot).release()
+                    target_dataset.find_snapshot(prev_source_snapshot).release()
 
@@ -1595,8 +1596,7 @@ class ZfsAutobackup:
                             help='Ignore datasets that seem to be replicated some other way. (No changes since '
                                  'lastest snapshot. Useful for proxmox HA replication)')
         parser.add_argument('--no-holds', action='store_true',
-                            help='Don\'t lock snapshots on the source. (Useful to allow proxmox HA replication to '
-                                 'switches nodes)')
+                            help='Don\'t hold snapshots. (Faster)')
 
         parser.add_argument('--resume', action='store_true', help=argparse.SUPPRESS)
         parser.add_argument('--strip-path', default=0, type=int,
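The simplified flag is a plain argparse boolean. A small self-contained sketch of how --no-holds surfaces in the parsed arguments (the parser and sample argv here are illustrative):

```python
# store_true: passing --no-holds sets args.no_holds to True,
# which the call site below negates into holds=False.
import argparse

parser = argparse.ArgumentParser()
parser.add_argument('--no-holds', action='store_true',
                    help='Don\'t hold snapshots. (Faster)')

args = parser.parse_args(['--no-holds'])
print(args.no_holds)  # True
```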
@@ -1753,7 +1753,7 @@ class ZfsAutobackup:
                                 features=common_features, filter_properties=filter_properties,
                                 set_properties=set_properties,
                                 ignore_recv_exit_code=self.args.ignore_transfer_errors,
-                                source_holds=not self.args.no_holds, rollback=self.args.rollback,
+                                holds=not self.args.no_holds, rollback=self.args.rollback,
                                 raw=self.args.raw, other_snapshots=self.args.other_snapshots,
                                 no_send=self.args.no_send,
                                 destroy_incompatible=self.args.destroy_incompatible)
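Taken together with the ZfsDataset hunks above, the renamed parameter means one boolean now gates holds on both sides. A hedged sketch with stubbed objects (FakeSnapshot stands in for the real ZfsDataset) of the resulting control flow:

```python
# Stub sketch: with holds=False (i.e. --no-holds), no hold commands are
# issued for either the source or the target snapshot.
class FakeSnapshot:
    def __init__(self, name):
        self.name = name

    def hold(self):
        print("zfs hold {}".format(self.name))

def sync_step(source_snapshot, target_snapshot, holds=True):
    # holds = not args.no_holds at the call site above
    if holds:
        target_snapshot.hold()  # before this commit: unconditional
        source_snapshot.hold()  # before this commit: gated by source_holds

sync_step(FakeSnapshot("pool/fs@s1"), FakeSnapshot("backup/fs@s1"), holds=False)  # prints nothing
```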
test_scaling.py (new file)
@@ -0,0 +1,57 @@
+from basetest import *
+import time
+from bin.zfs_autobackup import *
+
+
+run_orig=ExecuteNode.run
+run_counter=0
+
+
+def run_count(*args, **kwargs):
+    global run_counter
+    run_counter=run_counter+1
+    return (run_orig(*args, **kwargs))
+
+
+class TestZfsScaling(unittest2.TestCase):
+
+    def setUp(self):
+        prepare_zpools()
+        self.longMessage = True
+
+    def test_manysnaps(self):
+        """count the number of commands when there are many snapshots."""
+
+        snapshot_count=100
+
+        # create bunch of snapshots
+        s=""
+        for i in range(1970,1970+snapshot_count):
+            s=s+"zfs snapshot test_source1/fs1@test-{:04}1111000000;".format(i)
+
+        shelltest(s)
+
+        global run_counter
+
+        run_counter=0
+        with patch.object(ExecuteNode,'run', run_count) as p:
+            with patch('time.strftime', return_value="20101112000000"):
+                self.assertFalse(ZfsAutobackup("test test_target1 --verbose --keep-source=10000 --keep-target=10000 --no-holds --allow-empty".split(" ")).run())
+
+
+        #this triggers if you make a change with an impact of more than O(snapshot_count/2)
+        expected_runs=343
+        print("ACTUAL RUNS: {}".format(run_counter))
+        self.assertLess(abs(run_counter-expected_runs), snapshot_count/2)
+
+
+        run_counter=0
+        with patch.object(ExecuteNode,'run', run_count) as p:
+            with patch('time.strftime', return_value="20101112000001"):
+                self.assertFalse(ZfsAutobackup("test test_target1 --verbose --keep-source=10000 --keep-target=10000 --no-holds --allow-empty".split(" ")).run())
+
+
+        #this triggers if you make a change with an impact of more than O(snapshot_count/2)
+        expected_runs=47
+        print("ACTUAL RUNS: {}".format(run_counter))
+        self.assertLess(abs(run_counter-expected_runs), snapshot_count/2)
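The counting trick above, swapping ExecuteNode.run for a wrapper that increments a global before delegating to the original, is reusable on its own. A minimal self-contained sketch of the same pattern (Node is a stand-in class, not the real ExecuteNode):

```python
# Count how many times a method runs by patching it with a counting wrapper.
from unittest.mock import patch

class Node:
    def run(self, cmd):
        return "ran: {}".format(cmd)

run_orig = Node.run
run_counter = 0

def run_count(*args, **kwargs):
    # Increment the counter, then delegate to the original method.
    global run_counter
    run_counter += 1
    return run_orig(*args, **kwargs)

with patch.object(Node, "run", run_count):
    node = Node()
    node.run("zfs list")
    node.run("zfs snapshot pool/fs@s1")

print(run_counter)  # 2
```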
@@ -370,13 +370,13 @@ test_source2/fs3/sub userrefs - -
 test_target1                                            userrefs  -  -
 test_target1/test_source1                               userrefs  -  -
 test_target1/test_source1/fs1                           userrefs  -  -
-test_target1/test_source1/fs1@test-20101111000000       userrefs  1  -
+test_target1/test_source1/fs1@test-20101111000000       userrefs  0  -
 test_target1/test_source1/fs1/sub                       userrefs  -  -
-test_target1/test_source1/fs1/sub@test-20101111000000   userrefs  1  -
+test_target1/test_source1/fs1/sub@test-20101111000000   userrefs  0  -
 test_target1/test_source2                               userrefs  -  -
 test_target1/test_source2/fs2                           userrefs  -  -
 test_target1/test_source2/fs2/sub                       userrefs  -  -
-test_target1/test_source2/fs2/sub@test-20101111000000   userrefs  1  -
+test_target1/test_source2/fs2/sub@test-20101111000000   userrefs  0  -
 """)
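The userrefs values flipping from 1 to 0 in the test expectations above are the observable effect of this commit: with --no-holds, target snapshots no longer carry a hold, so thinning can destroy them without a release step. A hedged sketch of checking this on a live system (the snapshot name is taken from the test fixtures):

```python
# Query a snapshot's hold count, the same property the test asserts on.
import subprocess

def userrefs(snapshot):
    out = subprocess.run(
        ["zfs", "get", "-H", "-o", "value", "userrefs", snapshot],
        capture_output=True, text=True, check=True)
    return int(out.stdout.strip())

# 0 means no holds: the snapshot is free to be destroyed by thinning.
print(userrefs("test_target1/test_source1/fs1@test-20101111000000"))
```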