show pipes in verbose

Edwin Eefting 2021-05-15 12:34:21 +02:00
parent 59d53e9664
commit 5e04aabf37
5 changed files with 37 additions and 41 deletions

View File

@@ -14,20 +14,13 @@ class TestSendRecvPipes(unittest2.TestCase):
"""send basics (remote/local send pipe)"""
with patch('time.strftime', return_value="20101111000000"):
self.assertFalse(ZfsAutobackup(["test", "test_target1", "--no-progress", "--send-pipe=dd bs=1M", "--send-pipe=dd bs=2M"]).run())
self.assertFalse(ZfsAutobackup(["test", "test_target1", "--no-progress", "--send-pipe=dd bs=1M", "--recv-pipe=dd bs=2M"]).run())
with patch('time.strftime', return_value="20101111000001"):
self.assertFalse(ZfsAutobackup(["test", "test_target1", "--no-progress", "--ssh-source=localhost", "--send-pipe=dd bs=1M", "--send-pipe=dd bs=2M"]).run())
self.assertFalse(ZfsAutobackup(["test", "test_target1", "--no-progress", "--ssh-source=localhost", "--send-pipe=dd bs=1M", "--recv-pipe=dd bs=2M"]).run())
# r=shelltest("zfs list -H -o name -r -t snapshot test_target1")
# #NOTE: it won't back up test_target1/a/test_source2/fs2/sub to test_target1/b since it doesn't have the zfs_autobackup property anymore.
# self.assertMultiLineEqual(r,"""
# test_target1/a/test_source1/fs1@test-20101111000000
# test_target1/a/test_source1/fs1/sub@test-20101111000000
# test_target1/a/test_source2/fs2/sub@test-20101111000000
# test_target1/b/test_source1/fs1@test-20101111000000
# test_target1/b/test_source1/fs1/sub@test-20101111000000
# test_target1/b/test_source2/fs2/sub@test-20101111000000
# test_target1/b/test_target1/a/test_source1/fs1@test-20101111000000
# test_target1/b/test_target1/a/test_source1/fs1/sub@test-20101111000000
# """)
with patch('time.strftime', return_value="20101111000001"):
self.assertFalse(ZfsAutobackup(["test", "test_target1", "--no-progress", "--ssh-target=localhost", "--send-pipe=dd bs=1M", "--recv-pipe=dd bs=2M"]).run())
with patch('time.strftime', return_value="20101111000001"):
self.assertFalse(ZfsAutobackup(["test", "test_target1", "--no-progress", "--ssh-source=localhost", "--ssh-target=localhost", "--send-pipe=dd bs=1M", "--recv-pipe=dd bs=2M"]).run())
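The renamed options pair naturally: a compression command on --send-pipe and the matching decompression command on --recv-pipe. A minimal sketch of that pairing (the gzip commands and the import path are illustrative assumptions, not part of this change):

# Hedged sketch: compress the send stream on the source, decompress it again
# in front of zfs recv on the target. The import path is assumed.
from zfs_autobackup.ZfsAutobackup import ZfsAutobackup

ZfsAutobackup([
    "test", "test_target1", "--no-progress",
    "--ssh-target=localhost",
    "--send-pipe=gzip",     # piped after zfs send
    "--recv-pipe=gzip -d",  # piped before zfs recv
]).run()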

View File

@@ -890,7 +890,7 @@ test_target1/test_source2/fs2/sub@test-20101111000003
n=ZfsNode("test",l)
d=ZfsDataset(n,"test_source1@test")
sp=d.send_pipe([], prev_snapshot=None, resume_token=None, show_progress=True, raw=False, output_pipes=[], send_properties=True, write_embedded=True)
sp=d.send_pipe([], prev_snapshot=None, resume_token=None, show_progress=True, raw=False, send_pipes=[], send_properties=True, write_embedded=True)
with OutputIO() as buf:

View File

@@ -261,26 +261,29 @@ class ZfsAutobackup:
if self.args.progress:
self.clear_progress()
def get_input_pipes(self):
def get_recv_pipes(self):
ret=[]
for input_pipe in self.args.recv_pipe:
ret.extend(input_pipe.split(" "))
for recv_pipe in self.args.recv_pipe:
ret.extend(recv_pipe.split(" "))
ret.append(ExecuteNode.PIPE)
self.verbose("Added recv pipe: {}".format(recv_pipe))
return ret
def get_output_pipes(self):
def get_send_pipes(self):
ret=[]
for output_pipe in self.args.send_pipe:
for send_pipe in self.args.send_pipe:
ret.append(ExecuteNode.PIPE)
ret.extend(output_pipe.split(" "))
ret.extend(send_pipe.split(" "))
self.verbose("Added send pipe: {}".format(send_pipe))
return ret
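get_send_pipes() and get_recv_pipes() split every --send-pipe/--recv-pipe value on spaces and separate the commands with ExecuteNode.PIPE markers, now reporting each addition through verbose(). A standalone sketch of that splitting (PIPE is a stand-in; the real ExecuteNode.PIPE value is not shown in this diff):

# Minimal sketch of the splitting logic above.
PIPE = object()  # stand-in for ExecuteNode.PIPE

def build_send_pipes(send_pipe_args):
    ret = []
    for send_pipe in send_pipe_args:
        ret.append(PIPE)                 # marker: this command is piped after zfs send
        ret.extend(send_pipe.split(" "))
    return ret

def build_recv_pipes(recv_pipe_args):
    ret = []
    for recv_pipe in recv_pipe_args:
        ret.extend(recv_pipe.split(" "))
        ret.append(PIPE)                 # marker: zfs recv is piped after this command
    return ret

# build_send_pipes(["dd bs=1M", "gzip"]) -> [PIPE, 'dd', 'bs=1M', PIPE, 'gzip']
# build_recv_pipes(["gzip -d"])          -> ['gzip', '-d', PIPE]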
# NOTE: this method also uses self.args. args that need extra processing are passed as function parameters:
def sync_datasets(self, source_node, source_datasets, target_node):
"""Sync datasets, or thin-only on both sides
@@ -289,8 +292,8 @@ class ZfsAutobackup:
:type source_node: ZfsNode
"""
output_pipes=self.get_output_pipes()
input_pipes=self.get_input_pipes()
send_pipes=self.get_send_pipes()
recv_pipes=self.get_recv_pipes()
fail_count = 0
count = 0
@@ -329,7 +332,7 @@ class ZfsAutobackup:
also_other_snapshots=self.args.other_snapshots,
no_send=self.args.no_send,
destroy_incompatible=self.args.destroy_incompatible,
output_pipes=output_pipes, input_pipes=input_pipes,
send_pipes=send_pipes, recv_pipes=recv_pipes,
decrypt=self.args.decrypt, encrypt=self.args.encrypt, )
except Exception as e:
fail_count = fail_count + 1

View File

@@ -503,15 +503,15 @@ class ZfsDataset:
return self.from_names(names[1:])
def send_pipe(self, features, prev_snapshot, resume_token, show_progress, raw, send_properties, write_embedded, output_pipes):
def send_pipe(self, features, prev_snapshot, resume_token, show_progress, raw, send_properties, write_embedded, send_pipes):
"""returns a pipe with zfs send output for this snapshot
resume_token: resume sending from this token. (in that case we don't
need to know snapshot names)
Args:
:param output_pipes: output cmd array that will be added to actual zfs send command. (e.g. mbuffer or compression program)
:type output_pipes: list of str
:param send_pipes: output cmd array that will be added to actual zfs send command. (e.g. mbuffer or compression program)
:type send_pipes: list of str
:type features: list of str
:type prev_snapshot: ZfsDataset
:type resume_token: str
@@ -557,13 +557,13 @@ class ZfsDataset:
cmd.append(self.name)
cmd.extend(output_pipes)
cmd.extend(send_pipes)
output_pipe = self.zfs_node.run(cmd, pipe=True, readonly=True)
return output_pipe
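With the rename, send_pipes is appended after the snapshot name, so each PIPE marker and its command trail the assembled zfs send command. A rough illustration (flags abbreviated; that ExecuteNode.run() expands PIPE markers into real shell pipes is an assumption, as ExecuteNode is not part of this diff):

# Illustration only, for --send-pipe="dd bs=1M":
# cmd == ['zfs', 'send', <flags depending on features/raw/resume_token>,
#         'test_source1@test', PIPE, 'dd', 'bs=1M']
# ExecuteNode.run(cmd, pipe=True, readonly=True) is then assumed to turn each
# PIPE marker into an actual shell pipe between the commands.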
def recv_pipe(self, pipe, features, input_pipes, filter_properties=None, set_properties=None, ignore_exit_code=False):
def recv_pipe(self, pipe, features, recv_pipes, filter_properties=None, set_properties=None, ignore_exit_code=False):
"""starts a zfs recv for this snapshot and uses pipe as input
note: you can use it on both a snapshot and a filesystem object. The
@@ -571,7 +571,7 @@ class ZfsDataset:
differently.
Args:
:param input_pipes: input cmd array that will be prepended to actual zfs recv command. (e.g. mbuffer or decompression program)
:param recv_pipes: input cmd array that will be prepended to actual zfs recv command. (e.g. mbuffer or decompression program)
:type pipe: subprocess.pOpen
:type features: list of str
:type filter_properties: list of str
@@ -588,7 +588,7 @@ class ZfsDataset:
# build target command
cmd = []
cmd.extend(input_pipes)
cmd.extend(recv_pipes)
cmd.extend(["zfs", "recv"])
@@ -634,15 +634,15 @@ class ZfsDataset:
def transfer_snapshot(self, target_snapshot, features, prev_snapshot, show_progress,
filter_properties, set_properties, ignore_recv_exit_code, resume_token,
raw, send_properties, write_embedded, output_pipes, input_pipes):
raw, send_properties, write_embedded, send_pipes, recv_pipes):
"""transfer this snapshot to target_snapshot. specify prev_snapshot for
incremental transfer
connects a send_pipe() to recv_pipe()
Args:
:type output_pipes: list of str
:type input_pipes: list of str
:type send_pipes: list of str
:type recv_pipes: list of str
:type target_snapshot: ZfsDataset
:type features: list of str
:type prev_snapshot: ZfsDataset
@@ -673,9 +673,9 @@ class ZfsDataset:
# do it
pipe = self.send_pipe(features=features, show_progress=show_progress, prev_snapshot=prev_snapshot,
resume_token=resume_token, raw=raw, send_properties=send_properties, write_embedded=write_embedded, output_pipes=output_pipes)
resume_token=resume_token, raw=raw, send_properties=send_properties, write_embedded=write_embedded, send_pipes=send_pipes)
target_snapshot.recv_pipe(pipe, features=features, filter_properties=filter_properties,
set_properties=set_properties, ignore_exit_code=ignore_recv_exit_code, input_pipes=input_pipes)
set_properties=set_properties, ignore_exit_code=ignore_recv_exit_code, recv_pipes=recv_pipes)
def abort_resume(self):
"""abort current resume state"""
@@ -963,13 +963,13 @@ class ZfsDataset:
def sync_snapshots(self, target_dataset, features, show_progress, filter_properties, set_properties,
ignore_recv_exit_code, holds, rollback, decrypt, encrypt, also_other_snapshots,
no_send, destroy_incompatible, output_pipes, input_pipes):
no_send, destroy_incompatible, send_pipes, recv_pipes):
"""sync this dataset's snapshots to target_dataset, while also thinning
out old snapshots along the way.
Args:
:type output_pipes: list of str
:type input_pipes: list of str
:type send_pipes: list of str
:type recv_pipes: list of str
:type target_dataset: ZfsDataset
:type features: list of str
:type show_progress: bool
@@ -1046,7 +1046,7 @@ class ZfsDataset:
filter_properties=active_filter_properties,
set_properties=active_set_properties,
ignore_recv_exit_code=ignore_recv_exit_code,
resume_token=resume_token, write_embedded=write_embedded,raw=raw, send_properties=send_properties, output_pipes=output_pipes, input_pipes=input_pipes)
resume_token=resume_token, write_embedded=write_embedded, raw=raw, send_properties=send_properties, send_pipes=send_pipes, recv_pipes=recv_pipes)
resume_token = None

View File

@@ -120,7 +120,7 @@ class ZfsNode(ExecuteNode):
self._progress_total_bytes = int(progress_fields[2])
elif progress_fields[0] == 'incremental':
self._progress_total_bytes = int(progress_fields[3])
else:
elif progress_fields[1].isnumeric():
bytes_ = int(progress_fields[1])
if self._progress_total_bytes:
percentage = min(100, int(bytes_ * 100 / self._progress_total_bytes))
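The ZfsNode hunk tightens the progress parser: a byte count is only read from field 1 when that field is actually numeric, presumably so that unexpected lines (for example output produced by the new pipe commands) are skipped instead of being fed to int(). A standalone sketch of the guarded parsing (field separation and line shapes are assumptions; only the branching mirrors the change):

def parse_progress_line(line, total_bytes=None):
    """Sketch only: returns (total_bytes, bytes_done) for one line of zfs send -v output."""
    fields = line.rstrip().split()  # exact field separator used by ZfsNode is an assumption
    bytes_done = None
    if fields and fields[0] == 'full' and len(fields) > 2:
        total_bytes = int(fields[2])
    elif fields and fields[0] == 'incremental' and len(fields) > 3:
        total_bytes = int(fields[3])
    elif len(fields) > 1 and fields[1].isnumeric():
        # the new guard: only treat field 1 as a byte count when it is numeric,
        # so stray lines never reach int() and raise ValueError
        bytes_done = int(fields[1])
    return total_bytes, bytes_done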