working on proper encryption support

This commit is contained in:
Edwin Eefting 2021-04-20 18:39:57 +02:00
parent c0ea311e18
commit 671eda7386
3 changed files with 60 additions and 22 deletions

@ -1,4 +1,4 @@
from CmdPipe import CmdPipe
from zfs_autobackup.CmdPipe import CmdPipe
from basetest import *
import time
@ -882,9 +882,31 @@ test_target1/test_source2/fs2/sub@test-20101111000003
###########################
# TODO:
def test_raw(self):
def test_encrypted(self):
self.skipTest("todo: later when travis supports zfs 0.8")
# create encrypted dataset
shelltest("echo 12345678 > /tmp/zfstest.key")
shelltest("zfs create -o keylocation=file:///tmp/zfstest.key -o keyformat=passphrase -o encryption=on test_source1/fs1/enc1")
r=shelltest("dd if=/dev/zero of=/test_source1/fs1/enc1/data.txt bs=200000 count=1")
with patch('time.strftime', return_value="20101111000000"):
self.assertFalse(ZfsAutobackup("test test_target1 --allow-empty --verbose --no-progress".split(" ")).run())
# self.skipTest("todo: later when travis supports zfs 0.8")
r = shelltest("zfs get encryption -H -o value test_target1/test_source1/fs1/enc1")
self.assertNotIn("off",r)
def test_decrypted(self):
# create encrypted dataset
shelltest("echo 12345678 > /tmp/zfstest.key")
shelltest("zfs create -o keylocation=file:///tmp/zfstest.key -o keyformat=passphrase -o encryption=on test_source1/fs1/enc1")
r=shelltest("dd if=/dev/zero of=/test_source1/fs1/enc1/data.txt bs=200000 count=1")
with patch('time.strftime', return_value="20101111000000"):
self.assertFalse(ZfsAutobackup("test test_target1 --decrypt --allow-empty --no-progress".split(" ")).run())
# self.skipTest("todo: later when travis supports zfs 0.8")
r = shelltest("zfs get encryption -H -o value test_target1/test_source1/fs1/enc1")
self.assertIn("off",r)
def test_progress(self):
@ -896,7 +918,7 @@ test_target1/test_source2/fs2/sub@test-20101111000003
n=ZfsNode("test",l)
d=ZfsDataset(n,"test_source1@test")
sp=d.send_pipe([], prev_snapshot=None, resume_token=None, show_progress=True, raw=False, output_pipes=[])
sp=d.send_pipe([], prev_snapshot=None, resume_token=None, show_progress=True, raw=False, output_pipes=[], send_properties=False)
with OutputIO() as buf:

@ -90,7 +90,10 @@ class ZfsAutobackup:
help='Ignore transfer errors (still checks if received filesystem exists. useful for '
'acltype errors)')
parser.add_argument('--raw', action='store_true',
help='For encrypted datasets, send data exactly as it exists on disk.')
help=argparse.SUPPRESS)
parser.add_argument('--decrypt', action='store_true',
help='Decrypt data before sending it over')
parser.add_argument('--test', action='store_true',
help='dont change anything, just show what would be done (still does all read-only '
@ -136,6 +139,9 @@ class ZfsAutobackup:
if args.resume:
self.verbose("NOTE: The --resume option isn't needed anymore (its autodetected now)")
if args.raw:
self.verbose("NOTE: The --raw option isn't needed anymore (its autodetected now). Use --decrypt to explicitly send data decrypted.")
if args.target_path is not None and args.target_path[0] == "/":
self.log.error("Target should not start with a /")
sys.exit(255)
@ -256,10 +262,10 @@ class ZfsAutobackup:
set_properties=self.set_properties_list(),
ignore_recv_exit_code=self.args.ignore_transfer_errors,
holds=not self.args.no_holds, rollback=self.args.rollback,
raw=self.args.raw, also_other_snapshots=self.args.other_snapshots,
also_other_snapshots=self.args.other_snapshots,
no_send=self.args.no_send,
destroy_incompatible=self.args.destroy_incompatible,
output_pipes=self.args.send_pipe, input_pipes=self.args.recv_pipe)
output_pipes=self.args.send_pipe, input_pipes=self.args.recv_pipe, decrypt=self.args.decrypt)
except Exception as e:
fail_count = fail_count + 1
source_dataset.error("FAILED: " + str(e))
@ -406,7 +412,7 @@ class ZfsAutobackup:
else:
if fail_count != 255:
self.error("{} failures!".format(fail_count))
self.error("{} dataset(s) failed!".format(fail_count))
if self.args.test:
self.verbose("")

@ -494,14 +494,14 @@ class ZfsDataset:
return self.from_names(names[1:])
def send_pipe(self, features, prev_snapshot, resume_token, show_progress, raw, output_pipes):
def send_pipe(self, features, prev_snapshot, resume_token, show_progress, raw, send_properties, output_pipes):
"""returns a pipe with zfs send output for this snapshot
resume_token: resume sending from this token. (in that case we don't
need to know snapshot names)
Args:
:type output_pipes: list of CmdPipe
:type output_pipes: list of str
:type features: list of str
:type prev_snapshot: ZfsDataset
:type resume_token: str
@ -523,13 +523,7 @@ class ZfsDataset:
if "-c" in self.zfs_node.supported_send_options:
cmd.append("-c") # use compressed WRITE records
# NOTE: performance is usually worse with this option, according to manual
# also -D will be deprecated in newer ZFS versions
# if not resume:
# if "-D" in self.zfs_node.supported_send_options:
# cmd.append("-D") # dedupped stream, sends less duplicate data
# raw? (for encryption)
# raw? (send over encrypted data in its original encrypted form without decrypting)
if raw:
cmd.append("--raw")
@ -544,7 +538,8 @@ class ZfsDataset:
else:
# send properties
cmd.append("-p")
if send_properties:
cmd.append("-p")
# incremental?
if prev_snapshot:
@ -637,7 +632,7 @@ class ZfsDataset:
def transfer_snapshot(self, target_snapshot, features, prev_snapshot, show_progress,
filter_properties, set_properties, ignore_recv_exit_code, resume_token,
raw, output_pipes, input_pipes):
raw, send_properties, output_pipes, input_pipes):
"""transfer this snapshot to target_snapshot. specify prev_snapshot for
incremental transfer
@ -676,7 +671,7 @@ class ZfsDataset:
# do it
pipe = self.send_pipe(features=features, show_progress=show_progress, prev_snapshot=prev_snapshot,
resume_token=resume_token, raw=raw, output_pipes=output_pipes)
resume_token=resume_token, raw=raw, send_properties=send_properties, output_pipes=output_pipes)
target_snapshot.recv_pipe(pipe, features=features, filter_properties=filter_properties,
set_properties=set_properties, ignore_exit_code=ignore_recv_exit_code)
@ -963,8 +958,9 @@ class ZfsDataset:
snapshot.destroy()
self.snapshots.remove(snapshot)
def sync_snapshots(self, target_dataset, features, show_progress, filter_properties, set_properties,
ignore_recv_exit_code, holds, rollback, raw, also_other_snapshots,
ignore_recv_exit_code, holds, rollback, decrypt, also_other_snapshots,
no_send, destroy_incompatible, output_pipes, input_pipes):
"""sync this dataset's snapshots to target_dataset, while also thinning
out old snapshots along the way.
@ -981,6 +977,7 @@ class ZfsDataset:
:type holds: bool
:type rollback: bool
:type raw: bool
:type decrypt: bool
:type also_other_snapshots: bool
:type no_send: bool
:type destroy_incompatible: bool
@ -1010,6 +1007,19 @@ class ZfsDataset:
if rollback:
target_dataset.rollback()
send_properties = True
raw = False
# source dataset encrypted?
if self.properties.get('encryption', 'off')!='off':
# user wants to send it over decrypted?
if decrypt:
# when decrypting, zfs can't send properties
send_properties=False
else:
# keep data encrypted by sending it raw (including properties)
raw=True
# now actually transfer the snapshots
prev_source_snapshot = common_snapshot
source_snapshot = start_snapshot
@ -1026,7 +1036,7 @@ class ZfsDataset:
filter_properties=allowed_filter_properties,
set_properties=allowed_set_properties,
ignore_recv_exit_code=ignore_recv_exit_code,
resume_token=resume_token, raw=raw, output_pipes=output_pipes, input_pipes=input_pipes)
resume_token=resume_token, raw=raw, send_properties=send_properties, output_pipes=output_pipes, input_pipes=input_pipes)
resume_token = None