Mirror of https://github.com/PR0M3TH3AN/SeedPass.git, synced 2025-09-09 15:58:48 +00:00.
Commit: "Add snapshot backup support".
This commit is contained in:
@@ -22,10 +22,13 @@ __all__ = [
|
|||||||
"KIND_DELTA",
|
"KIND_DELTA",
|
||||||
"Manifest",
|
"Manifest",
|
||||||
"ChunkMeta",
|
"ChunkMeta",
|
||||||
|
"prepare_snapshot",
|
||||||
]
|
]
|
||||||
|
|
||||||
|
|
||||||
def __getattr__(name: str):
|
def __getattr__(name: str):
|
||||||
if name == "NostrClient":
|
if name == "NostrClient":
|
||||||
return import_module(".client", __name__).NostrClient
|
return import_module(".client", __name__).NostrClient
|
||||||
|
if name == "prepare_snapshot":
|
||||||
|
return import_module(".client", __name__).prepare_snapshot
|
||||||
raise AttributeError(f"module '{__name__}' has no attribute '{name}'")
|
raise AttributeError(f"module '{__name__}' has no attribute '{name}'")
|
||||||
|
@@ -4,9 +4,10 @@ import base64
|
|||||||
import json
|
import json
|
||||||
import logging
|
import logging
|
||||||
import time
|
import time
|
||||||
from typing import List, Optional
|
from typing import List, Optional, Tuple
|
||||||
import hashlib
|
import hashlib
|
||||||
import asyncio
|
import asyncio
|
||||||
|
import gzip
|
||||||
|
|
||||||
# Imports from the nostr-sdk library
|
# Imports from the nostr-sdk library
|
||||||
from nostr_sdk import (
|
from nostr_sdk import (
|
||||||
@@ -22,6 +23,7 @@ from nostr_sdk import (
|
|||||||
from datetime import timedelta
|
from datetime import timedelta
|
||||||
|
|
||||||
from .key_manager import KeyManager as SeedPassKeyManager
|
from .key_manager import KeyManager as SeedPassKeyManager
|
||||||
|
from .backup_models import Manifest, ChunkMeta, KIND_MANIFEST, KIND_SNAPSHOT_CHUNK
|
||||||
from password_manager.encryption import EncryptionManager
|
from password_manager.encryption import EncryptionManager
|
||||||
from utils.file_lock import exclusive_lock
|
from utils.file_lock import exclusive_lock
|
||||||
|
|
||||||
@@ -39,6 +41,44 @@ DEFAULT_RELAYS = [
|
|||||||
]
|
]
|
||||||
|
|
||||||
|
|
||||||
|
def prepare_snapshot(
|
||||||
|
encrypted_bytes: bytes, limit: int
|
||||||
|
) -> Tuple[Manifest, list[bytes]]:
|
||||||
|
"""Compress and split the encrypted vault into chunks.
|
||||||
|
|
||||||
|
Each chunk is hashed with SHA-256 and described in the returned
|
||||||
|
:class:`Manifest`.
|
||||||
|
|
||||||
|
Parameters
|
||||||
|
----------
|
||||||
|
encrypted_bytes : bytes
|
||||||
|
The encrypted vault contents.
|
||||||
|
limit : int
|
||||||
|
Maximum chunk size in bytes.
|
||||||
|
|
||||||
|
Returns
|
||||||
|
-------
|
||||||
|
Tuple[Manifest, list[bytes]]
|
||||||
|
The manifest describing all chunks and the list of chunk bytes.
|
||||||
|
"""
|
||||||
|
|
||||||
|
compressed = gzip.compress(encrypted_bytes)
|
||||||
|
chunks = [compressed[i : i + limit] for i in range(0, len(compressed), limit)]
|
||||||
|
|
||||||
|
metas: list[ChunkMeta] = []
|
||||||
|
for i, chunk in enumerate(chunks):
|
||||||
|
metas.append(
|
||||||
|
ChunkMeta(
|
||||||
|
id=f"seedpass-chunk-{i:04d}",
|
||||||
|
size=len(chunk),
|
||||||
|
hash=hashlib.sha256(chunk).hexdigest(),
|
||||||
|
)
|
||||||
|
)
|
||||||
|
|
||||||
|
manifest = Manifest(ver=1, algo="gzip", chunks=metas)
|
||||||
|
return manifest, chunks
|
||||||
|
|
||||||
|
|
||||||
class NostrClient:
|
class NostrClient:
|
||||||
"""Interact with the Nostr network using nostr-sdk."""
|
"""Interact with the Nostr network using nostr-sdk."""
|
||||||
|
|
||||||
@@ -195,6 +235,82 @@ class NostrClient:
|
|||||||
self.last_error = "Latest event contained no content"
|
self.last_error = "Latest event contained no content"
|
||||||
return None
|
return None
|
||||||
|
|
||||||
|
async def publish_snapshot(
|
||||||
|
self, encrypted_bytes: bytes, limit: int = 50_000
|
||||||
|
) -> Manifest:
|
||||||
|
"""Publish a compressed snapshot split into chunks.
|
||||||
|
|
||||||
|
Parameters
|
||||||
|
----------
|
||||||
|
encrypted_bytes : bytes
|
||||||
|
Vault contents already encrypted with the user's key.
|
||||||
|
limit : int, optional
|
||||||
|
Maximum chunk size in bytes. Defaults to 50 kB.
|
||||||
|
"""
|
||||||
|
|
||||||
|
manifest, chunks = prepare_snapshot(encrypted_bytes, limit)
|
||||||
|
for meta, chunk in zip(manifest.chunks, chunks):
|
||||||
|
content = base64.b64encode(chunk).decode("utf-8")
|
||||||
|
builder = EventBuilder(Kind(KIND_SNAPSHOT_CHUNK), content).tags(
|
||||||
|
[Tag.identifier(meta.id)]
|
||||||
|
)
|
||||||
|
event = builder.build(self.keys.public_key()).sign_with_keys(self.keys)
|
||||||
|
await self.client.send_event(event)
|
||||||
|
|
||||||
|
manifest_json = json.dumps(
|
||||||
|
{
|
||||||
|
"ver": manifest.ver,
|
||||||
|
"algo": manifest.algo,
|
||||||
|
"chunks": [meta.__dict__ for meta in manifest.chunks],
|
||||||
|
"delta_since": manifest.delta_since,
|
||||||
|
}
|
||||||
|
)
|
||||||
|
|
||||||
|
manifest_event = (
|
||||||
|
EventBuilder(Kind(KIND_MANIFEST), manifest_json)
|
||||||
|
.build(self.keys.public_key())
|
||||||
|
.sign_with_keys(self.keys)
|
||||||
|
)
|
||||||
|
await self.client.send_event(manifest_event)
|
||||||
|
return manifest
|
||||||
|
|
||||||
|
async def fetch_latest_snapshot(self) -> Tuple[Manifest, list[bytes]] | None:
|
||||||
|
"""Retrieve the latest manifest and all snapshot chunks."""
|
||||||
|
|
||||||
|
pubkey = self.keys.public_key()
|
||||||
|
f = Filter().author(pubkey).kind(Kind(KIND_MANIFEST)).limit(1)
|
||||||
|
timeout = timedelta(seconds=10)
|
||||||
|
events = (await self.client.fetch_events(f, timeout)).to_vec()
|
||||||
|
if not events:
|
||||||
|
return None
|
||||||
|
manifest_raw = events[0].content()
|
||||||
|
data = json.loads(manifest_raw)
|
||||||
|
manifest = Manifest(
|
||||||
|
ver=data["ver"],
|
||||||
|
algo=data["algo"],
|
||||||
|
chunks=[ChunkMeta(**c) for c in data["chunks"]],
|
||||||
|
delta_since=data.get("delta_since"),
|
||||||
|
)
|
||||||
|
|
||||||
|
chunks: list[bytes] = []
|
||||||
|
for meta in manifest.chunks:
|
||||||
|
cf = (
|
||||||
|
Filter()
|
||||||
|
.author(pubkey)
|
||||||
|
.kind(Kind(KIND_SNAPSHOT_CHUNK))
|
||||||
|
.identifier(meta.id)
|
||||||
|
.limit(1)
|
||||||
|
)
|
||||||
|
cev = (await self.client.fetch_events(cf, timeout)).to_vec()
|
||||||
|
if not cev:
|
||||||
|
raise ValueError(f"Missing chunk {meta.id}")
|
||||||
|
chunk_bytes = base64.b64decode(cev[0].content().encode("utf-8"))
|
||||||
|
if hashlib.sha256(chunk_bytes).hexdigest() != meta.hash:
|
||||||
|
raise ValueError(f"Checksum mismatch for chunk {meta.id}")
|
||||||
|
chunks.append(chunk_bytes)
|
||||||
|
|
||||||
|
return manifest, chunks
|
||||||
|
|
||||||
def close_client_pool(self) -> None:
|
def close_client_pool(self) -> None:
|
||||||
"""Disconnects the client from all relays."""
|
"""Disconnects the client from all relays."""
|
||||||
try:
|
try:
|
||||||
|
New file: src/tests/test_nostr_snapshot.py (98 lines)
|
|||||||
|
import hashlib
|
||||||
|
import json
|
||||||
|
import gzip
|
||||||
|
from pathlib import Path
|
||||||
|
from tempfile import TemporaryDirectory
|
||||||
|
from cryptography.fernet import Fernet
|
||||||
|
import base64
|
||||||
|
import asyncio
|
||||||
|
from unittest.mock import patch
|
||||||
|
|
||||||
|
from nostr import prepare_snapshot, NostrClient
|
||||||
|
from password_manager.encryption import EncryptionManager
|
||||||
|
|
||||||
|
|
||||||
|
def test_prepare_snapshot_roundtrip():
|
||||||
|
data = b"a" * 70000
|
||||||
|
manifest, chunks = prepare_snapshot(data, 50000)
|
||||||
|
assert len(chunks) == len(manifest.chunks)
|
||||||
|
joined = b"".join(chunks)
|
||||||
|
assert len(joined) <= len(data)
|
||||||
|
assert hashlib.sha256(chunks[0]).hexdigest() == manifest.chunks[0].hash
|
||||||
|
assert manifest.chunks[0].id == "seedpass-chunk-0000"
|
||||||
|
assert data == gzip.decompress(joined)
|
||||||
|
|
||||||
|
|
||||||
|
class DummyEvent:
|
||||||
|
def __init__(self, content):
|
||||||
|
self._content = content
|
||||||
|
|
||||||
|
def content(self):
|
||||||
|
return self._content
|
||||||
|
|
||||||
|
|
||||||
|
class DummyClient:
|
||||||
|
def __init__(self, events):
|
||||||
|
self.events = events
|
||||||
|
self.pos = 0
|
||||||
|
|
||||||
|
async def add_relays(self, relays):
|
||||||
|
pass
|
||||||
|
|
||||||
|
async def add_relay(self, relay):
|
||||||
|
pass
|
||||||
|
|
||||||
|
async def connect(self):
|
||||||
|
pass
|
||||||
|
|
||||||
|
async def disconnect(self):
|
||||||
|
pass
|
||||||
|
|
||||||
|
async def send_event(self, event):
|
||||||
|
pass
|
||||||
|
|
||||||
|
async def fetch_events(self, f, timeout):
|
||||||
|
ev = self.events[self.pos]
|
||||||
|
self.pos += 1
|
||||||
|
|
||||||
|
class E:
|
||||||
|
def __init__(self, ev):
|
||||||
|
self._ev = ev
|
||||||
|
|
||||||
|
def to_vec(self):
|
||||||
|
return [self._ev]
|
||||||
|
|
||||||
|
return E(ev)
|
||||||
|
|
||||||
|
|
||||||
|
def test_fetch_latest_snapshot():
|
||||||
|
data = b"seedpass" * 1000
|
||||||
|
manifest, chunks = prepare_snapshot(data, 50000)
|
||||||
|
manifest_json = json.dumps(
|
||||||
|
{
|
||||||
|
"ver": manifest.ver,
|
||||||
|
"algo": manifest.algo,
|
||||||
|
"chunks": [c.__dict__ for c in manifest.chunks],
|
||||||
|
"delta_since": None,
|
||||||
|
}
|
||||||
|
)
|
||||||
|
events = [DummyEvent(manifest_json)] + [
|
||||||
|
DummyEvent(base64.b64encode(c).decode()) for c in chunks
|
||||||
|
]
|
||||||
|
|
||||||
|
client = DummyClient(events)
|
||||||
|
with TemporaryDirectory() as tmpdir:
|
||||||
|
enc_mgr = EncryptionManager(Fernet.generate_key(), Path(tmpdir))
|
||||||
|
with patch("nostr.client.Client", lambda signer: client), patch(
|
||||||
|
"nostr.client.KeyManager"
|
||||||
|
) as MockKM, patch.object(NostrClient, "initialize_client_pool"), patch.object(
|
||||||
|
enc_mgr, "decrypt_parent_seed", return_value="seed"
|
||||||
|
):
|
||||||
|
km = MockKM.return_value
|
||||||
|
km.keys.private_key_hex.return_value = "1" * 64
|
||||||
|
km.keys.public_key_hex.return_value = "2" * 64
|
||||||
|
nc = NostrClient(enc_mgr, "fp")
|
||||||
|
result_manifest, result_chunks = asyncio.run(nc.fetch_latest_snapshot())
|
||||||
|
|
||||||
|
assert manifest == result_manifest
|
||||||
|
assert result_chunks == chunks
|
Reference in New Issue
Block a user