mirror of https://github.com/PR0M3TH3AN/SeedPass.git, synced 2025-09-09 07:48:57 +00:00

src/main.py
@@ -365,14 +365,15 @@ def handle_post_to_nostr(
     Handles the action of posting the encrypted password index to Nostr.
     """
     try:
-        event_id = password_manager.sync_vault(alt_summary=alt_summary)
-        if event_id:
-            print(
-                colored(
-                    f"\N{WHITE HEAVY CHECK MARK} Sync complete. Event ID: {event_id}",
-                    "green",
-                )
-            )
+        result = password_manager.sync_vault(alt_summary=alt_summary)
+        if result:
+            print(colored("\N{WHITE HEAVY CHECK MARK} Sync complete.", "green"))
+            print("Event IDs:")
+            print(f"  manifest: {result['manifest_id']}")
+            for cid in result["chunk_ids"]:
+                print(f"  chunk: {cid}")
+            for did in result["delta_ids"]:
+                print(f"  delta: {did}")
             logging.info("Encrypted index posted to Nostr successfully.")
         else:
             print(colored("\N{CROSS MARK} Sync failed…", "red"))
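Note: sync_vault now returns a mapping of event IDs instead of a single ID, so the handler prints each ID on its own line. A hypothetical transcript of the new output (IDs invented and shortened for illustration):

    ✅ Sync complete.
    Event IDs:
      manifest: 5c0f…
      chunk: 9b1e…
      delta: 77aa…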
@@ -14,6 +14,7 @@ class ChunkMeta:
     id: str
     size: int
     hash: str
+    event_id: Optional[str] = None


 @dataclass
@@ -78,6 +78,7 @@ def prepare_snapshot(
                 id=f"seedpass-chunk-{i:04d}",
                 size=len(chunk),
                 hash=hashlib.sha256(chunk).hexdigest(),
+                event_id=None,
             )
         )

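Note: the new event_id field records the relay-assigned ID of each published chunk so later fetches can look chunks up directly by event ID. Because it is optional with a None default, manifests written before this change still deserialize. A minimal standalone sketch (not the project's module) of why ChunkMeta(**c) keeps working on old data:

    from dataclasses import dataclass
    from typing import Optional

    @dataclass
    class ChunkMeta:
        id: str
        size: int
        hash: str
        event_id: Optional[str] = None

    # An old manifest entry without the new key still parses:
    # event_id simply falls back to its default of None.
    old_entry = {"id": "seedpass-chunk-0000", "size": 1024, "hash": "ab" * 32}
    assert ChunkMeta(**old_entry).event_id is None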
@@ -372,7 +373,13 @@ class NostrClient:
                 [Tag.identifier(meta.id)]
             )
             event = builder.build(self.keys.public_key()).sign_with_keys(self.keys)
-            await self.client.send_event(event)
+            result = await self.client.send_event(event)
+            try:
+                meta.event_id = (
+                    result.id.to_hex() if hasattr(result, "id") else str(result)
+                )
+            except Exception:
+                meta.event_id = None

         manifest_json = json.dumps(
             {
@@ -400,6 +407,60 @@ class NostrClient:
         logger.info("publish_snapshot completed in %.2f seconds", duration)
         return manifest, manifest_id

+    async def _fetch_chunks_with_retry(
+        self, manifest_event
+    ) -> tuple[Manifest, list[bytes]] | None:
+        """Retrieve all chunks referenced by ``manifest_event`` with retries."""
+
+        pubkey = self.keys.public_key()
+        timeout = timedelta(seconds=10)
+
+        try:
+            data = json.loads(manifest_event.content())
+            manifest = Manifest(
+                ver=data["ver"],
+                algo=data["algo"],
+                chunks=[ChunkMeta(**c) for c in data["chunks"]],
+                delta_since=(
+                    int(data["delta_since"])
+                    if data.get("delta_since") is not None
+                    else None
+                ),
+            )
+        except Exception:
+            return None
+
+        chunks: list[bytes] = []
+        for meta in manifest.chunks:
+            attempt = 0
+            chunk_bytes: bytes | None = None
+            while attempt < MAX_RETRIES:
+                cf = Filter().author(pubkey).kind(Kind(KIND_SNAPSHOT_CHUNK))
+                if meta.event_id:
+                    cf = cf.id(EventId.parse(meta.event_id))
+                else:
+                    cf = cf.identifier(meta.id)
+                cf = cf.limit(1)
+                cev = (await self.client.fetch_events(cf, timeout)).to_vec()
+                if cev:
+                    candidate = base64.b64decode(cev[0].content().encode("utf-8"))
+                    if hashlib.sha256(candidate).hexdigest() == meta.hash:
+                        chunk_bytes = candidate
+                        break
+                attempt += 1
+                if attempt < MAX_RETRIES:
+                    await asyncio.sleep(RETRY_DELAY)
+            if chunk_bytes is None:
+                return None
+            chunks.append(chunk_bytes)
+
+        man_id = getattr(manifest_event, "id", None)
+        if hasattr(man_id, "to_hex"):
+            man_id = man_id.to_hex()
+        self.current_manifest = manifest
+        self.current_manifest_id = man_id
+        return manifest, chunks
+
     async def fetch_latest_snapshot(self) -> Tuple[Manifest, list[bytes]] | None:
         """Retrieve the latest manifest and all snapshot chunks."""
         if self.offline_mode or not self.relays:
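Note: the retry loop makes up to MAX_RETRIES attempts per chunk, preferring a direct event-ID lookup and falling back to the identifier tag, and only accepts a payload whose SHA-256 digest matches the manifest entry. A standalone sketch of the same verify-and-retry pattern (the constants and fetch callable here are placeholders, not the module's API):

    import asyncio
    import hashlib

    MAX_RETRIES = 2   # placeholder values; the real constants live in nostr.client
    RETRY_DELAY = 1.0

    async def fetch_verified(fetch, expected_hash: str) -> bytes | None:
        """Retry fetch() until a payload's SHA-256 matches expected_hash."""
        for attempt in range(MAX_RETRIES):
            payload = await fetch()
            if payload and hashlib.sha256(payload).hexdigest() == expected_hash:
                return payload
            if attempt + 1 < MAX_RETRIES:
                await asyncio.sleep(RETRY_DELAY)
        return None  # caller treats this as an incomplete snapshot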
@@ -407,48 +468,18 @@ class NostrClient:
         await self._connect_async()

         pubkey = self.keys.public_key()
-        f = Filter().author(pubkey).kind(Kind(KIND_MANIFEST)).limit(1)
+        f = Filter().author(pubkey).kind(Kind(KIND_MANIFEST)).limit(3)
         timeout = timedelta(seconds=10)
         events = (await self.client.fetch_events(f, timeout)).to_vec()
         if not events:
             return None
-        manifest_event = events[0]
-        manifest_raw = manifest_event.content()
-        data = json.loads(manifest_raw)
-        manifest = Manifest(
-            ver=data["ver"],
-            algo=data["algo"],
-            chunks=[ChunkMeta(**c) for c in data["chunks"]],
-            delta_since=(
-                int(data["delta_since"])
-                if data.get("delta_since") is not None
-                else None
-            ),
-        )

-        chunks: list[bytes] = []
-        for meta in manifest.chunks:
-            cf = (
-                Filter()
-                .author(pubkey)
-                .kind(Kind(KIND_SNAPSHOT_CHUNK))
-                .identifier(meta.id)
-                .limit(1)
-            )
-            cev = (await self.client.fetch_events(cf, timeout)).to_vec()
-            if not cev:
-                raise ValueError(f"Missing chunk {meta.id}")
-            chunk_bytes = base64.b64decode(cev[0].content().encode("utf-8"))
-            if hashlib.sha256(chunk_bytes).hexdigest() != meta.hash:
-                raise ValueError(f"Checksum mismatch for chunk {meta.id}")
-            chunks.append(chunk_bytes)
+        for manifest_event in events:
+            result = await self._fetch_chunks_with_retry(manifest_event)
+            if result is not None:
+                return result

-        self.current_manifest = manifest
-        man_id = getattr(manifest_event, "id", None)
-        if hasattr(man_id, "to_hex"):
-            man_id = man_id.to_hex()
-        self.current_manifest_id = man_id
-        return manifest, chunks
+        return None

     async def publish_delta(self, delta_bytes: bytes, manifest_id: str) -> str:
         """Publish a delta event referencing a manifest."""
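Note: raising limit(1) to limit(3) lets the client consider up to three recent manifests, and _fetch_chunks_with_retry returns None instead of raising on a missing or corrupt chunk, so retrieval falls back to an older snapshot rather than failing outright. The control flow reduces to this sketch (names are illustrative, not the module's):

    # Fallback strategy: newest manifest first, first complete snapshot wins.
    async def first_complete(manifest_events, fetch_chunks):
        for ev in manifest_events:           # up to three, newest first
            result = await fetch_chunks(ev)  # None on parse error or missing chunk
            if result is not None:
                return result
        return None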
@@ -1127,7 +1127,7 @@ class PasswordManager:
         def _worker() -> None:
             try:
                 if hasattr(self, "nostr_client") and hasattr(self, "vault"):
-                    self.sync_index_from_nostr_if_missing()
+                    self.attempt_initial_sync()
                 if hasattr(self, "sync_index_from_nostr"):
                     self.sync_index_from_nostr()
             except Exception as exc:
@@ -1176,16 +1176,19 @@ class PasswordManager:

         threading.Thread(target=_worker, daemon=True).start()

-    def sync_index_from_nostr_if_missing(self) -> None:
-        """Retrieve the password database from Nostr if it doesn't exist locally.
+    def attempt_initial_sync(self) -> bool:
+        """Attempt to download the initial vault snapshot from Nostr.

-        If no valid data is found or decryption fails, initialize a fresh local
-        database and publish it to Nostr.
+        Returns ``True`` if the snapshot was successfully downloaded and the
+        local index file was written. Returns ``False`` otherwise. The local
+        index file is not created on failure.
         """
         index_file = self.fingerprint_dir / "seedpass_entries_db.json.enc"
         if index_file.exists():
-            return
+            return True

         have_data = False
+        start = time.perf_counter()
         try:
             result = asyncio.run(self.nostr_client.fetch_latest_snapshot())
             if result:
@@ -1202,10 +1205,23 @@ class PasswordManager:
                 if success:
                     logger.info("Initialized local database from Nostr.")
                     have_data = True
-        except Exception as e:
+        except Exception as e:  # pragma: no cover - network errors
             logger.warning(f"Unable to sync index from Nostr: {e}")
+        finally:
+            if getattr(self, "verbose_timing", False):
+                duration = time.perf_counter() - start
+                logger.info("attempt_initial_sync completed in %.2f seconds", duration)

-        if not have_data:
+        return have_data
+
+    def sync_index_from_nostr_if_missing(self) -> None:
+        """Retrieve the password database from Nostr if it doesn't exist locally.
+
+        If no valid data is found or decryption fails, initialize a fresh local
+        database and publish it to Nostr.
+        """
+        success = self.attempt_initial_sync()
+        if not success:
             self.vault.save_index({"schema_version": LATEST_VERSION, "entries": {}})
             try:
                 self.sync_vault()
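Note: the refactor splits the old behavior in two: attempt_initial_sync only reports whether a snapshot was downloaded and never creates the index file on failure, while sync_index_from_nostr_if_missing layers the old "initialize a fresh vault and publish it" fallback on top. A hypothetical caller showing the distinction:

    # attempt_initial_sync is a safe probe: it writes nothing on failure,
    # so the caller can decide what to do next.
    if pm.attempt_initial_sync():
        print("vault restored from Nostr")
    else:
        # Only this fallback path creates and publishes a fresh index.
        pm.sync_index_from_nostr_if_missing()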
@@ -3501,8 +3517,10 @@ class PasswordManager:
             # Re-raise the exception to inform the calling function of the failure
             raise

-    def sync_vault(self, alt_summary: str | None = None) -> str | None:
-        """Publish the current vault contents to Nostr."""
+    def sync_vault(
+        self, alt_summary: str | None = None
+    ) -> dict[str, list[str] | str] | None:
+        """Publish the current vault contents to Nostr and return event IDs."""
         try:
             if getattr(self, "offline_mode", False):
                 return None
@@ -3510,16 +3528,28 @@ class PasswordManager:
             if not encrypted:
                 return None
             pub_snap = getattr(self.nostr_client, "publish_snapshot", None)
+            manifest = None
+            event_id = None
             if callable(pub_snap):
                 if asyncio.iscoroutinefunction(pub_snap):
-                    _, event_id = asyncio.run(pub_snap(encrypted))
+                    manifest, event_id = asyncio.run(pub_snap(encrypted))
                 else:
-                    _, event_id = pub_snap(encrypted)
+                    manifest, event_id = pub_snap(encrypted)
             else:
                 # Fallback for tests using simplified stubs
                 event_id = self.nostr_client.publish_json_to_nostr(encrypted)
             self.is_dirty = False
-            return event_id
+            if event_id is None:
+                return None
+            chunk_ids: list[str] = []
+            if manifest is not None:
+                chunk_ids = [c.event_id for c in manifest.chunks if c.event_id]
+            delta_ids = getattr(self.nostr_client, "_delta_events", [])
+            return {
+                "manifest_id": event_id,
+                "chunk_ids": chunk_ids,
+                "delta_ids": list(delta_ids),
+            }
         except Exception as e:
             logging.error(f"Failed to sync vault: {e}", exc_info=True)
             return None
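Note: on success sync_vault now returns a dict carrying the manifest event ID, the per-chunk event IDs collected from the manifest, and any delta event IDs (read defensively via getattr on the client's _delta_events attribute, defaulting to an empty list). The shape consumers can rely on, with hypothetical values:

    result = {
        "manifest_id": "5c0f…",           # ID of the manifest event
        "chunk_ids": ["9b1e…", "c24d…"],  # one per published snapshot chunk
        "delta_ids": ["77aa…"],           # deltas published since the snapshot
    }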
@@ -419,9 +419,14 @@ def vault_reveal_parent_seed(
 def nostr_sync(ctx: typer.Context) -> None:
     """Sync with configured Nostr relays."""
     pm = _get_pm(ctx)
-    event_id = pm.sync_vault()
-    if event_id:
-        typer.echo(event_id)
+    result = pm.sync_vault()
+    if result:
+        typer.echo("Event IDs:")
+        typer.echo(f"- manifest: {result['manifest_id']}")
+        for cid in result["chunk_ids"]:
+            typer.echo(f"- chunk: {cid}")
+        for did in result["delta_ids"]:
+            typer.echo(f"- delta: {did}")
     else:
         typer.echo("Error: Failed to sync vault")

@@ -108,6 +108,7 @@ class DummyFilter:
         self.ids: list[str] = []
         self.limit_val: int | None = None
         self.since_val: int | None = None
+        self.id_called: bool = False

     def author(self, _pk):
         return self
@@ -125,6 +126,11 @@ class DummyFilter:
         self.ids.append(ident)
         return self

+    def id(self, ident: str):
+        self.id_called = True
+        self.ids.append(ident)
+        return self
+
     def limit(self, val: int):
         self.limit_val = val
         return self
@@ -167,6 +173,7 @@ class DummyRelayClient:
         self.manifests: list[DummyEvent] = []
         self.chunks: dict[str, DummyEvent] = {}
         self.deltas: list[DummyEvent] = []
+        self.filters: list[DummyFilter] = []

     async def add_relays(self, _relays):
         pass
@@ -195,6 +202,7 @@ class DummyRelayClient:
         elif event.kind == KIND_SNAPSHOT_CHUNK:
             ident = event.tags[0] if event.tags else str(self.counter)
             self.chunks[ident] = event
+            self.chunks[eid] = event
         elif event.kind == KIND_DELTA:
             if not hasattr(event, "created_at"):
                 self.ts_counter += 1
@@ -203,6 +211,7 @@ class DummyRelayClient:
         return DummySendResult(eid)

     async def fetch_events(self, f, _timeout):
+        self.filters.append(f)
         kind = getattr(f, "kind_val", None)
         limit = getattr(f, "limit_val", None)
         identifier = f.ids[0] if getattr(f, "ids", None) else None
@@ -53,7 +53,11 @@ class DummyPM:
         self.nostr_client = SimpleNamespace(
             key_manager=SimpleNamespace(get_npub=lambda: "npub")
         )
-        self.sync_vault = lambda: "event"
+        self.sync_vault = lambda: {
+            "manifest_id": "event",
+            "chunk_ids": ["c1"],
+            "delta_ids": [],
+        }
         self.config_manager = SimpleNamespace(
             load_config=lambda require_pin=False: {"inactivity_timeout": 30},
             set_inactivity_timeout=lambda v: None,
@@ -47,7 +47,8 @@ def test_full_sync_roundtrip(dummy_nostr_client):
     manifest_id = relay.manifests[-1].id

     # Manager B retrieves snapshot
-    pm_b.sync_index_from_nostr_if_missing()
+    result = pm_b.attempt_initial_sync()
+    assert result is True
     entries = pm_b.entry_manager.list_entries()
     assert [e[1] for e in entries] == ["site1"]
@@ -39,7 +39,7 @@ class MockClient:

     class FakeId:
         def to_hex(self_inner):
-            return "abcd"
+            return "a" * 64

     class FakeOutput:
         def __init__(self):
@@ -7,6 +7,7 @@ from password_manager.entry_management import EntryManager
 from password_manager.backup import BackupManager
 from password_manager.config_manager import ConfigManager
 from nostr.client import prepare_snapshot
+from nostr.backup_models import KIND_SNAPSHOT_CHUNK


 def test_manifest_generation(tmp_path):
@@ -35,10 +36,18 @@ def test_retrieve_multi_chunk_snapshot(dummy_nostr_client):
     data = os.urandom(120000)
     manifest, _ = asyncio.run(client.publish_snapshot(data, limit=50000))
     assert len(manifest.chunks) > 1
+    for meta in manifest.chunks:
+        assert meta.event_id
     fetched_manifest, chunk_bytes = asyncio.run(client.fetch_latest_snapshot())
     assert len(chunk_bytes) == len(manifest.chunks)
+    assert [c.event_id for c in fetched_manifest.chunks] == [
+        c.event_id for c in manifest.chunks
+    ]
     joined = b"".join(chunk_bytes)
     assert gzip.decompress(joined) == data
+    for f in relay.filters:
+        if getattr(f, "kind_val", None) == KIND_SNAPSHOT_CHUNK:
+            assert f.id_called


 def test_publish_and_fetch_deltas(dummy_nostr_client):
@@ -56,3 +65,70 @@ def test_publish_and_fetch_deltas(dummy_nostr_client):
     assert relay.manifests[-1].delta_since == second_ts
     deltas = asyncio.run(client.fetch_deltas_since(0))
     assert deltas == [d1, d2]
+
+
+def test_fetch_snapshot_fallback_on_missing_chunk(dummy_nostr_client, monkeypatch):
+    import os
+    import gzip
+
+    client, relay = dummy_nostr_client
+    monkeypatch.setattr("nostr.client.MAX_RETRIES", 3)
+    monkeypatch.setattr("nostr.client.RETRY_DELAY", 0)
+
+    data1 = os.urandom(60000)
+    manifest1, _ = asyncio.run(client.publish_snapshot(data1))
+
+    data2 = os.urandom(60000)
+    manifest2, _ = asyncio.run(client.publish_snapshot(data2))
+
+    missing = manifest2.chunks[0]
+    if missing.event_id:
+        relay.chunks.pop(missing.event_id, None)
+    relay.chunks.pop(missing.id, None)
+
+    relay.filters.clear()
+
+    fetched_manifest, chunk_bytes = asyncio.run(client.fetch_latest_snapshot())
+
+    assert gzip.decompress(b"".join(chunk_bytes)) == data1
+    assert [c.event_id for c in fetched_manifest.chunks] == [
+        c.event_id for c in manifest1.chunks
+    ]
+
+    attempts = sum(
+        1
+        for f in relay.filters
+        if getattr(f, "kind_val", None) == KIND_SNAPSHOT_CHUNK
+        and (
+            missing.id in getattr(f, "ids", [])
+            or (missing.event_id and missing.event_id in getattr(f, "ids", []))
+        )
+    )
+    assert attempts == 3
+
+
+def test_fetch_snapshot_uses_event_ids(dummy_nostr_client):
+    import os
+    import gzip
+
+    client, relay = dummy_nostr_client
+
+    data = os.urandom(60000)
+    manifest, _ = asyncio.run(client.publish_snapshot(data))
+
+    # Remove identifier keys so chunks can only be fetched via event_id
+    for meta in manifest.chunks:
+        relay.chunks.pop(meta.id, None)
+
+    relay.filters.clear()
+
+    fetched_manifest, chunk_bytes = asyncio.run(client.fetch_latest_snapshot())
+
+    assert gzip.decompress(b"".join(chunk_bytes)) == data
+
+    id_filters = [
+        f.id_called
+        for f in relay.filters
+        if getattr(f, "kind_val", None) == KIND_SNAPSHOT_CHUNK
+    ]
+    assert id_filters and all(id_filters)
@@ -68,6 +68,8 @@ class DummyClient:
 def test_fetch_latest_snapshot():
     data = b"seedpass" * 1000
     manifest, chunks = prepare_snapshot(data, 50000)
+    for i, m in enumerate(manifest.chunks):
+        m.event_id = f"{i:064x}"
     manifest_json = json.dumps(
         {
             "ver": manifest.ver,
@@ -98,3 +100,6 @@ def test_fetch_latest_snapshot():

     assert manifest == result_manifest
     assert result_chunks == chunks
+    assert [c.event_id for c in manifest.chunks] == [
+        c.event_id for c in result_manifest.chunks
+    ]
@@ -9,12 +9,17 @@ import main

 def test_handle_post_success(capsys):
     pm = SimpleNamespace(
-        sync_vault=lambda alt_summary=None: "abcd",
+        sync_vault=lambda alt_summary=None: {
+            "manifest_id": "abcd",
+            "chunk_ids": ["c1", "c2"],
+            "delta_ids": ["d1"],
+        },
     )
     main.handle_post_to_nostr(pm)
     out = capsys.readouterr().out
     assert "✅ Sync complete." in out
     assert "abcd" in out
+    assert "c1" in out and "c2" in out and "d1" in out


 def test_handle_post_failure(capsys):
@@ -24,3 +29,24 @@ def test_handle_post_failure(capsys):
     main.handle_post_to_nostr(pm)
     out = capsys.readouterr().out
     assert "❌ Sync failed…" in out
+
+
+def test_handle_post_prints_all_ids(capsys):
+    pm = SimpleNamespace(
+        sync_vault=lambda alt_summary=None: {
+            "manifest_id": "m1",
+            "chunk_ids": ["c1", "c2"],
+            "delta_ids": ["d1", "d2"],
+        }
+    )
+    main.handle_post_to_nostr(pm)
+    out_lines = capsys.readouterr().out.splitlines()
+    expected = [
+        "  manifest: m1",
+        "  chunk: c1",
+        "  chunk: c2",
+        "  delta: d1",
+        "  delta: d2",
+    ]
+    for line in expected:
+        assert any(line in ol for ol in out_lines)
@@ -81,6 +81,28 @@ def test_sync_index_missing_bad_data(monkeypatch, dummy_nostr_client):
         )
         monkeypatch.setattr(client, "fetch_deltas_since", lambda *_a, **_k: [])

-        pm.sync_index_from_nostr_if_missing()
-        data = pm.vault.load_index()
-        assert data["entries"] == {}
+        result = pm.attempt_initial_sync()
+        assert result is False
+        index_path = dir_path / "seedpass_entries_db.json.enc"
+        assert not index_path.exists()
+
+
+def test_attempt_initial_sync_incomplete_data(monkeypatch, dummy_nostr_client):
+    client, _relay = dummy_nostr_client
+    with TemporaryDirectory() as tmpdir:
+        dir_path = Path(tmpdir)
+        vault, _enc = create_vault(dir_path)
+
+        pm = PasswordManager.__new__(PasswordManager)
+        pm.fingerprint_dir = dir_path
+        pm.vault = vault
+        pm.nostr_client = client
+        pm.sync_vault = lambda *a, **k: None
+
+        # Simulate relay snapshot retrieval failure due to missing chunks
+        monkeypatch.setattr(client, "fetch_latest_snapshot", lambda: None)
+
+        result = pm.attempt_initial_sync()
+        assert result is False
+        index_path = dir_path / "seedpass_entries_db.json.enc"
+        assert not index_path.exists()
@@ -288,7 +288,11 @@ def test_nostr_sync(monkeypatch):

     def sync_vault():
         called["called"] = True
-        return "evt123"
+        return {
+            "manifest_id": "evt123",
+            "chunk_ids": ["c1"],
+            "delta_ids": ["d1"],
+        }

     pm = SimpleNamespace(sync_vault=sync_vault, select_fingerprint=lambda fp: None)
     monkeypatch.setattr(cli, "PasswordManager", lambda: pm)
@@ -296,6 +300,8 @@ def test_nostr_sync(monkeypatch):
     assert result.exit_code == 0
     assert called.get("called") is True
     assert "evt123" in result.stdout
+    assert "c1" in result.stdout
+    assert "d1" in result.stdout


 def test_generate_password(monkeypatch):