mirror of https://github.com/PR0M3TH3AN/Marlin.git, synced 2025-09-08 15:18:44 +00:00

Commit: update
Cargo.lock (generated, 70 lines changed)
@@ -199,7 +199,16 @@ version = "5.0.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "9a49173b84e034382284f27f1af4dcbbd231ffa358c0fe316541a7337f376a35"
dependencies = [
 "dirs-sys",
 "dirs-sys 0.4.1",
]

[[package]]
name = "dirs"
version = "6.0.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "c3e8aa94d75141228480295a7d0e7feb620b1a5ad9f12bc40be62411e38cce4e"
dependencies = [
 "dirs-sys 0.5.0",
]

[[package]]
@@ -210,10 +219,22 @@ checksum = "520f05a5cbd335fae5a99ff7a6ab8627577660ee5cfd6a94a6a929b52ff0321c"
dependencies = [
 "libc",
 "option-ext",
 "redox_users",
 "redox_users 0.4.6",
 "windows-sys 0.48.0",
]

[[package]]
name = "dirs-sys"
version = "0.5.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e01a3366d27ee9890022452ee61b2b63a67e6f13f58900b651ff5665f0bb1fab"
dependencies = [
 "libc",
 "option-ext",
 "redox_users 0.5.0",
 "windows-sys 0.59.0",
]

[[package]]
name = "fallible-iterator"
version = "0.3.0"
@@ -356,6 +377,7 @@ dependencies = [
 "directories",
 "glob",
 "rusqlite",
 "shellexpand",
 "shlex",
 "tracing",
 "tracing-subscriber",
@@ -452,7 +474,18 @@ checksum = "ba009ff324d1fc1b900bd1fdb31564febe58a8ccc8a6fdbb93b543d33b13ca43"
dependencies = [
 "getrandom",
 "libredox",
 "thiserror",
 "thiserror 1.0.69",
]

[[package]]
name = "redox_users"
version = "0.5.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "dd6f9d3d47bdd2ad6945c5015a226ec6155d0bcdfd8f7cd29f86b71f8de99d2b"
dependencies = [
 "getrandom",
 "libredox",
 "thiserror 2.0.12",
]

[[package]]
@@ -537,6 +570,15 @@ dependencies = [
 "lazy_static",
]

[[package]]
name = "shellexpand"
version = "3.1.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "8b1fdf65dd6331831494dd616b30351c38e96e45921a27745cf98490458b90bb"
dependencies = [
 "dirs",
]

[[package]]
name = "shlex"
version = "1.3.0"
@@ -572,7 +614,16 @@ version = "1.0.69"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b6aaf5339b578ea85b50e080feb250a3e8ae8cfcdff9a461c9ec2904bc923f52"
dependencies = [
 "thiserror-impl",
 "thiserror-impl 1.0.69",
]

[[package]]
name = "thiserror"
version = "2.0.12"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "567b8a2dae586314f7be2a752ec7474332959c6460e02bde30d702a66d488708"
dependencies = [
 "thiserror-impl 2.0.12",
]

[[package]]
@@ -586,6 +637,17 @@ dependencies = [
 "syn",
]

[[package]]
name = "thiserror-impl"
version = "2.0.12"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "7f7cf42b4507d8ea322120659672cf1b9dbb93f8f2d4ecfd6e51350ff5b17a1d"
dependencies = [
 "proc-macro2",
 "quote",
 "syn",
]

[[package]]
name = "thread_local"
version = "1.1.8"
Cargo.toml
@@ -14,4 +14,5 @@ tracing-subscriber = { version = "0.3", features = ["fmt", "env-filter"] }
walkdir = "2.5"
shlex = "1.3"
chrono = "0.4"
shellexpand = "3.1"
README.md (212 lines changed)
@@ -2,20 +2,21 @@

# Marlin

**Marlin** is a lightweight, metadata-driven file indexer that runs entirely on your computer. It scans folders, stores paths and file stats in SQLite, lets you add hierarchical **tags** and **custom attributes**, takes automatic snapshots, and offers instant full-text search with FTS5. Nothing ever leaves your machine.
**Marlin** is a lightweight, metadata-driven file indexer that runs 100% on your computer. It scans folders, stores paths and file stats in SQLite, lets you attach hierarchical **tags** and **custom attributes**, takes automatic snapshots, and offers instant full-text search via FTS5.
*No cloud, no telemetry – your data never leaves the machine.*

---

## Feature highlights

| Area | What you get |
|----------------|---------------------------------------------------------------------------------|
| **Safety** | Timestamped backups `marlin backup` and one-command restore `marlin restore` |
| **Upgrades** | Automatic schema migrations + dynamic column adds |
| **Indexing** | Fast multi-path scanner (WAL mode) |
| **Metadata** | Hierarchical tags (`project/alpha`) & key-value attributes (`reviewed=yes`) |
| **Search** | Prefix-aware FTS5, optional `--exec` action per hit |
| **DX / Logs** | Readable tracing (`RUST_LOG=debug …`) |
| Area | What you get |
| -------------- | --------------------------------------------------------------------------------- |
| **Safety** | Timestamped backups (`marlin backup`) and one-command restore (`marlin restore`) |
| **Resilience** | Versioned, idempotent schema migrations – zero-downtime upgrades |
| **Indexing** | Fast multi-path scanner with SQLite WAL concurrency |
| **Metadata** | Hierarchical tags (`project/alpha`) & key-value attributes (`reviewed=yes`) |
| **Search** | Prefix-aware FTS5 across paths, tags, and attributes; optional `--exec` per match |
| **DX / Logs** | Structured tracing (`RUST_LOG=debug`) for every operation |

---

@@ -29,18 +30,18 @@
 ▲ search / exec └──────┬──────┘
 └────────── backup / restore ▼
 timestamped snapshots
````
```

---

## Prerequisites

| Requirement | Why |
| ------------------ | -------------------------------------- |
| **Rust** ≥ 1.77 | Build toolchain (`rustup.rs`) |
| C build essentials | `gcc`, `make`, etc. for bundled SQLite |
| Requirement | Why |
| ------------------ | ----------------------------- |
| **Rust** ≥ 1.77 | Build toolchain (`rustup.rs`) |
| C build essentials | Builds bundled SQLite (Linux) |

*(Windows/macOS: let the Rust installer pull the matching build tools.)*
macOS & Windows users: let the Rust installer pull the matching build tools.

---

@@ -50,8 +51,7 @@
git clone https://github.com/yourname/marlin.git
cd marlin
cargo build --release
# optional: add to PATH
sudo install -Dm755 target/release/marlin /usr/local/bin/marlin
sudo install -Dm755 target/release/marlin /usr/local/bin/marlin   # optional
```

---

@@ -59,21 +59,21 @@ sudo install -Dm755 target/release/marlin /usr/local/bin/marlin

## Quick start

```bash
marlin init                                         # create DB
marlin init                                         # create DB (idempotent)
marlin scan ~/Pictures ~/Documents                  # index files
marlin tag "~/Pictures/**/*.jpg" photos/trip-2024   # add tag
marlin attr set "~/Documents/**/*.pdf" reviewed yes
marlin search reviewed --exec "xdg-open {}"         # open hits
marlin search reviewed --exec "xdg-open {}"         # open matches
marlin backup                                       # snapshot DB
```

### Database location

* **Linux** `~/.local/share/marlin/index.db`
* **macOS** `~/Library/Application Support/marlin/index.db`
* **Windows** `%APPDATA%\marlin\index.db`

Override:
Override with:

```bash
export MARLIN_DB_PATH=/path/to/custom.db
```
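The override takes precedence over the per-platform defaults listed above. A minimal sketch of that resolution order follows; the real `Config::load` is not part of this diff, so the function names here are assumptions for illustration only:

```rust
use std::{env, path::PathBuf};

/// Hypothetical sketch: env override first, platform default second.
fn resolve_db_path() -> PathBuf {
    match env::var_os("MARLIN_DB_PATH") {
        Some(custom) => PathBuf::from(custom), // explicit override wins
        None => default_data_dir().join("index.db"),
    }
}

/// Stand-in for the platform lookup (the project depends on the
/// `directories` crate per Cargo.lock; this hard-codes the Linux
/// location for brevity).
fn default_data_dir() -> PathBuf {
    let home = env::var_os("HOME").map(PathBuf::from).unwrap_or_default();
    home.join(".local/share/marlin")
}

fn main() {
    println!("{}", resolve_db_path().display());
}
```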
@@ -106,74 +106,170 @@ restore <snapshot.db> replace DB with snapshot

## Backups & restore

* **Create snapshot**
*Create snapshot*

```bash
marlin backup
# → ~/.local/share/marlin/backups/backup_2025-05-14_22-15-30.db
```

* **Restore snapshot**
*Restore snapshot*

```bash
marlin restore ~/.local/share/marlin/backups/backup_2025-05-14_22-15-30.db
```

Marlin automatically takes a safety backup before any schema migration.
Marlin also takes an **automatic safety backup before every schema migration**.

---

## Upgrading to a new build
## Upgrading

```bash
cargo install --path . --force   # rebuild & overwrite installed binary
cargo install --path . --force   # rebuild & replace installed binary
```

Backups + dynamic migrations mean your data is preserved across upgrades.
The versioned migration system preserves your data across upgrades.

---

## Roadmap

| Milestone | Focus |
| --------- | -------------------------------------------------- |
| **M1** | `tags://` virtual folder • attribute search DSL |
| **M2** | Real-time sync service • change-log diff viewer |
| **M3** | Natural-language query builder |
| **M4** | Plug-in marketplace • mobile (read-only) companion |
See [`ROADMAP.md`](./ROADMAP.md) for the full development plan.

---

## Five-minute tutorial
## Five-Minute Quickstart

Paste & run each block in your terminal.

---

### 0 Prepare & build

```bash
# 0. Playground
mkdir -p ~/marlin_demo/{Projects/{Alpha,Beta},Media/Photos,Docs}
echo "Alpha draft" > ~/marlin_demo/Projects/Alpha/draft.txt
echo "Receipt PDF" > ~/marlin_demo/Docs/receipt.pdf
echo "fake jpg" > ~/marlin_demo/Media/Photos/vacation.jpg
# Clone or cd into your Marlin repo
cd ~/Documents/GitHub/Marlin

# 1. Init & scan
# Build the release binary
cargo build --release
```

---

### 1 Install on your PATH

```bash
sudo install -Dm755 target/release/marlin /usr/local/bin/marlin
```

> Now `marlin` is available everywhere.

---

### 2 Prepare a clean demo directory

```bash
rm -rf ~/marlin_demo
mkdir -p ~/marlin_demo/{Projects/{Alpha,Beta},Media/Photos,Docs}

printf "Alpha draft\n" > ~/marlin_demo/Projects/Alpha/draft.txt
printf "Beta notes\n" > ~/marlin_demo/Projects/Beta/notes.md
printf "Receipt PDF\n" > ~/marlin_demo/Docs/receipt.pdf
printf "fake jpg\n" > ~/marlin_demo/Media/Photos/vacation.jpg
```

---

### 3 Initialize & index files

```bash
# Use --verbose if you want full debug traces:
marlin init
marlin scan ~/marlin_demo

# 2. Tags & attributes
marlin tag "~/marlin_demo/Projects/Alpha/**/*" project/alpha
# or, to see every path tested:
marlin --verbose init
marlin --verbose scan ~/marlin_demo
```

> **Tip:** Rerun `marlin scan` after you add/remove/modify files; only changed files get re-indexed.
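What "only changed files" means in practice is a cheap size/mtime comparison before any file is re-read. `src/scan.rs` is not part of this diff, so the sketch below is an assumption about that check, not the shipped code:

```rust
use std::{fs, io, path::Path, time::UNIX_EPOCH};

/// Hypothetical dirty-check: re-index only when size or mtime changed.
/// (scan.rs is not shown in this commit; names here are illustrative.)
fn needs_reindex(path: &Path, stored_size: u64, stored_mtime: i64) -> io::Result<bool> {
    let meta = fs::metadata(path)?;
    let mtime = meta
        .modified()?
        .duration_since(UNIX_EPOCH)
        .map(|d| d.as_secs() as i64)
        .unwrap_or(0);
    // Only re-read the file when size or mtime moved.
    Ok(meta.len() != stored_size || mtime != stored_mtime)
}
```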

---

### 4 Attach tags & attributes

```bash
# Tag everything under “Alpha”
marlin tag "~/marlin_demo/Projects/Alpha/**/*" project/alpha

# Mark all PDFs as reviewed
marlin attr set "~/marlin_demo/**/*.pdf" reviewed yes

# 3. Search
marlin search alpha
marlin search reviewed --exec "echo Found: {}"

# 4. Snapshot & restore
marlin backup
marlin restore ~/.local/share/marlin/backups/backup_YYYY-MM-DD_HH-MM-SS.db
# (or with debug)
marlin --verbose tag "~/marlin_demo/Projects/Alpha/**/*" project/alpha
marlin --verbose attr set "~/marlin_demo/**/*.pdf" reviewed yes
```

---

### 5 Search your index

```bash
# By tag or filename
marlin search alpha

# Combined terms (AND across path+attrs)
marlin search "reviewed AND pdf"

# Run a command on each hit
marlin search reviewed --exec "echo HIT → {}"

# If things aren’t matching, add --verbose to see the underlying FTS query:
marlin --verbose search "reviewed AND pdf"
```

> `{}` in `--exec` is replaced with each file’s path.

---

### 6 Backup & restore

```bash
# Snapshot and store its name
snap=$(marlin backup | awk '{print $NF}')

# Simulate data loss
rm ~/.local/share/marlin/index.db

# Restore instantly
marlin restore "$snap"

# Verify your files still show up
marlin search reviewed
```

> Backups live under `~/.local/share/marlin/backups` by default.

##### What you just exercised

| Command | Purpose |
| ----------------- | ----------------------------------------- |
| `marlin init` | Create / upgrade the SQLite database |
| `marlin scan` | Walk directories and (re)index files |
| `marlin tag` | Attach hierarchical tags |
| `marlin attr set` | Add/overwrite custom key-value attributes |
| `marlin search` | FTS5 search across path / tags / attrs |
| `--exec` | Pipe hits into any shell command |
| `marlin backup` | Timestamped snapshot of the DB |
| `marlin restore` | Replace live DB with a chosen snapshot |

That’s the complete surface area of Marlin today – feel free to play around or
point the scanner at real folders.

---

## License

MIT – see `LICENSE`
roadmap.md (46 lines changed)
@@ -1,37 +1,29 @@
# Roadmap

| Phase | Functional focus | Why do it now? | Key deliverables |
| ------------------------------- | ------------------------ | ------------------------------------------------------------------------------------------------ | ---------------------------------------------------------------------------------------------------------------------------------------- |
| **1. Lock down the foundation** | *Migrations + tests* | Schema churn and silent breakage are the biggest hidden costs. Catch them early. | • Split `migrations.sql` into versioned files<br>• Remove runtime “ensure_column” path<br>• Add CI job that runs `cargo test` on every PR |
| **2. Trim the FTS triggers** | *Efficient index upkeep* | The current triggers will bog down as soon as users bulk-tag thousands of files. | • Replace per-row GROUP_CONCAT triggers with a “dirty” flag or app-side refresh<br>• Benchmark a full scan + mass tag on ≥100 k files |
| **3. Hashing & dedup logic** | *Content integrity* | Once the index is stable and fast, add SHA-256 so the DB can detect duplicates/corruption. | • `files.hash` column populated on first scan<br>• `marlin scan --rehash` to force refresh |
| **4. Alias / canonical tags** | *Usable taxonomy* | Without this, tag sprawl happens quickly. Better to solve it before users have thousands of tags. | • `tags.aliases` table or `canonical_id` enforcement<br>• CLI subcommands: `tag alias add`, `tag alias ls` |
| **5. Search parser upgrade** | *Power queries* | After the data model is solid, richer search is the next visible win. | • Swap ad-hoc parser for `nom`-based grammar<br>• Support grouping `(...)`, boolean ops, quoted phrases |
| **6. Attribute schemas** | *Structured metadata* | Custom field templates let you build real workflows (e.g. Photo > Aperture). | • `templates` + `template_fields` tables<br>• Validation on `attr set` |
| **7. Dolphin extension MVP** | *Desktop integration* | No point shipping a GUI until the backend is rock-solid. | • Read-only sidebar showing tags/attrs<br>• Double-click tag to filter view |
| **8. Write / edit UI** | *End-user adoption* | Once people can browse metadata inside Dolphin, they’ll want to edit it too. | • In-place tag editor widget<br>• Attribute form dialog tied to templates |
| **9. Sync & sharing** | *Multi-device story* | Last – most complex. Only tackle it when single-machine use is boring. | • Lite RPC layer (SQLite WAL + notify?)<br>• Optional read-only mode for network mounts |
| Phase | Focus | Why now? | Key deliverables |
| -------------------------- | ------------------------ | -------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------- |
| **1. 2025‑Q2 – "Bedrock"** | Migrations + CI baseline | We’ve landed versioned migrations and removed runtime column hacks – ensure it stays solid. | • CI job runs `cargo test` + `cargo sqlx migrate run --dry-run` |
| **2. 2025‑Q2** | Leaner FTS maintenance | Per‑row triggers don’t scale past ~100 k files. | • Replace triggers with “dirty” flag + periodic rebuild<br>• Benchmark on 100 k files |
| **3. 2025‑Q3** | Content hashing & dedup | Detect duplicates, enable future integrity checks. | • SHA‑256 in `files.hash`<br>• `scan --rehash` option |
| **4. 2025‑Q3** | Tag aliases / canonicals | Control tag sprawl before users accumulate thousands. | • `canonical_id` enforcement<br>• `tag alias add/ls/rm` CLI |
| **5. 2025‑Q4** | Search DSL v2 | Power users want grouping, boolean ops, quoted phrases. | • Replace ad‑hoc parser with `nom` grammar<br>• Unit‑tested examples |
| **6. 2025‑Q4** | Attribute templates | Structured metadata unlocks real workflows. | • `templates` + `template_fields` tables<br>• Validation on `attr set` |
| **7. 2026‑Q1** | Dolphin read‑only plugin | Browse tags/attrs inside the default file manager. | • Qt sidebar showing metadata |
| **8. 2026‑Q1** | Full edit UI | After read‑only proves stable, add editing. | • Tag editor widget, attribute dialog |
| **9. 2026‑Q2** | Multi‑device sync | Final frontier: optional sync/replication layer. | • Choose between rqlite / Litestream / bespoke<br>• Read‑only mode for network mounts |

---

#### How to tackle each phase
### Current sprint (ends **2025‑06‑01**)

1. **Do one migration PR that just moves existing DDL into `0001.sql`.** Merge, tag a release.
2. **Prototype trigger-less FTS maintenance** in a branch; measure with `--timings` tracing.
3. **Hashing:** gate expensive work behind the `mtime/size` check you already coded.
4. **Alias logic:** start simple – single-level `canonical_id`; later add synonym sets if needed.
5. **Parser:** write unit tests for every example query first, then swap implementations – same public API.
6. **Templates:** store JSON schema in the DB, validate with `serde_json::Value` + compiled regexes.
7. **Dolphin plugin:** expose DBus calls from the Rust core; the C++/Qt side just calls them.
8. **Write UI:** reuse the same DBus interface; no extra DB code.
9. **Sync:** decide early if you aim for local-first replication (Litestream, rqlite) or a bespoke solution.
1. **FTS rebuild prototype** – dirtied‑rows approach, measure on 50 k files (see the sketch after this list).
2. `backup --prune` to keep only the N most recent snapshots.
3. Integration tests for tag/attr workflows on Windows via GitHub Actions.
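A rough shape for the dirtied-rows prototype in item 1: triggers shrink to a cheap "mark this file" insert, and one batch pass rebuilds only the marked FTS rows. Nothing below exists in this commit – the `fts_dirty` table and function name are assumptions – but the rebuild SELECT mirrors the trigger bodies in `0001_initial_schema.sql`:

```rust
use rusqlite::{Connection, Result};

/// Sketch of a dirty-flag FTS refresh (hypothetical `fts_dirty` table).
/// Triggers would do only `INSERT INTO fts_dirty(file_id) VALUES (...)`;
/// this batch pass then rebuilds just the marked rows.
fn refresh_dirty_fts(conn: &Connection) -> Result<usize> {
    let rebuilt = conn.execute(
        "INSERT OR REPLACE INTO files_fts(rowid, path, tags_text, attrs_text)
         SELECT f.id, f.path,
                IFNULL((SELECT GROUP_CONCAT(t.name, ' ')
                        FROM file_tags ft JOIN tags t ON ft.tag_id = t.id
                        WHERE ft.file_id = f.id), ''),
                IFNULL((SELECT GROUP_CONCAT(a.key || '=' || a.value, ' ')
                        FROM attributes a WHERE a.file_id = f.id), '')
         FROM files f
         WHERE f.id IN (SELECT file_id FROM fts_dirty)",
        [],
    )?;
    // Marks are consumed once the rebuild has run.
    conn.execute("DELETE FROM fts_dirty", [])?;
    Ok(rebuilt)
}
```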

---

### Practical next sprint (2 weeks)
### Development principles

1. **Finish phase 1** (migrations + CI) ⇒ release `v0.2.0`.
2. **Start phase 2:** rip out FTS triggers, implement the dirtied-rows rebuild, test at 50 k files.
3. **If time remains:** add the `--rehash` flag and wire in the SHA-256 function (phase 3 seed).

This path keeps user-visible features arriving every couple of weeks without accumulating technical debt.
* **Local‑first** – every feature must work offline.
* **Zero manual migrations** – shipping code *is* the migration.
* **Instrumentation first** – every new command logs trace spans and timings.

src/cli.rs
@@ -1,12 +1,15 @@
// src/cli.rs
use std::path::PathBuf;

use clap::{Parser, Subcommand};

/// Marlin – metadata-driven file explorer (CLI utilities)
#[derive(Parser, Debug)]
#[command(author, version, about)]
pub struct Cli {
    /// Enable debug logging and extra output
    #[arg(long)]
    pub verbose: bool,

    #[command(subcommand)]
    pub command: Commands,
}
src/db/migrations.sql (deleted)
@@ -1,61 +0,0 @@
PRAGMA foreign_keys = ON;

-- ─── core tables ───────────────────────────────────────────────────────
CREATE TABLE IF NOT EXISTS files (
    id    INTEGER PRIMARY KEY,
    path  TEXT NOT NULL UNIQUE,
    size  INTEGER,
    mtime INTEGER
);

CREATE TABLE IF NOT EXISTS tags (
    id           INTEGER PRIMARY KEY,
    name         TEXT NOT NULL UNIQUE,
    parent_id    INTEGER REFERENCES tags(id),
    canonical_id INTEGER REFERENCES tags(id)
);

CREATE TABLE IF NOT EXISTS file_tags (
    file_id INTEGER NOT NULL REFERENCES files(id) ON DELETE CASCADE,
    tag_id  INTEGER NOT NULL REFERENCES tags(id)  ON DELETE CASCADE,
    PRIMARY KEY (file_id, tag_id)
);

CREATE TABLE IF NOT EXISTS attributes (
    id      INTEGER PRIMARY KEY,
    file_id INTEGER NOT NULL REFERENCES files(id) ON DELETE CASCADE,
    key     TEXT NOT NULL,
    value   TEXT
);

-- optional free-form JSON metadata
CREATE TABLE IF NOT EXISTS json_meta (
    file_id INTEGER PRIMARY KEY REFERENCES files(id) ON DELETE CASCADE,
    data    TEXT -- arbitrary JSON blob
);

-- ─── full-text search ──────────────────────────────────────────────────
CREATE VIRTUAL TABLE IF NOT EXISTS files_fts
USING fts5(
    path,
    content='files', content_rowid='id',
    prefix='2 3 4 5 6 7 8 9 10'
);

CREATE TRIGGER IF NOT EXISTS files_ai AFTER INSERT ON files BEGIN
    INSERT INTO files_fts(rowid, path) VALUES (new.id, new.path);
END;
CREATE TRIGGER IF NOT EXISTS files_au AFTER UPDATE ON files BEGIN
    UPDATE files_fts SET path = new.path WHERE rowid = new.id;
END;
CREATE TRIGGER IF NOT EXISTS files_ad AFTER DELETE ON files BEGIN
    DELETE FROM files_fts WHERE rowid = old.id;
END;

-- ─── version table for incremental migrations ─────────────────────────
CREATE TABLE IF NOT EXISTS schema_version (version INTEGER PRIMARY KEY);

-- ─── useful indexes ────────────────────────────────────────────────────
CREATE INDEX IF NOT EXISTS idx_files_path       ON files(path);
CREATE INDEX IF NOT EXISTS idx_file_tags_tag_id ON file_tags(tag_id);
CREATE INDEX IF NOT EXISTS idx_attr_file_key    ON attributes(file_id, key);
src/db/migrations/0001_initial_schema.sql (new file, 191 lines)
@@ -0,0 +1,191 @@
PRAGMA foreign_keys = ON;
PRAGMA journal_mode = WAL;   -- Use WAL for better concurrency

-- Version 1: Initial Schema (with FTS5-backed search over paths, tags & attrs)

-- Core tables

CREATE TABLE IF NOT EXISTS files (
    id    INTEGER PRIMARY KEY,
    path  TEXT NOT NULL UNIQUE,
    size  INTEGER,
    mtime INTEGER,
    hash  TEXT -- file content hash (e.g. SHA256)
);

CREATE TABLE IF NOT EXISTS tags (
    id           INTEGER PRIMARY KEY,
    name         TEXT NOT NULL, -- tag segment
    parent_id    INTEGER REFERENCES tags(id) ON DELETE CASCADE,
    canonical_id INTEGER REFERENCES tags(id) ON DELETE SET NULL,
    UNIQUE(name, parent_id)
);

CREATE TABLE IF NOT EXISTS file_tags (
    file_id INTEGER NOT NULL REFERENCES files(id) ON DELETE CASCADE,
    tag_id  INTEGER NOT NULL REFERENCES tags(id)  ON DELETE CASCADE,
    PRIMARY KEY(file_id, tag_id)
);

CREATE TABLE IF NOT EXISTS attributes (
    id      INTEGER PRIMARY KEY,
    file_id INTEGER NOT NULL REFERENCES files(id) ON DELETE CASCADE,
    key     TEXT NOT NULL,
    value   TEXT,
    UNIQUE(file_id, key)
);

-- Full-text search

-- Drop any old FTS table, then recreate it as a contentless standalone table
DROP TABLE IF EXISTS files_fts;
CREATE VIRTUAL TABLE files_fts
USING fts5(
    path,        -- remove UNINDEXED to enable path searching
    tags_text,   -- concat of all tag names for this file
    attrs_text,  -- concat of all key=value attrs
    content='',  -- explicitly mark as contentless
    tokenize="unicode61 remove_diacritics 2"
);

-- FTS-sync triggers

-- When a file is added
DROP TRIGGER IF EXISTS files_fts_ai_file;
CREATE TRIGGER files_fts_ai_file
AFTER INSERT ON files
BEGIN
    INSERT INTO files_fts(rowid, path, tags_text, attrs_text)
    VALUES (
        NEW.id,   -- sets files_fts.rowid to files.id
        NEW.path,
        (SELECT IFNULL(GROUP_CONCAT(t.name, ' '), '')
           FROM file_tags ft
           JOIN tags t ON ft.tag_id = t.id
          WHERE ft.file_id = NEW.id),
        (SELECT IFNULL(GROUP_CONCAT(a.key || '=' || a.value, ' '), '')
           FROM attributes a
          WHERE a.file_id = NEW.id)
    );
END;

-- When a file’s path changes
DROP TRIGGER IF EXISTS files_fts_au_file;
CREATE TRIGGER files_fts_au_file
AFTER UPDATE OF path ON files
BEGIN
    UPDATE files_fts
       SET path = NEW.path
     WHERE rowid = NEW.id; -- rowid refers to files_fts.rowid, which matches files.id
END;

-- When a file is removed
DROP TRIGGER IF EXISTS files_fts_ad_file;
CREATE TRIGGER files_fts_ad_file
AFTER DELETE ON files
BEGIN
    DELETE FROM files_fts WHERE rowid = OLD.id; -- OLD.id from files table
END;

-- When tags are added, replace the entire FTS row
DROP TRIGGER IF EXISTS file_tags_fts_ai;
CREATE TRIGGER file_tags_fts_ai
AFTER INSERT ON file_tags
BEGIN
    INSERT OR REPLACE INTO files_fts(rowid, path, tags_text, attrs_text)
    SELECT f.id, f.path,
           (SELECT IFNULL(GROUP_CONCAT(t.name, ' '), '')
              FROM file_tags ft
              JOIN tags t ON ft.tag_id = t.id
             WHERE ft.file_id = f.id),
           (SELECT IFNULL(GROUP_CONCAT(a.key || '=' || a.value, ' '), '')
              FROM attributes a
             WHERE a.file_id = f.id)
      FROM files f
     WHERE f.id = NEW.file_id;
END;

-- When tags are removed, replace the entire FTS row
DROP TRIGGER IF EXISTS file_tags_fts_ad;
CREATE TRIGGER file_tags_fts_ad
AFTER DELETE ON file_tags
BEGIN
    INSERT OR REPLACE INTO files_fts(rowid, path, tags_text, attrs_text)
    SELECT f.id, f.path,
           (SELECT IFNULL(GROUP_CONCAT(t.name, ' '), '')
              FROM file_tags ft
              JOIN tags t ON ft.tag_id = t.id
             WHERE ft.file_id = f.id),
           (SELECT IFNULL(GROUP_CONCAT(a.key || '=' || a.value, ' '), '')
              FROM attributes a
             WHERE a.file_id = f.id)
      FROM files f
     WHERE f.id = OLD.file_id;
END;

-- When attributes are added, replace the entire FTS row
DROP TRIGGER IF EXISTS attributes_fts_ai;
CREATE TRIGGER attributes_fts_ai
AFTER INSERT ON attributes
BEGIN
    INSERT OR REPLACE INTO files_fts(rowid, path, tags_text, attrs_text)
    SELECT f.id, f.path,
           (SELECT IFNULL(GROUP_CONCAT(t.name, ' '), '')
              FROM file_tags ft
              JOIN tags t ON ft.tag_id = t.id
             WHERE ft.file_id = f.id),
           (SELECT IFNULL(GROUP_CONCAT(a.key || '=' || a.value, ' '), '')
              FROM attributes a
             WHERE a.file_id = f.id)
      FROM files f
     WHERE f.id = NEW.file_id;
END;

-- When attribute values change, replace the entire FTS row
DROP TRIGGER IF EXISTS attributes_fts_au;
CREATE TRIGGER attributes_fts_au
AFTER UPDATE OF value ON attributes
BEGIN
    INSERT OR REPLACE INTO files_fts(rowid, path, tags_text, attrs_text)
    SELECT f.id, f.path,
           (SELECT IFNULL(GROUP_CONCAT(t.name, ' '), '')
              FROM file_tags ft
              JOIN tags t ON ft.tag_id = t.id
             WHERE ft.file_id = f.id),
           (SELECT IFNULL(GROUP_CONCAT(a.key || '=' || a.value, ' '), '')
              FROM attributes a
             WHERE a.file_id = f.id)
      FROM files f
     WHERE f.id = NEW.file_id;
END;

-- When attributes are removed, replace the entire FTS row
DROP TRIGGER IF EXISTS attributes_fts_ad;
CREATE TRIGGER attributes_fts_ad
AFTER DELETE ON attributes
BEGIN
    INSERT OR REPLACE INTO files_fts(rowid, path, tags_text, attrs_text)
    SELECT f.id, f.path,
           (SELECT IFNULL(GROUP_CONCAT(t.name, ' '), '')
              FROM file_tags ft
              JOIN tags t ON ft.tag_id = t.id
             WHERE ft.file_id = f.id),
           (SELECT IFNULL(GROUP_CONCAT(a.key || '=' || a.value, ' '), '')
              FROM attributes a
             WHERE a.file_id = f.id)
      FROM files f
     WHERE f.id = OLD.file_id;
END;

-- Versioning & helpful indexes

CREATE TABLE IF NOT EXISTS schema_version (
    version    INTEGER PRIMARY KEY,
    applied_on TEXT NOT NULL
);

CREATE INDEX IF NOT EXISTS idx_files_path       ON files(path);
CREATE INDEX IF NOT EXISTS idx_files_hash       ON files(hash);
CREATE INDEX IF NOT EXISTS idx_tags_name_parent ON tags(name, parent_id);
CREATE INDEX IF NOT EXISTS idx_file_tags_tag_id ON file_tags(tag_id);
CREATE INDEX IF NOT EXISTS idx_attr_file_key    ON attributes(file_id, key);
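Because `files_fts` is contentless (`content=''`), SQLite stores no copy of the indexed text, so every read has to come back through the `rowid` join to `files`. The search code later in this commit does exactly that; pulled out here as a standalone helper for clarity (the SQL matches `run_search` in `src/main.rs`):

```rust
use rusqlite::{Connection, Result};

/// Query the contentless FTS table above: MATCH runs against files_fts,
/// and the rowid join back to `files` recovers the stored paths.
fn fts_paths(conn: &Connection, fts_expr: &str) -> Result<Vec<String>> {
    let mut stmt = conn.prepare(
        "SELECT f.path
           FROM files_fts
           JOIN files f ON f.rowid = files_fts.rowid
          WHERE files_fts MATCH ?1
          ORDER BY rank",
    )?;
    let rows = stmt.query_map([fts_expr], |r| r.get(0))?;
    rows.collect()
}
```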
src/db/migrations/0002_update_fts_and_triggers.sql (new file, 91 lines)
@@ -0,0 +1,91 @@
PRAGMA foreign_keys = ON;
PRAGMA journal_mode = WAL;   -- Use WAL for better concurrency

-- Drop old FTS5 triggers so we can fully replace the row on tag/attr changes
DROP TRIGGER IF EXISTS file_tags_fts_ai;
DROP TRIGGER IF EXISTS file_tags_fts_ad;
DROP TRIGGER IF EXISTS attributes_fts_ai;
DROP TRIGGER IF EXISTS attributes_fts_au;
DROP TRIGGER IF EXISTS attributes_fts_ad;

-- Recreate triggers with INSERT OR REPLACE to ensure a full reindex:

CREATE TRIGGER file_tags_fts_ai
AFTER INSERT ON file_tags
BEGIN
    INSERT OR REPLACE INTO files_fts(rowid, path, tags_text, attrs_text)
    SELECT f.id, f.path,
           (SELECT IFNULL(GROUP_CONCAT(t.name, ' '), '')
              FROM file_tags ft
              JOIN tags t ON ft.tag_id = t.id
             WHERE ft.file_id = f.id),
           (SELECT IFNULL(GROUP_CONCAT(a.key || '=' || a.value, ' '), '')
              FROM attributes a
             WHERE a.file_id = f.id)
      FROM files f
     WHERE f.id = NEW.file_id;
END;

CREATE TRIGGER file_tags_fts_ad
AFTER DELETE ON file_tags
BEGIN
    INSERT OR REPLACE INTO files_fts(rowid, path, tags_text, attrs_text)
    SELECT f.id, f.path,
           (SELECT IFNULL(GROUP_CONCAT(t.name, ' '), '')
              FROM file_tags ft
              JOIN tags t ON ft.tag_id = t.id
             WHERE ft.file_id = f.id),
           (SELECT IFNULL(GROUP_CONCAT(a.key || '=' || a.value, ' '), '')
              FROM attributes a
             WHERE a.file_id = f.id)
      FROM files f
     WHERE f.id = OLD.file_id;
END;

CREATE TRIGGER attributes_fts_ai
AFTER INSERT ON attributes
BEGIN
    INSERT OR REPLACE INTO files_fts(rowid, path, tags_text, attrs_text)
    SELECT f.id, f.path,
           (SELECT IFNULL(GROUP_CONCAT(t.name, ' '), '')
              FROM file_tags ft
              JOIN tags t ON ft.tag_id = t.id
             WHERE ft.file_id = f.id),
           (SELECT IFNULL(GROUP_CONCAT(a.key || '=' || a.value, ' '), '')
              FROM attributes a
             WHERE a.file_id = f.id)
      FROM files f
     WHERE f.id = NEW.file_id;
END;

CREATE TRIGGER attributes_fts_au
AFTER UPDATE OF value ON attributes
BEGIN
    INSERT OR REPLACE INTO files_fts(rowid, path, tags_text, attrs_text)
    SELECT f.id, f.path,
           (SELECT IFNULL(GROUP_CONCAT(t.name, ' '), '')
              FROM file_tags ft
              JOIN tags t ON ft.tag_id = t.id
             WHERE ft.file_id = f.id),
           (SELECT IFNULL(GROUP_CONCAT(a.key || '=' || a.value, ' '), '')
              FROM attributes a
             WHERE a.file_id = f.id)
      FROM files f
     WHERE f.id = NEW.file_id;
END;

CREATE TRIGGER attributes_fts_ad
AFTER DELETE ON attributes
BEGIN
    INSERT OR REPLACE INTO files_fts(rowid, path, tags_text, attrs_text)
    SELECT f.id, f.path,
           (SELECT IFNULL(GROUP_CONCAT(t.name, ' '), '')
              FROM file_tags ft
              JOIN tags t ON ft.tag_id = t.id
             WHERE ft.file_id = f.id),
           (SELECT IFNULL(GROUP_CONCAT(a.key || '=' || a.value, ' '), '')
              FROM attributes a
             WHERE a.file_id = f.id)
      FROM files f
     WHERE f.id = OLD.file_id;
END;
src/db/mod.rs (111 lines changed)
@@ -4,53 +4,94 @@ use std::{
    path::{Path, PathBuf},
};

use anyhow::Result;
use anyhow::{Context, Result};
use chrono::Local;
use rusqlite::{
    backup::{Backup, StepResult},
    params, Connection, OpenFlags,
    params,
    Connection,
    OpenFlags,
    OptionalExtension,
};
use tracing::{debug, info};

const MIGRATIONS_SQL: &str = include_str!("migrations.sql");
/// Embed every numbered migration file here.
const MIGRATIONS: &[(&str, &str)] = &[
    ("0001_initial_schema.sql", include_str!("migrations/0001_initial_schema.sql")),
    ("0002_update_fts_and_triggers.sql", include_str!("migrations/0002_update_fts_and_triggers.sql")),
];

/* ─── connection bootstrap ──────────────────────────────────────────── */

/// Open (or create) the DB, apply migrations, add any missing columns,
/// and rebuild the FTS index if needed.
pub fn open<P: AsRef<Path>>(db_path: P) -> Result<Connection> {
    let conn = Connection::open(&db_path)?;
    let db_path_ref = db_path.as_ref();
    let mut conn = Connection::open(db_path_ref)
        .with_context(|| format!("failed to open DB at {}", db_path_ref.display()))?;

    conn.pragma_update(None, "journal_mode", "WAL")?;
    conn.execute_batch(MIGRATIONS_SQL)?;
    conn.pragma_update(None, "foreign_keys", "ON")?;

    // example of dynamic column addition: files.hash TEXT
    ensure_column(&conn, "files", "hash", "TEXT")?;
    // Apply migrations
    apply_migrations(&mut conn)?;

    // ensure FTS picks up tokenizer / prefix changes
    conn.execute("INSERT INTO files_fts(files_fts) VALUES('rebuild')", [])?;
    Ok(conn)
}

/// Add a column if it does not already exist.
fn ensure_column(conn: &Connection, table: &str, col: &str, ddl_type: &str) -> Result<()> {
    // PRAGMA table_info returns rows with (cid, name, type, ...)
    let mut exists = false;
    let mut stmt = conn.prepare(&format!("PRAGMA table_info({table});"))?;
    let rows = stmt.query_map([], |row| row.get::<_, String>(1))?;
    for name in rows.flatten() {
        if name == col {
            exists = true;
            break;
        }
    }
/* ─── migration runner ──────────────────────────────────────────────── */

    if !exists {
        conn.execute(
            &format!("ALTER TABLE {table} ADD COLUMN {col} {ddl_type};"),
            [],
fn apply_migrations(conn: &mut Connection) -> Result<()> {
    conn.execute_batch(
        "CREATE TABLE IF NOT EXISTS schema_version (
             version    INTEGER PRIMARY KEY,
             applied_on TEXT NOT NULL
         );",
    )?;

    // legacy patch (ignore if already exists)
    let _ = conn.execute("ALTER TABLE schema_version ADD COLUMN applied_on TEXT", []);

    let tx = conn.transaction()?;
    for (fname, sql) in MIGRATIONS {
        let version: i64 = fname
            .split('_')
            .next()
            .and_then(|s| s.parse().ok())
            .expect("migration filenames start with number");

        let already: Option<i64> = tx
            .query_row(
                "SELECT version FROM schema_version WHERE version = ?1",
                [version],
                |r| r.get(0),
            )
            .optional()?;

        if already.is_some() {
            debug!("migration {fname} already applied");
            continue;
        }

        info!("applying migration {fname}");
        // For debugging:
        println!(
            "\nSQL SCRIPT FOR MIGRATION: {}\nBEGIN SQL >>>\n{}\n<<< END SQL\n",
            fname, sql
        );

        tx.execute_batch(sql)
            .with_context(|| format!("could not apply migration {fname}"))?;

        tx.execute(
            "INSERT INTO schema_version (version, applied_on) VALUES (?1, ?2)",
            params![version, Local::now().to_rfc3339()],
        )?;
    }
    tx.commit()?;
    Ok(())
}
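Adding a future migration under this runner is one file plus one array entry; the version number is parsed from the filename's numeric prefix. A hypothetical next entry (no 0003 file exists in this commit):

```rust
// Hypothetical: after dropping 0003_add_tag_aliases.sql into
// src/db/migrations/, the embedded list grows by one entry and
// apply_migrations applies it exactly once on the next open().
const MIGRATIONS: &[(&str, &str)] = &[
    ("0001_initial_schema.sql", include_str!("migrations/0001_initial_schema.sql")),
    ("0002_update_fts_and_triggers.sql", include_str!("migrations/0002_update_fts_and_triggers.sql")),
    // ("0003_add_tag_aliases.sql", include_str!("migrations/0003_add_tag_aliases.sql")),
];
```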

/// Ensure a (possibly hierarchical) tag exists and return the leaf tag id.
/* ─── helpers ───────────────────────────────────────────────────────── */

pub fn ensure_tag_path(conn: &Connection, path: &str) -> Result<i64> {
    let mut parent: Option<i64> = None;
    for segment in path.split('/').filter(|s| !s.is_empty()) {
@@ -68,13 +109,11 @@ pub fn ensure_tag_path(conn: &Connection, path: &str) -> Result<i64> {
    parent.ok_or_else(|| anyhow::anyhow!("empty tag path"))
}

/// Look up `files.id` by absolute path.
pub fn file_id(conn: &Connection, path: &str) -> Result<i64> {
    conn.query_row("SELECT id FROM files WHERE path = ?1", [path], |r| r.get(0))
        .map_err(|_| anyhow::anyhow!("file not indexed: {}", path))
}

/// Insert or update an attribute.
pub fn upsert_attr(conn: &Connection, file_id: i64, key: &str, value: &str) -> Result<()> {
    conn.execute(
        r#"
@@ -87,31 +126,27 @@ pub fn upsert_attr(conn: &Connection, file_id: i64, key: &str, value: &str) -> R
    Ok(())
}

/// Create a **consistent snapshot** of the DB and return the backup path.
/* ─── backup / restore ──────────────────────────────────────────────── */

pub fn backup<P: AsRef<Path>>(db_path: P) -> Result<PathBuf> {
    let src = db_path.as_ref();
    let dir = src
        .parent()
        .ok_or_else(|| anyhow::anyhow!("invalid DB path"))?
        .ok_or_else(|| anyhow::anyhow!("invalid DB path: {}", src.display()))?
        .join("backups");
    fs::create_dir_all(&dir)?;

    let stamp = Local::now().format("%Y-%m-%d_%H-%M-%S");
    let dst = dir.join(format!("backup_{stamp}.db"));

    // open connections: src read-only, dst writable
    let src_conn = Connection::open_with_flags(src, OpenFlags::SQLITE_OPEN_READ_ONLY)?;
    let mut dst_conn = Connection::open(&dst)?;

    // run online backup
    let mut bk = Backup::new(&src_conn, &mut dst_conn)?;
    let bk = Backup::new(&src_conn, &mut dst_conn)?;
    while let StepResult::More = bk.step(100)? {}
    // Backup finalised when `bk` is dropped.

    Ok(dst)
}
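The manual `StepResult::More` loop above drives the SQLite online-backup API one chunk at a time. rusqlite also ships a convenience driver for the same copy; an equivalent body is sketched below (same `backup` module of rusqlite 0.31, but treat the exact signature as an assumption worth checking against the crate docs):

```rust
use std::time::Duration;
use rusqlite::{backup::Backup, Connection, Result};

/// Same online copy as the step loop: 100 pages per step,
/// no pause between steps, no progress callback.
fn copy_db(src: &Connection, dst: &mut Connection) -> Result<()> {
    Backup::new(src, dst)?.run_to_completion(100, Duration::ZERO, None)
}
```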

/// Replace the live DB file with a snapshot (caller must have closed handles).
pub fn restore<P: AsRef<Path>>(backup_path: P, live_db_path: P) -> Result<()> {
    fs::copy(&backup_path, &live_db_path)?;
    Ok(())
src/main.rs (309 lines changed)
@@ -5,30 +5,42 @@ mod db;
mod logging;
mod scan;

use anyhow::Result;
use anyhow::{Context, Result};
use clap::Parser;
use cli::{AttrCmd, Cli, Commands};
use glob::glob;
use glob::Pattern;
use rusqlite::params;
use tracing::{error, info};
use shellexpand;
use shlex;
use std::{env, path::PathBuf, process::Command};
use tracing::{debug, error, info};
use walkdir::WalkDir;

use cli::{AttrCmd, Cli, Commands};

fn main() -> Result<()> {
    // Parse CLI and bootstrap logging
    let args = Cli::parse();
    if args.verbose {
        env::set_var("RUST_LOG", "debug");
    }
    logging::init();

    let args = Cli::parse();
    let cfg = config::Config::load()?;

    // snapshot unless doing an explicit backup / restore
    if !matches!(args.command, Commands::Backup | Commands::Restore { .. }) {
        let _ = db::backup(&cfg.db_path);
    // Backup before any non-init, non-backup/restore command
    if !matches!(args.command, Commands::Init | Commands::Backup | Commands::Restore { .. }) {
        match db::backup(&cfg.db_path) {
            Ok(path) => info!("Pre-command auto-backup created at {}", path.display()),
            Err(e) => error!("Failed to create pre-command auto-backup: {}", e),
        }
    }

    // open database (runs migrations / dynamic column adds)
    // Open (and migrate) the DB
    let mut conn = db::open(&cfg.db_path)?;

    match args.command {
        Commands::Init => {
            info!("database initialised at {}", cfg.db_path.display());
            info!("Database initialised at {}", cfg.db_path.display());
        }

        Commands::Scan { paths } => {
@@ -40,17 +52,22 @@ fn main() -> Result<()> {
            }
        }

        Commands::Tag { pattern, tag_path } => apply_tag(&conn, &pattern, &tag_path)?,
        Commands::Tag { pattern, tag_path } => {
            apply_tag(&conn, &pattern, &tag_path)?;
        }

        Commands::Attr { action } => match action {
            // borrow the Strings so attr_set gets &str
            AttrCmd::Set { pattern, key, value } => {
                attr_set(&conn, &pattern, &key, &value)?
                attr_set(&conn, &pattern, &key, &value)?;
            }
            AttrCmd::Ls { path } => {
                attr_ls(&conn, &path)?;
            }
            AttrCmd::Ls { path } => attr_ls(&conn, &path)?,
        },

        Commands::Search { query, exec } => run_search(&conn, &query, exec)?,
        Commands::Search { query, exec } => {
            run_search(&conn, &query, exec)?;
        }

        Commands::Backup => {
            let path = db::backup(&cfg.db_path)?;
@@ -58,118 +75,240 @@ fn main() -> Result<()> {
        }

        Commands::Restore { backup_path } => {
            drop(conn); // close handle
            db::restore(&backup_path, &cfg.db_path)?;
            println!("Restored from {}", backup_path.display());
            drop(conn);
            db::restore(&backup_path, &cfg.db_path)
                .with_context(|| format!("Failed to restore DB from {}", backup_path.display()))?;
            println!("Restored DB file from {}", backup_path.display());
            db::open(&cfg.db_path)
                .with_context(|| format!("Could not open restored DB at {}", cfg.db_path.display()))?;
            info!("Successfully opened and processed restored database.");
        }
    }

    Ok(())
}

/* ─── tagging ────────────────────────────────────────────────────────── */
/// Apply a hierarchical tag to all files matching the glob pattern.
fn apply_tag(conn: &rusqlite::Connection, pattern: &str, tag_path: &str) -> Result<()> {
    let tag_id = db::ensure_tag_path(conn, tag_path)?;
    let expanded = shellexpand::tilde(pattern).into_owned();
    let pat = Pattern::new(&expanded)
        .with_context(|| format!("Invalid glob pattern `{}`", expanded))?;
    let root = determine_scan_root(&expanded);

    let mut stmt_file = conn.prepare("SELECT id FROM files WHERE path = ?1")?;
    let mut stmt_insert =
        conn.prepare("INSERT OR IGNORE INTO file_tags(file_id, tag_id) VALUES (?1, ?2)")?;

    for entry in glob(pattern)? {
        match entry {
            Ok(path) => {
                let path_str = path.to_string_lossy();
                if let Ok(file_id) =
                    stmt_file.query_row(params![path_str], |row| row.get::<_, i64>(0))
                {
                    stmt_insert.execute(params![file_id, tag_id])?;
    let mut count = 0;
    for entry in WalkDir::new(&root)
        .into_iter()
        .filter_map(Result::ok)
        .filter(|e| e.file_type().is_file())
    {
        let path_str = entry.path().to_string_lossy();
        debug!("testing path: {}", path_str);
        if !pat.matches(&path_str) {
            debug!("  → no match");
            continue;
        }
        debug!("  → matched");

        match stmt_file.query_row(params![path_str.as_ref()], |r| r.get::<_, i64>(0)) {
            Ok(file_id) => {
                if stmt_insert.execute(params![file_id, tag_id])? > 0 {
                    info!(file = %path_str, tag = tag_path, "tagged");
                    count += 1;
                } else {
                    error!(file = %path_str, "file not in index – run `marlin scan` first");
                    debug!(file = %path_str, tag = tag_path, "already tagged");
                }
            }
            Err(e) => error!(error = %e, "glob error"),
            Err(rusqlite::Error::QueryReturnedNoRows) => {
                error!(file = %path_str, "not indexed – run `marlin scan` first");
            }
            Err(e) => {
                error!(file = %path_str, error = %e, "could not lookup file ID");
            }
        }
    }

    if count > 0 {
        info!("Applied tag '{}' to {} file(s).", tag_path, count);
    } else {
        info!("No new files were tagged with '{}' (no matches or already tagged).", tag_path);
    }
    Ok(())
}

/* ─── attributes ─────────────────────────────────────────────────────── */
fn attr_set(conn: &rusqlite::Connection, pattern: &str, key: &str, value: &str) -> Result<()> {
    for entry in glob(pattern)? {
        match entry {
            Ok(path) => {
                let path_str = path.to_string_lossy();
                let file_id = db::file_id(conn, &path_str)?;
/// Set a key=value attribute on all files matching the glob pattern.
fn attr_set(
    conn: &rusqlite::Connection,
    pattern: &str,
    key: &str,
    value: &str,
) -> Result<()> {
    let expanded = shellexpand::tilde(pattern).into_owned();
    let pat = Pattern::new(&expanded)
        .with_context(|| format!("Invalid glob pattern `{}`", expanded))?;
    let root = determine_scan_root(&expanded);

    let mut stmt_file = conn.prepare("SELECT id FROM files WHERE path = ?1")?;
    let mut count = 0;

    for entry in WalkDir::new(&root)
        .into_iter()
        .filter_map(Result::ok)
        .filter(|e| e.file_type().is_file())
    {
        let path_str = entry.path().to_string_lossy();
        debug!("testing attr path: {}", path_str);
        if !pat.matches(&path_str) {
            debug!("  → no match");
            continue;
        }
        debug!("  → matched");

        match stmt_file.query_row(params![path_str.as_ref()], |r| r.get::<_, i64>(0)) {
            Ok(file_id) => {
                db::upsert_attr(conn, file_id, key, value)?;
                info!(file = %path_str, key = key, value = value, "attr set");
                count += 1;
            }
            Err(rusqlite::Error::QueryReturnedNoRows) => {
                error!(file = %path_str, "not indexed – run `marlin scan` first");
            }
            Err(e) => {
                error!(file = %path_str, error = %e, "could not lookup file ID");
            }
            Err(e) => error!(error = %e, "glob error"),
        }
    }

    if count > 0 {
        info!("Attribute '{}: {}' set on {} file(s).", key, value, count);
    } else {
        info!("No attributes set (no matches or not indexed).");
    }
    Ok(())
}

/// List attributes for a given file path.
fn attr_ls(conn: &rusqlite::Connection, path: &std::path::Path) -> Result<()> {
    let file_id = db::file_id(conn, &path.to_string_lossy())?;
    let mut stmt = conn.prepare("SELECT key, value FROM attributes WHERE file_id = ?1")?;
    let rows = stmt.query_map([file_id], |row| Ok((row.get::<_, String>(0)?, row.get::<_, String>(1)?)))?;
    for row in rows {
    let mut stmt = conn.prepare(
        "SELECT key, value FROM attributes WHERE file_id = ?1 ORDER BY key",
    )?;
    for row in stmt.query_map([file_id], |r| Ok((r.get::<_, String>(0)?, r.get::<_, String>(1)?)))? {
        let (k, v) = row?;
        println!("{k} = {v}");
    }
    Ok(())
}

/* ─── search helpers ─────────────────────────────────────────────────── */
fn run_search(conn: &rusqlite::Connection, raw: &str, exec: Option<String>) -> Result<()> {
    let hits = search(conn, raw)?;

    if hits.is_empty() && exec.is_none() {
        eprintln!("No matches for `{}`", raw);
        return Ok(());
    }

    if let Some(cmd_tpl) = exec {
        for path in hits {
            let cmd_final = if cmd_tpl.contains("{}") {
                cmd_tpl.replace("{}", &path)
            } else {
                format!("{cmd_tpl} \"{path}\"")
            };
            let mut parts = cmd_final.splitn(2, ' ');
            let prog = parts.next().unwrap();
            let args = parts.next().unwrap_or("");
            let status = std::process::Command::new(prog)
                .args(shlex::split(args).unwrap_or_default())
                .status()?;
            if !status.success() {
                error!(file = %path, "command failed");
            }
        }
    } else {
        for p in hits {
            println!("{p}");
/// Build and run an FTS5 search query, with optional exec.
fn run_search(conn: &rusqlite::Connection, raw_query: &str, exec: Option<String>) -> Result<()> {
    let mut fts_query_parts = Vec::new();
    let parts = shlex::split(raw_query).unwrap_or_else(|| vec![raw_query.to_string()]);
    for part in parts {
        if ["AND", "OR", "NOT"].contains(&part.as_str()) {
            fts_query_parts.push(part);
        } else if let Some(tag) = part.strip_prefix("tag:") {
            fts_query_parts.push(format!("tags_text:{}", escape_fts_query_term(tag)));
        } else if let Some(attr) = part.strip_prefix("attr:") {
            fts_query_parts.push(format!("attrs_text:{}", escape_fts_query_term(attr)));
        } else {
            fts_query_parts.push(escape_fts_query_term(&part));
        }
    }
    Ok(())
}

fn search(conn: &rusqlite::Connection, raw: &str) -> Result<Vec<String>> {
    let q = if raw.split_ascii_whitespace().count() == 1
        && !raw.contains(&['"', '\'', ':', '*', '(', ')', '~', '+', '-'][..])
    {
        format!("{raw}*")
    } else {
        raw.to_string()
    };
    let fts_expr = fts_query_parts.join(" ");
    debug!("Constructed FTS MATCH expression: {}", fts_expr);

    let mut stmt = conn.prepare(
        r#"
        SELECT f.path FROM files_fts
        JOIN files f ON f.rowid = files_fts.rowid
        WHERE files_fts MATCH ?1
        SELECT f.path
          FROM files_fts
          JOIN files f ON f.rowid = files_fts.rowid
         WHERE files_fts MATCH ?1
         ORDER BY rank
        "#,
    )?;
    let rows = stmt.query_map([&q], |row| row.get::<_, String>(0))?;
    Ok(rows.filter_map(Result::ok).collect())
    let hits: Vec<String> = stmt
        .query_map(params![fts_expr], |row| row.get(0))?
        .filter_map(Result::ok)
        .collect();

    if let Some(cmd_tpl) = exec {
        // Exec-on-hits logic
        let mut ran_without_placeholder = false;
        // If no hits and no placeholder, run once
        if hits.is_empty() && !cmd_tpl.contains("{}") {
            if let Some(mut parts) = shlex::split(&cmd_tpl) {
                if !parts.is_empty() {
                    let prog = parts.remove(0);
                    let status = Command::new(&prog).args(&parts).status()?;
                    if !status.success() {
                        error!(command=%cmd_tpl, code=?status.code(), "command failed");
                    }
                }
            }
            ran_without_placeholder = true;
        }
        // Otherwise, run per hit
        if !ran_without_placeholder {
            for path in hits {
                let quoted = shlex::try_quote(&path).unwrap_or(path.clone().into());
                let cmd_final = if cmd_tpl.contains("{}") {
                    cmd_tpl.replace("{}", &quoted)
                } else {
                    format!("{} {}", cmd_tpl, &quoted)
                };
                if let Some(mut parts) = shlex::split(&cmd_final) {
                    if parts.is_empty() {
                        continue;
                    }
                    let prog = parts.remove(0);
                    let status = Command::new(&prog).args(&parts).status()?;
                    if !status.success() {
                        error!(file=%path, command=%cmd_final, code=?status.code(), "command failed");
                    }
                }
            }
        }
    } else {
        if hits.is_empty() {
            eprintln!("No matches for query: `{}` (FTS expression: `{}`)", raw_query, fts_expr);
        } else {
            for p in hits {
                println!("{}", p);
            }
        }
    }

    Ok(())
}

/// Quote terms for FTS when needed.
fn escape_fts_query_term(term: &str) -> String {
    if term.contains(|c: char| c.is_whitespace() || "-:()\"".contains(c))
        || ["AND", "OR", "NOT", "NEAR"].contains(&term.to_uppercase().as_str())
    {
        format!("\"{}\"", term.replace('"', "\"\""))
    } else {
        term.to_string()
    }
}
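A few worked cases of the quoting rules above (an illustrative test against the same function):

```rust
#[test]
fn quoting_examples() {
    assert_eq!(escape_fts_query_term("alpha"), "alpha");         // plain term passes through
    assert_eq!(escape_fts_query_term("draft 2"), "\"draft 2\""); // whitespace forces quoting
    assert_eq!(escape_fts_query_term("and"), "\"and\"");         // reserved word, case-insensitive
    assert_eq!(escape_fts_query_term("a\"b"), "\"a\"\"b\"");     // embedded quote is doubled
}
```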

/// Determine a filesystem root to limit recursive walking.
fn determine_scan_root(pattern: &str) -> PathBuf {
    let wildcard_pos = pattern
        .find(|c| c == '*' || c == '?' || c == '[')
        .unwrap_or(pattern.len());
    let prefix = &pattern[..wildcard_pos];
    let mut root = PathBuf::from(prefix);
    while root.as_os_str().to_string_lossy().contains(|c| ['*', '?', '['].contains(&c)) {
        if let Some(parent) = root.parent() {
            root = parent.to_path_buf();
        } else {
            root = PathBuf::from(".");
            break;
        }
    }
    root
}
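Illustrative cases for the root computation above (everything before the first wildcard, trimmed back to a literal directory; `Path` comparison ignores the trailing slash):

```rust
#[test]
fn scan_root_examples() {
    assert_eq!(
        determine_scan_root("/home/user/marlin_demo/**/*.pdf"),
        PathBuf::from("/home/user/marlin_demo/")
    );
    // A relative pattern keeps its literal directory prefix.
    assert_eq!(determine_scan_root("docs/*.md"), PathBuf::from("docs/"));
}
```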
@@ -1 +0,0 @@
|
||||
{"rustc":13226066032359371072,"features":"[]","declared_features":"[]","target":11231084163139794023,"profile":2040997289075261528,"path":4942398508502643691,"deps":[[8606274917505247608,"tracing",false,231856011624696765],[13625485746686963219,"anyhow",false,4141387669743810832],[15299814984394074821,"rusqlite",false,14944962927072959781],[15622660310229662834,"walkdir",false,10621832926267965360],[16230660778393187092,"tracing_subscriber",false,4186456971351832017],[17155886227862585100,"glob",false,15671197672131490604],[17236266856776043413,"directories",false,415205815177997164],[17612818546626403359,"clap",false,8915347984520511202]],"local":[{"CheckDepInfo":{"dep_info":"release/.fingerprint/marlin-634839a5e9cc4921/dep-bin-marlin","checksum":false}}],"rustflags":[],"config":2069994364910194474,"compile_kind":0}
|
Binary file not shown.
@@ -1 +0,0 @@
|
||||
This file has an mtime of when this was started.
|
@@ -1,3 +0,0 @@
|
||||
{"$message_type":"diagnostic","message":"failed to resolve: use of unresolved module or unlinked crate `shlex`","code":{"code":"E0433","explanation":"An undeclared crate, module, or type was used.\n\nErroneous code example:\n\n```compile_fail,E0433\nlet map = HashMap::new();\n// error: failed to resolve: use of undeclared type `HashMap`\n```\n\nPlease verify you didn't misspell the type/module's name or that you didn't\nforget to import it:\n\n```\nuse std::collections::HashMap; // HashMap has been imported.\nlet map: HashMap<u32, u32> = HashMap::new(); // So it can be used!\n```\n\nIf you've expected to use a crate name:\n\n```compile_fail\nuse ferris_wheel::BigO;\n// error: failed to resolve: use of undeclared module or unlinked crate\n```\n\nMake sure the crate has been added as a dependency in `Cargo.toml`.\n\nTo use a module from your current crate, add the `crate::` prefix to the path.\n"},"level":"error","spans":[{"file_name":"src/main.rs","byte_start":1965,"byte_end":1970,"line_start":65,"line_end":65,"column_start":31,"column_end":36,"is_primary":true,"text":[{"text":" .args(shlex::split(args).unwrap_or_default())","highlight_start":31,"highlight_end":36}],"label":"use of unresolved module or unlinked crate `shlex`","suggested_replacement":null,"suggestion_applicability":null,"expansion":null}],"children":[{"message":"if you wanted to use a crate named `shlex`, use `cargo add shlex` to add it to your `Cargo.toml`","code":null,"level":"help","spans":[],"children":[],"rendered":null}],"rendered":"\u001b[0m\u001b[1m\u001b[38;5;9merror[E0433]\u001b[0m\u001b[0m\u001b[1m: failed to resolve: use of unresolved module or unlinked crate `shlex`\u001b[0m\n\u001b[0m \u001b[0m\u001b[0m\u001b[1m\u001b[38;5;12m--> \u001b[0m\u001b[0msrc/main.rs:65:31\u001b[0m\n\u001b[0m \u001b[0m\u001b[0m\u001b[1m\u001b[38;5;12m|\u001b[0m\n\u001b[0m\u001b[1m\u001b[38;5;12m65\u001b[0m\u001b[0m \u001b[0m\u001b[0m\u001b[1m\u001b[38;5;12m|\u001b[0m\u001b[0m \u001b[0m\u001b[0m .args(shlex::split(args).unwrap_or_default())\u001b[0m\n\u001b[0m \u001b[0m\u001b[0m\u001b[1m\u001b[38;5;12m|\u001b[0m\u001b[0m \u001b[0m\u001b[0m\u001b[1m\u001b[38;5;9m^^^^^\u001b[0m\u001b[0m \u001b[0m\u001b[0m\u001b[1m\u001b[38;5;9muse of unresolved module or unlinked crate `shlex`\u001b[0m\n\u001b[0m \u001b[0m\u001b[0m\u001b[1m\u001b[38;5;12m|\u001b[0m\n\u001b[0m \u001b[0m\u001b[0m\u001b[1m\u001b[38;5;12m= \u001b[0m\u001b[0m\u001b[1mhelp\u001b[0m\u001b[0m: if you wanted to use a crate named `shlex`, use `cargo add shlex` to add it to your `Cargo.toml`\u001b[0m\n\n"}
|
||||
{"$message_type":"diagnostic","message":"aborting due to 1 previous error","code":null,"level":"error","spans":[],"children":[],"rendered":"\u001b[0m\u001b[1m\u001b[38;5;9merror\u001b[0m\u001b[0m\u001b[1m: aborting due to 1 previous error\u001b[0m\n\n"}
|
||||
{"$message_type":"diagnostic","message":"For more information about this error, try `rustc --explain E0433`.","code":null,"level":"failure-note","spans":[],"children":[],"rendered":"\u001b[0m\u001b[1mFor more information about this error, try `rustc --explain E0433`.\u001b[0m\n"}
|
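The deleted diagnostics above record the failure this commit resolves: `src/main.rs:65` calls `shlex::split`, but the `shlex` crate had not yet been declared in `Cargo.toml`, producing E0433. Following the compiler's `cargo add shlex` hint fixes the build. A minimal sketch of the call site from the diagnostic span (the `run_with_args` wrapper, its parameters, and the use of `status()` are illustrative assumptions, not code from the repository):

    use std::process::Command;

    // Hypothetical wrapper around the line shown in the diagnostic.
    // `shlex::split` applies shell-like quoting rules and returns None on
    // unbalanced quotes, so unparseable input degrades to no arguments.
    fn run_with_args(program: &str, args: &str) -> std::io::Result<std::process::ExitStatus> {
        Command::new(program)
            .args(shlex::split(args).unwrap_or_default())
            .status()
    }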
Binary file not shown.
@@ -1 +0,0 @@
|
||||
This file has an mtime of when this was started.
|
@@ -1 +0,0 @@
|
||||
25dd90b1992c67cf
|
@@ -1 +0,0 @@
|
||||
{"rustc":13226066032359371072,"features":"[\"bundled\", \"modern_sqlite\"]","declared_features":"[\"array\", \"backup\", \"blob\", \"buildtime_bindgen\", \"bundled\", \"bundled-full\", \"bundled-sqlcipher\", \"bundled-sqlcipher-vendored-openssl\", \"bundled-windows\", \"chrono\", \"collation\", \"column_decltype\", \"csv\", \"csvtab\", \"extra_check\", \"functions\", \"hooks\", \"i128_blob\", \"in_gecko\", \"limits\", \"load_extension\", \"loadable_extension\", \"modern-full\", \"modern_sqlite\", \"release_memory\", \"rusqlite-macros\", \"serde_json\", \"serialize\", \"series\", \"session\", \"sqlcipher\", \"time\", \"trace\", \"unlock_notify\", \"url\", \"uuid\", \"vtab\", \"wasm32-wasi-vfs\", \"window\", \"with-asan\"]","target":10662205063260755052,"profile":2040997289075261528,"path":402185755359498904,"deps":[[3056352129074654578,"hashlink",false,5628520544735898740],[5510864063823219921,"fallible_streaming_iterator",false,8552745081982913985],[6048213226671835012,"smallvec",false,13040686971658754908],[6166349630582887940,"bitflags",false,10135884282368686019],[9986166984836792091,"libsqlite3_sys",false,10002925590023881488],[12860549049674006569,"fallible_iterator",false,12944913816775710420]],"local":[{"CheckDepInfo":{"dep_info":"release/.fingerprint/rusqlite-6466f1598a85e8fc/dep-lib-rusqlite","checksum":false}}],"rustflags":[],"config":2069994364910194474,"compile_kind":0}
|
Binary file not shown.
Binary file not shown.
@@ -1,11 +0,0 @@
|
||||
/home/user/Documents/GitHub/Marlin/target/release/deps/marlin-634839a5e9cc4921: src/main.rs src/cli.rs src/config.rs src/db/mod.rs src/logging.rs src/scan.rs src/db/migrations.sql
|
||||
|
||||
/home/user/Documents/GitHub/Marlin/target/release/deps/marlin-634839a5e9cc4921.d: src/main.rs src/cli.rs src/config.rs src/db/mod.rs src/logging.rs src/scan.rs src/db/migrations.sql
|
||||
|
||||
src/main.rs:
|
||||
src/cli.rs:
|
||||
src/config.rs:
|
||||
src/db/mod.rs:
|
||||
src/logging.rs:
|
||||
src/scan.rs:
|
||||
src/db/migrations.sql:
|
@@ -1,29 +0,0 @@
|
||||
/home/user/Documents/GitHub/Marlin/target/release/deps/librusqlite-6466f1598a85e8fc.rmeta: /home/user/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/rusqlite-0.31.0/src/lib.rs /home/user/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/rusqlite-0.31.0/src/error.rs /home/user/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/rusqlite-0.31.0/src/busy.rs /home/user/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/rusqlite-0.31.0/src/cache.rs /home/user/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/rusqlite-0.31.0/src/column.rs /home/user/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/rusqlite-0.31.0/src/config.rs /home/user/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/rusqlite-0.31.0/src/inner_connection.rs /home/user/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/rusqlite-0.31.0/src/params.rs /home/user/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/rusqlite-0.31.0/src/pragma.rs /home/user/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/rusqlite-0.31.0/src/raw_statement.rs /home/user/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/rusqlite-0.31.0/src/row.rs /home/user/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/rusqlite-0.31.0/src/statement.rs /home/user/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/rusqlite-0.31.0/src/transaction.rs /home/user/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/rusqlite-0.31.0/src/types/mod.rs /home/user/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/rusqlite-0.31.0/src/types/from_sql.rs /home/user/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/rusqlite-0.31.0/src/types/to_sql.rs /home/user/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/rusqlite-0.31.0/src/types/value.rs /home/user/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/rusqlite-0.31.0/src/types/value_ref.rs /home/user/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/rusqlite-0.31.0/src/version.rs /home/user/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/rusqlite-0.31.0/src/util/mod.rs /home/user/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/rusqlite-0.31.0/src/util/param_cache.rs /home/user/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/rusqlite-0.31.0/src/util/small_cstr.rs /home/user/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/rusqlite-0.31.0/src/util/sqlite_string.rs
|
||||
|
||||
/home/user/Documents/GitHub/Marlin/target/release/deps/librusqlite-6466f1598a85e8fc.rlib: /home/user/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/rusqlite-0.31.0/src/lib.rs /home/user/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/rusqlite-0.31.0/src/error.rs /home/user/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/rusqlite-0.31.0/src/busy.rs /home/user/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/rusqlite-0.31.0/src/cache.rs /home/user/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/rusqlite-0.31.0/src/column.rs /home/user/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/rusqlite-0.31.0/src/config.rs /home/user/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/rusqlite-0.31.0/src/inner_connection.rs /home/user/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/rusqlite-0.31.0/src/params.rs /home/user/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/rusqlite-0.31.0/src/pragma.rs /home/user/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/rusqlite-0.31.0/src/raw_statement.rs /home/user/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/rusqlite-0.31.0/src/row.rs /home/user/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/rusqlite-0.31.0/src/statement.rs /home/user/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/rusqlite-0.31.0/src/transaction.rs /home/user/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/rusqlite-0.31.0/src/types/mod.rs /home/user/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/rusqlite-0.31.0/src/types/from_sql.rs /home/user/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/rusqlite-0.31.0/src/types/to_sql.rs /home/user/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/rusqlite-0.31.0/src/types/value.rs /home/user/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/rusqlite-0.31.0/src/types/value_ref.rs /home/user/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/rusqlite-0.31.0/src/version.rs /home/user/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/rusqlite-0.31.0/src/util/mod.rs /home/user/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/rusqlite-0.31.0/src/util/param_cache.rs /home/user/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/rusqlite-0.31.0/src/util/small_cstr.rs /home/user/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/rusqlite-0.31.0/src/util/sqlite_string.rs
|
||||
|
||||
/home/user/Documents/GitHub/Marlin/target/release/deps/rusqlite-6466f1598a85e8fc.d: /home/user/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/rusqlite-0.31.0/src/lib.rs /home/user/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/rusqlite-0.31.0/src/error.rs /home/user/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/rusqlite-0.31.0/src/busy.rs /home/user/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/rusqlite-0.31.0/src/cache.rs /home/user/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/rusqlite-0.31.0/src/column.rs /home/user/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/rusqlite-0.31.0/src/config.rs /home/user/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/rusqlite-0.31.0/src/inner_connection.rs /home/user/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/rusqlite-0.31.0/src/params.rs /home/user/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/rusqlite-0.31.0/src/pragma.rs /home/user/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/rusqlite-0.31.0/src/raw_statement.rs /home/user/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/rusqlite-0.31.0/src/row.rs /home/user/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/rusqlite-0.31.0/src/statement.rs /home/user/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/rusqlite-0.31.0/src/transaction.rs /home/user/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/rusqlite-0.31.0/src/types/mod.rs /home/user/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/rusqlite-0.31.0/src/types/from_sql.rs /home/user/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/rusqlite-0.31.0/src/types/to_sql.rs /home/user/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/rusqlite-0.31.0/src/types/value.rs /home/user/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/rusqlite-0.31.0/src/types/value_ref.rs /home/user/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/rusqlite-0.31.0/src/version.rs /home/user/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/rusqlite-0.31.0/src/util/mod.rs /home/user/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/rusqlite-0.31.0/src/util/param_cache.rs /home/user/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/rusqlite-0.31.0/src/util/small_cstr.rs /home/user/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/rusqlite-0.31.0/src/util/sqlite_string.rs
|
||||
|
||||
/home/user/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/rusqlite-0.31.0/src/lib.rs:
|
||||
/home/user/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/rusqlite-0.31.0/src/error.rs:
|
||||
/home/user/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/rusqlite-0.31.0/src/busy.rs:
|
||||
/home/user/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/rusqlite-0.31.0/src/cache.rs:
|
||||
/home/user/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/rusqlite-0.31.0/src/column.rs:
|
||||
/home/user/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/rusqlite-0.31.0/src/config.rs:
|
||||
/home/user/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/rusqlite-0.31.0/src/inner_connection.rs:
|
||||
/home/user/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/rusqlite-0.31.0/src/params.rs:
|
||||
/home/user/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/rusqlite-0.31.0/src/pragma.rs:
|
||||
/home/user/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/rusqlite-0.31.0/src/raw_statement.rs:
|
||||
/home/user/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/rusqlite-0.31.0/src/row.rs:
|
||||
/home/user/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/rusqlite-0.31.0/src/statement.rs:
|
||||
/home/user/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/rusqlite-0.31.0/src/transaction.rs:
|
||||
/home/user/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/rusqlite-0.31.0/src/types/mod.rs:
|
||||
/home/user/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/rusqlite-0.31.0/src/types/from_sql.rs:
|
||||
/home/user/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/rusqlite-0.31.0/src/types/to_sql.rs:
|
||||
/home/user/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/rusqlite-0.31.0/src/types/value.rs:
|
||||
/home/user/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/rusqlite-0.31.0/src/types/value_ref.rs:
|
||||
/home/user/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/rusqlite-0.31.0/src/version.rs:
|
||||
/home/user/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/rusqlite-0.31.0/src/util/mod.rs:
|
||||
/home/user/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/rusqlite-0.31.0/src/util/param_cache.rs:
|
||||
/home/user/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/rusqlite-0.31.0/src/util/small_cstr.rs:
|
||||
/home/user/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/rusqlite-0.31.0/src/util/sqlite_string.rs:
|
Binary file not shown.
@@ -1 +1 @@
|
||||
/home/user/Documents/GitHub/Marlin/target/release/marlin: /home/user/Documents/GitHub/Marlin/src/cli.rs /home/user/Documents/GitHub/Marlin/src/config.rs /home/user/Documents/GitHub/Marlin/src/db/migrations.sql /home/user/Documents/GitHub/Marlin/src/db/mod.rs /home/user/Documents/GitHub/Marlin/src/logging.rs /home/user/Documents/GitHub/Marlin/src/main.rs /home/user/Documents/GitHub/Marlin/src/scan.rs
|
||||
/home/user/Documents/GitHub/Marlin/target/release/marlin: /home/user/Documents/GitHub/Marlin/src/cli.rs /home/user/Documents/GitHub/Marlin/src/config.rs /home/user/Documents/GitHub/Marlin/src/db/migrations/0001_initial_schema.sql /home/user/Documents/GitHub/Marlin/src/db/migrations/0002_update_fts_and_triggers.sql /home/user/Documents/GitHub/Marlin/src/db/mod.rs /home/user/Documents/GitHub/Marlin/src/logging.rs /home/user/Documents/GitHub/Marlin/src/main.rs /home/user/Documents/GitHub/Marlin/src/scan.rs
|
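The only artifact that changes rather than disappears is the dep-info for the `marlin` release binary: the single `src/db/migrations.sql` input is replaced by numbered files, `0001_initial_schema.sql` and `0002_update_fts_and_triggers.sql`. Files pulled in with `include_str!` are tracked in dep-info exactly like source files, which is why the split shows up here. A hedged sketch of how numbered migrations are commonly embedded and applied in order (the `apply_migrations` helper, the `include_str!` paths, and the `user_version` bookkeeping are assumptions; the repository's actual `db/mod.rs` is not part of this diff):

    use rusqlite::Connection;

    // Embedded at compile time; rustc records each file in the dep-info above.
    const MIGRATIONS: &[&str] = &[
        include_str!("db/migrations/0001_initial_schema.sql"),
        include_str!("db/migrations/0002_update_fts_and_triggers.sql"),
    ];

    // Apply every migration newer than the version stored in SQLite's
    // user_version pragma, bumping the pragma after each one.
    fn apply_migrations(conn: &Connection) -> rusqlite::Result<()> {
        let current: i64 = conn.query_row("PRAGMA user_version", [], |row| row.get(0))?;
        for (idx, sql) in MIGRATIONS.iter().enumerate() {
            let version = (idx + 1) as i64;
            if version > current {
                conn.execute_batch(sql)?;
                conn.execute_batch(&format!("PRAGMA user_version = {version}"))?;
            }
        }
        Ok(())
    }

Numbered filenames make the apply order explicit and let new migrations be added without touching existing SQL.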