From 8b45e74c8e3ce43055b67657b6824db708e934a2 Mon Sep 17 00:00:00 2001 From: CJ Brewer Date: Thu, 7 May 2026 16:06:27 -0600 Subject: [PATCH 1/2] feat(cli): implement phased plans for rollout and backfill --- packages/cli/src/bin/stash.ts | 22 +- packages/cli/src/commands/impl/index.ts | 126 ++++- .../init/lib/__tests__/parse-plan.test.ts | 134 +++++- .../init/lib/__tests__/rollout-state.test.ts | 127 +++++ .../init/lib/__tests__/setup-prompt.test.ts | 185 ++++++- .../cli/src/commands/init/lib/parse-plan.ts | 86 +++- .../src/commands/init/lib/rollout-state.ts | 147 ++++++ .../cli/src/commands/init/lib/setup-prompt.ts | 441 +++++++++++++---- .../src/commands/init/lib/write-context.ts | 1 + packages/cli/src/commands/init/types.ts | 8 + packages/cli/src/commands/plan/index.ts | 111 ++++- .../commands/status/__tests__/status.test.ts | 453 ++++++++++++++---- packages/cli/src/commands/status/index.ts | 289 +++++++---- packages/cli/src/commands/status/quest.ts | 262 ++++++++++ packages/cli/src/commands/status/render.ts | 210 ++++++++ skills/stash-cli/SKILL.md | 105 +++- skills/stash-drizzle/SKILL.md | 70 ++- skills/stash-dynamodb/SKILL.md | 12 + skills/stash-encryption/SKILL.md | 153 +++--- skills/stash-supabase/SKILL.md | 196 ++++++++ 20 files changed, 2679 insertions(+), 459 deletions(-) create mode 100644 packages/cli/src/commands/init/lib/__tests__/rollout-state.test.ts create mode 100644 packages/cli/src/commands/init/lib/rollout-state.ts create mode 100644 packages/cli/src/commands/status/quest.ts create mode 100644 packages/cli/src/commands/status/render.ts diff --git a/packages/cli/src/bin/stash.ts b/packages/cli/src/bin/stash.ts index e920f218..e8169ca5 100644 --- a/packages/cli/src/bin/stash.ts +++ b/packages/cli/src/bin/stash.ts @@ -110,6 +110,20 @@ Init Flags: --supabase Use Supabase-specific setup flow --drizzle Use Drizzle-specific setup flow +Plan Flags: + --complete-rollout Plan the entire encryption lifecycle (schema-add through drop) + in one document. 
Skips the production-deploy gate that + normally separates rollout from cutover. Only safe when this + database is not backing a deployed application (local dev, + sandbox, freshly seeded test environment). + +Status Flags: + --quest Force the quest-log output (emoji + progress bars) + even in non-TTY contexts. Default is auto: fancy + in a terminal, plain in CI / pipes / agents. + --plain Force the plain-text output even in TTY contexts. + --json Emit a structured JSON document instead. + Impl Flags: --continue-without-plan Skip planning and go straight to implementation (interactively confirms before proceeding) @@ -382,13 +396,17 @@ async function main() { await initCommand(flags) break case 'plan': - await planCommand() + await planCommand(flags) break case 'impl': await implCommand(flags) break case 'status': - await statusCommand() + await statusCommand({ + quest: flags.quest, + plain: flags.plain, + json: flags.json, + }) break case 'auth': { const authArgs = subcommand ? [subcommand, ...commandArgs] : commandArgs diff --git a/packages/cli/src/commands/impl/index.ts b/packages/cli/src/commands/impl/index.ts index 4b5ad386..f23b0735 100644 --- a/packages/cli/src/commands/impl/index.ts +++ b/packages/cli/src/commands/impl/index.ts @@ -2,8 +2,18 @@ import { existsSync, readFileSync } from 'node:fs' import { resolve } from 'node:path' import * as p from '@clack/prompts' import { type AgentEnvironment, detectAgents } from '../init/detect-agents.js' -import { parsePlanSummary, renderPlanSummary } from '../init/lib/parse-plan.js' +import { + type PlanStep, + type PlanSummary, + effectiveStep, + parsePlanSummary, + renderPlanSummary, +} from '../init/lib/parse-plan.js' import { readContextFile } from '../init/lib/read-context.js' +import { + classifyPhase, + detectColumnStates, +} from '../init/lib/rollout-state.js' import { PLAN_REL_PATH } from '../init/lib/setup-prompt.js' import { CONTEXT_REL_PATH, @@ -46,6 +56,71 @@ async function confirmContinueWithoutPlan(): 
Promise { } } +/** + * Verify the plan-summary's targeted columns have crossed the deploy gate + * (a `dual_writing` event recorded in `cs_migrations`). Used only for + * `cutover`-step plans. Returns the list of columns that are still in the + * rollout step (no `dual_writing` event yet) — empty when the plan is + * safe to proceed. + * + * On any DB error, returns `null` to signal "could not verify". The caller + * still proceeds — refusing to run when the DB is just temporarily + * unreachable would be too brittle, and the encrypt commands themselves + * also gate on the same event before doing anything destructive. + */ +async function verifyCutoverPreconditions( + cwd: string, + summary: PlanSummary, +): Promise<{ table: string; column: string }[] | null> { + const migrate = summary.columns.filter((c) => c.path === 'migrate') + if (migrate.length === 0) return [] + + let databaseUrl: string + try { + const { loadStashConfig } = await import('../../config/index.js') + const config = await loadStashConfig() + databaseUrl = config.databaseUrl + } catch { + return null + } + + const states = await detectColumnStates(databaseUrl, migrate) + return states + .filter((s) => classifyPhase(s.phase) !== 'cutover') + .map((s) => ({ table: s.table, column: s.column })) +} + +/** + * Print the deploy-gate banner shown at the end of a successful + * rollout-step impl run. The banner is the explicit handover from the + * CLI back to the user — the encrypted twin column and dual-write code + * are now in the repo, but they only become safe once that code is + * running in production. The CLI deliberately does not chain into + * anything destructive here; the next destructive step (`stash encrypt + * backfill`) requires the user to come back and run `stash plan` after + * deploy. 
+ */ +function printDeployGateBanner(cli: string): void { + p.note( + [ + 'Encryption rollout is in your repo.', + '', + 'Encrypted values are not yet flowing through your application because the', + 'dual-write code is not deployed. Until your production environment is running', + 'this code, backfilling historical rows would corrupt new writes (any row', + 'inserted during the backfill window would land in plaintext only and create', + 'silent migration drift).', + '', + 'Next:', + ` 1. Deploy this branch to production.`, + ` 2. Run \`${cli} status\` to verify dual-writes are live.`, + ` 3. Run \`${cli} plan\` again — the CLI will detect dual-writes and draft`, + ` the encryption cutover (backfill → switch reads → drop plaintext).`, + ].join('\n'), + '⛔ Deploy gate', + ) +} + /** * `stash impl` — execute an encryption plan. * @@ -54,6 +129,9 @@ async function confirmContinueWithoutPlan(): Promise { * * - **Plan exists** (TTY): parse the structured summary block, render * a confirmation panel, ask the user to proceed. Default-yes. + * For `cutover`-step plans, verify `dual_writing` events are + * recorded for every migrate column before launching — refuse if + * not, and point the user at re-running `stash plan` after deploy. * - **Plan exists** (non-TTY): proceed without confirmation. * - **No plan, `--continue-without-plan`**: confirm once, then implement. * - **No plan, TTY**: present a `p.select` — draft a plan first @@ -61,6 +139,13 @@ async function confirmContinueWithoutPlan(): Promise { * once, then implements). * - **No plan, non-TTY**: error out with a clear next-action; CI must * pass `--continue-without-plan` or run `stash plan` first. + * + * After successful handoff, the outro depends on plan step: + * - `rollout` — deploy-gate banner; explicit "do not run encrypt + * backfill yet" message. + * - `cutover` — confirmation that the rollout is fully complete. + * - `complete` — same as cutover (escape hatch covers everything). 
+ * - no plan / no summary — generic "verify state" pointer. */ export async function implCommand(flags: Record) { const cwd = process.cwd() @@ -82,12 +167,38 @@ export async function implCommand(flags: Record) { const continueWithoutPlan = flags['continue-without-plan'] === true const isTTY = process.stdout.isTTY + let planStep: PlanStep | undefined + try { if (planExists) { + const summary = parsePlanSummary(readFileSync(planPath, 'utf-8')) + planStep = summary ? effectiveStep(summary) : undefined + + // Deploy-gate enforcement for cutover-step plans. Block before any + // confirm prompt or handoff so the user never gets the chance to + // press Enter through a misshapen flow. The check itself is + // defensive: a DB outage doesn't fail the run (returns null), the + // encrypt commands have their own gate. + if (summary && planStep === 'cutover') { + const missing = await verifyCutoverPreconditions(cwd, summary) + if (missing && missing.length > 0) { + p.log.error( + 'This plan is a cutover-step plan, but `cs_migrations` has no `dual_writing` event for the following column(s):', + ) + for (const col of missing) { + p.log.error(` · ${col.table}.${col.column}`) + } + p.log.info( + `Backfill, cutover, and drop are unsafe until the dual-write code is live in production. Deploy your encryption-rollout PR first, then re-run \`${cli} status\` to verify and \`${cli} plan\` to redraft.`, + ) + p.outro('Cutover blocked.') + process.exit(1) + } + } + // Plan-summary checkpoint: the last save point before launching the // (potentially hour-long) implementation phase. if (isTTY) { - const summary = parsePlanSummary(readFileSync(planPath, 'utf-8')) if (summary) { p.note(renderPlanSummary(summary), 'Plan summary') } else { @@ -154,9 +265,14 @@ export async function implCommand(flags: Record) { await howToProceedStep.run(state) - p.outro( - `Implementation handoff complete. 
Run \`${cli} db status\` to verify state.`, - ) + if (planStep === 'rollout') { + printDeployGateBanner(cli) + p.outro('Encryption rollout handed off — deploy when ready.') + } else { + p.outro( + `Implementation handoff complete. Run \`${cli} status\` to verify state.`, + ) + } } catch (err) { if (err instanceof CancelledError) { p.cancel('Cancelled.') diff --git a/packages/cli/src/commands/init/lib/__tests__/parse-plan.test.ts b/packages/cli/src/commands/init/lib/__tests__/parse-plan.test.ts index 63f0ef1a..cd9e6471 100644 --- a/packages/cli/src/commands/init/lib/__tests__/parse-plan.test.ts +++ b/packages/cli/src/commands/init/lib/__tests__/parse-plan.test.ts @@ -1,6 +1,7 @@ import { describe, expect, it } from 'vitest' import { type PlanSummary, + effectiveStep, parsePlanSummary, renderPlanSummary, } from '../parse-plan.js' @@ -14,6 +15,7 @@ describe('parsePlanSummary', () => { it('parses a well-formed summary block', () => { const md = `` + const summary = parsePlanSummary(md) + expect(summary).toBeDefined() + expect(summary?.step).toBeUndefined() + expect(effectiveStep(summary as PlanSummary)).toBe('complete') + }) + + it('rejects an unknown `step` value', () => { + const md = `` + expect(parsePlanSummary(md)).toBeUndefined() + }) + it('returns undefined for malformed JSON inside the block', () => { const md = `` @@ -92,6 +115,7 @@ describe('parsePlanSummary', () => { * - * `stash impl` parses this block to render a confirmation panel before - * dispatching to the implementation handoff. Plans without the block (or - * with a malformed one) fall back to a soft "open the plan in your editor" - * message — never an error. Older plans pre-dating this feature are still - * usable. + * `step` (optional for backwards compatibility) tells `stash impl` which + * scope of the encryption rollout this plan covers: + * - `"rollout"` — schema-add + dual-write code + db push (pending). + * Deploy gate after this; cutover comes in a later run. 
+ * - `"cutover"` — backfill + cutover + drop. Requires `dual_writing` + * events in `cs_migrations`; impl refuses otherwise. + * - `"complete"` — the whole lifecycle in one document (escape hatch + * for users without a production-deploy to gate on). + * + * Plans without `step` are treated as `"complete"` for backwards + * compatibility — that is how every plan was shaped before the rollout + * split landed. Plans without the block (or with a malformed one) fall + * back to a soft "open the plan in your editor" message — never an error. */ export type PlanPath = 'new' | 'migrate' +export type PlanStep = 'rollout' | 'cutover' | 'complete' + export interface PlanColumn { table: string column: string @@ -29,6 +40,8 @@ export interface PlanColumn { } export interface PlanSummary { + /** Scope of this plan. Optional for backwards compat; absent = `complete`. */ + step?: PlanStep columns: PlanColumn[] } @@ -46,18 +59,25 @@ function isPlanColumn(x: unknown): x is PlanColumn { ) } +function isPlanStep(x: unknown): x is PlanStep { + return x === 'rollout' || x === 'cutover' || x === 'complete' +} + function isPlanSummary(x: unknown): x is PlanSummary { if (!x || typeof x !== 'object') return false const obj = x as Record // Empty `columns` is rejected: downstream `renderPlanSummary` would - // produce "0 columns across 0 tables — single-deploy", which is - // misleading. Treating empty as invalid lets `stash impl` fall back - // to the soft "open it in your editor" panel. - return ( - Array.isArray(obj.columns) && - obj.columns.length > 0 && - obj.columns.every(isPlanColumn) - ) + // produce a misleading zero-state line. `stash impl` falls back to the + // soft "open it in your editor" panel instead. 
+ if ( + !Array.isArray(obj.columns) || + obj.columns.length === 0 || + !obj.columns.every(isPlanColumn) + ) { + return false + } + if (obj.step !== undefined && !isPlanStep(obj.step)) return false + return true } /** @@ -77,6 +97,12 @@ export function parsePlanSummary(content: string): PlanSummary | undefined { } } +/** Resolve `step` with the legacy default. Plans pre-dating the split + * carry no `step` and were always end-to-end. */ +export function effectiveStep(summary: PlanSummary): PlanStep { + return summary.step ?? 'complete' +} + const COLUMN_LABEL_WIDTH = 20 /** @@ -88,12 +114,17 @@ const COLUMN_LABEL_WIDTH = 20 * ◇ users.phone migrate existing column * ◇ orders.notes migrate existing column * - * Includes migrate-existing columns — implementation is staged across - * 4 deploys (schema-add → backfill → cutover → drop). + * Encryption rollout — implementation lands schema-add and dual-write + * code in your repo. Deploy that to production, verify with + * `npx stash status`, then run `npx stash plan` again to draft the + * encryption cutover. * - * Deploys are reported as a flat 4 (not 4 per migrate column) because the - * lifecycle batches columns: one schema-add deploy covers every twin, one - * backfill covers every column, etc. + * Footer copy varies by step: + * - rollout → "Encryption rollout — deploy gate next." + * - cutover → "Encryption cutover — backfill, switch reads, drop plaintext." + * - complete → "Complete rollout — skips the deploy gate; only safe when + * this database is not backing a deployed application." + * - (no migrate columns) → "All columns are additive — single-deploy." */ export function renderPlanSummary(summary: PlanSummary): string { const tables = new Set(summary.columns.map((c) => c.table)) @@ -112,10 +143,21 @@ export function renderPlanSummary(summary: PlanSummary): string { return `◇ ${`${c.table}.${c.column}`.padEnd(COLUMN_LABEL_WIDTH)} ${desc}` }) - const footer = - migrateCount > 0 - ? 
`Includes migrate-existing column${migrateCount === 1 ? '' : 's'} — implementation is staged across 4 deploys (schema-add → backfill → cutover → drop).` - : 'All columns are additive — single-deploy implementation.' + const footer = renderFooter(effectiveStep(summary), migrateCount) return [header, '', ...rows, '', footer].join('\n') } + +function renderFooter(step: PlanStep, migrateCount: number): string { + if (migrateCount === 0) { + return 'All columns are additive — single-deploy implementation.' + } + switch (step) { + case 'rollout': + return 'Encryption rollout — implementation lands schema-add and dual-write code in your repo. Deploy that to production, verify with `npx stash status`, then run `npx stash plan` again to draft the encryption cutover.' + case 'cutover': + return 'Encryption cutover — implementation runs the backfill, switches reads to encrypted, and drops plaintext. Requires dual-writes already live in production.' + case 'complete': + return 'Complete encryption rollout — covers schema-add through drop in one go. Skips the production-deploy gate; only safe when this database is not backing a deployed application.' + } +} diff --git a/packages/cli/src/commands/init/lib/rollout-state.ts b/packages/cli/src/commands/init/lib/rollout-state.ts new file mode 100644 index 00000000..6ec1455b --- /dev/null +++ b/packages/cli/src/commands/init/lib/rollout-state.ts @@ -0,0 +1,147 @@ +import { type MigrationPhase, latestByColumn } from '@cipherstash/migrate' +import pg from 'pg' + +/** + * What rollout work this column needs next, derived from `cs_migrations`. + * + * - `rollout` — schema-add and dual-write code aren't confirmed live yet. + * No `dual_writing` event recorded; the deploy gate hasn't + * been crossed. + * - `cutover` — `dual_writing` (or later) is recorded. Backfill, cutover, + * and drop are the remaining work. + * - `completed` — `dropped` event recorded; nothing left to do. + * - `unknown` — column has no `cs_migrations` entries. 
Could be a brand + * new column, a migrate column that hasn't started yet, or + * a column where init-side work is still in progress. + * Callers should treat this as "needs the user to confirm + * path=new vs path=migrate". + */ +export type ColumnNeeds = 'rollout' | 'cutover' | 'completed' | 'unknown' + +export interface ColumnState { + table: string + column: string + /** Latest phase recorded for this column, or `null` if no events. */ + phase: MigrationPhase | null + needs: ColumnNeeds +} + +/** + * Classify a phase into the next plan-step the column needs. The mapping is: + * + * null → unknown (no events; brand new or not started) + * schema-added → rollout (synthesised in some renderers; safe default) + * dual-writing → cutover (deploy gate crossed) + * backfilling → cutover (cutover work in flight) + * backfilled → cutover (ready to rename swap) + * cut-over → cutover (rename done; drop still pending) + * dropped → completed (lifecycle complete) + */ +export function classifyPhase(phase: MigrationPhase | null): ColumnNeeds { + if (phase === null) return 'unknown' + if (phase === 'schema-added') return 'rollout' + if (phase === 'dropped') return 'completed' + return 'cutover' +} + +/** + * Read `cs_migrations` once and classify a list of (table, column) pairs. + * Used by `stash plan` to dispatch to the right template, by `stash impl` + * to enforce the deploy gate, and by `stash status` (the quest log) to + * shape per-column objective state. + * + * Connects, queries, and disconnects in one call. Callers that already + * have a connection should use `classifyPhases` against the result of + * `latestByColumn` directly. + * + * On any unexpected error, returns `unknown` for every input — never + * throws. The encryption rollout is paused-by-default safer than + * crashed-by-default. 
+ */ +export async function detectColumnStates( + databaseUrl: string, + columns: ReadonlyArray<{ table: string; column: string }>, +): Promise { + if (columns.length === 0) return [] + + const client = new pg.Client({ connectionString: databaseUrl }) + try { + await client.connect() + const events = await latestByColumnSafe(client) + return classifyPhases(columns, (table, column) => { + const row = events.get(`${table}.${column}`) + return row?.phase ?? null + }) + } catch { + return columns.map((c) => ({ + table: c.table, + column: c.column, + phase: null, + needs: 'unknown' as const, + })) + } finally { + await client.end().catch(() => undefined) + } +} + +/** + * Pure classification helper. Given a phase lookup function, return one + * `ColumnState` per requested column. Useful when the caller already has + * an open pg connection (`stash status` reads three things in parallel). + */ +export function classifyPhases( + columns: ReadonlyArray<{ table: string; column: string }>, + lookup: (table: string, column: string) => MigrationPhase | null, +): ColumnState[] { + return columns.map((c) => { + const phase = lookup(c.table, c.column) + return { + table: c.table, + column: c.column, + phase, + needs: classifyPhase(phase), + } + }) +} + +/** + * Roll a list of per-column needs up into a single plan step. The + * dispatch rule for `stash plan`: + * + * any cutover → plan the cutover (covers any rollout-state columns + * along the way; agent will batch them) + * any rollout → plan the rollout + * all completed → no work to plan + * otherwise → unknown (caller asks the user to choose) + * + * The bias toward cutover when mixed is deliberate: if any column has + * passed the deploy gate, the user has acknowledged dual-writes are live. + * The cutover plan template explicitly handles "and these other columns + * still need their rollout work too" alongside the destructive steps. 
+ */ +export function rollupPlanStep( + states: ReadonlyArray, +): 'rollout' | 'cutover' | 'completed' | 'unknown' { + if (states.length === 0) return 'unknown' + if (states.some((s) => s.needs === 'cutover')) return 'cutover' + if (states.some((s) => s.needs === 'rollout')) return 'rollout' + if (states.every((s) => s.needs === 'completed')) return 'completed' + return 'unknown' +} + +async function latestByColumnSafe(client: pg.Client) { + try { + return await latestByColumn(client) + } catch (err) { + // The cs_migrations table may not exist yet (project that has run + // `stash init` but not `stash db install`, or a fresh database). + // Treat as "no events" rather than a hard error. + if ( + err instanceof Error && + /cs_migrations|schema "cipherstash"/i.test(err.message) + ) { + return new Map() + } + throw err + } +} diff --git a/packages/cli/src/commands/init/lib/setup-prompt.ts b/packages/cli/src/commands/init/lib/setup-prompt.ts index 238dfc93..95715185 100644 --- a/packages/cli/src/commands/init/lib/setup-prompt.ts +++ b/packages/cli/src/commands/init/lib/setup-prompt.ts @@ -1,3 +1,4 @@ +import type { PlanStep } from './parse-plan.js' import type { HandoffChoice, InitMode, Integration } from '../types.js' import { type PackageManager, runnerCommand } from '../utils.js' @@ -16,8 +17,8 @@ export interface SetupPromptContext { handoff: HandoffChoice /** Whether the agent should produce a plan first or implement directly. * Drives the entire prompt body — plan-mode tells the agent its task is - * to produce `.cipherstash/plan.md`; implement-mode is the original - * orient-and-route action prompt. */ + * to produce `.cipherstash/plan.md`; implement-mode is the orient-and- + * route action prompt. */ mode: InitMode /** Names of skills `stash init` copied into the project (e.g. * `stash-encryption`, `stash-drizzle`, `stash-cli`). 
The action prompt @@ -25,6 +26,12 @@ export interface SetupPromptContext { * the `agents-md` handoff (no skills directory installed) and for * `wizard` (the wizard installs its own). */ installedSkills: string[] + /** In plan mode, which scope of plan to produce. The `stash plan` command + * picks this by reading `cs_migrations` (`rollupPlanStep`); the user + * override is `--complete-rollout`. Ignored in implement mode. Defaults + * to `'rollout'` when plan mode is invoked without explicit state — that + * matches a fresh project where there are no recorded events yet. */ + planStep?: PlanStep } interface MigrationCommands { @@ -35,9 +42,9 @@ interface MigrationCommands { } /** - * Per-integration migration commands. Used in the path-1 (add new encrypted - * column) walkthrough so the prompt names the exact strings the agent should - * run, not a generic "run your migrations" hand-wave. + * Per-integration migration commands. Used in the "add a new encrypted + * column" walkthrough so the prompt names the exact strings the agent + * should run, not a generic "run your migrations" hand-wave. 
*/ function migrationCommands( integration: Integration, @@ -112,15 +119,15 @@ function rulesLocation(handoff: HandoffChoice): string { */ const SKILL_PURPOSES: Record = { 'stash-encryption': - 'the encryption API, schema definition, and the column-migration lifecycle (the source of truth for migrating an existing column)', + 'the encryption API, schema definition, and the rollout-and-cutover lifecycle (the source of truth for taking encryption to production)', 'stash-drizzle': - 'Drizzle-specific patterns: declaring encrypted columns, query operators, the migrating-an-existing-column worked example', + 'Drizzle-specific patterns: declaring encrypted columns, query operators, the rollout/cutover walkthrough for an existing column', 'stash-supabase': - 'Supabase-specific patterns: `encryptedSupabase` wrapper, encrypted query filters, transparent decryption', + 'Supabase-specific patterns: `encryptedSupabase` wrapper, encrypted query filters, transparent decryption, the rollout/cutover walkthrough', 'stash-dynamodb': 'DynamoDB encryption: per-item encrypt/decrypt, HMAC attribute keys, audit logging', 'stash-cli': - '`stash` command reference — `db install`, `encrypt {status,plan,backfill,cutover,drop}`, etc.', + '`stash` command reference — `status`, `plan`, `impl`, `db install`, `encrypt {backfill,cutover,drop}`, etc.', 'stash-secrets': 'storing and retrieving encrypted secrets (separate concern from column encryption)', 'stash-supply-chain-security': @@ -152,32 +159,7 @@ export function renderSetupPrompt(ctx: SetupPromptContext): string { : renderImplementPrompt(ctx) } -/** - * Render the implementation action prompt. - * - * This is the file the agent reads first after `stash init` hands off in - * implement mode. It does NOT prescribe a fixed sequence of edits — the - * agent doesn't yet know what the user wants. Instead the prompt: - * - * 1. Confirms what setup is complete. - * 2. Names the skills loaded and what each is for. - * 3. 
Explains the two real options for encrypting a column (add a new - * encrypted column from scratch, or migrate an existing populated - * column via the staged-twin lifecycle). In-place conversion is not - * supported and called out as such. - * 4. Tells the agent its FIRST response should be a routing question, not - * an action. - * 5. Lists the "stop and ask" rules that override flow mechanics. - * - * If `.cipherstash/plan.md` exists (a previous `stash init --plan` run), - * the prompt directs the agent to read it first and treat it as the - * source of truth for routing — the user has already done the - * orientation pass. - */ -export function renderImplementPrompt(ctx: SetupPromptContext): string { - const cli = runnerCommand(ctx.packageManager, 'stash') - const migration = migrationCommands(ctx.integration, ctx.packageManager) - +function setupChecklist(ctx: SetupPromptContext): string[] { const done: string[] = [ checked('Authenticated to CipherStash and selected a workspace'), checked(`Detected integration: \`${ctx.integration}\``), @@ -198,6 +180,33 @@ export function renderImplementPrompt(ctx: SetupPromptContext): string { ), ) } + return done +} + +/** + * Render the implementation action prompt. + * + * This is the file the agent reads first after `stash init` hands off in + * implement mode. It does NOT prescribe a fixed sequence of edits — the + * agent doesn't yet know what the user wants. Instead the prompt: + * + * 1. Confirms what setup is complete. + * 2. Names the skills loaded and what each is for. + * 3. Explains the two real options for encrypting a column (add a new + * encrypted column from scratch, or migrate an existing populated + * column via the encryption rollout + cutover lifecycle). In-place + * conversion is not supported and called out as such. + * 4. Tells the agent its FIRST response should be a routing question, not + * an action. + * 5. Lists the "stop and ask" rules that override flow mechanics. 
+ * + * If `.cipherstash/plan.md` exists (a previous `stash plan` run), the + * prompt directs the agent to read it first and treat it as the source + * of truth for routing — the user has already done the orientation pass. + */ +export function renderImplementPrompt(ctx: SetupPromptContext): string { + const cli = runnerCommand(ctx.packageManager, 'stash') + const migration = migrationCommands(ctx.integration, ctx.packageManager) const sections: string[] = [ '# CipherStash setup — orient and ask', @@ -206,6 +215,10 @@ export function renderImplementPrompt(ctx: SetupPromptContext): string { '', '`stash init` has finished its mechanical setup. Your job is **not** to start editing schema or running migrations immediately. Your job is to **orient the user with the two real options for encrypting a column, then ask which one they want before touching anything**. Pick concrete table/column names from `.cipherstash/context.json` when describing the options so the user can recognise their own data.', '', + '## Where am I?', + '', + `Run \`${cli} status\` before you start editing. It is disk-only, idempotent, and tells you which encryption rollouts are in flight, which have been deployed, and what's left. Re-run it after every transition — never act blind.`, + '', '## Existing plan', '', `Before anything else, check whether \`${PLAN_REL_PATH}\` exists. If it does, the user has already done a planning pass with you (or another agent). Read it as the source of truth for which path applies, which tables/columns are in scope, and the deploy ordering — do not re-ask the routing question. Confirm with the user that the plan is still current, then execute it. 
If the plan looks stale (the schema or context has moved on), say so and propose specific updates rather than starting fresh.`, @@ -214,7 +227,7 @@ export function renderImplementPrompt(ctx: SetupPromptContext): string { '', '## What `stash init` already did', '', - ...done, + ...setupChecklist(ctx), '', '## Skills loaded', '', @@ -245,11 +258,22 @@ export function renderImplementPrompt(ctx: SetupPromptContext): string { '', "Use when the column **already exists** in the user's database and contains live data that must be preserved.", '', - "Why it's staged: there is no atomic way to replace a populated column with an encrypted one without corrupting data. Instead the lifecycle adds a parallel `_encrypted` twin, dual-writes from the app while existing rows are backfilled, then renames the twin into the original column name and drops the old plaintext. The `stash encrypt` CLI commands drive each step; the `stash-encryption` skill has the full model.", + "Why it's staged: there is no atomic way to replace a populated column with an encrypted one without corrupting data. Instead, taking encryption to production happens in two passes around a deploy gate. The first pass — the **encryption rollout** — adds the encrypted twin column and the dual-write code; the user deploys that to production so every new write produces both plaintext and ciphertext. The second pass — the **encryption cutover** — backfills historical rows, renames the encrypted twin into the original column name, switches reads through the encryption client, and drops the old plaintext column.", + '', + '#### Encryption rollout — what lands before the deploy', + '', + `1. **Schema-add.** Add a \`_encrypted\` twin column (nullable \`jsonb\`) alongside the existing plaintext column in the user's real schema file. Generate and apply the schema migration. **If this is the first encrypted column in the project, configure the bundler exclusion now** — see the snippets in the previous section. 
Without it, importing the encryption client at backfill time will crash.`, + `2. **Register pending config** — \`${cli} db push\`. With an existing active config, this writes the new column-set as \`pending\`. Cutover (later) will promote it. (If this is the very first push for the project, db push writes active directly — fine, the rest of the flow still works.)`, + `3. **Dual-write.** Edit the application code so **every persistence path that mutates this row writes both \`\` (plaintext, unchanged) and \`_encrypted\` (ciphertext via the encryption client) — in the same transaction, on every code branch, with no exceptions.** A single missed branch causes silent migration drift later. Reads still come from the plaintext column.`, + '', + `⛔ **Deploy gate.** Stop here. The application must be running this code in production — the deployed environment that owns the database — before backfill is safe to run. "Live on the user's laptop" or "live in CI" does not count. After the user deploys, tell them to run`, + '', + ` \`${cli} status\``, + '', + `to confirm where they are, then \`${cli} plan\` to draft the cutover. Do not run \`${cli} encrypt backfill\`, \`${cli} encrypt cutover\`, or \`${cli} encrypt drop\` until that has happened — \`${cli} impl\` will refuse to run cutover-step plans without a recorded \`dual_writing\` event.`, + '', + '#### Encryption cutover — after dual-writes are live', '', - "1. **Schema-add.** Add an `_encrypted` twin column (nullable `jsonb`) alongside the existing plaintext column in the user's real schema file. Generate and apply the schema migration. **If this is the first encrypted column in the project, configure the bundler exclusion now** — see the snippets in the previous section. Without it, importing the encryption client at backfill time will crash.", - `2. **Register pending config** — \`${cli} db push\`. With an existing active config, this writes the new column-set as \`pending\`. Cutover (step 5) will promote it. 
(If this is the very first push for the project, db push writes active directly — fine, the rest of the flow still works.)`, - '3. **Dual-write.** Edit the application code so every insert/update writes to *both* `` (plaintext, unchanged) and `_encrypted` (ciphertext via the encryption client). Reads still come from the plaintext column. Ship that code change.', `4. **Backfill.** Run \`${cli} encrypt backfill --table --column \`. The CLI prompts the user (or accepts \`--confirm-dual-writes-deployed\` non-interactively) to confirm dual-writes are live, then chunks through the existing rows. Resumable; checkpoints to \`cs_migrations\` after every chunk. SIGINT-safe.`, `5. **Switch the schema and re-push, then cutover.** Update the schema file to declare the encrypted column under its final name (drop \`_encrypted\` suffix, switch \`\` to \`encryptedType\`). Run \`${cli} db push\` again — pending now reflects the renamed shape. Then \`${cli} encrypt cutover --table --column \` runs the rename in one transaction (\`\` → \`_plaintext\`, \`_encrypted\` → \`\`) and promotes pending → active.`, '6. **Wire the read path through the encryption client.** Post-cutover, `` holds ciphertext. Read code paths must decrypt before returning the value to callers — `decryptModel(row, table)` for Drizzle, the `encryptedSupabase` wrapper for Supabase, or the equivalent `decrypt`/`bulkDecryptModels` calls. Without this step, your read paths return raw `eql_v2_encrypted` payloads to end users. The integration skill has the exact API.', @@ -264,7 +288,7 @@ export function renderImplementPrompt(ctx: SetupPromptContext): string { '', '## Your first response', '', - 'Before any edits, send the user a short orientation message. Confirm setup is complete, list the skills loaded with one-line purposes, summarise the two options in your own words, and end with a clear question — *"Which would you like to do? 
You can name a specific table+column or describe what you\'re trying to protect."* Reference concrete tables/columns from `.cipherstash/context.json` when it helps.', + `Before any edits, send the user a short orientation message. Confirm setup is complete, list the skills loaded with one-line purposes, summarise the two options in your own words, and end with a clear question — *"Which would you like to do? You can name a specific table+column or describe what you're trying to protect."* Reference concrete tables/columns from \`.cipherstash/context.json\` when it helps. Mention that they can run \`${cli} status\` at any time to see where each rollout is.`, '', 'Once the user answers, execute the relevant flow. Show diffs / generated SQL before applying. Pause for review at every database-mutating step.', '', @@ -294,48 +318,51 @@ export function renderImplementPrompt(ctx: SetupPromptContext): string { * Plan-mode tells the agent its task is to produce a reviewable plan file * at `.cipherstash/plan.md` — no schema edits, no migrations, no `db push`, * no `encrypt *` mutations during this phase. Read-only inspection - * (`stash db status`, schema grep, file reads) is fine. + * (`stash status`, `stash db status`, schema grep, file reads) is fine. + * + * Dispatches by `ctx.planStep`: * - * The plan covers: which table(s) and column(s) to protect, which - * lifecycle path applies per column (path 1 = new column / path 3 = - * migrate existing), the deploy ordering for path-3 columns, any - * project-specific risks, and the exact CLI sequence to execute when the - * user is ready to implement. + * `'rollout'` (default) — schema-add + dual-write code + db push. + * Plan stops at the deploy gate. + * `'cutover'` — backfill + cutover + drop. Pre-condition: + * `dual_writing` recorded for the targeted + * columns. + * `'complete'` — full lifecycle in one document. Used for the + * `--complete-rollout` escape hatch (databases + * without a deployed application to gate on). 
*/ export function renderPlanPrompt(ctx: SetupPromptContext): string { - const cli = runnerCommand(ctx.packageManager, 'stash') + const step: PlanStep = ctx.planStep ?? 'rollout' - const done: string[] = [ - checked('Authenticated to CipherStash and selected a workspace'), - checked(`Detected integration: \`${ctx.integration}\``), - checked( - `Wrote a placeholder encryption client at \`${ctx.encryptionClientPath}\` (a small file showing the encryption-client patterns; the user's real Drizzle/Supabase schema files remain authoritative)`, - ), - ] - if (ctx.stackInstalled) { - done.push(checked('Installed `@cipherstash/stack` (runtime)')) - } - if (ctx.cliInstalled) { - done.push(checked('Installed `stash` (CLI, dev dep)')) - } - if (ctx.eqlInstalled) { - done.push( - checked( - 'Installed the EQL extension and `cipherstash.cs_migrations` into the database', - ), - ) + switch (step) { + case 'rollout': + return renderRolloutPlanPrompt(ctx) + case 'cutover': + return renderCutoverPlanPrompt(ctx) + case 'complete': + return renderCompletePlanPrompt(ctx) } +} - const sections: string[] = [ - '# CipherStash setup — write a plan', +function planSharedHeader(ctx: SetupPromptContext, title: string): string[] { + return [ + title, '', `Integration: \`${ctx.integration}\` · Package manager: \`${ctx.packageManager}\``, '', - `\`stash plan\` runs the planning phase — your job is to produce a reviewable plan at \`${PLAN_REL_PATH}\`, **not** to make schema or code changes. Read-only inspection (\`${cli} db status\`, reading schema files, grepping the codebase) is encouraged. Schema edits, migrations, \`${cli} db push\`, and any \`${cli} encrypt *\` mutations are deferred to \`${cli} impl\`, which the user will run after reviewing and approving the plan.`, + ] +} + +function planSharedSetupBlock(ctx: SetupPromptContext): string[] { + const cli = runnerCommand(ctx.packageManager, 'stash') + return [ + '## Where am I?', + '', + `Run \`${cli} status\` first. 
It tells you which columns are mid-rollout, which have been deployed, and what's left. Re-read it as you go — never plan blind.`, '', '## What `stash init` already did', '', - ...done, + ...setupChecklist(ctx), '', '## Skills loaded', '', @@ -345,34 +372,105 @@ export function renderPlanPrompt(ctx: SetupPromptContext): string { '', 'Read the skills before answering API or pattern questions. The doctrine in `AGENTS.md` (or its inlined equivalent) covers the invariants that apply regardless of which flow you take — never log plaintext, never `.notNull()` on creation, etc.', '', - '## The two options', - '', - 'There are exactly two supported ways to encrypt a column. The plan must identify which one applies per column:', - '', - '- **Add a new encrypted column** — the column does not yet exist; no plaintext predecessor to preserve. Single-deploy.', - '- **Migrate an existing column** — the column already exists with live data. Staged across four deploys: schema-add + dual-write → backfill run → cutover + read-from-encrypted → drop. The lifecycle is irreducible; the plan must spell out the deploy ordering so reviewers can sequence PRs correctly.', + ] +} + +function planSharedNotDoBlock(ctx: SetupPromptContext): string[] { + const cli = runnerCommand(ctx.packageManager, 'stash') + return [ + '## What you must NOT do', '', - 'Converting a populated column in place is **not** supported — any "just swap the type" approach corrupts data. If the user asks for that, the plan must explain why and route them to the migrate-existing flow.', + bullet( + 'Edit schema files, application code, or migration files. 
The plan describes future changes — it does not perform them.', + ), + bullet( + `Run \`${cli} db push\`, \`${cli} encrypt backfill\`, \`${cli} encrypt cutover\`, \`${cli} encrypt drop\`, \`${cli} db activate\`, or any other state-mutating command.`, + ), + bullet( + 'Run schema migrations (`drizzle-kit migrate`, `supabase migration up`, `prisma migrate`, etc.).', + ), + bullet( + 'Modify the placeholder encryption client beyond what is required to read it.', + ), '', - '## Your task: produce a plan file', + `Read-only commands (\`${cli} status\`, \`${cli} db status\`, file reads, greps, \`${cli} doctor\` if available) are fine and encouraged — the plan is more useful when grounded in the actual current state.`, '', - `Write \`${PLAN_REL_PATH}\` covering, for each table+column the user wants to protect:`, + ] +} + +function planSharedStopAndAsk(): string[] { + return [ + '## Stop and ask the user when', '', bullet( - '**A machine-readable summary block at the very top of the file**, before any heading or prose. `stash impl` parses this to render a confirmation panel before launching implementation. Use this exact shape (valid JSON, single block, no other content inside the comment):', + "The user asks to convert a populated column in place. Explain why it doesn't work and offer the migrate-existing-column flow instead.", + ), + bullet( + "A column the user names is already encrypted (`eql_v2_encrypted` udt) but with a different EQL config than they've described. This is the post-cutover re-encryption case (`stash encrypt update`, not yet shipped) — surface it in the plan as a flagged risk.", + ), + bullet( + 'You discover existing partial CipherStash setup that disagrees with what the user is describing — someone else may have run `stash init` earlier with different choices. 
Note this in the plan and ask the user to clarify before writing prescriptive steps.', + ), + bullet( + "The user names columns that don't appear in `.cipherstash/context.json` or in the schema files you can see. Confirm the names rather than guessing.", ), '', + ] +} + +function planSummaryBlockExample(step: PlanStep): string { + return [ ' ```', ' ', ' ```', + ].join('\n') +} + +/** Plan template for the encryption-rollout step (schema-add + dual-write). */ +function renderRolloutPlanPrompt(ctx: SetupPromptContext): string { + const cli = runnerCommand(ctx.packageManager, 'stash') + + const sections: string[] = [ + ...planSharedHeader( + ctx, + '# CipherStash setup — write an encryption rollout plan', + ), + `\`${cli} plan\` runs the planning phase for the **encryption rollout** — your job is to produce a reviewable plan at \`${PLAN_REL_PATH}\` that covers everything the user must land in their repo and deploy to production before any historical data can be backfilled. **Do not** make code or schema changes here; do not run mutating CLI commands. Read-only inspection is encouraged.`, '', - ` \`path\` is \`"new"\` for additive columns (no plaintext predecessor) and \`"migrate"\` for columns that already exist with live data. The block must remain in sync with the prose that follows; if you revise the plan, regenerate the summary.`, + `The encryption-cutover plan (backfill → switch reads to encrypted → drop plaintext) is a separate plan written by re-running \`${cli} plan\` after dual-writes are live in production. Stay in scope.`, + '', + ...planSharedSetupBlock(ctx), + '## What this plan covers', + '', + "Two paths, depending on whether the column already exists:", + '', + bullet( + '**Add a new encrypted column** — single deploy, no rollout/cutover split. Declared encrypted from the start.', + ), + bullet( + '**Encryption rollout for an existing column** — the encrypted twin column, the application-side dual-write code, and `stash db push` (writes pending). 
All of this lands in one PR; the user deploys it; `cs_migrations` records `dual_writing` the next time backfill is invoked.', + ), + '', + "Converting a populated column in place is **not** supported — any \"just swap the type\" approach corrupts data. If the user asks for that, the plan must explain why and route them to the encryption-rollout flow.", + '', + '## Your task: produce the rollout plan file', + '', + `Write \`${PLAN_REL_PATH}\` covering, for each table+column the user wants to protect:`, + '', + bullet( + '**A machine-readable summary block at the very top of the file**, before any heading or prose. `stash impl` parses this to render a confirmation panel before launching implementation. Use this exact shape (valid JSON, single block, no other content inside the comment):', + ), + '', + planSummaryBlockExample('rollout'), + '', + ` \`step\` is \`"rollout"\` for this plan. \`path\` is \`"new"\` for additive columns (no plaintext predecessor) and \`"migrate"\` for columns that already exist with live data. Keep the block in sync with the prose; if you revise the plan, regenerate the summary.`, '', 'Then, the prose plan covers:', '', @@ -380,61 +478,206 @@ export function renderPlanPrompt(ctx: SetupPromptContext): string { "The table and column names (extract candidates from `.cipherstash/context.json`; if the user hasn't yet said which columns matter, ask before writing the plan).", ), bullet( - 'Which lifecycle path applies (path 1 = add new / path 3 = migrate existing). Justify briefly — the user should be able to verify the choice without reading the skill.', + 'Which path applies per column (additive new column or encryption-rollout for an existing one). Justify briefly.', ), bullet( - 'For path-3 columns: the four-deploy sequence with one line per deploy on what changes (schema, app code, lifecycle command). Call out which step the deploy gates on (e.g. 
"deploy 2 must wait until deploy 1 is live in production before backfill is safe").', + 'For migrate columns: what the rollout PR contains — schema-add, `db push` (pending), and the exact dual-write code change. The dual-write definition matters: every persistence path that mutates the row writes both columns, in the same transaction, on every code branch.', ), bullet( `Project-specific risks. Common ones: bundler exclusion not yet configured (Next.js / webpack / Vite), top-level-await in the placeholder encryption client breaks non-Next contexts, existing partial CipherStash state (run \`${cli} db status\` and note any pre-existing encrypted columns or pending configs).`, ), bullet( - `The exact CLI sequence to execute when the user is ready to implement (the \`${cli} encrypt {backfill,cutover,drop}\` invocations with concrete \`--table\` / \`--column\` values).`, + 'A "Deploy gate" section near the end of the plan that explicitly says: after the rollout PR is in production and serving real traffic, the user runs `' + + cli + + ' status` to confirm, then `' + + cli + + ' plan` again — at that point the CLI will detect dual-writes and produce a separate cutover plan covering backfill, read-path switch, and drop.', ), bullet( - "Open questions for the user — anything you can't determine from the schema, context.json, or the skills. Better to surface than guess.", + "Open questions for the user — anything you can't determine from the schema, context.json, or the skills.", ), '', - `After writing the plan, also offer to copy it into \`docs/plans/cipherstash-encryption.md\` if the project has a \`docs/plans/\` directory — many teams version their plans alongside the code. Don't copy without asking. 
If \`docs/plans/\` does not exist, leave the plan at \`${PLAN_REL_PATH}\` and don't create the directory.`, + `After writing the plan, also offer to copy it into \`docs/plans/cipherstash-encryption-rollout.md\` if the project has a \`docs/plans/\` directory — many teams version their plans alongside the code. Don't copy without asking. If \`docs/plans/\` does not exist, leave the plan at \`${PLAN_REL_PATH}\` and don't create the directory.`, '', - '## What you must NOT do', + ...planSharedNotDoBlock(ctx), + '## Your first response', + '', + `Send the user a short orientation message before writing anything. Confirm setup is complete, list the skills loaded with one-line purposes, explain what an encryption rollout is in your own words, and end with a clear question — *"Which table(s) and column(s) would you like the rollout plan to cover? You can name them or describe what you're trying to protect."* Reference concrete tables/columns from \`.cipherstash/context.json\` when it helps.`, + '', + `Once the user answers, write \`${PLAN_REL_PATH}\`. Show the plan in chat as well so the user can react inline. After the plan is approved, tell the user to run \`${cli} impl\` to execute it.`, + '', + ...planSharedStopAndAsk(), + ] + + return sections.join('\n') +} + +/** Plan template for the encryption-cutover step (backfill + cutover + drop). */ +function renderCutoverPlanPrompt(ctx: SetupPromptContext): string { + const cli = runnerCommand(ctx.packageManager, 'stash') + + const sections: string[] = [ + ...planSharedHeader( + ctx, + '# CipherStash setup — write an encryption cutover plan', + ), + `\`${cli} plan\` detected that dual-writes are recorded as live in \`cs_migrations\` for at least one column. Your job is to produce a reviewable plan at \`${PLAN_REL_PATH}\` covering the **encryption cutover** — backfilling historical rows, switching reads through the encryption client, and dropping the old plaintext column. 
**Do not** make code or schema changes here; do not run mutating CLI commands. Read-only inspection is encouraged.`, + '', + 'The encryption rollout (schema-add + dual-write code + `db push`) is assumed already deployed to production. If the prose ends up describing dual-write code edits, you are off-scope — re-anchor on what cutover-step work remains.', + '', + ...planSharedSetupBlock(ctx), + '## What this plan covers', + '', + 'For each column whose dual-writes are live in production:', '', bullet( - 'Edit schema files, application code, or migration files. The plan describes future changes — it does not perform them.', + '**Backfill.** Encrypt the historical rows that pre-date the rollout deploy. Resumable; chunked; SIGINT-safe.', ), bullet( - `Run \`${cli} db push\`, \`${cli} encrypt backfill\`, \`${cli} encrypt cutover\`, \`${cli} encrypt drop\`, \`${cli} db activate\`, or any other state-mutating command.`, + '**Schema rename and re-push.** Update the schema declaration to put the encrypted form under its final column name; `stash db push` registers the renamed pending config.', ), bullet( - 'Run schema migrations (`drizzle-kit migrate`, `supabase migration up`, `prisma migrate`, etc.).', + '**Cutover.** A single transaction renames `` → `_plaintext`, `_encrypted` → ``, and promotes the pending EQL config to active.', ), bullet( - 'Modify the placeholder encryption client beyond what is required to read it.', + '**Read path.** Application reads of `` now return ciphertext until the read path decrypts via the encryption client. The plan must specify what changes per read site.', + ), + bullet( + '**Remove dual-writes.** The plaintext column is now `_plaintext` and is no longer authoritative. Delete the dual-write code paths.', + ), + bullet( + '**Drop plaintext.** `stash encrypt drop` emits a migration that removes `_plaintext`. 
Apply with the project\'s normal migration tooling.', + ), + '', + '## Your task: produce the cutover plan file', + '', + `Write \`${PLAN_REL_PATH}\` covering, for each column scheduled for cutover:`, + '', + bullet( + '**A machine-readable summary block at the very top of the file**, before any heading or prose. `stash impl` parses this to render a confirmation panel and to enforce the deploy gate (it refuses cutover-step plans without a recorded `dual_writing` event). Use this exact shape:', + ), + '', + planSummaryBlockExample('cutover'), + '', + ` \`step\` is \`"cutover"\` for this plan. \`path\` should be \`"migrate"\` for every column (cutover only applies to migrate columns; new columns never went through dual-writing). Keep the block in sync with the prose.`, + '', + 'Then the prose plan covers:', + '', + bullet( + 'For each column: backfill ordering (which to do first; any large tables that should run during low-traffic windows), the exact `' + + cli + + ' encrypt backfill` invocation with concrete `--table` / `--column` values.', + ), + bullet( + 'The schema-edit + `db push` step, with the exact rename pattern (drop `_encrypted` suffix on the encrypted column, switch the original column declaration off `text`/`varchar` and onto the encrypted type).', + ), + bullet( + 'The cutover invocation per column: `' + + cli + + ' encrypt cutover --table --column `.', + ), + bullet( + 'Read-path code changes: every site that reads `` from this table must decrypt via the encryption client. 
Enumerate the sites you can find via grep so the user can verify nothing was missed.', + ), + bullet( + 'Removal of the dual-write code from the persistence layer.', + ), + bullet( + 'The drop invocation: `' + + cli + + ' encrypt drop --table --column `, plus the schema-migration apply step that follows.', + ), + bullet( + `Risks specific to cutover: row-count for the backfill (use \`${cli} db status\` to estimate if helpful), tables under heavy write load (cutover holds a brief lock on the rename), application code that constructs SQL by string (those reads won't transparently decrypt).`, + ), + bullet( + "Open questions for the user — anything you can't determine from the schema, context.json, or the skills.", ), '', - `Read-only commands (\`${cli} db status\`, file reads, greps, \`${cli} doctor\` if available) are fine and encouraged — the plan is more useful when grounded in the actual current state.`, + `After writing the plan, also offer to copy it into \`docs/plans/cipherstash-encryption-cutover.md\` if the project has a \`docs/plans/\` directory. Don't copy without asking.`, '', + ...planSharedNotDoBlock(ctx), '## Your first response', '', - `Send the user a short orientation message before writing anything. Confirm setup is complete, list the skills loaded with one-line purposes, summarise the two options in your own words, and end with a clear question — *"Which table(s) and column(s) would you like the plan to cover? You can name them or describe what you're trying to protect."* Reference concrete tables/columns from \`.cipherstash/context.json\` when it helps.`, + `Send the user a short orientation message before writing anything. Confirm setup is complete, name the columns whose dual-writes are recorded as live (you can derive this from \`${cli} status\`), and ask the user — *"Which of these would you like the cutover plan to cover? Any preferences on ordering, e.g. 
small tables before large ones?"*`, '', `Once the user answers, write \`${PLAN_REL_PATH}\`. Show the plan in chat as well so the user can react inline. After the plan is approved, tell the user to run \`${cli} impl\` to execute it.`, '', - '## Stop and ask the user when', + ...planSharedStopAndAsk(), + ] + + return sections.join('\n') +} + +/** Plan template for the complete-rollout escape hatch. */ +function renderCompletePlanPrompt(ctx: SetupPromptContext): string { + const cli = runnerCommand(ctx.packageManager, 'stash') + + const sections: string[] = [ + ...planSharedHeader( + ctx, + '# CipherStash setup — write a complete encryption rollout plan', + ), + `The user invoked \`${cli} plan --complete-rollout\`. This produces a single plan that covers the entire lifecycle — schema-add, dual-write code, backfill, cutover, drop — without the production-deploy gate that normally separates rollout from cutover.`, + '', + "**This is the escape hatch.** It is only safe when the database backing this project is *not* serving a deployed application — local development against a seeded DB, ephemeral test environments, sandboxes. The plan must call this out and ask the user to confirm. If they're working against a deployed app, redirect them to the staged flow (`" + + cli + + ' plan` without the flag) so the encryption rollout deploys before backfill runs.', + '', + ...planSharedSetupBlock(ctx), + '## What this plan covers', + '', + 'The full lifecycle for each column the user wants to protect, in order:', '', bullet( - "The user asks to convert a populated column in place. Explain why it doesn't work and offer the migrate-existing-column flow instead.", + "**Add new encrypted columns** — declared encrypted from the start; single-deploy.", ), bullet( - "A column the user names is already encrypted (`eql_v2_encrypted` udt) but with a different EQL config than they've described. 
This is the post-cutover re-encryption case (`stash encrypt update`, not yet shipped) — surface it in the plan as a flagged risk.", + '**Migrate existing columns** — schema-add → dual-write code → `db push` → backfill → schema rename → `db push` → cutover → read-path switch → remove dual-write code → drop plaintext. No deploy gate between rollout and cutover steps because there is no deployed application to gate on.', ), + '', + '## Your task: produce the complete-rollout plan file', + '', + `Write \`${PLAN_REL_PATH}\` with:`, + '', bullet( - 'You discover existing partial CipherStash setup that disagrees with what the user is describing — someone else may have run `stash init` earlier with different choices. Note this in the plan and ask the user to clarify before writing prescriptive steps.', + '**A machine-readable summary block at the very top of the file.** Use this exact shape:', ), + '', + planSummaryBlockExample('complete'), + '', + ` \`step\` is \`"complete"\` for this plan. \`path\` is \`"new"\` or \`"migrate"\` per column.`, + '', bullet( - "The user names columns that don't appear in `.cipherstash/context.json` or in the schema files you can see. Confirm the names rather than guessing.", + "An explicit warning at the top of the prose: this plan skips the production-deploy gate; backfill will run against rows that may not have been seen by deployed dual-write code. Confirm with the user that no deployed application is writing to this database before they run `" + + cli + + ' impl`.', + ), + bullet( + 'For migrate columns: the full step list with the exact CLI invocations (`' + + cli + + ' encrypt backfill`, `cutover`, `drop`) and concrete `--table` / `--column` values.', + ), + bullet( + 'For new columns: the additive single-deploy walkthrough.', + ), + bullet( + `Project-specific risks. 
Common ones: bundler exclusion not yet configured (Next.js / webpack / Vite), top-level-await in the placeholder encryption client breaks non-Next contexts, existing partial CipherStash state (run \`${cli} db status\` and note any pre-existing encrypted columns or pending configs).`, + ), + bullet( + "Open questions for the user — anything you can't determine from the schema, context.json, or the skills.", ), '', + `After writing the plan, also offer to copy it into \`docs/plans/cipherstash-encryption.md\` if the project has a \`docs/plans/\` directory. Don't copy without asking.`, + '', + ...planSharedNotDoBlock(ctx), + '## Your first response', + '', + `Send the user a short orientation message before writing anything. Confirm setup is complete, **explicitly name that this is the escape-hatch flow that skips the production-deploy gate**, and end with — *"Which table(s) and column(s) would you like to encrypt end-to-end? And can you confirm this database isn't backing a deployed application?"*`, + '', + `Once the user answers, write \`${PLAN_REL_PATH}\`. Show the plan in chat as well so the user can react inline. After the plan is approved, tell the user to run \`${cli} impl\` to execute it.`, + '', + ...planSharedStopAndAsk(), ] return sections.join('\n') diff --git a/packages/cli/src/commands/init/lib/write-context.ts b/packages/cli/src/commands/init/lib/write-context.ts index 1bffbdff..e702563a 100644 --- a/packages/cli/src/commands/init/lib/write-context.ts +++ b/packages/cli/src/commands/init/lib/write-context.ts @@ -156,6 +156,7 @@ export function buildSetupPromptContext( handoff, mode: state.mode ?? 
'implement', installedSkills, + planStep: state.planStep, } } diff --git a/packages/cli/src/commands/init/types.ts b/packages/cli/src/commands/init/types.ts index 438a6be7..e365a857 100644 --- a/packages/cli/src/commands/init/types.ts +++ b/packages/cli/src/commands/init/types.ts @@ -1,3 +1,4 @@ +import type { PlanStep } from './lib/parse-plan.js' import type { AgentEnvironment } from './detect-agents.js' import type { PackageManager } from './utils.js' @@ -62,6 +63,13 @@ export interface InitState { * command itself: `stash plan` always sets `'plan'`, `stash impl` always * sets `'implement'`. */ mode?: InitMode + /** Which step of the encryption rollout the plan should target. Set by + * `stash plan` after reading `cs_migrations` (or when the user passes + * `--complete-rollout`). Drives the plan-prompt template selection. Not + * meaningful in implement mode — `stash impl` reads the step from the + * on-disk plan-summary block instead. Defaults to `'rollout'` when the + * CLI has nothing else to go on (fresh project, no DB connectivity). 
*/ + planStep?: PlanStep } /** diff --git a/packages/cli/src/commands/plan/index.ts b/packages/cli/src/commands/plan/index.ts index da3f11dc..caf32745 100644 --- a/packages/cli/src/commands/plan/index.ts +++ b/packages/cli/src/commands/plan/index.ts @@ -1,9 +1,12 @@ import { existsSync } from 'node:fs' import { resolve } from 'node:path' +import { readManifest } from '@cipherstash/migrate' import * as p from '@clack/prompts' import { howToProceedStep } from '../impl/steps/how-to-proceed.js' import { type AgentEnvironment, detectAgents } from '../init/detect-agents.js' import { readContextFile } from '../init/lib/read-context.js' +import { detectColumnStates, rollupPlanStep } from '../init/lib/rollout-state.js' +import type { PlanStep } from '../init/lib/parse-plan.js' import { PLAN_REL_PATH } from '../init/lib/setup-prompt.js' import { CONTEXT_REL_PATH, @@ -15,6 +18,7 @@ import { detectPackageManager, runnerCommand } from '../init/utils.js' function buildStateFromContext( ctx: ContextFile, agents: AgentEnvironment, + planStep: PlanStep, ): InitState { return { integration: ctx.integration, @@ -26,22 +30,91 @@ function buildStateFromContext( eqlInstalled: true, agents, mode: 'plan', + planStep, } } +/** + * Confirm the user wants to skip the production-deploy gate. Default-no is + * the security stance — the warning has to be a deliberate `y` press, not + * a stray Enter. + */ +async function confirmCompleteRollout(): Promise { + p.log.warn( + '`--complete-rollout` plans the full encryption lifecycle (schema-add through drop) in one document. It SKIPS the production-deploy gate that protects backfill from running before dual-writes are live.', + ) + p.log.warn( + 'Only safe when this database is not backing a deployed application — local development, ephemeral test environments, or freshly seeded sandboxes. 
If a deployed app writes to this database, rows inserted during the planned backfill will land in plaintext only and you will need a recovery pass.', + ) + const ok = await p.confirm({ + message: 'Proceed with a complete-rollout plan?', + initialValue: false, + }) + if (p.isCancel(ok) || !ok) throw new CancelledError() +} + +/** + * Detect what step the encryption rollout is at, by reading + * `cs_migrations` for every column declared in `.cipherstash/migrations.json`. + * + * Falls back to `'rollout'` when: + * - the manifest is missing or empty (fresh project, nothing tracked yet), + * - `stash.config.ts` can't be loaded (no DATABASE_URL), + * - the database isn't reachable. + * + * The fallback is intentional: a rollout-shaped plan is always a safe + * starting point, and the agent will ask the user about path=new vs + * path=migrate per column anyway. + */ +async function detectPlanStep(cwd: string): Promise { + const manifest = await readManifest(cwd).catch(() => null) + if (!manifest) return 'rollout' + + const columns: { table: string; column: string }[] = [] + for (const [table, cols] of Object.entries(manifest.tables)) { + for (const col of cols) { + columns.push({ table, column: col.column }) + } + } + if (columns.length === 0) return 'rollout' + + let databaseUrl: string + try { + const { loadStashConfig } = await import('../../config/index.js') + const config = await loadStashConfig() + databaseUrl = config.databaseUrl + } catch { + return 'rollout' + } + + const states = await detectColumnStates(databaseUrl, columns) + const step = rollupPlanStep(states) + // `unknown` and `completed` both map to rollout for plan-step selection: + // unknown — no events; treat as fresh. + // completed — every tracked column is `dropped`; the user must want to + // plan something new, so a rollout-shaped plan is the right + // canvas. (If they really have nothing to do, the agent + // will figure that out and tell them.) 
+ if (step === 'cutover' || step === 'rollout') return step + return 'rollout' +} + /** * `stash plan` — draft a reviewable encryption plan. * - * Pre-flights `.cipherstash/context.json` (errors with a `stash init` - * pointer if missing). Always sets `mode='plan'`, dispatches to a handoff - * target via `howToProceedStep`, and ends with a chain prompt offering to - * continue into `stash impl`. + * State-driven: reads `.cipherstash/migrations.json` and `cs_migrations` + * to decide whether to produce an encryption-rollout plan (the default + * starting point) or an encryption-cutover plan (when at least one column + * has crossed the deploy gate). The selection is invisible to the user — + * they just run `stash plan` and get a plan for whatever step is next. * - * The deliverable is `.cipherstash/plan.md` with a machine-readable - * summary block at the top — `stash impl` parses that block to render a - * confirmation panel before launching implementation. + * Flags: + * `--complete-rollout` — escape hatch for databases without a deployed + * application. Plans schema-add through drop in + * one document with no deploy gate. Confirms + * (default-no) before generating. */ -export async function planCommand() { +export async function planCommand(flags: Record = {}) { const cwd = process.cwd() const pm = detectPackageManager() const cli = runnerCommand(pm, 'stash') @@ -63,14 +136,28 @@ export async function planCommand() { ) } + let planStep: PlanStep + if (flags['complete-rollout']) { + await confirmCompleteRollout() + planStep = 'complete' + } else { + planStep = await detectPlanStep(cwd) + if (planStep === 'rollout') { + p.log.info( + 'Drafting an encryption-rollout plan (schema-add + dual-write code). After it ships to production, run `stash plan` again to draft the cutover.', + ) + } else { + p.log.info( + 'Detected dual-writes recorded in cs_migrations. 
Drafting an encryption-cutover plan (backfill, switch reads, drop plaintext).', + ) + } + } + const agents = detectAgents(cwd, process.env) - const state = buildStateFromContext(ctx, agents) + const state = buildStateFromContext(ctx, agents, planStep) await howToProceedStep.run(state) - // Chain into `stash impl` so the user doesn't have to copy/paste. Lazy - // import avoids a circular module load — plan and impl both pull from - // init/lib/ and need to be importable independently. if (process.stdout.isTTY) { const proceed = await p.confirm({ message: `Plan drafted at \`${PLAN_REL_PATH}\`. Continue to \`${cli} impl\` now?`, diff --git a/packages/cli/src/commands/status/__tests__/status.test.ts b/packages/cli/src/commands/status/__tests__/status.test.ts index b4db3ef4..b8008b5f 100644 --- a/packages/cli/src/commands/status/__tests__/status.test.ts +++ b/packages/cli/src/commands/status/__tests__/status.test.ts @@ -2,7 +2,18 @@ import { mkdirSync, mkdtempSync, rmSync, writeFileSync } from 'node:fs' import { tmpdir } from 'node:os' import { join } from 'node:path' import { afterEach, beforeEach, describe, expect, it } from 'vitest' -import { buildStages, nextAction, readProjectStatus } from '../index.js' +import { nextMoveHint, readProjectStatus } from '../index.js' +import { + type ColumnObservation, + buildColumnQuest, + buildQuestLog, + inferQuestPath, +} from '../quest.js' +import { + renderQuestLogJSON, + renderQuestLogPlain, + renderQuestLogTTY, +} from '../render.js' let cwd: string @@ -23,20 +34,6 @@ function writeContext(payload: Record): void { ) } -function writePlan(): void { - mkdirSync(join(cwd, '.cipherstash'), { recursive: true }) - writeFileSync(join(cwd, '.cipherstash', 'plan.md'), '# plan\n', 'utf-8') -} - -function writeSetupPrompt(): void { - mkdirSync(join(cwd, '.cipherstash'), { recursive: true }) - writeFileSync( - join(cwd, '.cipherstash', 'setup-prompt.md'), - '# prompt\n', - 'utf-8', - ) -} - const sampleContext = { cliVersion: '0.0.0', 
integration: 'drizzle' as const, @@ -66,21 +63,6 @@ describe('readProjectStatus', () => { expect(status.initialized).toBe(true) expect(status.context?.integration).toBe('drizzle') expect(status.planExists).toBe(false) - expect(status.agentEngaged).toBe(false) - }) - - it('reports plan written once plan.md exists', () => { - writeContext(sampleContext) - writePlan() - const status = readProjectStatus(cwd) - expect(status.planExists).toBe(true) - }) - - it('reports agentEngaged when setup-prompt.md exists', () => { - writeContext(sampleContext) - writeSetupPrompt() - const status = readProjectStatus(cwd) - expect(status.agentEngaged).toBe(true) }) it('treats malformed context.json as not-initialized rather than throwing', () => { @@ -95,89 +77,376 @@ describe('readProjectStatus', () => { }) }) -describe('buildStages', () => { - it('marks every stage pending in a virgin project', () => { - const stages = buildStages(readProjectStatus(cwd), 'pnpm dlx stash') - expect(stages.map((s) => s.status)).toEqual([ - 'pending', - 'pending', - 'pending', - ]) - // Init detail nudges the user to begin. 
- expect(stages[0].detail).toMatch(/init/) +describe('inferQuestPath', () => { + it('treats a column with no cs_migrations entry as new', () => { + expect( + inferQuestPath({ table: 't', column: 'c', phase: null, eql: null }), + ).toBe('new') }) - it('marks Initialized done and shows integration + table count when context exists', () => { - writeContext(sampleContext) - const stages = buildStages(readProjectStatus(cwd), 'pnpm dlx stash') - expect(stages[0].status).toBe('done') - expect(stages[0].detail).toContain('drizzle') - expect(stages[0].detail).toContain('pnpm') - expect(stages[0].detail).toContain('2 tables') + it('treats a column with any cs_migrations history as migrate', () => { + expect( + inferQuestPath({ + table: 't', + column: 'c', + phase: 'dual-writing', + }), + ).toBe('migrate') + }) + + it('defaults to migrate when DB connectivity is missing', () => { + // The 5-objective shape is more informative when we genuinely don't + // know — better to show the full lifecycle locked than to default to + // a 2-objective new-column shape that hides relevant work. + expect(inferQuestPath({ table: 't', column: 'c' })).toBe('migrate') }) +}) + +describe('buildColumnQuest — migrate path', () => { + function obs(extra: Partial): ColumnObservation { + return { table: 'users', column: 'email', phase: null, eql: null, ...extra } + } - it('uses singular "table" for a one-table project', () => { - writeContext({ - ...sampleContext, - schemas: [{ tableName: 'x', columns: [] }], + it('with no signals: 0/5, schema-add active, rest locked', () => { + const quest = buildColumnQuest(obs({ phase: 'dual-writing' })) + // Picking dual-writing forces the migrate path; reset and check no-events. 
+ const noEvents = buildColumnQuest({ + table: 'users', + column: 'email', + phase: 'dual-writing', }) - const stages = buildStages(readProjectStatus(cwd), 'pnpm dlx stash') - expect(stages[0].detail).toContain('1 table') - expect(stages[0].detail).not.toContain('1 tables') + expect(noEvents.path).toBe('migrate') + expect(quest.progress.total).toBe(5) }) - it('marks Plan written done and shows the plan path when plan exists', () => { - writeContext(sampleContext) - writePlan() - const stages = buildStages(readProjectStatus(cwd), 'pnpm dlx stash') - expect(stages[1].status).toBe('done') - expect(stages[1].detail).toContain('.cipherstash/plan.md') + it('phase=null + EQL pending + twin exists: schema-add done (1/5), dual-writes deployed active', () => { + // The encrypted twin column existing alongside the original is the + // unambiguous "this is a migrate column" signal — the rollout PR + // created `_encrypted` and ran `db push` (writing pending). The + // user has not yet recorded a `dual_writing` event, so the next + // active objective is "deploy dual-writes to production". 
+ const quest = buildColumnQuest( + obs({ + phase: null, + eql: { state: 'pending' }, + physicalEncryptedTwinExists: true, + }), + ) + expect(quest.path).toBe('migrate') + expect(quest.progress).toEqual({ done: 1, total: 5 }) + expect(quest.objectives[0].status).toBe('done') + expect(quest.objectives[1].status).toBe('active') + expect(quest.objectives[2].status).toBe('locked') }) - it('points at `plan` for next-step when init done but plan missing', () => { - writeContext(sampleContext) - const stages = buildStages(readProjectStatus(cwd), 'pnpm dlx stash') - expect(stages[1].status).toBe('pending') - expect(stages[1].detail).toMatch(/plan/) - expect(stages[2].detail).toMatch(/waiting on plan/) + it('phase=dual-writing: 2/5, backfill active', () => { + const quest = buildColumnQuest(obs({ phase: 'dual-writing' })) + expect(quest.progress).toEqual({ done: 2, total: 5 }) + expect(quest.objectives[1].status).toBe('done') + expect(quest.objectives[2].status).toBe('active') + expect(quest.objectives[2].label).toMatch(/backfill/i) }) - it('keeps Implementation pending even after agent engagement (DB state lives in encrypt status)', () => { - writeContext(sampleContext) - writePlan() - writeSetupPrompt() - const stages = buildStages(readProjectStatus(cwd), 'pnpm dlx stash') - expect(stages[2].status).toBe('pending') - expect(stages[2].detail).toContain('encrypt status') + it('phase=backfilling counts as 2/5 (backfill in flight is still the active step)', () => { + const quest = buildColumnQuest(obs({ phase: 'backfilling' })) + expect(quest.progress.done).toBe(2) + expect(quest.objectives[2].status).toBe('active') + }) + + it('phase=backfilled: 3/5, cutover active', () => { + const quest = buildColumnQuest(obs({ phase: 'backfilled' })) + expect(quest.progress.done).toBe(3) + expect(quest.objectives[3].status).toBe('active') + expect(quest.objectives[3].label).toMatch(/cut over/i) + }) + + it('phase=cut-over: 4/5, drop plaintext active', () => { + const quest = 
buildColumnQuest(obs({ phase: 'cut-over' })) + expect(quest.progress.done).toBe(4) + expect(quest.objectives[4].status).toBe('active') + expect(quest.objectives[4].label).toMatch(/drop plaintext/i) + }) + + it('phase=dropped: 5/5 (complete)', () => { + const quest = buildColumnQuest(obs({ phase: 'dropped' })) + expect(quest.progress).toEqual({ done: 5, total: 5 }) + expect(quest.complete).toBe(true) + expect(quest.nextMove).toBeUndefined() + }) + + it('next-move hint references concrete CLI invocations with --table/--column', () => { + const backfill = buildColumnQuest(obs({ phase: 'dual-writing' })) + expect(backfill.nextMove).toContain('stash encrypt backfill') + expect(backfill.nextMove).toContain('--table users') + expect(backfill.nextMove).toContain('--column email') + + const cutover = buildColumnQuest(obs({ phase: 'backfilled' })) + expect(cutover.nextMove).toContain('stash encrypt cutover') + + const drop = buildColumnQuest(obs({ phase: 'cut-over' })) + expect(drop.nextMove).toContain('stash encrypt drop') + }) + + it('falls back to physical-column existence as a schema-add signal', () => { + // cs_migrations is silent (schema_added events are never written); + // information_schema is the next-best signal that schema-add has been + // applied. When EQL config is missing too but the encrypted twin + // column physically exists, treat schema-add as done. 
+ const quest = buildColumnQuest( + obs({ + phase: null, + eql: null, + physicalEncryptedTwinExists: true, + }), + ) + expect(quest.progress.done).toBe(1) + expect(quest.objectives[0].status).toBe('done') }) }) -describe('nextAction', () => { - it('points at init when uninitialized', () => { - expect(nextAction(readProjectStatus(cwd), 'pnpm dlx stash')).toMatch(/init/) +describe('buildColumnQuest — new path', () => { + it('no EQL config: 0/2, schema-add active', () => { + const quest = buildColumnQuest({ + table: 'orders', + column: 'note', + phase: null, + eql: null, + }) + expect(quest.path).toBe('new') + expect(quest.progress).toEqual({ done: 0, total: 2 }) + expect(quest.objectives[0].status).toBe('active') }) - it('points at `plan` when initialized but no plan exists', () => { - writeContext(sampleContext) - expect(nextAction(readProjectStatus(cwd), 'pnpm dlx stash')).toMatch( - /\bplan\b/, + it('EQL pending: 1/2, activate active', () => { + const quest = buildColumnQuest({ + table: 'orders', + column: 'note', + phase: null, + eql: { state: 'pending' }, + }) + expect(quest.progress).toEqual({ done: 1, total: 2 }) + expect(quest.objectives[1].status).toBe('active') + expect(quest.nextMove).toMatch(/db activate/) + }) + + it('EQL active: 2/2, complete', () => { + const quest = buildColumnQuest({ + table: 'orders', + column: 'note', + phase: null, + eql: { state: 'active' }, + }) + expect(quest.complete).toBe(true) + expect(quest.nextMove).toBeUndefined() + }) +}) + +describe('buildColumnQuest — DB unreachable', () => { + it('locks every objective except the first when phase and eql are both undefined', () => { + // The only honest answer is "I don't know what's been done" — show + // the full migrate-shape with the first objective active so the user + // sees that the rollout exists; the renderer surfaces a footer note + // about the missing observation. 
+ const quest = buildColumnQuest({ table: 't', column: 'c' }) + expect(quest.path).toBe('migrate') + expect(quest.objectives[0].status).toBe('active') + expect(quest.objectives.slice(1).every((o) => o.status === 'locked')).toBe( + true, ) }) +}) - it('asks the user to review the plan before implementing', () => { - writeContext(sampleContext) - writePlan() - const action = nextAction(readProjectStatus(cwd), 'pnpm dlx stash') - expect(action).toMatch(/plan\.md/) - expect(action).toMatch(/impl/) +describe('buildQuestLog', () => { + it('separates active and completed quests', () => { + const log = buildQuestLog({ + initialized: true, + observedFromDb: true, + observations: [ + { table: 'users', column: 'email', phase: 'dropped' }, + { table: 'users', column: 'phone', phase: 'dual-writing' }, + ], + }) + expect(log.completed).toHaveLength(1) + expect(log.completed[0].column).toBe('email') + expect(log.active).toHaveLength(1) + expect(log.active[0].column).toBe('phone') }) - it('routes to encrypt status once the agent has been engaged', () => { - writeContext(sampleContext) - writePlan() - writeSetupPrompt() - expect(nextAction(readProjectStatus(cwd), 'pnpm dlx stash')).toMatch( - /encrypt status/, + it('reports observedFromDb=false when DB couldn’t be reached', () => { + const log = buildQuestLog({ + initialized: true, + observedFromDb: false, + observations: [{ table: 'users', column: 'email' }], + }) + expect(log.observedFromDb).toBe(false) + }) + + it('an empty observations list with initialized=true means the user has not declared columns yet', () => { + const log = buildQuestLog({ + initialized: true, + observedFromDb: true, + observations: [], + }) + expect(log.active).toEqual([]) + expect(log.completed).toEqual([]) + }) +}) + +describe('renderQuestLogTTY', () => { + it('shows an empty-state for uninitialized projects with init prompt', () => { + const out = renderQuestLogTTY( + buildQuestLog({ + initialized: false, + observedFromDb: false, + observations: [], + 
}), ) + expect(out).toMatch(/no quests yet/i) + expect(out).toMatch(/stash init/) + }) + + it('renders the active-quest section with progress bar, objectives, and next-move hint', () => { + const log = buildQuestLog({ + initialized: true, + observedFromDb: true, + observations: [ + { table: 'users', column: 'email', phase: 'dual-writing' }, + ], + }) + const out = renderQuestLogTTY(log) + expect(out).toContain('CipherStash Quest Log') + expect(out).toContain('ACTIVE QUEST') + expect(out).toContain('Encrypt users.email') + expect(out).toMatch(/2\/5 objectives/) + expect(out).toContain('▓') + expect(out).toContain('░') + expect(out).toMatch(/← you are here/) + expect(out).toMatch(/Next move/) + }) + + it('shows a 🏆 line per completed quest', () => { + const log = buildQuestLog({ + initialized: true, + observedFromDb: true, + observations: [{ table: 'users', column: 'ssn', phase: 'dropped' }], + }) + const out = renderQuestLogTTY(log) + expect(out).toContain('🏆 COMPLETED') + expect(out).toContain('users.ssn') + }) + + it('appends a DB-unreachable note when observedFromDb is false', () => { + const log = buildQuestLog({ + initialized: true, + observedFromDb: false, + observations: [{ table: 'users', column: 'email' }], + }) + const out = renderQuestLogTTY(log) + expect(out).toMatch(/could not reach the database/i) + }) +}) + +describe('renderQuestLogPlain', () => { + it('emits no emoji or progress-bar glyphs', () => { + const log = buildQuestLog({ + initialized: true, + observedFromDb: true, + observations: [ + { table: 'users', column: 'email', phase: 'dual-writing' }, + ], + }) + const out = renderQuestLogPlain(log) + expect(out).not.toMatch(/⚔️|🏆|🔒|💡|▓|░/) + // Still has the structural content. + expect(out).toContain('Encrypt users.email') + expect(out).toMatch(/Progress: 2\/5/) + expect(out).toContain('Next move:') + // Still uses bracketed status markers as a stable plain-text signal + // for scripts. 
+ expect(out).toMatch(/\[x\]/) + expect(out).toMatch(/\[>\]/) + expect(out).toMatch(/\[ \]/) + }) + + it('reports completed rollouts cleanly', () => { + const log = buildQuestLog({ + initialized: true, + observedFromDb: true, + observations: [{ table: 'users', column: 'ssn', phase: 'dropped' }], + }) + const out = renderQuestLogPlain(log) + expect(out).toContain('Completed') + expect(out).toContain('users.ssn') + expect(out).not.toMatch(/🏆/) + }) +}) + +describe('renderQuestLogJSON', () => { + it('emits a stable JSON shape with all fields a script needs', () => { + const log = buildQuestLog({ + initialized: true, + observedFromDb: true, + observations: [ + { table: 'users', column: 'email', phase: 'dual-writing' }, + { table: 'users', column: 'ssn', phase: 'dropped' }, + ], + }) + const json = renderQuestLogJSON(log) + const parsed = JSON.parse(json) + expect(parsed.initialized).toBe(true) + expect(parsed.observedFromDb).toBe(true) + expect(parsed.active).toHaveLength(1) + expect(parsed.completed).toHaveLength(1) + + const active = parsed.active[0] + expect(active.table).toBe('users') + expect(active.column).toBe('email') + expect(active.path).toBe('migrate') + expect(active.progress).toEqual({ done: 2, total: 5 }) + expect(active.complete).toBe(false) + expect(active.nextMove).toContain('stash encrypt backfill') + expect(Array.isArray(active.objectives)).toBe(true) + expect(active.objectives[0]).toHaveProperty('label') + expect(active.objectives[0]).toHaveProperty('status') + }) +}) + +describe('nextMoveHint', () => { + it('points at init when uninitialized', () => { + const log = buildQuestLog({ + initialized: false, + observedFromDb: false, + observations: [], + }) + expect(nextMoveHint(log, 'pnpm dlx stash')).toMatch(/init/) + }) + + it('points at plan when initialized but no quests', () => { + const log = buildQuestLog({ + initialized: true, + observedFromDb: true, + observations: [], + }) + expect(nextMoveHint(log, 'pnpm dlx stash')).toMatch(/plan/) + }) + + 
it('uses the first active quest’s nextMove when one exists', () => { + const log = buildQuestLog({ + initialized: true, + observedFromDb: true, + observations: [ + { table: 'users', column: 'email', phase: 'dual-writing' }, + ], + }) + expect(nextMoveHint(log, 'pnpm dlx stash')).toContain('stash encrypt backfill') + }) + + it('reports complete when every quest is done', () => { + const log = buildQuestLog({ + initialized: true, + observedFromDb: true, + observations: [{ table: 'users', column: 'ssn', phase: 'dropped' }], + }) + expect(nextMoveHint(log, 'pnpm dlx stash')).toMatch(/complete|nothing/i) }) }) diff --git a/packages/cli/src/commands/status/index.ts b/packages/cli/src/commands/status/index.ts index 3c8e5013..2eb0411f 100644 --- a/packages/cli/src/commands/status/index.ts +++ b/packages/cli/src/commands/status/index.ts @@ -1,6 +1,8 @@ import { existsSync } from 'node:fs' import { resolve } from 'node:path' +import { latestByColumn, readManifest } from '@cipherstash/migrate' import * as p from '@clack/prompts' +import pg from 'pg' import { readContextFile } from '../init/lib/read-context.js' import { PLAN_REL_PATH } from '../init/lib/setup-prompt.js' import { @@ -8,23 +10,25 @@ import { SETUP_PROMPT_REL_PATH, } from '../init/lib/write-context.js' import { detectPackageManager, runnerCommand } from '../init/utils.js' +import { + type ColumnObservation, + type QuestLog, + buildQuestLog, +} from './quest.js' +import { + renderQuestLogJSON, + renderQuestLogPlain, + renderQuestLogTTY, +} from './render.js' export type StageStatus = 'done' | 'pending' -export interface Stage { - label: string - status: StageStatus - detail: string -} - export interface ProjectStatus { initialized: boolean context?: ContextFile planExists: boolean /** Setup-prompt is written by every `stash impl` run, regardless of mode. - * Its presence means the user has handed off to an agent at least once; - * it does NOT mean implementation is complete. 
We surface it as a softer - * "agent has been engaged" signal rather than treating it as done. */ + * Its presence means the user has handed off to an agent at least once. */ agentEngaged: boolean } @@ -38,99 +42,218 @@ export function readProjectStatus(cwd: string): ProjectStatus { } } -export function buildStages(status: ProjectStatus, cli: string): Stage[] { - const initDetail = status.context - ? `${status.context.integration} · ${status.context.packageManager} · ${status.context.schemas.length} table${status.context.schemas.length === 1 ? '' : 's'}` - : `run \`${cli} init\` to begin` - - const planDetail = status.planExists - ? PLAN_REL_PATH - : status.initialized - ? `run \`${cli} plan\` to draft` - : 'waiting on init' - - let implLabel = 'Implementation' - let implDetail: string - const implStatus: StageStatus = 'pending' - if (!status.initialized) { - implDetail = 'waiting on init' - } else if (!status.planExists) { - implDetail = 'waiting on plan' - } else if (!status.agentEngaged) { - implDetail = `run \`${cli} impl\` to execute the plan` - } else { - // Agent has been engaged at least once. We can't tell from disk alone - // whether the implementation is complete — that requires DB inspection - // (`stash encrypt status`). Keep status as `pending` and point there. - implLabel = 'Implementation' - implDetail = `agent engaged — see \`${cli} encrypt status\` for column-level state` +/** + * Read the manifest + cs_migrations + EQL config + physical-column state + * and assemble per-column observations. DB reachability is best-effort — + * any error is swallowed and the result is `{ observedFromDb: false, + * observations }` with `phase`/`eql` undefined per column. 
+ */ +export async function gatherObservations( + cwd: string, +): Promise<{ observedFromDb: boolean; observations: ColumnObservation[] }> { + const manifest = await readManifest(cwd).catch(() => null) + if (!manifest) { + return { observedFromDb: false, observations: [] } } - return [ - { - label: 'Initialized', - status: status.initialized ? 'done' : 'pending', - detail: initDetail, - }, - { - label: 'Plan written', - status: status.planExists ? 'done' : 'pending', - detail: planDetail, - }, - { - label: implLabel, - status: implStatus, - detail: implDetail, - }, - ] -} + const targetColumns: { table: string; column: string }[] = [] + for (const [table, cols] of Object.entries(manifest.tables)) { + for (const col of cols) { + targetColumns.push({ table, column: col.column }) + } + } + + if (targetColumns.length === 0) { + return { observedFromDb: false, observations: [] } + } + + let databaseUrl: string | undefined + try { + const { loadStashConfig } = await import('../../config/index.js') + const config = await loadStashConfig() + databaseUrl = config.databaseUrl + } catch { + // Couldn't load config — return manifest-only observations. + return { + observedFromDb: false, + observations: targetColumns.map((c) => ({ ...c })), + } + } + + const client = new pg.Client({ connectionString: databaseUrl }) + try { + await client.connect() + const [phases, eqlConfig, physicalCols] = await Promise.all([ + latestByColumn(client).catch(() => new Map()), + fetchEqlConfig(client), + fetchPhysicalColumns(client), + ]) + + const observations: ColumnObservation[] = targetColumns.map((c) => { + const key: `${string}.${string}` = `${c.table}.${c.column}` + const phaseRow = phases.get(key) + return { + table: c.table, + column: c.column, + phase: phaseRow ? phaseRow.phase : null, + eql: eqlConfig.get(key) ?? null, + physicalEncryptedTwinExists: ( + physicalCols.get(c.table) ?? 
new Set() + ).has(`${c.column}_encrypted`), + } + }) -export function nextAction(status: ProjectStatus, cli: string): string { - if (!status.initialized) return `Run \`${cli} init\` to begin.` - if (!status.planExists) { - return `Run \`${cli} plan\` to draft your encryption plan.` + return { observedFromDb: true, observations } + } catch { + return { + observedFromDb: false, + observations: targetColumns.map((c) => ({ ...c })), + } + } finally { + await client.end().catch(() => undefined) } - if (!status.agentEngaged) { - return `Review \`${PLAN_REL_PATH}\`, then run \`${cli} impl\` to implement.` +} + +async function fetchEqlConfig( + client: pg.Client, +): Promise> { + const out = new Map() + try { + const result = await client.query<{ state: string; data: unknown }>( + `SELECT state, data FROM public.eql_v2_configuration + WHERE state IN ('active', 'pending', 'encrypting') + ORDER BY CASE state WHEN 'active' THEN 0 WHEN 'encrypting' THEN 1 ELSE 2 END`, + ) + for (const row of result.rows) { + const data = row.data as { + tables?: Record> + } | null + if (!data?.tables) continue + for (const [tableName, columns] of Object.entries(data.tables)) { + for (const columnName of Object.keys(columns)) { + const key = `${tableName}.${columnName}` + if (out.has(key)) continue + out.set(key, { + state: row.state as 'active' | 'pending' | 'encrypting', + }) + } + } + } + } catch (err) { + if (err instanceof Error && /eql_v2_configuration/i.test(err.message)) { + return out + } + throw err } - return `Run \`${cli} encrypt status\` to inspect per-column migration state.` + return out } -const LABEL_WIDTH = 16 +async function fetchPhysicalColumns( + client: pg.Client, +): Promise>> { + const out = new Map>() + try { + const result = await client.query<{ + table_name: string + column_name: string + }>( + `SELECT table_name, column_name FROM information_schema.columns + WHERE table_schema = current_schema()`, + ) + for (const row of result.rows) { + const set = 
out.get(row.table_name) ?? new Set() + set.add(row.column_name) + out.set(row.table_name, set) + } + } catch { + // information_schema is always present; if this fails, swallow. + } + return out +} -function renderStage(stage: Stage): string { - const marker = stage.status === 'done' ? '✓' : '◯' - return `${marker} ${stage.label.padEnd(LABEL_WIDTH)} ${stage.detail}` +interface StatusFlags { + /** Force the fancy quest-log output even in non-TTY contexts. */ + quest?: boolean + /** Force a structured JSON output (machine-readable). */ + json?: boolean + /** Force the plain-text fallback even in TTY contexts. */ + plain?: boolean } /** - * `stash status` — the lifecycle map. Reads disk state only: - * `.cipherstash/context.json` (init done?), `.cipherstash/plan.md` (plan - * written?), `.cipherstash/setup-prompt.md` (agent engaged at least once?). - * Points at `stash db status` and `stash encrypt status` for the deeper - * state that requires database connectivity. + * `stash status` — the encryption rollout quest log. * - * Designed to give the user a one-shot answer to "where am I?" without - * waiting on auth, DB connection, or any network round-trip. Runs in - * milliseconds. The deeper commands stay specialised. + * Reads `.cipherstash/context.json` to know whether init has run, then + * (best-effort) consults `cs_migrations`, `eql_v2_configuration`, and + * `information_schema` to derive per-column quest objectives. Renders + * a quest-log shape with active/completed sections, progress bars, and + * per-quest "next move" hints. + * + * Output mode: + * `--quest` forces the fancy TTY shape (works anywhere). + * `--plain` forces the emoji-free plain shape (works anywhere). + * `--json` forces a structured JSON output (machine-readable). + * default — fancy in TTY, plain otherwise. 
*/ -export async function statusCommand() { +export async function statusCommand(flags: StatusFlags = {}) { const cwd = process.cwd() const pm = detectPackageManager() const cli = runnerCommand(pm, 'stash') + const project = readProjectStatus(cwd) - const status = readProjectStatus(cwd) - const stages = buildStages(status, cli) + if (flags.json) { + const log = await buildLog(cwd, project) + process.stdout.write(renderQuestLogJSON(log)) + return + } - p.intro('CipherStash project status') + // The intro/outro frames are TTY-only; in plain mode we want the raw + // body without `clack` decorations so the output is grep-friendly. + const log = await buildLog(cwd, project) - p.note(stages.map(renderStage).join('\n'), 'Lifecycle') + const useTTY = flags.quest ?? (process.stdout.isTTY && !flags.plain) - const deeper = [ - `Database state: \`${cli} db status\``, - `Per-column state: \`${cli} encrypt status\``, - ].join('\n') - p.note(deeper, 'Deeper inspection') + if (useTTY) { + p.intro('CipherStash') + p.note(renderQuestLogTTY(log), 'Quest log') + p.outro(nextMoveHint(log, cli)) + } else { + process.stdout.write(`${renderQuestLogPlain(log)}\n`) + process.stdout.write(`Next: ${nextMoveHint(log, cli)}\n`) + } +} + +async function buildLog( + cwd: string, + project: ProjectStatus, +): Promise { + const { observedFromDb, observations } = project.initialized + ? await gatherObservations(cwd) + : { observedFromDb: false, observations: [] } + return buildQuestLog({ + initialized: project.initialized, + observedFromDb, + observations, + }) +} - p.outro(nextAction(status, cli)) +/** + * One-line prompt for what to do next, used as the outro of both rendering + * paths. Encodes the same routing the legacy `nextAction` helper used to: + * "haven't init'd → init", "no quests → plan", "active quest → impl or + * follow the per-quest hint", "everything done → relax". 
+ */ +export function nextMoveHint(log: QuestLog, cli: string): string { + if (!log.initialized) return `Run \`${cli} init\` to begin.` + if (log.active.length === 0 && log.completed.length === 0) { + return `Run \`${cli} plan\` to draft your encryption rollout.` + } + if (log.active.length === 0) { + return 'All rollouts complete. Nothing to do.' + } + // First active quest's nextMove already names a concrete CLI invocation + // when relevant; if not, fall back to a generic plan/impl pointer. + const first = log.active[0] + if (first.nextMove) return first.nextMove + return `Run \`${cli} plan\` to draft the next step.` } diff --git a/packages/cli/src/commands/status/quest.ts b/packages/cli/src/commands/status/quest.ts new file mode 100644 index 00000000..f4e64e40 --- /dev/null +++ b/packages/cli/src/commands/status/quest.ts @@ -0,0 +1,262 @@ +import type { MigrationPhase } from '@cipherstash/migrate' + +/** Status of one objective inside a quest. */ +export type ObjectiveStatus = 'done' | 'active' | 'locked' + +export interface Objective { + label: string + status: ObjectiveStatus +} + +export type QuestPath = 'new' | 'migrate' + +export interface ColumnQuest { + table: string + column: string + path: QuestPath + /** Title shown in the quest-log header. */ + title: string + objectives: Objective[] + progress: { done: number; total: number } + /** One-line "what to do next" hint. Empty when the quest is complete. */ + nextMove?: string + /** True iff every objective is done. */ + complete: boolean +} + +export interface QuestLog { + /** True when `.cipherstash/context.json` exists and parses. False when the + * user has not run `stash init`. The renderer surfaces an empty-state + * message in that case. */ + initialized: boolean + /** Whether DB observability succeeded. When false, column quests are + * still surfaced (from `migrations.json`) but objective state defaults + * to "locked" because we can't tell what's been done. The renderer + * shows a footer note. 
*/ + observedFromDb: boolean + active: ColumnQuest[] + completed: ColumnQuest[] +} + +/** EQL config state for a column. `null` when the column isn't registered. */ +export interface EqlColumnSummary { + state: 'active' | 'pending' | 'encrypting' +} + +/** + * Inputs to {@link buildColumnQuest}. Every field is optional; the builder + * derives the most accurate objective state it can from whatever is + * provided. When DB connectivity is missing all of `phase`, `eql`, and + * `physicalEncryptedTwinExists` should be `undefined`, and the builder + * produces a quest with all objectives locked. + */ +export interface ColumnObservation { + table: string + column: string + /** From `cs_migrations`. `null` means no events recorded. `undefined` + * means the DB couldn't be reached. */ + phase?: MigrationPhase | null + /** From `eql_v2_configuration`. `null` means not registered. `undefined` + * means DB unreachable. */ + eql?: EqlColumnSummary | null + /** Whether `_encrypted` exists in `information_schema.columns`. + * Used as a fallback signal that schema-add has been applied even if + * cs_migrations doesn't yet track this column. `undefined` when the + * caller can't tell. */ + physicalEncryptedTwinExists?: boolean +} + +const MIGRATE_OBJECTIVES = [ + 'Schema-add — encrypted twin column added', + 'Dual-writes deployed to production', + 'Backfill historical rows', + 'Cut over to encrypted (rename swap, switch reads)', + 'Drop plaintext column', +] + +const NEW_OBJECTIVES = [ + 'Schema-add — encrypted column declared and migrated', + 'Live in active EQL config', +] + +/** + * Decide whether a column should be rendered as a migrate quest or a new + * quest. The discriminator is the encrypted-twin column: if a `_encrypted` + * column exists physically, the user is migrating an existing populated + * column (the twin is created alongside the original). Without that signal + * (and without `cs_migrations` history), default to the new-column shape. 
+ * + * When DB connectivity is missing entirely (`phase === undefined`), default + * to migrate — the 5-objective shape is more informative when we don't know. + */ +export function inferQuestPath(obs: ColumnObservation): QuestPath { + if (obs.phase === undefined) return 'migrate' + if (obs.phase !== null) return 'migrate' + if (obs.physicalEncryptedTwinExists) return 'migrate' + return 'new' +} + +/** + * Build a column quest from one observation. Pure; no I/O. + * + * Migrate-column objective mapping (phase → done count): + * null + no EQL → 0 done, active = "schema-add" + * null + EQL pending → 1 done, active = "dual-writes deployed" + * null + encrypted twin only → 1 done, active = "dual-writes deployed" + * dual-writing → 2 done, active = "backfill" + * backfilling → 2 done, active = "backfill" + * backfilled → 3 done, active = "cut over" + * cut-over → 4 done, active = "drop plaintext" + * dropped → 5 done (complete) + * + * New-column objective mapping: + * no EQL → 0 done, active = "schema-add" + * EQL pending → 1 done, active = "promote to active" + * EQL active → 2 done (complete) + */ +export function buildColumnQuest(obs: ColumnObservation): ColumnQuest { + const path = inferQuestPath(obs) + const labels = path === 'migrate' ? MIGRATE_OBJECTIVES : NEW_OBJECTIVES + const doneCount = computeDoneCount(path, obs) + const total = labels.length + + const objectives: Objective[] = labels.map((label, idx) => { + let status: ObjectiveStatus + if (idx < doneCount) status = 'done' + else if (idx === doneCount) status = 'active' + else status = 'locked' + + // When DB is unreachable, we can't be confident about anything past + // schema-add; mark everything beyond as locked but flag the active + // one as such. + if (obs.phase === undefined && obs.eql === undefined) { + status = 'locked' + } + return { label, status } + }) + + // If DB unreachable, no `active` was set above; set the first + // objective active so the user has *something* to look at. 
+ if (obs.phase === undefined && obs.eql === undefined) { + objectives[0] = { ...objectives[0], status: 'active' } + } + + const complete = doneCount === total + + return { + table: obs.table, + column: obs.column, + path, + title: titleFor(obs.table, obs.column, path), + objectives, + progress: { done: complete ? total : doneCount, total }, + nextMove: complete ? undefined : nextMoveFor(path, doneCount, obs), + complete, + } +} + +function titleFor(table: string, column: string, path: QuestPath): string { + return path === 'migrate' + ? `Encrypt ${table}.${column}` + : `Add encrypted column ${table}.${column}` +} + +function computeDoneCount(path: QuestPath, obs: ColumnObservation): number { + if (path === 'new') return computeDoneNew(obs) + return computeDoneMigrate(obs) +} + +function computeDoneNew(obs: ColumnObservation): number { + // Without DB observability, default to 0. + if (obs.eql === undefined) return 0 + if (obs.eql === null) return 0 + if (obs.eql.state === 'active') return 2 + // Pending or encrypting — schema-add is registered, activation pending. + return 1 +} + +function computeDoneMigrate(obs: ColumnObservation): number { + if (obs.phase === undefined && obs.eql === undefined) return 0 + + // Phase progression dominates when we have it. + switch (obs.phase) { + case 'dropped': + return 5 + case 'cut-over': + return 4 + case 'backfilled': + return 3 + case 'backfilling': + case 'dual-writing': + return 2 + case 'schema-added': + // Synthesised by some renderers; equivalent to "no events but + // schema-add has been applied". + return 1 + case null: + case undefined: + // No cs_migrations entry. Look for fallback signals: the encrypted + // twin column existing in information_schema or the column being + // registered with EQL counts as schema-add done. 
+ if (obs.eql || obs.physicalEncryptedTwinExists) return 1 + return 0 + } +} + +function nextMoveFor( + path: QuestPath, + doneCount: number, + obs: ColumnObservation, +): string { + if (path === 'new') { + if (doneCount === 0) { + return 'Declare the encrypted column in your schema and run the migration, then `stash db push`.' + } + return 'Promote the pending EQL config — `stash db activate`.' + } + + // Migrate. + switch (doneCount) { + case 0: + return 'Add the encrypted twin column (`_encrypted`) and run the migration.' + case 1: + return 'Wire dual-write code on every persistence path, deploy to production, then run `stash encrypt backfill` (it confirms dual-writes and records the event).' + case 2: + return `Run \`stash encrypt backfill --table ${obs.table} --column ${obs.column}\` to encrypt historical rows.` + case 3: + return `Run \`stash encrypt cutover --table ${obs.table} --column ${obs.column}\` to rename the encrypted twin into place and switch reads.` + case 4: + return `Run \`stash encrypt drop --table ${obs.table} --column ${obs.column}\` to remove the plaintext column.` + default: + return '' + } +} + +/** + * Compose a quest log from per-column observations. Pure; no I/O. + * + * `initialized` is true when the project has run `stash init` (we have a + * `context.json`). The renderer uses this to decide whether to show an + * empty-state quest log. `observedFromDb` is true when at least one + * observation has live DB data; false if the DB query failed and we're + * working from manifest alone. 
+ */ +export function buildQuestLog(input: { + initialized: boolean + observedFromDb: boolean + observations: ColumnObservation[] +}): QuestLog { + const quests = input.observations.map(buildColumnQuest) + const active: ColumnQuest[] = [] + const completed: ColumnQuest[] = [] + for (const quest of quests) { + if (quest.complete) completed.push(quest) + else active.push(quest) + } + return { + initialized: input.initialized, + observedFromDb: input.observedFromDb, + active, + completed, + } +} diff --git a/packages/cli/src/commands/status/render.ts b/packages/cli/src/commands/status/render.ts new file mode 100644 index 00000000..40132317 --- /dev/null +++ b/packages/cli/src/commands/status/render.ts @@ -0,0 +1,210 @@ +import type { ColumnQuest, Objective, QuestLog } from './quest.js' + +const PROGRESS_BAR_WIDTH = 6 + +/** + * Render a quest log as the balanced TTY quest-log shape — emoji, + * progress bars, lock icons, "← you are here" markers, and one-line + * "next move" hints. Designed for an interactive terminal; non-TTY + * callers should use {@link renderQuestLogPlain} instead. 
+ */ +export function renderQuestLogTTY(log: QuestLog): string { + if (!log.initialized) { + return [ + '⚔️ CipherStash Quest Log', + '', + ' No quests yet — your encryption rollout has not begun.', + '', + ' First move: run `npx stash init` to set up CipherStash, then', + ' `npx stash plan` to draft an encryption rollout.', + ].join('\n') + } + + const lines: string[] = ['⚔️ CipherStash Quest Log', ''] + + if (log.active.length === 0 && log.completed.length === 0) { + lines.push( + ' No active quests yet — your encryption rollout has not begun.', + '', + ' First move: run `npx stash plan` to draft a rollout for the', + ' columns you want to protect.', + ) + return lines.join('\n') + } + + for (const quest of log.active) { + lines.push(...renderActiveQuestTTY(quest)) + lines.push('') + } + + for (const quest of log.completed) { + lines.push(` 🏆 COMPLETED · ${quest.table}.${quest.column} — fully encrypted`) + } + + if (!log.observedFromDb) { + lines.push( + '', + ' Note: could not reach the database, so per-column progress is', + ' derived from `.cipherstash/migrations.json` only. 
Run again with', + ' the database available for live state.', + ) + } + + return lines.join('\n').replace(/\n+$/, '') +} + +function renderActiveQuestTTY(quest: ColumnQuest): string[] { + const lines: string[] = [] + lines.push(` ▶ ACTIVE QUEST · ${quest.title}`) + const bar = progressBar(quest.progress.done, quest.progress.total) + lines.push( + ` ${bar} ${quest.progress.done}/${quest.progress.total} objectives`, + ) + for (const obj of quest.objectives) { + lines.push(` ${objectiveIconTTY(obj)} ${decorate(obj)}`) + } + if (quest.nextMove) { + lines.push(` 💡 Next move: ${quest.nextMove}`) + } + return lines +} + +function objectiveIconTTY(obj: Objective): string { + switch (obj.status) { + case 'done': + return '✓' + case 'active': + return '▸' + case 'locked': + return '🔒' + } +} + +function decorate(obj: Objective): string { + if (obj.status === 'active') return `${obj.label} ← you are here` + return obj.label +} + +function progressBar(done: number, total: number): string { + if (total === 0) return ''.padEnd(PROGRESS_BAR_WIDTH, '░') + const filled = Math.round((done / total) * PROGRESS_BAR_WIDTH) + return '▓'.repeat(filled) + '░'.repeat(PROGRESS_BAR_WIDTH - filled) +} + +/** + * Render a quest log as plain text. Same content as the TTY shape, + * stripped of emoji, lock icons, and progress-bar glyphs. The structure + * stays identical so a script grepping the output of either form gets + * the same fields. + * + * Used in non-TTY contexts by default — CI logs, pipes, agents reading + * the output. The user can force the TTY shape anywhere with `--quest`. 
+ */ +export function renderQuestLogPlain(log: QuestLog): string { + if (!log.initialized) { + return [ + 'CipherStash status — encryption rollout', + '', + ' No quests yet — encryption rollout has not begun.', + ' First move: run `npx stash init`, then `npx stash plan`.', + ].join('\n') + } + + const lines: string[] = ['CipherStash status — encryption rollout', ''] + + if (log.active.length === 0 && log.completed.length === 0) { + lines.push( + ' No active quests.', + ' First move: run `npx stash plan` to draft a rollout.', + ) + return lines.join('\n') + } + + if (log.active.length > 0) { + lines.push('Active rollouts') + lines.push('───────────────────────────────────────────────────') + for (const quest of log.active) { + lines.push(...renderActiveQuestPlain(quest)) + lines.push('') + } + } + + if (log.completed.length > 0) { + lines.push('Completed') + lines.push('───────────────────────────────────────────────────') + for (const quest of log.completed) { + lines.push(` ${quest.table}.${quest.column} — fully encrypted`) + } + } + + if (!log.observedFromDb) { + lines.push( + '', + 'Note: could not reach the database; per-column progress is derived from', + '.cipherstash/migrations.json only. Run again with the database available', + 'for live state.', + ) + } + + return lines.join('\n').replace(/\n+$/, '') +} + +function renderActiveQuestPlain(quest: ColumnQuest): string[] { + const lines: string[] = [] + lines.push(` ${quest.title}`) + lines.push( + ` Progress: ${quest.progress.done}/${quest.progress.total} objectives`, + ) + for (const obj of quest.objectives) { + lines.push(` ${objectiveIconPlain(obj)} ${obj.label}`) + } + if (quest.nextMove) { + lines.push(` Next move: ${quest.nextMove}`) + } + return lines +} + +function objectiveIconPlain(obj: Objective): string { + switch (obj.status) { + case 'done': + return '[x]' + case 'active': + return '[>]' + case 'locked': + return '[ ]' + } +} + +/** + * Render a quest log as JSON. 
Stable shape — flat per-quest objects with + * an objectives array, designed for scripts and agents reading the + * status output. We never break this shape without a major version bump. + */ +export function renderQuestLogJSON(log: QuestLog): string { + return `${JSON.stringify( + { + initialized: log.initialized, + observedFromDb: log.observedFromDb, + active: log.active.map(serializeQuest), + completed: log.completed.map(serializeQuest), + }, + null, + 2, + )}\n` +} + +function serializeQuest(quest: ColumnQuest) { + return { + table: quest.table, + column: quest.column, + path: quest.path, + title: quest.title, + progress: quest.progress, + complete: quest.complete, + nextMove: quest.nextMove ?? null, + objectives: quest.objectives.map((o) => ({ + label: o.label, + status: o.status, + })), + } +} diff --git a/skills/stash-cli/SKILL.md b/skills/stash-cli/SKILL.md index 7ae4c6a3..f22908be 100644 --- a/skills/stash-cli/SKILL.md +++ b/skills/stash-cli/SKILL.md @@ -38,11 +38,31 @@ The setup lifecycle is split across four explicit save-points. Each command can | Command | Owns | Ends with | |---------|------|-----------| | `stash init` | Auth, database, dep install, EQL install, encryption client scaffold, `.cipherstash/context.json` | Default-yes prompt → chains to `stash plan` | -| `stash plan` | Drafts `.cipherstash/plan.md` via agent handoff (Claude Code or Codex) | Default-yes prompt → chains to `stash impl` | -| `stash impl` | Executes the plan via agent handoff (any of four targets) | Outro pointing at `stash db status` to verify | -| `stash status` | Disk-only "where am I?" map, runs in ms | — | +| `stash plan` | Drafts `.cipherstash/plan.md` via agent handoff. State-driven — auto-detects whether to plan an encryption rollout or an encryption cutover. | Default-yes prompt → chains to `stash impl` | +| `stash impl` | Executes the plan via agent handoff. 
Refuses cutover-step plans without a recorded `dual_writing` event; prints the deploy-gate banner after a rollout-step run. | Deploy-gate banner (rollout) or "verify state" (cutover/new) | +| `stash status` | The encryption-rollout quest log — per-column "where am I" map, runs in ms | — | -Use `stash status` at any time to see which save-points are complete. +### Rolling encryption out to production + +Two paths to a fully-encrypted column: + +- **New encrypted column** — declared encrypted from the start. Single deploy. Use the `stash plan` → `stash impl` chain straight through. +- **Existing column with live data** — split across two passes around a hard production-deploy gate. + +For migrate columns, the flow is: + +1. **`stash plan`** detects that no `dual_writing` event is recorded and writes an encryption-rollout plan: schema-add for the encrypted twin, `stash db push` (pending), and the application-side dual-write code. +2. **`stash impl`** executes that plan and stops with a deploy-gate banner. Encrypted values are not flowing yet — the dual-write code has to be running in production before backfill is safe. +3. **You ship and deploy** the rollout PR. +4. **`stash status`** confirms dual-writes are live. +5. **`stash plan`** detects `dual_writing` and writes a separate cutover plan: backfill, schema rename + re-push, cutover, read-path switch, drop. +6. **`stash impl`** executes the cutover. + +The split is invisible to the user — they just keep running `stash plan` and `stash impl`; the CLI knows where they are. + +For users without a deployed application to gate on (local dev, sandboxes, freshly-seeded test DBs), `stash plan --complete-rollout` produces a single end-to-end plan with no deploy gate. The flag prints a default-no confirm with a loud warning before generating; only safe when no production app writes to this database. + +Use `stash status` at any time to see which save-points are complete and what each rollout's next move is. 
## Configuration @@ -114,16 +134,31 @@ The `--supabase` and `--drizzle` flags tailor the intro message and EQL install ```bash npx stash plan +npx stash plan --complete-rollout ``` -`plan` is the **draft for review** save-point. Pre-flights `.cipherstash/context.json` (errors with a "Run `stash init` first" pointer if missing). Hands off to a coding agent — all four targets are offered: Claude Code, Codex, AGENTS.md (for Cursor/Windsurf/Cline), and the CipherStash Agent (`@cipherstash/wizard`). Claude Code, Codex, and AGENTS.md consume the local mode-aware `setup-prompt.md`. The wizard receives `--mode plan` and forwards it to the CipherStash gateway, which returns a planning prompt; the wizard runtime skips its post-agent install/push/migrate steps when `mode === 'plan'`. Every target produces a valid plan-mode artifact at `.cipherstash/plan.md`. +`plan` is the **draft for review** save-point. Pre-flights `.cipherstash/context.json` (errors with a "Run `stash init` first" pointer if missing). Hands off to a coding agent — all four targets are offered: Claude Code, Codex, AGENTS.md (for Cursor/Windsurf/Cline), and the CipherStash Agent (`@cipherstash/wizard`). + +`plan` is **state-driven**. It reads `.cipherstash/migrations.json` and `cs_migrations` and dispatches to one of three plan templates: + +| Detected state | Plan written | +|---|---| +| Manifest empty, fresh project, or no `dual_writing` events recorded | **Encryption rollout** — schema-add, dual-write code, `stash db push` (pending). Ends at the deploy gate. | +| At least one column has a `dual_writing` (or later) event recorded | **Encryption cutover** — backfill, schema rename + re-push, cutover, read-path switch, drop plaintext. Requires the rollout to already be deployed. | +| `--complete-rollout` flag passed | **Complete rollout** — schema-add through drop, no deploy gate. Escape hatch for databases without a deployed application. Default-no confirm with a loud warning before generating. 
| -The agent produces `.cipherstash/plan.md` with a machine-readable header `` listing each table/column the user wants to protect and whether it's a `"new"` (additive — the column doesn't yet exist) or `"migrate"` (existing column with live data) lifecycle. The plan also covers prose detail: deploy ordering for migrate columns, project-specific risks (bundler exclusion, partial CipherStash state), the exact CLI sequence to execute when ready. +The chosen template drives the agent's prompt body. The wizard receives `--mode plan` (plus the resolved step) and forwards it to the CipherStash gateway. Every target produces a valid plan-mode artifact at `.cipherstash/plan.md`. + +The agent writes a machine-readable header `` at the top of the plan. `step` is `"rollout" | "cutover" | "complete"`; each column entry carries `path: "new" | "migrate"`. `stash impl` parses this header to render a confirmation panel and to enforce the deploy gate. Ends with a default-yes prompt: *"Continue to `stash impl` now?"* Yes auto-launches `stash impl`. To re-plan, delete `.cipherstash/plan.md` first — `stash plan` will warn (non-blocking) if a plan already exists, since the agent will be told to revise it rather than start fresh. +#### Why the rollout/cutover split + +There is no atomic way to replace a populated plaintext column with an encrypted one without corrupting data. The rollout phase deploys the *capability* to write encrypted values (the encrypted twin column and the application-side dual-write code). The cutover phase deploys the *transition* (backfill historical rows, then rename swap so reads decrypt). Backfill is only safe once dual-writes are running in production, because any row written during the backfill window must be picked up by both columns — otherwise it lands in plaintext only and creates silent migration drift. The split makes that pre-condition explicit. 
+ ### `impl` — Execute the plan ```bash @@ -135,31 +170,61 @@ npx stash impl --continue-without-plan | State | Behaviour | |-------|-----------| -| Plan exists, TTY | Parses the summary block. Renders a confirm panel: "3 columns across 2 tables · staged across 4 deploys (schema-add → backfill → cutover → drop)". Default-yes confirm. | -| Plan exists, non-TTY | Logs and proceeds without confirm (CI/pipe-safe). | +| Plan exists, TTY | Parses the summary block. Enforces the deploy gate (see below). Renders a confirm panel describing the plan scope. Default-yes confirm. | +| Plan exists, non-TTY | Logs and proceeds without confirm (CI/pipe-safe). The deploy-gate check still runs. | | No plan, TTY | Interactive `p.select`: "Draft a plan first (recommended)" / "Continue without a plan" / cancel. "Draft" delegates to `stash plan`. "Continue" goes through a security confirm (default-no) before implementing. | | No plan, `--continue-without-plan` | Skips the picker, runs the security confirm (still default-no), then implements. | | No plan, non-TTY, no flag | Errors out with "Run `stash plan` first, or pass `--continue-without-plan` to skip planning." Forces explicit intent in CI. | Once the user clears the gate, `impl` dispatches to a handoff target (Claude Code, Codex, AGENTS.md for Cursor/Windsurf/Cline, or `@cipherstash/wizard`) and the agent executes the plan: schema edits, migrations, `stash db push`, `stash encrypt {backfill,cutover,drop}` as appropriate. -`--continue-without-plan` exists to support scripts and one-off implementations where planning isn't needed. It is **not** a way to bypass safety — the security confirm still fires when interactive. +#### Deploy-gate enforcement -### `status` — Show project lifecycle state +For plans with `step: "cutover"`, `impl` queries `cs_migrations` for every column listed in the plan-summary block and verifies that each one has a `dual_writing` (or later) event recorded. 
If any are missing, `impl` refuses to proceed and points the user at re-running `stash plan` after their rollout PR is deployed. The error names the specific columns that are not yet recorded. + +This is the safety net for the case where someone runs cutover work locally before the dual-write code is actually live in production. The encrypt commands themselves also gate on the same event before doing anything destructive, but `impl` checks early so the confirm prompt never appears for an unsafe plan. + +#### Outro + +After a successful handoff: + +- **`step: "rollout"`** — prints a deploy-gate banner explaining that encrypted values are not yet flowing because the dual-write code is not deployed, with the next-step sequence (deploy → `stash status` → `stash plan`). +- **`step: "cutover"` or `step: "complete"`** — prints a generic "verify state" outro pointing at `stash status`. +- **No plan / no summary** — same generic outro. + +`--continue-without-plan` exists to support scripts and one-off implementations where planning isn't needed. It is **not** a way to bypass safety — the security confirm still fires when interactive, and the cutover-step deploy-gate check applies regardless. + +### `status` — The encryption-rollout quest log ```bash npx stash status +npx stash status --quest # force the fancy output anywhere +npx stash status --plain # force the plain-text fallback anywhere +npx stash status --json # structured output for scripts ``` -`status` is the **map**. Disk-only — no auth, no DB connection, runs in milliseconds. Reads three files: +`status` is the **map**. Reads `.cipherstash/context.json` (was init run?), `.cipherstash/migrations.json` (which columns are tracked?), and — best-effort — `cs_migrations` plus `eql_v2_configuration` for live per-column state. DB connectivity is optional; when missing, the command falls back to a manifest-only view and surfaces a footer note. -- `.cipherstash/context.json` → was init run? 
-- `.cipherstash/plan.md` → has a plan been drafted? -- `.cipherstash/setup-prompt.md` → has the agent been engaged at least once? +Renders one **quest** per tracked column. Each quest carries: -Renders a lifecycle panel with three stages (Initialized, Plan written, Implementation), each marked `✓` or `◯`. Prints a context-aware "Next:" line that always names exactly one command to run. +- A title (`Encrypt users.email` for migrate columns; `Add encrypted column orders.note` for new columns). +- A progress bar and an `N/M objectives` count. +- A list of objectives with `✓` for done, `▸` for the active "you are here" objective, and `🔒` for locked. +- A one-line "Next move" hint naming the concrete CLI invocation when relevant (`stash encrypt backfill --table users --column email`, etc.). -Points at `stash db status` for EQL/database state and `stash encrypt status` for per-column migration phase — those are the deeper, network-touching status commands. Use them when you need to inspect what's actually installed in the database or where each column is in the encryption lifecycle. +Quests separate into **active** (something to do next) and **completed** (🏆 line per column). + +Output mode: + +- **TTY by default** — the quest-log shape with emoji and progress bars. +- **Non-TTY by default** — a plain-text fallback with the same content (no emoji, bracketed status markers `[x]` / `[>]` / `[ ]`). Designed for CI logs, pipes, and agents reading the output. +- **`--quest`** forces the fancy shape anywhere; **`--plain`** forces the plain shape anywhere; **`--json`** emits a structured JSON document. + +Use the JSON form for scripts; it has a stable shape (`active`, `completed`, per-quest `objectives`, `progress`, `nextMove`) that does not break without a major version bump. + +Run `status` after every transition during a rollout. It is the canonical "where am I?" surface; agents working through the rollout should re-read it as they go rather than tracking state mentally. 
+ +For the deeper, raw views that touch only the database, use `stash db status` (EQL installation state) and `stash encrypt status` (per-column migration phase, EQL state, backfill progress with drift detection). ### `auth login` — Authenticate with CipherStash @@ -356,11 +421,11 @@ npx stash db migrate Not yet implemented — placeholder for future encrypt-config migration tooling. -### `encrypt` — Migrate plaintext columns to encrypted, in phases +### `encrypt` — Drive the encryption-cutover work for a column + +The `encrypt` group is the cutover-step toolset: it runs the database-side work that takes an existing plaintext column the rest of the way to encrypted, after the encryption-rollout PR is deployed and dual-writes are live in production. The internal event log uses `schema-added → dual-writing → backfilling → backfilled → cut-over → dropped` as machine-readable phase names; the user-facing story is the rollout/cutover model documented in the `stash-encryption` skill. -The `encrypt` group orchestrates the column-encryption lifecycle: -`schema-added → dual-writing → backfilling → backfilled → cut-over → dropped`. -It drives the `@cipherstash/migrate` library, which records every transition in a `cipherstash.cs_migrations` table (installed by `stash db install`) and reads the user's intent from `.cipherstash/migrations.json`. See the `stash-encryption` skill for the lifecycle model itself; this section documents the CLI surface. +It drives the `@cipherstash/migrate` library, which records every transition in a `cipherstash.cs_migrations` table (installed by `stash db install`) and reads the user's intent from `.cipherstash/migrations.json`. This section documents the CLI surface. The examples below use `npx stash`. Substitute `bunx`, `pnpm dlx`, or `yarn dlx` (or run `stash` directly when it's installed as a project dev dep — `stash init` sets that up). 
diff --git a/skills/stash-drizzle/SKILL.md b/skills/stash-drizzle/SKILL.md index c53c2786..4640c82b 100644 --- a/skills/stash-drizzle/SKILL.md +++ b/skills/stash-drizzle/SKILL.md @@ -334,9 +334,13 @@ if (!decrypted.failure) { ## Migrating an Existing Column to Encrypted -The hard case: a Drizzle table that already exists in production with live data in a plaintext column you want to encrypt. You can't just change the column type — that would drop the data and break NOT NULL constraints. Use the **column lifecycle** documented in the `stash-encryption` skill (`schema-added → dual-writing → backfilling → cut-over → dropped`), driven by the `stash encrypt` CLI commands. +The hard case: a Drizzle table that already exists in production with live data in a plaintext column you want to encrypt. You can't just change the column type — that would drop the data and break NOT NULL constraints. -This section walks the Drizzle-specific shape of each phase. The CLI commands themselves are documented in the `stash-cli` skill. +CipherStash splits this into two named steps with a hard production-deploy gate between them: an **encryption rollout** (schema-add + dual-write code + `db push`) and an **encryption cutover** (backfill + rename + drop). The `stash-encryption` skill is the canonical reference for the lifecycle; this section walks the Drizzle-specific shape. + +> **Runner note.** Examples below use `npx stash` for npm projects. Substitute `bunx stash` (Bun), `pnpm dlx stash` (pnpm), or `yarn dlx stash` (Yarn) — or run `stash` directly when it is installed as a project dev dep. The behaviour is identical across runners; only the prefix changes. + +> **Where am I?** Run `stash status` first (substitute the runner per the note above). It shows you which Drizzle tables/columns are mid-rollout, which are post-deploy, and what the next move is. Re-run after every transition. 
### Starting state @@ -352,7 +356,11 @@ export const users = pgTable('users', { And an `INSERT INTO users (email) VALUES (...)` somewhere in your app code. -### Phase 1 — Schema-add: declare the encrypted twin +### Step 1 — Encryption rollout (one PR, one deploy) + +Everything below lands in one PR. The deploy of that PR is the gate. + +#### Schema-add: declare the encrypted twin Add an `email_encrypted` column **alongside** `email`. Crucially, the encrypted column is **nullable** at creation — never `.notNull()`, because rows that already exist will have NULL in this column until backfill catches them. @@ -388,16 +396,14 @@ Generate the migration with `drizzle-kit generate`. The generated SQL should be Register the new encryption config with EQL: ```bash -stash db push +npx stash db push ``` -If this is the project's first encrypted column, `db push` writes directly to the active EQL config (nothing to rename). If an active config already exists, `db push` writes the new config as `pending` — that's expected. The pending row will be promoted to active by `stash encrypt cutover` in phase 4. +If this is the project's first encrypted column, `db push` writes directly to the active EQL config (nothing to rename). If an active config already exists, `db push` writes the new config as `pending` — that's expected. The pending row will be promoted to active by `stash encrypt cutover` in the cutover step. -After this phase, rows still have `email_encrypted = NULL`. App reads still come from `email`. Nothing has broken. 
+#### Dual-writing: write to both columns from app code -### Phase 2 — Dual-writing: write to both columns from app code - -Find every code path that writes to `users.email` (insert, update, upsert, seeders, fixtures) and update it to encrypt and also write to `email_encrypted`: +Find **every** code path that writes to `users.email` and update it to encrypt and also write to `email_encrypted`: ```typescript // Before @@ -415,14 +421,31 @@ await db.insert(users).values({ Same shape for UPDATE: if your app updates `email`, it must also re-encrypt and update `email_encrypted` in the same statement. -**Ship this code change to production.** Verify in the DB that new rows arrive with `email_encrypted IS NOT NULL` (run a SELECT or check via your observability). Only proceed once you're confident every write path is dual-writing. +**The dual-write rule.** Every persistence path that mutates this row writes both columns, in the same transaction, on every code branch. Insert sites, update sites, upserts, ON CONFLICT clauses, seeders, fixtures, CSV importers, admin actions, background jobs, third-party webhook handlers — all of them. A single missed branch means rows inserted in production after deploy land in plaintext only, and backfill won't catch them. Grep for every site that touches `users.email` before declaring this step done. + +After this phase, existing rows still have `email_encrypted = NULL`. App reads still come from `email`. Nothing has broken. + +### ⛔ Deploy gate + +Stop. Ship this PR to production. The deployed environment must be running the dual-write code before any cutover-step work is safe. + +When the deploy is live: + +```bash +npx stash status # verify the rollout is recorded +npx stash plan # detects dual-writes are live; drafts the cutover plan +``` + +`npx stash impl` will refuse to run a cutover-step plan if `cs_migrations` has no `dual_writing` event for `users.email`. 
That refusal is the safety net for cases where someone runs cutover work locally before the code is actually live. + +### Step 2 — Encryption cutover -### Phase 3 — Backfill: encrypt the historical rows +Once dual-writes are live in production and `cs_migrations` records `dual_writing`: -Once dual-writes are live in production: +#### Backfill: encrypt the historical rows ```bash -stash encrypt backfill --table users --column email +npx stash encrypt backfill --table users --column email # (Interactive: answer 'yes' to the dual-write confirmation prompt.) # (CI: pass --confirm-dual-writes-deployed instead.) ``` @@ -431,12 +454,12 @@ Resumable, idempotent, chunked. The CLI walks the table in keyset-pagination ord If something goes wrong (e.g. you discover the dual-write code wasn't actually live when backfill ran), re-run with `--force` to re-encrypt every row regardless of current state. -### Phase 4 — Cutover: rename swap and activate +#### Cutover: rename swap and activate First, update the Drizzle schema to the post-cutover shape — switch `email` to use `encryptedType` and remove the `email_encrypted` column. Then re-push the encryption config so EQL has a pending row that points at `email` (no `_encrypted` suffix): ```bash -stash db push +npx stash db push # → writes the new config as `pending`. Active config (still pointing at # `email_encrypted`) keeps serving while we complete the cutover. ``` @@ -444,12 +467,12 @@ stash db push Now run the cutover: ```bash -stash encrypt cutover --table users --column email +npx stash encrypt cutover --table users --column email ``` Inside one transaction it: (1) renames `email` → `email_plaintext` and `email_encrypted` → `email`, (2) promotes the pending EQL config to `active` (and the prior active to `inactive`), (3) records a `cut_over` event in `cs_migrations`. -The Drizzle schema you just edited now matches the physical DB shape — `email` is the encrypted column. 
Keep the temporary `email_plaintext: text('email_plaintext')` declaration in the schema file until phase 5 drops it: +The Drizzle schema you just edited now matches the physical DB shape — `email` is the encrypted column. Keep the temporary `email_plaintext: text('email_plaintext')` declaration in the schema file until the drop step: ```typescript // src/db/schema.ts (post-cutover) @@ -459,7 +482,7 @@ export const users = pgTable('users', { freeTextSearch: true, equality: true, }), - email_plaintext: text('email_plaintext'), // temporary; dropped in phase 5 + email_plaintext: text('email_plaintext'), // temporary; dropped next }) ``` @@ -481,12 +504,12 @@ const email = decrypted.data.email For queries that filter on `email`, switch to the encrypted operators from `createEncryptionOperators` — `eq`, `like`, `gte`, etc. (See `## Query Encrypted Data` above.) -### Phase 5 — Drop: remove the plaintext column +#### Drop: remove the plaintext column Once read paths are updated and you're confident reads are decrypting correctly, generate the drop migration: ```bash -stash encrypt drop --table users --column email +npx stash encrypt drop --table users --column email ``` The CLI emits a Drizzle migration file with `ALTER TABLE users DROP COLUMN email_plaintext;`. Review and apply with `drizzle-kit migrate`. Update the schema to remove `email_plaintext`: @@ -507,11 +530,12 @@ Also remove the dual-write code from app paths — `email_plaintext` is gone; on ### Inspecting progress at any time ```bash -stash encrypt status # shows current phase, EQL state, backfill progress -stash encrypt plan # diffs your migrations.json intent vs observed state +npx stash status # quest log: where each rollout is, what to do next +npx stash encrypt status # raw per-column phase, EQL state, backfill progress +npx stash encrypt plan # diffs your migrations.json intent vs observed state ``` -Both are read-only. +All three are read-only. 
## Complete Operator Reference diff --git a/skills/stash-dynamodb/SKILL.md b/skills/stash-dynamodb/SKILL.md index 6fd6100e..85d2dd51 100644 --- a/skills/stash-dynamodb/SKILL.md +++ b/skills/stash-dynamodb/SKILL.md @@ -33,6 +33,18 @@ CipherStash encrypts each attribute into two DynamoDB attributes: Non-encrypted attributes pass through unchanged. On decryption, the `__source` and `__hmac` attributes are recombined back into the original attribute name with the plaintext value. +## Rolling Encryption Out to Production + +DynamoDB encryption is **single-deploy**. There is no rollout/cutover split — unlike the Postgres path, DynamoDB has no row-level rename swap and no shared-state proxy. The application owns every write, so adding encryption is an application-side change that ships in one PR: + +1. Declare the encrypted schema (see Setup below). +2. Wrap your DynamoDB client with `encryptedDynamoDB` (or call `encryptItem` / `decryptItem` directly at write/read sites). +3. Ship the change. + +For tables with **existing populated items**, the `__source` and `__hmac` attributes are added by the next write that touches each item. If you need every existing item encrypted at once (e.g. because a query uses `email__hmac` and would miss legacy items), run a one-shot script that reads every item, calls `encryptItem`, and writes it back. Idempotent: re-running an already-encrypted item is a no-op as long as the schema hasn't changed. + +> **Where am I?** Run `npx stash status` (or `bunx`/`pnpm dlx`/`yarn dlx` per your runner) for a project-wide view across both Postgres and DynamoDB integrations. DynamoDB columns surface in the quest log as already-complete since there is no staged lifecycle to track. + ## Setup ### 1. 
Define Encrypted Schema diff --git a/skills/stash-encryption/SKILL.md b/skills/stash-encryption/SKILL.md index 37c30bd1..d79ec922 100644 --- a/skills/stash-encryption/SKILL.md +++ b/skills/stash-encryption/SKILL.md @@ -585,22 +585,60 @@ CREATE TABLE users ( ); ``` -## Column Migration Lifecycle +## Rolling Encryption Out to Production -Adding a fresh encrypted column to a table you don't yet write to is the easy case — declare it in the schema, run the migration, start writing. The harder case is taking an **existing plaintext column with live data** and turning it into an encrypted one without dropping a write or returning the wrong value mid-cutover. CipherStash models that as a six-phase lifecycle: +Adding a fresh encrypted column to a table you don't yet write to is the easy case — declare it in the schema, run the migration, start writing. The harder case is taking an **existing plaintext column with live data** and turning it into an encrypted one without dropping a write or returning the wrong value mid-cutover. + +CipherStash splits that into two named steps with a hard production-deploy gate between them: ``` -schema-added → dual-writing → backfilling → backfilled → cut-over → dropped +ENCRYPTION ROLLOUT → ⛔ deploy gate → ENCRYPTION CUTOVER +───────────────────── ────────────────────── +schema-add backfill historical rows +dual-write code switch reads to encrypted +db push (writes pending) drop plaintext column ``` -| Phase | What's true | What changes here | -|---|---|---| -| `schema-added` | The encrypted twin column (`_encrypted`) exists in the DB and is registered in `eql_v2_configuration`. The plaintext column is unchanged; the application still writes only plaintext. | A schema migration adds the column. | -| `dual-writing` | Application code now writes both `` (plaintext, unchanged) **and** `_encrypted` (encrypted via the encryption client) on every insert/update. Reads still come from the plaintext column. | Persistence-layer code change. 
The CLI cannot detect this state; the user (or agent) declares the transition. | -| `backfilling` | A backfill job is encrypting the existing plaintext rows into `_encrypted`, in chunks, resumably. New rows continue to land in both columns from dual-writing. | The backfill engine in `@cipherstash/migrate` (driven by `stash encrypt backfill`). | -| `backfilled` | Every row has a non-null `_encrypted` value. Plaintext column still authoritative for reads. | Backfill completes, records the transition. | -| `cut-over` | A single transaction renames `` → `_plaintext` and `_encrypted` → `` (`eql_v2.rename_encrypted_columns()`). Application reads of `` now return decrypted ciphertext transparently — no app code change required for reads. | One DB transaction. | -| `dropped` | `_plaintext` is removed via a regular schema migration. The application stops writing to it (dual-writing logic is removed). | App-code change to remove dual-writes + a schema migration. | +The gate is the rule that backfill is only safe once the dual-write code is **running in the production environment that owns the database** — not on the developer's laptop, not in CI. Any row inserted during the backfill window must be written to both columns by the application; otherwise it lands in plaintext only and creates silent migration drift. + +> **Runner note.** Examples below use `npx stash` for npm projects. Substitute `bunx stash` (Bun), `pnpm dlx stash` (pnpm), or `yarn dlx stash` (Yarn) — or run `stash` directly when it is installed as a project dev dep (`stash init` sets that up). The behaviour is identical across runners; only the prefix changes. The `stash-cli` skill has the full mapping. + +### Where am I? + +Always start with `stash status` (`npx stash status` / `pnpm dlx stash status` / etc., per the runner note above). It is disk-only, idempotent, and tells you which encryption rollouts are in flight, what's been deployed, and what the next move is per column. 
Re-run it after every transition. Never act blind.
+
+### Step 1 — Encryption rollout
+
+Everything that lands in the repo and ships in **one** PR:
+
+| Action | What changes |
+|---|---|
+| Schema-add | Migration adds `<column>_encrypted` (nullable `jsonb`) alongside the existing plaintext column. Plaintext column unchanged; application still writes only plaintext. |
+| `stash db push` | Registers the new column in `eql_v2_configuration`. With no active config yet, writes directly to `active`. With an existing active config, writes `pending` — cutover (later) will promote it. |
+| Dual-write code | Application now writes both `<column>` and `<column>_encrypted` on every persistence path that mutates the row, in the same transaction, on every code branch. Reads still come from the plaintext column. |
+
+**The dual-write definition matters.** "Writes both columns" is not enough. The rule is: every persistence path that mutates this row writes both columns, in the same transaction, on every code branch. A single missed branch — a CSV import, an admin action, a background job, a third-party webhook handler — means rows inserted in production after deploy land in plaintext only, and backfill won't catch them. Grep for every site that writes the plaintext column before declaring rollout complete.
+
+### ⛔ Deploy gate
+
+Stop. The rollout PR ships to production. The deployed environment must be running this code before any cutover-step work is safe.
+
+When the deploy is live, run `npx stash status`. Look for the active quest's "Next move" hint to confirm dual-writes are recorded. Then run `npx stash plan` again — the CLI detects that dual-writes are live and writes a separate cutover plan.
+
+`npx stash impl` will refuse to run a cutover-step plan if `cs_migrations` has no `dual_writing` event for the targeted columns. That refusal is intentional; it's the safety net for cases where someone runs cutover work locally before the code is actually live.
+
+### Step 2 — Encryption cutover
+
+Once dual-writes are recorded as live in `cs_migrations`:
+
+| Action | What changes |
+|---|---|
+| `stash encrypt backfill` | Walks the table in keyset-pagination order, encrypts each chunk, writes a single transactional `UPDATE` per chunk plus a `cs_migrations` checkpoint. SIGINT-safe; idempotent re-runs converge. |
+| Schema rename + `stash db push` | Update the schema file: drop the `_encrypted` suffix; switch the original column declaration onto the encrypted type. Push registers the renamed shape as `pending`. |
+| `stash encrypt cutover` | One transaction: renames `<column>` → `<column>_plaintext`, `<column>_encrypted` → `<column>`, and promotes `pending` → `active`. Application reads of `<column>` now return decrypted ciphertext transparently. |
+| Wire reads through the encryption client | Read paths must decrypt before returning the value to callers (`decryptModel(row, table)` for Drizzle; `encryptedSupabase` wrapper for Supabase; `decrypt`/`bulkDecryptModels` otherwise). Without this step, reads return raw `eql_v2_encrypted` payloads to end users. |
+| Remove dual-write code | The plaintext column is now `<column>_plaintext` and is no longer authoritative. Delete the dual-write logic. |
+| `stash encrypt drop` | Emits a migration that removes `<column>_plaintext`. Apply with the project's normal migration tooling. |
 
 ### State storage
@@ -610,50 +648,54 @@ Three sources of truth, kept separate on purpose:
 
 - **`eql_v2_configuration`** (DB, EQL-managed) — *EQL intent*. Which columns are encrypted and with which indexes; drives the CipherStash Proxy.
 - **`cipherstash.cs_migrations`** (DB, CipherStash-managed) — *runtime state*. Append-only event log: phase transitions, backfill cursors, error rows. Latest row per `(table, column)` is the current state.
 
-`stash encrypt status` shows all three side-by-side and flags drift (e.g.
EQL says registered, the physical `_encrypted` column is missing). `stash status` (the quest log) rolls them up into the per-column "what's the next move" view used during a rollout. -### CLI surface +> **Note on internal phase names.** The runtime event log uses `schema-added → dual-writing → backfilling → backfilled → cut-over → dropped` as machine-readable phase names. They appear in `cs_migrations` rows and `stash encrypt status` output. Treat them as internal mechanism detail — the user-facing story is "encryption rollout, then cutover, with a deploy gate in between." -The `stash encrypt` command group drives each phase. See the `stash-cli` skill for full flag reference. Typical sequence for a single column: +### CLI sequence for a single column ```bash -# Phase 1 — schema-added -# Add the encrypted twin column via your normal migration tooling -# (drizzle-kit / supabase migrations / etc.). Then register the new -# encryption config with EQL: -stash db push -# First push (no active config yet) → writes directly to active. -# Subsequent push (active already exists) → writes pending; cutover -# in phase 4 will promote it. - -# Phase 2 + 3 — dual-writing then backfilling, in one command -# (First, edit the application code to write both columns and ship that deploy. -# Then run backfill — it will prompt to confirm dual-writes are live, append -# the `dual_writing` event, and run the chunked encryption loop.) -stash encrypt backfill --table users --column email -# In CI / non-interactive contexts, swap the prompt for the explicit flag: -stash encrypt backfill --table users --column email --confirm-dual-writes-deployed -# Resumable; checkpoints to cs_migrations after every chunk. SIGINT-safe. - -# Recovery — if dual-writes weren't actually live when backfill first ran, -# rows inserted during the backfill landed in plaintext only and the encrypted -# twin is stale. Re-run with --force to re-encrypt every row regardless. 
-stash encrypt backfill --table users --column email --force - -# Phase 4 — cut-over -# First, edit the schema to drop the `_encrypted` suffix (the column will now -# be named `email`, declared with encryptedType / encryptedColumn). Re-push: -stash db push -# → writes the renamed-shape config as `pending`. The active config (still -# pointing at `email_encrypted`) keeps serving until cutover finishes. - -# Now run the cutover. In one transaction: rename the physical columns, -# promote pending → active, record cs_migrations event. -stash encrypt cutover --table users --column email - -# Phase 5 — dropped -stash encrypt drop --table users --column email -# Emits a migration file removing _plaintext. Apply with your normal tooling. +# Run this often — it's the canonical "where am I?" command. +npx stash status + +# ---- ENCRYPTION ROLLOUT (one PR, one deploy) ---- +# 1. Add the encrypted twin column via your normal migration tooling +# (drizzle-kit / supabase migrations / etc.). +# 2. Register the new encryption config with EQL: +npx stash db push +# First push (no active config yet) → writes directly to active. +# Subsequent push (active already exists) → writes pending; cutover +# will promote it. +# 3. Edit application code so every persistence path writes both +# `` and `_encrypted` in the same transaction, on every +# code branch. +# 4. Ship the PR to production. + +# ---- ⛔ DEPLOY GATE ---- +# Verify dual-writes are live, then redraft the plan for cutover work: +npx stash status +npx stash plan + +# ---- ENCRYPTION CUTOVER ---- +npx stash encrypt backfill --table users --column email +# Prompts to confirm dual-writes are live (or pass +# --confirm-dual-writes-deployed in CI). Resumable; SIGINT-safe. + +# Recovery — if dual-writes weren't actually live when backfill ran, +# re-run with --force to encrypt every plaintext row regardless. 
+npx stash encrypt backfill --table users --column email --force
+
+# Edit the schema to drop the `_encrypted` suffix, then re-push:
+npx stash db push
+# → writes the renamed-shape config as `pending`. The active config
+# keeps serving until cutover finishes.
+
+npx stash encrypt cutover --table users --column email
+# In one transaction: rename physical columns, promote pending → active.
+
+# Wire the read paths through the encryption client. Remove dual-write
+# code. Then drop the plaintext column:
+npx stash encrypt drop --table users --column email
 ```
 
 ### Library use
@@ -682,12 +724,13 @@ await runBackfill({
 Useful when the backfill needs to run in a worker, on a schedule, or alongside an existing job runner.
 
-### Invariants the lifecycle preserves
+### Invariants the rollout preserves
 
-- **Reads never return the wrong value.** Until cut-over, reads come from the plaintext column. After cut-over, the same `SELECT email` returns the decrypted ciphertext via Proxy or the encryption client. There is no in-between.
-- **Writes never drop.** Dual-writing keeps both columns in sync until the cut-over moment. After cut-over, writes go to the encrypted column.
+- **Reads never return the wrong value.** Until cutover, reads come from the plaintext column. After cutover, the same `SELECT email` returns the decrypted ciphertext via Proxy or the encryption client. There is no in-between.
+- **Writes never drop.** Dual-writing keeps both columns in sync until the cutover moment. After cutover, writes go to the encrypted column.
+- **The deploy gate is a one-way door for production.** Backfill against rows the dual-write code never saw produces silent drift. The CLI refuses to run cutover-step plans without a `dual_writing` event recorded; do not paper over that refusal.
 - **Re-runs are safe.** Backfill is idempotent (`<column> IS NOT NULL AND <column>_encrypted IS NULL` guards every chunk). `cs_migrations` is append-only.
-- **Rollback is possible up to cut-over.** Until the rename happens, the plaintext column is authoritative; aborting just leaves the encrypted twin partially populated. After cut-over, rollback is a manual restore — the migration plan should treat cut-over as the one-way door. +- **Rollback is possible up to cutover.** Until the rename happens, the plaintext column is authoritative; aborting just leaves the encrypted twin partially populated. After cutover, rollback is a manual restore — treat cutover as the one-way door for data. ## Migration from @cipherstash/protect diff --git a/skills/stash-supabase/SKILL.md b/skills/stash-supabase/SKILL.md index d0de8d27..f1d008c0 100644 --- a/skills/stash-supabase/SKILL.md +++ b/skills/stash-supabase/SKILL.md @@ -401,3 +401,199 @@ type EncryptedSupabaseError = { - `EncryptedQueryBuilder` - `PendingOrCondition` - `SupabaseClientLike` + +## Migrating an Existing Column to Encrypted + +The hard case: a Supabase table that already exists with live data in a plaintext column you want to encrypt. You can't just change the column type — that would drop the data. + +CipherStash splits this into two named steps with a hard production-deploy gate between them: an **encryption rollout** (schema-add + dual-write code + `db push`) and an **encryption cutover** (backfill + rename + drop). The `stash-encryption` skill is the canonical reference for the lifecycle; this section walks the Supabase-specific shape. + +> **Runner note.** Examples below use `npx stash` for npm projects. Substitute `bunx stash` (Bun), `pnpm dlx stash` (pnpm), or `yarn dlx stash` (Yarn) — or run `stash` directly when it is installed as a project dev dep. The behaviour is identical across runners; only the prefix changes. + +> **Where am I?** Run `stash status` first (substitute the runner per the note above). It shows you which tables/columns are mid-rollout, which are post-deploy, and what the next move is. Re-run after every transition. 
+ +### Starting state + +You have: + +```sql +-- supabase/migrations/_initial.sql (already applied) +CREATE TABLE users ( + id uuid PRIMARY KEY DEFAULT gen_random_uuid(), + email text NOT NULL, -- plaintext, populated, NOT NULL + created_at timestamptz DEFAULT now() +); +``` + +…and an `await supabase.from('users').insert({ email })` somewhere in your app code. + +### Step 1 — Encryption rollout (one PR, one deploy) + +Everything below lands in one PR. The deploy of that PR is the gate. + +#### Schema-add: declare the encrypted twin + +Generate a Supabase migration: + +```bash +supabase migration new add_users_email_encrypted +``` + +Edit the generated file to add an `email_encrypted` column **alongside** `email`. The encrypted column must be **nullable** at creation — never `NOT NULL`, because rows that already exist will have NULL in this column until backfill catches them. + +```sql +-- supabase/migrations/_add_users_email_encrypted.sql +ALTER TABLE users + ADD COLUMN email_encrypted eql_v2_encrypted; -- nullable +``` + +Apply with `supabase db reset` locally or `supabase migration up` against the remote project. + +Update the encryption schema to declare the new encrypted column: + +```typescript +// src/encryption/schema.ts +import { encryptedTable, encryptedColumn } from '@cipherstash/stack/schema' + +export const users = encryptedTable('users', { + email_encrypted: encryptedColumn('email_encrypted') + .freeTextSearch() + .equality(), +}) + +// src/encryption/index.ts +import { Encryption } from '@cipherstash/stack' +import { users } from './schema' + +export const encryptionClient = await Encryption({ schemas: [users] }) +``` + +Register the new encryption config with EQL: + +```bash +npx stash db push +``` + +If this is the project's first encrypted column, `db push` writes directly to the active EQL config. If an active config already exists, it writes the new config as `pending` — that's expected. Cutover (later) will promote it. 
+ +#### Dual-writing: write to both columns from app code + +Find **every** code path that writes to `users.email` and update it to encrypt and also write to `email_encrypted`. The cleanest pattern is to keep the raw `supabase` client for the plaintext write and use the `encryptedSupabase` wrapper for the encrypted write — wrapped in a single function so callers can't forget one half: + +```typescript +// src/db/users.ts +import { supabase, encrypted } from './clients' +import { users } from '../encryption/schema' + +export async function insertUser(email: string) { + // The encryptedSupabase wrapper handles the encryption call for you; + // the plaintext write is a separate `supabase` call so the rollout + // does not change read behaviour for `email` yet. + const ciphertext = await encrypted.encryptValue(email, { + table: users, + column: 'email_encrypted', + }) + if (ciphertext.failure) throw new Error(ciphertext.failure.message) + + return supabase.from('users').insert({ + email, // plaintext — keep writing + email_encrypted: ciphertext.data, // encrypted twin — new + }) +} +``` + +Same shape for UPDATE: every site that updates `email` must also re-encrypt and update `email_encrypted` in the same statement. + +**The dual-write rule.** Every persistence path that mutates this row writes both columns, in the same transaction, on every code branch. Insert sites, update sites, upserts, ON CONFLICT clauses, seeders, fixtures, edge functions, RPC functions, admin actions, background jobs, third-party webhooks — all of them. A single missed branch means rows inserted in production after deploy land in plaintext only, and backfill won't catch them. Grep for every site that touches `users.email` before declaring this step done. + +After this phase, existing rows still have `email_encrypted = NULL`. Reads still come from `email`. Nothing has broken. + +### ⛔ Deploy gate + +Stop. Ship this PR to production. 
The deployed environment must be running the dual-write code before any cutover-step work is safe. + +When the deploy is live: + +```bash +npx stash status # verify the rollout is recorded +npx stash plan # detects dual-writes are live; drafts the cutover plan +``` + +`npx stash impl` will refuse to run a cutover-step plan if `cs_migrations` has no `dual_writing` event for `users.email`. That refusal is the safety net for cases where someone runs cutover work locally before the code is actually live. + +### Step 2 — Encryption cutover + +Once dual-writes are live in production and `cs_migrations` records `dual_writing`: + +#### Backfill: encrypt the historical rows + +```bash +npx stash encrypt backfill --table users --column email +# (Interactive: answer 'yes' to the dual-write confirmation prompt.) +# (CI: pass --confirm-dual-writes-deployed instead.) +``` + +Resumable, idempotent, chunked. The CLI walks the table in keyset-pagination order, encrypts each chunk via the encryption client, and writes the ciphertext into `email_encrypted` inside transactions that also checkpoint to `cs_migrations`. SIGINT-safe. + +If something goes wrong (e.g. you discover the dual-write code wasn't actually live when backfill ran), re-run with `--force` to re-encrypt every row regardless of current state. + +#### Cutover: rename swap and activate + +First, update the encryption schema to the post-cutover shape — the encrypted column will live under the original column name: + +```typescript +// src/encryption/schema.ts (post-cutover) +export const users = encryptedTable('users', { + email: encryptedColumn('email').freeTextSearch().equality(), +}) +``` + +Re-push the encryption config so EQL has a pending row that points at `email` (no `_encrypted` suffix): + +```bash +npx stash db push +# → writes the new config as `pending`. Active config (still pointing at +# `email_encrypted`) keeps serving while we complete the cutover. 
+``` + +Now run the cutover: + +```bash +npx stash encrypt cutover --table users --column email +``` + +Inside one transaction it: (1) renames `email` → `email_plaintext` and `email_encrypted` → `email`, (2) promotes the pending EQL config to `active` (and the prior active to `inactive`), (3) records a `cut_over` event in `cs_migrations`. + +App code that does `select('email')` now returns ciphertext that must be decrypted via the `encryptedSupabase` wrapper. **This is the moment that breaks read paths if they aren't going through the wrapper.** + +Update read paths to use `encryptedSupabase`: + +```typescript +// Before +const { data } = await supabase.from('users').select('email').eq('id', id).single() + +// After — encryptedSupabase decrypts transparently +const { data } = await encrypted.from('users').select('email').eq('id', id).single() +``` + +For queries that filter on `email`, the `encryptedSupabase` wrapper handles the encrypted operators internally — the call site is the same shape as before (`.eq()`, `.like()`, `.ilike()`, `.gte()`, etc.), but the values are encrypted before reaching the database. See `## Query Filters` above. + +#### Drop: remove the plaintext column + +Once read paths are routing through `encryptedSupabase` and you're confident reads are decrypting correctly: + +```bash +npx stash encrypt drop --table users --column email +``` + +The CLI emits a Supabase migration file with `ALTER TABLE users DROP COLUMN email_plaintext;`. Review and apply with `supabase migration up` (or `supabase db reset` locally). Then remove the dual-write code from app paths — `email_plaintext` is gone; only `email` (encrypted) is written now via `encryptedSupabase`. 
+ +### Inspecting progress at any time + +```bash +npx stash status # quest log: where each rollout is, what to do next +npx stash encrypt status # raw per-column phase, EQL state, backfill progress +npx stash encrypt plan # diffs your migrations.json intent vs observed state +``` + +All three are read-only. From 1df1a49b200c7e38d857d4ecc795ccb7517e44e6 Mon Sep 17 00:00:00 2001 From: CJ Brewer Date: Thu, 7 May 2026 17:18:24 -0600 Subject: [PATCH 2/2] chore: address review feedback --- .../src/commands/encrypt/lib/db-readers.ts | 114 ++++++++++ packages/cli/src/commands/encrypt/status.ts | 90 +------- packages/cli/src/commands/impl/index.ts | 32 +-- .../src/commands/impl/steps/handoff-wizard.ts | 10 +- .../init/lib/__tests__/parse-plan.test.ts | 30 ++- .../cli/src/commands/init/lib/parse-plan.ts | 62 ++---- .../src/commands/init/lib/rollout-state.ts | 61 ++---- .../src/commands/init/lib/write-context.ts | 8 + packages/cli/src/commands/plan/index.ts | 4 + .../commands/status/__tests__/status.test.ts | 207 +++++++++++------- packages/cli/src/commands/status/index.ts | 118 ++++------ packages/cli/src/commands/status/quest.ts | 110 +++++----- packages/cli/src/commands/status/render.ts | 23 +- skills/stash-cli/SKILL.md | 110 +++++----- skills/stash-drizzle/SKILL.md | 24 +- skills/stash-dynamodb/SKILL.md | 2 +- skills/stash-encryption/SKILL.md | 26 +-- skills/stash-secrets/SKILL.md | 22 +- skills/stash-supabase/SKILL.md | 24 +- 19 files changed, 569 insertions(+), 508 deletions(-) create mode 100644 packages/cli/src/commands/encrypt/lib/db-readers.ts diff --git a/packages/cli/src/commands/encrypt/lib/db-readers.ts b/packages/cli/src/commands/encrypt/lib/db-readers.ts new file mode 100644 index 00000000..3e1a4ad5 --- /dev/null +++ b/packages/cli/src/commands/encrypt/lib/db-readers.ts @@ -0,0 +1,114 @@ +import { latestByColumn } from '@cipherstash/migrate' +import type pg from 'pg' + +/** + * `latestByColumn` from `@cipherstash/migrate`, but tolerant of the + * pre-install 
case where `cipherstash.cs_migrations` doesn't exist. + * The encryption-rollout commands all need to be readable on a fresh + * project; treating "table missing" as "no events" keeps them so. + */ +export async function latestByColumnSafe( + client: pg.ClientBase, +): Promise extends Promise ? T : never> { + try { + return (await latestByColumn(client)) as Awaited> + } catch (err) { + if ( + err instanceof Error && + /cs_migrations|schema "cipherstash"/i.test(err.message) + ) { + return new Map() as Awaited> + } + throw err + } +} + +export interface EqlColumnInfo { + /** Index kinds attached to this column in the EQL config (`unique`, + * `match`, `ore`, `ste_vec`). Empty when no indexes are configured. */ + indexes: string[] + /** Lifecycle state of the EQL config row this column belongs to. */ + state: 'active' | 'pending' | 'encrypting' +} + +/** + * Read every column registered in `eql_v2_configuration` (active, + * pending, or encrypting) keyed by `.`. Active rows win + * when a column appears in more than one state. + * + * The call is best-effort: if `eql_v2_configuration` doesn't exist yet + * (EQL not installed), an empty map is returned instead of throwing. + */ +export async function fetchActiveEqlConfig( + client: pg.ClientBase, +): Promise> { + const out = new Map() + try { + const result = await client.query<{ state: string; data: unknown }>( + `SELECT state, data FROM public.eql_v2_configuration + WHERE state IN ('active', 'pending', 'encrypting') + ORDER BY CASE state WHEN 'active' THEN 0 WHEN 'encrypting' THEN 1 ELSE 2 END`, + ) + for (const row of result.rows) { + const data = row.data as { + tables?: Record< + string, + Record }> + > + } | null + if (!data?.tables) continue + for (const [tableName, columns] of Object.entries(data.tables)) { + for (const [columnName, column] of Object.entries(columns)) { + const key = `${tableName}.${columnName}` + if (out.has(key)) continue + out.set(key, { + indexes: Object.keys(column.indexes ?? 
{}), + state: row.state as 'active' | 'pending' | 'encrypting', + }) + } + } + } + } catch (err) { + if (err instanceof Error && /eql_v2_configuration/i.test(err.message)) { + return out + } + throw err + } + return out +} + +/** + * Read `information_schema.columns` and group column names by table. + * When `tables` is provided the query is constrained to that set — + * status's quest log only ever needs ~5 specific tables, so passing + * the manifest's tables avoids a full-schema scan. + */ +export async function fetchPhysicalColumns( + client: pg.ClientBase, + tables?: ReadonlyArray, +): Promise>> { + const out = new Map>() + try { + const result = + tables === undefined + ? await client.query<{ table_name: string; column_name: string }>( + `SELECT table_name, column_name FROM information_schema.columns + WHERE table_schema = current_schema()`, + ) + : await client.query<{ table_name: string; column_name: string }>( + `SELECT table_name, column_name FROM information_schema.columns + WHERE table_schema = current_schema() + AND table_name = ANY($1::text[])`, + [tables], + ) + for (const row of result.rows) { + const set = out.get(row.table_name) ?? new Set() + set.add(row.column_name) + out.set(row.table_name, set) + } + } catch { + // information_schema is always present; failures here are surprising + // enough to swallow rather than crash the read-only status path. 
+ } + return out +} diff --git a/packages/cli/src/commands/encrypt/status.ts b/packages/cli/src/commands/encrypt/status.ts index ec30007f..94d5fb1c 100644 --- a/packages/cli/src/commands/encrypt/status.ts +++ b/packages/cli/src/commands/encrypt/status.ts @@ -1,12 +1,14 @@ import { detectPackageManager, runnerCommand } from '@/commands/init/utils.js' import { loadStashConfig } from '@/config/index.js' -import { - type MigrationPhase, - latestByColumn, - readManifest, -} from '@cipherstash/migrate' +import { type MigrationPhase, readManifest } from '@cipherstash/migrate' import * as p from '@clack/prompts' import pg from 'pg' +import { + type EqlColumnInfo, + fetchActiveEqlConfig, + fetchPhysicalColumns, + latestByColumnSafe, +} from './lib/db-readers.js' interface Row { table: string @@ -53,7 +55,7 @@ export async function statusCommand() { if (manifest) { for (const [tableName, columns] of Object.entries(manifest.tables)) { for (const column of columns) { - const key = `${tableName}.${column.column}` + const key: `${string}.${string}` = `${tableName}.${column.column}` seen.add(key) rows.push( renderRow({ @@ -110,82 +112,6 @@ export async function statusCommand() { if (exitCode) process.exit(exitCode) } -async function latestByColumnSafe(client: pg.Client) { - try { - return await latestByColumn(client) - } catch (err) { - if ( - err instanceof Error && - /cs_migrations|schema "cipherstash"/i.test(err.message) - ) { - return new Map() - } - throw err - } -} - -interface EqlColumnInfo { - indexes: string[] - state: 'active' | 'pending' | 'encrypting' -} - -async function fetchActiveEqlConfig( - client: pg.Client, -): Promise> { - const out = new Map() - try { - const result = await client.query<{ state: string; data: unknown }>( - `SELECT state, data FROM public.eql_v2_configuration - WHERE state IN ('active', 'pending', 'encrypting') - ORDER BY CASE state WHEN 'active' THEN 0 WHEN 'encrypting' THEN 1 ELSE 2 END`, - ) - for (const row of result.rows) { - const data = 
row.data as { - tables?: Record< - string, - Record }> - > - } | null - if (!data?.tables) continue - for (const [tableName, columns] of Object.entries(data.tables)) { - for (const [columnName, column] of Object.entries(columns)) { - const key = `${tableName}.${columnName}` - if (out.has(key)) continue - out.set(key, { - indexes: Object.keys(column.indexes ?? {}), - state: row.state as 'active' | 'pending' | 'encrypting', - }) - } - } - } - } catch (err) { - if (err instanceof Error && /eql_v2_configuration/i.test(err.message)) { - return out - } - throw err - } - return out -} - -async function fetchPhysicalColumns( - client: pg.Client, -): Promise>> { - const out = new Map>() - const result = await client.query<{ - table_name: string - column_name: string - }>( - `SELECT table_name, column_name FROM information_schema.columns - WHERE table_schema = current_schema()`, - ) - for (const row of result.rows) { - const set = out.get(row.table_name) ?? new Set() - set.add(row.column_name) - out.set(row.table_name, set) - } - return out -} - function renderRow(input: { tableName: string columnName: string diff --git a/packages/cli/src/commands/impl/index.ts b/packages/cli/src/commands/impl/index.ts index f23b0735..bd2e7902 100644 --- a/packages/cli/src/commands/impl/index.ts +++ b/packages/cli/src/commands/impl/index.ts @@ -85,6 +85,7 @@ async function verifyCutoverPreconditions( } const states = await detectColumnStates(databaseUrl, migrate) + if (states === null) return null return states .filter((s) => classifyPhase(s.phase) !== 'cutover') .map((s) => ({ table: s.table, column: s.column })) @@ -122,30 +123,13 @@ function printDeployGateBanner(cli: string): void { } /** - * `stash impl` — execute an encryption plan. + * `stash impl` — execute an encryption plan via agent handoff. * - * Always runs in implement mode. 
Behaviour branches on disk state and - * flags: - * - * - **Plan exists** (TTY): parse the structured summary block, render - * a confirmation panel, ask the user to proceed. Default-yes. - * For `cutover`-step plans, verify `dual_writing` events are - * recorded for every migrate column before launching — refuse if - * not, and point the user at re-running `stash plan` after deploy. - * - **Plan exists** (non-TTY): proceed without confirmation. - * - **No plan, `--continue-without-plan`**: confirm once, then implement. - * - **No plan, TTY**: present a `p.select` — draft a plan first - * (delegates to `planCommand`) or continue without one (confirms - * once, then implements). - * - **No plan, non-TTY**: error out with a clear next-action; CI must - * pass `--continue-without-plan` or run `stash plan` first. - * - * After successful handoff, the outro depends on plan step: - * - `rollout` — deploy-gate banner; explicit "do not run encrypt - * backfill yet" message. - * - `cutover` — confirmation that the rollout is fully complete. - * - `complete` — same as cutover (escape hatch covers everything). - * - no plan / no summary — generic "verify state" pointer. + * Cutover-step plans are gated on a `dual_writing` event being recorded + * in `cs_migrations` for every migrate column (the safety net that + * stops destructive work running ahead of a production deploy of the + * dual-write code). After a rollout-step handoff, the outro is the + * deploy-gate banner instead of a generic "verify state" pointer. */ export async function implCommand(flags: Record) { const cwd = process.cwd() @@ -200,7 +184,7 @@ export async function implCommand(flags: Record) { // (potentially hour-long) implementation phase. if (isTTY) { if (summary) { - p.note(renderPlanSummary(summary), 'Plan summary') + p.note(renderPlanSummary(summary, cli), 'Plan summary') } else { p.note( `Plan at \`${PLAN_REL_PATH}\` doesn't include a machine-readable summary. 
Open it in your editor before proceeding.`, diff --git a/packages/cli/src/commands/impl/steps/handoff-wizard.ts b/packages/cli/src/commands/impl/steps/handoff-wizard.ts index 0642dcb5..32334b0c 100644 --- a/packages/cli/src/commands/impl/steps/handoff-wizard.ts +++ b/packages/cli/src/commands/impl/steps/handoff-wizard.ts @@ -12,10 +12,12 @@ import { runWizardSpawn } from '../../wizard/index.js' * Hand off to the CipherStash Agent (the in-house wizard package). * * Writes `.cipherstash/context.json` so the wizard has the same prepared - * facts the other handoffs use, then spawns the wizard via `runWizardSpawn` - * — the same path the top-level `stash wizard` subcommand takes, but with - * the exit code surfaced rather than `process.exit`-ed so `stash impl` can - * finish its own outro. + * facts the other handoffs use (including the resolved `planStep` when + * `stash plan` is the caller — the wizard reads it from there rather + * than via argv), then spawns the wizard via `runWizardSpawn` — the same + * path the top-level `stash wizard` subcommand takes, but with the exit + * code surfaced rather than `process.exit`-ed so `stash impl` can finish + * its own outro. * * No skills are installed here. The wizard fetches its own agent-side * prompt from the gateway and runs its own `maybeInstallSkills` flow. diff --git a/packages/cli/src/commands/init/lib/__tests__/parse-plan.test.ts b/packages/cli/src/commands/init/lib/__tests__/parse-plan.test.ts index cd9e6471..f18d7b50 100644 --- a/packages/cli/src/commands/init/lib/__tests__/parse-plan.test.ts +++ b/packages/cli/src/commands/init/lib/__tests__/parse-plan.test.ts @@ -130,6 +130,11 @@ describe('parsePlanSummary', () => { }) describe('renderPlanSummary', () => { + // Use a recognisable non-npm runner in tests so we can assert that the + // footer is rendered with whatever `cli` the caller passes in — never + // hard-codes `npx stash`. 
+ const CLI = 'pnpm dlx stash' + function summary( columns: PlanSummary['columns'], step?: PlanSummary['step'], @@ -144,6 +149,7 @@ describe('renderPlanSummary', () => { { table: 'users', column: 'phone', path: 'migrate' }, { table: 'orders', column: 'notes', path: 'migrate' }, ]), + CLI, ) expect(out).toContain('3 columns across 2 tables') }) @@ -151,6 +157,7 @@ describe('renderPlanSummary', () => { it('uses singular forms when counts are 1', () => { const out = renderPlanSummary( summary([{ table: 'users', column: 'email', path: 'new' }]), + CLI, ) expect(out).toContain('1 column across 1 table') expect(out).not.toContain('1 columns') @@ -163,6 +170,7 @@ describe('renderPlanSummary', () => { { table: 'users', column: 'email', path: 'new' }, { table: 'users', column: 'phone', path: 'migrate' }, ]), + CLI, ) expect(out).toContain('users.email') expect(out).toContain('users.phone') @@ -170,16 +178,22 @@ describe('renderPlanSummary', () => { expect(out).toContain('migrate existing column') }) - it('rollout step: footer mentions deploy gate and the next-plan handoff', () => { + it('rollout step: footer mentions deploy gate and uses the caller-supplied runner', () => { const out = renderPlanSummary( summary( [{ table: 'users', column: 'phone', path: 'migrate' }], 'rollout', ), + CLI, ) expect(out).toMatch(/encryption rollout/i) expect(out).toMatch(/deploy/i) - expect(out).toMatch(/stash plan/) + // The runner prefix is whatever the caller passed — never `npx stash` + // baked in. This guards the regression where the footer hard-coded + // `npx` and broke for bun / pnpm / yarn projects. 
+ expect(out).toContain(`${CLI} status`) + expect(out).toContain(`${CLI} plan`) + expect(out).not.toContain('npx stash status') }) it('cutover step: footer mentions backfill, reads switch, and drop', () => { @@ -188,6 +202,7 @@ describe('renderPlanSummary', () => { [{ table: 'users', column: 'phone', path: 'migrate' }], 'cutover', ), + CLI, ) expect(out).toMatch(/encryption cutover/i) expect(out).toMatch(/backfill/i) @@ -200,18 +215,16 @@ describe('renderPlanSummary', () => { [{ table: 'users', column: 'phone', path: 'migrate' }], 'complete', ), + CLI, ) expect(out).toMatch(/complete encryption rollout/i) expect(out).toMatch(/skips the production-deploy gate/i) }) it('legacy plans without `step`: defaults to complete-rollout footer', () => { - // Pre-split plans had no step field. effectiveStep falls back to - // `complete`; the footer reflects that, and the user/agent reading the - // plan in `stash impl` sees the same text they would have seen had - // they generated the plan via `stash plan --complete-rollout` today. const out = renderPlanSummary( summary([{ table: 'users', column: 'phone', path: 'migrate' }]), + CLI, ) expect(out).toMatch(/complete encryption rollout/i) }) @@ -222,17 +235,16 @@ describe('renderPlanSummary', () => { { table: 'users', column: 'email', path: 'new' }, { table: 'users', column: 'phone', path: 'new' }, ]), + CLI, ) expect(out).toContain('single-deploy') expect(out).not.toMatch(/encryption rollout/i) }) it('all-additive plans ignore step (no migrate columns means no rollout split)', () => { - // An agent could conceivably emit `step: "rollout"` on an all-new plan. - // The renderer should still describe it as single-deploy because the - // rollout/cutover split is intrinsic to migrate columns, not to step. 
const out = renderPlanSummary( summary([{ table: 'users', column: 'email', path: 'new' }], 'rollout'), + CLI, ) expect(out).toContain('single-deploy') }) diff --git a/packages/cli/src/commands/init/lib/parse-plan.ts b/packages/cli/src/commands/init/lib/parse-plan.ts index 4e9db36b..b4afca79 100644 --- a/packages/cli/src/commands/init/lib/parse-plan.ts +++ b/packages/cli/src/commands/init/lib/parse-plan.ts @@ -1,32 +1,12 @@ /** * Parse and render `.cipherstash/plan.md` summary blocks. * - * The agent is instructed (in `renderPlanPrompt`) to begin the plan file - * with an HTML-comment block carrying a structured JSON summary: - * - * - * - * `step` (optional for backwards compatibility) tells `stash impl` which - * scope of the encryption rollout this plan covers: - * - `"rollout"` — schema-add + dual-write code + db push (pending). - * Deploy gate after this; cutover comes in a later run. - * - `"cutover"` — backfill + cutover + drop. Requires `dual_writing` - * events in `cs_migrations`; impl refuses otherwise. - * - `"complete"` — the whole lifecycle in one document (escape hatch - * for users without a production-deploy to gate on). - * - * Plans without `step` are treated as `"complete"` for backwards - * compatibility — that is how every plan was shaped before the rollout - * split landed. Plans without the block (or with a malformed one) fall - * back to a soft "open the plan in your editor" message — never an error. + * The agent embeds an HTML-comment header at the top of the plan file + * carrying a JSON summary that `stash impl` parses. Plans without the + * block fall back to a soft "open it in your editor" message — never + * an error. `step` is optional for backwards compat: plans pre-dating + * the rollout/cutover split had no step field and were always + * end-to-end, so missing `step` is treated as `"complete"`. 
*/ export type PlanPath = 'new' | 'migrate' @@ -106,7 +86,10 @@ export function effectiveStep(summary: PlanSummary): PlanStep { const COLUMN_LABEL_WIDTH = 20 /** - * Render the plan summary as the body of a `p.note` panel. + * Render the plan summary as the body of a `p.note` panel. The `cli` + * argument is the package-manager-aware command prefix (e.g. `pnpm dlx + * stash` / `bunx stash` / `npx stash`) — pass `runnerCommand(pm, 'stash')` + * so the rendered footer uses the runner the user actually invokes. * * 3 columns across 2 tables * @@ -116,17 +99,16 @@ const COLUMN_LABEL_WIDTH = 20 * * Encryption rollout — implementation lands schema-add and dual-write * code in your repo. Deploy that to production, verify with - * `npx stash status`, then run `npx stash plan` again to draft the - * encryption cutover. + * ` stash status`, then run ` stash plan` again to + * draft the encryption cutover. * * Footer copy varies by step: - * - rollout → "Encryption rollout — deploy gate next." - * - cutover → "Encryption cutover — backfill, switch reads, drop plaintext." - * - complete → "Complete rollout — skips the deploy gate; only safe when - * this database is not backing a deployed application." - * - (no migrate columns) → "All columns are additive — single-deploy." + * - rollout → next-step pointer at the deploy gate. + * - cutover → backfill / switch reads / drop plaintext summary. + * - complete → loud "skips the deploy gate" warning. + * - (no migrate columns) → "single-deploy" line. 
*/ -export function renderPlanSummary(summary: PlanSummary): string { +export function renderPlanSummary(summary: PlanSummary, cli: string): string { const tables = new Set(summary.columns.map((c) => c.table)) const migrateCount = summary.columns.filter( (c) => c.path === 'migrate', @@ -143,18 +125,22 @@ export function renderPlanSummary(summary: PlanSummary): string { return `◇ ${`${c.table}.${c.column}`.padEnd(COLUMN_LABEL_WIDTH)} ${desc}` }) - const footer = renderFooter(effectiveStep(summary), migrateCount) + const footer = renderFooter(effectiveStep(summary), migrateCount, cli) return [header, '', ...rows, '', footer].join('\n') } -function renderFooter(step: PlanStep, migrateCount: number): string { +function renderFooter( + step: PlanStep, + migrateCount: number, + cli: string, +): string { if (migrateCount === 0) { return 'All columns are additive — single-deploy implementation.' } switch (step) { case 'rollout': - return 'Encryption rollout — implementation lands schema-add and dual-write code in your repo. Deploy that to production, verify with `npx stash status`, then run `npx stash plan` again to draft the encryption cutover.' + return `Encryption rollout — implementation lands schema-add and dual-write code in your repo. Deploy that to production, verify with \`${cli} status\`, then run \`${cli} plan\` again to draft the encryption cutover.` case 'cutover': return 'Encryption cutover — implementation runs the backfill, switches reads to encrypted, and drops plaintext. Requires dual-writes already live in production.' 
case 'complete': diff --git a/packages/cli/src/commands/init/lib/rollout-state.ts b/packages/cli/src/commands/init/lib/rollout-state.ts index 6ec1455b..b06cb41f 100644 --- a/packages/cli/src/commands/init/lib/rollout-state.ts +++ b/packages/cli/src/commands/init/lib/rollout-state.ts @@ -1,5 +1,12 @@ -import { type MigrationPhase, latestByColumn } from '@cipherstash/migrate' +import type { MigrationPhase } from '@cipherstash/migrate' import pg from 'pg' +import { latestByColumnSafe } from '../../encrypt/lib/db-readers.js' + +/** Conservative connect timeout for rollout-state lookups: the CLI + * surfaces these on hot paths (`stash plan`, `stash impl`, `stash + * status`), and pg's default of "no timeout" lets an unreachable host + * hang on the OS-level TCP timeout (~75s on most platforms). */ +const CONNECT_TIMEOUT_MS = 5_000 /** * What rollout work this column needs next, derived from `cs_migrations`. @@ -26,17 +33,7 @@ export interface ColumnState { needs: ColumnNeeds } -/** - * Classify a phase into the next plan-step the column needs. The mapping is: - * - * null → unknown (no events; brand new or not started) - * schema-added → rollout (synthesised in some renderers; safe default) - * dual-writing → cutover (deploy gate crossed) - * backfilling → cutover (cutover work in flight) - * backfilled → cutover (ready to rename swap) - * cut-over → cutover (rename done; drop still pending) - * dropped → completed (lifecycle complete) - */ +/** Classify a phase into the plan-step the column needs next. */ export function classifyPhase(phase: MigrationPhase | null): ColumnNeeds { if (phase === null) return 'unknown' if (phase === 'schema-added') return 'rollout' @@ -54,17 +51,24 @@ export function classifyPhase(phase: MigrationPhase | null): ColumnNeeds { * have a connection should use `classifyPhases` against the result of * `latestByColumn` directly. * - * On any unexpected error, returns `unknown` for every input — never - * throws. 
The encryption rollout is paused-by-default safer than - * crashed-by-default. + * Returns `null` on connect / query failure. Callers must distinguish + * `null` ("could not observe") from a populated array containing + * `needs: 'unknown'` rows ("observed, but no events recorded for this + * column"). Conflating the two would let a transient DB outage masquerade + * as "rollout has not started" — and `verifyCutoverPreconditions` then + * blocks legitimate cutover work with a misleading "no `dual_writing` + * event" error. */ export async function detectColumnStates( databaseUrl: string, columns: ReadonlyArray<{ table: string; column: string }>, -): Promise { +): Promise { if (columns.length === 0) return [] - const client = new pg.Client({ connectionString: databaseUrl }) + const client = new pg.Client({ + connectionString: databaseUrl, + connectionTimeoutMillis: CONNECT_TIMEOUT_MS, + }) try { await client.connect() const events = await latestByColumnSafe(client) @@ -73,12 +77,7 @@ export async function detectColumnStates( return row?.phase ?? null }) } catch { - return columns.map((c) => ({ - table: c.table, - column: c.column, - phase: null, - needs: 'unknown' as const, - })) + return null } finally { await client.end().catch(() => undefined) } @@ -129,19 +128,3 @@ export function rollupPlanStep( return 'unknown' } -async function latestByColumnSafe(client: pg.Client) { - try { - return await latestByColumn(client) - } catch (err) { - // The cs_migrations table may not exist yet (project that has run - // `stash init` but not `stash db install`, or a fresh database). - // Treat as "no events" rather than a hard error. 
- if ( - err instanceof Error && - /cs_migrations|schema "cipherstash"/i.test(err.message) - ) { - return new Map() - } - throw err - } -} diff --git a/packages/cli/src/commands/init/lib/write-context.ts b/packages/cli/src/commands/init/lib/write-context.ts index e702563a..85e01857 100644 --- a/packages/cli/src/commands/init/lib/write-context.ts +++ b/packages/cli/src/commands/init/lib/write-context.ts @@ -12,6 +12,7 @@ import { detectPackageManager, prodInstallCommand, } from '../utils.js' +import type { PlanStep } from './parse-plan.js' import { type SetupPromptContext, renderSetupPrompt } from './setup-prompt.js' export const CONTEXT_REL_PATH = '.cipherstash/context.json' @@ -33,6 +34,12 @@ export interface ContextFile { * AGENTS.md handoff (skill content is inlined into AGENTS.md instead) * and for wizard (the wizard installs its own). */ installedSkills: string[] + /** Plan-step the CLI resolved at handoff time. Written by `stash plan` + * so the wizard handoff (and any other reader of this file) can pick + * up the same rollout/cutover/complete dispatch the prompt-driven + * handoffs use. Absent when the file was written by `stash init` or + * `stash impl` rather than `stash plan`. */ + planStep?: PlanStep generatedAt: string } @@ -97,6 +104,7 @@ export function buildContextFile(state: InitState): ContextFile { envKeys: [], schemas: state.schemas ?? [], installedSkills: [], + planStep: state.planStep, generatedAt: new Date().toISOString(), } } diff --git a/packages/cli/src/commands/plan/index.ts b/packages/cli/src/commands/plan/index.ts index caf32745..51862dd7 100644 --- a/packages/cli/src/commands/plan/index.ts +++ b/packages/cli/src/commands/plan/index.ts @@ -88,6 +88,10 @@ async function detectPlanStep(cwd: string): Promise { } const states = await detectColumnStates(databaseUrl, columns) + // DB unreachable — fall back to a rollout-shaped plan rather than + // refusing. 
The plan command is read-only and the agent will surface + // the missing observation in the prose. + if (states === null) return 'rollout' const step = rollupPlanStep(states) // `unknown` and `completed` both map to rollout for plan-step selection: // unknown — no events; treat as fresh. diff --git a/packages/cli/src/commands/status/__tests__/status.test.ts b/packages/cli/src/commands/status/__tests__/status.test.ts index b8008b5f..9c3973b6 100644 --- a/packages/cli/src/commands/status/__tests__/status.test.ts +++ b/packages/cli/src/commands/status/__tests__/status.test.ts @@ -8,6 +8,7 @@ import { buildColumnQuest, buildQuestLog, inferQuestPath, + isComplete, } from '../quest.js' import { renderQuestLogJSON, @@ -15,6 +16,12 @@ import { renderQuestLogTTY, } from '../render.js' +// Use a non-npm runner everywhere so the tests fail loudly if any +// renderer or builder ever hard-codes `npx stash`. The exact value is +// arbitrary — what matters is that it is the value the caller passed, +// not a baked-in default. +const CLI = 'pnpm dlx stash' + let cwd: string beforeEach(() => { @@ -95,9 +102,6 @@ describe('inferQuestPath', () => { }) it('defaults to migrate when DB connectivity is missing', () => { - // The 5-objective shape is more informative when we genuinely don't - // know — better to show the full lifecycle locked than to default to - // a 2-objective new-column shape that hides relevant work. expect(inferQuestPath({ table: 't', column: 'c' })).toBe('migrate') }) }) @@ -108,29 +112,39 @@ describe('buildColumnQuest — migrate path', () => { } it('with no signals: 0/5, schema-add active, rest locked', () => { - const quest = buildColumnQuest(obs({ phase: 'dual-writing' })) - // Picking dual-writing forces the migrate path; reset and check no-events. 
- const noEvents = buildColumnQuest({ - table: 'users', - column: 'email', - phase: 'dual-writing', - }) - expect(noEvents.path).toBe('migrate') + const quest = buildColumnQuest( + { table: 'users', column: 'email', phase: null, eql: null }, + CLI, + ) + expect(quest.path).toBe('new') + // The schema-add objective should be the active one and every other + // objective locked. Asserting per-objective status guards the no- + // signal invariant against regressions in computeDoneCount. + expect(quest.progress).toEqual({ done: 0, total: 2 }) + expect(quest.objectives[0].status).toBe('active') + expect(quest.objectives[1].status).toBe('locked') + }) + + it('5-objective shape applies to any non-null migrate phase (sanity)', () => { + // The migrate quest is always 5 objectives regardless of which phase + // produced it. Kept as a separate sanity check rather than smuggled + // into the no-signals test where it does not belong. + const quest = buildColumnQuest( + { table: 'users', column: 'email', phase: 'dual-writing' }, + CLI, + ) + expect(quest.path).toBe('migrate') expect(quest.progress.total).toBe(5) }) it('phase=null + EQL pending + twin exists: schema-add done (1/5), dual-writes deployed active', () => { - // The encrypted twin column existing alongside the original is the - // unambiguous "this is a migrate column" signal — the rollout PR - // created `_encrypted` and ran `db push` (writing pending). The - // user has not yet recorded a `dual_writing` event, so the next - // active objective is "deploy dual-writes to production". 
const quest = buildColumnQuest( obs({ phase: null, eql: { state: 'pending' }, physicalEncryptedTwinExists: true, }), + CLI, ) expect(quest.path).toBe('migrate') expect(quest.progress).toEqual({ done: 1, total: 5 }) @@ -140,7 +154,7 @@ describe('buildColumnQuest — migrate path', () => { }) it('phase=dual-writing: 2/5, backfill active', () => { - const quest = buildColumnQuest(obs({ phase: 'dual-writing' })) + const quest = buildColumnQuest(obs({ phase: 'dual-writing' }), CLI) expect(quest.progress).toEqual({ done: 2, total: 5 }) expect(quest.objectives[1].status).toBe('done') expect(quest.objectives[2].status).toBe('active') @@ -148,56 +162,57 @@ describe('buildColumnQuest — migrate path', () => { }) it('phase=backfilling counts as 2/5 (backfill in flight is still the active step)', () => { - const quest = buildColumnQuest(obs({ phase: 'backfilling' })) + const quest = buildColumnQuest(obs({ phase: 'backfilling' }), CLI) expect(quest.progress.done).toBe(2) expect(quest.objectives[2].status).toBe('active') }) it('phase=backfilled: 3/5, cutover active', () => { - const quest = buildColumnQuest(obs({ phase: 'backfilled' })) + const quest = buildColumnQuest(obs({ phase: 'backfilled' }), CLI) expect(quest.progress.done).toBe(3) expect(quest.objectives[3].status).toBe('active') expect(quest.objectives[3].label).toMatch(/cut over/i) }) it('phase=cut-over: 4/5, drop plaintext active', () => { - const quest = buildColumnQuest(obs({ phase: 'cut-over' })) + const quest = buildColumnQuest(obs({ phase: 'cut-over' }), CLI) expect(quest.progress.done).toBe(4) expect(quest.objectives[4].status).toBe('active') expect(quest.objectives[4].label).toMatch(/drop plaintext/i) }) it('phase=dropped: 5/5 (complete)', () => { - const quest = buildColumnQuest(obs({ phase: 'dropped' })) + const quest = buildColumnQuest(obs({ phase: 'dropped' }), CLI) expect(quest.progress).toEqual({ done: 5, total: 5 }) - expect(quest.complete).toBe(true) + expect(isComplete(quest)).toBe(true) 
expect(quest.nextMove).toBeUndefined() }) - it('next-move hint references concrete CLI invocations with --table/--column', () => { - const backfill = buildColumnQuest(obs({ phase: 'dual-writing' })) - expect(backfill.nextMove).toContain('stash encrypt backfill') + it('next-move hint uses the caller-supplied runner and concrete --table/--column', () => { + // Regression guard: the hint must be prefixed with whatever `cli` the + // caller passed in (e.g. `pnpm dlx stash`), never a hard-coded + // `npx stash` string. + const backfill = buildColumnQuest(obs({ phase: 'dual-writing' }), CLI) + expect(backfill.nextMove).toContain(`${CLI} encrypt backfill`) expect(backfill.nextMove).toContain('--table users') expect(backfill.nextMove).toContain('--column email') + expect(backfill.nextMove).not.toContain('npx stash') - const cutover = buildColumnQuest(obs({ phase: 'backfilled' })) - expect(cutover.nextMove).toContain('stash encrypt cutover') + const cutover = buildColumnQuest(obs({ phase: 'backfilled' }), CLI) + expect(cutover.nextMove).toContain(`${CLI} encrypt cutover`) - const drop = buildColumnQuest(obs({ phase: 'cut-over' })) - expect(drop.nextMove).toContain('stash encrypt drop') + const drop = buildColumnQuest(obs({ phase: 'cut-over' }), CLI) + expect(drop.nextMove).toContain(`${CLI} encrypt drop`) }) it('falls back to physical-column existence as a schema-add signal', () => { - // cs_migrations is silent (schema_added events are never written); - // information_schema is the next-best signal that schema-add has been - // applied. When EQL config is missing too but the encrypted twin - // column physically exists, treat schema-add as done. 
const quest = buildColumnQuest( obs({ phase: null, eql: null, physicalEncryptedTwinExists: true, }), + CLI, ) expect(quest.progress.done).toBe(1) expect(quest.objectives[0].status).toBe('done') @@ -206,54 +221,63 @@ describe('buildColumnQuest — migrate path', () => { describe('buildColumnQuest — new path', () => { it('no EQL config: 0/2, schema-add active', () => { - const quest = buildColumnQuest({ - table: 'orders', - column: 'note', - phase: null, - eql: null, - }) + const quest = buildColumnQuest( + { table: 'orders', column: 'note', phase: null, eql: null }, + CLI, + ) expect(quest.path).toBe('new') expect(quest.progress).toEqual({ done: 0, total: 2 }) expect(quest.objectives[0].status).toBe('active') }) - it('EQL pending: 1/2, activate active', () => { - const quest = buildColumnQuest({ - table: 'orders', - column: 'note', - phase: null, - eql: { state: 'pending' }, - }) + it('EQL pending: 1/2, activate active, hint uses caller-supplied runner', () => { + const quest = buildColumnQuest( + { + table: 'orders', + column: 'note', + phase: null, + eql: { state: 'pending' }, + }, + CLI, + ) expect(quest.progress).toEqual({ done: 1, total: 2 }) expect(quest.objectives[1].status).toBe('active') - expect(quest.nextMove).toMatch(/db activate/) + expect(quest.nextMove).toContain(`${CLI} db activate`) }) it('EQL active: 2/2, complete', () => { - const quest = buildColumnQuest({ - table: 'orders', - column: 'note', - phase: null, - eql: { state: 'active' }, - }) - expect(quest.complete).toBe(true) + const quest = buildColumnQuest( + { + table: 'orders', + column: 'note', + phase: null, + eql: { state: 'active' }, + }, + CLI, + ) + expect(isComplete(quest)).toBe(true) expect(quest.nextMove).toBeUndefined() }) }) describe('buildColumnQuest — DB unreachable', () => { it('locks every objective except the first when phase and eql are both undefined', () => { - // The only honest answer is "I don't know what's been done" — show - // the full migrate-shape with the first 
objective active so the user - // sees that the rollout exists; the renderer surfaces a footer note - // about the missing observation. - const quest = buildColumnQuest({ table: 't', column: 'c' }) + const quest = buildColumnQuest({ table: 't', column: 'c' }, CLI) expect(quest.path).toBe('migrate') expect(quest.objectives[0].status).toBe('active') expect(quest.objectives.slice(1).every((o) => o.status === 'locked')).toBe( true, ) }) + + it('suppresses nextMove when DB is unreachable (do not invent a step)', () => { + // Regression guard: a column actually mid-cutover whose DB is briefly + // unreachable would otherwise be told to re-run schema-add via + // nextMoveFor's doneCount=0 fallback. The renderer surfaces the + // unreachable footer instead. + const quest = buildColumnQuest({ table: 't', column: 'c' }, CLI) + expect(quest.nextMove).toBeUndefined() + }) }) describe('buildQuestLog', () => { @@ -265,6 +289,7 @@ describe('buildQuestLog', () => { { table: 'users', column: 'email', phase: 'dropped' }, { table: 'users', column: 'phone', phase: 'dual-writing' }, ], + cli: CLI, }) expect(log.completed).toHaveLength(1) expect(log.completed[0].column).toBe('email') @@ -277,6 +302,7 @@ describe('buildQuestLog', () => { initialized: true, observedFromDb: false, observations: [{ table: 'users', column: 'email' }], + cli: CLI, }) expect(log.observedFromDb).toBe(false) }) @@ -286,6 +312,7 @@ describe('buildQuestLog', () => { initialized: true, observedFromDb: true, observations: [], + cli: CLI, }) expect(log.active).toEqual([]) expect(log.completed).toEqual([]) @@ -293,16 +320,20 @@ describe('buildQuestLog', () => { }) describe('renderQuestLogTTY', () => { - it('shows an empty-state for uninitialized projects with init prompt', () => { + it('shows an empty-state for uninitialized projects with init prompt that uses the caller-supplied runner', () => { const out = renderQuestLogTTY( buildQuestLog({ initialized: false, observedFromDb: false, observations: [], + cli: CLI, }), 
+ CLI, ) expect(out).toMatch(/no quests yet/i) - expect(out).toMatch(/stash init/) + expect(out).toContain(`${CLI} init`) + expect(out).toContain(`${CLI} plan`) + expect(out).not.toContain('npx stash') }) it('renders the active-quest section with progress bar, objectives, and next-move hint', () => { @@ -312,8 +343,9 @@ describe('renderQuestLogTTY', () => { observations: [ { table: 'users', column: 'email', phase: 'dual-writing' }, ], + cli: CLI, }) - const out = renderQuestLogTTY(log) + const out = renderQuestLogTTY(log, CLI) expect(out).toContain('CipherStash Quest Log') expect(out).toContain('ACTIVE QUEST') expect(out).toContain('Encrypt users.email') @@ -322,6 +354,9 @@ describe('renderQuestLogTTY', () => { expect(out).toContain('░') expect(out).toMatch(/← you are here/) expect(out).toMatch(/Next move/) + // Quest-level next-move text uses the caller-supplied runner. + expect(out).toContain(`${CLI} encrypt backfill`) + expect(out).not.toContain('npx stash') }) it('shows a 🏆 line per completed quest', () => { @@ -329,8 +364,9 @@ describe('renderQuestLogTTY', () => { initialized: true, observedFromDb: true, observations: [{ table: 'users', column: 'ssn', phase: 'dropped' }], + cli: CLI, }) - const out = renderQuestLogTTY(log) + const out = renderQuestLogTTY(log, CLI) expect(out).toContain('🏆 COMPLETED') expect(out).toContain('users.ssn') }) @@ -340,8 +376,9 @@ describe('renderQuestLogTTY', () => { initialized: true, observedFromDb: false, observations: [{ table: 'users', column: 'email' }], + cli: CLI, }) - const out = renderQuestLogTTY(log) + const out = renderQuestLogTTY(log, CLI) expect(out).toMatch(/could not reach the database/i) }) }) @@ -354,15 +391,16 @@ describe('renderQuestLogPlain', () => { observations: [ { table: 'users', column: 'email', phase: 'dual-writing' }, ], + cli: CLI, }) - const out = renderQuestLogPlain(log) + const out = renderQuestLogPlain(log, CLI) expect(out).not.toMatch(/⚔️|🏆|🔒|💡|▓|░/) - // Still has the structural content. 
expect(out).toContain('Encrypt users.email') expect(out).toMatch(/Progress: 2\/5/) expect(out).toContain('Next move:') - // Still uses bracketed status markers as a stable plain-text signal - // for scripts. + expect(out).toContain(`${CLI} encrypt backfill`) + expect(out).not.toContain('npx stash') + // Bracketed status markers as a stable plain-text signal for scripts. expect(out).toMatch(/\[x\]/) expect(out).toMatch(/\[>\]/) expect(out).toMatch(/\[ \]/) @@ -373,12 +411,28 @@ describe('renderQuestLogPlain', () => { initialized: true, observedFromDb: true, observations: [{ table: 'users', column: 'ssn', phase: 'dropped' }], + cli: CLI, }) - const out = renderQuestLogPlain(log) + const out = renderQuestLogPlain(log, CLI) expect(out).toContain('Completed') expect(out).toContain('users.ssn') expect(out).not.toMatch(/🏆/) }) + + it('empty-state hints use the caller-supplied runner', () => { + const out = renderQuestLogPlain( + buildQuestLog({ + initialized: false, + observedFromDb: false, + observations: [], + cli: CLI, + }), + CLI, + ) + expect(out).toContain(`${CLI} init`) + expect(out).toContain(`${CLI} plan`) + expect(out).not.toContain('npx stash') + }) }) describe('renderQuestLogJSON', () => { @@ -390,6 +444,7 @@ describe('renderQuestLogJSON', () => { { table: 'users', column: 'email', phase: 'dual-writing' }, { table: 'users', column: 'ssn', phase: 'dropped' }, ], + cli: CLI, }) const json = renderQuestLogJSON(log) const parsed = JSON.parse(json) @@ -404,7 +459,7 @@ describe('renderQuestLogJSON', () => { expect(active.path).toBe('migrate') expect(active.progress).toEqual({ done: 2, total: 5 }) expect(active.complete).toBe(false) - expect(active.nextMove).toContain('stash encrypt backfill') + expect(active.nextMove).toContain(`${CLI} encrypt backfill`) expect(Array.isArray(active.objectives)).toBe(true) expect(active.objectives[0]).toHaveProperty('label') expect(active.objectives[0]).toHaveProperty('status') @@ -417,8 +472,9 @@ describe('nextMoveHint', () => { 
initialized: false, observedFromDb: false, observations: [], + cli: CLI, }) - expect(nextMoveHint(log, 'pnpm dlx stash')).toMatch(/init/) + expect(nextMoveHint(log, CLI)).toMatch(/init/) }) it('points at plan when initialized but no quests', () => { @@ -426,8 +482,9 @@ describe('nextMoveHint', () => { initialized: true, observedFromDb: true, observations: [], + cli: CLI, }) - expect(nextMoveHint(log, 'pnpm dlx stash')).toMatch(/plan/) + expect(nextMoveHint(log, CLI)).toMatch(/plan/) }) it('uses the first active quest’s nextMove when one exists', () => { @@ -437,8 +494,9 @@ describe('nextMoveHint', () => { observations: [ { table: 'users', column: 'email', phase: 'dual-writing' }, ], + cli: CLI, }) - expect(nextMoveHint(log, 'pnpm dlx stash')).toContain('stash encrypt backfill') + expect(nextMoveHint(log, CLI)).toContain(`${CLI} encrypt backfill`) }) it('reports complete when every quest is done', () => { @@ -446,7 +504,8 @@ describe('nextMoveHint', () => { initialized: true, observedFromDb: true, observations: [{ table: 'users', column: 'ssn', phase: 'dropped' }], + cli: CLI, }) - expect(nextMoveHint(log, 'pnpm dlx stash')).toMatch(/complete|nothing/i) + expect(nextMoveHint(log, CLI)).toMatch(/complete|nothing/i) }) }) diff --git a/packages/cli/src/commands/status/index.ts b/packages/cli/src/commands/status/index.ts index 2eb0411f..f11621a3 100644 --- a/packages/cli/src/commands/status/index.ts +++ b/packages/cli/src/commands/status/index.ts @@ -1,8 +1,13 @@ import { existsSync } from 'node:fs' import { resolve } from 'node:path' -import { latestByColumn, readManifest } from '@cipherstash/migrate' +import { type Manifest, readManifest } from '@cipherstash/migrate' import * as p from '@clack/prompts' import pg from 'pg' +import { + fetchActiveEqlConfig, + fetchPhysicalColumns, + latestByColumnSafe, +} from '../encrypt/lib/db-readers.js' import { readContextFile } from '../init/lib/read-context.js' import { PLAN_REL_PATH } from '../init/lib/setup-prompt.js' import { 
@@ -21,6 +26,23 @@ import { renderQuestLogTTY, } from './render.js' +/** Status is a hot-path "where am I in milliseconds" command — fail fast + * if the DB is unreachable rather than waiting on the OS-level TCP + * timeout (~75s on most platforms). */ +const CONNECT_TIMEOUT_MS = 2_000 + +function manifestColumns( + manifest: Manifest, +): { table: string; column: string }[] { + const out: { table: string; column: string }[] = [] + for (const [table, cols] of Object.entries(manifest.tables)) { + for (const col of cols) { + out.push({ table, column: col.column }) + } + } + return out +} + export type StageStatus = 'done' | 'pending' export interface ProjectStatus { @@ -56,13 +78,7 @@ export async function gatherObservations( return { observedFromDb: false, observations: [] } } - const targetColumns: { table: string; column: string }[] = [] - for (const [table, cols] of Object.entries(manifest.tables)) { - for (const col of cols) { - targetColumns.push({ table, column: col.column }) - } - } - + const targetColumns = manifestColumns(manifest) if (targetColumns.length === 0) { return { observedFromDb: false, observations: [] } } @@ -73,30 +89,34 @@ export async function gatherObservations( const config = await loadStashConfig() databaseUrl = config.databaseUrl } catch { - // Couldn't load config — return manifest-only observations. 
return { observedFromDb: false, observations: targetColumns.map((c) => ({ ...c })), } } - const client = new pg.Client({ connectionString: databaseUrl }) + const client = new pg.Client({ + connectionString: databaseUrl, + connectionTimeoutMillis: CONNECT_TIMEOUT_MS, + }) + const tables = Array.from(new Set(targetColumns.map((c) => c.table))) try { await client.connect() const [phases, eqlConfig, physicalCols] = await Promise.all([ - latestByColumn(client).catch(() => new Map()), - fetchEqlConfig(client), - fetchPhysicalColumns(client), + latestByColumnSafe(client), + fetchActiveEqlConfig(client), + fetchPhysicalColumns(client, tables), ]) const observations: ColumnObservation[] = targetColumns.map((c) => { const key: `${string}.${string}` = `${c.table}.${c.column}` const phaseRow = phases.get(key) + const eqlInfo = eqlConfig.get(key) return { table: c.table, column: c.column, phase: phaseRow ? phaseRow.phase : null, - eql: eqlConfig.get(key) ?? null, + eql: eqlInfo ? { state: eqlInfo.state } : null, physicalEncryptedTwinExists: ( physicalCols.get(c.table) ?? 
new Set()
        ).has(`${c.column}_encrypted`),
@@ -114,63 +134,6 @@ export async function gatherObservations(
   }
 }
 
-async function fetchEqlConfig(
-  client: pg.Client,
-): Promise<Map<string, { state: 'active' | 'pending' | 'encrypting' }>> {
-  const out = new Map<string, { state: 'active' | 'pending' | 'encrypting' }>()
-  try {
-    const result = await client.query<{ state: string; data: unknown }>(
-      `SELECT state, data FROM public.eql_v2_configuration
-       WHERE state IN ('active', 'pending', 'encrypting')
-       ORDER BY CASE state WHEN 'active' THEN 0 WHEN 'encrypting' THEN 1 ELSE 2 END`,
-    )
-    for (const row of result.rows) {
-      const data = row.data as {
-        tables?: Record<string, Record<string, unknown>>
-      } | null
-      if (!data?.tables) continue
-      for (const [tableName, columns] of Object.entries(data.tables)) {
-        for (const columnName of Object.keys(columns)) {
-          const key = `${tableName}.${columnName}`
-          if (out.has(key)) continue
-          out.set(key, {
-            state: row.state as 'active' | 'pending' | 'encrypting',
-          })
-        }
-      }
-    }
-  } catch (err) {
-    if (err instanceof Error && /eql_v2_configuration/i.test(err.message)) {
-      return out
-    }
-    throw err
-  }
-  return out
-}
-
-async function fetchPhysicalColumns(
-  client: pg.Client,
-): Promise<Map<string, Set<string>>> {
-  const out = new Map<string, Set<string>>()
-  try {
-    const result = await client.query<{
-      table_name: string
-      column_name: string
-    }>(
-      `SELECT table_name, column_name FROM information_schema.columns
-       WHERE table_schema = current_schema()`,
-    )
-    for (const row of result.rows) {
-      const set = out.get(row.table_name) ?? new Set()
-      set.add(row.column_name)
-      out.set(row.table_name, set)
-    }
-  } catch {
-    // information_schema is always present; if this fails, swallow.
-  }
-  return out
-}
-
 interface StatusFlags {
   /** Force the fancy quest-log output even in non-TTY contexts.
*/ quest?: boolean @@ -202,23 +165,23 @@ export async function statusCommand(flags: StatusFlags = {}) { const project = readProjectStatus(cwd) if (flags.json) { - const log = await buildLog(cwd, project) + const log = await buildLog(cwd, project, cli) process.stdout.write(renderQuestLogJSON(log)) return } // The intro/outro frames are TTY-only; in plain mode we want the raw // body without `clack` decorations so the output is grep-friendly. - const log = await buildLog(cwd, project) + const log = await buildLog(cwd, project, cli) const useTTY = flags.quest ?? (process.stdout.isTTY && !flags.plain) if (useTTY) { p.intro('CipherStash') - p.note(renderQuestLogTTY(log), 'Quest log') + p.note(renderQuestLogTTY(log, cli), 'Quest log') p.outro(nextMoveHint(log, cli)) } else { - process.stdout.write(`${renderQuestLogPlain(log)}\n`) + process.stdout.write(`${renderQuestLogPlain(log, cli)}\n`) process.stdout.write(`Next: ${nextMoveHint(log, cli)}\n`) } } @@ -226,6 +189,7 @@ export async function statusCommand(flags: StatusFlags = {}) { async function buildLog( cwd: string, project: ProjectStatus, + cli: string, ): Promise { const { observedFromDb, observations } = project.initialized ? await gatherObservations(cwd) @@ -234,6 +198,7 @@ async function buildLog( initialized: project.initialized, observedFromDb, observations, + cli, }) } @@ -251,6 +216,9 @@ export function nextMoveHint(log: QuestLog, cli: string): string { if (log.active.length === 0) { return 'All rollouts complete. Nothing to do.' } + if (!log.observedFromDb) { + return `Database unreachable — re-run \`${cli} status\` once it is available to see what's next.` + } // First active quest's nextMove already names a concrete CLI invocation // when relevant; if not, fall back to a generic plan/impl pointer. 
const first = log.active[0] diff --git a/packages/cli/src/commands/status/quest.ts b/packages/cli/src/commands/status/quest.ts index f4e64e40..477d703b 100644 --- a/packages/cli/src/commands/status/quest.ts +++ b/packages/cli/src/commands/status/quest.ts @@ -18,10 +18,14 @@ export interface ColumnQuest { title: string objectives: Objective[] progress: { done: number; total: number } - /** One-line "what to do next" hint. Empty when the quest is complete. */ + /** One-line "what to do next" hint. Empty when the quest is complete + * (i.e. when `progress.done === progress.total`). */ nextMove?: string - /** True iff every objective is done. */ - complete: boolean +} + +/** A quest is complete when every objective has been done. */ +export function isComplete(quest: ColumnQuest): boolean { + return quest.progress.done === quest.progress.total } export interface QuestLog { @@ -99,59 +103,58 @@ export function inferQuestPath(obs: ColumnObservation): QuestPath { /** * Build a column quest from one observation. Pure; no I/O. * - * Migrate-column objective mapping (phase → done count): - * null + no EQL → 0 done, active = "schema-add" - * null + EQL pending → 1 done, active = "dual-writes deployed" - * null + encrypted twin only → 1 done, active = "dual-writes deployed" - * dual-writing → 2 done, active = "backfill" - * backfilling → 2 done, active = "backfill" - * backfilled → 3 done, active = "cut over" - * cut-over → 4 done, active = "drop plaintext" - * dropped → 5 done (complete) - * - * New-column objective mapping: - * no EQL → 0 done, active = "schema-add" - * EQL pending → 1 done, active = "promote to active" - * EQL active → 2 done (complete) + * `cli` is interpolated into the `nextMove` hint so the user sees + * commands prefixed with their actual package-manager runner — pass + * `runnerCommand(pm, 'stash')` rather than a hard-coded `npx stash`. 
*/ -export function buildColumnQuest(obs: ColumnObservation): ColumnQuest { +export function buildColumnQuest( + obs: ColumnObservation, + cli: string, +): ColumnQuest { const path = inferQuestPath(obs) const labels = path === 'migrate' ? MIGRATE_OBJECTIVES : NEW_OBJECTIVES - const doneCount = computeDoneCount(path, obs) const total = labels.length + const doneCount = computeDoneCount(path, obs) + const dbUnreachable = obs.phase === undefined && obs.eql === undefined - const objectives: Objective[] = labels.map((label, idx) => { - let status: ObjectiveStatus - if (idx < doneCount) status = 'done' - else if (idx === doneCount) status = 'active' - else status = 'locked' - - // When DB is unreachable, we can't be confident about anything past - // schema-add; mark everything beyond as locked but flag the active - // one as such. - if (obs.phase === undefined && obs.eql === undefined) { - status = 'locked' - } - return { label, status } - }) - - // If DB unreachable, no `active` was set above; set the first - // objective active so the user has *something* to look at. - if (obs.phase === undefined && obs.eql === undefined) { - objectives[0] = { ...objectives[0], status: 'active' } - } + // When the DB is unreachable we can't claim any objective is done. + // Surface the first objective as active (so the user sees the rollout + // exists) and lock the rest; the renderer adds the "DB unreachable" + // footnote that explains the missing observation. + const objectives: Objective[] = labels.map((label, idx) => ({ + label, + status: dbUnreachable + ? idx === 0 + ? 'active' + : 'locked' + : idx < doneCount + ? 'done' + : idx === doneCount + ? 'active' + : 'locked', + })) const complete = doneCount === total + // Without a live DB observation we cannot trust `doneCount` — it falls + // back to 0, but a column mid-cutover would still appear "0/5" here. 
+  // Suppressing `nextMove` avoids confidently telling the user to re-run
+  // schema-add (or any other phase-zero action) when the actual state is
+  // unknown. The renderer's "could not reach the database" footer is the
+  // honest answer for what to do next.
+  const nextMove =
+    complete || dbUnreachable
+      ? undefined
+      : nextMoveFor(path, doneCount, obs, cli)
+
   return {
     table: obs.table,
     column: obs.column,
     path,
     title: titleFor(obs.table, obs.column, path),
     objectives,
-    progress: { done: complete ? total : doneCount, total },
-    nextMove: complete ? undefined : nextMoveFor(path, doneCount, obs),
-    complete,
+    progress: { done: doneCount, total },
+    nextMove,
   }
 }
 
@@ -207,12 +210,13 @@
 function nextMoveFor(
   path: QuestPath,
   doneCount: number,
   obs: ColumnObservation,
+  cli: string,
 ): string {
   if (path === 'new') {
     if (doneCount === 0) {
-      return 'Declare the encrypted column in your schema and run the migration, then `stash db push`.'
+      return `Declare the encrypted column in your schema and run the migration, then \`${cli} db push\`.`
     }
-    return 'Promote the pending EQL config — `stash db activate`.'
+    return `Promote the pending EQL config — \`${cli} db activate\`.`
   }
 
   // Migrate.
@@ -220,13 +224,13 @@
   switch (doneCount) {
     case 0:
       return 'Add the encrypted twin column (`<column>_encrypted`) and run the migration.'
     case 1:
-      return 'Wire dual-write code on every persistence path, deploy to production, then run `stash encrypt backfill` (it confirms dual-writes and records the event).'
+ return `Wire dual-write code on every persistence path, deploy to production, then run \`${cli} encrypt backfill\` (it confirms dual-writes and records the event).` case 2: - return `Run \`stash encrypt backfill --table ${obs.table} --column ${obs.column}\` to encrypt historical rows.` + return `Run \`${cli} encrypt backfill --table ${obs.table} --column ${obs.column}\` to encrypt historical rows.` case 3: - return `Run \`stash encrypt cutover --table ${obs.table} --column ${obs.column}\` to rename the encrypted twin into place and switch reads.` + return `Run \`${cli} encrypt cutover --table ${obs.table} --column ${obs.column}\` to rename the encrypted twin into place and switch reads.` case 4: - return `Run \`stash encrypt drop --table ${obs.table} --column ${obs.column}\` to remove the plaintext column.` + return `Run \`${cli} encrypt drop --table ${obs.table} --column ${obs.column}\` to remove the plaintext column.` default: return '' } @@ -239,18 +243,22 @@ function nextMoveFor( * `context.json`). The renderer uses this to decide whether to show an * empty-state quest log. `observedFromDb` is true when at least one * observation has live DB data; false if the DB query failed and we're - * working from manifest alone. + * working from manifest alone. `cli` is the package-manager-aware + * command prefix passed through to per-quest `nextMove` hints. 
 */
 export function buildQuestLog(input: {
   initialized: boolean
   observedFromDb: boolean
   observations: ColumnObservation[]
+  cli: string
 }): QuestLog {
-  const quests = input.observations.map(buildColumnQuest)
+  const quests = input.observations.map((obs) =>
+    buildColumnQuest(obs, input.cli),
+  )
   const active: ColumnQuest[] = []
   const completed: ColumnQuest[] = []
   for (const quest of quests) {
-    if (quest.complete) completed.push(quest)
+    if (isComplete(quest)) completed.push(quest)
     else active.push(quest)
   }
   return {
diff --git a/packages/cli/src/commands/status/render.ts b/packages/cli/src/commands/status/render.ts
index 40132317..732e0aac 100644
--- a/packages/cli/src/commands/status/render.ts
+++ b/packages/cli/src/commands/status/render.ts
@@ -1,4 +1,4 @@
-import type { ColumnQuest, Objective, QuestLog } from './quest.js'
+import { type ColumnQuest, type Objective, type QuestLog, isComplete } from './quest.js'
 
 const PROGRESS_BAR_WIDTH = 6
 
@@ -7,16 +7,19 @@ const PROGRESS_BAR_WIDTH = 6
  * progress bars, lock icons, "← you are here" markers, and one-line
  * "next move" hints. Designed for an interactive terminal; non-TTY
  * callers should use {@link renderQuestLogPlain} instead.
+ *
+ * `cli` is the package-manager-aware command prefix for empty-state
+ * hints (`<cli> init`, `<cli> plan`). Pass `runnerCommand(pm, 'stash')`.
  */
-export function renderQuestLogTTY(log: QuestLog): string {
+export function renderQuestLogTTY(log: QuestLog, cli: string): string {
   if (!log.initialized) {
     return [
       '⚔️ CipherStash Quest Log',
       '',
       '  No quests yet — your encryption rollout has not begun.',
       '',
-      '  First move: run `npx stash init` to set up CipherStash, then',
-      '  `npx stash plan` to draft an encryption rollout.',
+      `  First move: run \`${cli} init\` to set up CipherStash, then`,
+      `  \`${cli} plan\` to draft an encryption rollout.`,
     ].join('\n')
   }
 
@@ -26,7 +29,7 @@ export function renderQuestLogTTY(log: QuestLog): string {
     lines.push(
       '  No active quests yet — your encryption rollout has not begun.',
       '',
-      '  First move: run `npx stash plan` to draft a rollout for the',
+      `  First move: run \`${cli} plan\` to draft a rollout for the`,
       '  columns you want to protect.',
     )
     return lines.join('\n')
   }
 
@@ -99,14 +102,16 @@ function progressBar(done: number, total: number): string {
  *
  * Used in non-TTY contexts by default — CI logs, pipes, agents reading
  * the output. The user can force the TTY shape anywhere with `--quest`.
+ * `cli` is the package-manager-aware command prefix for empty-state
+ * hints (`<cli> init`, `<cli> plan`).
*/ -export function renderQuestLogPlain(log: QuestLog): string { +export function renderQuestLogPlain(log: QuestLog, cli: string): string { if (!log.initialized) { return [ 'CipherStash status — encryption rollout', '', ' No quests yet — encryption rollout has not begun.', - ' First move: run `npx stash init`, then `npx stash plan`.', + ` First move: run \`${cli} init\`, then \`${cli} plan\`.`, ].join('\n') } @@ -115,7 +120,7 @@ export function renderQuestLogPlain(log: QuestLog): string { if (log.active.length === 0 && log.completed.length === 0) { lines.push( ' No active quests.', - ' First move: run `npx stash plan` to draft a rollout.', + ` First move: run \`${cli} plan\` to draft a rollout.`, ) return lines.join('\n') } @@ -200,7 +205,7 @@ function serializeQuest(quest: ColumnQuest) { path: quest.path, title: quest.title, progress: quest.progress, - complete: quest.complete, + complete: isComplete(quest), nextMove: quest.nextMove ?? null, objectives: quest.objectives.map((o) => ({ label: o.label, diff --git a/skills/stash-cli/SKILL.md b/skills/stash-cli/SKILL.md index f22908be..d384d527 100644 --- a/skills/stash-cli/SKILL.md +++ b/skills/stash-cli/SKILL.md @@ -5,7 +5,7 @@ description: Configure and use the `stash` package for project initialization, E # CipherStash CLI -Configure and use `stash` for project initialization, EQL database setup, encryption schema management, and Supabase integration. Previously published as `@cipherstash/stack-forge`; the `stash-forge` binary is now consolidated under `npx stash`. The AI-powered wizard formerly bundled here lives in [`@cipherstash/wizard`](https://www.npmjs.com/package/@cipherstash/wizard). +Configure and use `stash` for project initialization, EQL database setup, encryption schema management, and Supabase integration. Previously published as `@cipherstash/stack-forge`; the `stash-forge` binary is now consolidated under `stash`. 
The AI-powered wizard formerly bundled here lives in [`@cipherstash/wizard`](https://www.npmjs.com/package/@cipherstash/wizard). ## Trigger @@ -95,18 +95,20 @@ type StashConfig = { ## CLI Usage -The primary interface is the `stash` package, run via `npx` (or your package manager's equivalent runner): +The primary interface is the `stash` package. `stash init` installs it as a project dev dependency, so after init you invoke commands directly: ```bash -npx stash [options] +stash [options] ``` +Through your package manager (`pnpm exec`, `bun x`, `yarn`, or via `npm run`-style scripts), `stash` resolves to the project-local binary. **Before init has run** — for example when you're scaffolding the very first command — use your package manager's one-shot runner: `bunx stash init`, `pnpm dlx stash init`, `yarn dlx stash init`, or `npx stash init`. The CLI is package-manager-aware; pick whichever your project uses. + ### `init` — Scaffold a CipherStash project ```bash -npx stash init -npx stash init --supabase -npx stash init --drizzle +stash init +stash init --supabase +stash init --drizzle ``` Init is the **scaffold** save-point. It does mechanical setup only — no agent handoff. Six phases, prompts only when it can't make a sensible default: @@ -133,8 +135,8 @@ The `--supabase` and `--drizzle` flags tailor the intro message and EQL install ### `plan` — Draft a reviewable encryption plan ```bash -npx stash plan -npx stash plan --complete-rollout +stash plan +stash plan --complete-rollout ``` `plan` is the **draft for review** save-point. Pre-flights `.cipherstash/context.json` (errors with a "Run `stash init` first" pointer if missing). Hands off to a coding agent — all four targets are offered: Claude Code, Codex, AGENTS.md (for Cursor/Windsurf/Cline), and the CipherStash Agent (`@cipherstash/wizard`). 
@@ -147,7 +149,7 @@ npx stash plan --complete-rollout | At least one column has a `dual_writing` (or later) event recorded | **Encryption cutover** — backfill, schema rename + re-push, cutover, read-path switch, drop plaintext. Requires the rollout to already be deployed. | | `--complete-rollout` flag passed | **Complete rollout** — schema-add through drop, no deploy gate. Escape hatch for databases without a deployed application. Default-no confirm with a loud warning before generating. | -The chosen template drives the agent's prompt body. The wizard receives `--mode plan` (plus the resolved step) and forwards it to the CipherStash gateway. Every target produces a valid plan-mode artifact at `.cipherstash/plan.md`. +The chosen template drives the agent's prompt body for the Claude Code, Codex, and AGENTS.md handoffs. The wizard handoff receives `--mode plan` on argv and reads the resolved step from `.cipherstash/context.json` (the `planStep` field). Every target produces a valid plan-mode artifact at `.cipherstash/plan.md`. The agent writes a machine-readable header `` at the top of the plan. `step` is `"rollout" | "cutover" | "complete"`; each column entry carries `path: "new" | "migrate"`. `stash impl` parses this header to render a confirmation panel and to enforce the deploy gate. @@ -162,8 +164,8 @@ There is no atomic way to replace a populated plaintext column with an encrypted ### `impl` — Execute the plan ```bash -npx stash impl -npx stash impl --continue-without-plan +stash impl +stash impl --continue-without-plan ``` `impl` is the **execute** save-point. Pre-flights `.cipherstash/context.json`. 
Behaviour branches on disk state: @@ -197,10 +199,10 @@ After a successful handoff: ### `status` — The encryption-rollout quest log ```bash -npx stash status -npx stash status --quest # force the fancy output anywhere -npx stash status --plain # force the plain-text fallback anywhere -npx stash status --json # structured output for scripts +stash status +stash status --quest # force the fancy output anywhere +stash status --plain # force the plain-text fallback anywhere +stash status --json # structured output for scripts ``` `status` is the **map**. Reads `.cipherstash/context.json` (was init run?), `.cipherstash/migrations.json` (which columns are tracked?), and — best-effort — `cs_migrations` plus `eql_v2_configuration` for live per-column state. DB connectivity is optional; when missing, the command falls back to a manifest-only view and surfaces a footer note. @@ -229,7 +231,7 @@ For the deeper, raw views that touch only the database, use `stash db status` (E ### `auth login` — Authenticate with CipherStash ```bash -npx stash auth login +stash auth login ``` Opens a browser-based device code flow and saves a token to `~/.cipherstash/auth.json`. Database-touching commands check for this file before running. @@ -237,12 +239,12 @@ Opens a browser-based device code flow and saves a token to `~/.cipherstash/auth ### `db install` — Configure the database and install EQL extensions ```bash -npx stash db install -npx stash db install --supabase -npx stash db install --supabase --migration -npx stash db install --supabase --direct -npx stash db install --drizzle -npx stash db install --force +stash db install +stash db install --supabase +stash db install --supabase --migration +stash db install --supabase --direct +stash db install --drizzle +stash db install --force ``` `stash init` runs `db install` automatically as part of its EQL install phase. 
Run `db install` directly when you skipped init, when you need flags init doesn't expose (`--migration`, `--migrations-dir`, `--exclude-operator-family`), or when re-installing/upgrading EQL on its own. @@ -293,10 +295,10 @@ Direct-push installs (`--supabase --direct`) do **not** survive `supabase db res ### `db upgrade` — Upgrade EQL extensions ```bash -npx stash db upgrade -npx stash db upgrade --dry-run -npx stash db upgrade --supabase -npx stash db upgrade --latest +stash db upgrade +stash db upgrade --dry-run +stash db upgrade --supabase +stash db upgrade --latest ``` **Flags:** @@ -313,9 +315,9 @@ The EQL install SQL is idempotent and safe to re-run. The command checks the cur ### `db validate` — Validate encryption schema ```bash -npx stash db validate -npx stash db validate --supabase -npx stash db validate --exclude-operator-family +stash db validate +stash db validate --supabase +stash db validate --exclude-operator-family ``` **Flags:** @@ -344,8 +346,8 @@ Synchronises the CipherStash configuration in `eql_v2_configuration` with what y - For SDK users running the column-encryption lifecycle (`stash encrypt {backfill,cutover,drop}`) — `cutover` reads pending columns from EQL config to know what to rename. 
```bash -npx stash db push -npx stash db push --dry-run +stash db push +stash db push --dry-run ``` **Flags:** @@ -367,8 +369,8 @@ When pushing, the CLI: | Situation | Command | |-----------|---------| -| Adding a brand-new encrypted column (no rename) | `npx stash db activate` | -| Cutting over from a `_encrypted` twin (path 3 lifecycle) | `npx stash encrypt cutover --table T --column C` | +| Adding a brand-new encrypted column (no rename) | `stash db activate` | +| Cutting over from a `_encrypted` twin (path 3 lifecycle) | `stash encrypt cutover --table T --column C` | **SDK to EQL type mapping:** @@ -387,17 +389,17 @@ When pushing, the CLI: Runs `eql_v2.migrate_config()` followed by `eql_v2.activate_config()` inside a single transaction, advancing any `pending` row to `active` (and marking the prior `active` as `inactive`). No physical column renames. ```bash -npx stash db activate +stash db activate ``` -Use after `npx stash db push` when the new config purely adds columns or changes index ops without renaming any column. For path 3 (existing populated column → encrypted), use `npx stash encrypt cutover` instead — it does the same activation plus the physical rename. +Use after `stash db push` when the new config purely adds columns or changes index ops without renaming any column. For path 3 (existing populated column → encrypted), use `stash encrypt cutover` instead — it does the same activation plus the physical rename. Errors out with a clear message when there is no pending configuration to activate. ### `db status` — Show EQL installation status ```bash -npx stash db status +stash db status ``` Reports: @@ -408,7 +410,7 @@ Reports: ### `db test-connection` — Test database connectivity ```bash -npx stash db test-connection +stash db test-connection ``` Verifies the database URL in your config is valid and the database is reachable. Reports the database name, connected role, and PostgreSQL server version. 
Useful for debugging connection issues before running `db install`. @@ -416,7 +418,7 @@ Verifies the database URL in your config is valid and the database is reachable. ### `db migrate` — Run pending encrypt config migrations ```bash -npx stash db migrate +stash db migrate ``` Not yet implemented — placeholder for future encrypt-config migration tooling. @@ -427,13 +429,13 @@ The `encrypt` group is the cutover-step toolset: it runs the database-side work It drives the `@cipherstash/migrate` library, which records every transition in a `cipherstash.cs_migrations` table (installed by `stash db install`) and reads the user's intent from `.cipherstash/migrations.json`. This section documents the CLI surface. -The examples below use `npx stash`. Substitute `bunx`, `pnpm dlx`, or `yarn dlx` (or run `stash` directly when it's installed as a project dev dep — `stash init` sets that up). +The examples below show the bare `stash` form, which works after `stash init` adds the CLI as a project dev dep. See the "CLI Usage" section above for how to invoke it through your package manager before that. #### `encrypt status` — Show per-column phase, EQL state, and backfill progress ```bash -npx stash encrypt status -npx stash encrypt status --table users +stash encrypt status +stash encrypt status --table users ``` Reads three sources in parallel — the `migrations.json` manifest (intent), the live `eql_v2_configuration` row (EQL state), and the latest `cs_migrations` event per column (runtime state) — and renders a table per column with phase, indexes, progress, and any drift between intent and observed state. @@ -441,7 +443,7 @@ Reads three sources in parallel — the `migrations.json` manifest (intent), the #### `encrypt plan` — Diff intent vs. observed state ```bash -npx stash encrypt plan +stash encrypt plan ``` Like `status`, but explicitly lists what would change to reconcile observed state with `.cipherstash/migrations.json`. Read-only — does not mutate the DB or the manifest. 
@@ -449,10 +451,10 @@ Like `status`, but explicitly lists what would change to reconcile observed stat #### `encrypt backfill` — Resumably encrypt plaintext into the encrypted column ```bash -npx stash encrypt backfill --table users --column email -npx stash encrypt backfill --table users --column email --chunk-size 5000 -npx stash encrypt backfill --table users --column email --confirm-dual-writes-deployed -npx stash encrypt backfill --table users --column email --force +stash encrypt backfill --table users --column email +stash encrypt backfill --table users --column email --chunk-size 5000 +stash encrypt backfill --table users --column email --confirm-dual-writes-deployed +stash encrypt backfill --table users --column email --force ``` Chunked, resumable, idempotent backfill. Walks the table in keyset-pagination order, encrypts each chunk via `bulkEncryptModels` from `@cipherstash/stack`, and writes a single `UPDATE ... FROM (VALUES ...)` per chunk inside a transaction that also checkpoints to `cs_migrations`. SIGINT/SIGTERM finishes the current chunk and exits cleanly; re-running picks up from the last checkpoint. The `IS NOT NULL AND _encrypted IS NULL` guard makes concurrent runners and re-runs safe — they converge. @@ -472,7 +474,7 @@ Flags: #### `encrypt cutover` — Rename swap encrypted → primary column AND promote pending → active ```bash -npx stash encrypt cutover --table users --column email +stash encrypt cutover --table users --column email ``` **Precondition:** the column must be in the `backfilled` phase per `cs_migrations`, AND a pending EQL configuration must exist (registered via `stash db push` against a schema where the column is declared under its final name without the `_encrypted` suffix). 
@@ -489,7 +491,7 @@ If a Proxy URL is configured (via `--proxy-url` or `CIPHERSTASH_PROXY_URL`), it #### `encrypt drop` — Generate a migration that removes the plaintext column ```bash -npx stash encrypt drop --table users --column email +stash encrypt drop --table users --column email ``` For columns in the `cut_over` phase. Detects the user's migration tooling (Drizzle today; Prisma + raw-SQL planned) and emits a migration file containing `ALTER TABLE <table>
 DROP COLUMN <column>_plaintext;`. Does not apply the migration — the user reviews and runs their normal migrate command. Records the `dropped` event only after a follow-up `encrypt status` confirms the column is gone from `information_schema.columns`. @@ -497,19 +499,19 @@ ### `schema build` — Generate an encryption client from your database ```bash -npx stash schema build -npx stash schema build --supabase +stash schema build +stash schema build --supabase ``` Connects to your database, lets you select tables and columns to encrypt, asks about searchable indexes, and generates a typed encryption client file. Reads `databaseUrl` from `stash.config.ts`. -For AI-guided schema integration that edits your existing schema files in place, the recommended path is `npx stash plan` followed by `npx stash impl` — these add a planning save-point and can hand off to Claude Code, Codex, an AGENTS.md-driven editor, or the in-house `@cipherstash/wizard` package. `npx @cipherstash/wizard` standalone is still available for users who want to skip the plan checkpoint. +For AI-guided schema integration that edits your existing schema files in place, the recommended path is `stash plan` followed by `stash impl` — these add a planning save-point and can hand off to Claude Code, Codex, an AGENTS.md-driven editor, or the in-house `@cipherstash/wizard` package. `npx @cipherstash/wizard` standalone is still available for users who want to skip the plan checkpoint. ### `env` — Print production env vars for deployment ```bash -npx stash env -npx stash env --write +stash env +stash env --write ``` Experimental. Prints the environment variables (`CS_*`) you need to deploy a CipherStash-backed app. With `--write`, writes them into a `.env.production` file. 
@@ -624,7 +626,7 @@ The database role needs `CREATE` privileges on the database and public schema, o ### Config not found -`stash.config.ts` must be in the project root or a parent directory. The file must `export default defineConfig(...)`. The fastest fix is `npx stash init`, which scaffolds the config (and authenticates, installs deps, installs EQL, and writes `.cipherstash/context.json` in the same run). For a CLI-only setup, `npx stash db install` also scaffolds the config. +`stash.config.ts` must be in the project root or a parent directory. The file must `export default defineConfig(...)`. The fastest fix is `stash init`, which scaffolds the config (and authenticates, installs deps, installs EQL, and writes `.cipherstash/context.json` in the same run). For a CLI-only setup, `stash db install` also scaffolds the config. ### Supabase environments diff --git a/skills/stash-drizzle/SKILL.md b/skills/stash-drizzle/SKILL.md index 4640c82b..570e9d6d 100644 --- a/skills/stash-drizzle/SKILL.md +++ b/skills/stash-drizzle/SKILL.md @@ -338,7 +338,7 @@ The hard case: a Drizzle table that already exists in production with live data CipherStash splits this into two named steps with a hard production-deploy gate between them: an **encryption rollout** (schema-add + dual-write code + `db push`) and an **encryption cutover** (backfill + rename + drop). The `stash-encryption` skill is the canonical reference for the lifecycle; this section walks the Drizzle-specific shape. -> **Runner note.** Examples below use `npx stash` for npm projects. Substitute `bunx stash` (Bun), `pnpm dlx stash` (pnpm), or `yarn dlx stash` (Yarn) — or run `stash` directly when it is installed as a project dev dep. The behaviour is identical across runners; only the prefix changes. +> **Runner note.** `stash init` adds `stash` to the project as a dev dependency, so `stash ` runs through whichever package manager the project uses (Bun, pnpm, Yarn, or npm) — examples below show this bare form. 
Before init has run, prefix with your package manager's one-shot runner: `bunx`, `pnpm dlx`, `yarn dlx`, or `npx`. The CLI's behaviour is identical across all of them. > **Where am I?** Run `stash status` first (substitute the runner per the note above). It shows you which Drizzle tables/columns are mid-rollout, which are post-deploy, and what the next move is. Re-run after every transition. @@ -396,7 +396,7 @@ Generate the migration with `drizzle-kit generate`. The generated SQL should be Register the new encryption config with EQL: ```bash -npx stash db push +stash db push ``` If this is the project's first encrypted column, `db push` writes directly to the active EQL config (nothing to rename). If an active config already exists, `db push` writes the new config as `pending` — that's expected. The pending row will be promoted to active by `stash encrypt cutover` in the cutover step. @@ -432,11 +432,11 @@ Stop. Ship this PR to production. The deployed environment must be running the d When the deploy is live: ```bash -npx stash status # verify the rollout is recorded -npx stash plan # detects dual-writes are live; drafts the cutover plan +stash status # verify the rollout is recorded +stash plan # detects dual-writes are live; drafts the cutover plan ``` -`npx stash impl` will refuse to run a cutover-step plan if `cs_migrations` has no `dual_writing` event for `users.email`. That refusal is the safety net for cases where someone runs cutover work locally before the code is actually live. +`stash impl` will refuse to run a cutover-step plan if `cs_migrations` has no `dual_writing` event for `users.email`. That refusal is the safety net for cases where someone runs cutover work locally before the code is actually live. 
### Step 2 — Encryption cutover @@ -445,7 +445,7 @@ Once dual-writes are live in production and `cs_migrations` records `dual_writin #### Backfill: encrypt the historical rows ```bash -npx stash encrypt backfill --table users --column email +stash encrypt backfill --table users --column email # (Interactive: answer 'yes' to the dual-write confirmation prompt.) # (CI: pass --confirm-dual-writes-deployed instead.) ``` @@ -459,7 +459,7 @@ If something goes wrong (e.g. you discover the dual-write code wasn't actually l First, update the Drizzle schema to the post-cutover shape — switch `email` to use `encryptedType` and remove the `email_encrypted` column. Then re-push the encryption config so EQL has a pending row that points at `email` (no `_encrypted` suffix): ```bash -npx stash db push +stash db push # → writes the new config as `pending`. Active config (still pointing at # `email_encrypted`) keeps serving while we complete the cutover. ``` @@ -467,7 +467,7 @@ npx stash db push Now run the cutover: ```bash -npx stash encrypt cutover --table users --column email +stash encrypt cutover --table users --column email ``` Inside one transaction it: (1) renames `email` → `email_plaintext` and `email_encrypted` → `email`, (2) promotes the pending EQL config to `active` (and the prior active to `inactive`), (3) records a `cut_over` event in `cs_migrations`. @@ -509,7 +509,7 @@ For queries that filter on `email`, switch to the encrypted operators from `crea Once read paths are updated and you're confident reads are decrypting correctly, generate the drop migration: ```bash -npx stash encrypt drop --table users --column email +stash encrypt drop --table users --column email ``` The CLI emits a Drizzle migration file with `ALTER TABLE users DROP COLUMN email_plaintext;`. Review and apply with `drizzle-kit migrate`. 
Update the schema to remove `email_plaintext`: @@ -530,9 +530,9 @@ Also remove the dual-write code from app paths — `email_plaintext` is gone; on ### Inspecting progress at any time ```bash -npx stash status # quest log: where each rollout is, what to do next -npx stash encrypt status # raw per-column phase, EQL state, backfill progress -npx stash encrypt plan # diffs your migrations.json intent vs observed state +stash status # quest log: where each rollout is, what to do next +stash encrypt status # raw per-column phase, EQL state, backfill progress +stash encrypt plan # diffs your migrations.json intent vs observed state ``` All three are read-only. diff --git a/skills/stash-dynamodb/SKILL.md b/skills/stash-dynamodb/SKILL.md index 85d2dd51..b5c9a305 100644 --- a/skills/stash-dynamodb/SKILL.md +++ b/skills/stash-dynamodb/SKILL.md @@ -43,7 +43,7 @@ DynamoDB encryption is **single-deploy**. There is no rollout/cutover split — For tables with **existing populated items**, the `__source` and `__hmac` attributes are added by the next write that touches each item. If you need every existing item encrypted at once (e.g. because a query uses `email__hmac` and would miss legacy items), run a one-shot script that reads every item, calls `encryptItem`, and writes it back. Idempotent: re-running an already-encrypted item is a no-op as long as the schema hasn't changed. -> **Where am I?** Run `npx stash status` (or `bunx`/`pnpm dlx`/`yarn dlx` per your runner) for a project-wide view across both Postgres and DynamoDB integrations. DynamoDB columns surface in the quest log as already-complete since there is no staged lifecycle to track. +> **Where am I?** Run `stash status` (or `bunx`/`pnpm dlx`/`yarn dlx` per your runner) for a project-wide view across both Postgres and DynamoDB integrations. DynamoDB columns surface in the quest log as already-complete since there is no staged lifecycle to track. 
## Setup diff --git a/skills/stash-encryption/SKILL.md b/skills/stash-encryption/SKILL.md index d79ec922..a5972d08 100644 --- a/skills/stash-encryption/SKILL.md +++ b/skills/stash-encryption/SKILL.md @@ -601,11 +601,11 @@ db push (writes pending) drop plaintext column The gate is the rule that backfill is only safe once the dual-write code is **running in the production environment that owns the database** — not on the developer's laptop, not in CI. Any row inserted during the backfill window must be written to both columns by the application; otherwise it lands in plaintext only and creates silent migration drift. -> **Runner note.** Examples below use `npx stash` for npm projects. Substitute `bunx stash` (Bun), `pnpm dlx stash` (pnpm), or `yarn dlx stash` (Yarn) — or run `stash` directly when it is installed as a project dev dep (`stash init` sets that up). The behaviour is identical across runners; only the prefix changes. The `stash-cli` skill has the full mapping. +> **Runner note.** `stash init` adds `stash` to the project as a dev dependency, so `stash <command>` runs through whichever package manager the project uses (Bun, pnpm, Yarn, or npm) — examples below show this bare form. Before init has run, prefix with your package manager's one-shot runner: `bunx`, `pnpm dlx`, `yarn dlx`, or `npx`. The CLI's behaviour is identical across all of them; only the prefix changes. The `stash-cli` skill has the full mapping. ### Where am I? -Always start with `stash status` (`npx stash status` / `pnpm dlx stash status` / etc., per the runner note above). 
It is disk-only, idempotent, and tells you which encryption rollouts are in flight, what's been deployed, and what the next move is per column. Re-run it after every transition. Never act blind. ### Step 1 — Encryption rollout @@ -623,9 +623,9 @@ Everything that lands in the repo and ships in **one** PR: Stop. The rollout PR ships to production. The deployed environment must be running this code before any cutover-step work is safe. -When the deploy is live, run `npx stash status`. Look for the active quest's "Next move" hint to confirm dual-writes are recorded. Then run `npx stash plan` again — the CLI detects that dual-writes are live and writes a separate cutover plan. +When the deploy is live, run `stash status`. Look for the active quest's "Next move" hint to confirm dual-writes are recorded. Then run `stash plan` again — the CLI detects that dual-writes are live and writes a separate cutover plan. -`npx stash impl` will refuse to run a cutover-step plan if `cs_migrations` has no `dual_writing` event for the targeted columns. That refusal is intentional; it's the safety net for cases where someone runs cutover work locally before the code is actually live. +`stash impl` will refuse to run a cutover-step plan if `cs_migrations` has no `dual_writing` event for the targeted columns. That refusal is intentional; it's the safety net for cases where someone runs cutover work locally before the code is actually live. ### Step 2 — Encryption cutover @@ -656,13 +656,13 @@ Three sources of truth, kept separate on purpose: ```bash # Run this often — it's the canonical "where am I?" command. -npx stash status +stash status # ---- ENCRYPTION ROLLOUT (one PR, one deploy) ---- # 1. Add the encrypted twin column via your normal migration tooling # (drizzle-kit / supabase migrations / etc.). # 2. Register the new encryption config with EQL: -npx stash db push +stash db push # First push (no active config yet) → writes directly to active. 
# Subsequent push (active already exists) → writes pending; cutover # will promote it. @@ -673,29 +673,29 @@ npx stash db push # ---- ⛔ DEPLOY GATE ---- # Verify dual-writes are live, then redraft the plan for cutover work: -npx stash status -npx stash plan +stash status +stash plan # ---- ENCRYPTION CUTOVER ---- -npx stash encrypt backfill --table users --column email +stash encrypt backfill --table users --column email # Prompts to confirm dual-writes are live (or pass # --confirm-dual-writes-deployed in CI). Resumable; SIGINT-safe. # Recovery — if dual-writes weren't actually live when backfill ran, # re-run with --force to encrypt every plaintext row regardless. -npx stash encrypt backfill --table users --column email --force +stash encrypt backfill --table users --column email --force # Edit the schema to drop the `_encrypted` suffix, then re-push: -npx stash db push +stash db push # → writes the renamed-shape config as `pending`. The active config # keeps serving until cutover finishes. -npx stash encrypt cutover --table users --column email +stash encrypt cutover --table users --column email # In one transaction: rename physical columns, promote pending → active. # Wire the read paths through the encryption client. Remove dual-write # code. Then drop the plaintext column: -npx stash encrypt drop --table users --column email +stash encrypt drop --table users --column email ``` ### Library use diff --git a/skills/stash-secrets/SKILL.md b/skills/stash-secrets/SKILL.md index ddf5afbd..c25b4cea 100644 --- a/skills/stash-secrets/SKILL.md +++ b/skills/stash-secrets/SKILL.md @@ -129,41 +129,41 @@ if (result.failure) { ## CLI Usage -The CLI is available via `npx stash` after install. +`stash init` adds `stash` to the project as a dev dependency, so `stash ` runs through whichever package manager the project uses (Bun / pnpm / Yarn / npm). Examples below show the bare `stash` form. 
Before init has run, prefix with your package manager's runner — `bunx`, `pnpm dlx`, `yarn dlx`, or `npx` — whichever matches your project. ### Set a Secret ```bash -npx stash secrets set --name DATABASE_URL --value "postgres://..." --environment production -npx stash secrets set -n DATABASE_URL -V "postgres://..." -e production +stash secrets set --name DATABASE_URL --value "postgres://..." --environment production +stash secrets set -n DATABASE_URL -V "postgres://..." -e production ``` ### Get a Secret ```bash -npx stash secrets get --name DATABASE_URL --environment production -npx stash secrets get -n DATABASE_URL -e production +stash secrets get --name DATABASE_URL --environment production +stash secrets get -n DATABASE_URL -e production ``` ### Get Many Secrets ```bash -npx stash secrets get-many --name DATABASE_URL,API_KEY --environment production -npx stash secrets get-many -n DATABASE_URL,API_KEY,JWT_SECRET -e production +stash secrets get-many --name DATABASE_URL,API_KEY --environment production +stash secrets get-many -n DATABASE_URL,API_KEY,JWT_SECRET -e production ``` ### List Secrets ```bash -npx stash secrets list --environment production -npx stash secrets list -e production +stash secrets list --environment production +stash secrets list -e production ``` ### Delete a Secret ```bash -npx stash secrets delete --name DATABASE_URL --environment production -npx stash secrets delete -n DATABASE_URL -e production --yes # skip confirmation +stash secrets delete --name DATABASE_URL --environment production +stash secrets delete -n DATABASE_URL -e production --yes # skip confirmation ``` ### CLI Flag Reference diff --git a/skills/stash-supabase/SKILL.md b/skills/stash-supabase/SKILL.md index f1d008c0..c156b2c4 100644 --- a/skills/stash-supabase/SKILL.md +++ b/skills/stash-supabase/SKILL.md @@ -408,7 +408,7 @@ The hard case: a Supabase table that already exists with live data in a plaintex CipherStash splits this into two named steps with a hard 
 production-deploy gate between them: an **encryption rollout** (schema-add + dual-write code + `db push`) and an **encryption cutover** (backfill + rename + drop). The `stash-encryption` skill is the canonical reference for the lifecycle; this section walks the Supabase-specific shape. -> **Runner note.** Examples below use `npx stash` for npm projects. Substitute `bunx stash` (Bun), `pnpm dlx stash` (pnpm), or `yarn dlx stash` (Yarn) — or run `stash` directly when it is installed as a project dev dep. The behaviour is identical across runners; only the prefix changes. +> **Runner note.** `stash init` adds `stash` to the project as a dev dependency, so `stash <command>` runs through whichever package manager the project uses (Bun, pnpm, Yarn, or npm) — examples below show this bare form. Before init has run, prefix with your package manager's one-shot runner: `bunx`, `pnpm dlx`, `yarn dlx`, or `npx`. The CLI's behaviour is identical across all of them. > **Where am I?** Run `stash status` first (substitute the runner per the note above). It shows you which tables/columns are mid-rollout, which are post-deploy, and what the next move is. Re-run after every transition. @@ -471,7 +471,7 @@ export const encryptionClient = await Encryption({ schemas: [users] }) Register the new encryption config with EQL: ```bash -npx stash db push +stash db push ``` If this is the project's first encrypted column, `db push` writes directly to the active EQL config. If an active config already exists, it writes the new config as `pending` — that's expected. Cutover (later) will promote it. @@ -515,11 +515,11 @@ Stop. Ship this PR to production. 
The deployed environment must be running the d When the deploy is live: ```bash -npx stash status # verify the rollout is recorded -npx stash plan # detects dual-writes are live; drafts the cutover plan +stash status # verify the rollout is recorded +stash plan # detects dual-writes are live; drafts the cutover plan ``` -`npx stash impl` will refuse to run a cutover-step plan if `cs_migrations` has no `dual_writing` event for `users.email`. That refusal is the safety net for cases where someone runs cutover work locally before the code is actually live. +`stash impl` will refuse to run a cutover-step plan if `cs_migrations` has no `dual_writing` event for `users.email`. That refusal is the safety net for cases where someone runs cutover work locally before the code is actually live. ### Step 2 — Encryption cutover @@ -528,7 +528,7 @@ Once dual-writes are live in production and `cs_migrations` records `dual_writin #### Backfill: encrypt the historical rows ```bash -npx stash encrypt backfill --table users --column email +stash encrypt backfill --table users --column email # (Interactive: answer 'yes' to the dual-write confirmation prompt.) # (CI: pass --confirm-dual-writes-deployed instead.) ``` @@ -551,7 +551,7 @@ export const users = encryptedTable('users', { Re-push the encryption config so EQL has a pending row that points at `email` (no `_encrypted` suffix): ```bash -npx stash db push +stash db push # → writes the new config as `pending`. Active config (still pointing at # `email_encrypted`) keeps serving while we complete the cutover. ``` @@ -559,7 +559,7 @@ npx stash db push Now run the cutover: ```bash -npx stash encrypt cutover --table users --column email +stash encrypt cutover --table users --column email ``` Inside one transaction it: (1) renames `email` → `email_plaintext` and `email_encrypted` → `email`, (2) promotes the pending EQL config to `active` (and the prior active to `inactive`), (3) records a `cut_over` event in `cs_migrations`. 
@@ -583,7 +583,7 @@ For queries that filter on `email`, the `encryptedSupabase` wrapper handles the Once read paths are routing through `encryptedSupabase` and you're confident reads are decrypting correctly: ```bash -npx stash encrypt drop --table users --column email +stash encrypt drop --table users --column email ``` The CLI emits a Supabase migration file with `ALTER TABLE users DROP COLUMN email_plaintext;`. Review and apply with `supabase migration up` (or `supabase db reset` locally). Then remove the dual-write code from app paths — `email_plaintext` is gone; only `email` (encrypted) is written now via `encryptedSupabase`. @@ -591,9 +591,9 @@ The CLI emits a Supabase migration file with `ALTER TABLE users DROP COLUMN emai ### Inspecting progress at any time ```bash -npx stash status # quest log: where each rollout is, what to do next -npx stash encrypt status # raw per-column phase, EQL state, backfill progress -npx stash encrypt plan # diffs your migrations.json intent vs observed state +stash status # quest log: where each rollout is, what to do next +stash encrypt status # raw per-column phase, EQL state, backfill progress +stash encrypt plan # diffs your migrations.json intent vs observed state ``` All three are read-only.