Compare commits

..

25 Commits

Author SHA1 Message Date
09cbb308d2 up
Signed-off-by: Christoph Schmatzler <christoph@schmatzler.com>
2026-02-09 17:31:21 +00:00
c120b3d4d2 opencode: add Cog memory skill 2026-02-09 17:28:35 +00:00
3381945cea opencode: add Cog MCP server 2026-02-09 17:18:36 +00:00
d12aabdccc opencode: disable oh-my-opencode commit footer and co-author 2026-02-09 17:16:58 +00:00
d38a348a06 add openusage
Signed-off-by: Christoph Schmatzler <christoph@schmatzler.com>
2026-02-09 17:15:10 +00:00
42873f4d2d opencode: add oh-my-opencode config, remove custom oracle agent 2026-02-09 15:00:24 +00:00
c1bb006292 Remove mindy host and orphaned derek secret 2026-02-09 09:26:34 +00:00
9bdaaeb295 flake
Signed-off-by: Christoph Schmatzler <christoph@schmatzler.com>
2026-02-09 09:17:45 +00:00
6596ec2d9b opencode: add oracle research tools, spec-planner question tool, deny mcp-auth read 2026-02-08 17:46:29 +00:00
0103aa8c16 Remove watchman package and file watcher config 2026-02-08 08:45:17 +00:00
37b13cfd6a flake
Signed-off-by: Christoph Schmatzler <christoph@schmatzler.com>
2026-02-08 08:41:54 +00:00
29d27dccfb nushell: Remove deprecated use_ls_colors config option 2026-02-08 08:39:13 +00:00
cca27aa971 Replace fish with nushell 2026-02-08 08:37:09 +00:00
75bbb322d3 up
Signed-off-by: Christoph Schmatzler <christoph@schmatzler.com>
2026-02-06 16:11:06 +00:00
1a79b5fa9f Update Claude model to opus 4-6 2026-02-05 18:37:52 +00:00
9288aef5c7 flake
Signed-off-by: Christoph Schmatzler <christoph@schmatzler.com>
2026-02-05 18:26:06 +00:00
29a2dfc606 solidjs
Signed-off-by: Christoph Schmatzler <christoph@schmatzler.com>
2026-02-05 17:08:21 +00:00
2999325de9 up
Signed-off-by: Christoph Schmatzler <christoph@schmatzler.com>
2026-02-05 14:39:06 +00:00
06584ffedc rm appsignal 2026-02-05 08:46:42 +00:00
90f91bd017 rm custom profile
Signed-off-by: Christoph Schmatzler <christoph@schmatzler.com>
2026-02-05 08:40:06 +00:00
2b880be833 flake
Signed-off-by: Christoph Schmatzler <christoph@schmatzler.com>
2026-02-05 08:23:11 +00:00
64a5a29809 Add overseer host package for UI support 2026-02-04 20:30:26 +00:00
c1bae690b3 Use local overseer binary for MCP server 2026-02-04 20:20:01 +00:00
f8e912e201 Add overseer CLI for task management 2026-02-04 20:17:49 +00:00
ff8650bedf oc
Signed-off-by: Christoph Schmatzler <christoph@schmatzler.com>
2026-02-04 20:04:32 +00:00
52 changed files with 5443 additions and 287 deletions

View File

@@ -1,7 +1,6 @@
keys:
- &host_tahani age1njjegjjdqzfnrr54f536yl4lduqgna3wuv7ef6vtl9jw5cju0grsgy62tm
- &host_michael age187jl7e4k9n4guygkmpuqzeh0wenefwrfkpvuyhvwjrjwxqpzassqq3x67j
- &host_mindy age1dqt3znmzcgghsjjzzax0pf0eyu95h0p7kaf5v988ysjv7fl7lumsatl048
- &host_jason age1ez6j3r5wdp0tjy7n5qzv5vfakdc2nh2zeu388zu7a80l0thv052syxq5e2
- &host_chidi age1tlymdmaukhwupzrhszspp26lgd8s64rw4vu9lwc7gsgrjm78095s9fe9l3
creation_rules:
@@ -10,6 +9,5 @@ creation_rules:
- age:
- *host_tahani
- *host_michael
- *host_mindy
- *host_jason
- *host_chidi

View File

@@ -5,7 +5,7 @@
### Local Development
```bash
nix run .#build # Build current host config
nix run .#build -- <hostname> # Build specific host (chidi, jason, michael, mindy, tahani)
nix run .#build -- <hostname> # Build specific host (chidi, jason, michael, tahani)
nix run .#apply # Build and apply locally (darwin-rebuild/nixos-rebuild switch)
nix flake check # Validate flake
```
@@ -13,7 +13,7 @@ nix flake check # Validate flake
### Remote Deployment (NixOS only)
```bash
colmena build # Build all NixOS hosts
colmena apply --on <host> # Deploy to specific NixOS host (michael, mindy, tahani)
colmena apply --on <host> # Deploy to specific NixOS host (michael, tahani)
colmena apply # Deploy to all NixOS hosts
```

107
flake.lock generated
View File

@@ -183,11 +183,11 @@
]
},
"locked": {
"lastModified": 1768135262,
"narHash": "sha256-PVvu7OqHBGWN16zSi6tEmPwwHQ4rLPU9Plvs8/1TUBY=",
"lastModified": 1769996383,
"narHash": "sha256-AnYjnFWgS49RlqX7LrC4uA+sCCDBj0Ry/WOJ5XWAsa0=",
"owner": "hercules-ci",
"repo": "flake-parts",
"rev": "80daad04eddbbf5a4d883996a73f3f542fa437ac",
"rev": "57928607ea566b5db3ad13af0e57e921e6b12381",
"type": "github"
},
"original": {
@@ -254,11 +254,11 @@
]
},
"locked": {
"lastModified": 1770164260,
"narHash": "sha256-mQgOAYWlVJyuyXjZN6yxqXWyODvQI5P/UZUCU7IOuYo=",
"lastModified": 1770586272,
"narHash": "sha256-Ucci8mu8QfxwzyfER2DQDbvW9t1BnTUJhBmY7ybralo=",
"owner": "nix-community",
"repo": "home-manager",
"rev": "4fda26500b4539e0a1e3afba9f0e1616bdad4f85",
"rev": "b1f916ba052341edc1f80d4b2399f1092a4873ca",
"type": "github"
},
"original": {
@@ -270,11 +270,11 @@
"homebrew-cask": {
"flake": false,
"locked": {
"lastModified": 1770233516,
"narHash": "sha256-62fx3NVLGWo49oTE7PBy8H/UQ7zrzHAwTSCVYq79TFU=",
"lastModified": 1770623639,
"narHash": "sha256-LNLzbnhp5IEizTMMapF2FtLVD21sFzBfVgXcwNz7fKU=",
"owner": "homebrew",
"repo": "homebrew-cask",
"rev": "456bd4672340f4f9a5849176a0d17ab5c47fc9d7",
"rev": "c3bb7aedf0881187cbeb55ad2873240feba21603",
"type": "github"
},
"original": {
@@ -286,11 +286,11 @@
"homebrew-core": {
"flake": false,
"locked": {
"lastModified": 1770233633,
"narHash": "sha256-rMDzWIP7zWIT857ye1aMyxDmvwqbXfgBYGGJLsQGiYM=",
"lastModified": 1770627860,
"narHash": "sha256-ihOndNFECGtZhkrtynP8nDJ8fbSxhNd2zWcq3CLDnQA=",
"owner": "homebrew",
"repo": "homebrew-core",
"rev": "49044f4f9464b207b211b42005cbc190882bbc37",
"rev": "a12e59e6d202fc64aee013f8574c043a4c00a271",
"type": "github"
},
"original": {
@@ -306,11 +306,11 @@
"treefmt-nix": "treefmt-nix"
},
"locked": {
"lastModified": 1770215677,
"narHash": "sha256-2re/gbzb2fZHpQp6u7mM5rBVhf55McYdwOeGdYgJNKo=",
"lastModified": 1770616720,
"narHash": "sha256-NY7yFg3ZG0fzseC4SK/TQjgaODczuvCDtJZNsBmN2QU=",
"owner": "numtide",
"repo": "llm-agents.nix",
"rev": "a23961fc90c59a0cd7f4886c0bcc0efd796a8287",
"rev": "09019dadd541051fc11f5008b56f4e8a14d2df4c",
"type": "github"
},
"original": {
@@ -382,11 +382,11 @@
},
"nixpkgs": {
"locked": {
"lastModified": 1770141374,
"narHash": "sha256-yD4K/vRHPwXbJf5CK3JkptBA6nFWUKNX/jlFp2eKEQc=",
"lastModified": 1770537093,
"narHash": "sha256-pF1quXG5wsgtyuPOHcLfYg/ft/QMr8NnX0i6tW2187s=",
"owner": "NixOS",
"repo": "nixpkgs",
"rev": "41965737c1797c1d83cfb0b644ed0840a6220bd1",
"rev": "fef9403a3e4d31b0a23f0bacebbec52c248fbb51",
"type": "github"
},
"original": {
@@ -413,11 +413,11 @@
},
"nixpkgs_2": {
"locked": {
"lastModified": 1770233805,
"narHash": "sha256-MpkToNABB/lZIdctfSQPBJr5VqlglymE/ugz5KVBahQ=",
"lastModified": 1770627848,
"narHash": "sha256-pWVT4wjh+HKIdvGhph0vU1Kh48OSaSutPGpXxGNxSxw=",
"owner": "nixos",
"repo": "nixpkgs",
"rev": "bff843f57d05643dcb41ce3cdf70a032b1dad9fb",
"rev": "fe776c9fe2c37f51546bb50ced285ea2a365e7d9",
"type": "github"
},
"original": {
@@ -429,11 +429,11 @@
},
"nixpkgs_3": {
"locked": {
"lastModified": 1768875095,
"narHash": "sha256-dYP3DjiL7oIiiq3H65tGIXXIT1Waiadmv93JS0sS+8A=",
"lastModified": 1770380644,
"narHash": "sha256-P7dWMHRUWG5m4G+06jDyThXO7kwSk46C1kgjEWcybkE=",
"owner": "NixOS",
"repo": "nixpkgs",
"rev": "ed142ab1b3a092c4d149245d0c4126a5d7ea00b0",
"rev": "ae67888ff7ef9dff69b3cf0cc0fbfbcd3a722abe",
"type": "github"
},
"original": {
@@ -466,11 +466,11 @@
"systems": "systems_3"
},
"locked": {
"lastModified": 1770025103,
"narHash": "sha256-Qlb19PP0n6s+v1MywVjROV5XwsCvA58XXiXHk0Govb4=",
"lastModified": 1770627083,
"narHash": "sha256-Js8WrUwQ3lLRjWb8jGGE5npRN96E4mtPwyuNDuCDkcg=",
"owner": "nix-community",
"repo": "nixvim",
"rev": "31c3b3687dc85e3fbbf5c44728a5ee231608f8a9",
"rev": "d354487c4692de3d0918170c45bde05175b12e30",
"type": "github"
},
"original": {
@@ -482,11 +482,11 @@
"nono": {
"flake": false,
"locked": {
"lastModified": 1770230267,
"narHash": "sha256-EUSXFyhPBkffuukLvr4Q3LythNds2ca5wJotW0APSY8=",
"lastModified": 1770553882,
"narHash": "sha256-yEXw+rtuhoZvx1eO2Q+qPeGpvVbyASh7D9YEVAteoo8=",
"owner": "lukehinds",
"repo": "nono",
"rev": "42df275918579adeb3243a9f9e085f052e19199c",
"rev": "e80983bb6a4058335e96e02eeabe17314f771a9c",
"type": "github"
},
"original": {
@@ -495,6 +495,39 @@
"type": "github"
}
},
"openusage": {
"flake": false,
"locked": {
"lastModified": 1770543295,
"narHash": "sha256-DvgEPZhFm06igalUPgnQ8VLkl0gk/3rm+lbEJ2/s7gM=",
"owner": "robinebers",
"repo": "openusage",
"rev": "22a7bd5f7856397400e60dd787ad82b23c763969",
"type": "github"
},
"original": {
"owner": "robinebers",
"ref": "v0.5.1",
"repo": "openusage",
"type": "github"
}
},
"overseer": {
"flake": false,
"locked": {
"lastModified": 1770303305,
"narHash": "sha256-NM1haQAk1mWdmewgIv6tzApaIQxWKrIrri0+uXHY3Zc=",
"owner": "dmmulroy",
"repo": "overseer",
"rev": "5880d97939744ff72eb552c671da2fae1789041e",
"type": "github"
},
"original": {
"owner": "dmmulroy",
"repo": "overseer",
"type": "github"
}
},
"root": {
"inputs": {
"colmena": "colmena",
@@ -510,6 +543,8 @@
"nixpkgs": "nixpkgs_2",
"nixvim": "nixvim",
"nono": "nono",
"openusage": "openusage",
"overseer": "overseer",
"sops-nix": "sops-nix",
"zjstatus": "zjstatus"
}
@@ -559,11 +594,11 @@
]
},
"locked": {
"lastModified": 1770145881,
"narHash": "sha256-ktjWTq+D5MTXQcL9N6cDZXUf9kX8JBLLBLT0ZyOTSYY=",
"lastModified": 1770526836,
"narHash": "sha256-xbvX5Ik+0inJcLJtJ/AajAt7xCk6FOCrm5ogpwwvVDg=",
"owner": "Mic92",
"repo": "sops-nix",
"rev": "17eea6f3816ba6568b8c81db8a4e6ca438b30b7c",
"rev": "d6e0e666048a5395d6ea4283143b7c9ac704720d",
"type": "github"
},
"original": {
@@ -656,11 +691,11 @@
]
},
"locked": {
"lastModified": 1769691507,
"narHash": "sha256-8aAYwyVzSSwIhP2glDhw/G0i5+wOrren3v6WmxkVonM=",
"lastModified": 1770228511,
"narHash": "sha256-wQ6NJSuFqAEmIg2VMnLdCnUc0b7vslUohqqGGD+Fyxk=",
"owner": "numtide",
"repo": "treefmt-nix",
"rev": "28b19c5844cc6e2257801d43f2772a4b4c050a1b",
"rev": "337a4fe074be1042a35086f15481d763b8ddc0e7",
"type": "github"
},
"original": {

View File

@@ -44,6 +44,14 @@
url = "github:lukehinds/nono";
flake = false;
};
overseer = {
url = "github:dmmulroy/overseer";
flake = false;
};
openusage = {
url = "github:robinebers/openusage/v0.5.1";
flake = false;
};
};
outputs = inputs @ {flake-parts, ...}:

View File

@@ -25,8 +25,7 @@
../../profiles/bash.nix
../../profiles/bat.nix
../../profiles/direnv.nix
../../profiles/eza.nix
../../profiles/fish.nix
../../profiles/nushell.nix
../../profiles/fzf.nix
../../profiles/ghostty.nix
../../profiles/git.nix

View File

@@ -24,8 +24,7 @@
../../profiles/bash.nix
../../profiles/bat.nix
../../profiles/direnv.nix
../../profiles/eza.nix
../../profiles/fish.nix
../../profiles/nushell.nix
../../profiles/fzf.nix
../../profiles/ghostty.nix
../../profiles/git.nix

View File

@@ -39,7 +39,7 @@
home-manager.users.${user} = {
imports = [
../../profiles/fish.nix
../../profiles/nushell.nix
../../profiles/home.nix
../../profiles/ssh.nix
inputs.nixvim.homeModules.nixvim

View File

@@ -26,8 +26,7 @@
../../profiles/bash.nix
../../profiles/bat.nix
../../profiles/direnv.nix
../../profiles/eza.nix
../../profiles/fish.nix
../../profiles/nushell.nix
../../profiles/fzf.nix
../../profiles/git.nix
../../profiles/home.nix
@@ -37,6 +36,7 @@
../../profiles/nono.nix
../../profiles/neovim
../../profiles/opencode.nix
../../profiles/overseer.nix
../../profiles/claude-code.nix
../../profiles/ripgrep.nix
../../profiles/ssh.nix

132
overlays/openusage.nix Normal file
View File

@@ -0,0 +1,132 @@
{inputs}: final: prev: let
version = "0.5.1";
in {
openusage =
prev.rustPlatform.buildRustPackage (finalAttrs: {
pname = "openusage";
inherit version;
src = inputs.openusage;
cargoRoot = "src-tauri";
cargoLock = {
lockFile = "${inputs.openusage}/src-tauri/Cargo.lock";
outputHashes = {
"tauri-nspanel-2.1.0" = "sha256-PLACEHOLDER";
"tauri-plugin-aptabase-1.0.0" = "sha256-PLACEHOLDER";
};
};
buildAndTestSubdir = finalAttrs.cargoRoot;
node_modules =
prev.stdenv.mkDerivation {
inherit (finalAttrs) src version;
pname = "${finalAttrs.pname}-node_modules";
impureEnvVars =
prev.lib.fetchers.proxyImpureEnvVars
++ [
"GIT_PROXY_COMMAND"
"SOCKS_SERVER"
];
nativeBuildInputs = [
prev.bun
prev.writableTmpDirAsHomeHook
];
dontConfigure = true;
dontFixup = true;
dontPatchShebangs = true;
buildPhase = ''
runHook preBuild
export BUN_INSTALL_CACHE_DIR=$(mktemp -d)
bun install \
--no-progress \
--frozen-lockfile \
--ignore-scripts
runHook postBuild
'';
installPhase = ''
runHook preInstall
cp -R ./node_modules $out
runHook postInstall
'';
outputHash = "sha256-PLACEHOLDER";
outputHashMode = "recursive";
};
nativeBuildInputs = [
prev.cargo-tauri.hook
prev.rustPlatform.bindgenHook
prev.bun
prev.nodejs
prev.pkg-config
prev.makeBinaryWrapper
];
buildInputs =
prev.lib.optionals prev.stdenv.isDarwin (
with prev.darwin.apple_sdk.frameworks; [
AppKit
CoreFoundation
CoreServices
Security
WebKit
]
);
# Disable updater artifact generation — we don't have signing keys.
tauriConf = builtins.toJSON {bundle.createUpdaterArtifacts = false;};
passAsFile = ["tauriConf"];
preBuild = ''
tauriBuildFlags+=(
"--config"
"$tauriConfPath"
)
'';
configurePhase = ''
runHook preConfigure
# Copy pre-fetched node_modules
cp -R ${finalAttrs.node_modules} node_modules/
chmod -R u+rw node_modules
chmod -R u+x node_modules/.bin
patchShebangs node_modules
export HOME=$TMPDIR
export PATH="$PWD/node_modules/.bin:$PATH"
# Bundle plugins (copy from plugins/ to src-tauri/resources/bundled_plugins/)
${prev.nodejs}/bin/node copy-bundled.cjs
runHook postConfigure
'';
env = {
OPENSSL_NO_VENDOR = true;
};
doCheck = false;
postInstall =
prev.lib.optionalString prev.stdenv.isDarwin ''
makeWrapper $out/Applications/OpenUsage.app/Contents/MacOS/OpenUsage $out/bin/openusage
'';
meta = {
description = "Track all your AI coding subscriptions in one place";
homepage = "https://github.com/robinebers/openusage";
license = prev.lib.licenses.mit;
platforms = prev.lib.platforms.darwin;
mainProgram = "openusage";
};
});
}

101
overlays/overseer.nix Normal file
View File

@@ -0,0 +1,101 @@
{inputs}: final: prev: let
manifest = (prev.lib.importTOML "${inputs.overseer}/overseer/Cargo.toml").package;
overseer-cli =
prev.rustPlatform.buildRustPackage {
pname = "overseer-cli";
version = manifest.version;
cargoLock.lockFile = "${inputs.overseer}/overseer/Cargo.lock";
src = "${inputs.overseer}/overseer";
nativeBuildInputs = with prev; [
pkg-config
];
buildInputs = with prev; [
openssl
];
OPENSSL_NO_VENDOR = 1;
doCheck = false;
};
overseer-host =
prev.buildNpmPackage {
pname = "overseer-host";
version = manifest.version;
src = "${inputs.overseer}/host";
npmDepsHash = "sha256-WIjx6N8vnH3C6Kxn4tiryi3bM0xnov5ok2k9XrndIS0=";
buildPhase = ''
runHook preBuild
npm run build
runHook postBuild
'';
installPhase = ''
runHook preInstall
mkdir -p $out
cp -r dist $out/
cp -r node_modules $out/
cp package.json $out/
runHook postInstall
'';
};
overseer-ui =
prev.buildNpmPackage {
pname = "overseer-ui";
version = manifest.version;
src = "${inputs.overseer}/ui";
npmDepsHash = "sha256-krOsSd8OAPsdCOCf1bcz9c/Myj6jpHOkaD/l+R7PQpY=";
buildPhase = ''
runHook preBuild
npm run build
runHook postBuild
'';
installPhase = ''
runHook preInstall
mkdir -p $out
cp -r dist $out/
runHook postInstall
'';
};
in {
# The CLI looks for host/dist/index.js and ui/dist relative to the binary
# Using paths like: exe_dir.join("../@dmmulroy/overseer/host/dist/index.js")
# So we create: bin/os and @dmmulroy/overseer/host/dist/index.js
overseer =
prev.runCommand "overseer-${manifest.version}" {
nativeBuildInputs = [prev.makeWrapper];
} ''
# Create npm-like structure that the CLI expects
mkdir -p $out/bin
mkdir -p $out/@dmmulroy/overseer/host
mkdir -p $out/@dmmulroy/overseer/ui
# Copy host files
cp -r ${overseer-host}/dist $out/@dmmulroy/overseer/host/
cp -r ${overseer-host}/node_modules $out/@dmmulroy/overseer/host/
cp ${overseer-host}/package.json $out/@dmmulroy/overseer/host/
# Copy UI files
cp -r ${overseer-ui}/dist $out/@dmmulroy/overseer/ui/
# Copy CLI binary
cp ${overseer-cli}/bin/os $out/bin/os
# Make wrapper that ensures node is available
wrapProgram $out/bin/os \
--prefix PATH : ${prev.nodejs}/bin
'';
}

View File

@@ -1,7 +1,7 @@
{
programs.atuin = {
enable = true;
enableFishIntegration = true;
enableNushellIntegration = true;
flags = [
"--disable-up-arrow"
];

View File

@@ -1,5 +1,6 @@
{pkgs, ...}: {
programs.fish.enable = true;
environment.shells = [pkgs.nushell];
nixpkgs = {
config = {

View File

@@ -118,7 +118,7 @@
name = user;
home = "/Users/${user}";
isHidden = false;
shell = pkgs.fish;
shell = pkgs.nushell;
};
home-manager.useGlobalPkgs = true;

View File

@@ -1,6 +0,0 @@
{
programs.eza = {
enable = true;
enableFishIntegration = true;
};
}

View File

@@ -1,54 +0,0 @@
{
programs.fish = {
enable = true;
functions = {
open_project = ''
set -l base "$HOME/Projects"
set -l choice (fd -t d -d 1 -a . "$base/Personal" "$base/Work" \
| string replace -r -- "^$base/" "" \
| fzf --prompt "project > ")
test -n "$choice"; and cd "$base/$choice"
'';
};
interactiveShellInit = ''
set fish_greeting
set fish_color_normal 4c4f69
set fish_color_command 1e66f5
set fish_color_param dd7878
set fish_color_keyword d20f39
set fish_color_quote 40a02b
set fish_color_redirection ea76cb
set fish_color_end fe640b
set fish_color_comment 8c8fa1
set fish_color_error d20f39
set fish_color_gray 9ca0b0
set fish_color_selection --background=ccd0da
set fish_color_search_match --background=ccd0da
set fish_color_option 40a02b
set fish_color_operator ea76cb
set fish_color_escape e64553
set fish_color_autosuggestion 9ca0b0
set fish_color_cancel d20f39
set fish_color_cwd df8e1d
set fish_color_user 179299
set fish_color_host 1e66f5
set fish_color_host_remote 40a02b
set fish_color_status d20f39
set fish_pager_color_progress 9ca0b0
set fish_pager_color_prefix ea76cb
set fish_pager_color_completion 4c4f69
set fish_pager_color_description 9ca0b0
set -gx LS_COLORS "$(vivid generate catppuccin-latte)"
set -gx COLORTERM truecolor
set -gx COLORFGBG "15;0"
set -gx TERM_BACKGROUND light
for mode in default insert
bind --mode $mode \cp open_project
end
'';
};
}

View File

@@ -1,7 +1,6 @@
{
programs.fzf = {
enable = true;
enableFishIntegration = true;
};
home.sessionVariables = {

View File

@@ -1,6 +1,6 @@
{pkgs, ...}: {
xdg.configFile."ghostty/config".text = ''
command = ${pkgs.fish}/bin/fish
command = ${pkgs.nushell}/bin/nu
theme = Catppuccin Latte
window-padding-x = 12
window-padding-y = 3
@@ -10,7 +10,7 @@
cursor-style = block
mouse-hide-while-typing = true
mouse-scroll-multiplier = 1.25
shell-integration = fish
shell-integration = none
shell-integration-features = no-cursor
clipboard-read = allow
clipboard-write = allow

View File

@@ -95,15 +95,10 @@ in {
gf = "git fetch";
gfa = "git fetch --all --tags --prune";
gfo = "git fetch origin";
gfg = "git ls-files | grep";
gg = "git gui citool";
gga = "git gui citool --amend";
ggpull = "git pull origin \"$(git branch --show-current)\"";
ggpush = "git push origin \"$(git branch --show-current)\"";
ggsup = "git branch --set-upstream-to=origin/$(git branch --show-current)";
ghh = "git help";
gignore = "git update-index --assume-unchanged";
gignored = "git ls-files -v | grep \"^[[:lower:]]\"";
gl = "git pull";
glg = "git log --stat";
glgp = "git log --stat --patch";
@@ -118,7 +113,6 @@ in {
glols = "git log --graph --pretty=\"%Cred%h%Creset -%C(auto)%d%Creset %s %Cgreen(%ar) %C(bold blue)<%an>%Creset\" --stat";
glod = "git log --graph --pretty=\"%Cred%h%Creset -%C(auto)%d%Creset %s %Cgreen(%ad) %C(bold blue)<%an>%Creset\"";
glods = "git log --graph --pretty=\"%Cred%h%Creset -%C(auto)%d%Creset %s %Cgreen(%ad) %C(bold blue)<%an>%Creset\" --date=short";
gluc = "git pull upstream $(git branch --show-current)";
glum = "git pull upstream main";
gm = "git merge";
gma = "git merge --abort";
@@ -133,7 +127,6 @@ in {
gpd = "git push --dry-run";
gpf = "git push --force-with-lease";
gpod = "git push origin --delete";
gpoat = "git push origin --all && git push origin --tags";
gpr = "git pull --rebase";
gpra = "git pull --rebase --autostash";
gprav = "git pull --rebase --autostash -v";
@@ -142,8 +135,6 @@ in {
gprv = "git pull --rebase -v";
gprum = "git pull --rebase upstream main";
gprumi = "git pull --rebase=interactive upstream main";
gpsup = "git push --set-upstream origin $(git branch --show-current)";
gpsupf = "git push --set-upstream origin $(git branch --show-current) --force-with-lease";
gpv = "git push --verbose";
gpu = "git push upstream";
gr = "git remote";
@@ -169,13 +160,11 @@ in {
grm = "git rm";
grmc = "git rm --cached";
grmv = "git remote rename";
groh = "git reset origin/$(git branch --show-current) --hard";
grrm = "git remote remove";
grs = "git restore";
grset = "git remote set-url";
grss = "git restore --source";
grst = "git restore --staged";
grt = "cd \"$(git rev-parse --show-toplevel || echo .)\"";
gru = "git reset --";
grup = "git remote update";
grv = "git remote --verbose";
@@ -201,16 +190,43 @@ in {
gswm = "git switch main";
gta = "git tag --annotate";
gts = "git tag --sign";
gtv = "git tag | sort -V";
gunignore = "git update-index --no-assume-unchanged";
gunwip = "git rev-list --max-count=1 --format=\"%s\" HEAD | grep -q \"\\--wip--\" && git reset HEAD~1";
gwch = "git whatchanged -p --abbrev-commit --pretty=medium";
gwipe = "git reset --hard && git clean --force -df";
gwt = "git worktree";
gwta = "git worktree add";
gwtls = "git worktree list";
gwtmv = "git worktree move";
gwtrm = "git worktree remove";
gwip = "git add -A; git rm $(git ls-files --deleted) 2> /dev/null; git commit --no-verify --no-gpg-sign --message \"--wip-- [skip ci]\"";
};
# Complex git aliases that require pipes/subshells — nushell `alias` can't
# handle these, so they're defined as custom commands instead.
programs.nushell.extraConfig = ''
def ggpull [] { git pull origin (git branch --show-current | str trim) }
def ggpush [] { git push origin (git branch --show-current | str trim) }
def ggsup [] { git branch $"--set-upstream-to=origin/(git branch --show-current | str trim)" }
def gluc [] { git pull upstream (git branch --show-current | str trim) }
def gpsup [] { git push --set-upstream origin (git branch --show-current | str trim) }
def gpsupf [] { git push --set-upstream origin (git branch --show-current | str trim) --force-with-lease }
def groh [] { git reset $"origin/(git branch --show-current | str trim)" --hard }
def --env grt [] {
let toplevel = (do { git rev-parse --show-toplevel } | complete | get stdout | str trim)
if ($toplevel | is-not-empty) { cd $toplevel } else { cd . }
}
def gfg [...pattern: string] { git ls-files | lines | where {|f| $f =~ ($pattern | str join ".*") } }
def gignored [] { git ls-files -v | lines | where {|l| ($l | str substring 0..1) =~ "[a-z]" } }
def gpoat [] { git push origin --all; git push origin --tags }
def gtv [] { git tag | lines | sort }
def gwipe [] { git reset --hard; git clean --force -df }
def gunwip [] {
let msg = (git rev-list --max-count=1 --format="%s" HEAD | lines | get 1)
if ($msg | str contains "--wip--") { git reset HEAD~1 }
}
def gwip [] {
git add -A
let deleted = (git ls-files --deleted | lines)
if ($deleted | is-not-empty) { git rm ...$deleted }
git commit --no-verify --no-gpg-sign --message "--wip-- [skip ci]"
}
'';
}

View File

@@ -1,7 +1,7 @@
{
programs.mise = {
enable = true;
enableFishIntegration = true;
enableNushellIntegration = true;
globalConfig.settings = {
auto_install = false;
};

View File

@@ -66,7 +66,7 @@
"network"
"systemd-journal"
];
shell = pkgs.fish;
shell = pkgs.nushell;
openssh.authorizedKeys.keys = constants.sshKeys;
};

View File

@@ -2,18 +2,4 @@
home.packages = with pkgs; [
nono
];
xdg.configFile."nono/profiles/opencode.toml".text = ''
[meta]
name = "opencode"
version = "1.0.0"
description = "OpenCode AI agent"
[filesystem]
allow = ["$WORKDIR"]
read = ["$XDG_CONFIG_HOME/opencode"]
[network]
block = false
'';
}

225
profiles/nushell.nix Normal file
View File

@@ -0,0 +1,225 @@
{pkgs, ...}: {
programs.nushell = {
enable = true;
settings = {
show_banner = false;
completions = {
algorithm = "fuzzy";
case_sensitive = false;
};
history = {
file_format = "sqlite";
};
};
environmentVariables = {
COLORTERM = "truecolor";
COLORFGBG = "15;0";
TERM_BACKGROUND = "light";
};
extraEnv = ''
$env.LS_COLORS = (${pkgs.vivid}/bin/vivid generate catppuccin-latte)
'';
extraConfig = ''
# --- Catppuccin Latte Theme ---
let theme = {
rosewater: "#dc8a78"
flamingo: "#dd7878"
pink: "#ea76cb"
mauve: "#8839ef"
red: "#d20f39"
maroon: "#e64553"
peach: "#fe640b"
yellow: "#df8e1d"
green: "#40a02b"
teal: "#179299"
sky: "#04a5e5"
sapphire: "#209fb5"
blue: "#1e66f5"
lavender: "#7287fd"
text: "#4c4f69"
subtext1: "#5c5f77"
subtext0: "#6c6f85"
overlay2: "#7c7f93"
overlay1: "#8c8fa1"
overlay0: "#9ca0b0"
surface2: "#acb0be"
surface1: "#bcc0cc"
surface0: "#ccd0da"
base: "#eff1f5"
mantle: "#e6e9ef"
crust: "#dce0e8"
}
let scheme = {
recognized_command: $theme.blue
unrecognized_command: $theme.text
constant: $theme.peach
punctuation: $theme.overlay2
operator: $theme.sky
string: $theme.green
virtual_text: $theme.surface2
variable: { fg: $theme.flamingo attr: i }
filepath: $theme.yellow
}
$env.config.color_config = {
separator: { fg: $theme.surface2 attr: b }
leading_trailing_space_bg: { fg: $theme.lavender attr: u }
header: { fg: $theme.text attr: b }
row_index: $scheme.virtual_text
record: $theme.text
list: $theme.text
hints: $scheme.virtual_text
search_result: { fg: $theme.base bg: $theme.yellow }
shape_closure: $theme.teal
closure: $theme.teal
shape_flag: { fg: $theme.maroon attr: i }
shape_matching_brackets: { attr: u }
shape_garbage: $theme.red
shape_keyword: $theme.mauve
shape_match_pattern: $theme.green
shape_signature: $theme.teal
shape_table: $scheme.punctuation
cell-path: $scheme.punctuation
shape_list: $scheme.punctuation
shape_record: $scheme.punctuation
shape_vardecl: $scheme.variable
shape_variable: $scheme.variable
empty: { attr: n }
filesize: {||
if $in < 1kb {
$theme.teal
} else if $in < 10kb {
$theme.green
} else if $in < 100kb {
$theme.yellow
} else if $in < 10mb {
$theme.peach
} else if $in < 100mb {
$theme.maroon
} else if $in < 1gb {
$theme.red
} else {
$theme.mauve
}
}
duration: {||
if $in < 1day {
$theme.teal
} else if $in < 1wk {
$theme.green
} else if $in < 4wk {
$theme.yellow
} else if $in < 12wk {
$theme.peach
} else if $in < 24wk {
$theme.maroon
} else if $in < 52wk {
$theme.red
} else {
$theme.mauve
}
}
datetime: {|| (date now) - $in |
if $in < 1day {
$theme.teal
} else if $in < 1wk {
$theme.green
} else if $in < 4wk {
$theme.yellow
} else if $in < 12wk {
$theme.peach
} else if $in < 24wk {
$theme.maroon
} else if $in < 52wk {
$theme.red
} else {
$theme.mauve
}
}
shape_external: $scheme.unrecognized_command
shape_internalcall: $scheme.recognized_command
shape_external_resolved: $scheme.recognized_command
shape_block: $scheme.recognized_command
block: $scheme.recognized_command
shape_custom: $theme.pink
custom: $theme.pink
background: $theme.base
foreground: $theme.text
cursor: { bg: $theme.rosewater fg: $theme.base }
shape_range: $scheme.operator
range: $scheme.operator
shape_pipe: $scheme.operator
shape_operator: $scheme.operator
shape_redirection: $scheme.operator
glob: $scheme.filepath
shape_directory: $scheme.filepath
shape_filepath: $scheme.filepath
shape_glob_interpolation: $scheme.filepath
shape_globpattern: $scheme.filepath
shape_int: $scheme.constant
int: $scheme.constant
bool: $scheme.constant
float: $scheme.constant
nothing: $scheme.constant
binary: $scheme.constant
shape_nothing: $scheme.constant
shape_bool: $scheme.constant
shape_float: $scheme.constant
shape_binary: $scheme.constant
shape_datetime: $scheme.constant
shape_literal: $scheme.constant
string: $scheme.string
shape_string: $scheme.string
shape_string_interpolation: $theme.flamingo
shape_raw_string: $scheme.string
shape_externalarg: $scheme.string
}
$env.config.highlight_resolved_externals = true
$env.config.explore = {
status_bar_background: { fg: $theme.text, bg: $theme.mantle },
command_bar_text: { fg: $theme.text },
highlight: { fg: $theme.base, bg: $theme.yellow },
status: {
error: $theme.red,
warn: $theme.yellow,
info: $theme.blue,
},
selected_cell: { bg: $theme.blue fg: $theme.base },
}
# --- Custom Commands ---
def --env open_project [] {
let base = ($env.HOME | path join "Projects")
let choice = (
${pkgs.fd}/bin/fd -t d -d 1 -a . ($base | path join "Personal") ($base | path join "Work")
| lines
| each {|p| $p | str replace $"($base)/" "" }
| str join "\n"
| ${pkgs.fzf}/bin/fzf --prompt "project > "
)
if ($choice | str trim | is-not-empty) {
cd ($base | path join ($choice | str trim))
}
}
# --- Keybinding: Ctrl+O for open_project ---
$env.config.keybindings = ($env.config.keybindings | append [
{
name: open_project
modifier: control
keycode: char_o
mode: [emacs vi_insert vi_normal]
event: {
send: executehostcommand
cmd: "open_project"
}
}
])
'';
};
}

View File

@@ -6,18 +6,18 @@
home.sessionVariables = {
OPENCODE_ENABLE_EXA = 1;
OPENCODE_EXPERIMENTAL_LSP_TOOL = 1;
# OPENCODE_EXPERIMENTAL_MARKDOWN = 1;
OPENCODE_EXPERIMENTAL_MARKDOWN = 1;
OPENCODE_EXPERIMENTAL_PLAN_MODE = 1;
OPENCODE_EXPERIMENTAL_FILE_WATCHER = 1;
};
programs.opencode = {
enable = true;
package = inputs.llm-agents.packages.${pkgs.stdenv.hostPlatform.system}.opencode;
settings = {
model = "opencode/claude-opus-4-5";
model = "anthropic/claude-opus-4-6";
small_model = "opencode/minimax-m2.1";
theme = "catppuccin";
plugin = ["oh-my-opencode" "opencode-anthropic-auth"];
keybinds = {
leader = "ctrl+o";
};
@@ -28,14 +28,15 @@
"*.env.*" = "deny";
"*.envrc" = "deny";
"secrets/*" = "deny";
"~/.local/share/opencode/mcp-auth.json" = "deny";
};
};
agent = {
plan = {
model = "opencode/gpt-5.2-codex";
model = "anthropic/claude-opus-4-6";
};
explore = {
model = "opencode/minimax-m2.1";
model = "anthropic/claude-haiku-4-5";
};
};
instructions = [
@@ -49,6 +50,14 @@
};
};
mcp = {
cog = {
enabled = true;
type = "remote";
url = "https://trycog.ai/mcp";
headers = {
Authorization = "Bearer {env:COG_API_TOKEN}";
};
};
context7 = {
enabled = true;
type = "remote";
@@ -64,21 +73,10 @@
type = "local";
command = ["bunx" "opensrc-mcp"];
};
appsignal = {
enabled = true;
overseer = {
enabled = false;
type = "local";
command = [
"docker"
"run"
"-i"
"--rm"
"-e"
"APPSIGNAL_API_KEY"
"appsignal/mcp"
];
environment = {
APPSIGNAL_API_KEY = "{env:APPSIGNAL_API_KEY}";
};
command = ["${pkgs.overseer}/bin/os" "mcp"];
};
};
};
@@ -101,5 +99,14 @@
source = ./opencode/tool;
recursive = true;
};
"opencode/oh-my-opencode.json".text =
builtins.toJSON {
"$schema" = "https://raw.githubusercontent.com/code-yeongyu/oh-my-opencode/master/assets/oh-my-opencode.schema.json";
disabled_mcps = ["websearch" "context7" "grep_app"];
git_master = {
commit_footer = false;
include_co_authored_by = false;
};
};
};
}

View File

@@ -1,86 +0,0 @@
---
description: Senior engineering advisor for code reviews, architecture decisions, complex debugging, and planning. Invoke when you need deeper analysis before acting — reviews, trade-offs, debugging race conditions, planning refactors. Prompt with precise problem + files. Ask for concrete outcomes.
mode: subagent
model: opencode/gpt-5.2-codex
options:
thinking:
type: enabled
budgetTokens: 31999
permission:
"*": deny
read: allow
grep: allow
glob: allow
webfetch: allow
lsp: allow
---
You are the Oracle - an expert AI advisor with advanced reasoning capabilities.
Your role is to provide high-quality technical guidance, code reviews, architectural advice, and strategic planning for software engineering tasks.
You are a subagent inside an AI coding system, called when the main agent needs a smarter, more capable model. You are invoked in a zero-shot manner - no one can ask you follow-up questions or provide follow-up answers.
## Key Responsibilities
- Analyze code and architecture patterns
- Provide specific, actionable technical recommendations
- Plan implementations and refactoring strategies
- Answer deep technical questions with clear reasoning
- Suggest best practices and improvements
- Identify potential issues and propose solutions
## Operating Principles (Simplicity-First)
1. **Default to simplest viable solution** that meets stated requirements
2. **Prefer minimal, incremental changes** that reuse existing code, patterns, and dependencies
3. **Optimize for maintainability and developer time** over theoretical scalability
4. **Apply YAGNI and KISS** - avoid premature optimization
5. **One primary recommendation** - offer alternatives only if trade-offs are materially different
6. **Calibrate depth to scope** - brief for small tasks, deep only when required
7. **Stop when "good enough"** - note signals that would justify revisiting
## Effort Estimates
Include rough effort signal when proposing changes:
- **S** (<1 hour) - trivial, single-location change
- **M** (1-3 hours) - moderate, few files
- **L** (1-2 days) - significant, cross-cutting
- **XL** (>2 days) - major refactor or new system
## Response Format
Keep responses concise and action-oriented. For straightforward questions, collapse sections as appropriate:
### 1. TL;DR
1-3 sentences with the recommended simple approach.
### 2. Recommendation
Numbered steps or short checklist. Include minimal diffs/snippets only as needed.
### 3. Rationale
Brief justification. Mention why alternatives are unnecessary now.
### 4. Risks & Guardrails
Key caveats and mitigations.
### 5. When to Reconsider
Concrete triggers that justify a more complex design.
### 6. Advanced Path (optional)
Brief outline only if relevant and trade-offs are significant.
## Tool Usage
You have read-only access: read, grep, glob, LSP, webfetch.
Use them freely to verify assumptions and gather context. Your extended thinking enables deep analysis - leverage it fully.
## Guidelines
- Investigate thoroughly; report concisely - focus on highest-leverage insights
- For planning tasks, break down into minimal steps that achieve the goal incrementally
- Justify recommendations briefly - avoid long speculative exploration
- If the request is ambiguous, state your interpretation explicitly before answering
- If unanswerable from available context, say so directly
**IMPORTANT:** Only your last message is returned to the main agent and displayed to the user. Make it comprehensive yet focused, with a clear, simple recommendation that enables immediate action.

View File

@@ -1,6 +1,5 @@
---
description: Review changes with parallel @code-review subagents
agent: plan
---
Review the code changes using THREE (3) @code-review subagents and correlate results into a summary ranked by severity. Use the provided user guidance to steer the review and focus on specific code paths, changes, and/or areas of concern.

View File

@@ -0,0 +1,17 @@
---
description: Convert a markdown plan/spec to Overseer tasks
---
Convert markdown planning documents into trackable Overseer task hierarchies.
First, invoke the skill tool to load the overseer-plan skill:
```
skill({ name: 'overseer-plan' })
```
Then follow the skill instructions to convert the document.
<user-request>
$ARGUMENTS
</user-request>

View File

@@ -0,0 +1,17 @@
---
description: Manage tasks via Overseer - create, list, start, complete, find ready work
---
Task orchestration via Overseer codemode MCP.
First, invoke the skill tool to load the overseer skill:
```
skill({ name: 'overseer' })
```
Then follow the skill instructions to manage tasks.
<user-request>
$ARGUMENTS
</user-request>

View File

@@ -0,0 +1,17 @@
---
description: Dialogue-driven spec development through skeptical questioning
---
Develop implementation-ready specs through iterative dialogue and skeptical questioning.
First, invoke the skill tool to load the spec-planner skill:
```
skill({ name: 'spec-planner' })
```
Then follow the skill instructions to develop the spec.
<user-request>
$ARGUMENTS
</user-request>

View File

@@ -0,0 +1,406 @@
---
name: cog
description: Persistent knowledge graph memory via Cog MCP. Use when recording insights, querying prior knowledge, or managing memory consolidation.
metadata:
author: trycog
version: "1.0.0"
---
# Cog Memory System
Persistent knowledge graph for teams. Concepts (engrams) linked via relationships (synapses). Spreading activation surfaces connected knowledge.
## Core Workflow
```
1. UNDERSTAND task (read files, parse request)
2. QUERY Cog with specific keywords <- MANDATORY, no exceptions
3. WAIT for results
4. EXPLORE/IMPLEMENT guided by Cog knowledge
5. RECORD insights as short-term memories during work
6. CONSOLIDATE memories after work (reinforce valid, flush invalid)
```
**Hierarchy of truth:** Current code > User statements > Cog knowledge
---
## Visual Indicators (MANDATORY)
Print before EVERY Cog tool call:
| Tool | Print |
|------|-------|
| `cog_recall` | `Querying Cog...` |
| `cog_learn` | `Recording to Cog...` |
| `cog_associate` | `Linking concepts...` |
| `cog_update` | `Updating engram...` |
| `cog_trace` | `Tracing connections...` |
| `cog_connections` | `Exploring connections...` |
| `cog_unlink` | `Removing link...` |
| `cog_list_short_term` | `Listing short-term memories...` |
| `cog_reinforce` | `Reinforcing memory...` |
| `cog_flush` | `Flushing invalid memory...` |
| `cog_verify` | `Verifying synapse...` |
| `cog_stale` | `Listing stale synapses...` |
---
## Tools Reference
| Tool | Purpose |
|------|---------|
| `cog_recall` | Search with spreading activation |
| `cog_learn` | Create memory with **chains** (sequential) or associations (hub) |
| `cog_get` | Retrieve engram by ID |
| `cog_associate` | Link two existing concepts |
| `cog_trace` | Find paths between concepts |
| `cog_update` | Modify engram term/definition |
| `cog_unlink` | Remove synapse |
| `cog_connections` | List engram connections |
| `cog_bootstrap` | Exploration prompt for empty brains |
| `cog_list_short_term` | List pending consolidations |
| `cog_reinforce` | Convert short-term to long-term |
| `cog_flush` | Delete invalid short-term memory |
| `cog_verify` | Confirm synapse is still accurate |
| `cog_stale` | List synapses needing verification |
---
## Querying Rules
### Before exploring code, ALWAYS query Cog first
Even for "trivial" tasks. The 2-second query may reveal gotchas, prior solutions, or context that changes your approach.
### Query Reformulation (Critical for Recall)
Before calling `cog_recall`, **transform your query from question-style to definition-style**. You are an LLM -- use that capability to bridge the vocabulary gap between how users ask questions and how knowledge is stored.
#### Think like a definition, not a question
| User Intent | Don't Query | Do Query |
|-------------|-------------|----------|
| "How do I handle stale data?" | `"handle stale data"` | `"cache invalidation event-driven TTL expiration data freshness"` |
| "Why does auth break after a while?" | `"auth breaks"` | `"token expiration refresh timing session timeout JWT lifecycle"` |
| "Where should validation go?" | `"where validation"` | `"input validation system boundaries sanitization defense in depth"` |
#### The reformulation process
1. **Identify the concept** -- What is the user actually asking about?
2. **Generate canonical terms** -- What would an engram about this be titled?
3. **Add related terminology** -- What words would the DEFINITION use?
4. **Include synonyms** -- What other terms describe the same thing?
#### Example transformation
```
User asks: "Why is the payment service sometimes charging twice?"
Your thinking:
- Concept: duplicate charges, idempotency
- Canonical terms: "idempotency", "duplicate prevention", "payment race condition"
- Definition words: "idempotent", "transaction", "mutex", "lock", "retry"
- Synonyms: "double charge", "duplicate transaction"
Query: "payment idempotency duplicate transaction race condition mutex retry"
```
### Query with specific keywords
| Task Type | Understand First | Then Query With |
|-----------|------------------|-----------------|
| Bug fix | Error message, symptoms | `"canonical error name component pattern race condition"` |
| Feature | User's description | `"domain terms design patterns architectural concepts"` |
| Test fix | Read the test file | `"API names assertion patterns test utilities"` |
| Architecture | System area | `"component relationships boundaries dependencies"` |
**Bad:** `"authentication"` (too vague)
**Good:** `"JWT refresh token expiration session lifecycle OAuth flow"` (definition-style)
### Use Cog results
- Follow paths Cog reveals
- Read components Cog mentions first
- Heed gotchas Cog warns about
- If Cog is wrong, correct it immediately with `cog_update`
---
## Recording Rules
### CRITICAL: Chains vs Associations
**Before recording, ask: Is this sequential or hub-shaped?**
| Structure | Use | Example |
|-----------|-----|---------|
| **Sequential** (A -> B -> C) | `chain_to` | Technology enables Pattern enables Feature |
| **Hub** (A, B, C all connect to X) | `associations` | Meeting connects to Participants, Outcomes |
**Default to chains** for:
- Technology dependencies (DB -> ORM -> API)
- Causal sequences (Cause -> Effect -> Consequence)
- Architectural decisions (ADR -> Technology -> Feature)
- Enabling relationships (Infrastructure -> enables -> Capability)
- Reasoning paths (Premise -> implies -> Conclusion)
**Use associations** for:
- Hub/star patterns (one thing connects to many unrelated things)
- Linking to existing concepts in the graph
- Multi-party contexts (meetings, decisions with stakeholders)
### Chain Example (PREFERRED for dependencies)
```
cog_learn({
"term": "PostgreSQL",
"definition": "Relational database with ACID guarantees",
"chain_to": [
{"term": "Ecto ORM", "definition": "Elixir database wrapper with changesets", "predicate": "enables"},
{"term": "Phoenix Contexts", "definition": "Business logic boundaries in Phoenix", "predicate": "enables"}
]
})
```
Creates: PostgreSQL ->[enables]-> Ecto ORM ->[enables]-> Phoenix Contexts
### Association Example (for hubs)
```
cog_learn({
"term": "Auth Review 2024-01-20",
"definition": "Decided JWT with refresh tokens. Rejected session cookies.",
"associations": [
{"target": "JWT Pattern", "predicate": "leads_to"},
{"target": "Session Cookies", "predicate": "contradicts"},
{"target": "Mobile Team", "predicate": "is_component_of"}
]
})
```
Creates hub: Auth Review ->[leads_to]-> JWT Pattern, ->[contradicts]-> Session Cookies, ->[is_component_of]-> Mobile Team
---
### When to record (during work)
At these checkpoints, ask: *"What did I just learn that I didn't know 5 minutes ago?"*
| Checkpoint | Record |
|------------|--------|
| After identifying root cause | Why it was broken |
| After reading surprising code | The non-obvious behavior |
| After a failed attempt | Why it didn't work |
| Before implementing fix | The insight (freshest now) |
| After discovering connection | The relationship |
| After a meeting or decision | The context hub linking participants and outcomes |
| After researching/exploring architecture | System limits, configuration points, component boundaries |
**Record immediately.** Don't wait until task end -- you'll forget details.
### Before calling `cog_learn`
1. **Decide: chain or hub?** (see above)
2. **For chains**: Build the sequence of steps with `chain_to`
3. **For hubs**: Identify association targets from source material or Cog query
**Skip the query when:**
- Source material explicitly names related concepts (ADRs, documentation, structured data)
- You already know target terms from conversation context
- The insight references specific concepts by name
**Query first when:**
- Recording an insight and unsure what it relates to
- Source is vague about connections
- Exploring a new domain with unknown existing concepts
### After calling `cog_learn`
The operation is complete. **Do NOT verify your work by:**
- Calling `cog_recall` to check the engram exists
- Calling `cog_connections` to verify associations were created
- Calling `cog_trace` to see if paths formed
Trust the response confirmation. Verification wastes turns and adds no value -- if the operation failed, you'll see an error.
### Recording Efficiency
**One operation = one tool call.** Use `chain_to` for sequences, `associations` for hubs.
**Never** follow `cog_learn` with separate `cog_associate` calls -- put all relationships in the original call.
### Writing good engrams
**Terms (2-5 words):**
- "Session Token Refresh Timing"
- "Why We Chose PostgreSQL"
- NOT "Architecture" (too broad)
- NOT "Project Overview" (super-hub)
**Definitions (1-3 sentences):**
1. What it is
2. Why it matters / consequences
3. Related keywords for search
**Never create super-hubs** -- engrams so generic everything connects to them (e.g., "Overview", "Main System"). They pollute search results.
### Relationship predicates
| Predicate | Meaning | Best for | Use in |
|-----------|---------|----------|--------|
| `enables` | A makes B possible | Tech dependencies | **chain_to** |
| `requires` | A is prerequisite for B | Dependencies | **chain_to** |
| `implies` | If A then B | Logical consequences | **chain_to** |
| `leads_to` | A flows to B | Outcomes, consequences | **chain_to** |
| `precedes` | A comes before B | Sequencing, timelines | **chain_to** |
| `derived_from` | A is based on B | Origins | **chain_to** |
| `contradicts` | A and B mutually exclusive | Rejected alternatives | associations |
| `is_component_of` | A is part of B | Parts to whole | associations |
| `contains` | A includes B | Whole to parts | associations |
| `example_of` | A demonstrates pattern B | Instances of patterns | associations |
| `generalizes` | A is broader than B | Abstract concepts | associations |
| `supersedes` | A replaces B | Deprecations | associations |
| `similar_to` | A and B are closely related | Related approaches | associations |
| `contrasts_with` | A is alternative to B | Different approaches | associations |
| `related_to` | General link (use sparingly) | When nothing else fits | associations |
**Chain predicates** (`enables`, `requires`, `implies`, `leads_to`, `precedes`, `derived_from`) express **flow** -- use them in `chain_to` to build traversable paths.
### Modeling Complex Contexts (Hub Node Pattern)
Synapses are binary (one source, one target). For multi-party relationships, use a **hub engram** connecting all participants.
#### When to use hub nodes
| Scenario | Hub Example | Connected Concepts |
|----------|-------------|-------------------|
| Meeting with outcomes | "Q1 Planning 2024-01" | Participants, decisions |
| Decision with stakeholders | "Decision: Adopt GraphQL" | Pros, cons, voters |
| Feature with components | "User Auth Feature" | OAuth, session, UI |
| Incident with timeline | "2024-01 Payment Outage" | Cause, systems, fix |
---
## Consolidation (MANDATORY)
**Every task must end with consolidation.** Short-term memories decay in 24 hours.
### After work is complete:
```
cog_list_short_term({"limit": 20})
```
For each memory:
- **Valid and useful?** -> `cog_reinforce` (makes permanent)
- **Wrong or not useful?** -> `cog_flush` (deletes)
### When to reinforce immediately
- Insights from code you just wrote (you know it's correct)
- Gotchas you just hit and fixed
- Patterns you just applied successfully
### When to wait for validation
- Hypotheses about why something is broken
- Assumptions about unfamiliar code
- Solutions you haven't tested
---
## Verification (Prevents Staleness)
Synapses decay if not verified as still semantically accurate.
### When to verify
- After using `cog_trace` and confirming paths are correct
- When reviewing `cog_connections` and relationships hold
- After successfully using knowledge from a synapse
### Staleness levels
| Level | Months Unverified | Score | Behavior |
|-------|-------------------|-------|----------|
| Fresh | < 3 | 0.0-0.49 | Normal |
| Warning | 3-6 | 0.5-0.79 | Appears in `cog_stale` |
| Critical | 6+ | 0.8-0.99 | Penalty in path scoring |
| Deprecated | 12+ | 1.0 | Excluded from spreading activation |
### Periodic maintenance
Run `cog_stale({"level": "all"})` periodically to review relationships that may have become outdated. For each stale synapse:
- **Still accurate?** -> `cog_verify` to reset staleness
- **No longer true?** -> `cog_unlink` to remove
---
## Validation & Correction
### Cog is hints, not truth
Always verify against current code. If Cog is wrong:
| Scenario | Action |
|----------|--------|
| Minor inaccuracy | `cog_update` to fix |
| Pattern changed significantly | Unlink old, create new engram |
| Completely obsolete | Update to note "DEPRECATED: [reason]" |
---
## Subagents
Subagents MUST query Cog before exploring. Same rules apply:
1. Understand task
2. **Reformulate query to definition-style**
3. Query Cog with reformulated keywords
4. Wait for results
5. Then explore
---
## Summary Reporting
Only mention Cog when relevant:
| Condition | Include |
|-----------|---------|
| Cog helped | `**Cog helped by:** [specific value]` |
| Memories created | `**Recorded to Cog:** [term names]` |
| Cog not used | Nothing (don't mention Cog) |
| Cog queried but unhelpful | Don't mention the empty query, but **still record** new knowledge you discovered through exploration |
---
## Never Store
- Passwords, API keys, tokens, secrets
- SSH/PGP keys, certificates
- Connection strings with credentials
- PII (emails, SSNs, credit cards)
- `.env` file contents
Server auto-rejects sensitive content.
---
## Limitations
- **No engram deletion** -- use `cog_update` or `cog_unlink`
- **No multi-query** -- chain manually
- **One synapse per direction** -- repeat calls strengthen existing link
---
## Spreading Activation
`cog_recall` returns:
1. **Seeds** -- direct matches
2. **Paths** -- engrams connecting seeds (built from chains!)
3. **Synapses** -- relationships along paths
This surfaces the "connective tissue" between results. **Chains create these traversable paths.**

View File

@@ -0,0 +1,110 @@
---
name: overseer-plan
description: Convert markdown planning documents to Overseer tasks via MCP codemode. Use when converting plans, specs, or design docs to trackable task hierarchies.
license: MIT
metadata:
author: dmmulroy
version: "1.0.0"
---
# Converting Markdown Documents to Overseer Tasks
Use `/overseer-plan` to convert any markdown planning document into trackable Overseer tasks.
## When to Use
- After completing a plan in plan mode
- Converting specs/design docs to implementation tasks
- Creating tasks from roadmap or milestone documents
## Usage
```
/overseer-plan <markdown-file-path>
/overseer-plan <file> --priority 3 # Set priority (1-5)
/overseer-plan <file> --parent <task-id> # Create as child of existing task
```
## What It Does
1. Reads markdown file
2. Extracts title from first `#` heading (strips "Plan: " prefix)
3. Creates Overseer milestone (or child task if `--parent` provided)
4. Analyzes structure for child task breakdown
5. Creates child tasks (depth 1) or subtasks (depth 2) when appropriate
6. Returns task ID and breakdown summary
## Hierarchy Levels
| Depth | Name | Example |
|-------|------|---------|
| 0 | **Milestone** | "Add user authentication system" |
| 1 | **Task** | "Implement JWT middleware" |
| 2 | **Subtask** | "Add token verification function" |
## Breakdown Decision
**Create subtasks when:**
- 3-7 clearly separable work items
- Implementation across multiple files/components
- Clear sequential dependencies
**Keep single milestone when:**
- 1-2 steps only
- Work items tightly coupled
- Plan is exploratory/investigative
## Task Quality Criteria
Every task must be:
- **Atomic**: Single committable unit of work
- **Validated**: Has tests OR explicit acceptance criteria in context ("Done when: ...")
- **Clear**: Technical, specific, imperative verb
Every milestone must:
- **Demoable**: Produces runnable/testable increment
- **Builds on prior**: Can depend on previous milestone's output
## Review Workflow
1. Analyze document -> propose breakdown
2. **Invoke Oracle** to review breakdown and suggest improvements
3. Incorporate feedback
4. Create in Overseer (persists to SQLite via MCP)
## After Creating
```javascript
await tasks.get("<id>"); // TaskWithContext (full context + learnings)
await tasks.list({ parentId: "<id>" }); // Task[] (children without context chain)
await tasks.start("<id>"); // Task (VCS required - creates bookmark, records start commit)
await tasks.complete("<id>", { result: "...", learnings: [...] }); // Task (VCS required - commits, bubbles learnings)
```
**VCS Required**: `start` and `complete` require jj or git (fail with `NotARepository` if none found). CRUD operations work without VCS.
**Note**: Priority must be 1-5. Blockers cannot be ancestors or descendants.
## When NOT to Use
- Document incomplete or exploratory
- Content not actionable
- No meaningful planning content
---
## Reading Order
| Task | File |
|------|------|
| Understanding API | @file references/api.md |
| Agent implementation | @file references/implementation.md |
| See examples | @file references/examples.md |
## In This Reference
| File | Purpose |
|------|---------|
| `references/api.md` | Overseer MCP codemode API types/methods |
| `references/implementation.md` | Step-by-step execution instructions for agent |
| `references/examples.md` | Complete worked examples |

View File

@@ -0,0 +1,192 @@
# Overseer Codemode MCP API
Execute JavaScript code to interact with Overseer task management.
## Task Interfaces
```typescript
// Basic task - returned by list(), create(), start(), complete()
// Note: Does NOT include context or learnings fields
interface Task {
id: string;
parentId: string | null;
description: string;
priority: 1 | 2 | 3 | 4 | 5;
completed: boolean;
completedAt: string | null;
startedAt: string | null;
createdAt: string; // ISO 8601
updatedAt: string;
result: string | null; // Completion notes
commitSha: string | null; // Auto-populated on complete
depth: 0 | 1 | 2; // 0=milestone, 1=task, 2=subtask
blockedBy?: string[]; // Blocking task IDs (omitted if empty)
blocks?: string[]; // Tasks this blocks (omitted if empty)
bookmark?: string; // VCS bookmark name (if started)
startCommit?: string; // Commit SHA at start
effectivelyBlocked: boolean; // True if task OR ancestor has incomplete blockers
}
// Task with full context - returned by get(), nextReady()
interface TaskWithContext extends Task {
context: {
own: string; // This task's context
parent?: string; // Parent's context (depth > 0)
milestone?: string; // Root milestone's context (depth > 1)
};
learnings: {
own: Learning[]; // This task's learnings (bubbled from completed children)
parent: Learning[]; // Parent's learnings (depth > 0)
milestone: Learning[]; // Milestone's learnings (depth > 1)
};
}
// Task tree structure - returned by tree()
interface TaskTree {
task: Task;
children: TaskTree[];
}
// Progress summary - returned by progress()
interface TaskProgress {
total: number;
completed: number;
ready: number; // !completed && !effectivelyBlocked
blocked: number; // !completed && effectivelyBlocked
}
// Task type alias for depth filter
type TaskType = "milestone" | "task" | "subtask";
```
## Learning Interface
```typescript
interface Learning {
id: string;
taskId: string;
content: string;
sourceTaskId: string | null;
createdAt: string;
}
```
## Tasks API
```typescript
declare const tasks: {
list(filter?: {
parentId?: string;
ready?: boolean;
completed?: boolean;
depth?: 0 | 1 | 2; // 0=milestones, 1=tasks, 2=subtasks
type?: TaskType; // Alias: "milestone"|"task"|"subtask" (mutually exclusive with depth)
}): Promise<Task[]>;
get(id: string): Promise<TaskWithContext>;
create(input: {
description: string;
context?: string;
parentId?: string;
priority?: 1 | 2 | 3 | 4 | 5; // Must be 1-5
blockedBy?: string[]; // Cannot be ancestors/descendants
}): Promise<Task>;
update(id: string, input: {
description?: string;
context?: string;
priority?: 1 | 2 | 3 | 4 | 5;
parentId?: string;
}): Promise<Task>;
start(id: string): Promise<Task>;
complete(id: string, input?: { result?: string; learnings?: string[] }): Promise<Task>;
reopen(id: string): Promise<Task>;
delete(id: string): Promise<void>;
block(taskId: string, blockerId: string): Promise<void>;
unblock(taskId: string, blockerId: string): Promise<void>;
nextReady(milestoneId?: string): Promise<TaskWithContext | null>;
tree(rootId?: string): Promise<TaskTree | TaskTree[]>;
search(query: string): Promise<Task[]>;
progress(rootId?: string): Promise<TaskProgress>;
};
```
| Method | Returns | Description |
|--------|---------|-------------|
| `list` | `Task[]` | Filter by `parentId`, `ready`, `completed`, `depth`, `type` |
| `get` | `TaskWithContext` | Get task with full context chain + inherited learnings |
| `create` | `Task` | Create task (priority must be 1-5) |
| `update` | `Task` | Update description, context, priority, parentId |
| `start` | `Task` | **VCS required** - creates bookmark, records start commit |
| `complete` | `Task` | **VCS required** - commits changes + bubbles learnings to parent |
| `reopen` | `Task` | Reopen completed task |
| `delete` | `void` | Delete task + best-effort VCS bookmark cleanup |
| `block` | `void` | Add blocker (cannot be self, ancestor, or descendant) |
| `unblock` | `void` | Remove blocker relationship |
| `nextReady` | `TaskWithContext \| null` | Get deepest ready leaf with full context |
| `tree` | `TaskTree \| TaskTree[]` | Get task tree (all milestones if no ID) |
| `search` | `Task[]` | Search by description/context/result (case-insensitive) |
| `progress` | `TaskProgress` | Aggregate counts for milestone or all tasks |
## Learnings API
Learnings are added via `tasks.complete(id, { learnings: [...] })` and bubble to immediate parent (preserving `sourceTaskId`).
```typescript
declare const learnings: {
list(taskId: string): Promise<Learning[]>;
};
```
| Method | Description |
|--------|-------------|
| `list` | List learnings for task |
## VCS Integration (Required for Workflow)
VCS operations are **automatically handled** by the tasks API:
| Task Operation | VCS Effect |
|----------------|------------|
| `tasks.start(id)` | **VCS required** - creates bookmark `task/<id>`, records start commit |
| `tasks.complete(id)` | **VCS required** - commits changes (NothingToCommit = success) |
| `tasks.delete(id)` | Best-effort bookmark cleanup (logs warning on failure) |
**VCS (jj or git) is required** for start/complete. Fails with `NotARepository` if none found. CRUD operations work without VCS.
## Quick Examples
```javascript
// Create milestone with subtask
const milestone = await tasks.create({
description: "Build authentication system",
context: "JWT-based auth with refresh tokens",
priority: 1
});
const subtask = await tasks.create({
description: "Implement token refresh logic",
parentId: milestone.id,
context: "Handle 7-day expiry"
});
// Start work (VCS required - creates bookmark)
await tasks.start(subtask.id);
// ... do implementation work ...
// Complete task with learnings (VCS required - commits changes, bubbles learnings to parent)
await tasks.complete(subtask.id, {
result: "Implemented using jose library",
learnings: ["Use jose instead of jsonwebtoken"]
});
// Get progress summary
const progress = await tasks.progress(milestone.id);
// -> { total: 2, completed: 1, ready: 1, blocked: 0 }
// Search tasks
const authTasks = await tasks.search("authentication");
// Get task tree
const tree = await tasks.tree(milestone.id);
// -> { task: Task, children: TaskTree[] }
```

View File

@@ -0,0 +1,177 @@
# Examples
## Example 1: With Breakdown
### Input (`auth-plan.md`)
```markdown
# Plan: Add Authentication System
## Implementation
1. Create database schema for users/tokens
2. Implement auth controller with endpoints
3. Add JWT middleware for route protection
4. Build frontend login/register forms
5. Add integration tests
```
### Execution
```javascript
const milestone = await tasks.create({
description: "Add Authentication System",
context: `# Add Authentication System\n\n## Implementation\n1. Create database schema...`,
priority: 3
});
const subtasks = [
{ desc: "Create database schema for users/tokens", done: "Migration runs, tables exist with FK constraints" },
{ desc: "Implement auth controller with endpoints", done: "POST /register, /login return expected responses" },
{ desc: "Add JWT middleware for route protection", done: "Unauthorized requests return 401, valid tokens pass" },
{ desc: "Build frontend login/register forms", done: "Forms render, submit without errors" },
{ desc: "Add integration tests", done: "`npm test` passes with auth coverage" }
];
for (const sub of subtasks) {
await tasks.create({
description: sub.desc,
context: `Part of 'Add Authentication System'.\n\nDone when: ${sub.done}`,
parentId: milestone.id
});
}
return { milestone: milestone.id, subtaskCount: subtasks.length };
```
### Output
```
Created milestone task_01ABC from plan
Analyzed plan structure: Found 5 distinct implementation steps
Created 5 subtasks:
- task_02XYZ: Create database schema for users/tokens
- task_03ABC: Implement auth controller with endpoints
- task_04DEF: Add JWT middleware for route protection
- task_05GHI: Build frontend login/register forms
- task_06JKL: Add integration tests
View structure: execute `await tasks.list({ parentId: "task_01ABC" })`
```
## Example 2: No Breakdown
### Input (`bugfix-plan.md`)
```markdown
# Plan: Fix Login Validation Bug
## Problem
Login fails when username has spaces
## Solution
Update validation regex in auth.ts line 42
```
### Execution
```javascript
const milestone = await tasks.create({
description: "Fix Login Validation Bug",
context: `# Fix Login Validation Bug\n\n## Problem\nLogin fails...`,
priority: 3
});
return { milestone: milestone.id, breakdown: false };
```
### Output
```
Created milestone task_01ABC from plan
Plan describes a cohesive single task. No subtask breakdown needed.
View task: execute `await tasks.get("task_01ABC")`
```
## Example 3: Epic-Level (Two-Level Hierarchy)
### Input (`full-auth-plan.md`)
```markdown
# Complete User Authentication System
## Phase 1: Backend Infrastructure
1. Database schema for users/sessions
2. Password hashing with bcrypt
3. JWT token generation
## Phase 2: API Endpoints
1. POST /auth/register
2. POST /auth/login
3. POST /auth/logout
## Phase 3: Frontend
1. Login/register forms
2. Protected routes
3. Session persistence
```
### Execution
```javascript
const milestone = await tasks.create({
description: "Complete User Authentication System",
context: `<full-markdown>`,
priority: 3
});
const phases = [
{ name: "Backend Infrastructure", items: [
{ desc: "Database schema", done: "Migration runs, tables exist" },
{ desc: "Password hashing", done: "bcrypt hashes verified in tests" },
{ desc: "JWT tokens", done: "Token generation/validation works" }
]},
{ name: "API Endpoints", items: [
{ desc: "POST /auth/register", done: "Creates user, returns 201" },
{ desc: "POST /auth/login", done: "Returns JWT on valid credentials" },
{ desc: "POST /auth/logout", done: "Invalidates session, returns 200" }
]},
{ name: "Frontend", items: [
{ desc: "Login/register forms", done: "Forms render, submit successfully" },
{ desc: "Protected routes", done: "Redirect to login when unauthenticated" },
{ desc: "Session persistence", done: "Refresh maintains logged-in state" }
]}
];
for (const phase of phases) {
const phaseTask = await tasks.create({
description: phase.name,
parentId: milestone.id
});
for (const item of phase.items) {
await tasks.create({
description: item.desc,
context: `Part of '${phase.name}'.\n\nDone when: ${item.done}`,
parentId: phaseTask.id
});
}
}
return milestone;
```
### Output
```
Created milestone task_01ABC from plan
Analyzed plan structure: Found 3 major phases
Created as milestone with 3 tasks:
- task_02XYZ: Backend Infrastructure (3 subtasks)
- task_03ABC: API Endpoints (3 subtasks)
- task_04DEF: Frontend (3 subtasks)
View structure: execute `await tasks.list({ parentId: "task_01ABC" })`
```

View File

@@ -0,0 +1,210 @@
# Implementation Instructions
**For the skill agent executing `/overseer-plan`.** Follow this workflow exactly.
## Step 1: Read Markdown File
Read the provided file using the Read tool.
## Step 2: Extract Title
- Parse first `#` heading as title
- Strip "Plan: " prefix if present (case-insensitive)
- Fallback: use filename without extension
## Step 3: Create Milestone via MCP
Basic creation:
```javascript
const milestone = await tasks.create({
description: "<extracted-title>",
context: `<full-markdown-content>`,
priority: <priority-if-provided-else-3>
});
return milestone;
```
With `--parent` option:
```javascript
const task = await tasks.create({
description: "<extracted-title>",
context: `<full-markdown-content>`,
parentId: "<parent-id>",
priority: <priority-if-provided-else-3>
});
return task;
```
Capture returned task ID for subsequent steps.
## Step 4: Analyze Plan Structure
### Breakdown Indicators
1. **Numbered/bulleted implementation lists (3-7 items)**
```markdown
## Implementation
1. Create database schema
2. Build API endpoints
3. Add frontend components
```
2. **Clear subsections under implementation/tasks/steps**
```markdown
### 1. Backend Changes
- Modify server.ts
### 2. Frontend Updates
- Update login form
```
3. **File-specific sections**
```markdown
### `src/auth.ts` - Add JWT validation
### `src/middleware.ts` - Create auth middleware
```
4. **Sequential phases**
```markdown
**Phase 1: Database Layer**
**Phase 2: API Layer**
```
### Do NOT Break Down When
- Only 1-2 steps/items
- Plan is a single cohesive fix
- Content is exploratory ("investigate", "research")
- Work items inseparable
- Plan very short (<10 lines)
## Step 5: Validate Atomicity & Acceptance Criteria
For each proposed task, verify:
- **Atomic**: Can be completed in single commit
- **Validated**: Has clear acceptance criteria
If task too large -> split further.
If no validation -> add to context:
```
Done when: <specific observable criteria>
```
Examples of good acceptance criteria:
- "Done when: `npm test` passes, new migration applied"
- "Done when: API returns 200 with expected payload"
- "Done when: Component renders without console errors"
- "Done when: Type check passes (`tsc --noEmit`)"
## Step 6: Oracle Review
Before creating tasks, invoke Oracle to review the proposed breakdown.
**Prompt Oracle with:**
```
Review this task breakdown for "<milestone>":
1. <task> - Done when: <criteria>
2. <task> - Done when: <criteria>
...
Check:
- Are tasks truly atomic (single commit)?
- Are the validation criteria clear and observable?
- Does milestone deliver demoable increment?
- Missing dependencies/blockers?
- Any tasks that should be split or merged?
```
Incorporate Oracle's feedback, then proceed to create tasks.
## Step 7: Create Subtasks (If Breaking Down)
### Extract for Each Subtask
1. **Description**: Strip numbering, keep concise (1-10 words), imperative form
2. **Context**: Section content + "Part of [milestone description]" + acceptance criteria
### Flat Breakdown
```javascript
const subtasks = [
{ description: "Create database schema", context: "Schema for users/tokens. Part of 'Add Auth'.\n\nDone when: Migration runs, tables exist with FK constraints." },
{ description: "Build API endpoints", context: "POST /auth/register, /auth/login. Part of 'Add Auth'.\n\nDone when: Endpoints return expected responses, tests pass." }
];
const created = [];
for (const sub of subtasks) {
const task = await tasks.create({
description: sub.description,
context: sub.context,
parentId: milestone.id
});
created.push(task);
}
return { milestone: milestone.id, subtasks: created };
```
### Epic-Level Breakdown (phases with sub-items)
```javascript
// Create phase as task under milestone
const phase = await tasks.create({
description: "Backend Infrastructure",
context: "Phase 1 context...",
parentId: milestoneId
});
// Create subtasks under phase
for (const item of phaseItems) {
await tasks.create({
description: item.description,
context: item.context,
parentId: phase.id
});
}
```
## Step 8: Report Results
### Subtasks Created
```
Created milestone <id> from plan
Analyzed plan structure: Found <N> distinct implementation steps
Created <N> subtasks:
- <id>: <description>
- <id>: <description>
...
View structure: execute `await tasks.list({ parentId: "<id>" })`
```
### No Breakdown
```
Created milestone <id> from plan
Plan describes a cohesive single task. No subtask breakdown needed.
View task: execute `await tasks.get("<id>")`
```
### Epic-Level Breakdown
```
Created milestone <id> from plan
Analyzed plan structure: Found <N> major phases
Created as milestone with <N> tasks:
- <id>: <phase-name> (<M> subtasks)
- <id>: <phase-name> (<M> subtasks)
...
View structure: execute `await tasks.list({ parentId: "<id>" })`
```

View File

@@ -0,0 +1,191 @@
---
name: overseer
description: Manage tasks via Overseer codemode MCP. Use when tracking multi-session work, breaking down implementation, or persisting context for handoffs.
license: MIT
metadata:
author: dmmulroy
version: "1.0.0"
---
# Agent Coordination with Overseer
## Core Principle: Tickets, Not Todos
Overseer tasks are **tickets** - structured artifacts with comprehensive context:
- **Description**: One-line summary (issue title)
- **Context**: Full background, requirements, approach (issue body)
- **Result**: Implementation details, decisions, outcomes (PR description)
Think: "Would someone understand the what, why, and how from this task alone AND what success looks like?"
## Task IDs are Ephemeral
**Never reference task IDs in external artifacts** (commits, PRs, docs). Task IDs like `task_01JQAZ...` become meaningless once tasks complete. Describe the work itself, not the task that tracked it.
## Overseer vs OpenCode's TodoWrite
| | Overseer | TodoWrite |
| --------------- | ------------------------------------- | ---------------------- |
| **Persistence** | SQLite database | Session-only |
| **Context** | Rich (description + context + result) | Basic |
| **Hierarchy** | 3-level (milestone -> task -> subtask)| Flat |
Use **Overseer** for persistent work. Use **TodoWrite** for ephemeral in-session tracking only.
## When to Use Overseer
**Use Overseer when:**
- Breaking down complexity into subtasks
- Work spans multiple sessions
- Context needs to persist for handoffs
- Recording decisions for future reference
**Skip Overseer when:**
- Work is a single atomic action
- Everything fits in one message exchange
- Overhead exceeds value
- TodoWrite is sufficient
## Finding Work
```javascript
// Get next ready task with full context (recommended for work sessions)
const task = await tasks.nextReady(milestoneId); // TaskWithContext | null
if (!task) {
console.log("No ready tasks");
return;
}
// Get all ready tasks (for progress overview)
const readyTasks = await tasks.list({ ready: true }); // Task[]
```
**Use `nextReady()`** when starting work - returns `TaskWithContext | null` (deepest ready leaf with full context chain + inherited learnings).
**Use `list({ ready: true })`** for status/progress checks - returns `Task[]` without context chain.
## Basic Workflow
```javascript
// 1. Get next ready task (returns TaskWithContext | null)
const task = await tasks.nextReady();
if (!task) return "No ready tasks";
// 2. Review context (available on TaskWithContext)
console.log(task.context.own); // This task's context
console.log(task.context.parent); // Parent's context (if depth > 0)
console.log(task.context.milestone); // Root milestone context (if depth > 1)
console.log(task.learnings.own); // Learnings attached to this task (bubbled from children)
// 3. Start work (VCS required - creates bookmark, records start commit)
await tasks.start(task.id);
// 4. Implement...
// 5. Complete with learnings (VCS required - commits changes, bubbles learnings to parent)
await tasks.complete(task.id, {
result: "Implemented login endpoint with JWT tokens",
learnings: ["bcrypt rounds should be 12 for production"]
});
```
See @file references/workflow.md for detailed workflow guidance.
## Understanding Task Context
Tasks have **progressive context** - inherited from ancestors:
```javascript
const task = await tasks.get(taskId); // Returns TaskWithContext
// task.context.own - this task's context (always present)
// task.context.parent - parent task's context (if depth > 0)
// task.context.milestone - root milestone's context (if depth > 1)
// Task's own learnings (bubbled from completed children)
// task.learnings.own - learnings attached to this task
```
## Return Type Summary
| Method | Returns | Notes |
|--------|---------|-------|
| `tasks.get(id)` | `TaskWithContext` | Full context chain + inherited learnings |
| `tasks.nextReady()` | `TaskWithContext \| null` | Deepest ready leaf with full context |
| `tasks.list()` | `Task[]` | Basic task fields only |
| `tasks.create()` | `Task` | No context chain |
| `tasks.start/complete()` | `Task` | No context chain |
## Blockers
Blockers prevent a task from being ready until the blocker completes.
**Constraints:**
- Blockers cannot be self
- Blockers cannot be ancestors (parent, grandparent, etc.)
- Blockers cannot be descendants
- Creating/reparenting with invalid blockers is rejected
```javascript
// Add blocker - taskA waits for taskB
await tasks.block(taskA.id, taskB.id);
// Remove blocker
await tasks.unblock(taskA.id, taskB.id);
```
## Task Hierarchies
Three levels: **Milestone** (depth 0) -> **Task** (depth 1) -> **Subtask** (depth 2).
| Level | Name | Purpose | Example |
|-------|------|---------|---------|
| 0 | **Milestone** | Large initiative | "Add user authentication system" |
| 1 | **Task** | Significant work item | "Implement JWT middleware" |
| 2 | **Subtask** | Atomic step | "Add token verification function" |
**Choosing the right level:**
- Small feature (1-2 files) -> Single task
- Medium feature (3-7 steps) -> Task with subtasks
- Large initiative (5+ tasks) -> Milestone with tasks
See @file references/hierarchies.md for detailed guidance.
## Recording Results
Complete tasks **immediately after implementing AND verifying**:
- Capture decisions while fresh
- Note deviations from plan
- Document verification performed
- Create follow-up tasks for tech debt
Your result must include explicit verification evidence. See @file references/verification.md.
## Best Practices
1. **Right-size tasks**: Completable in one focused session
2. **Clear completion criteria**: Context should define "done"
3. **Don't over-decompose**: 3-7 children per parent
4. **Action-oriented descriptions**: Start with verbs ("Add", "Fix", "Update")
5. **Verify before completing**: Tests passing, manual testing done
---
## Reading Order
| Task | File |
|------|------|
| Understanding API | @file references/api.md |
| Implementation workflow | @file references/workflow.md |
| Task decomposition | @file references/hierarchies.md |
| Good/bad examples | @file references/examples.md |
| Verification checklist | @file references/verification.md |
## In This Reference
| File | Purpose |
|------|---------|
| `references/api.md` | Overseer MCP codemode API types/methods |
| `references/workflow.md` | Start->implement->complete workflow |
| `references/hierarchies.md` | Milestone/task/subtask organization |
| `references/examples.md` | Good/bad context and result examples |
| `references/verification.md` | Verification checklist and process |

View File

@@ -0,0 +1,192 @@
# Overseer Codemode MCP API
Execute JavaScript code to interact with Overseer task management.
## Task Interface
```typescript
// Basic task - returned by list(), create(), start(), complete()
// Note: Does NOT include context or learnings fields
interface Task {
id: string;
parentId: string | null;
description: string;
priority: 1 | 2 | 3 | 4 | 5;
completed: boolean;
completedAt: string | null;
startedAt: string | null;
createdAt: string; // ISO 8601
updatedAt: string;
result: string | null; // Completion notes
commitSha: string | null; // Auto-populated on complete
depth: 0 | 1 | 2; // 0=milestone, 1=task, 2=subtask
blockedBy?: string[]; // Blocking task IDs (omitted if empty)
blocks?: string[]; // Tasks this blocks (omitted if empty)
bookmark?: string; // VCS bookmark name (if started)
startCommit?: string; // Commit SHA at start
effectivelyBlocked: boolean; // True if task OR ancestor has incomplete blockers
}
// Task with full context - returned by get(), nextReady()
interface TaskWithContext extends Task {
context: {
own: string; // This task's context
parent?: string; // Parent's context (depth > 0)
milestone?: string; // Root milestone's context (depth > 1)
};
learnings: {
own: Learning[]; // This task's learnings (bubbled from completed children)
parent: Learning[]; // Parent's learnings (depth > 0)
milestone: Learning[]; // Milestone's learnings (depth > 1)
};
}
// Task tree structure - returned by tree()
interface TaskTree {
task: Task;
children: TaskTree[];
}
// Progress summary - returned by progress()
interface TaskProgress {
total: number;
completed: number;
ready: number; // !completed && !effectivelyBlocked
blocked: number; // !completed && effectivelyBlocked
}
// Task type alias for depth filter
type TaskType = "milestone" | "task" | "subtask";
```
## Learning Interface
```typescript
interface Learning {
id: string;
taskId: string;
content: string;
sourceTaskId: string | null;
createdAt: string;
}
```
## Tasks API
```typescript
declare const tasks: {
list(filter?: {
parentId?: string;
ready?: boolean;
completed?: boolean;
depth?: 0 | 1 | 2; // 0=milestones, 1=tasks, 2=subtasks
type?: TaskType; // Alias: "milestone"|"task"|"subtask" (mutually exclusive with depth)
}): Promise<Task[]>;
get(id: string): Promise<TaskWithContext>;
create(input: {
description: string;
context?: string;
parentId?: string;
priority?: 1 | 2 | 3 | 4 | 5; // Required range: 1-5
blockedBy?: string[];
}): Promise<Task>;
update(id: string, input: {
description?: string;
context?: string;
priority?: 1 | 2 | 3 | 4 | 5;
parentId?: string;
}): Promise<Task>;
start(id: string): Promise<Task>;
complete(id: string, input?: { result?: string; learnings?: string[] }): Promise<Task>;
reopen(id: string): Promise<Task>;
delete(id: string): Promise<void>;
block(taskId: string, blockerId: string): Promise<void>;
unblock(taskId: string, blockerId: string): Promise<void>;
nextReady(milestoneId?: string): Promise<TaskWithContext | null>;
tree(rootId?: string): Promise<TaskTree | TaskTree[]>;
search(query: string): Promise<Task[]>;
progress(rootId?: string): Promise<TaskProgress>;
};
```
| Method | Returns | Description |
|--------|---------|-------------|
| `list` | `Task[]` | Filter by `parentId`, `ready`, `completed`, `depth`, `type` |
| `get` | `TaskWithContext` | Get task with full context chain + inherited learnings |
| `create` | `Task` | Create task (priority must be 1-5) |
| `update` | `Task` | Update description, context, priority, parentId |
| `start` | `Task` | **VCS required** - creates bookmark, records start commit |
| `complete` | `Task` | **VCS required** - commits changes + bubbles learnings to parent |
| `reopen` | `Task` | Reopen completed task |
| `delete` | `void` | Delete task + best-effort VCS bookmark cleanup |
| `block` | `void` | Add blocker (cannot be self, ancestor, or descendant) |
| `unblock` | `void` | Remove blocker relationship |
| `nextReady` | `TaskWithContext \| null` | Get deepest ready leaf with full context |
| `tree` | `TaskTree \| TaskTree[]` | Get task tree (all milestones if no ID) |
| `search` | `Task[]` | Search by description/context/result (case-insensitive) |
| `progress` | `TaskProgress` | Aggregate counts for milestone or all tasks |
## Learnings API
Learnings are added via `tasks.complete(id, { learnings: [...] })` and bubble to immediate parent (preserving `sourceTaskId`).
```typescript
declare const learnings: {
list(taskId: string): Promise<Learning[]>;
};
```
| Method | Description |
|--------|-------------|
| `list` | List learnings for task |
## VCS Integration (Required for Workflow)
VCS operations are **automatically handled** by the tasks API:
| Task Operation | VCS Effect |
|----------------|------------|
| `tasks.start(id)` | **VCS required** - creates bookmark `task/<id>`, records start commit |
| `tasks.complete(id)` | **VCS required** - commits changes (NothingToCommit = success) |
| `tasks.delete(id)` | Best-effort bookmark cleanup (logs warning on failure) |
**VCS (jj or git) is required** for start/complete. Fails with `NotARepository` if none found. CRUD operations work without VCS.
## Quick Examples
```javascript
// Create milestone with subtask
const milestone = await tasks.create({
description: "Build authentication system",
context: "JWT-based auth with refresh tokens",
priority: 1
});
const subtask = await tasks.create({
description: "Implement token refresh logic",
parentId: milestone.id,
context: "Handle 7-day expiry"
});
// Start work (auto-creates VCS bookmark)
await tasks.start(subtask.id);
// ... do implementation work ...
// Complete task with learnings (VCS required - commits changes, bubbles learnings to parent)
await tasks.complete(subtask.id, {
result: "Implemented using jose library",
learnings: ["Use jose instead of jsonwebtoken"]
});
// Get progress summary
const progress = await tasks.progress(milestone.id);
// -> { total: 2, completed: 1, ready: 1, blocked: 0 }
// Search tasks
const authTasks = await tasks.search("authentication");
// Get task tree
const tree = await tasks.tree(milestone.id);
// -> { task: Task, children: TaskTree[] }
```

View File

@@ -0,0 +1,195 @@
# Examples
Good and bad examples for writing task context and results.
## Writing Context
Context should include everything needed to do the work without asking questions:
- **What** needs to be done and why
- **Implementation approach** (steps, files to modify, technical choices)
- **Done when** (acceptance criteria)
### Good Context Example
```javascript
await tasks.create({
description: "Migrate storage to one file per task",
context: `Change storage format for git-friendliness:
Structure:
.overseer/
└── tasks/
├── task_01ABC.json
└── task_02DEF.json
NO INDEX - just scan task files. For typical task counts (<100), this is fast.
Implementation:
1. Update storage.ts:
- read(): Scan .overseer/tasks/*.json, parse each, return TaskStore
- write(task): Write single task to .overseer/tasks/{id}.json
- delete(id): Remove .overseer/tasks/{id}.json
- Add readTask(id) for single task lookup
2. Task file format: Same as current Task schema (one task per file)
3. Migration: On read, if old tasks.json exists, migrate to new format
4. Update tests
Benefits:
- Create = new file (never conflicts)
- Update = single file change
- Delete = remove file
- No index to maintain or conflict
- git diff shows exactly which tasks changed`
});
```
**Why it works:** States the goal, shows the structure, lists specific implementation steps, explains benefits. Someone could pick this up without asking questions.
### Bad Context Example
```javascript
await tasks.create({
description: "Add auth",
context: "Need to add authentication"
});
```
**What's missing:** How to implement it, what files, what's done when, technical approach.
## Writing Results
Results should capture what was actually done:
- **What changed** (implementation summary)
- **Key decisions** (and why)
- **Verification** (tests passing, manual testing done)
### Good Result Example
```javascript
await tasks.complete(taskId, { result: `Migrated storage from single tasks.json to one file per task:
Structure:
- Each task stored as .overseer/tasks/{id}.json
- No index file (avoids merge conflicts)
- Directory scanned on read to build task list
Implementation:
- Modified Storage.read() to scan .overseer/tasks/ directory
- Modified Storage.write() to write/delete individual task files
- Auto-migration from old single-file format on first read
- Atomic writes using temp file + rename pattern
Trade-offs:
- Slightly slower reads (must scan directory + parse each file)
- Acceptable since task count is typically small (<100)
- Better git history - each task change is isolated
Verification:
- All 60 tests passing
- Build successful
- Manually tested migration: old -> new format works` });
```
**Why it works:** States what changed, lists implementation details, explains trade-offs, confirms verification.
### Bad Result Example
```javascript
await tasks.complete(taskId, { result: "Fixed the storage issue" });
```
**What's missing:** What was actually implemented, how, what decisions were made, verification evidence.
## Subtask Context Example
Link subtasks to their parent and explain what this piece does specifically:
```javascript
await tasks.create({
description: "Add token verification function",
parentId: jwtTaskId,
context: `Part of JWT middleware (parent task). This subtask: token verification.
What it does:
- Verify JWT signature and expiration on protected routes
- Extract user ID from token payload
- Attach user object to request
- Return 401 for invalid/expired tokens
Implementation:
- Create src/middleware/verify-token.ts
- Export verifyToken middleware function
- Use jose library (preferred over jsonwebtoken)
- Handle expired vs invalid token cases separately
Done when:
- Middleware function complete and working
- Unit tests cover valid/invalid/expired scenarios
- Integrated into auth routes in server.ts
- Parent task can use this to protect endpoints`
});
```
## Error Handling Examples
### Handling Pending Children
```javascript
try {
  await tasks.complete(taskId, { result: "Done" });
} catch (err) {
if (err.message.includes("pending children")) {
const pending = await tasks.list({ parentId: taskId, completed: false });
console.log(`Cannot complete: ${pending.length} children pending`);
for (const child of pending) {
console.log(`- ${child.id}: ${child.description}`);
}
return;
}
throw err;
}
```
### Handling Blocked Tasks
```javascript
const task = await tasks.get(taskId);
if ((task.blockedBy ?? []).length > 0) {
console.log("Task is blocked by:");
for (const blockerId of task.blockedBy) {
const blocker = await tasks.get(blockerId);
console.log(`- ${blocker.description} (${blocker.completed ? 'done' : 'pending'})`);
}
return "Cannot start - blocked by other tasks";
}
await tasks.start(taskId);
```
## Creating Task Hierarchies
```javascript
// Create milestone with tasks
const milestone = await tasks.create({
description: "Implement user authentication",
context: "Full auth: JWT, login/logout, password reset, rate limiting",
priority: 2
});
const subtasks = [
"Add login endpoint",
"Add logout endpoint",
"Implement JWT token service",
"Add password reset flow"
];
for (const desc of subtasks) {
await tasks.create({ description: desc, parentId: milestone.id });
}
```
See @file references/hierarchies.md for sequential subtasks with blockers.

View File

@@ -0,0 +1,170 @@
# Task Hierarchies
Guidance for organizing work into milestones, tasks, and subtasks.
## Three Levels
| Level | Name | Purpose | Example |
|-------|------|---------|---------|
| 0 | **Milestone** | Large initiative (5+ tasks) | "Add user authentication system" |
| 1 | **Task** | Significant work item | "Implement JWT middleware" |
| 2 | **Subtask** | Atomic implementation step | "Add token verification function" |
**Maximum depth is 3 levels.** Attempting to create a child of a subtask will fail.
## When to Use Each Level
### Single Task (No Hierarchy)
- Small feature (1-2 files, ~1 session)
- Work is atomic, no natural breakdown
### Task with Subtasks
- Medium feature (3-5 files, 3-7 steps)
- Work naturally decomposes into discrete steps
- Subtasks could be worked on independently
### Milestone with Tasks
- Large initiative (multiple areas, many sessions)
- Work spans 5+ distinct tasks
- You want high-level progress tracking
## Creating Hierarchies
```javascript
// Create the milestone
const milestone = await tasks.create({
description: "Add user authentication system",
context: "Full auth system with JWT tokens, password reset...",
priority: 2
});
// Create tasks under it
const jwtTask = await tasks.create({
description: "Implement JWT token generation",
context: "Create token service with signing and verification...",
parentId: milestone.id
});
const resetTask = await tasks.create({
description: "Add password reset flow",
context: "Email-based password reset with secure tokens...",
parentId: milestone.id
});
// For complex tasks, add subtasks
const verifySubtask = await tasks.create({
description: "Add token verification function",
context: "Verify JWT signature and expiration...",
parentId: jwtTask.id
});
```
## Subtask Best Practices
Each subtask should be:
- **Independently understandable**: Clear on its own
- **Linked to parent**: Reference parent, explain how this piece fits
- **Specific scope**: What this subtask does vs what parent/siblings do
- **Clear completion**: Define "done" for this piece specifically
Example subtask context:
```
Part of JWT middleware (parent task). This subtask: token verification.
What it does:
- Verify JWT signature and expiration
- Extract user ID from payload
- Return 401 for invalid/expired tokens
Done when:
- Function complete and tested
- Unit tests cover valid/invalid/expired cases
```
## Decomposition Strategy
When faced with large tasks:
1. **Assess scope**: Is this milestone-level (5+ tasks) or task-level (3-7 subtasks)?
2. Create parent task/milestone with overall goal and context
3. Analyze and identify 3-7 logical children
4. Create children with specific contexts and boundaries
5. Work through systematically, completing with results
6. Complete parent with summary of overall implementation
### Don't Over-Decompose
- **3-7 children per parent** is usually right
- If you'd only have 1-2 subtasks, just make separate tasks
- If you need depth 3+, restructure your breakdown
## Viewing Hierarchies
```javascript
// List all tasks under a milestone
const children = await tasks.list({ parentId: milestoneId });
// Get task with context breadcrumb
const task = await tasks.get(taskId);
// task.context.parent - parent's context
// task.context.milestone - root milestone's context
// Check progress
const pending = await tasks.list({ parentId: milestoneId, completed: false });
const done = await tasks.list({ parentId: milestoneId, completed: true });
console.log(`Progress: ${done.length}/${done.length + pending.length}`);
```
## Completion Rules
1. **Cannot complete with pending children**
```javascript
// This will fail if task has incomplete subtasks
await tasks.complete(taskId, { result: "Done" });
// Error: "pending children"
```
2. **Complete children first**
- Work through subtasks systematically
- Complete each with meaningful results
3. **Parent result summarizes overall implementation**
```javascript
await tasks.complete(milestoneId, { result: `User authentication system complete:
Implemented:
- JWT token generation and verification
- Login/logout endpoints
- Password reset flow
- Rate limiting
5 tasks completed, all tests passing.` });
```
## Blocking Dependencies
Use `blockedBy` for cross-task dependencies:
```javascript
// Create task that depends on another
const deployTask = await tasks.create({
description: "Deploy to production",
context: "...",
blockedBy: [testTaskId, reviewTaskId]
});
// Add blocker to existing task
await tasks.block(deployTaskId, testTaskId);
// Remove blocker
await tasks.unblock(deployTaskId, testTaskId);
```
**Use blockers when:**
- Task B cannot start until Task A completes
- Multiple tasks depend on a shared prerequisite
**Don't use blockers when:**
- Tasks can be worked on in parallel
- The dependency is just logical grouping (use subtasks instead)

View File

@@ -0,0 +1,186 @@
# Verification Guide
Before marking any task complete, you MUST verify your work. Verification separates "I think it's done" from "it's actually done."
## The Verification Process
1. **Re-read the task context**: What did you originally commit to do?
2. **Check acceptance criteria**: Does your implementation satisfy the "Done when" conditions?
3. **Run relevant tests**: Execute the test suite and document results
4. **Test manually**: Actually try the feature/change yourself
5. **Compare with requirements**: Does what you built match what was asked?
## Strong vs Weak Verification
### Strong Verification Examples
- "All 60 tests passing, build successful"
- "All 69 tests passing (4 new tests for middleware edge cases)"
- "Manually tested with valid/invalid/expired tokens - all cases work"
- "Ran `cargo test` - 142 tests passed, 0 failed"
### Weak Verification (Avoid)
- "Should work now" - "should" means not verified
- "Made the changes" - no evidence it works
- "Added tests" - did the tests pass? What's the count?
- "Fixed the bug" - what bug? Did you verify the fix?
- "Done" - done how? Prove it.
## Verification by Task Type
| Task Type | How to Verify |
|-----------|---------------|
| Code changes | Run full test suite, document passing count |
| New features | Run tests + manual testing of functionality |
| Configuration | Test the config works (run commands, check workflows) |
| Documentation | Verify examples work, links resolve, formatting renders |
| Refactoring | Confirm tests still pass, no behavior changes |
| Bug fixes | Reproduce bug first, verify fix, add regression test |
## Cross-Reference Checklist
Before marking complete, verify all applicable items:
- [ ] Task description requirements met
- [ ] Context "Done when" criteria satisfied
- [ ] Tests passing (document count: "All X tests passing")
- [ ] Build succeeds (if applicable)
- [ ] Manual testing done (describe what you tested)
- [ ] No regressions introduced
- [ ] Edge cases considered (error handling, invalid input)
- [ ] Follow-up work identified (created new tasks if needed)
**If you can't check all applicable boxes, the task isn't done yet.**
## Result Examples with Verification
### Code Implementation
```javascript
await tasks.complete(taskId, { result: `Implemented JWT middleware:
Implementation:
- Created src/middleware/verify-token.ts
- Separated 'expired' vs 'invalid' error codes
- Added user extraction from payload
Verification:
- All 69 tests passing (4 new tests for edge cases)
- Manually tested with valid token: Access granted
- Manually tested with expired token: 401 with 'token_expired'
- Manually tested with invalid signature: 401 with 'invalid_token'` });
```
### Configuration/Infrastructure
```javascript
await tasks.complete(taskId, { result: `Added GitHub Actions workflow for CI:
Implementation:
- Created .github/workflows/ci.yml
- Jobs: lint, test, build with pnpm cache
Verification:
- Pushed to test branch, opened PR #123
- Workflow triggered automatically
- All jobs passed (lint: 0 errors, test: 69/69, build: success)
- Total run time: 2m 34s` });
```
### Refactoring
```javascript
await tasks.complete(taskId, { result: `Refactored storage to one file per task:
Implementation:
- Split tasks.json into .overseer/tasks/{id}.json files
- Added auto-migration from old format
- Atomic writes via temp+rename
Verification:
- All 60 tests passing (including 8 storage tests)
- Build successful
- Manually tested migration: old -> new format works
- Confirmed git diff shows only changed tasks` });
```
### Bug Fix
```javascript
await tasks.complete(taskId, { result: `Fixed login validation accepting usernames with spaces:
Root cause:
- Validation regex didn't account for leading/trailing spaces
Fix:
- Added .trim() before validation in src/auth/validate.ts:42
- Updated regex to reject internal spaces
Verification:
- All 45 tests passing (2 new regression tests)
- Manually tested:
- " admin" -> rejected (leading space)
- "admin " -> rejected (trailing space)
- "ad min" -> rejected (internal space)
- "admin" -> accepted` });
```
### Documentation
```javascript
await tasks.complete(taskId, { result: `Updated API documentation for auth endpoints:
Implementation:
- Added docs for POST /auth/login
- Added docs for POST /auth/logout
- Added docs for POST /auth/refresh
- Included example requests/responses
Verification:
- All code examples tested and working
- Links verified (no 404s)
- Rendered in local preview - formatting correct
- Spell-checked content` });
```
## Common Verification Mistakes
| Mistake | Better Approach |
|---------|-----------------|
| "Tests pass" | "All 42 tests passing" (include count) |
| "Manually tested" | "Manually tested X, Y, Z scenarios" (be specific) |
| "Works" | "Works: [evidence]" (show proof) |
| "Fixed" | "Fixed: [root cause] -> [solution] -> [verification]" |
## When Verification Fails
If verification reveals issues:
1. **Don't complete the task** - it's not done
2. **Document what failed** in task context
3. **Fix the issues** before completing
4. **Re-verify** after fixes
```javascript
// Update context with failure notes
await tasks.update(taskId, {
context: task.context + `
Verification attempt 1 (failed):
- Tests: 41/42 passing
- Failing: test_token_refresh - timeout issue
- Need to investigate async handling`
});
// After fixing
await tasks.complete(taskId, `Implemented token refresh:
Implementation:
- Added refresh endpoint
- Fixed async timeout (was missing await)
Verification:
- All 42 tests passing (fixed timeout issue)
- Manual testing: refresh works within 30s window`);
```

View File

@@ -0,0 +1,164 @@
# Implementation Workflow
Step-by-step guide for working with Overseer tasks during implementation.
## 1. Get Next Ready Task
```javascript
// Get next task with full context (recommended)
const task = await tasks.nextReady();
// Or scope to specific milestone
const task = await tasks.nextReady(milestoneId);
if (!task) {
return "No tasks ready - all blocked or completed";
}
```
`nextReady()` returns a `TaskWithContext` (task with inherited context and learnings) or `null`.
## 2. Review Context
Before starting, verify you can answer:
- **What** needs to be done specifically?
- **Why** is this needed?
- **How** should it be implemented?
- **When** is it done (acceptance criteria)?
```javascript
const task = await tasks.get(taskId);
// Task's own context
console.log("Task:", task.context.own);
// Parent context (if task has parent)
if (task.context.parent) {
console.log("Parent:", task.context.parent);
}
// Milestone context (if depth > 1)
if (task.context.milestone) {
console.log("Milestone:", task.context.milestone);
}
// Task's own learnings (bubbled from completed children)
console.log("Task learnings:", task.learnings.own);
```
**If any answer is unclear:**
1. Check parent task or completed blockers for details
2. Suggest entering plan mode to flesh out requirements
**Proceed without full context when:**
- Task is trivial/atomic (e.g., "Add .gitignore entry")
- Conversation already provides the missing context
- Description itself is sufficiently detailed
## 3. Start Task
```javascript
await tasks.start(taskId);
```
**VCS Required:** Creates bookmark `task/<id>`, records start commit. Fails with `NotARepository` if no jj/git found.
After starting, the task status changes to `in_progress`.
## 4. Implement
Work on the task implementation. Note any learnings to include when completing.
## 5. Verify Work
Before completing, verify your implementation. See @file references/verification.md for full checklist.
Quick checklist:
- [ ] Task description requirements met
- [ ] Context "Done when" criteria satisfied
- [ ] Tests passing (document count)
- [ ] Build succeeds
- [ ] Manual testing done
## 6. Complete Task with Learnings
```javascript
await tasks.complete(taskId, {
result: `Implemented login endpoint:
Implementation:
- Created src/auth/login.ts
- Added JWT token generation
- Integrated with user service
Verification:
- All 42 tests passing (3 new)
- Manually tested valid/invalid credentials`,
learnings: [
"bcrypt rounds should be 12+ for production",
"jose library preferred over jsonwebtoken"
]
});
```
**VCS Required:** Commits changes (NothingToCommit treated as success), then deletes the task's bookmark (best-effort) and clears the DB bookmark field on success. Fails with `NotARepository` if no jj/git found.
**Learnings Effect:** Learnings bubble to immediate parent only. `sourceTaskId` is preserved through bubbling, so if this task's learnings later bubble further, the origin is tracked.
The `result` becomes part of the task's permanent record.
## VCS Integration (Required for Workflow)
VCS operations are **automatically handled** by the tasks API:
| Task Operation | VCS Effect |
|----------------|------------|
| `tasks.start(id)` | **VCS required** - creates bookmark `task/<id>`, records start commit |
| `tasks.complete(id)` | **VCS required** - commits changes, deletes bookmark (best-effort), clears DB bookmark on success |
| `tasks.complete(milestoneId)` | Same + deletes ALL descendant bookmarks recursively (depth-1 and depth-2) |
| `tasks.delete(id)` | Best-effort bookmark cleanup (logs warning on failure) |
**Note:** VCS (jj or git) is required for start/complete. CRUD operations work without VCS.
## Error Handling
### Pending Children
```javascript
try {
await tasks.complete(taskId, "Done");
} catch (err) {
if (err.message.includes("pending children")) {
const pending = await tasks.list({ parentId: taskId, completed: false });
return `Cannot complete: ${pending.length} children pending`;
}
throw err;
}
```
### Task Not Ready
```javascript
const task = await tasks.get(taskId);
// Check if blocked
if (task.blockedBy.length > 0) {
console.log("Blocked by:", task.blockedBy);
// Complete blockers first or unblock
await tasks.unblock(taskId, blockerId);
}
```
## Complete Workflow Example
```javascript
const task = await tasks.nextReady();
if (!task) return "No ready tasks";
await tasks.start(task.id);
// ... implement ...
await tasks.complete(task.id, {
result: "Implemented: ... Verification: All 58 tests passing",
learnings: ["Use jose for JWT"]
});
```

View File

@@ -0,0 +1,464 @@
---
name: solidjs
description: |
SolidJS framework development skill for building reactive web applications with fine-grained reactivity.
Use when working with SolidJS projects including: (1) Creating components with signals, stores, and effects,
(2) Implementing reactive state management, (3) Using control flow components (Show, For, Switch/Match),
(4) Setting up routing with Solid Router, (5) Building full-stack apps with SolidStart,
(6) Data fetching with createResource, (7) Context API for shared state, (8) SSR/SSG configuration.
Triggers: solid, solidjs, solid-js, solid start, solidstart, createSignal, createStore, createEffect.
---
# SolidJS Development
SolidJS is a declarative JavaScript library for building user interfaces with fine-grained reactivity. Unlike virtual DOM frameworks, Solid compiles templates to real DOM nodes and updates them with fine-grained reactions.
## Core Principles
1. **Components run once** — Component functions execute only during initialization, not on every update
2. **Fine-grained reactivity** — Only the specific DOM nodes that depend on changed data update
3. **No virtual DOM** — Direct DOM manipulation via compiled templates
4. **Signals are functions** — Access values by calling: `count()` not `count`
## Reactivity Primitives
### Signals — Basic State
```tsx
import { createSignal } from "solid-js";
const [count, setCount] = createSignal(0);
// Read value (getter)
console.log(count()); // 0
// Update value (setter)
setCount(1);
setCount(prev => prev + 1); // Functional update
```
**Options:**
```tsx
const [value, setValue] = createSignal(initialValue, {
equals: false, // Always trigger updates, even if value unchanged
name: "debugName" // For devtools
});
```
### Effects — Side Effects
```tsx
import { createEffect } from "solid-js";
createEffect(() => {
console.log("Count changed:", count());
// Runs after render, re-runs when dependencies change
});
```
**Key behaviors:**
- Initial run: after render, before browser paint
- Subsequent runs: when tracked dependencies change
- Never runs during SSR or hydration
- Use `onCleanup` for cleanup logic
### Memos — Derived/Cached Values
```tsx
import { createMemo } from "solid-js";
const doubled = createMemo(() => count() * 2);
// Access like signal
console.log(doubled()); // Cached, only recalculates when count changes
```
Use memos when:
- Derived value is expensive to compute
- Derived value is accessed multiple times
- You want to prevent downstream updates when result unchanged
### Resources — Async Data
```tsx
import { createResource } from "solid-js";
const [user, { mutate, refetch }] = createResource(userId, fetchUser);
// In JSX
<Show when={!user.loading} fallback={<Loading />}>
<div>{user()?.name}</div>
</Show>
// Resource properties
user.loading // boolean
user.error // error if failed
user.state // "unresolved" | "pending" | "ready" | "refreshing" | "errored"
user.latest // last successful value
```
## Stores — Complex State
For nested objects/arrays with fine-grained updates:
```tsx
import { createStore } from "solid-js/store";
const [state, setState] = createStore({
user: { name: "John", age: 30 },
todos: []
});
// Path syntax updates
setState("user", "name", "Jane");
setState("todos", todos => [...todos, newTodo]);
setState("todos", 0, "completed", true);
// Produce for immer-like updates
import { produce } from "solid-js/store";
setState(produce(s => {
s.user.age++;
s.todos.push(newTodo);
}));
```
**Store utilities:**
- `produce` — Immer-like mutations
- `reconcile` — Diff and patch data (for API responses)
- `unwrap` — Get raw non-reactive object
## Components
### Basic Component
```tsx
import { Component } from "solid-js";
const MyComponent: Component<{ name: string }> = (props) => {
return <div>Hello, {props.name}</div>;
};
```
### Props Handling
```tsx
import { splitProps, mergeProps } from "solid-js";
// Default props
const merged = mergeProps({ size: "medium" }, props);
// Split props (for spreading)
const [local, others] = splitProps(props, ["class", "onClick"]);
return <button class={local.class} {...others} />;
```
**Props rules:**
- Props are reactive getters — don't destructure at top level
- Use `props.value` in JSX, not `const { value } = props`
### Children Helper
```tsx
import { children } from "solid-js";
const Wrapper: Component = (props) => {
const resolved = children(() => props.children);
createEffect(() => {
console.log("Children:", resolved());
});
return <div>{resolved()}</div>;
};
```
## Control Flow Components
### Show — Conditional Rendering
```tsx
import { Show } from "solid-js";
<Show when={user()} fallback={<Login />}>
{(user) => <Profile user={user()} />}
</Show>
```
### For — List Rendering (keyed by reference)
```tsx
import { For } from "solid-js";
<For each={items()} fallback={<Empty />}>
{(item, index) => (
<div>{index()}: {item.name}</div>
)}
</For>
```
**Note:** `index` is a signal, `item` is the value.
### Index — List Rendering (keyed by index)
```tsx
import { Index } from "solid-js";
<Index each={items()}>
{(item, index) => (
<input value={item().text} />
)}
</Index>
```
**Note:** `item` is a signal, `index` is the value. Better for primitive arrays or inputs.
### Switch/Match — Multiple Conditions
```tsx
import { Switch, Match } from "solid-js";
<Switch fallback={<Default />}>
<Match when={state() === "loading"}>
<Loading />
</Match>
<Match when={state() === "error"}>
<Error />
</Match>
<Match when={state() === "success"}>
<Success />
</Match>
</Switch>
```
### Dynamic — Dynamic Component
```tsx
import { Dynamic } from "solid-js/web";
<Dynamic component={selected()} someProp="value" />
```
### Portal — Render Outside DOM Hierarchy
```tsx
import { Portal } from "solid-js/web";
<Portal mount={document.body}>
<Modal />
</Portal>
```
### ErrorBoundary — Error Handling
```tsx
import { ErrorBoundary } from "solid-js";
<ErrorBoundary fallback={(err, reset) => (
<div>
Error: {err.message}
<button onClick={reset}>Retry</button>
</div>
)}>
<RiskyComponent />
</ErrorBoundary>
```
### Suspense — Async Loading
```tsx
import { Suspense } from "solid-js";
<Suspense fallback={<Loading />}>
<AsyncComponent />
</Suspense>
```
## Context API
```tsx
import { createContext, useContext } from "solid-js";
// Create context
const CounterContext = createContext<{
count: () => number;
increment: () => void;
}>();
// Provider component
export function CounterProvider(props) {
const [count, setCount] = createSignal(0);
return (
<CounterContext.Provider value={{
count,
increment: () => setCount(c => c + 1)
}}>
{props.children}
</CounterContext.Provider>
);
}
// Consumer hook
export function useCounter() {
const ctx = useContext(CounterContext);
if (!ctx) throw new Error("useCounter must be used within CounterProvider");
return ctx;
}
```
## Lifecycle
```tsx
import { onMount, onCleanup } from "solid-js";
function MyComponent() {
onMount(() => {
console.log("Mounted");
const handler = () => {};
window.addEventListener("resize", handler);
onCleanup(() => {
window.removeEventListener("resize", handler);
});
});
return <div>Content</div>;
}
```
## Refs
```tsx
let inputRef: HTMLInputElement;
<input ref={inputRef} />
<input ref={(el) => { /* el is the DOM element */ }} />
```
## Event Handling
```tsx
// Standard JSX events (camelCase) — delegated for common events
<button onClick={handleClick}>Click</button>
<button onClick={(e) => handleClick(e)}>Click</button>
// Native events (on: prefix) — attached directly to the element, NOT delegated
<input on:input={handleInput} />
<div on:scroll={handleScroll} />
```
## Common Patterns
### Conditional Classes
```tsx
import { clsx } from "clsx"; // or classList
<div class={clsx("base", { active: isActive() })} />
<div classList={{ active: isActive(), disabled: isDisabled() }} />
```
### Batch Updates
```tsx
import { batch } from "solid-js";
batch(() => {
setName("John");
setAge(30);
// Effects run once after batch completes
});
```
### Untrack
```tsx
import { untrack } from "solid-js";
createEffect(() => {
console.log(count()); // tracked
console.log(untrack(() => other())); // not tracked
});
```
## TypeScript
```tsx
import type { Component, ParentComponent, JSX } from "solid-js";
// Basic component
const Button: Component<{ label: string }> = (props) => (
<button>{props.label}</button>
);
// With children
const Layout: ParentComponent<{ title: string }> = (props) => (
<div>
<h1>{props.title}</h1>
{props.children}
</div>
);
// Event handler types
const handleClick: JSX.EventHandler<HTMLButtonElement, MouseEvent> = (e) => {
console.log(e.currentTarget);
};
```
## Project Setup
```bash
# Create new project
npm create solid@latest my-app
# With template
npx degit solidjs/templates/ts my-app
# SolidStart
npm create solid@latest my-app -- --template solidstart
```
**vite.config.ts:**
```ts
import { defineConfig } from "vite";
import solid from "vite-plugin-solid";
export default defineConfig({
plugins: [solid()]
});
```
## Anti-Patterns to Avoid
1. **Destructuring props** — Breaks reactivity
```tsx
// ❌ Bad
const { name } = props;
// ✅ Good
props.name
```
2. **Accessing signals outside tracking scope**
```tsx
// ❌ Won't update
console.log(count());
// ✅ Will update
createEffect(() => console.log(count()));
```
3. **Forgetting to call signal getters**
```tsx
// ❌ Passes the function
<div>{count}</div>
// ✅ Passes the value
<div>{count()}</div>
```
4. **Using array index as key** — Use `<For>` for reference-keyed, `<Index>` for index-keyed
5. **Side effects during render** — Use `createEffect` or `onMount`

View File

@@ -0,0 +1,777 @@
# SolidJS API Reference
Complete reference for all SolidJS primitives, utilities, and component APIs.
## Basic Reactivity
### createSignal
```tsx
import { createSignal } from "solid-js";
const [getter, setter] = createSignal<T>(initialValue, options?);
// Options
interface SignalOptions<T> {
equals?: false | ((prev: T, next: T) => boolean);
name?: string;
internal?: boolean;
}
```
**Examples:**
```tsx
const [count, setCount] = createSignal(0);
const [user, setUser] = createSignal<User | null>(null);
// Always update
const [data, setData] = createSignal(obj, { equals: false });
// Custom equality
const [items, setItems] = createSignal([], {
equals: (a, b) => a.length === b.length
});
// Setter forms
setCount(5); // Direct value
setCount(prev => prev + 1); // Functional update
```
### createEffect
```tsx
import { createEffect } from "solid-js";
createEffect<T>(fn: (prev: T) => T, initialValue?: T, options?);
// Options
interface EffectOptions {
name?: string;
}
```
**Examples:**
```tsx
// Basic
createEffect(() => {
console.log("Count:", count());
});
// With previous value
createEffect((prev) => {
console.log("Changed from", prev, "to", count());
return count();
}, count());
// With cleanup
createEffect(() => {
const handler = () => {};
window.addEventListener("resize", handler);
onCleanup(() => window.removeEventListener("resize", handler));
});
```
### createMemo
```tsx
import { createMemo } from "solid-js";
const getter = createMemo<T>(fn: (prev: T) => T, initialValue?: T, options?);
// Options
interface MemoOptions<T> {
equals?: false | ((prev: T, next: T) => boolean);
name?: string;
}
```
**Examples:**
```tsx
const doubled = createMemo(() => count() * 2);
const filtered = createMemo(() => items().filter(i => i.active));
// Previous value
const delta = createMemo((prev) => count() - prev, 0);
```
### createResource
```tsx
import { createResource } from "solid-js";
const [resource, { mutate, refetch }] = createResource(
source?, // Optional reactive source
fetcher, // (source, info) => Promise<T>
options?
);
// Resource properties
resource() // T | undefined
resource.loading // boolean
resource.error // any
resource.state // "unresolved" | "pending" | "ready" | "refreshing" | "errored"
resource.latest // T | undefined (last successful value)
// Options
interface ResourceOptions<T> {
initialValue?: T;
name?: string;
deferStream?: boolean;
ssrLoadFrom?: "initial" | "server";
storage?: (init: T) => [Accessor<T>, Setter<T>];
onHydrated?: (key, info: { value: T }) => void;
}
```
**Examples:**
```tsx
// Without source
const [users] = createResource(fetchUsers);
// With source
const [user] = createResource(userId, fetchUser);
// With options
const [data] = createResource(id, fetchData, {
initialValue: [],
deferStream: true
});
// Actions
mutate(newValue); // Update locally
refetch(); // Re-fetch
refetch(customInfo); // Pass to fetcher's info.refetching
```
## Stores
### createStore
```tsx
import { createStore } from "solid-js/store";
const [store, setStore] = createStore<T>(initialValue);
```
**Update patterns:**
```tsx
const [state, setState] = createStore({
user: { name: "John", age: 30 },
todos: [{ id: 1, text: "Learn Solid", done: false }]
});
// Path syntax
setState("user", "name", "Jane");
setState("user", "age", a => a + 1);
setState("todos", 0, "done", true);
// Array operations
setState("todos", t => [...t, newTodo]);
setState("todos", todos.length, newTodo);
// Multiple paths
setState("todos", { from: 0, to: 2 }, "done", true);
setState("todos", [0, 2, 4], "done", true);
setState("todos", todo => todo.done, "done", false); // predicate receives the item, not an index
// Object merge (shallow)
setState("user", { age: 31 }); // Keeps other properties
```
### produce
```tsx
import { produce } from "solid-js/store";
setState(produce(draft => {
draft.user.age++;
draft.todos.push({ id: 2, text: "New", done: false });
draft.todos[0].done = true;
}));
```
### reconcile
```tsx
import { reconcile } from "solid-js/store";
// Replace with diff (minimal updates)
setState("todos", reconcile(newTodosFromAPI));
// Options
reconcile(data, { key: "id", merge: true });
```
### unwrap
```tsx
import { unwrap } from "solid-js/store";
const raw = unwrap(store); // Non-reactive plain object
```
### createMutable
```tsx
import { createMutable } from "solid-js/store";
const state = createMutable({
count: 0,
user: { name: "John" }
});
// Direct mutation (like MobX)
state.count++;
state.user.name = "Jane";
```
### modifyMutable
```tsx
import { modifyMutable, reconcile, produce } from "solid-js/store";
modifyMutable(state, reconcile(newData));
modifyMutable(state, produce(s => { s.count++ }));
```
## Component APIs
### children
```tsx
import { children } from "solid-js";
const resolved = children(() => props.children);
// Access
resolved(); // JSX.Element | JSX.Element[]
resolved.toArray(); // Always array
```
### createContext / useContext
```tsx
import { createContext, useContext } from "solid-js";
const MyContext = createContext<T>(defaultValue?);
// Provider
<MyContext.Provider value={value}>
{children}
</MyContext.Provider>
// Consumer
const value = useContext(MyContext);
```
### createUniqueId
```tsx
import { createUniqueId } from "solid-js";
const id = createUniqueId(); // "0", "1", etc.
```
### lazy
```tsx
import { lazy } from "solid-js";
const LazyComponent = lazy(() => import("./Component"));
// Use with Suspense
<Suspense fallback={<Loading />}>
<LazyComponent />
</Suspense>
```
## Lifecycle
### onMount
```tsx
import { onMount } from "solid-js";
onMount(() => {
// Runs once after initial render
console.log("Mounted");
});
```
### onCleanup
```tsx
import { onCleanup } from "solid-js";
// In component
onCleanup(() => {
console.log("Cleaning up");
});
// In effect
createEffect(() => {
const sub = subscribe();
onCleanup(() => sub.unsubscribe());
});
```
## Reactive Utilities
### batch
```tsx
import { batch } from "solid-js";
batch(() => {
setA(1);
setB(2);
setC(3);
// Effects run once after batch
});
```
### untrack
```tsx
import { untrack } from "solid-js";
createEffect(() => {
console.log(a()); // Tracked
console.log(untrack(() => b())); // Not tracked
});
```
### on
```tsx
import { on } from "solid-js";
// Explicit dependencies
createEffect(on(count, (value, prev) => {
console.log("Count changed:", prev, "->", value);
}));
// Multiple dependencies
createEffect(on([a, b], ([a, b], [prevA, prevB]) => {
console.log("Changed");
}));
// Defer first run
createEffect(on(count, (v) => console.log(v), { defer: true }));
```
### mergeProps
```tsx
import { mergeProps } from "solid-js";
const merged = mergeProps(
{ size: "medium", color: "blue" }, // Defaults
props // Overrides
);
```
### splitProps
```tsx
import { splitProps } from "solid-js";
const [local, others] = splitProps(props, ["class", "onClick"]);
// local.class, local.onClick
// others contains everything else
const [a, b, rest] = splitProps(props, ["foo"], ["bar"]);
```
### createRoot
```tsx
import { createRoot } from "solid-js";
const dispose = createRoot((dispose) => {
const [count, setCount] = createSignal(0);
// Use signals...
return dispose;
});
// Later
dispose();
```
### getOwner / runWithOwner
```tsx
import { getOwner, runWithOwner } from "solid-js";
const owner = getOwner();
// Later, in async code
runWithOwner(owner, () => {
createEffect(() => {
// This effect has proper ownership
});
});
```
### mapArray
```tsx
import { mapArray } from "solid-js";
const mapped = mapArray(
() => items(),
(item, index) => ({ ...item, doubled: item.value * 2 })
);
```
### indexArray
```tsx
import { indexArray } from "solid-js";
const mapped = indexArray(
() => items(),
(item, index) => <div>{index}: {item().name}</div>
);
```
### observable
```tsx
import { observable } from "solid-js";
const obs = observable(signal);
obs.subscribe((value) => console.log(value));
```
### from
```tsx
import { from } from "solid-js";
// Convert observable/subscribable to signal
const signal = from(rxObservable);
const signal = from((set) => {
const unsub = subscribe(set);
return unsub;
});
```
### catchError
```tsx
import { catchError } from "solid-js";
catchError(
() => riskyOperation(),
(err) => console.error("Error:", err)
);
```
## Secondary Primitives
### createComputed
```tsx
import { createComputed } from "solid-js";
// Like createEffect, but runs immediately and re-runs before rendering —
// use for writing to other signals, not for side effects
createComputed(() => {
setDerived(source() * 2);
});
```
### createRenderEffect
```tsx
import { createRenderEffect } from "solid-js";
// Runs before paint (for DOM measurements)
createRenderEffect(() => {
const height = element.offsetHeight;
});
```
### createDeferred
```tsx
import { createDeferred } from "solid-js";
// Returns value after idle time
const deferred = createDeferred(() => expensiveComputation(), {
timeoutMs: 1000
});
```
### createReaction
```tsx
import { createReaction } from "solid-js";
const track = createReaction(() => {
console.log("Something changed");
});
track(() => count()); // Start tracking
```
### createSelector
```tsx
import { createSelector } from "solid-js";
const isSelected = createSelector(selectedId);
<For each={items()}>
{(item) => (
<div class={isSelected(item.id) ? "selected" : ""}>
{item.name}
</div>
)}
</For>
```
## Components
### Show
```tsx
<Show when={condition()} fallback={<Fallback />}>
<Content />
</Show>
// With callback (narrowed type)
<Show when={user()}>
{(user) => <div>{user().name}</div>}
</Show>
```
### For
```tsx
<For each={items()} fallback={<Empty />}>
{(item, index) => <div>{index()}: {item.name}</div>}
</For>
```
### Index
```tsx
<Index each={items()} fallback={<Empty />}>
{(item, index) => <input value={item().text} />}
</Index>
```
### Switch / Match
```tsx
<Switch fallback={<Default />}>
<Match when={state() === "loading"}>
<Loading />
</Match>
<Match when={state() === "error"}>
<Error />
</Match>
</Switch>
```
### Dynamic
```tsx
import { Dynamic } from "solid-js/web";
<Dynamic component={selected()} prop={value} />
<Dynamic component="div" class="dynamic">Content</Dynamic>
```
### Portal
```tsx
import { Portal } from "solid-js/web";
<Portal mount={document.body}>
<Modal />
</Portal>
```
### ErrorBoundary
```tsx
<ErrorBoundary fallback={(err, reset) => (
<div>
<p>Error: {err.message}</p>
<button onClick={reset}>Retry</button>
</div>
)}>
<Content />
</ErrorBoundary>
```
### Suspense
```tsx
<Suspense fallback={<Loading />}>
<AsyncContent />
</Suspense>
```
### SuspenseList
```tsx
<SuspenseList revealOrder="forwards" tail="collapsed">
<Suspense fallback={<Loading />}><Item1 /></Suspense>
<Suspense fallback={<Loading />}><Item2 /></Suspense>
<Suspense fallback={<Loading />}><Item3 /></Suspense>
</SuspenseList>
```
## Rendering
### render
```tsx
import { render } from "solid-js/web";
const dispose = render(() => <App />, document.getElementById("root")!);
// Cleanup
dispose();
```
### hydrate
```tsx
import { hydrate } from "solid-js/web";
hydrate(() => <App />, document.getElementById("root")!);
```
### renderToString
```tsx
import { renderToString } from "solid-js/web";
const html = renderToString(() => <App />);
```
### renderToStringAsync
```tsx
import { renderToStringAsync } from "solid-js/web";
const html = await renderToStringAsync(() => <App />);
```
### renderToStream
```tsx
import { renderToStream } from "solid-js/web";
const stream = renderToStream(() => <App />);
stream.pipe(res);
```
### isServer
```tsx
import { isServer } from "solid-js/web";
if (isServer) {
// Server-only code
}
```
## JSX Attributes
### ref
```tsx
let el: HTMLDivElement;
<div ref={el} />
<div ref={(e) => console.log(e)} />
```
### classList
```tsx
<div classList={{ active: isActive(), disabled: isDisabled() }} />
```
### style
```tsx
<div style={{ color: "red", "font-size": "14px" }} />
<div style={`color: ${color()}`} />
```
### on:event (native)
```tsx
<div on:click={handleClick} />
<div on:scroll={handleScroll} />
```
### use:directive
```tsx
function clickOutside(el: HTMLElement, accessor: () => () => void) {
const handler = (e: MouseEvent) => {
if (!el.contains(e.target as Node)) accessor()();
};
document.addEventListener("click", handler);
onCleanup(() => document.removeEventListener("click", handler));
}
<div use:clickOutside={() => setOpen(false)} />
```
### prop:property
```tsx
<input prop:value={value()} /> // Set as property, not attribute
```
### attr:attribute
```tsx
<div attr:data-custom={value()} /> // Force attribute
```
### bool:attribute
```tsx
<input bool:disabled={isDisabled()} />
```
### @once
```tsx
<div title={/*@once*/ staticValue} /> // Never updates
```
## Types
```tsx
import type {
Component,
ParentComponent,
FlowComponent,
VoidComponent,
JSX,
Accessor,
Setter,
Signal,
Resource,
Owner
} from "solid-js";
// Component types
const MyComponent: Component<Props> = (props) => <div />;
const Parent: ParentComponent<Props> = (props) => <div>{props.children}</div>;
const Flow: FlowComponent<Props, Item> = (props) => props.children(item);
const Void: VoidComponent<Props> = (props) => <input />;
// Event types
type Handler = JSX.EventHandler<HTMLButtonElement, MouseEvent>;
type ChangeHandler = JSX.ChangeEventHandler<HTMLInputElement>;
```

View File

@@ -0,0 +1,720 @@
# SolidJS Patterns & Best Practices
Common patterns, recipes, and best practices for SolidJS development.
## Component Patterns
### Controlled vs Uncontrolled Inputs
**Controlled:**
```tsx
function ControlledInput() {
const [value, setValue] = createSignal("");
return (
<input
value={value()}
onInput={(e) => setValue(e.currentTarget.value)}
/>
);
}
```
**Uncontrolled with ref:**
```tsx
function UncontrolledInput() {
let inputRef: HTMLInputElement;
const handleSubmit = () => {
console.log(inputRef.value);
};
return (
<>
<input ref={inputRef!} />
<button onClick={handleSubmit}>Submit</button>
</>
);
}
```
### Compound Components
```tsx
const Tabs = {
Root: (props: ParentProps<{ defaultTab?: string }>) => {
const [activeTab, setActiveTab] = createSignal(props.defaultTab ?? "");
return (
<TabsContext.Provider value={{ activeTab, setActiveTab }}>
<div class="tabs">{props.children}</div>
</TabsContext.Provider>
);
},
List: (props: ParentProps) => (
<div class="tabs-list" role="tablist">{props.children}</div>
),
Tab: (props: ParentProps<{ value: string }>) => {
const ctx = useTabsContext();
return (
<button
role="tab"
aria-selected={ctx.activeTab() === props.value}
onClick={() => ctx.setActiveTab(props.value)}
>
{props.children}
</button>
);
},
Panel: (props: ParentProps<{ value: string }>) => {
const ctx = useTabsContext();
return (
<Show when={ctx.activeTab() === props.value}>
<div role="tabpanel">{props.children}</div>
</Show>
);
}
};
// Usage
<Tabs.Root defaultTab="first">
<Tabs.List>
<Tabs.Tab value="first">First</Tabs.Tab>
<Tabs.Tab value="second">Second</Tabs.Tab>
</Tabs.List>
<Tabs.Panel value="first">First Content</Tabs.Panel>
<Tabs.Panel value="second">Second Content</Tabs.Panel>
</Tabs.Root>
```
### Render Props
```tsx
function MouseTracker(props: {
children: (pos: { x: number; y: number }) => JSX.Element;
}) {
const [pos, setPos] = createSignal({ x: 0, y: 0 });
onMount(() => {
const handler = (e: MouseEvent) => setPos({ x: e.clientX, y: e.clientY });
window.addEventListener("mousemove", handler);
onCleanup(() => window.removeEventListener("mousemove", handler));
});
return <>{props.children(pos())}</>;
}
// Usage
<MouseTracker>
{(pos) => <div>Mouse: {pos.x}, {pos.y}</div>}
</MouseTracker>
```
### Higher-Order Components
```tsx
function withAuth<P extends object>(Component: Component<P>) {
return (props: P) => {
const { user } = useAuth();
return (
<Show when={user()} fallback={<Redirect to="/login" />}>
<Component {...props} />
</Show>
);
};
}
const ProtectedDashboard = withAuth(Dashboard);
```
### Polymorphic Components
```tsx
type PolymorphicProps<E extends keyof JSX.IntrinsicElements> = {
as?: E;
} & JSX.IntrinsicElements[E];
function Box<E extends keyof JSX.IntrinsicElements = "div">(
props: PolymorphicProps<E>
) {
const [local, others] = splitProps(props as PolymorphicProps<"div">, ["as"]);
return <Dynamic component={local.as || "div"} {...others} />;
}
// Usage
<Box>Default div</Box>
<Box as="section">Section element</Box>
<Box as="button" onClick={handleClick}>Button</Box>
```
## State Patterns
### Derived State with Multiple Sources
```tsx
function SearchResults() {
const [query, setQuery] = createSignal("");
const [filters, setFilters] = createSignal({ category: "all" });
const results = createMemo(() => {
const q = query().toLowerCase();
const f = filters();
return allItems()
.filter(item => item.name.toLowerCase().includes(q))
.filter(item => f.category === "all" || item.category === f.category);
});
return <For each={results()}>{item => <Item item={item} />}</For>;
}
```
### State Machine Pattern
```tsx
type State = "idle" | "loading" | "success" | "error";
type Event = { type: "FETCH" } | { type: "SUCCESS"; data: any } | { type: "ERROR"; error: Error };
function createMachine(initial: State) {
const [state, setState] = createSignal<State>(initial);
const [data, setData] = createSignal<any>(null);
const [error, setError] = createSignal<Error | null>(null);
const send = (event: Event) => {
const current = state();
switch (current) {
case "idle":
if (event.type === "FETCH") setState("loading");
break;
case "loading":
if (event.type === "SUCCESS") {
setData(event.data);
setState("success");
} else if (event.type === "ERROR") {
setError(event.error);
setState("error");
}
break;
}
};
return { state, data, error, send };
}
```
### Optimistic Updates
```tsx
const [todos, setTodos] = createStore<Todo[]>([]);

// Optimistically removes a todo, rolling back if the API call fails.
async function deleteTodo(id: string) {
  // Snapshot the raw array first (unwrap strips the store proxy) so the
  // exact previous state can be restored.
  const original = [...unwrap(todos)];
  // Optimistic remove: update the UI before the request completes.
  setTodos(todos => todos.filter(t => t.id !== id));
  try {
    await api.deleteTodo(id);
  } catch {
    // Rollback on error; reconcile diffs against the snapshot so
    // unchanged items keep their identity in the store.
    setTodos(reconcile(original));
  }
}
```
### Undo/Redo
```tsx
function createHistory<T>(initial: T) {
  // Linear undo/redo: `past` and `future` act as stacks on either side
  // of the current `present` value.
  const [past, setPast] = createSignal<T[]>([]);
  const [present, setPresent] = createSignal<T>(initial);
  const [future, setFuture] = createSignal<T[]>([]);

  const canUndo = () => past().length > 0;
  const canRedo = () => future().length > 0;

  // Committing a new value pushes the current one onto `past` and
  // discards any redo history.
  const set = (value: T | ((prev: T) => T)) => {
    const resolved =
      typeof value === "function" ? (value as (prev: T) => T)(present()) : value;
    setPast(stack => [...stack, present()]);
    setPresent(resolved);
    setFuture([]);
  };

  // Moves one step back: top of `past` becomes present, the old present
  // is prepended to `future`.
  const undo = () => {
    if (!canUndo()) return;
    const stack = past();
    const previous = stack[stack.length - 1];
    setPast(s => s.slice(0, -1));
    setFuture(f => [present(), ...f]);
    setPresent(previous);
  };

  // Moves one step forward: head of `future` becomes present, the old
  // present is pushed onto `past`.
  const redo = () => {
    if (!canRedo()) return;
    const [next, ...rest] = future();
    setPast(s => [...s, present()]);
    setFuture(rest);
    setPresent(next);
  };

  return { value: present, set, undo, redo, canUndo, canRedo };
}
```
## Custom Hooks/Primitives
### useLocalStorage
```tsx
function createLocalStorage<T>(key: string, initialValue: T) {
  // Signal persisted to localStorage under `key`. Falls back to
  // `initialValue` when nothing is stored or the stored JSON is corrupt
  // (the original crashed on JSON.parse of a malformed value).
  let initial = initialValue;
  const stored = localStorage.getItem(key);
  if (stored !== null) {
    try {
      initial = JSON.parse(stored);
    } catch {
      // Corrupt or legacy value: ignore it and start fresh.
    }
  }
  const [value, setValue] = createSignal<T>(initial);
  // Write-through effect: reading value() subscribes it, so every update
  // is serialized back to localStorage.
  createEffect(() => {
    localStorage.setItem(key, JSON.stringify(value()));
  });
  return [value, setValue] as const;
}
```
### useDebounce
```tsx
function createDebounce<T>(source: () => T, delay: number) {
  // Returns a signal that trails `source` by `delay` ms: each change
  // resets the timer, so only the last value of a burst is published.
  const [debounced, setDebounced] = createSignal<T>(source());
  createEffect(() => {
    // Reading source() here subscribes this effect to it.
    const value = source();
    // Thunk-wrap the value so a function-typed T is not mistaken for an
    // updater by the signal setter.
    const timer = setTimeout(() => setDebounced(() => value), delay);
    // Effect re-run (or disposal) cancels the pending update.
    onCleanup(() => clearTimeout(timer));
  });
  return debounced;
}
// Usage
const debouncedQuery = createDebounce(query, 300);
```
### useThrottle
```tsx
function createThrottle<T>(source: () => T, delay: number) {
  // Returns a signal that follows `source` at most once per `delay` ms.
  // Fires on the leading edge; a trailing timer publishes the last value
  // of a burst when the window closes.
  const [throttled, setThrottled] = createSignal<T>(source());
  let lastRun = 0; // epoch ms of the last published update
  createEffect(() => {
    const value = source(); // subscribe to the source
    const now = Date.now();
    if (now - lastRun >= delay) {
      // Window elapsed: publish immediately.
      lastRun = now;
      setThrottled(() => value);
    } else {
      // Inside the window: schedule publication for when it closes.
      const timer = setTimeout(() => {
        lastRun = Date.now();
        setThrottled(() => value);
      }, delay - (now - lastRun));
      // A newer value before the window closes replaces this timer.
      onCleanup(() => clearTimeout(timer));
    }
  });
  return throttled;
}
```
### useMediaQuery
```tsx
function createMediaQuery(query: string) {
  // Reactive wrapper around window.matchMedia. The MediaQueryList is
  // created eagerly, so this assumes a browser environment — TODO
  // confirm no SSR usage.
  const mql = window.matchMedia(query);
  const [matches, setMatches] = createSignal(mql.matches);
  onMount(() => {
    const handler = (e: MediaQueryListEvent) => setMatches(e.matches);
    mql.addEventListener("change", handler);
    // Unsubscribe when the owning component is disposed.
    onCleanup(() => mql.removeEventListener("change", handler));
  });
  return matches;
}
// Usage
const isMobile = createMediaQuery("(max-width: 768px)");
```
### useClickOutside
```tsx
// Invokes `callback` for any document click landing outside the element
// returned by `ref`. No-op while ref() is undefined.
function createClickOutside(
  ref: () => HTMLElement | undefined,
  callback: () => void
) {
  onMount(() => {
    const handler = (e: MouseEvent) => {
      const el = ref();
      if (el && !el.contains(e.target as Node)) {
        callback();
      }
    };
    document.addEventListener("click", handler);
    // Remove the document listener when the owner is disposed.
    onCleanup(() => document.removeEventListener("click", handler));
  });
}
// Usage
let dropdownRef: HTMLDivElement;
createClickOutside(() => dropdownRef, () => setOpen(false));
```
### useIntersectionObserver
```tsx
// Signal tracking whether the referenced element intersects the viewport
// (or the root given in `options`). Stays false if ref() is undefined
// at mount time.
function createIntersectionObserver(
  ref: () => HTMLElement | undefined,
  options?: IntersectionObserverInit
) {
  const [isIntersecting, setIsIntersecting] = createSignal(false);
  onMount(() => {
    const el = ref();
    if (!el) return; // nothing to observe yet; ref never re-checked
    const observer = new IntersectionObserver(([entry]) => {
      setIsIntersecting(entry.isIntersecting);
    }, options);
    observer.observe(el);
    onCleanup(() => observer.disconnect());
  });
  return isIntersecting;
}
```
## Form Patterns
### Form Validation
```tsx
// Minimal form-state primitive: values, per-field error strings, and
// per-field "touched" flags, each held in its own store.
function createForm<T extends Record<string, any>>(initial: T) {
  const [values, setValues] = createStore<T>(initial);
  const [errors, setErrors] = createStore<Partial<Record<keyof T, string>>>({});
  const [touched, setTouched] = createStore<Partial<Record<keyof T, boolean>>>({});
  // Curried handlers so JSX binds a field once: onInput={handleChange("email")}.
  const handleChange = (field: keyof T) => (e: Event) => {
    const target = e.target as HTMLInputElement;
    setValues(field as any, target.value as any);
  };
  const handleBlur = (field: keyof T) => () => {
    setTouched(field as any, true);
  };
  // Runs the supplied validators against current values; stores each
  // field's message (undefined clears it) and returns overall validity.
  const validate = (validators: Partial<Record<keyof T, (v: any) => string | undefined>>) => {
    let isValid = true;
    for (const [field, validator] of Object.entries(validators)) {
      if (validator) {
        const error = validator(values[field as keyof T]);
        setErrors(field as any, error as any);
        if (error) isValid = false;
      }
    }
    return isValid;
  };
  return { values, errors, touched, handleChange, handleBlur, validate, setValues };
}
// Usage
const form = createForm({ email: "", password: "" });
<input
value={form.values.email}
onInput={form.handleChange("email")}
onBlur={form.handleBlur("email")}
/>
<Show when={form.touched.email && form.errors.email}>
<span class="error">{form.errors.email}</span>
</Show>
```
### Field Array
```tsx
// Store-backed dynamic list for repeatable form rows.
function createFieldArray<T>(initial: T[] = []) {
  const [fields, setFields] = createStore<T[]>(initial);
  const append = (value: T) => setFields(f => [...f, value]);
  const remove = (index: number) => setFields(f => f.filter((_, i) => i !== index));
  // Shallow-merges a partial update into the row at `index`.
  const update = (index: number, value: Partial<T>) => setFields(index, v => ({ ...v, ...value }));
  // produce() permits imperative splicing while the store tracks changes.
  const move = (from: number, to: number) => {
    setFields(produce(f => {
      const [item] = f.splice(from, 1);
      f.splice(to, 0, item);
    }));
  };
  return { fields, append, remove, update, move };
}
```
## Performance Patterns
### Virtualized List
```tsx
// Fixed-row-height windowed list: only rows overlapping the viewport are
// rendered, absolutely positioned inside a spacer of the full height.
function VirtualList<T>(props: {
  items: T[];
  itemHeight: number; // fixed row height in px — the math relies on it
  height: number; // viewport height in px
  renderItem: (item: T, index: number) => JSX.Element;
}) {
  const [scrollTop, setScrollTop] = createSignal(0);
  // Index of the first row intersecting the viewport.
  const startIndex = createMemo(() =>
    Math.floor(scrollTop() / props.itemHeight)
  );
  // +1 covers the partially visible row at the bottom edge.
  const visibleCount = createMemo(() =>
    Math.ceil(props.height / props.itemHeight) + 1
  );
  const visibleItems = createMemo(() =>
    props.items.slice(startIndex(), startIndex() + visibleCount())
  );
  return (
    <div
      style={{ height: `${props.height}px`, overflow: "auto" }}
      onScroll={(e) => setScrollTop(e.currentTarget.scrollTop)}
    >
      {/* Spacer keeps the scrollbar sized for the whole list. */}
      <div style={{ height: `${props.items.length * props.itemHeight}px`, position: "relative" }}>
        <For each={visibleItems()}>
          {(item, i) => (
            <div style={{
              position: "absolute",
              top: `${(startIndex() + i()) * props.itemHeight}px`,
              height: `${props.itemHeight}px`
            }}>
              {props.renderItem(item, startIndex() + i())}
            </div>
          )}
        </For>
      </div>
    </div>
  );
}
```
### Lazy Loading with Intersection Observer
```tsx
// Defers rendering children until the wrapper scrolls near the viewport;
// shows `placeholder` (if any) until then.
function LazyLoad(props: ParentProps<{ placeholder?: JSX.Element }>) {
  let ref: HTMLDivElement;
  // Once true the children render permanently — the observer is one-shot.
  const [isVisible, setIsVisible] = createSignal(false);
  onMount(() => {
    const observer = new IntersectionObserver(
      ([entry]) => {
        if (entry.isIntersecting) {
          setIsVisible(true);
          // Stop observing after the first reveal.
          observer.disconnect();
        }
      },
      // Start revealing slightly before the element enters the viewport.
      { rootMargin: "100px" }
    );
    observer.observe(ref);
    // Safe even after the callback already disconnected.
    onCleanup(() => observer.disconnect());
  });
  return (
    <div ref={ref!}>
      <Show when={isVisible()} fallback={props.placeholder}>
        {props.children}
      </Show>
    </div>
  );
}
```
### Memoized Component
```tsx
// For expensive components that shouldn't re-render on parent updates
// NOTE(review): relies on Solid's <For> reusing rows by item reference —
// confirm items keep stable identities across updates.
function MemoizedExpensiveList(props: { items: Item[] }) {
  // Component only re-renders when items actually change
  return (
    <For each={props.items}>
      {(item) => <ExpensiveItem item={item} />}
    </For>
  );
}
```
## Testing Patterns
### Component Testing
```tsx
import { render, fireEvent, screen } from "@solidjs/testing-library";
test("Counter increments", async () => {
render(() => <Counter />);
const button = screen.getByRole("button", { name: /increment/i });
expect(screen.getByText("Count: 0")).toBeInTheDocument();
fireEvent.click(button);
expect(screen.getByText("Count: 1")).toBeInTheDocument();
});
```
### Testing with Context
```tsx
// Test helper: renders `component` wrapped in the providers the app
// normally supplies (theme, then auth).
function renderWithContext(component: () => JSX.Element) {
  return render(() => (
    <ThemeProvider>
      <AuthProvider>
        {component()}
      </AuthProvider>
    </ThemeProvider>
  ));
}
test("Dashboard shows user", () => {
renderWithContext(() => <Dashboard />);
// ...
});
```
### Testing Async Components
```tsx
import { render, waitFor, screen } from "@solidjs/testing-library";
test("Loads user data", async () => {
render(() => <UserProfile userId="123" />);
expect(screen.getByText(/loading/i)).toBeInTheDocument();
await waitFor(() => {
expect(screen.getByText("John Doe")).toBeInTheDocument();
});
});
```
## Error Handling Patterns
### Global Error Handler
```tsx
function App() {
  // App-wide ErrorBoundary is outermost so it also catches errors thrown
  // while Suspense content resolves; `reset` retries the failed subtree.
  return (
    <ErrorBoundary
      fallback={(err, reset) => (
        <ErrorPage error={err} onRetry={reset} />
      )}
    >
      <Suspense fallback={<AppLoader />}>
        <Router>
          {/* Routes */}
        </Router>
      </Suspense>
    </ErrorBoundary>
  );
}
```
### Async Error Handling
```tsx
function DataComponent() {
  // Destructure `refetch` from the resource actions — the original
  // discarded the actions tuple, leaving `refetch` undefined in onRetry.
  const [data, { refetch }] = createResource(fetchData);
  return (
    <Switch>
      <Match when={data.loading}>
        <Loading />
      </Match>
      <Match when={data.error}>
        <Error error={data.error} onRetry={() => refetch()} />
      </Match>
      {/* Keyed Match: callback receives a non-null accessor for data. */}
      <Match when={data()}>
        {(data) => <Content data={data()} />}
      </Match>
    </Switch>
  );
}
```
## Accessibility Patterns
### Focus Management
```tsx
// Dialog with basic focus management: moves focus into the dialog when
// opened and restores it to the previously focused element on close.
function Modal(props: ParentProps<{ isOpen: boolean; onClose: () => void }>) {
  let dialogRef: HTMLDivElement;
  // Initialize to null: the original left this unassigned, which fails
  // strict TS ("used before being assigned") and reads undefined if the
  // effect's close branch runs first.
  let previousFocus: HTMLElement | null = null;
  createEffect(() => {
    if (props.isOpen) {
      previousFocus = document.activeElement as HTMLElement;
      // Effects run after render, so the Portal content exists here.
      dialogRef.focus();
    } else if (previousFocus) {
      previousFocus.focus();
    }
  });
  return (
    <Show when={props.isOpen}>
      <Portal>
        <div
          ref={dialogRef!}
          role="dialog"
          aria-modal="true"
          tabIndex={-1}
          onKeyDown={(e) => e.key === "Escape" && props.onClose()}
        >
          {props.children}
        </div>
      </Portal>
    </Show>
  );
}
```
### Live Regions
```tsx
function Notifications() {
  // Visually hidden live region: assistive tech announces changes to
  // message() politely (after current speech) and as a whole (atomic).
  const [message, setMessage] = createSignal("");
  // NOTE(review): setMessage is unused in this snippet — presumably
  // callers trigger announcements; confirm how it is wired up.
  return (
    <div
      role="status"
      aria-live="polite"
      aria-atomic="true"
      class="sr-only"
    >
      {message()}
    </div>
  );
}
```

View File

@@ -0,0 +1,223 @@
---
name: spec-planner
description: Dialogue-driven spec development through skeptical questioning and iterative refinement. Triggers: "spec this out", feature planning, architecture decisions, "is this worth it?" questions, RFC/design doc creation, work scoping. Invoke Librarian for unfamiliar tech/frameworks/APIs.
---
# Spec Planner
Produce implementation-ready specs through rigorous dialogue and honest trade-off analysis.
## Core Philosophy
- **Dialogue over deliverables** - Plans emerge from discussion, not assumption
- **Skeptical by default** - Requirements are incomplete until proven otherwise
- **Second-order thinking** - Consider downstream effects and maintenance burden
## Workflow Phases
```
CLARIFY --[user responds]--> DISCOVER --[done]--> DRAFT --[complete]--> REFINE --[approved]--> DONE
| | | |
+--[still ambiguous]--<------+-------------------+----[gaps found]------+
```
**State phase at end of every response:**
```
---
Phase: CLARIFY | Waiting for: answers to questions 1-4
```
---
## Phase 1: CLARIFY (Mandatory)
**Hard rule:** No spec until user has responded to at least one round of questions.
1. **STOP.** Do not proceed to planning.
2. Identify gaps in: scope, motivation, constraints, edge cases, success criteria
3. Ask 3-5 pointed questions that would change the approach. USE YOUR QUESTION TOOL.
4. **Wait for responses**
**IMPORTANT: Always use the `question` tool to ask clarifying questions.** Do NOT output questions as freeform text. The question tool provides structured options and better UX. Example:
```
question({
questions: [{
header: "Scope",
question: "Which subsystems need detailed specs?",
options: [
{ label: "VCS layer", description: "jj-lib + gix unified interface" },
{ label: "Review workflow", description: "GitHub PR-style local review" },
{ label: "Event system", description: "pub/sub + persistence" }
],
multiple: true
}]
})
```
| Category | Example |
|----------|---------|
| Scope | "Share where? Social media? Direct link? Embed?" |
| Motivation | "What user problem are we actually solving?" |
| Constraints | "Does this need to work with existing privacy settings?" |
| Success | "How will we know this worked?" |
**Escape prevention:** Even if request seems complete, ask 2+ clarifying questions. Skip only for mechanical requests (e.g., "rename X to Y").
**Anti-patterns to resist:**
- "Just give me a rough plan" -> Still needs scope questions
- "I'll figure out the details" -> Those details ARE the spec
- Very long initial request -> Longer != clearer; probe assumptions
**Transition:** User answered AND no new ambiguities -> DISCOVER
---
## Phase 2: DISCOVER
**After clarification, before planning:** Understand existing system.
Launch explore subagents in parallel:
```
Task(
subagent_type="explore",
description="Explore [area name]",
prompt="Explore [area]. Return: key files, abstractions, patterns, integration points."
)
```
| Target | What to Find |
|--------|--------------|
| Affected area | Files, modules that will change |
| Existing patterns | How similar features are implemented |
| Integration points | APIs, events, data flows touched |
**If unfamiliar tech involved**, invoke Librarian:
```
Task(
subagent_type="librarian",
description="Research [tech name]",
prompt="Research [tech] for [use case]. Return: recommended approach, gotchas, production patterns."
)
```
**Output:** Brief architecture summary before proposing solutions.
**Transition:** System context understood -> DRAFT
---
## Phase 3: DRAFT
Apply planning framework from [decision-frameworks.md](./references/decision-frameworks.md):
1. **Problem Definition** - What are we solving? For whom? Cost of not solving?
2. **Constraints Inventory** - Time, system, knowledge, scope ceiling
3. **Solution Space** - Simplest -> Balanced -> Full engineering solution
4. **Trade-off Analysis** - See table format in references
5. **Recommendation** - One clear choice with reasoning
Use appropriate template from [templates.md](./references/templates.md):
- **Quick Decision** - Scoped technical choices
- **Feature Plan** - New feature development
- **ADR** - Architecture decisions
- **RFC** - Larger proposals
**Transition:** Spec produced -> REFINE
---
## Phase 4: REFINE
Run completeness check:
| Criterion | Check |
|-----------|-------|
| Scope bounded | Every deliverable listed; non-goals explicit |
| Ambiguity resolved | No "TBD" or "to be determined" |
| Acceptance testable | Each criterion pass/fail verifiable |
| Dependencies ordered | Clear what blocks what |
| Types defined | Data shapes specified (not "some object") |
| Effort estimated | Each deliverable has S/M/L/XL |
| Risks identified | At least 2 risks with mitigations |
| Open questions | Resolved OR assigned owner |
**If any criterion fails:** Return to dialogue. "To finalize, I need clarity on: [failing criteria]."
**Transition:** All criteria pass + user approval -> DONE
---
## Phase 5: DONE
### Final Output
```
=== Spec Complete ===
Phase: DONE
Type: <feature plan | architecture decision | refactoring | strategy>
Effort: <S/M/L/XL>
Status: Ready for task breakdown
Discovery:
- Explored: <areas investigated>
- Key findings: <relevant architecture/patterns>
Recommendation:
<brief summary>
Key Trade-offs:
- <what we're choosing vs alternatives>
Deliverables (Ordered):
1. [D1] (effort) - depends on: -
2. [D2] (effort) - depends on: D1
Open Questions:
- [ ] <if any remain> -> Owner: [who]
```
### Write Spec to File (MANDATORY)
1. Derive filename from feature/decision name (kebab-case)
2. Write spec to `specs/<filename>.md`
3. Confirm: `Spec written to: specs/<filename>.md`
---
## Effort Estimates
| Size | Time | Scope |
|------|------|-------|
| **S** | <1 hour | Single file, isolated change |
| **M** | 1-3 hours | Few files, contained feature |
| **L** | 1-2 days | Cross-cutting, multiple components |
| **XL** | >2 days | Major refactor, new system |
## Scope Control
When scope creeps:
1. **Name it:** "That's scope expansion. Let's finish X first."
2. **Park it:** "Added to Open Questions. Revisit after core spec stable."
3. **Cost it:** "Adding Y changes effort from M to XL. Worth it?"
**Hard rule:** If scope changes, re-estimate and flag explicitly.
## References
| File | When to Read |
|------|--------------|
| [templates.md](./references/templates.md) | Output formats for plans, ADRs, RFCs |
| [decision-frameworks.md](./references/decision-frameworks.md) | Complex multi-factor decisions |
| [estimation.md](./references/estimation.md) | Breaking down work, avoiding underestimation |
| [technical-debt.md](./references/technical-debt.md) | Evaluating refactoring ROI |
## Integration
| Agent | When to Invoke |
|-------|----------------|
| **Librarian** | Research unfamiliar tech, APIs, frameworks |
| **Oracle** | Deep architectural analysis, complex debugging |

View File

@@ -0,0 +1,75 @@
# Decision Frameworks
## Reversibility Matrix
| Decision Type | Approach |
|---------------|----------|
| **Two-way door** (easily reversed) | Decide fast, learn from outcome |
| **One-way door** (hard to reverse) | Invest time in analysis |
Most decisions are two-way doors. Don't over-analyze.
## Cost of Delay
```
Daily Cost = (Value Delivered / Time to Deliver) x Risk Factor
```
Use when prioritizing:
- High daily cost -> Do first
- Low daily cost -> Can wait
## RICE Scoring
| Factor | Question | Scale |
|--------|----------|-------|
| **R**each | How many users affected? | # users/period |
| **I**mpact | How much per user? | 0.25, 0.5, 1, 2, 3 |
| **C**onfidence | How sure are we? | 20%, 50%, 80%, 100% |
| **E**ffort | Person-weeks | 0.5, 1, 2, 4, 8+ |
```
RICE = (Reach x Impact x Confidence) / Effort
```
## Technical Decision Checklist
Before committing to a technical approach:
- [ ] Have we talked to someone who's done this before?
- [ ] What's the simplest version that teaches us something?
- [ ] What would make us reverse this decision?
- [ ] Who maintains this in 6 months?
- [ ] What's our rollback plan?
## When to Build vs Buy vs Adopt
| Signal | Build | Buy | Adopt (OSS) |
|--------|-------|-----|-------------|
| Core differentiator | Yes | No | Maybe |
| Commodity problem | No | Yes | Yes |
| Tight integration needed | Yes | Maybe | Maybe |
| Team has expertise | Yes | N/A | Yes |
| Time pressure | No | Yes | Maybe |
| Long-term control needed | Yes | No | Maybe |
## Decomposition Strategies
### Vertical Slicing
Cut features into thin end-to-end slices that deliver value:
```
Bad: "Build database layer" -> "Build API" -> "Build UI"
Good: "User can see their profile" -> "User can edit name" -> "User can upload avatar"
```
### Risk-First Ordering
1. Identify highest-risk unknowns
2. Build spike/proof-of-concept for those first
3. Then build around proven foundation
### Dependency Mapping
```
[Feature A] -depends on-> [Feature B] -depends on-> [Feature C]
^
Start here
```

View File

@@ -0,0 +1,69 @@
# Estimation
## Why Estimates Fail
| Cause | Mitigation |
|-------|------------|
| Optimism bias | Use historical data, not gut |
| Missing scope | List "obvious" tasks explicitly |
| Integration blindness | Add 20-30% for glue code |
| Unknown unknowns | Add buffer based on novelty |
| Interruptions | Assume 60% focused time |
## Estimation Techniques
### Three-Point Estimation
```
Expected = (Optimistic + 4xMostLikely + Pessimistic) / 6
```
### Relative Sizing
Compare to known references:
- "This is about twice as complex as Feature X"
- Use Fibonacci (1, 2, 3, 5, 8, 13) to reflect uncertainty
### Task Decomposition
1. Break into tasks <=4 hours
2. If can't decompose, spike first
3. Sum tasks + 20% integration buffer
## Effort Multipliers
| Factor | Multiplier |
|--------|------------|
| New technology | 1.5-2x |
| Unclear requirements | 1.3-1.5x |
| External dependencies (waiting on others) | 1.2-1.5x |
| Legacy/undocumented code | 1.3-2x |
| Production deployment | 1.2x |
| First time doing X | 2-3x |
| Context switching (other priorities) | 1.3x |
| Yak shaving risk (unknown unknowns) | 1.5x |
## Hidden Work Checklist
Always include time for:
- [ ] Code review (20% of dev time)
- [ ] Testing (30-50% of dev time)
- [ ] Documentation (10% of dev time)
- [ ] Deployment/config (varies)
- [ ] Bug fixes from testing (20% buffer)
- [ ] Interruptions / competing priorities
## When to Re-Estimate
Re-estimate when:
- Scope changes materially
- Major unknown becomes known
- Actual progress diverges >30% from estimate
## Communicating Estimates
**Good:** "1-2 weeks, confidence 70% — main risk is the third-party API integration"
**Bad:** "About 2 weeks"
Always include:
1. Range, not point estimate
2. Confidence level
3. Key assumptions/risks

View File

@@ -0,0 +1,94 @@
# Technical Debt
## Debt Categories
| Type | Example | Urgency |
|------|---------|---------|
| **Deliberate-Prudent** | "Ship now, refactor next sprint" | Planned paydown |
| **Deliberate-Reckless** | "We don't have time for tests" | Accumulating risk |
| **Inadvertent-Prudent** | "Now we know a better way" | Normal learning |
| **Inadvertent-Reckless** | "What's layering?" | Learning curve |
## When to Pay Down Debt
**Pay now when:**
- Debt is in path of upcoming work
- Cognitive load slowing every change
- Bugs recurring in same area
- Onboarding time increasing
**Defer when:**
- Area is stable, rarely touched
- Bigger refactor coming anyway
- Time constrained on priority work
- Code may be deprecated
## ROI Framework
```
Debt ROI = (Time Saved Per Touch x Touches/Month x Months) / Paydown Cost
```
| ROI | Action |
|-----|--------|
| >3x | Prioritize immediately |
| 1-3x | Plan into upcoming work |
| <1x | Accept or isolate |
## Refactoring Strategies
### Strangler Fig
1. Build new alongside old
2. Redirect traffic incrementally
3. Remove old when empty
Best for: Large system replacements
### Branch by Abstraction
1. Create abstraction over old code
2. Implement new behind abstraction
3. Switch implementations
4. Remove old
Best for: Library/dependency swaps
### Parallel Change (Expand-Contract)
1. Add new behavior alongside old
2. Migrate callers incrementally
3. Remove old behavior
Best for: API changes
### Mikado Method
1. Try the change
2. When it breaks, note prerequisites
3. Revert
4. Recursively fix prerequisites
5. Apply original change
Best for: Untangling dependencies
## Tracking Debt
Minimum viable debt tracking:
```markdown
## Tech Debt Log
| ID | Description | Impact | Area | Added |
|----|-------------|--------|------|-------|
| TD-1 | No caching layer | Slow queries | /api | 2024-01 |
```
Review monthly. Prune resolved items.
## Communicating Debt to Stakeholders
**Frame as investment, not cleanup:**
- "This will reduce bug rate by ~30%"
- "Deployment time goes from 2 hours to 20 minutes"
- "New features in this area take 2x longer than they should"
**Avoid:**
- "The code is messy"
- "We need to refactor"
- Technical jargon without business impact

View File

@@ -0,0 +1,161 @@
# Output Templates
## Quick Decision
For scoped technical choices with clear options.
```
## Decision: [choice]
**Why:** [1-2 sentences]
**Trade-off:** [what we're giving up]
**Revisit if:** [trigger conditions]
```
## Feature Plan (Implementation-Ready)
For new feature development. **Complete enough for task decomposition.**
```
## Feature: [name]
### Problem Statement
**Who:** [specific user/persona]
**What:** [the problem they face]
**Why it matters:** [business/user impact]
**Evidence:** [how we know this is real]
### Proposed Solution
[High-level approach in 2-3 paragraphs]
### Scope & Deliverables
| Deliverable | Effort | Depends On |
|-------------|--------|------------|
| [D1] | S/M/L | - |
| [D2] | S/M/L | D1 |
### Non-Goals (Explicit Exclusions)
- [Thing people might assume is in scope but isn't]
### Data Model
[Types, schemas, state shapes that will exist or change]
### API/Interface Contract
[Public interfaces between components — input/output/errors]
### Acceptance Criteria
- [ ] [Testable statement 1]
- [ ] [Testable statement 2]
### Test Strategy
| Layer | What | How |
|-------|------|-----|
| Unit | [specific logic] | [approach] |
| Integration | [boundaries] | [approach] |
### Risks & Mitigations
| Risk | Likelihood | Impact | Mitigation |
|------|------------|--------|------------|
### Trade-offs Made
| Chose | Over | Because |
|-------|------|---------|
### Open Questions
- [ ] [Question] -> Owner: [who decides]
### Success Metrics
- [Measurable outcome]
```
## Architecture Decision Record (ADR)
For significant architecture decisions that need documentation.
```
## ADR: [title]
**Status:** Proposed | Accepted | Deprecated | Superseded
**Date:** [date]
### Context
[What forces are at play]
### Decision
[What we're doing]
### Consequences
- [+] [Benefit]
- [-] [Drawback]
- [~] [Neutral observation]
```
## RFC (Request for Comments)
For larger proposals needing broader review.
```
## RFC: [title]
**Author:** [name]
**Status:** Draft | In Review | Accepted | Rejected
**Created:** [date]
### Summary
[1-2 paragraph overview]
### Motivation
[Why are we doing this?]
### Detailed Design
[Technical details]
### Alternatives Considered
| Option | Pros | Cons | Why Not |
|--------|------|------|---------|
### Migration/Rollout
[How we get from here to there]
### Open Questions
- [ ] [Question]
```
## Handoff Artifact
When spec is complete, produce final summary for task decomposition:
```
# [Feature Name] - Implementation Spec
**Status:** Ready for task breakdown
**Effort:** [total estimate]
**Approved by:** [human who approved]
**Date:** [date]
## Deliverables (Ordered)
1. **[D1]** (S) - [one-line description]
- Depends on: -
- Files likely touched: [paths]
2. **[D2]** (M) - [one-line description]
- Depends on: D1
- Files likely touched: [paths]
## Key Technical Decisions
- [Decision]: [choice] because [reason]
## Data Model
[Copy from spec]
## Acceptance Criteria
1. [Criterion 1]
2. [Criterion 2]
## Open Items (Non-Blocking)
- [Item] -> Owner: [who]
---
*Spec approved for task decomposition.*
```

5
profiles/overseer.nix Normal file
View File

@@ -0,0 +1,5 @@
{pkgs, ...}: {
  # Profile module: installs the `overseer` package into the user
  # environment via home-manager's home.packages.
  home.packages = with pkgs; [
    overseer
  ];
}

View File

@@ -45,7 +45,6 @@ with pkgs;
tree-sitter
unzip
vivid
watchman
zip
]
++ lib.optionals stdenv.isDarwin [
@@ -53,6 +52,7 @@ with pkgs;
alcove
dockutil
mas
openusage
raycast
tailscale
xcodes

View File

@@ -1,7 +1,7 @@
{
programs.starship = {
enable = true;
enableFishIntegration = true;
enableNushellIntegration = true;
settings = {
add_newline = true;
command_timeout = 2000;

View File

@@ -1,6 +1,6 @@
{
programs.zoxide = {
enable = true;
enableFishIntegration = true;
enableNushellIntegration = true;
};
}

View File

@@ -1,35 +0,0 @@
{
"data": "ENC[AES256_GCM,data:l7jYCSQE0BwYOoIMHgGOmMrWz5s=,iv:4TugFnfmzoeroq6SfRLD36gSSBHGVT6CxQE4Pyp1Ibc=,tag:HOTLlXGab05u+qREtyxAeg==,type:str]",
"sops": {
"age": [
{
"recipient": "age1njjegjjdqzfnrr54f536yl4lduqgna3wuv7ef6vtl9jw5cju0grsgy62tm",
"enc": "-----BEGIN AGE ENCRYPTED FILE-----\nYWdlLWVuY3J5cHRpb24ub3JnL3YxCi0+IFgyNTUxOSBFTmVlZmxIMGFBbE1RYkNN\nbTh1NG5BK1JCTlZ1MzBtSS9NRXRVM3phN3lrCm9qSlpEQjFGZTBsME9ndXhYdHZv\ncW1oek5hU1pjQXhzT0cvTks3VEozdHcKLS0tIG1TUWJKSE1KRVlXMVdydjBhY1ZS\nWXRhNFA2aHZTT2pud0ZXOFpjVldKYjgKYyH7k0BW/sf3vDQLPaZoB2VHoyKmCkWy\nwTolEp4vkuzZld4KPdDW4jYL8kt8Fwa8TNlPKMUOvY0gt82pbJ1MAA==\n-----END AGE ENCRYPTED FILE-----\n"
},
{
"recipient": "age187jl7e4k9n4guygkmpuqzeh0wenefwrfkpvuyhvwjrjwxqpzassqq3x67j",
"enc": "-----BEGIN AGE ENCRYPTED FILE-----\nYWdlLWVuY3J5cHRpb24ub3JnL3YxCi0+IFgyNTUxOSA5Y2ZpWlhXUll4WUxheENj\nRjlwK2dJUjBOU1lhV1dKWFhiMlJiQ0Q2Q0VrCnJac0ppdm9URW14TUwrVUdwN2x4\ncUgxSGRTM09ORUk0VXhUcVBxS1Z2a0EKLS0tIGhOSmc2L3FKOXdqdG5Da210aXF4\nZ2h0dW45K2dlZGJQMmxneE1IbXMvWmsKkBfh09E6o2uvNegq5pZgUBWOYjREDDyg\nHEgV7G4cWJBPpBFwS1gLGyRZ0TWrmoXCqF8I22BMG6tq94AGrKqFzg==\n-----END AGE ENCRYPTED FILE-----\n"
},
{
"recipient": "age1dqt3znmzcgghsjjzzax0pf0eyu95h0p7kaf5v988ysjv7fl7lumsatl048",
"enc": "-----BEGIN AGE ENCRYPTED FILE-----\nYWdlLWVuY3J5cHRpb24ub3JnL3YxCi0+IFgyNTUxOSBCc1B0Ylo5K1poUXpkcFZk\neGJSQmNBVU00RTdLcTNBRUN2N3cySFE0cUdVCjNmZlpGM0VUOWdvbVQ3U1BoRWJ1\na3kwK3JxQ0IrZ0NUaW5sTmZlWmd4MncKLS0tIHozZ29UbTNPZDBjTUl3WlB2YmFs\nS0IrN3lLK2YyM0ROTERtQlppbjVOdWMKtpLveHYL4RfEpCBLt4c4R/NVg1QF+if1\nz26bWNQseIsPtlIk4GImJZhUhbr3sDY81gcl7sd8XGpze7EAVM262g==\n-----END AGE ENCRYPTED FILE-----\n"
},
{
"recipient": "age1ez6j3r5wdp0tjy7n5qzv5vfakdc2nh2zeu388zu7a80l0thv052syxq5e2",
"enc": "-----BEGIN AGE ENCRYPTED FILE-----\nYWdlLWVuY3J5cHRpb24ub3JnL3YxCi0+IFgyNTUxOSBySmdCaVRYV2RjOFR4dFVx\nYmd1VlRzQmUwR1RwcTNJdTlxQnFFZXVscnlFCnRVOWQwa3VVUUlHQTF0WHBXb0FL\nMEViVXVDeWJLZnBLdWlUZTZFQVFqYlUKLS0tIHBGdXdPM2tOdzR0Qm5UT2F6dnNF\nTytWcXNYMEJSTU8xOExMK1MwQUNhdk0KVBbrhhwh+Yup+CW3Y+q9RoQ3XFjBfTLb\nzDbCZBhNx9HP7Q8mlndYPkJc3aGfpKxHpaniBLqzDKNKJ5OE4kzY3Q==\n-----END AGE ENCRYPTED FILE-----\n"
},
{
"recipient": "age1tlymdmaukhwupzrhszspp26lgd8s64rw4vu9lwc7gsgrjm78095s9fe9l3",
"enc": "-----BEGIN AGE ENCRYPTED FILE-----\nYWdlLWVuY3J5cHRpb24ub3JnL3YxCi0+IFgyNTUxOSBtVnl0ckE1REtQbW8ybEtF\nWG1VNkFtMUVlNk04SnIySVFvM2pkV1dTMTNNCkV6R2NzQzM4WXRIdzJrVEo4L2FX\nQ1dmV2YwNHdMMVJUNmZWRktvK2s1dUUKLS0tIE5nUXNack5meUFzcWxhOTNGLzdG\naGlVYURRK2hLK0lNNm4wYTUrdXptLzAKZgN1tY1G3Jso1+CT0LQQ4I49CgdCECpe\n1wRdgaWI8P4ep2S7QO3Vu+MuoyOgVgGJdG/HzsEAAqJ0XMSBWpeFXg==\n-----END AGE ENCRYPTED FILE-----\n"
},
{
"recipient": "age1h537hhl5qgew5sswjp7xf7d4j4aq0gg9s5flnr8twm2smnqyudhqmum8uy",
"enc": "-----BEGIN AGE ENCRYPTED FILE-----\nYWdlLWVuY3J5cHRpb24ub3JnL3YxCi0+IFgyNTUxOSB3YnVBeGl5akg2b3BidW5m\nMjBnWWxReTFoNTBYVm83ZkhtSHUvTmtNSGxzCk5XcmVJZ0hpRURIT09teHFzOURt\nK25JSFJCNU5mb2k4ZHBTU0Q0emFtYWMKLS0tIEtqMlFNckZmQk15Z0xoT1BrUWgv\nN0VJT1RjOG1ZOVU1UklKTm5TUEhLQmsKP03juyke7ZmLGx+BHE596d18TWVLFuUV\nP1pK0QlwtRL0l/6r7l4hXN9gJ8JU+zO5NTcLtvvHdSL188q3TX//UQ==\n-----END AGE ENCRYPTED FILE-----\n"
}
],
"lastmodified": "2026-01-07T19:19:41Z",
"mac": "ENC[AES256_GCM,data:SnMD9+jpySE35ylbHInXfsIQ/Nq6FBpunlhgJSOnYCQLE9vGc5Rtkg8cYlqFBz82Ukjk2EJafKcjDgBgTx6+JcYC8idM7yCpqyJaTx9p0nr6+p46ozqrL8lm4qF+yJRK997RjfRStLE2JsLN0SRSBFTDL0yPB6mFc/BncywVVZ0=,iv:fJ0kpgysw8eHbIIrtdyUXwWYvHyOa6kJ1wW+6NvBTxY=,tag:cqyY6qmEa0HF6u61v9VZJw==,type:str]",
"unencrypted_suffix": "_unencrypted",
"version": "3.11.0"
}
}