2026-03-21 18:45:02 +00:00
parent 3b345614f7
commit 80f24ce11f
28 changed files with 219 additions and 1526 deletions

View File

@@ -36,7 +36,6 @@ alejandra . # Format all Nix files
- `_lib/` - Utility functions (underscore = ignored by import-tree)
- `_darwin/` - Darwin-specific sub-modules
- `_neovim/` - Neovim plugin configs
- `_opencode/` - OpenCode agent/command/skill configs
- `_hosts/` - Host-specific sub-files (disk-config, hardware, etc.)
- **Apps**: `apps/` - Per-system app scripts (Nushell)
- **Secrets**: `secrets/` - SOPS-encrypted secrets (`.sops.yaml` for config)

flake.lock (generated)
View File

@@ -816,6 +816,54 @@
"type": "github"
}
},
"pi-agent-stuff": {
"flake": false,
"locked": {
"lastModified": 1774049302,
"narHash": "sha256-kPmbJzS2f8bpKVnBfJ09gcHewHFhFpt6DmKuDHeZD1k=",
"owner": "mitsuhiko",
"repo": "agent-stuff",
"rev": "929d59696696210b403d2fbf9a82259b0078794d",
"type": "github"
},
"original": {
"owner": "mitsuhiko",
"repo": "agent-stuff",
"type": "github"
}
},
"pi-elixir": {
"flake": false,
"locked": {
"lastModified": 1772900407,
"narHash": "sha256-QoCPVdN5CYGe5288cJQmB10ds/UOucHIyG9z9E/4hsw=",
"owner": "dannote",
"repo": "pi-elixir",
"rev": "3b8f667beb696ce6ed456e762bfcf61e7326f5c4",
"type": "github"
},
"original": {
"owner": "dannote",
"repo": "pi-elixir",
"type": "github"
}
},
"pi-rose-pine": {
"flake": false,
"locked": {
"lastModified": 1770936151,
"narHash": "sha256-6TzuWJPAn8zz+lUjZ3slFCNdPVd/Z2C+WoXFsLopk1g=",
"owner": "zenobi-us",
"repo": "pi-rose-pine",
"rev": "9b342f6e16d6b28c00c2f888ba2f050273981bdb",
"type": "github"
},
"original": {
"owner": "zenobi-us",
"repo": "pi-rose-pine",
"type": "github"
}
},
"pimalaya": {
"flake": false,
"locked": {
@@ -861,6 +909,9 @@
"nixpkgs"
],
"nixvim": "nixvim",
"pi-agent-stuff": "pi-agent-stuff",
"pi-elixir": "pi-elixir",
"pi-rose-pine": "pi-rose-pine",
"sops-nix": "sops-nix",
"zjstatus": "zjstatus"
}

View File

@@ -68,6 +68,18 @@
nixpkgs.url = "github:nixos/nixpkgs/master";
nixpkgs-lib.follows = "nixpkgs";
nixvim.url = "github:nix-community/nixvim";
pi-agent-stuff = {
url = "github:mitsuhiko/agent-stuff";
flake = false;
};
pi-elixir = {
url = "github:dannote/pi-elixir";
flake = false;
};
pi-rose-pine = {
url = "github:zenobi-us/pi-rose-pine";
flake = false;
};
sops-nix = {
url = "github:Mic92/sops-nix";
inputs.nixpkgs.follows = "nixpkgs";

View File

@@ -31,8 +31,6 @@
{ mode = 'n', keys = '<Leader>v', desc = '+VCS' },
{ mode = 'n', keys = '<Leader>l', desc = '+LSP' },
{ mode = 'x', keys = '<Leader>l', desc = '+LSP' },
{ mode = 'n', keys = '<Leader>o', desc = '+OpenCode' },
{ mode = 'x', keys = '<Leader>o', desc = '+OpenCode' },
{ mode = 'n', keys = '<Leader>r', desc = '+Review' },
{ mode = 'v', keys = '<Leader>r', desc = '+Review' },
{ mode = 'n', keys = '<Leader>t', desc = '+Tab' },

View File

@@ -3,7 +3,7 @@
enable = true;
settings = {
anti_conceal = {enabled = false;};
file_types = ["markdown" "opencode_output"];
file_types = ["markdown"];
};
};
}

View File

@@ -1,11 +0,0 @@
# Global AGENTS.md
## Version Control
- Use `jj` for VCS, not `git`
- `jj tug` is an alias for `jj bookmark move --from closest_bookmark(@-) --to @-`
## Scripting
- Always use Nushell (`nu`) for scripting
- Never use Python, Perl, Lua, awk, or any other scripting language

View File

@@ -1,45 +0,0 @@
---
description: Reviews code for quality, bugs, security, and best practices
mode: subagent
temperature: 0.1
tools:
write: false
edit: false
permission:
edit: deny
webfetch: allow
---
You are a code reviewer. Provide actionable feedback on code changes.
**Diffs alone are not enough.** Read the full file(s) being modified to understand context. Code that looks wrong in isolation may be correct given surrounding logic.
## What to Look For
**Bugs** — Primary focus.
- Logic errors, off-by-one mistakes, incorrect conditionals
- Missing guards, unreachable code paths, broken error handling
- Edge cases: null/empty inputs, race conditions
- Security: injection, auth bypass, data exposure
**Structure** — Does the code fit the codebase?
- Follows existing patterns and conventions?
- Uses established abstractions?
- Excessive nesting that could be flattened?
**Performance** — Only flag if obviously problematic.
- O(n²) on unbounded data, N+1 queries, blocking I/O on hot paths
## Before You Flag Something
- **Be certain.** Don't flag something as a bug if you're unsure — investigate first.
- **Don't invent hypothetical problems.** If an edge case matters, explain the realistic scenario.
- **Don't be a zealot about style.** Some "violations" are acceptable when they're the simplest option.
- Only review the changes — not pre-existing code that wasn't modified.
## Output
- Be direct about bugs and why they're bugs
- Communicate severity honestly — don't overstate
- Include file paths and line numbers
- Suggest fixes when appropriate
- Matter-of-fact tone, no flattery

View File

@@ -1,49 +0,0 @@
---
description: Turn pasted Albanian lesson into translated notes and solved exercises in zk
---
Process the pasted Albanian lesson content and create two `zk` notes: one for lesson material and one for exercises.
<lesson-material>
$ARGUMENTS
</lesson-material>
Requirements:
1. Parse the lesson content and produce two markdown outputs:
- `material` output: lesson material only.
- `exercises` output: exercises and solutions.
2. Use today's date in both notes (date in title and inside content).
3. In the `material` output:
- Keep clean markdown structure with headings and bullet points.
- Do not add a top-level title heading (no `# ...`) because `zk new --title` already sets the note title.
- Translate examples, dialogues, and all lesson texts into English when not already translated.
- For bigger reading passages, include a word-by-word breakdown.
- For declension/conjugation/grammar tables, provide a complete table of possibilities relevant to the topic.
- Spell out numbers only when the source token is Albanian; do not spell out English numbers.
4. In the `exercises` output:
- Include every exercise in markdown.
- Do not add a top-level title heading (no `# ...`) because `zk new --title` already sets the note title.
- Translate each exercise to English.
- Solve all non-free-writing tasks (multiple choice, fill in the blanks, etc.) and include example solutions.
- For free-writing tasks, provide expanded examples using basic vocabulary from the lesson (if prompted for 3, provide 10).
- Translate free-writing example answers into English.
- Spell out numbers only when the source token is Albanian; do not spell out English numbers.
Execution steps:
1. Generate two markdown contents in memory (do not create temporary files):
- `MATERIAL_CONTENT`
- `EXERCISES_CONTENT`
2. Set `TODAY="$(date +%F)"` once and reuse it for both notes.
3. Create note 1 with `zk` by piping markdown directly to stdin:
- Title format: `Albanian Lesson Material - YYYY-MM-DD`
- Command pattern:
- `printf "%s\n" "$MATERIAL_CONTENT" | zk new --interactive --title "Albanian Lesson Material - $TODAY" --date "$TODAY" --print-path`
4. Create note 2 with `zk` by piping markdown directly to stdin:
- Title format: `Albanian Lesson Exercises - YYYY-MM-DD`
- Command pattern:
- `printf "%s\n" "$EXERCISES_CONTENT" | zk new --interactive --title "Albanian Lesson Exercises - $TODAY" --date "$TODAY" --print-path`
5. Print both created note paths and a short checklist of what was included.
If no lesson material was provided in `$ARGUMENTS`, stop and ask the user to paste it.

View File

@@ -1,10 +0,0 @@
---
description: Review changes with parallel @code-review subagents
---
Review the code changes using THREE (3) @code-review subagents and correlate results into a summary ranked by severity. Use the provided user guidance to steer the review and focus on specific code paths, changes, and/or areas of concern. Once all three @code-review subagents return their findings and you have correlated and summarized the results, consult the @oracle subagent to perform a deep review on the findings focusing on accuracy and correctness by evaluating the surrounding code, system, subsystems, abstractions, and overall architecture of each item. Apply any recommendations from the oracle. NEVER SKIP ORACLE REVIEW.
Guidance: $ARGUMENTS
First, call `skill({ name: 'vcs-detect' })` to determine whether the repo uses git or jj, then use the appropriate VCS commands throughout.
Review uncommitted changes by default. If no uncommitted changes, review the last commit. If the user provides a pull request/merge request number or link, use CLI tools (gh/glab) to fetch it and then perform your review.

View File

@@ -1,108 +0,0 @@
---
description: Triage inbox one message at a time with himalaya only
---
Process email with strict manual triage using Himalaya only.
Hard requirements:
- Use `himalaya` for every mailbox interaction (folders, listing, reading, moving, deleting, attachments).
- Process exactly one message ID at a time. Never run bulk actions on multiple IDs.
- Do not use pattern-matching commands or searches (`grep`, `rg`, `awk`, `sed`, `himalaya envelope list` query filters, etc.).
- Always inspect current folders first, then triage.
- Treat this as a single deterministic run over a snapshot of message IDs discovered during this run.
- Ingest valuable document attachments into Paperless (see Document Ingestion section below).
Workflow:
1. Run `himalaya folder list` first and use those folders as the primary taxonomy.
2. Use this existing folder set as defaults when it fits:
- `INBOX`
- `Correspondence`
- `Orders and Invoices`
- `Payments`
- `Outgoing Shipments`
- `Newsletters and Marketing`
- `Junk`
- `Deleted Messages`
3. Determine source folder:
- If `$ARGUMENTS` is a single known folder name (matches a folder from step 1), use that as source.
- Otherwise use `INBOX`.
4. Build a run scope safely (see the sketch after this list):
- List with fixed page size `20` and JSON output: `himalaya envelope list -f "<source>" -p 1 -s 20 --output json`.
- Start at page `1`. Enumerate IDs in returned order.
- Process each ID fully before touching the next ID.
- Keep an in-memory reviewed set for this run to avoid reprocessing IDs already handled or intentionally left untouched.
- When all IDs on the current page are in the reviewed set, advance to the next page.
- Stop when a page returns fewer results than the page size (end of folder) and all its IDs are in the reviewed set.
5. For each single envelope ID, do all checks before any move/delete:
- Check envelope flags from the JSON listing (seen/answered/flagged) before reading.
- Read the message: `himalaya message read -f "<source>" <id>`.
- If needed for classification or ingestion, download attachments: `himalaya attachment download -f "<source>" <id> --dir /tmp/himalaya-triage`.
- If the message qualifies for document ingestion (see Document Ingestion below), copy eligible attachments to the Paperless consume directory before cleanup.
- Always `rm` downloaded files from `/tmp/himalaya-triage` after processing (whether ingested or not).
- Move: `himalaya message move -f "<source>" "<destination>" <id>`.
- Delete: `himalaya message delete -f "<source>" <id>`.
6. Classification precedence (higher rule wins on conflict):
- **Actionable and unhandled** — if the message needs a reply, requires manual payment, needs a confirmation, or demands any human action, AND has NOT been replied to (no `answered` flag), leave it in the source folder untouched. This is the highest-priority rule: anything that still needs attention stays in `INBOX`.
- Human correspondence already handled — freeform natural-language messages written by a human that have been replied to (`answered` flag set): move to `Correspondence`.
- Human communication not yet replied to but not clearly actionable — when in doubt whether a human message requires action, leave it untouched.
- Clearly ephemeral automated/system message (alerts, bot/status updates, OTP/2FA, password reset codes, login codes) with no archival value: move to `Deleted Messages`.
- Automatic payment transaction notifications (charge/payment confirmations, receipts, failed-payment notices, provider payment events such as Klarna/PayPal/Stripe) that are purely informational and require no action: move to `Payments`.
- Subscription renewal notifications (auto-renew reminders, "will renew soon", price-change notices without a concrete transaction) are operational alerts, not payment records: move to `Deleted Messages`.
- Installment plan activation notifications (e.g. Barclays installment purchase confirmations) are operational confirmations, not payment records: move to `Deleted Messages`.
- "Kontoauszug verfügbar/ist online" notifications are availability alerts, not payment records: move to `Deleted Messages`.
- Orders/invoices/business records: move to `Orders and Invoices`.
- Shipping/tracking notifications (dispatch confirmations, carrier updates, delivery ETAs) without invoice or order-document value: move to `Deleted Messages`.
- Marketing/newsletters: move to `Newsletters and Marketing`.
- Delivery/submission confirmations for items you shipped outbound: move to `Outgoing Shipments`.
- Long-term but uncategorized messages: create a concise new folder and move there.
7. Folder creation rule:
- Create a new folder only if no existing folder fits and the message should be kept.
- Naming constraints: concise topic name, avoid duplicates, and avoid broad catch-all names.
- Command: `himalaya folder add "<new-folder>"`.
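A minimal sketch of the step-4 paging loop, shown in TypeScript purely for illustration (the actual run issues one `himalaya` command per ID; `listPage` and `triageOne` are hypothetical stand-ins for those single-ID calls):
```typescript
// listPage wraps `himalaya envelope list -f <source> -p <page> -s 20 --output json`;
// triageOne reads, classifies, and moves/deletes exactly one message ID.
declare function listPage(source: string, page: number): Promise<string[]>;
declare function triageOne(source: string, id: string): Promise<void>;

async function runScope(source: string): Promise<void> {
  const PAGE_SIZE = 20;
  const reviewed = new Set<string>(); // IDs handled or intentionally left untouched this run
  let page = 1;
  while (true) {
    const ids = await listPage(source, page);
    const pending = ids.filter((id) => !reviewed.has(id));
    for (const id of pending) {
      await triageOne(source, id); // fully process one ID before touching the next
      reviewed.add(id);
    }
    if (pending.length > 0) continue; // re-list the same page: moves/deletes shift pagination
    if (ids.length < PAGE_SIZE) break; // short page, fully reviewed → end of folder
    page += 1; // page fully reviewed → advance
  }
}
```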
Document Ingestion (Paperless):
- **Purpose**: Automatically archive valuable document attachments into Paperless via its consumption directory.
- **Ingestion path**: `/var/lib/paperless/consume/inbox-triage/`
- **When to ingest**: Only for messages whose attachments have long-term archival value. Eligible categories:
- Invoices, receipts, and billing statements (messages going to `Orders and Invoices` or `Payments`)
- Contracts, agreements, and legal documents
- Tax documents, account statements, and financial summaries
- Insurance documents and policy papers
- Official correspondence with document attachments (government, institutions)
- **When NOT to ingest**:
- Marketing emails, newsletters, promotional material
- Shipping/tracking notifications without invoice attachments
- OTP codes, login alerts, password resets, ephemeral notifications
- Subscription renewal reminders without actual invoices
- Duplicate documents already seen in this run
- Inline images, email signatures, logos, and non-document attachments
- **Eligible file types**: PDF, PNG, JPG/JPEG, TIFF, WEBP (documents and scans only). Skip archive files (ZIP, etc.), calendar invites (ICS), and other non-document formats.
- **Procedure**:
1. After downloading attachments to `/tmp/himalaya-triage`, check if any are eligible documents.
2. Copy eligible files: `cp /tmp/himalaya-triage/<filename> /var/lib/paperless/consume/inbox-triage/`
3. If multiple messages could produce filename collisions, prefix the filename with the message ID: `<id>-<filename>`.
4. Log each ingested file in the action log at the end of the run.
- **Conservative rule**: When in doubt whether an attachment is worth archiving, skip it. Paperless storage is cheap, but noise degrades searchability. Prefer false negatives over false positives for marketing material, but prefer false positives over false negatives for anything that looks like a financial or legal document.
Execution rules:
- Never perform bulk operations. One message ID per `read`, `move`, `delete`, and attachment command.
- Always use page size 20 for envelope listing (`-s 20`).
- If any single-ID command fails, log the error and continue with the next unreviewed ID.
- Never skip reading message content before deciding.
- Keep decisions conservative: when in doubt about whether something needs action, leave it in `INBOX`.
- Never move or delete unhandled actionable messages.
- Never move human communications that haven't been replied to, unless clearly non-actionable.
- Define "processed" as "reviewed once in this run" (including intentionally untouched human messages).
- Include only messages observed during this run's listings; if new mail arrives mid-run, leave it for the next run.
- Report a compact action log at the end with:
- source folder,
- total reviewed IDs,
- counts by action (untouched/moved-to-folder/deleted),
- per-destination-folder counts,
- created folders,
- documents ingested to Paperless (count and filenames),
- short rationale for non-obvious classifications.
<user-request>
$ARGUMENTS
</user-request>

View File

@@ -1,17 +0,0 @@
---
description: Dialogue-driven spec development through skeptical questioning
---
Develop implementation-ready specs through iterative dialogue and skeptical questioning.
First, invoke the skill tool to load the spec-planner skill:
```
skill({ name: 'spec-planner' })
```
Then follow the skill instructions to develop the spec.
<user-request>
$ARGUMENTS
</user-request>

View File

@@ -1,17 +0,0 @@
---
description: Add AI session summary to GitHub PR or GitLab MR description
---
Update the PR/MR description with an AI session export summary.
First, invoke the skill tool to load the session-export skill:
```
skill({ name: 'session-export' })
```
Then follow the skill instructions to export the session summary.
<user-request>
$ARGUMENTS
</user-request>

View File

@@ -1,18 +0,0 @@
import type { Plugin } from "@opencode-ai/plugin";
const GIT_PATTERN = /(?:^|[;&|]\s*|&&\s*|\|\|\s*|\$\(\s*|`\s*)git\s/;
export const BlockGitPlugin: Plugin = async () => {
return {
"tool.execute.before": async (input, output) => {
if (input.tool === "bash") {
const command = output.args.command as string;
if (GIT_PATTERN.test(command)) {
throw new Error(
"This project uses jj, only use `jj` commands, not `git`.",
);
}
}
},
};
};
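A quick sanity check of what the pattern does and does not catch (the sample commands are made up for illustration):
```typescript
// Same pattern as above: `git ` at the start of a command or right after a shell operator.
const GIT_PATTERN = /(?:^|[;&|]\s*|&&\s*|\|\|\s*|\$\(\s*|`\s*)git\s/;

console.log(GIT_PATTERN.test("git status")); // true: blocked
console.log(GIT_PATTERN.test("cd repo && git commit -m 'x'")); // true: blocked
console.log(GIT_PATTERN.test("jj git push")); // false: jj's own git subcommand passes through
```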

View File

@@ -1,19 +0,0 @@
import type { Plugin } from "@opencode-ai/plugin";
// Matches an interpreter invocation (python/perl/ruby/php/lua or inline `bash -c`/`sh -c`)
// at the start of the command or right after a shell operator.
const SCRIPTING_PATTERN =
/(?:^|[;&|]\s*|&&\s*|\|\|\s*|\$\(\s*|`\s*)(?:python[23]?|perl|ruby|php|lua|bash\s+-c|sh\s+-c)\s/;
export const BlockScriptingPlugin: Plugin = async () => {
return {
"tool.execute.before": async (input, output) => {
if (input.tool === "bash") {
const command = output.args.command as string;
if (SCRIPTING_PATTERN.test(command)) {
throw new Error(
"Do not use python, perl, ruby, php, lua, or inline bash/sh for scripting. Use `nu -c` instead.",
);
}
}
},
};
};

View File

@@ -1,16 +0,0 @@
import type { Plugin } from "@opencode-ai/plugin";
export const DirenvPlugin: Plugin = async ({ $ }) => {
return {
"shell.env": async (input, output) => {
try {
// `direnv export json` prints this directory's environment changes as a JSON object
const exported = await $`direnv export json`
.cwd(input.cwd)
.quiet()
.json();
Object.assign(output.env, exported);
} catch {
// direnv missing or export failed: leave the environment untouched
}
},
};
};

View File

@@ -1,41 +0,0 @@
---
name: frontend-design
description: Create distinctive, production-grade frontend interfaces with high design quality. Use this skill when the user asks to build web components, pages, artifacts, posters, or applications (examples include websites, landing pages, dashboards, React components, HTML/CSS layouts, or when styling/beautifying any web UI). Generates creative, polished code and UI design that avoids generic AI aesthetics.
---
This skill guides creation of distinctive, production-grade frontend interfaces that avoid generic "AI slop" aesthetics. Implement real working code with exceptional attention to aesthetic details and creative choices.
The user provides frontend requirements: a component, page, application, or interface to build. They may include context about the purpose, audience, or technical constraints.
## Design Thinking
Before coding, understand the context and commit to a BOLD aesthetic direction:
- **Purpose**: What problem does this interface solve? Who uses it?
- **Tone**: Pick an extreme: brutally minimal, maximalist chaos, retro-futuristic, organic/natural, luxury/refined, playful/toy-like, editorial/magazine, brutalist/raw, art deco/geometric, soft/pastel, industrial/utilitarian, etc. There are so many flavors to choose from. Use these for inspiration but design one that is true to the aesthetic direction.
- **Constraints**: Technical requirements (framework, performance, accessibility).
- **Differentiation**: What makes this UNFORGETTABLE? What's the one thing someone will remember?
**CRITICAL**: Choose a clear conceptual direction and execute it with precision. Bold maximalism and refined minimalism both work - the key is intentionality, not intensity.
Then implement working code (HTML/CSS/JS, React, Vue, etc.) that is:
- Production-grade and functional
- Visually striking and memorable
- Cohesive with a clear aesthetic point-of-view
- Meticulously refined in every detail
## Frontend Aesthetics Guidelines
Focus on:
- **Typography**: Choose fonts that are beautiful, unique, and interesting. Avoid generic fonts like Arial and Inter; opt instead for unexpected, characterful choices that elevate the frontend's aesthetics. Pair a distinctive display font with a refined body font.
- **Color & Theme**: Commit to a cohesive aesthetic. Use CSS variables for consistency. Dominant colors with sharp accents outperform timid, evenly-distributed palettes.
- **Motion**: Use animations for effects and micro-interactions. Prioritize CSS-only solutions for HTML. Use Motion library for React when available. Focus on high-impact moments: one well-orchestrated page load with staggered reveals (animation-delay) creates more delight than scattered micro-interactions. Use scroll-triggering and hover states that surprise.
- **Spatial Composition**: Unexpected layouts. Asymmetry. Overlap. Diagonal flow. Grid-breaking elements. Generous negative space OR controlled density.
- **Backgrounds & Visual Details**: Create atmosphere and depth rather than defaulting to solid colors. Add contextual effects and textures that match the overall aesthetic. Apply creative forms like gradient meshes, noise textures, geometric patterns, layered transparencies, dramatic shadows, decorative borders, custom cursors, and grain overlays.
NEVER use generic AI-generated aesthetics like overused font families (Inter, Roboto, Arial, system fonts), cliched color schemes (particularly purple gradients on white backgrounds), predictable layouts and component patterns, and cookie-cutter design that lacks context-specific character.
Interpret creatively and make unexpected choices that feel genuinely designed for the context. No design should be the same. Vary between light and dark themes, different fonts, different aesthetics. NEVER converge on common choices (Space Grotesk, for example) across generations.
**IMPORTANT**: Match implementation complexity to the aesthetic vision. Maximalist designs need elaborate code with extensive animations and effects. Minimalist or refined designs need restraint, precision, and careful attention to spacing, typography, and subtle details. Elegance comes from executing the vision well.
Remember: Claude is capable of extraordinary creative work. Don't hold back, show what can truly be created when thinking outside the box and committing fully to a distinctive vision.

View File

@@ -1,123 +0,0 @@
---
name: librarian
description: Multi-repository codebase exploration. Research library internals, find code patterns, understand architecture, compare implementations across GitHub/npm/PyPI/crates. Use when needing deep understanding of how libraries work, finding implementations across open source, or exploring remote repository structure.
references:
- references/tool-routing.md
- references/opensrc-api.md
- references/opensrc-examples.md
- references/linking.md
- references/diagrams.md
---
# Librarian Skill
Deep codebase exploration across remote repositories.
## How to Use This Skill
### Reference Structure
| File | Purpose | When to Read |
|------|---------|--------------|
| `tool-routing.md` | Tool selection decision trees | **Always read first** |
| `opensrc-api.md` | API reference, types | Writing opensrc code |
| `opensrc-examples.md` | JavaScript patterns, workflows | Implementation examples |
| `linking.md` | GitHub URL patterns | Formatting responses |
| `diagrams.md` | Mermaid patterns | Visualizing architecture |
### Reading Order
1. **Start** with `tool-routing.md` → choose tool strategy
2. **If using opensrc:**
- Read `opensrc-api.md` for API details
- Read `opensrc-examples.md` for patterns
3. **Before responding:** `linking.md` + `diagrams.md` for output formatting
## Tool Arsenal
| Tool | Best For | Limitations |
|------|----------|-------------|
| **grep_app** | Find patterns across ALL public GitHub | Literal search only |
| **context7** | Library docs, API examples, usage | Known libraries only |
| **opensrc** | Fetch full source for deep exploration | Must fetch before read |
## Quick Decision Trees
### "How does X work?"
```
Known library?
├─ Yes → context7.resolve-library-id → context7.query-docs
│ └─ Need internals? → opensrc.fetch → read source
└─ No → grep_app search → opensrc.fetch top result
```
### "Find pattern X"
```
Specific repo?
├─ Yes → opensrc.fetch → opensrc.grep → read matches
└─ No → grep_app (broad) → opensrc.fetch interesting repos
```
### "Explore repo structure"
```
1. opensrc.fetch(target)
2. opensrc.tree(source.name) → quick overview
3. opensrc.files(source.name, "**/*.ts") → detailed listing
4. Read: README, package.json, src/index.*
5. Create architecture diagram (see diagrams.md)
```
### "Compare X vs Y"
```
1. opensrc.fetch(["X", "Y"])
2. Use source.name from results for subsequent calls
3. opensrc.grep(pattern, { sources: [nameX, nameY] })
4. Read comparable files, synthesize differences
```
## Critical: Source Naming Convention
**After fetching, always use `source.name` for subsequent calls:**
```javascript
const [{ source }] = await opensrc.fetch("vercel/ai");
const files = await opensrc.files(source.name, "**/*.ts");
```
| Type | Fetch Spec | Source Name |
|------|------------|-------------|
| npm | `"zod"` | `"zod"` |
| npm scoped | `"@tanstack/react-query"` | `"@tanstack/react-query"` |
| pypi | `"pypi:requests"` | `"requests"` |
| crates | `"crates:serde"` | `"serde"` |
| GitHub | `"vercel/ai"` | `"github.com/vercel/ai"` |
| GitLab | `"gitlab:org/repo"` | `"gitlab.com/org/repo"` |
## When NOT to Use opensrc
| Scenario | Use Instead |
|----------|-------------|
| Simple library API questions | context7 |
| Finding examples across many repos | grep_app |
| Very large monorepos (>10GB) | Clone locally |
| Private repositories | Direct access |
## Output Guidelines
1. **Comprehensive final message** - only last message returns to main agent
2. **Parallel tool calls** - maximize efficiency
3. **Link every file reference** - see `linking.md`
4. **Diagram complex relationships** - see `diagrams.md`
5. **Never mention tool names** - say "I'll search" not "I'll use opensrc"
## References
- [Tool Routing Decision Trees](references/tool-routing.md)
- [opensrc API Reference](references/opensrc-api.md)
- [opensrc Code Examples](references/opensrc-examples.md)
- [GitHub Linking Patterns](references/linking.md)
- [Mermaid Diagram Patterns](references/diagrams.md)

View File

@@ -1,51 +0,0 @@
# Mermaid Diagram Patterns
Create diagrams for:
- Architecture (component relationships)
- Data flow (request → response)
- Dependencies (import graph)
- Sequences (step-by-step processes)
## Architecture
```mermaid
graph TD
A[Client] --> B[API Gateway]
B --> C[Auth Service]
B --> D[Data Service]
D --> E[(Database)]
```
## Flow
```mermaid
flowchart LR
Input --> Parse --> Validate --> Transform --> Output
```
## Sequence
```mermaid
sequenceDiagram
Client->>+Server: Request
Server->>+DB: Query
DB-->>-Server: Result
Server-->>-Client: Response
```
## When to Use
| Type | Use For |
|------|---------|
| `graph TD` | Component hierarchy, dependencies |
| `flowchart LR` | Data transformation, pipelines |
| `sequenceDiagram` | Request/response, multi-party interaction |
| `classDiagram` | Type relationships, inheritance |
| `stateDiagram` | State machines, lifecycle |
## Tips
- Keep nodes short (3-4 words max)
- Use subgraphs for grouping related components
- Arrow labels for relationship types
- Prefer LR (left-right) for flows, TD (top-down) for hierarchies

View File

@@ -1,61 +0,0 @@
# GitHub Linking Patterns
All file/dir/code refs → fluent markdown links. Never raw URLs.
## URL Formats
### File
```
https://github.com/{owner}/{repo}/blob/{ref}/{path}
```
### File + Lines
```
https://github.com/{owner}/{repo}/blob/{ref}/{path}#L{start}-L{end}
```
### Directory
```
https://github.com/{owner}/{repo}/tree/{ref}/{path}
```
### GitLab (note `/-/blob/`)
```
https://gitlab.com/{owner}/{repo}/-/blob/{ref}/{path}
```
## Ref Resolution
| Source | Use as ref |
|--------|------------|
| Known version | `v{version}` |
| Default branch | `main` or `master` |
| opensrc fetch | ref from result |
| Specific commit | full SHA |
## Examples
### Correct
```markdown
The [`parseAsync`](https://github.com/colinhacks/zod/blob/main/src/types.ts#L450-L480) method handles...
```
### Wrong
```markdown
See https://github.com/colinhacks/zod/blob/main/src/types.ts#L100
The parseAsync method in src/types.ts handles...
```
## Line Numbers
- Single: `#L42`
- Range: `#L42-L50`
- Prefer ranges for context (2-5 lines around key code)
## Registry → GitHub
| Registry | Find repo in |
|----------|--------------|
| npm | `package.json` `repository` field |
| PyPI | `pyproject.toml` or `setup.py` |
| crates | `Cargo.toml` |
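A minimal sketch of that npm lookup, assuming the package was already fetched with opensrc (e.g. `zod`) and using npm's convention that `repository` is either a string or an object with a `url` field:
```typescript
async () => {
  const pkg = JSON.parse(await opensrc.read("zod", "package.json"));
  // Normalize the two allowed shapes of the repository field
  const repo = typeof pkg.repository === "string" ? pkg.repository : (pkg.repository?.url ?? "");
  // e.g. "git+https://github.com/colinhacks/zod.git": strip "git+" and ".git" to build blob/tree links
  return repo;
}
```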

View File

@@ -1,235 +0,0 @@
# opensrc API Reference
## Tool
Use the **opensrc MCP server** via single tool:
| Tool | Purpose |
|------|---------|
| `opensrc_execute` | All operations (fetch, read, grep, files, remove, etc.) |
Takes a `code` parameter: a JavaScript async arrow function executed server-side. Source trees stay on the server; only results are returned.
## API Surface
### Read Operations
```typescript
// List all fetched sources
opensrc.list(): Source[]
// Check if source exists
opensrc.has(name: string, version?: string): boolean
// Get source metadata
opensrc.get(name: string): Source | undefined
// List files with optional glob
opensrc.files(sourceName: string, glob?: string): Promise<FileEntry[]>
// Get directory tree structure (default depth: 3)
opensrc.tree(sourceName: string, options?: { depth?: number }): Promise<TreeNode>
// Regex search file contents
opensrc.grep(pattern: string, options?: GrepOptions): Promise<GrepResult[]>
// AST-based semantic code search
opensrc.astGrep(sourceName: string, pattern: string, options?: AstGrepOptions): Promise<AstGrepMatch[]>
// Read single file
opensrc.read(sourceName: string, filePath: string): Promise<string>
// Batch read multiple files (supports globs!)
opensrc.readMany(sourceName: string, paths: string[]): Promise<Record<string, string>>
// Parse fetch spec
opensrc.resolve(spec: string): Promise<ParsedSpec>
```
### Mutation Operations
```typescript
// Fetch packages/repos
opensrc.fetch(specs: string | string[], options?: { modify?: boolean }): Promise<FetchedSource[]>
// Remove sources
opensrc.remove(names: string[]): Promise<RemoveResult>
// Clean by type
opensrc.clean(options?: CleanOptions): Promise<RemoveResult>
```
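For example, a small sketch of a cleanup that drops only fetched repos while keeping registry packages, assuming `repos: true` selects repo-type sources for removal (see `CleanOptions` below):
```typescript
async () => {
  // Remove all repo-type sources (GitHub/GitLab); npm/pypi/crates packages stay fetched
  const result = await opensrc.clean({ repos: true });
  return result.removed; // names of the sources that were removed
}
```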
## Types
### Source
```typescript
interface Source {
type: "npm" | "pypi" | "crates" | "repo";
name: string; // Use this for all subsequent calls
version?: string;
ref?: string;
path: string;
fetchedAt: string;
repository: string;
}
```
### FetchedSource
```typescript
interface FetchedSource {
source: Source; // IMPORTANT: use source.name for subsequent calls
alreadyExists: boolean;
}
```
### GrepOptions
```typescript
interface GrepOptions {
sources?: string[]; // Filter to specific sources
include?: string; // File glob pattern (e.g., "*.ts")
maxResults?: number; // Limit results (default: 100)
}
```
### GrepResult
```typescript
interface GrepResult {
source: string;
file: string;
line: number;
content: string;
}
```
### AstGrepOptions
```typescript
interface AstGrepOptions {
glob?: string; // File glob pattern (e.g., "**/*.ts")
lang?: string | string[]; // Language(s): "js", "ts", "tsx", "html", "css"
limit?: number; // Max results (default: 1000)
}
```
### AstGrepMatch
```typescript
interface AstGrepMatch {
file: string;
line: number;
column: number;
endLine: number;
endColumn: number;
text: string; // Matched code text
metavars: Record<string, string>; // Captured $VAR → text
}
```
#### AST Pattern Syntax
| Pattern | Matches |
|---------|---------|
| `$NAME` | Single node, captures to metavars |
| `$$$ARGS` | Zero or more nodes (variadic), captures |
| `$_` | Single node, no capture |
| `$$$` | Zero or more nodes, no capture |
### FileEntry
```typescript
interface FileEntry {
path: string;
size: number;
isDirectory: boolean;
}
```
### TreeNode
```typescript
interface TreeNode {
name: string;
type: "file" | "dir";
children?: TreeNode[]; // only for dirs
}
```
### CleanOptions
```typescript
interface CleanOptions {
packages?: boolean;
repos?: boolean;
npm?: boolean;
pypi?: boolean;
crates?: boolean;
}
```
### RemoveResult
```typescript
interface RemoveResult {
success: boolean;
removed: string[];
}
```
## Error Handling
Operations throw on errors. Wrap in try/catch if needed:
```javascript
async () => {
try {
const content = await opensrc.read("zod", "missing.ts");
return content;
} catch (e) {
return { error: e.message };
}
}
```
`readMany` returns errors as string values prefixed with `[Error:`:
```javascript
const files = await opensrc.readMany("zod", ["exists.ts", "missing.ts"]);
// { "exists.ts": "content...", "missing.ts": "[Error: ENOENT...]" }
// Filter successful reads
const successful = Object.entries(files)
.filter(([_, content]) => !content.startsWith("[Error:"));
```
## Package Spec Formats
| Format | Example | Source Name After Fetch |
|--------|---------|------------------------|
| `<name>` | `"zod"` | `"zod"` |
| `<name>@<version>` | `"zod@3.22.0"` | `"zod"` |
| `pypi:<name>` | `"pypi:requests"` | `"requests"` |
| `crates:<name>` | `"crates:serde"` | `"serde"` |
| `owner/repo` | `"vercel/ai"` | `"github.com/vercel/ai"` |
| `owner/repo@ref` | `"vercel/ai@v1.0.0"` | `"github.com/vercel/ai"` |
| `gitlab:owner/repo` | `"gitlab:org/repo"` | `"gitlab.com/org/repo"` |
## Critical Pattern
**Always capture `source.name` from fetch results:**
```javascript
async () => {
const [{ source }] = await opensrc.fetch("vercel/ai");
// GitHub repos: "vercel/ai" → "github.com/vercel/ai"
const sourceName = source.name;
// Use sourceName for ALL subsequent calls
const files = await opensrc.files(sourceName, "src/**/*.ts");
return files;
}
```

View File

@@ -1,336 +0,0 @@
# opensrc Code Examples
## Workflow: Fetch → Explore
### Basic Fetch and Explore with tree()
```javascript
async () => {
const [{ source }] = await opensrc.fetch("vercel/ai");
// Get directory structure first
const tree = await opensrc.tree(source.name, { depth: 2 });
return tree;
}
```
### Fetch and Read Key Files
```javascript
async () => {
const [{ source }] = await opensrc.fetch("vercel/ai");
const sourceName = source.name; // "github.com/vercel/ai"
const files = await opensrc.readMany(sourceName, [
"package.json",
"README.md",
"src/index.ts"
]);
return { sourceName, files };
}
```
### readMany with Globs
```javascript
async () => {
const [{ source }] = await opensrc.fetch("zod");
// Read all package.json files in monorepo
const files = await opensrc.readMany(source.name, [
"packages/*/package.json" // globs supported!
]);
return Object.keys(files);
}
```
### Batch Fetch Multiple Packages
```javascript
async () => {
const results = await opensrc.fetch(["zod", "valibot", "yup"]);
const names = results.map(r => r.source.name);
// Compare how each handles string validation
const comparisons = {};
for (const name of names) {
const matches = await opensrc.grep("string.*validate|validateString", {
sources: [name],
include: "*.ts",
maxResults: 10
});
comparisons[name] = matches.map(m => `${m.file}:${m.line}`);
}
return comparisons;
}
```
## Search Patterns
### Grep → Read Context
```javascript
async () => {
const matches = await opensrc.grep("export function parse\\(", {
sources: ["zod"],
include: "*.ts"
});
if (matches.length === 0) return "No matches";
const match = matches[0];
const content = await opensrc.read(match.source, match.file);
const lines = content.split("\n");
// Return 40 lines starting from match
return {
file: match.file,
code: lines.slice(match.line - 1, match.line + 39).join("\n")
};
}
```
### Search Across All Fetched Sources
```javascript
async () => {
const sources = opensrc.list();
const results = {};
for (const source of sources) {
const errorHandling = await opensrc.grep("throw new|catch \\(|\\.catch\\(", {
sources: [source.name],
include: "*.ts",
maxResults: 20
});
results[source.name] = {
type: source.type,
errorPatterns: errorHandling.length
};
}
return results;
}
```
## AST-Based Search
Use `astGrep` for semantic code search with pattern matching.
### Find Function Declarations
```javascript
async () => {
const [{ source }] = await opensrc.fetch("lodash");
const fns = await opensrc.astGrep(source.name, "function $NAME($$$ARGS) { $$$BODY }", {
lang: "js",
limit: 20
});
return fns.map(m => ({
file: m.file,
line: m.line,
name: m.metavars.NAME
}));
}
```
### Find React Hooks Usage
```javascript
async () => {
const [{ source }] = await opensrc.fetch("vercel/ai");
const stateHooks = await opensrc.astGrep(
source.name,
"const [$STATE, $SETTER] = useState($$$INIT)",
{ lang: ["ts", "tsx"], limit: 50 }
);
return stateHooks.map(m => ({
file: m.file,
state: m.metavars.STATE,
setter: m.metavars.SETTER
}));
}
```
### Find Class Definitions with Context
```javascript
async () => {
const [{ source }] = await opensrc.fetch("zod");
const classes = await opensrc.astGrep(source.name, "class $NAME", {
glob: "**/*.ts"
});
const details = [];
for (const cls of classes.slice(0, 5)) {
const content = await opensrc.read(source.name, cls.file);
const lines = content.split("\n");
details.push({
name: cls.metavars.NAME,
file: cls.file,
preview: lines.slice(cls.line - 1, cls.line + 9).join("\n")
});
}
return details;
}
```
### Compare Export Patterns Across Libraries
```javascript
async () => {
const results = await opensrc.fetch(["zod", "valibot"]);
const names = results.map(r => r.source.name);
const exports = {};
for (const name of names) {
const matches = await opensrc.astGrep(name, "export const $NAME = $_", {
lang: "ts",
limit: 30
});
exports[name] = matches.map(m => m.metavars.NAME);
}
return exports;
}
```
### grep vs astGrep
| Use Case | Tool |
|----------|------|
| Text/regex pattern | `grep` |
| Function declarations | `astGrep`: `function $NAME($$$) { $$$ }` |
| Arrow functions | `astGrep`: `const $N = ($$$) => $_` |
| Class definitions | `astGrep`: `class $NAME extends $PARENT` |
| Import statements | `astGrep`: `import { $$$IMPORTS } from "$MOD"` |
| JSX components | `astGrep`: `<$COMP $$$PROPS />` |
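For instance, the import pattern from the table above, as a sketch against a freshly fetched source (`zod` here, following the fetch-then-`source.name` convention from the earlier examples):
```typescript
async () => {
  const [{ source }] = await opensrc.fetch("zod");
  // Structural match on import statements rather than a text regex
  const imports = await opensrc.astGrep(source.name, 'import { $$$IMPORTS } from "$MOD"', {
    lang: "ts",
    limit: 20
  });
  return imports.map((m) => ({ file: m.file, line: m.line, statement: m.text }));
}
```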
## Repository Exploration
### Find Entry Points
```javascript
async () => {
const name = "github.com/vercel/ai";
const allFiles = await opensrc.files(name, "**/*.{ts,js}");
const entryPoints = allFiles.filter(f =>
f.path.match(/^(src\/)?(index|main|mod)\.(ts|js)$/) ||
f.path.includes("/index.ts")
);
// Read all entry points
const contents = {};
for (const ep of entryPoints.slice(0, 5)) {
contents[ep.path] = await opensrc.read(name, ep.path);
}
return {
totalFiles: allFiles.length,
entryPoints: entryPoints.map(f => f.path),
contents
};
}
```
### Explore Package Structure
```javascript
async () => {
const name = "zod";
// Get all TypeScript files
const tsFiles = await opensrc.files(name, "**/*.ts");
// Group by directory
const byDir = {};
for (const f of tsFiles) {
const dir = f.path.split("/").slice(0, -1).join("/") || ".";
byDir[dir] = (byDir[dir] || 0) + 1;
}
// Read key files
const pkg = await opensrc.read(name, "package.json");
const readme = await opensrc.read(name, "README.md");
return {
structure: byDir,
package: JSON.parse(pkg),
readmePreview: readme.slice(0, 500)
};
}
```
## Batch Operations
### Read Many with Error Handling
```javascript
async () => {
const files = await opensrc.readMany("zod", [
"src/index.ts",
"src/types.ts",
"src/ZodError.ts",
"src/helpers/parseUtil.ts"
]);
// files is Record<string, string> - errors start with "[Error:"
const successful = Object.entries(files)
.filter(([_, content]) => !content.startsWith("[Error:"))
.map(([path, content]) => ({ path, lines: content.split("\n").length }));
return successful;
}
```
### Parallel Grep Across Multiple Sources
```javascript
async () => {
const targets = ["zod", "valibot"];
const pattern = "export (type|interface)";
const results = await Promise.all(
targets.map(async (name) => {
const matches = await opensrc.grep(pattern, {
sources: [name],
include: "*.ts",
maxResults: 50
});
return { name, count: matches.length, matches };
})
);
return results;
}
```
## Workflow Checklist
### Comprehensive Repository Analysis
```
Repository Analysis Progress:
- [ ] 1. Fetch repository
- [ ] 2. Read package.json + README
- [ ] 3. Identify entry points (src/index.*)
- [ ] 4. Read main entry file
- [ ] 5. Map exports and public API
- [ ] 6. Trace key functionality
- [ ] 7. Create architecture diagram
```
### Library Comparison
```
Comparison Progress:
- [ ] 1. Fetch all libraries
- [ ] 2. Grep for target pattern in each
- [ ] 3. Read matching implementations
- [ ] 4. Create comparison table
- [ ] 5. Synthesize findings
```

View File

@@ -1,109 +0,0 @@
# Tool Routing
## Decision Flowchart
```mermaid
graph TD
Q[User Query] --> T{Query Type?}
T -->|Understand/Explain| U[UNDERSTAND]
T -->|Find/Search| F[FIND]
T -->|Explore/Architecture| E[EXPLORE]
T -->|Compare| C[COMPARE]
U --> U1{Known library?}
U1 -->|Yes| U2[context7.resolve-library-id]
U2 --> U3[context7.query-docs]
U3 --> U4{Need source?}
U4 -->|Yes| U5[opensrc.fetch → read]
U1 -->|No| U6[grep_app → opensrc.fetch]
F --> F1{Specific repo?}
F1 -->|Yes| F2[opensrc.fetch → grep → read]
F1 -->|No| F3[grep_app broad search]
F3 --> F4[opensrc.fetch interesting repos]
E --> E1[opensrc.fetch]
E1 --> E2[opensrc.files]
E2 --> E3[Read entry points]
E3 --> E4[Create diagram]
C --> C1["opensrc.fetch([X, Y])"]
C1 --> C2[grep same pattern]
C2 --> C3[Read comparable files]
C3 --> C4[Synthesize comparison]
```
## Query Type Detection
| Keywords | Query Type | Start With |
|----------|------------|------------|
| "how does", "why does", "explain", "purpose of" | UNDERSTAND | context7 |
| "find", "where is", "implementations of", "examples of" | FIND | grep_app |
| "explore", "walk through", "architecture", "structure" | EXPLORE | opensrc |
| "compare", "vs", "difference between" | COMPARE | opensrc |
## UNDERSTAND Queries
```
Known library? → context7.resolve-library-id → context7.query-docs
└─ Need source? → opensrc.fetch → read
Unknown? → grep_app search → opensrc.fetch top result → read
```
**When to transition context7 → opensrc:**
- Need implementation details (not just API docs)
- Question about internals/private methods
- Tracing code flow through library
## FIND Queries
```
Specific repo? → opensrc.fetch → opensrc.grep → read matches
Broad search? → grep_app → analyze → opensrc.fetch interesting repos
```
**grep_app query tips:**
- Use literal code patterns: `useState(` not "react hooks"
- Filter by language: `language: ["TypeScript"]`
- Narrow by repo: `repo: "vercel/"` for org
## EXPLORE Queries
```
1. opensrc.fetch(target)
2. opensrc.files → understand structure
3. Identify entry points: README, package.json, src/index.*
4. Read entry → internals
5. Create architecture diagram
```
## COMPARE Queries
```
1. opensrc.fetch([X, Y])
2. Extract source.name from each result
3. opensrc.grep same pattern in both
4. Read comparable files
5. Synthesize → comparison table
```
## Tool Capabilities
| Tool | Best For | Not For |
|------|----------|---------|
| **grep_app** | Broad search, unknown scope, finding repos | Semantic queries |
| **context7** | Library APIs, best practices, common patterns | Library internals |
| **opensrc** | Deep exploration, reading internals, tracing flow | Initial discovery |
## Anti-patterns
| Don't | Do |
|-------|-----|
| grep_app for known library docs | context7 first |
| opensrc.fetch before knowing target | grep_app to discover |
| Multiple small reads | opensrc.readMany batch |
| Describe without linking | Link every file ref |
| Text for complex relationships | Mermaid diagram |
| Use tool names in responses | "I'll search..." not "I'll use opensrc" |

View File

@@ -1,122 +0,0 @@
---
name: session-export
description: Update GitHub PR descriptions with AI session export summaries. Use when user asks to add session summary to PR/MR, document AI assistance in PR/MR, or export conversation summary to PR/MR description.
---
# Session Export
Update PR/MR descriptions with a structured summary of the AI-assisted conversation.
## Output Format
````markdown
> [!NOTE]
> This PR was written with AI assistance.
<details><summary>AI Session Export</summary>
<p>
```json
{
"info": {
"title": "<brief task description>",
"agent": "opencode",
"models": ["<model(s) used>"]
},
"summary": [
"<action 1>",
"<action 2>",
...
]
}
```
</p>
</details>
````
## Workflow
### 1. Export Session Data
Get session data using OpenCode CLI:
```bash
opencode export [sessionID]
```
Returns JSON with session info including models used. Use current session if no sessionID provided.
### 2. Generate Summary JSON
From exported data and conversation context, create summary:
- **title**: 2-5 word task description (lowercase)
- **agent**: always "opencode"
- **models**: array from export data
- **summary**: array of terse action statements
- Use past tense ("added", "fixed", "created")
- Start with "user requested..." or "user asked..."
- Chronological order
- Attempt to keep the summary to a max of 25 turns ("user requested", "agent did")
- **NEVER include sensitive data**: API keys, credentials, secrets, tokens, passwords, env vars
### 3. Update PR/MR Description
**GitHub:**
```bash
gh pr edit <PR_NUMBER> --body "$(cat <<'EOF'
<existing description>
> [!NOTE]
> This PR was written with AI assistance.
<details><summary>AI Session Export</summary>
...
</details>
EOF
)"
```
### 4. Preserve Existing Content
Always fetch and preserve existing PR/MR description:
```bash
# GitHub
gh pr view <PR_NUMBER> --json body -q '.body'
```
Append the session export after the existing content, separated by a blank line.
## Example Summary
For a session where user asked to add dark mode:
```json
{
"info": {
"title": "dark mode implementation",
"agent": "opencode",
"models": ["claude sonnet 4"]
},
"summary": [
"user requested dark mode toggle in settings",
"agent explored existing theme system",
"agent created ThemeContext for state management",
"agent added DarkModeToggle component",
"agent updated CSS variables for dark theme",
"agent ran tests and fixed 2 failures",
"agent committed changes"
]
}
```
## Security
**NEVER include in summary:**
- API keys, tokens, secrets
- Passwords, credentials
- Environment variable values
- Private URLs with auth tokens
- Personal identifiable information
- Internal hostnames/IPs

View File

@@ -1,115 +1,131 @@
{
{inputs, ...}: {
den.aspects.ai-tools.homeManager = {
config,
pkgs,
inputs',
...
}: {
home.packages = [
inputs'.llm-agents.packages.claude-code
inputs'.llm-agents.packages.pi
inputs'.llm-agents.packages.codex
pkgs.cog-cli
];
programs.opencode = {
enable = true;
package = inputs'.llm-agents.packages.opencode;
settings = {
model = "anthropic/claude-opus-4-6";
small_model = "anthropic/claude-haiku-4-5";
theme = "rosepine";
plugin = ["opencode-anthropic-auth@latest"];
permission = {
read = {
"*" = "allow";
"*.env" = "deny";
"*.env.*" = "deny";
"*.envrc" = "deny";
"secrets/*" = "deny";
home.file = {
".pi/agent/extensions/pi-elixir" = {
source = inputs.pi-elixir;
recursive = true;
};
".pi/agent/extensions/pi-mcp-adapter" = {
source = "${pkgs.pi-mcp-adapter}/lib/node_modules/pi-mcp-adapter";
recursive = true;
};
agent = {
plan = {
model = "anthropic/claude-opus-4-6";
".pi/agent/skills/elixir-dev" = {
source = "${inputs.pi-elixir}/skills/elixir-dev";
recursive = true;
};
explore = {
model = "anthropic/claude-haiku-4-5";
".pi/agent/themes" = {
source = "${inputs.pi-rose-pine}/themes";
recursive = true;
};
".pi/agent/extensions/no-git.ts".text = ''
/**
* No Git Extension
*
* Blocks git commands and tells the LLM to use jj (Jujutsu) instead.
*/
import type { ExtensionAPI } from "@mariozechner/pi-coding-agent";
import { isToolCallEventType } from "@mariozechner/pi-coding-agent";
export default function (pi: ExtensionAPI) {
pi.on("tool_call", async (event, _ctx) => {
if (!isToolCallEventType("bash", event)) return;
const command = event.input.command.trim();
if (/\bgit\b/.test(command) && !/\bjj\s+git\b/.test(command)) {
return {
block: true,
reason: "git is not used in this project. Use jj (Jujutsu) instead.",
};
instructions = [
"CLAUDE.md"
"AGENT.md"
# "AGENTS.md"
"AGENTS.local.md"
}
});
}
'';
".pi/agent/extensions/no-scripting.ts".text = ''
/**
* No Scripting Extension
*
* Blocks python, perl, ruby, php, lua, and inline bash/sh scripts.
* Tells the LLM to use `nu -c` instead.
*/
import type { ExtensionAPI } from "@mariozechner/pi-coding-agent";
import { isToolCallEventType } from "@mariozechner/pi-coding-agent";
const SCRIPTING_PATTERN =
/(?:^|[;&|]\s*|&&\s*|\|\|\s*|\$\(\s*|`\s*)(?:python[23]?|perl|ruby|php|lua|bash\s+-c|sh\s+-c)\s/;
export default function (pi: ExtensionAPI) {
pi.on("tool_call", async (event, _ctx) => {
if (!isToolCallEventType("bash", event)) return;
const command = event.input.command.trim();
if (SCRIPTING_PATTERN.test(command)) {
return {
block: true,
reason:
"Do not use python, perl, ruby, php, lua, or inline bash/sh for scripting. Use `nu -c` instead.",
};
}
});
}
'';
".pi/agent/settings.json".text =
builtins.toJSON {
lastChangelogVersion = "0.61.1";
theme = "rose-pine-dawn";
hideThinkingBlock = true;
defaultProvider = "anthropic";
defaultModel = "claude-opus-4-6";
defaultThinkingLevel = "high";
packages = [
{
source = "${pkgs.pi-agent-stuff}/lib/node_modules/mitsupi";
extensions = [
"pi-extensions/answer.ts"
"pi-extensions/context.ts"
"pi-extensions/multi-edit.ts"
"pi-extensions/review.ts"
"pi-extensions/todos.ts"
];
skills = [];
prompts = [];
themes = [];
}
];
formatter = {
mix = {
disabled = true;
};
};
mcp = {
".pi/agent/mcp.json".text =
builtins.toJSON {
mcpServers = {
opensrc = {
enabled = true;
type = "local";
command = ["node" "/home/cschmatzler/.bun/bin/opensrc-mcp"];
command = "npx";
args = ["-y" "opensrc-mcp"];
};
context7 = {
enabled = true;
type = "remote";
url = "https://mcp.context7.com/mcp";
};
grep_app = {
enabled = true;
type = "remote";
url = "https://mcp.grep.app";
};
sentry = {
enabled = true;
type = "remote";
url = "https://mcp.sentry.dev/mcp";
oauth = {};
auth = "oauth";
};
};
};
};
systemd.user.services.opencode-server = {
Unit = {
Description = "OpenCode AI server";
After = ["default.target"];
};
Service = {
ExecStart = "${inputs'.llm-agents.packages.opencode}/bin/opencode serve --port 18822 --hostname 0.0.0.0";
Restart = "on-failure";
RestartSec = 5;
Environment = "PATH=${config.home.profileDirectory}/bin:/run/current-system/sw/bin";
};
Install = {
WantedBy = ["default.target"];
};
};
xdg.configFile = {
"opencode/agent" = {
source = ./_opencode/agent;
recursive = true;
};
"opencode/command" = {
source = ./_opencode/command;
recursive = true;
};
"opencode/skill" = {
source = ./_opencode/skill;
recursive = true;
};
"opencode/tool" = {
source = ./_opencode/tool;
recursive = true;
};
"opencode/plugin" = {
source = ./_opencode/plugin;
recursive = true;
};
"opencode/AGENTS.md".source = ./_opencode/AGENTS.md;
};
};
}

View File

@@ -54,6 +54,18 @@
inputs.nixpkgs.follows = "nixpkgs";
};
llm-agents.url = "github:numtide/llm-agents.nix";
pi-agent-stuff = {
url = "github:mitsuhiko/agent-stuff";
flake = false;
};
pi-elixir = {
url = "github:dannote/pi-elixir";
flake = false;
};
pi-rose-pine = {
url = "github:zenobi-us/pi-rose-pine";
flake = false;
};
# Overlay inputs
himalaya.url = "github:pimalaya/himalaya";
jj-ryu = {

View File

@@ -73,6 +73,34 @@
};
};
})
# pi-agent-stuff (mitsuhiko)
(final: prev: {
pi-agent-stuff =
prev.buildNpmPackage {
pname = "pi-agent-stuff";
version = "1.5.0";
src = inputs.pi-agent-stuff;
npmDepsHash = "sha256-pyXMNdlie8vAkhz2f3GUGT3CCYuwt+xkWnsijBajXIo=";
dontNpmBuild = true;
};
})
# pi-mcp-adapter
(final: prev: {
pi-mcp-adapter =
prev.buildNpmPackage {
pname = "pi-mcp-adapter";
version = "2.2.0";
src =
prev.fetchFromGitHub {
owner = "nicobailon";
repo = "pi-mcp-adapter";
rev = "v2.2.0";
hash = "sha256-E6Kf+OyTN/pF8pKADJO0B1+buAPqNcXnZl9ssZwSP8U=";
};
npmDepsHash = "sha256-myJ9h/zC/KDddt8NOVvJjjqbnkdEN4ZR+okCR5nu7hM=";
dontNpmBuild = true;
};
})
# jj-starship (passes through upstream overlay)
inputs.jj-starship.overlays.default
# zjstatus

View File

@@ -17,20 +17,9 @@
];
homeManager = {
config,
inputs',
...
}: let
opencode = inputs'.llm-agents.packages.opencode;
in {
programs.home-manager.enable = true;
programs.git.settings.user.email = "christoph@schmatzler.com";
programs.opencode.settings.permission.external_directory = {
"/tmp/himalaya-triage/*" = "allow";
"/var/lib/paperless/consume/inbox-triage/*" = "allow";
};
programs.nushell.extraConfig = ''
if $nu.is-interactive and ('SSH_CONNECTION' in ($env | columns)) and ('ZELLIJ' not-in ($env | columns)) {
try {
@@ -41,30 +30,6 @@
}
}
'';
systemd.user.services.opencode-inbox-triage = {
Unit = {
Description = "OpenCode inbox triage";
};
Service = {
Type = "oneshot";
ExecStart = "${opencode}/bin/opencode run --command inbox-triage --model opencode-go/glm-5";
Environment = "PATH=${config.home.profileDirectory}/bin:/run/current-system/sw/bin";
};
};
systemd.user.timers.opencode-inbox-triage = {
Unit = {
Description = "Run OpenCode inbox triage every 12 hours";
};
Timer = {
OnCalendar = "*-*-* 0/12:00:00";
Persistent = true;
};
Install = {
WantedBy = ["timers.target"];
};
};
};
};