diff --git a/.editorconfig b/.editorconfig new file mode 100644 index 00000000..b2ba00b0 --- /dev/null +++ b/.editorconfig @@ -0,0 +1,17 @@ +# EditorConfig — https://editorconfig.org + +root = true + +[*] +charset = utf-8 +end_of_line = lf +indent_style = space +indent_size = 2 +trim_trailing_whitespace = true +insert_final_newline = true + +[*.md] +trim_trailing_whitespace = false + +[Makefile] +indent_style = tab diff --git a/.env.example b/.env.example index b28bd6ca..adfb15ab 100644 --- a/.env.example +++ b/.env.example @@ -1,18 +1,34 @@ -# Discord bot token +# Discord bot token (required) DISCORD_TOKEN=your_discord_bot_token -# Discord application client ID (for slash command registration) -CLIENT_ID=your_discord_client_id +# Discord application/client ID for slash command deployment (required) +# Preferred name: +DISCORD_CLIENT_ID=your_discord_client_id +# Backward-compatible alias (optional): +# CLIENT_ID=your_discord_client_id -# Discord guild/server ID (optional - for faster command deployment during development) -# If not set, commands deploy globally (takes up to 1 hour to propagate) +# Discord guild/server ID (optional) +# If set, commands deploy to one guild instantly (great for development). +# If omitted, commands deploy globally (can take up to 1 hour). 
GUILD_ID=your_discord_guild_id -# OpenClaw API (routes through your Claude subscription) -# Local: http://localhost:18789/v1/chat/completions -# Remote (Railway/etc): https://your-tailscale-hostname.ts.net/v1/chat/completions -OPENCLAW_URL=http://localhost:18789/v1/chat/completions -OPENCLAW_TOKEN=your_openclaw_gateway_token +# OpenClaw chat completions endpoint (required) +# Local: http://localhost:18789/v1/chat/completions +# Remote: https://your-tailscale-hostname.ts.net/v1/chat/completions +OPENCLAW_API_URL=http://localhost:18789/v1/chat/completions +# Backward-compatible alias (optional): +# OPENCLAW_URL=http://localhost:18789/v1/chat/completions -# Logging level (options: debug, info, warn, error) +# OpenClaw API key / gateway token (required) +OPENCLAW_API_KEY=your_openclaw_gateway_token +# Backward-compatible alias (optional): +# OPENCLAW_TOKEN=your_openclaw_gateway_token + +# PostgreSQL connection string (required for persistent config/state) +DATABASE_URL=postgresql://user:password@host:5432/database + +# Optional: force SSL for DB connections if needed by your host +# DATABASE_SSL=true + +# Logging level (optional: debug, info, warn, error) LOG_LEVEL=info diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml new file mode 100644 index 00000000..ed25d517 --- /dev/null +++ b/.github/workflows/ci.yml @@ -0,0 +1,34 @@ +name: CI + +on: + push: + branches: [main] + pull_request: + branches: [main] + +jobs: + lint-and-test: + name: Lint & Test + runs-on: ubuntu-latest + + steps: + - name: Checkout + uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + + - name: Setup pnpm + uses: pnpm/action-setup@7088e561eb65bb68695d245aa206f005ef30921d # v4.1.0 + + - name: Setup Node.js + uses: actions/setup-node@49933ea5288caeca8642d1e84afbd3f7d6820020 # v4.4.0 + with: + node-version: 22 + cache: pnpm + + - name: Install dependencies + run: pnpm install --frozen-lockfile + + - name: Lint (Biome) + run: pnpm lint + + - name: Test with coverage 
(Vitest) + run: pnpm test:coverage diff --git a/.github/workflows/claude-review.yml b/.github/workflows/claude-review.yml new file mode 100644 index 00000000..98c2509a --- /dev/null +++ b/.github/workflows/claude-review.yml @@ -0,0 +1,30 @@ +name: Claude Code Review + +on: + pull_request: + types: [opened, reopened, synchronize] + issue_comment: + types: [created] + +jobs: + claude-review: + if: | + (github.event_name == 'pull_request') || + (github.event_name == 'issue_comment' && contains(github.event.comment.body, '@claude')) + runs-on: ubuntu-latest + permissions: + contents: read + pull-requests: write + issues: write + id-token: write + + steps: + - name: Checkout + uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + + - name: Claude Code Review + uses: anthropics/claude-code-action@23ed4cb53d6eacddbc22ec16652c98bcc54e0476 # v1.0.48 + with: + anthropic_api_key: ${{ secrets.ANTHROPIC_API_KEY }} + model: claude-opus-4-6-20250616 + timeout_minutes: 10 diff --git a/.gitignore b/.gitignore index 9f6ba53a..5b613a25 100644 --- a/.gitignore +++ b/.gitignore @@ -2,6 +2,7 @@ node_modules/ .env *.log logs/ +coverage/ # Auto Claude data directory and files .auto-claude/ diff --git a/AGENTS.md b/AGENTS.md new file mode 100644 index 00000000..9b61c9d4 --- /dev/null +++ b/AGENTS.md @@ -0,0 +1,147 @@ +# AGENTS.md — AI Coding Agent Guide + +> This file provides context for AI coding agents (Claude Code, Copilot, Cursor, etc.) working on bills-bot. + +## Project Overview + +**Bill Bot** is a Discord bot for the Volvox developer community. It provides AI chat (via OpenClaw/Claude), dynamic welcome messages, spam detection, and runtime configuration management backed by PostgreSQL. 
+ +## Stack + +- **Runtime:** Node.js 22 (ESM modules, `"type": "module"`) +- **Framework:** discord.js v14 +- **Database:** PostgreSQL (via `pg` — raw SQL, no ORM) +- **Logging:** Winston with daily file rotation +- **AI:** Claude via OpenClaw chat completions API +- **Linting:** Biome +- **Testing:** Vitest +- **Hosting:** Railway + +## Key Files + +| File | Purpose | +|------|---------| +| `src/index.js` | Entry point — client setup, command loading, startup sequence | +| `src/db.js` | PostgreSQL pool management (init, query, close) | +| `src/logger.js` | Winston logger setup with file + console transports | +| `src/commands/*.js` | Slash commands (auto-loaded) | +| `src/modules/ai.js` | AI chat handler — conversation history, OpenClaw API calls | +| `src/modules/chimeIn.js` | Organic conversation joining logic | +| `src/modules/welcome.js` | Dynamic welcome message generation | +| `src/modules/spam.js` | Spam/scam pattern detection | +| `src/modules/config.js` | Config loading/saving (DB + file), runtime updates | +| `src/modules/events.js` | Event handler registration (wires modules to Discord events) | +| `src/utils/errors.js` | Error classes and handling utilities | +| `src/utils/health.js` | Health monitoring singleton | +| `src/utils/permissions.js` | Permission checking for commands | +| `src/utils/retry.js` | Retry utility for flaky operations | +| `src/utils/registerCommands.js` | Discord REST API command registration | +| `src/utils/splitMessage.js` | Message splitting for Discord's 2000-char limit | +| `config.json` | Default configuration (seeded to DB on first run) | +| `.env.example` | Environment variable template | + +## Code Conventions + +### General + +- **ESM only** — use `import`/`export`, never `require()` +- **No TypeScript** — plain JavaScript with JSDoc comments for documentation +- **Node.js builtins** — always use `node:` protocol (e.g. 
`import { readFileSync } from 'node:fs'`) +- **Semicolons** — always use them +- **Single quotes** — enforced by Biome +- **2-space indentation** — enforced by Biome + +### Logging + +- **Always use Winston** — `import { info, warn, error } from '../logger.js'` +- **NEVER use `console.log`, `console.warn`, `console.error`, or any `console.*` method** in src/ files — no exceptions +- If you see `console.*` in existing code, replace it with the Winston equivalent +- Pass structured metadata: `info('Message processed', { userId, channelId })` + +### Error Handling + +- Use custom error classes from `src/utils/errors.js` +- Always log errors with context before re-throwing +- Graceful shutdown is handled in `src/index.js` + +### Config + +- Config is loaded from PostgreSQL (falls back to `config.json`) +- Use `getConfig()` from `src/modules/config.js` to read config +- Use `setConfigValue(key, value)` to update at runtime +- Config is a live object reference — mutations propagate automatically + +## How to Add a Slash Command + +1. Create `src/commands/yourcommand.js`: + +```js +import { SlashCommandBuilder } from 'discord.js'; + +export const data = new SlashCommandBuilder() + .setName('yourcommand') + .setDescription('What it does'); + +export async function execute(interaction) { + await interaction.reply('Hello!'); +} +``` + +2. Commands are auto-discovered from `src/commands/` on startup +3. Run `pnpm run deploy` to register with Discord (or restart the bot) +4. Add permission in `config.json` under `permissions.allowedCommands` + +## How to Add a Module + +1. Create `src/modules/yourmodule.js` with handler functions +2. Register handlers in `src/modules/events.js`: + +```js +import { yourHandler } from './yourmodule.js'; +// In registerEventHandlers(): +client.on('eventName', (args) => yourHandler(args, config)); +``` + +3. Config for your module goes in `config.json` under a new key +4. 
Check `config.yourModule.enabled` before processing + +## Testing + +- **Framework:** Vitest (`pnpm test`) +- **Test directory:** `tests/` +- **Coverage:** `pnpm test:coverage` — **mandatory 80% threshold** on statements, branches, functions, and lines +- Coverage provider: `@vitest/coverage-v8` +- Tests are smoke/unit tests — the bot requires Discord credentials so we don't test live connections +- Test config structure, command exports, utility functions +- Run `pnpm test` before every commit +- **Any new code must include tests** — PRs that drop coverage below 80% will fail CI + +## Documentation + +**Keep docs up to date — this is non-negotiable.** + +After every code change, check whether these files need updating: + +- **`README.md`** — setup instructions, architecture overview, config reference, env vars +- **`AGENTS.md`** (this file) — key files table, code conventions, "how to add" guides, common pitfalls +- **`CONTRIBUTING.md`** — workflow, branching, commit conventions +- **`.env.example`** — if you add/remove/rename an environment variable, update this immediately +- **`config.json`** — if you add a new config section or key, document it in README.md's config reference + +**When to update:** +- Added a new command → update Key Files table, add to README command list +- Added a new module → update Key Files table, document config section +- Changed env vars → update `.env.example` and README's environment section +- Changed architecture (new dependency, new pattern) → update Stack section and relevant guides +- Found a new pitfall → add to Common Pitfalls below + +**Rule of thumb:** If a new contributor (human or AI) would be confused without the update, write it. + +## Common Pitfalls + +1. **Missing `node:` prefix** — Biome will catch this, but remember it for new imports +2. **Config is async** — `loadConfig()` returns a Promise; it must be awaited at startup +3. 
**Discord intents** — the bot needs MessageContent, GuildMembers, and GuildVoiceStates intents enabled +4. **DATABASE_URL optional** — the bot works without a database (uses config.json only), but config persistence requires PostgreSQL +5. **Undici override** — `pnpm.overrides` pins undici; this was originally added for Node 18 compatibility and may no longer be needed on Node 22. Verify before removing +6. **2000-char limit** — Discord messages can't exceed 2000 characters; use `splitMessage()` utility diff --git a/CLAUDE.md b/CLAUDE.md new file mode 100644 index 00000000..7e94f367 --- /dev/null +++ b/CLAUDE.md @@ -0,0 +1,3 @@ +# CLAUDE.md + +See [AGENTS.md](./AGENTS.md) for full project context, architecture, and coding guidelines. diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md new file mode 100644 index 00000000..1f4819f2 --- /dev/null +++ b/CONTRIBUTING.md @@ -0,0 +1,77 @@ +# Contributing to Bill Bot + +Thanks for your interest in contributing! Bill Bot is part of the [Volvox](https://volvox.dev) open-source community. + +## Getting Started + +1. Fork the repository +2. Follow the [setup instructions](README.md#-setup) in the README +3. Create a feature branch from `main` + +## Development Workflow + +### Branch Naming + +Use descriptive branch names with prefixes: + +- `feat/add-music-command` — new features +- `fix/welcome-message-crash` — bug fixes +- `chore/update-dependencies` — maintenance +- `docs/update-readme` — documentation +- `refactor/simplify-config` — code improvements + +### Commit Messages + +We use [Conventional Commits](https://www.conventionalcommits.org/): + +```text +feat: add music playback command +fix: prevent crash on empty welcome channel +chore: update discord.js to v14.16 +docs: add API reference to README +refactor: simplify config loading logic +style: format with Biome +test: add config validation tests +ci: update Node.js version in CI +``` + +### Before Submitting + +1. **Lint:** `pnpm lint` — must pass with no errors +2. 
**Format:** `pnpm format` — auto-format your code +3. **Test:** `pnpm test` — all tests must pass +4. **Commit:** use conventional commit messages + +### Pull Requests + +1. Open a PR against `main` +2. Fill in the PR description with what changed and why +3. PRs are automatically reviewed by Claude Code +4. CI must pass (lint + tests) +5. Wait for a maintainer review + +## Code Style + +Code style is enforced by [Biome](https://biomejs.dev/): + +- Single quotes +- Semicolons always +- 2-space indentation +- Trailing commas +- 100-character line width + +Run `pnpm format` to auto-format. The CI will reject PRs with formatting issues. + +## Project Structure + +See [AGENTS.md](AGENTS.md) for a detailed guide to the codebase, including: + +- Key files and their purposes +- How to add commands and modules +- Code conventions +- Common pitfalls + +## Questions? + +- Open an issue on GitHub +- Ask in the Volvox Discord server at [volvox.dev](https://volvox.dev) diff --git a/README.md b/README.md index 182b68f7..774ec6ae 100644 --- a/README.md +++ b/README.md @@ -1,101 +1,202 @@ -# Bill Bot (Volvox Discord Bot) - -AI-powered Discord bot for the Volvox community. - -## Features - -- **AI Chat** - Powered by Claude (via OpenClaw), responds when mentioned -- **Welcome Messages** - Dynamic, contextual onboarding (time of day, activity pulse, milestones) -- **Moderation** - Detects spam/scam patterns and alerts mods - -## Requirements - -- Node.js 18+ -- pnpm (`npm install -g pnpm`) -- OpenClaw gateway running (for AI chat) - -## Setup - -1. Copy `.env.example` to `.env` and fill in: - - `DISCORD_TOKEN` - Your Discord bot token - - `OPENCLAW_URL` - OpenClaw chat completions endpoint - - `OPENCLAW_TOKEN` - Your OpenClaw gateway token - -2. Edit `config.json` for your server: - - Channel IDs for welcome messages and mod alerts - - AI system prompt and model settings - - Enable/disable features - -3. 
Install and run: - ```bash - pnpm install - pnpm start - ``` - - For development (auto-restart on changes): - ```bash - pnpm dev - ``` - -## Discord Bot Setup - -1. Create app at https://discord.com/developers/applications -2. Bot → Add Bot → Copy token -3. Enable intents: - - Message Content Intent ✅ - - Server Members Intent ✅ -4. OAuth2 → URL Generator: - - Scopes: `bot` - - Permissions: View Channels, Send Messages, Read History, Manage Messages -5. Invite bot to server with generated URL - -## Config - -```jsonc -{ - "ai": { - "enabled": true, - "model": "claude-sonnet-4-20250514", - "maxTokens": 1024, - "systemPrompt": "...", - "channels": [] // empty = all channels, or list specific channel IDs - }, - "welcome": { - "enabled": true, - "channelId": "...", - "message": "Welcome, {user}!", // used when dynamic.enabled=false - "dynamic": { - "enabled": true, - "timezone": "America/New_York", - "activityWindowMinutes": 45, - "milestoneInterval": 25, - "highlightChannels": ["..."] - } - }, - "moderation": { - "enabled": true, - "alertChannelId": "...", - "autoDelete": false - } -} +# 🤖 Bill Bot — Volvox Discord Bot + +[![CI](https://github.com/BillChirico/bills-bot/actions/workflows/ci.yml/badge.svg)](https://github.com/BillChirico/bills-bot/actions/workflows/ci.yml) +[![License: MIT](https://img.shields.io/badge/License-MIT-blue.svg)](LICENSE) +[![Node.js](https://img.shields.io/badge/Node.js-22-green.svg)](https://nodejs.org) + +AI-powered Discord bot for the [Volvox](https://volvox.dev) developer community. Built with discord.js v14 and powered by Claude via [OpenClaw](https://openclaw.com). + +## ✨ Features + +- **🧠 AI Chat** — Mention the bot to chat with Claude. Maintains per-channel conversation history with intelligent context management. +- **🎯 Chime-In** — Bot can organically join conversations when it has something relevant to add (configurable per-channel). 
+- **👋 Dynamic Welcome Messages** — Contextual onboarding with time-of-day greetings, community activity snapshots, member milestones, and highlight channels. +- **🛡️ Spam Detection** — Pattern-based scam/spam detection with mod alerts and optional auto-delete. +- **⚙️ Config Management** — All settings stored in PostgreSQL with live `/config` slash command for runtime changes. +- **📊 Health Monitoring** — Built-in health checks and `/status` command for uptime, memory, and latency stats. +- **🎤 Voice Activity Tracking** — Tracks voice channel activity for community insights. + +## 🏗️ Architecture + +```text +Discord User + │ + ▼ +┌─────────────┐ ┌──────────────┐ ┌─────────┐ +│ Bill Bot │────▶│ OpenClaw │────▶│ Claude │ +│ (Node.js) │◀────│ Gateway │◀────│ (AI) │ +└──────┬──────┘ └──────────────┘ └─────────┘ + │ + ▼ +┌──────────────┐ +│ PostgreSQL │ Config, state persistence +└──────────────┘ ``` -## Architecture +## 📋 Prerequisites +- [Node.js](https://nodejs.org) 22+ +- [pnpm](https://pnpm.io) (`npm install -g pnpm`) +- [PostgreSQL](https://www.postgresql.org/) database +- [OpenClaw](https://openclaw.com) gateway (for AI chat features) +- A [Discord application](https://discord.com/developers/applications) with bot token + +## 🚀 Setup + +### 1. Clone and install + +```bash +git clone https://github.com/BillChirico/bills-bot.git +cd bills-bot +pnpm install +``` + +### 2. Configure environment + +```bash +cp .env.example .env +``` + +Edit `.env` with your values (see [Environment Variables](#-environment-variables) below). + +### 3. Configure the bot + +Edit `config.json` to match your Discord server (see [Configuration](#️-configuration) below). + +### 4. Set up Discord bot + +1. Create an app at [discord.com/developers/applications](https://discord.com/developers/applications) +2. **Bot** → Add Bot → Copy token → paste as `DISCORD_TOKEN` +3. Enable **Privileged Gateway Intents**: + - ✅ Message Content Intent + - ✅ Server Members Intent +4. 
**OAuth2** → URL Generator: + - Scopes: `bot`, `applications.commands` + - Permissions: View Channels, Send Messages, Read Message History, Manage Messages +5. Invite bot to your server with the generated URL + +### 5. Run + +```bash +pnpm start ``` -Discord Message - ↓ - bill-bot - ↓ -OpenClaw API (/v1/chat/completions) - ↓ -Claude (via your subscription) - ↓ - Response + +For development with auto-restart: + +```bash +pnpm dev ``` -The bot routes AI requests through OpenClaw's chat completions endpoint, which uses your existing Claude subscription. No separate Anthropic API key needed. +## 🔑 Environment Variables + +| Variable | Required | Description | +|----------|----------|-------------| +| `DISCORD_TOKEN` | ✅ | Discord bot token | +| `DISCORD_CLIENT_ID` | ✅* | Discord application/client ID for slash-command deployment (`pnpm deploy`) | +| `GUILD_ID` | ❌ | Guild ID for faster dev command deployment (omit for global) | +| `OPENCLAW_API_URL` | ✅ | OpenClaw chat completions endpoint | +| `OPENCLAW_API_KEY` | ✅ | OpenClaw gateway authentication token | +| `DATABASE_URL` | ✅** | PostgreSQL connection string for persistent config/state | +| `LOG_LEVEL` | ❌ | Logging level: `debug`, `info`, `warn`, `error` (default: `info`) | + +\* Legacy alias supported: `CLIENT_ID` +\** Bot can run without DB, but persistent config is strongly recommended in production. + +Legacy OpenClaw aliases are also supported for backwards compatibility: `OPENCLAW_URL`, `OPENCLAW_TOKEN`. + +## ⚙️ Configuration + +All configuration lives in `config.json` and can be updated at runtime via the `/config` slash command. When `DATABASE_URL` is set, config is persisted to PostgreSQL. + +### AI Chat (`ai`) + +| Key | Type | Description | +|-----|------|-------------| +| `enabled` | boolean | Enable/disable AI responses | +| `model` | string | Claude model to use (e.g. 
`claude-sonnet-4-20250514`) | +| `maxTokens` | number | Max tokens per AI response | +| `systemPrompt` | string | System prompt defining bot personality | +| `channels` | string[] | Channel IDs to respond in (empty = all channels) | + +### Chime-In (`chimeIn`) + +| Key | Type | Description | +|-----|------|-------------| +| `enabled` | boolean | Enable organic conversation joining | +| `evaluateEvery` | number | Evaluate every N messages | +| `model` | string | Model for evaluation (e.g. `claude-haiku-4-5`) | +| `channels` | string[] | Channels to monitor (empty = all) | +| `excludeChannels` | string[] | Channels to never chime into | + +### Welcome Messages (`welcome`) + +| Key | Type | Description | +|-----|------|-------------| +| `enabled` | boolean | Enable welcome messages | +| `channelId` | string | Channel to post welcome messages | +| `message` | string | Static fallback message template | +| `dynamic.enabled` | boolean | Enable AI-generated dynamic welcomes | +| `dynamic.timezone` | string | Timezone for time-of-day greetings | +| `dynamic.activityWindowMinutes` | number | Window for activity snapshot | +| `dynamic.milestoneInterval` | number | Member count milestone interval | +| `dynamic.highlightChannels` | string[] | Channels to highlight in welcomes | + +### Moderation (`moderation`) + +| Key | Type | Description | +|-----|------|-------------| +| `enabled` | boolean | Enable spam detection | +| `alertChannelId` | string | Channel for mod alerts | +| `autoDelete` | boolean | Auto-delete detected spam | + +### Permissions (`permissions`) + +| Key | Type | Description | +|-----|------|-------------| +| `enabled` | boolean | Enable permission checks | +| `adminRoleId` | string | Role ID for admin commands | +| `allowedCommands` | object | Per-command permission levels | + +## 🛠️ Development + +### Scripts + +| Command | Description | +|---------|-------------| +| `pnpm start` | Start the bot | +| `pnpm dev` | Start with auto-restart (watch mode) | +| 
`pnpm run deploy` | Register slash commands with Discord | +| `pnpm lint` | Check code with Biome | +| `pnpm lint:fix` | Auto-fix lint issues | +| `pnpm format` | Format code with Biome | +| `pnpm test` | Run tests with Vitest | + +### Adding a new command + +1. Create `src/commands/yourcommand.js` +2. Export `data` (SlashCommandBuilder) and `execute(interaction)` function +3. Commands are auto-loaded on startup + +### Adding a new module + +1. Create `src/modules/yourmodule.js` +2. Wire it into `src/modules/events.js` event handlers +3. Use the Winston logger (`import { info, error } from '../logger.js'`) + +## 🚄 Deployment + +Bill Bot is deployed on [Railway](https://railway.app). + +1. Connect your GitHub repo to Railway +2. Set all environment variables in Railway dashboard +3. Railway auto-deploys on push to `main` + +The bot uses the `start` script (`node src/index.js`) for production. + +## 🤝 Contributing + +See [CONTRIBUTING.md](CONTRIBUTING.md) for guidelines. -## License +## 📄 License -MIT +[MIT](LICENSE) — Made with 💚 by [Volvox](https://volvox.dev) diff --git a/biome.json b/biome.json new file mode 100644 index 00000000..c50e2f6a --- /dev/null +++ b/biome.json @@ -0,0 +1,44 @@ +{ + "$schema": "https://biomejs.dev/schemas/2.3.14/schema.json", + "assist": { + "actions": { + "source": { + "organizeImports": "on" + } + } + }, + "linter": { + "enabled": true, + "rules": { + "recommended": true, + "correctness": { + "noUnusedVariables": "warn", + "noUnusedImports": "warn" + }, + "style": { + "useConst": "error" + }, + "suspicious": { + "noVar": "error", + "noConsole": "error" + } + } + }, + "formatter": { + "enabled": true, + "indentStyle": "space", + "indentWidth": 2, + "lineWidth": 100, + "lineEnding": "lf" + }, + "javascript": { + "formatter": { + "quoteStyle": "single", + "trailingCommas": "all", + "semicolons": "always" + } + }, + "files": { + "includes": ["**/*.js", "**/*.json", "**/*.md", "!coverage"] + } +} diff --git a/config.json b/config.json index
ec033007..3e79b24f 100644 --- a/config.json +++ b/config.json @@ -23,11 +23,7 @@ "timezone": "America/New_York", "activityWindowMinutes": 45, "milestoneInterval": 25, - "highlightChannels": [ - "1438631182379253814", - "1444154471704957069", - "1446317676988465242" - ], + "highlightChannels": ["1438631182379253814", "1444154471704957069", "1446317676988465242"], "excludeChannels": [] } }, diff --git a/package.json b/package.json index 7be201c4..1ca19d88 100644 --- a/package.json +++ b/package.json @@ -1,6 +1,6 @@ { "name": "bill-bot", - "packageManager": "pnpm@10.28.2", + "packageManager": "pnpm@10.29.2", "version": "1.0.0", "description": "Volvox Discord bot - AI chat, welcome messages, and moderation", "main": "src/index.js", @@ -8,11 +8,16 @@ "scripts": { "start": "node src/index.js", "dev": "node --watch src/index.js", - "deploy": "node src/deploy-commands.js" + "deploy": "node src/deploy-commands.js", + "lint": "biome check .", + "lint:fix": "biome check . --fix", + "format": "biome format . 
--write", + "test": "vitest run", + "test:coverage": "vitest run --coverage" }, "dependencies": { "discord.js": "^14.25.1", - "dotenv": "^17.2.3", + "dotenv": "^17.2.4", "pg": "^8.18.0", "winston": "^3.19.0", "winston-daily-rotate-file": "^5.0.0" @@ -24,5 +29,10 @@ }, "engines": { "node": ">=18.0.0" + }, + "devDependencies": { + "@biomejs/biome": "^2.3.14", + "@vitest/coverage-v8": "^4.0.18", + "vitest": "^4.0.18" } } diff --git a/pnpm-lock.yaml b/pnpm-lock.yaml index b381e651..4bd24130 100644 --- a/pnpm-lock.yaml +++ b/pnpm-lock.yaml @@ -15,17 +15,108 @@ importers: specifier: ^14.25.1 version: 14.25.1 dotenv: - specifier: ^17.2.3 - version: 17.2.3 + specifier: ^17.2.4 + version: 17.2.4 + pg: + specifier: ^8.18.0 + version: 8.18.0 winston: specifier: ^3.19.0 version: 3.19.0 winston-daily-rotate-file: specifier: ^5.0.0 version: 5.0.0(winston@3.19.0) + devDependencies: + '@biomejs/biome': + specifier: ^2.3.14 + version: 2.3.14 + '@vitest/coverage-v8': + specifier: ^4.0.18 + version: 4.0.18(vitest@4.0.18(@types/node@25.2.0)) + vitest: + specifier: ^4.0.18 + version: 4.0.18(@types/node@25.2.0) packages: + '@babel/helper-string-parser@7.27.1': + resolution: {integrity: sha512-qMlSxKbpRlAridDExk92nSobyDdpPijUq2DW6oDnUqd0iOGxmQjyqhMIihI9+zv4LPyZdRje2cavWPbCbWm3eA==} + engines: {node: '>=6.9.0'} + + '@babel/helper-validator-identifier@7.28.5': + resolution: {integrity: sha512-qSs4ifwzKJSV39ucNjsvc6WVHs6b7S03sOh2OcHF9UHfVPqWWALUsNUVzhSBiItjRZoLHx7nIarVjqKVusUZ1Q==} + engines: {node: '>=6.9.0'} + + '@babel/parser@7.29.0': + resolution: {integrity: sha512-IyDgFV5GeDUVX4YdF/3CPULtVGSXXMLh1xVIgdCgxApktqnQV0r7/8Nqthg+8YLGaAtdyIlo2qIdZrbCv4+7ww==} + engines: {node: '>=6.0.0'} + hasBin: true + + '@babel/types@7.29.0': + resolution: {integrity: sha512-LwdZHpScM4Qz8Xw2iKSzS+cfglZzJGvofQICy7W7v4caru4EaAmyUuO6BGrbyQ2mYV11W0U8j5mBhd14dd3B0A==} + engines: {node: '>=6.9.0'} + + '@bcoe/v8-coverage@1.0.2': + resolution: {integrity: 
sha512-6zABk/ECA/QYSCQ1NGiVwwbQerUCZ+TQbp64Q3AgmfNvurHH0j8TtXa1qbShXA6qqkpAj4V5W8pP6mLe1mcMqA==} + engines: {node: '>=18'} + + '@biomejs/biome@2.3.14': + resolution: {integrity: sha512-QMT6QviX0WqXJCaiqVMiBUCr5WRQ1iFSjvOLoTk6auKukJMvnMzWucXpwZB0e8F00/1/BsS9DzcKgWH+CLqVuA==} + engines: {node: '>=14.21.3'} + hasBin: true + + '@biomejs/cli-darwin-arm64@2.3.14': + resolution: {integrity: sha512-UJGPpvWJMkLxSRtpCAKfKh41Q4JJXisvxZL8ChN1eNW3m/WlPFJ6EFDCE7YfUb4XS8ZFi3C1dFpxUJ0Ety5n+A==} + engines: {node: '>=14.21.3'} + cpu: [arm64] + os: [darwin] + + '@biomejs/cli-darwin-x64@2.3.14': + resolution: {integrity: sha512-PNkLNQG6RLo8lG7QoWe/hhnMxJIt1tEimoXpGQjwS/dkdNiKBLPv4RpeQl8o3s1OKI3ZOR5XPiYtmbGGHAOnLA==} + engines: {node: '>=14.21.3'} + cpu: [x64] + os: [darwin] + + '@biomejs/cli-linux-arm64-musl@2.3.14': + resolution: {integrity: sha512-LInRbXhYujtL3sH2TMCH/UBwJZsoGwfQjBrMfl84CD4hL/41C/EU5mldqf1yoFpsI0iPWuU83U+nB2TUUypWeg==} + engines: {node: '>=14.21.3'} + cpu: [arm64] + os: [linux] + libc: [musl] + + '@biomejs/cli-linux-arm64@2.3.14': + resolution: {integrity: sha512-KT67FKfzIw6DNnUNdYlBg+eU24Go3n75GWK6NwU4+yJmDYFe9i/MjiI+U/iEzKvo0g7G7MZqoyrhIYuND2w8QQ==} + engines: {node: '>=14.21.3'} + cpu: [arm64] + os: [linux] + libc: [glibc] + + '@biomejs/cli-linux-x64-musl@2.3.14': + resolution: {integrity: sha512-KQU7EkbBBuHPW3/rAcoiVmhlPtDSGOGRPv9js7qJVpYTzjQmVR+C9Rfcz+ti8YCH+zT1J52tuBybtP4IodjxZQ==} + engines: {node: '>=14.21.3'} + cpu: [x64] + os: [linux] + libc: [musl] + + '@biomejs/cli-linux-x64@2.3.14': + resolution: {integrity: sha512-ZsZzQsl9U+wxFrGGS4f6UxREUlgHwmEfu1IrXlgNFrNnd5Th6lIJr8KmSzu/+meSa9f4rzFrbEW9LBBA6ScoMA==} + engines: {node: '>=14.21.3'} + cpu: [x64] + os: [linux] + libc: [glibc] + + '@biomejs/cli-win32-arm64@2.3.14': + resolution: {integrity: sha512-+IKYkj/pUBbnRf1G1+RlyA3LWiDgra1xpS7H2g4BuOzzRbRB+hmlw0yFsLprHhbbt7jUzbzAbAjK/Pn0FDnh1A==} + engines: {node: '>=14.21.3'} + cpu: [arm64] + os: [win32] + + '@biomejs/cli-win32-x64@2.3.14': + resolution: 
{integrity: sha512-oizCjdyQ3WJEswpb3Chdngeat56rIdSYK12JI3iI11Mt5T5EXcZ7WLuowzEaFPNJ3zmOQFliMN8QY1Pi+qsfdQ==} + engines: {node: '>=14.21.3'} + cpu: [x64] + os: [win32] + '@colors/colors@1.6.0': resolution: {integrity: sha512-Ir+AOibqzrIsL6ajt3Rz3LskB7OiMVHqltZmspbW/TJuTVuyOMirVqAkjfY6JISiLHgyNqicAC8AyHHGzNd/dA==} engines: {node: '>=0.1.90'} @@ -61,6 +152,310 @@ packages: resolution: {integrity: sha512-wPlQDxEmlDg5IxhJPuxXr3Vy9AjYq5xCvFWGJyD7w7Np8ZGu+Mc+97LCoEc/+AYCo2IDpKioiH0/c/mj5ZR9Uw==} engines: {node: '>=16.11.0'} + '@esbuild/aix-ppc64@0.27.3': + resolution: {integrity: sha512-9fJMTNFTWZMh5qwrBItuziu834eOCUcEqymSH7pY+zoMVEZg3gcPuBNxH1EvfVYe9h0x/Ptw8KBzv7qxb7l8dg==} + engines: {node: '>=18'} + cpu: [ppc64] + os: [aix] + + '@esbuild/android-arm64@0.27.3': + resolution: {integrity: sha512-YdghPYUmj/FX2SYKJ0OZxf+iaKgMsKHVPF1MAq/P8WirnSpCStzKJFjOjzsW0QQ7oIAiccHdcqjbHmJxRb/dmg==} + engines: {node: '>=18'} + cpu: [arm64] + os: [android] + + '@esbuild/android-arm@0.27.3': + resolution: {integrity: sha512-i5D1hPY7GIQmXlXhs2w8AWHhenb00+GxjxRncS2ZM7YNVGNfaMxgzSGuO8o8SJzRc/oZwU2bcScvVERk03QhzA==} + engines: {node: '>=18'} + cpu: [arm] + os: [android] + + '@esbuild/android-x64@0.27.3': + resolution: {integrity: sha512-IN/0BNTkHtk8lkOM8JWAYFg4ORxBkZQf9zXiEOfERX/CzxW3Vg1ewAhU7QSWQpVIzTW+b8Xy+lGzdYXV6UZObQ==} + engines: {node: '>=18'} + cpu: [x64] + os: [android] + + '@esbuild/darwin-arm64@0.27.3': + resolution: {integrity: sha512-Re491k7ByTVRy0t3EKWajdLIr0gz2kKKfzafkth4Q8A5n1xTHrkqZgLLjFEHVD+AXdUGgQMq+Godfq45mGpCKg==} + engines: {node: '>=18'} + cpu: [arm64] + os: [darwin] + + '@esbuild/darwin-x64@0.27.3': + resolution: {integrity: sha512-vHk/hA7/1AckjGzRqi6wbo+jaShzRowYip6rt6q7VYEDX4LEy1pZfDpdxCBnGtl+A5zq8iXDcyuxwtv3hNtHFg==} + engines: {node: '>=18'} + cpu: [x64] + os: [darwin] + + '@esbuild/freebsd-arm64@0.27.3': + resolution: {integrity: sha512-ipTYM2fjt3kQAYOvo6vcxJx3nBYAzPjgTCk7QEgZG8AUO3ydUhvelmhrbOheMnGOlaSFUoHXB6un+A7q4ygY9w==} + engines: {node: '>=18'} + cpu: [arm64] 
+ os: [freebsd] + + '@esbuild/freebsd-x64@0.27.3': + resolution: {integrity: sha512-dDk0X87T7mI6U3K9VjWtHOXqwAMJBNN2r7bejDsc+j03SEjtD9HrOl8gVFByeM0aJksoUuUVU9TBaZa2rgj0oA==} + engines: {node: '>=18'} + cpu: [x64] + os: [freebsd] + + '@esbuild/linux-arm64@0.27.3': + resolution: {integrity: sha512-sZOuFz/xWnZ4KH3YfFrKCf1WyPZHakVzTiqji3WDc0BCl2kBwiJLCXpzLzUBLgmp4veFZdvN5ChW4Eq/8Fc2Fg==} + engines: {node: '>=18'} + cpu: [arm64] + os: [linux] + + '@esbuild/linux-arm@0.27.3': + resolution: {integrity: sha512-s6nPv2QkSupJwLYyfS+gwdirm0ukyTFNl3KTgZEAiJDd+iHZcbTPPcWCcRYH+WlNbwChgH2QkE9NSlNrMT8Gfw==} + engines: {node: '>=18'} + cpu: [arm] + os: [linux] + + '@esbuild/linux-ia32@0.27.3': + resolution: {integrity: sha512-yGlQYjdxtLdh0a3jHjuwOrxQjOZYD/C9PfdbgJJF3TIZWnm/tMd/RcNiLngiu4iwcBAOezdnSLAwQDPqTmtTYg==} + engines: {node: '>=18'} + cpu: [ia32] + os: [linux] + + '@esbuild/linux-loong64@0.27.3': + resolution: {integrity: sha512-WO60Sn8ly3gtzhyjATDgieJNet/KqsDlX5nRC5Y3oTFcS1l0KWba+SEa9Ja1GfDqSF1z6hif/SkpQJbL63cgOA==} + engines: {node: '>=18'} + cpu: [loong64] + os: [linux] + + '@esbuild/linux-mips64el@0.27.3': + resolution: {integrity: sha512-APsymYA6sGcZ4pD6k+UxbDjOFSvPWyZhjaiPyl/f79xKxwTnrn5QUnXR5prvetuaSMsb4jgeHewIDCIWljrSxw==} + engines: {node: '>=18'} + cpu: [mips64el] + os: [linux] + + '@esbuild/linux-ppc64@0.27.3': + resolution: {integrity: sha512-eizBnTeBefojtDb9nSh4vvVQ3V9Qf9Df01PfawPcRzJH4gFSgrObw+LveUyDoKU3kxi5+9RJTCWlj4FjYXVPEA==} + engines: {node: '>=18'} + cpu: [ppc64] + os: [linux] + + '@esbuild/linux-riscv64@0.27.3': + resolution: {integrity: sha512-3Emwh0r5wmfm3ssTWRQSyVhbOHvqegUDRd0WhmXKX2mkHJe1SFCMJhagUleMq+Uci34wLSipf8Lagt4LlpRFWQ==} + engines: {node: '>=18'} + cpu: [riscv64] + os: [linux] + + '@esbuild/linux-s390x@0.27.3': + resolution: {integrity: sha512-pBHUx9LzXWBc7MFIEEL0yD/ZVtNgLytvx60gES28GcWMqil8ElCYR4kvbV2BDqsHOvVDRrOxGySBM9Fcv744hw==} + engines: {node: '>=18'} + cpu: [s390x] + os: [linux] + + '@esbuild/linux-x64@0.27.3': + resolution: {integrity: 
sha512-Czi8yzXUWIQYAtL/2y6vogER8pvcsOsk5cpwL4Gk5nJqH5UZiVByIY8Eorm5R13gq+DQKYg0+JyQoytLQas4dA==} + engines: {node: '>=18'} + cpu: [x64] + os: [linux] + + '@esbuild/netbsd-arm64@0.27.3': + resolution: {integrity: sha512-sDpk0RgmTCR/5HguIZa9n9u+HVKf40fbEUt+iTzSnCaGvY9kFP0YKBWZtJaraonFnqef5SlJ8/TiPAxzyS+UoA==} + engines: {node: '>=18'} + cpu: [arm64] + os: [netbsd] + + '@esbuild/netbsd-x64@0.27.3': + resolution: {integrity: sha512-P14lFKJl/DdaE00LItAukUdZO5iqNH7+PjoBm+fLQjtxfcfFE20Xf5CrLsmZdq5LFFZzb5JMZ9grUwvtVYzjiA==} + engines: {node: '>=18'} + cpu: [x64] + os: [netbsd] + + '@esbuild/openbsd-arm64@0.27.3': + resolution: {integrity: sha512-AIcMP77AvirGbRl/UZFTq5hjXK+2wC7qFRGoHSDrZ5v5b8DK/GYpXW3CPRL53NkvDqb9D+alBiC/dV0Fb7eJcw==} + engines: {node: '>=18'} + cpu: [arm64] + os: [openbsd] + + '@esbuild/openbsd-x64@0.27.3': + resolution: {integrity: sha512-DnW2sRrBzA+YnE70LKqnM3P+z8vehfJWHXECbwBmH/CU51z6FiqTQTHFenPlHmo3a8UgpLyH3PT+87OViOh1AQ==} + engines: {node: '>=18'} + cpu: [x64] + os: [openbsd] + + '@esbuild/openharmony-arm64@0.27.3': + resolution: {integrity: sha512-NinAEgr/etERPTsZJ7aEZQvvg/A6IsZG/LgZy+81wON2huV7SrK3e63dU0XhyZP4RKGyTm7aOgmQk0bGp0fy2g==} + engines: {node: '>=18'} + cpu: [arm64] + os: [openharmony] + + '@esbuild/sunos-x64@0.27.3': + resolution: {integrity: sha512-PanZ+nEz+eWoBJ8/f8HKxTTD172SKwdXebZ0ndd953gt1HRBbhMsaNqjTyYLGLPdoWHy4zLU7bDVJztF5f3BHA==} + engines: {node: '>=18'} + cpu: [x64] + os: [sunos] + + '@esbuild/win32-arm64@0.27.3': + resolution: {integrity: sha512-B2t59lWWYrbRDw/tjiWOuzSsFh1Y/E95ofKz7rIVYSQkUYBjfSgf6oeYPNWHToFRr2zx52JKApIcAS/D5TUBnA==} + engines: {node: '>=18'} + cpu: [arm64] + os: [win32] + + '@esbuild/win32-ia32@0.27.3': + resolution: {integrity: sha512-QLKSFeXNS8+tHW7tZpMtjlNb7HKau0QDpwm49u0vUp9y1WOF+PEzkU84y9GqYaAVW8aH8f3GcBck26jh54cX4Q==} + engines: {node: '>=18'} + cpu: [ia32] + os: [win32] + + '@esbuild/win32-x64@0.27.3': + resolution: {integrity: 
sha512-4uJGhsxuptu3OcpVAzli+/gWusVGwZZHTlS63hh++ehExkVT8SgiEf7/uC/PclrPPkLhZqGgCTjd0VWLo6xMqA==} + engines: {node: '>=18'} + cpu: [x64] + os: [win32] + + '@jridgewell/resolve-uri@3.1.2': + resolution: {integrity: sha512-bRISgCIjP20/tbWSPWMEi54QVPRZExkuD9lJL+UIxUKtwVJA8wW1Trb1jMs1RFXo1CBTNZ/5hpC9QvmKWdopKw==} + engines: {node: '>=6.0.0'} + + '@jridgewell/sourcemap-codec@1.5.5': + resolution: {integrity: sha512-cYQ9310grqxueWbl+WuIUIaiUaDcj7WOq5fVhEljNVgRfOUhY9fy2zTvfoqWsnebh8Sl70VScFbICvJnLKB0Og==} + + '@jridgewell/trace-mapping@0.3.31': + resolution: {integrity: sha512-zzNR+SdQSDJzc8joaeP8QQoCQr8NuYx2dIIytl1QeBEZHJ9uW6hebsrYgbz8hJwUQao3TWCMtmfV8Nu1twOLAw==} + + '@rollup/rollup-android-arm-eabi@4.57.1': + resolution: {integrity: sha512-A6ehUVSiSaaliTxai040ZpZ2zTevHYbvu/lDoeAteHI8QnaosIzm4qwtezfRg1jOYaUmnzLX1AOD6Z+UJjtifg==} + cpu: [arm] + os: [android] + + '@rollup/rollup-android-arm64@4.57.1': + resolution: {integrity: sha512-dQaAddCY9YgkFHZcFNS/606Exo8vcLHwArFZ7vxXq4rigo2bb494/xKMMwRRQW6ug7Js6yXmBZhSBRuBvCCQ3w==} + cpu: [arm64] + os: [android] + + '@rollup/rollup-darwin-arm64@4.57.1': + resolution: {integrity: sha512-crNPrwJOrRxagUYeMn/DZwqN88SDmwaJ8Cvi/TN1HnWBU7GwknckyosC2gd0IqYRsHDEnXf328o9/HC6OkPgOg==} + cpu: [arm64] + os: [darwin] + + '@rollup/rollup-darwin-x64@4.57.1': + resolution: {integrity: sha512-Ji8g8ChVbKrhFtig5QBV7iMaJrGtpHelkB3lsaKzadFBe58gmjfGXAOfI5FV0lYMH8wiqsxKQ1C9B0YTRXVy4w==} + cpu: [x64] + os: [darwin] + + '@rollup/rollup-freebsd-arm64@4.57.1': + resolution: {integrity: sha512-R+/WwhsjmwodAcz65guCGFRkMb4gKWTcIeLy60JJQbXrJ97BOXHxnkPFrP+YwFlaS0m+uWJTstrUA9o+UchFug==} + cpu: [arm64] + os: [freebsd] + + '@rollup/rollup-freebsd-x64@4.57.1': + resolution: {integrity: sha512-IEQTCHeiTOnAUC3IDQdzRAGj3jOAYNr9kBguI7MQAAZK3caezRrg0GxAb6Hchg4lxdZEI5Oq3iov/w/hnFWY9Q==} + cpu: [x64] + os: [freebsd] + + '@rollup/rollup-linux-arm-gnueabihf@4.57.1': + resolution: {integrity: 
sha512-F8sWbhZ7tyuEfsmOxwc2giKDQzN3+kuBLPwwZGyVkLlKGdV1nvnNwYD0fKQ8+XS6hp9nY7B+ZeK01EBUE7aHaw==} + cpu: [arm] + os: [linux] + libc: [glibc] + + '@rollup/rollup-linux-arm-musleabihf@4.57.1': + resolution: {integrity: sha512-rGfNUfn0GIeXtBP1wL5MnzSj98+PZe/AXaGBCRmT0ts80lU5CATYGxXukeTX39XBKsxzFpEeK+Mrp9faXOlmrw==} + cpu: [arm] + os: [linux] + libc: [musl] + + '@rollup/rollup-linux-arm64-gnu@4.57.1': + resolution: {integrity: sha512-MMtej3YHWeg/0klK2Qodf3yrNzz6CGjo2UntLvk2RSPlhzgLvYEB3frRvbEF2wRKh1Z2fDIg9KRPe1fawv7C+g==} + cpu: [arm64] + os: [linux] + libc: [glibc] + + '@rollup/rollup-linux-arm64-musl@4.57.1': + resolution: {integrity: sha512-1a/qhaaOXhqXGpMFMET9VqwZakkljWHLmZOX48R0I/YLbhdxr1m4gtG1Hq7++VhVUmf+L3sTAf9op4JlhQ5u1Q==} + cpu: [arm64] + os: [linux] + libc: [musl] + + '@rollup/rollup-linux-loong64-gnu@4.57.1': + resolution: {integrity: sha512-QWO6RQTZ/cqYtJMtxhkRkidoNGXc7ERPbZN7dVW5SdURuLeVU7lwKMpo18XdcmpWYd0qsP1bwKPf7DNSUinhvA==} + cpu: [loong64] + os: [linux] + libc: [glibc] + + '@rollup/rollup-linux-loong64-musl@4.57.1': + resolution: {integrity: sha512-xpObYIf+8gprgWaPP32xiN5RVTi/s5FCR+XMXSKmhfoJjrpRAjCuuqQXyxUa/eJTdAE6eJ+KDKaoEqjZQxh3Gw==} + cpu: [loong64] + os: [linux] + libc: [musl] + + '@rollup/rollup-linux-ppc64-gnu@4.57.1': + resolution: {integrity: sha512-4BrCgrpZo4hvzMDKRqEaW1zeecScDCR+2nZ86ATLhAoJ5FQ+lbHVD3ttKe74/c7tNT9c6F2viwB3ufwp01Oh2w==} + cpu: [ppc64] + os: [linux] + libc: [glibc] + + '@rollup/rollup-linux-ppc64-musl@4.57.1': + resolution: {integrity: sha512-NOlUuzesGauESAyEYFSe3QTUguL+lvrN1HtwEEsU2rOwdUDeTMJdO5dUYl/2hKf9jWydJrO9OL/XSSf65R5+Xw==} + cpu: [ppc64] + os: [linux] + libc: [musl] + + '@rollup/rollup-linux-riscv64-gnu@4.57.1': + resolution: {integrity: sha512-ptA88htVp0AwUUqhVghwDIKlvJMD/fmL/wrQj99PRHFRAG6Z5nbWoWG4o81Nt9FT+IuqUQi+L31ZKAFeJ5Is+A==} + cpu: [riscv64] + os: [linux] + libc: [glibc] + + '@rollup/rollup-linux-riscv64-musl@4.57.1': + resolution: {integrity: 
sha512-S51t7aMMTNdmAMPpBg7OOsTdn4tySRQvklmL3RpDRyknk87+Sp3xaumlatU+ppQ+5raY7sSTcC2beGgvhENfuw==} + cpu: [riscv64] + os: [linux] + libc: [musl] + + '@rollup/rollup-linux-s390x-gnu@4.57.1': + resolution: {integrity: sha512-Bl00OFnVFkL82FHbEqy3k5CUCKH6OEJL54KCyx2oqsmZnFTR8IoNqBF+mjQVcRCT5sB6yOvK8A37LNm/kPJiZg==} + cpu: [s390x] + os: [linux] + libc: [glibc] + + '@rollup/rollup-linux-x64-gnu@4.57.1': + resolution: {integrity: sha512-ABca4ceT4N+Tv/GtotnWAeXZUZuM/9AQyCyKYyKnpk4yoA7QIAuBt6Hkgpw8kActYlew2mvckXkvx0FfoInnLg==} + cpu: [x64] + os: [linux] + libc: [glibc] + + '@rollup/rollup-linux-x64-musl@4.57.1': + resolution: {integrity: sha512-HFps0JeGtuOR2convgRRkHCekD7j+gdAuXM+/i6kGzQtFhlCtQkpwtNzkNj6QhCDp7DRJ7+qC/1Vg2jt5iSOFw==} + cpu: [x64] + os: [linux] + libc: [musl] + + '@rollup/rollup-openbsd-x64@4.57.1': + resolution: {integrity: sha512-H+hXEv9gdVQuDTgnqD+SQffoWoc0Of59AStSzTEj/feWTBAnSfSD3+Dql1ZruJQxmykT/JVY0dE8Ka7z0DH1hw==} + cpu: [x64] + os: [openbsd] + + '@rollup/rollup-openharmony-arm64@4.57.1': + resolution: {integrity: sha512-4wYoDpNg6o/oPximyc/NG+mYUejZrCU2q+2w6YZqrAs2UcNUChIZXjtafAiiZSUc7On8v5NyNj34Kzj/Ltk6dQ==} + cpu: [arm64] + os: [openharmony] + + '@rollup/rollup-win32-arm64-msvc@4.57.1': + resolution: {integrity: sha512-O54mtsV/6LW3P8qdTcamQmuC990HDfR71lo44oZMZlXU4tzLrbvTii87Ni9opq60ds0YzuAlEr/GNwuNluZyMQ==} + cpu: [arm64] + os: [win32] + + '@rollup/rollup-win32-ia32-msvc@4.57.1': + resolution: {integrity: sha512-P3dLS+IerxCT/7D2q2FYcRdWRl22dNbrbBEtxdWhXrfIMPP9lQhb5h4Du04mdl5Woq05jVCDPCMF7Ub0NAjIew==} + cpu: [ia32] + os: [win32] + + '@rollup/rollup-win32-x64-gnu@4.57.1': + resolution: {integrity: sha512-VMBH2eOOaKGtIJYleXsi2B8CPVADrh+TyNxJ4mWPnKfLB/DBUmzW+5m1xUrcwWoMfSLagIRpjUFeW5CO5hyciQ==} + cpu: [x64] + os: [win32] + + '@rollup/rollup-win32-x64-msvc@4.57.1': + resolution: {integrity: sha512-mxRFDdHIWRxg3UfIIAwCm6NzvxG0jDX/wBN6KsQFTvKFqqg9vTrWUE68qEjHt19A5wwx5X5aUi2zuZT7YR0jrA==} + cpu: [x64] + os: [win32] + '@sapphire/async-queue@1.5.5': resolution: 
{integrity: sha512-cvGzxbba6sav2zZkH8GPf2oGk9yYoD5qrNWdu9fRehifgnFZJMV+nuy2nON2roRO4yQQ+v7MK/Pktl/HgfsUXg==} engines: {node: '>=v14.0.0', npm: '>=7.0.0'} @@ -76,6 +471,18 @@ packages: '@so-ric/colorspace@1.1.6': resolution: {integrity: sha512-/KiKkpHNOBgkFJwu9sh48LkHSMYGyuTcSFK/qMBdnOAlrRJzRSXAOFB5qwzaVQuDl8wAvHVMkaASQDReTahxuw==} + '@standard-schema/spec@1.1.0': + resolution: {integrity: sha512-l2aFy5jALhniG5HgqrD6jXLi/rUWrKvqN/qJx6yoJsgKhblVd+iqqU4RCXavm/jPityDo5TCvKMnpjKnOriy0w==} + + '@types/chai@5.2.3': + resolution: {integrity: sha512-Mw558oeA9fFbv65/y4mHtXDs9bPnFMZAL/jxdPFUpOHHIXX91mcgEHbS5Lahr+pwZFR8A7GQleRWeI6cGFC2UA==} + + '@types/deep-eql@4.0.2': + resolution: {integrity: sha512-c9h9dVVMigMPc4bwTvC5dxqtqJZwQPePsWjPlpSOnojbor6pGqdk541lfA7AqFQr5pB1BRdq0juY9db81BwyFw==} + + '@types/estree@1.0.8': + resolution: {integrity: sha512-dWHzHa2WqEXI/O1E9OjrocMTKJl2mSrEolh1Iomrv6U+JuNwaHXsXx9bLu5gG7BUWFIN0skIQJQ/L1rIex4X6w==} + '@types/node@25.2.0': resolution: {integrity: sha512-DZ8VwRFUNzuqJ5khrvwMXHmvPe+zGayJhr2CDNiKB1WBE1ST8Djl00D0IC4vvNmHMdj6DlbYRIaFE7WHjlDl5w==} @@ -85,13 +492,62 @@ packages: '@types/ws@8.18.1': resolution: {integrity: sha512-ThVF6DCVhA8kUGy+aazFQ4kXQ7E1Ty7A3ypFOe0IcJV8O/M511G99AW24irKrW56Wt44yG9+ij8FaqoBGkuBXg==} + '@vitest/coverage-v8@4.0.18': + resolution: {integrity: sha512-7i+N2i0+ME+2JFZhfuz7Tg/FqKtilHjGyGvoHYQ6iLV0zahbsJ9sljC9OcFcPDbhYKCet+sG8SsVqlyGvPflZg==} + peerDependencies: + '@vitest/browser': 4.0.18 + vitest: 4.0.18 + peerDependenciesMeta: + '@vitest/browser': + optional: true + + '@vitest/expect@4.0.18': + resolution: {integrity: sha512-8sCWUyckXXYvx4opfzVY03EOiYVxyNrHS5QxX3DAIi5dpJAAkyJezHCP77VMX4HKA2LDT/Jpfo8i2r5BE3GnQQ==} + + '@vitest/mocker@4.0.18': + resolution: {integrity: sha512-HhVd0MDnzzsgevnOWCBj5Otnzobjy5wLBe4EdeeFGv8luMsGcYqDuFRMcttKWZA5vVO8RFjexVovXvAM4JoJDQ==} + peerDependencies: + msw: ^2.4.9 + vite: ^6.0.0 || ^7.0.0-0 + peerDependenciesMeta: + msw: + optional: true + vite: + optional: true + + 
'@vitest/pretty-format@4.0.18': + resolution: {integrity: sha512-P24GK3GulZWC5tz87ux0m8OADrQIUVDPIjjj65vBXYG17ZeU3qD7r+MNZ1RNv4l8CGU2vtTRqixrOi9fYk/yKw==} + + '@vitest/runner@4.0.18': + resolution: {integrity: sha512-rpk9y12PGa22Jg6g5M3UVVnTS7+zycIGk9ZNGN+m6tZHKQb7jrP7/77WfZy13Y/EUDd52NDsLRQhYKtv7XfPQw==} + + '@vitest/snapshot@4.0.18': + resolution: {integrity: sha512-PCiV0rcl7jKQjbgYqjtakly6T1uwv/5BQ9SwBLekVg/EaYeQFPiXcgrC2Y7vDMA8dM1SUEAEV82kgSQIlXNMvA==} + + '@vitest/spy@4.0.18': + resolution: {integrity: sha512-cbQt3PTSD7P2OARdVW3qWER5EGq7PHlvE+QfzSC0lbwO+xnt7+XH06ZzFjFRgzUX//JmpxrCu92VdwvEPlWSNw==} + + '@vitest/utils@4.0.18': + resolution: {integrity: sha512-msMRKLMVLWygpK3u2Hybgi4MNjcYJvwTb0Ru09+fOyCXIgT5raYP041DRRdiJiI3k/2U6SEbAETB3YtBrUkCFA==} + '@vladfrangu/async_event_emitter@2.4.7': resolution: {integrity: sha512-Xfe6rpCTxSxfbswi/W/Pz7zp1WWSNn4A0eW4mLkQUewCrXXtMj31lCg+iQyTkh/CkusZSq9eDflu7tjEDXUY6g==} engines: {node: '>=v14.0.0', npm: '>=7.0.0'} + assertion-error@2.0.1: + resolution: {integrity: sha512-Izi8RQcffqCeNVgFigKli1ssklIbpHnCYc6AknXGYoB6grJqyeby7jv12JUQgmTAnIDnbck1uxksT4dzN3PWBA==} + engines: {node: '>=12'} + + ast-v8-to-istanbul@0.3.11: + resolution: {integrity: sha512-Qya9fkoofMjCBNVdWINMjB5KZvkYfaO9/anwkWnjxibpWUxo5iHl2sOdP7/uAqaRuUYuoo8rDwnbaaKVFxoUvw==} + async@3.2.6: resolution: {integrity: sha512-htCUDlxyyCLMgaM3xXg0C0LW2xqfuQ6p05pCEIsXuyQ+a1koYKTuBMzRNwmybfLgvJDMd0r1LTn4+E0Ti6C2AA==} + chai@6.2.2: + resolution: {integrity: sha512-NUPRluOfOiTKBKvWPtSD4PhFvWCqOi0BGStNWs57X9js7XGTprSmFoz5F0tWhR4WPjNeR9jXqdC7/UpSJTnlRg==} + engines: {node: '>=18'} + color-convert@3.1.3: resolution: {integrity: sha512-fasDH2ont2GqF5HpyO4w0+BcewlhHEZOFn9c1ckZdHpJ56Qb7MHhH/IcJZbBGgvdtwdwNbLvxiBEdg336iA9Sg==} engines: {node: '>=14.6'} @@ -115,16 +571,40 @@ packages: resolution: {integrity: sha512-2l0gsPOLPs5t6GFZfQZKnL1OJNYFcuC/ETWsW4VtKVD/tg4ICa9x+jb9bkPffkMdRpRpuUaO/fKkHCBeiCKh8g==} engines: {node: '>=18'} - dotenv@17.2.3: - resolution: {integrity: 
sha512-JVUnt+DUIzu87TABbhPmNfVdBDt18BLOWjMUFJMSi/Qqg7NTYtabbvSNJGOJ7afbRuv9D/lngizHtP7QyLQ+9w==} + dotenv@17.2.4: + resolution: {integrity: sha512-mudtfb4zRB4bVvdj0xRo+e6duH1csJRM8IukBqfTRvHotn9+LBXB8ynAidP9zHqoRC/fsllXgk4kCKlR21fIhw==} engines: {node: '>=12'} enabled@2.0.0: resolution: {integrity: sha512-AKrN98kuwOzMIdAizXGI86UFBoo26CL21UM763y1h/GMSJ4/OHU9k2YlsmBpyScFo/wbLzWQJBMCW4+IO3/+OQ==} + es-module-lexer@1.7.0: + resolution: {integrity: sha512-jEQoCwk8hyb2AZziIOLhDqpm5+2ww5uIE6lkO/6jcOCusfk6LhMHpXXfBLXTZ7Ydyt0j4VoUQv6uGNYbdW+kBA==} + + esbuild@0.27.3: + resolution: {integrity: sha512-8VwMnyGCONIs6cWue2IdpHxHnAjzxnw2Zr7MkVxB2vjmQ2ivqGFb4LEG3SMnv0Gb2F/G/2yA8zUaiL1gywDCCg==} + engines: {node: '>=18'} + hasBin: true + + estree-walker@3.0.3: + resolution: {integrity: sha512-7RUKfXgSMMkzt6ZuXmqapOurLGPPfgj6l9uRZ7lRGolvk0y2yocc35LdcxKC5PQZdn2DMqioAQ2NoWcrTKmm6g==} + + expect-type@1.3.0: + resolution: {integrity: sha512-knvyeauYhqjOYvQ66MznSMs83wmHrCycNEN6Ao+2AeYEfxUIkuiVxdEa1qlGEPK+We3n0THiDciYSsCcgW/DoA==} + engines: {node: '>=12.0.0'} + fast-deep-equal@3.1.3: resolution: {integrity: sha512-f3qQ9oQy9j2AhBe/H9VC91wLmKBCCU/gDOnKNAYG5hswO7BLKj09Hc5HYNz9cGI++xlpDCIgDaitVs03ATR84Q==} + fdir@6.5.0: + resolution: {integrity: sha512-tIbYtZbucOs0BRGqPJkshJUYdL+SDH7dVM8gjy+ERp3WAUjLEFJE+02kanyHtwjWOnwrKYBiwAmM0p4kLJAnXg==} + engines: {node: '>=12.0.0'} + peerDependencies: + picomatch: ^3 || ^4 + peerDependenciesMeta: + picomatch: + optional: true + fecha@4.2.3: resolution: {integrity: sha512-OP2IUU6HeYKJi3i0z4A19kHMQoLVs4Hc+DPqqxI2h/DPZHTm/vjsfC6P0b4jCMy14XizLBqvndQ+UilD7707Jw==} @@ -134,6 +614,18 @@ packages: fn.name@1.1.0: resolution: {integrity: sha512-GRnmB5gPyJpAhTQdSZTSp9uaPSvl09KoYcMQtsB9rQoOmzs9dH6ffeccH+Z+cv6P68Hu5bC6JjRh4Ah/mHSNRw==} + fsevents@2.3.3: + resolution: {integrity: sha512-5xoDfX+fL7faATnagmWPpbFtwh/R77WmMMqqHGS65C3vvB0YHrgF+B1YmZ3441tMj5n63k0212XNoJwzlhffQw==} + engines: {node: ^8.16.0 || ^10.6.0 || >=11.0.0} + os: [darwin] + + has-flag@4.0.0: + 
resolution: {integrity: sha512-EykJT/Q1KjTWctppgIAgfSO0tKVuZUjhgMr17kqTumMl6Afv3EISleU7qZUzoXDFTAHTDC4NOoG/ZxU3EvlMPQ==} + engines: {node: '>=8'} + + html-escaper@2.0.2: + resolution: {integrity: sha512-H2iMtd0I4Mt5eYiapRdIDjp+XzelXQ0tFE4JS7YFwFevXXMmOp9myNrUvCg0D6ws8iqkRPBfKHgbwig1SmlLfg==} + inherits@2.0.4: resolution: {integrity: sha512-k/vGaX4/Yla3WzyMCvTQOXYeIHvqOKtnqBduzTHpzpQZzAskKMhZ2K+EnBiSM9zGSoIFeMpXKxa4dYeZIQqewQ==} @@ -141,6 +633,21 @@ packages: resolution: {integrity: sha512-hFoiJiTl63nn+kstHGBtewWSKnQLpyb155KHheA1l39uvtO9nWIop1p3udqPcUd/xbF1VLMO4n7OI6p7RbngDg==} engines: {node: '>=8'} + istanbul-lib-coverage@3.2.2: + resolution: {integrity: sha512-O8dpsF+r0WV/8MNRKfnmrtCWhuKjxrq2w+jpzBL5UZKTi2LeVWnWOmWRxFlesJONmc+wLAGvKQZEOanko0LFTg==} + engines: {node: '>=8'} + + istanbul-lib-report@3.0.1: + resolution: {integrity: sha512-GCfE1mtsHGOELCU8e/Z7YWzpmybrx/+dSTfLrvY8qRmaY6zXTKWn6WQIjaAFw069icm6GVMNkgu0NzI4iPZUNw==} + engines: {node: '>=10'} + + istanbul-reports@3.2.0: + resolution: {integrity: sha512-HGYWWS/ehqTV3xN10i23tkPkpH46MLCIMFNCaaKNavAXTF1RkqxawEPtnjnGZ6XKSInBKkiOA5BKS+aZiY3AvA==} + engines: {node: '>=8'} + + js-tokens@10.0.0: + resolution: {integrity: sha512-lM/UBzQmfJRo9ABXbPWemivdCW8V2G8FHaHdypQaIy523snUjog0W71ayWXTjiR+ixeMyVHN2XcpnTd/liPg/Q==} + kuler@2.0.0: resolution: {integrity: sha512-Xq9nH7KlWZmXAtodXDDRE7vs6DU1gTU8zYDHDiWLSip45Egwq3plLHzPn27NgvzL2r1LMPC1vdqh98sQxtqj4A==} @@ -157,23 +664,110 @@ packages: magic-bytes.js@1.13.0: resolution: {integrity: sha512-afO2mnxW7GDTXMm5/AoN1WuOcdoKhtgXjIvHmobqTD1grNplhGdv3PFOyjCVmrnOZBIT/gD/koDKpYG+0mvHcg==} + magic-string@0.30.21: + resolution: {integrity: sha512-vd2F4YUyEXKGcLHoq+TEyCjxueSeHnFxyyjNp80yg0XV4vUhnDer/lvvlqM/arB5bXQN5K2/3oinyCRyx8T2CQ==} + + magicast@0.5.2: + resolution: {integrity: sha512-E3ZJh4J3S9KfwdjZhe2afj6R9lGIN5Pher1pF39UGrXRqq/VDaGVIGN13BjHd2u8B61hArAGOnso7nBOouW3TQ==} + + make-dir@4.0.0: + resolution: {integrity: 
sha512-hXdUTZYIVOt1Ex//jAQi+wTZZpUpwBj/0QsOzqegb3rGMMeJiSEu5xLHnYfBrRV4RH2+OCSOO95Is/7x1WJ4bw==} + engines: {node: '>=10'} + moment@2.30.1: resolution: {integrity: sha512-uEmtNhbDOrWPFS+hdjFCBfy9f2YoyzRpwcl+DqpC6taX21FzsTLQVbMV/W7PzNSX6x/bhC1zA3c2UQ5NzH6how==} ms@2.1.3: resolution: {integrity: sha512-6FlzubTLZG3J2a/NVCAleEhjzq5oxgHyaCU9yYXvcLsvoVaHJq/s5xXI6/XXP6tz7R9xAOtHnSO/tXtF3WRTlA==} + nanoid@3.3.11: + resolution: {integrity: sha512-N8SpfPUnUp1bK+PMYW8qSWdl9U+wwNWI4QKxOYDy9JAro3WMX7p2OeVRF9v+347pnakNevPmiHhNmZ2HbFA76w==} + engines: {node: ^10 || ^12 || ^13.7 || ^14 || >=15.0.1} + hasBin: true + object-hash@3.0.0: resolution: {integrity: sha512-RSn9F68PjH9HqtltsSnqYC1XXoWe9Bju5+213R98cNGttag9q9yAOTzdbsqvIa7aNm5WffBZFpWYr2aWrklWAw==} engines: {node: '>= 6'} + obug@2.1.1: + resolution: {integrity: sha512-uTqF9MuPraAQ+IsnPf366RG4cP9RtUi7MLO1N3KEc+wb0a6yKpeL0lmk2IB1jY5KHPAlTc6T/JRdC/YqxHNwkQ==} + one-time@1.0.0: resolution: {integrity: sha512-5DXOiRKwuSEcQ/l0kGCF6Q3jcADFv5tSmRaJck/OqkVFcOzutB134KRSfF0xDrL39MNnqxbHBbUUcjZIhTgb2g==} + pathe@2.0.3: + resolution: {integrity: sha512-WUjGcAqP1gQacoQe+OBJsFA7Ld4DyXuUIjZ5cc75cLHvJ7dtNsTugphxIADwspS+AraAUePCKrSVtPLFj/F88w==} + + pg-cloudflare@1.3.0: + resolution: {integrity: sha512-6lswVVSztmHiRtD6I8hw4qP/nDm1EJbKMRhf3HCYaqud7frGysPv7FYJ5noZQdhQtN2xJnimfMtvQq21pdbzyQ==} + + pg-connection-string@2.11.0: + resolution: {integrity: sha512-kecgoJwhOpxYU21rZjULrmrBJ698U2RxXofKVzOn5UDj61BPj/qMb7diYUR1nLScCDbrztQFl1TaQZT0t1EtzQ==} + + pg-int8@1.0.1: + resolution: {integrity: sha512-WCtabS6t3c8SkpDBUlb1kjOs7l66xsGdKpIPZsg4wR+B3+u9UAum2odSsF9tnvxg80h4ZxLWMy4pRjOsFIqQpw==} + engines: {node: '>=4.0.0'} + + pg-pool@3.11.0: + resolution: {integrity: sha512-MJYfvHwtGp870aeusDh+hg9apvOe2zmpZJpyt+BMtzUWlVqbhFmMK6bOBXLBUPd7iRtIF9fZplDc7KrPN3PN7w==} + peerDependencies: + pg: '>=8.0' + + pg-protocol@1.11.0: + resolution: {integrity: sha512-pfsxk2M9M3BuGgDOfuy37VNRRX3jmKgMjcvAcWqNDpZSf4cUmv8HSOl5ViRQFsfARFn0KuUQTgLxVMbNq5NW3g==} + + 
pg-types@2.2.0: + resolution: {integrity: sha512-qTAAlrEsl8s4OiEQY69wDvcMIdQN6wdz5ojQiOy6YRMuynxenON0O5oCpJI6lshc6scgAY8qvJ2On/p+CXY0GA==} + engines: {node: '>=4'} + + pg@8.18.0: + resolution: {integrity: sha512-xqrUDL1b9MbkydY/s+VZ6v+xiMUmOUk7SS9d/1kpyQxoJ6U9AO1oIJyUWVZojbfe5Cc/oluutcgFG4L9RDP1iQ==} + engines: {node: '>= 16.0.0'} + peerDependencies: + pg-native: '>=3.0.1' + peerDependenciesMeta: + pg-native: + optional: true + + pgpass@1.0.5: + resolution: {integrity: sha512-FdW9r/jQZhSeohs1Z3sI1yxFQNFvMcnmfuj4WBMUTxOrAyLMaTcE1aAMBiTlbMNaXvBCQuVi0R7hd8udDSP7ug==} + + picocolors@1.1.1: + resolution: {integrity: sha512-xceH2snhtb5M9liqDsmEw56le376mTZkEX/jEb/RxNFyegNul7eNslCXP9FDj/Lcu0X8KEyMceP2ntpaHrDEVA==} + + picomatch@4.0.3: + resolution: {integrity: sha512-5gTmgEY/sqK6gFXLIsQNH19lWb4ebPDLA4SdLP7dsWkIXHWlG66oPuVvXSGFPppYZz8ZDZq0dYYrbHfBCVUb1Q==} + engines: {node: '>=12'} + + postcss@8.5.6: + resolution: {integrity: sha512-3Ybi1tAuwAP9s0r1UQ2J4n5Y0G05bJkpUIO0/bI9MhwmD70S5aTWbXGBwxHrelT+XM1k6dM0pk+SwNkpTRN7Pg==} + engines: {node: ^10 || ^12 || >=14} + + postgres-array@2.0.0: + resolution: {integrity: sha512-VpZrUqU5A69eQyW2c5CA1jtLecCsN2U/bD6VilrFDWq5+5UIEVO7nazS3TEcHf1zuPYO/sqGvUvW62g86RXZuA==} + engines: {node: '>=4'} + + postgres-bytea@1.0.1: + resolution: {integrity: sha512-5+5HqXnsZPE65IJZSMkZtURARZelel2oXUEO8rH83VS/hxH5vv1uHquPg5wZs8yMAfdv971IU+kcPUczi7NVBQ==} + engines: {node: '>=0.10.0'} + + postgres-date@1.0.7: + resolution: {integrity: sha512-suDmjLVQg78nMK2UZ454hAG+OAW+HQPZ6n++TNDUX+L0+uUlLywnoxJKDou51Zm+zTCjrCl0Nq6J9C5hP9vK/Q==} + engines: {node: '>=0.10.0'} + + postgres-interval@1.2.0: + resolution: {integrity: sha512-9ZhXKM/rw350N1ovuWHbGxnGh/SNJ4cnxHiM0rxE4VN41wsg8P8zWn9hv/buK00RP4WvlOyr/RBDiptyxVbkZQ==} + engines: {node: '>=0.10.0'} + readable-stream@3.6.2: resolution: {integrity: sha512-9u/sniCrY3D5WdsERHzHE4G2YCXqoG5FTHUiCC4SIbr6XcLZBY05ya9EKjYek9O5xOAwjGq+1JdGBAS7Q9ScoA==} engines: {node: '>= 6'} + rollup@4.57.1: + resolution: {integrity: 
sha512-oQL6lgK3e2QZeQ7gcgIkS2YZPg5slw37hYufJ3edKlfQSGGm8ICoxswK15ntSzF/a8+h7ekRy7k7oWc3BQ7y8A==} + engines: {node: '>=18.0.0', npm: '>=8.0.0'} + hasBin: true + safe-buffer@5.2.1: resolution: {integrity: sha512-rp3So07KcdmmKbGvgaNxQSJr7bGVSVk5S9Eq1F+ppbRo70+YeaDxkw5Dd8NPN+GD6bjnYm2VuPuCXmpuYvmCXQ==} @@ -181,15 +775,56 @@ packages: resolution: {integrity: sha512-b3rppTKm9T+PsVCBEOUR46GWI7fdOs00VKZ1+9c1EWDaDMvjQc6tUwuFyIprgGgTcWoVHSKrU8H31ZHA2e0RHA==} engines: {node: '>=10'} + semver@7.7.4: + resolution: {integrity: sha512-vFKC2IEtQnVhpT78h1Yp8wzwrf8CM+MzKMHGJZfBtzhZNycRFnXsHk6E5TxIkkMsgNS7mdX3AGB7x2QM2di4lA==} + engines: {node: '>=10'} + hasBin: true + + siginfo@2.0.0: + resolution: {integrity: sha512-ybx0WO1/8bSBLEWXZvEd7gMW3Sn3JFlW3TvX1nREbDLRNQNaeNN8WK0meBwPdAaOI7TtRRRJn/Es1zhrrCHu7g==} + + source-map-js@1.2.1: + resolution: {integrity: sha512-UXWMKhLOwVKb728IUtQPXxfYU+usdybtUrK/8uGE8CQMvrhOpwvzDBwj0QhSL7MQc7vIsISBG8VQ8+IDQxpfQA==} + engines: {node: '>=0.10.0'} + + split2@4.2.0: + resolution: {integrity: sha512-UcjcJOWknrNkF6PLX83qcHM6KHgVKNkV62Y8a5uYDVv9ydGQVwAHMKqHdJje1VTWpljG0WYpCDhrCdAOYH4TWg==} + engines: {node: '>= 10.x'} + stack-trace@0.0.10: resolution: {integrity: sha512-KGzahc7puUKkzyMt+IqAep+TVNbKP+k2Lmwhub39m1AsTSkaDutx56aDCo+HLDzf/D26BIHTJWNiTG1KAJiQCg==} + stackback@0.0.2: + resolution: {integrity: sha512-1XMJE5fQo1jGH6Y/7ebnwPOBEkIEnT4QF32d5R1+VXdXveM0IBMJt8zfaxX1P3QhVwrYe+576+jkANtSS2mBbw==} + + std-env@3.10.0: + resolution: {integrity: sha512-5GS12FdOZNliM5mAOxFRg7Ir0pWz8MdpYm6AY6VPkGpbA7ZzmbzNcBJQ0GPvvyWgcY7QAhCgf9Uy89I03faLkg==} + string_decoder@1.3.0: resolution: {integrity: sha512-hkRX8U1WjJFd8LsDJ2yQ/wWWxaopEsABU1XfkM8A+j0+85JAGppt16cr1Whg6KIbb4okU6Mql6BOj+uup/wKeA==} + supports-color@7.2.0: + resolution: {integrity: sha512-qpCAvRl9stuOHveKsn7HncJRvv501qIacKzQlO/+Lwxc9+0q2wLyv4Dfvt80/DPn2pqOBsJdDiogXGR9+OvwRw==} + engines: {node: '>=8'} + text-hex@1.0.0: resolution: {integrity: 
sha512-uuVGNWzgJ4yhRaNSiubPY7OjISw4sw4E5Uv0wbjp+OzcbmVU/rsT8ujgcXJhn9ypzsgr5vlzpPqP+MBBKcGvbg==} + tinybench@2.9.0: + resolution: {integrity: sha512-0+DUvqWMValLmha6lr4kD8iAMK1HzV0/aKnCtWb9v9641TnP/MFb7Pc2bxoxQjTXAErryXVgUOfv2YqNllqGeg==} + + tinyexec@1.0.2: + resolution: {integrity: sha512-W/KYk+NFhkmsYpuHq5JykngiOCnxeVL8v8dFnqxSD8qEEdRfXk1SDM6JzNqcERbcGYj9tMrDQBYV9cjgnunFIg==} + engines: {node: '>=18'} + + tinyglobby@0.2.15: + resolution: {integrity: sha512-j2Zq4NyQYG5XMST4cbs02Ak8iJUdxRM0XI5QyxXuZOzKOINmWurp3smXu3y5wDcJrptwpSjgXHzIQxR0omXljQ==} + engines: {node: '>=12.0.0'} + + tinyrainbow@3.0.3: + resolution: {integrity: sha512-PSkbLUoxOFRzJYjjxHJt9xro7D+iilgMX/C9lawzVuYiIdcihh9DXmVibBe8lmcFrRi/VzlPjBxbN7rH24q8/Q==} + engines: {node: '>=14.0.0'} + triple-beam@1.4.1: resolution: {integrity: sha512-aZbgViZrg1QNcG+LULa7nhZpJTZSLm/mXnHXnbAbjmN5aSa0y7V+wvv6+4WaBtpISJzThKy+PIPxc1Nq1EJ9mg==} engines: {node: '>= 14.0.0'} @@ -210,6 +845,85 @@ packages: util-deprecate@1.0.2: resolution: {integrity: sha512-EPD5q1uXyFxJpCrLnCc1nHnq3gOa6DZBocAIiI2TaSCA7VCJ1UJDMagCzIkXNsUYfD1daK//LTEQ8xiIbrHtcw==} + vite@7.3.1: + resolution: {integrity: sha512-w+N7Hifpc3gRjZ63vYBXA56dvvRlNWRczTdmCBBa+CotUzAPf5b7YMdMR/8CQoeYE5LX3W4wj6RYTgonm1b9DA==} + engines: {node: ^20.19.0 || >=22.12.0} + hasBin: true + peerDependencies: + '@types/node': ^20.19.0 || >=22.12.0 + jiti: '>=1.21.0' + less: ^4.0.0 + lightningcss: ^1.21.0 + sass: ^1.70.0 + sass-embedded: ^1.70.0 + stylus: '>=0.54.8' + sugarss: ^5.0.0 + terser: ^5.16.0 + tsx: ^4.8.1 + yaml: ^2.4.2 + peerDependenciesMeta: + '@types/node': + optional: true + jiti: + optional: true + less: + optional: true + lightningcss: + optional: true + sass: + optional: true + sass-embedded: + optional: true + stylus: + optional: true + sugarss: + optional: true + terser: + optional: true + tsx: + optional: true + yaml: + optional: true + + vitest@4.0.18: + resolution: {integrity: 
sha512-hOQuK7h0FGKgBAas7v0mSAsnvrIgAvWmRFjmzpJ7SwFHH3g1k2u37JtYwOwmEKhK6ZO3v9ggDBBm0La1LCK4uQ==} + engines: {node: ^20.0.0 || ^22.0.0 || >=24.0.0} + hasBin: true + peerDependencies: + '@edge-runtime/vm': '*' + '@opentelemetry/api': ^1.9.0 + '@types/node': ^20.0.0 || ^22.0.0 || >=24.0.0 + '@vitest/browser-playwright': 4.0.18 + '@vitest/browser-preview': 4.0.18 + '@vitest/browser-webdriverio': 4.0.18 + '@vitest/ui': 4.0.18 + happy-dom: '*' + jsdom: '*' + peerDependenciesMeta: + '@edge-runtime/vm': + optional: true + '@opentelemetry/api': + optional: true + '@types/node': + optional: true + '@vitest/browser-playwright': + optional: true + '@vitest/browser-preview': + optional: true + '@vitest/browser-webdriverio': + optional: true + '@vitest/ui': + optional: true + happy-dom: + optional: true + jsdom: + optional: true + + why-is-node-running@2.3.0: + resolution: {integrity: sha512-hUrmaWBdVDcxvYqnyh09zunKzROWjbZTiNy8dBEjkS7ehEDQibXJ7XvlmtbwuTclUiIyN+CyXQD4Vmko8fNm8w==} + engines: {node: '>=8'} + hasBin: true + winston-daily-rotate-file@5.0.0: resolution: {integrity: sha512-JDjiXXkM5qvwY06733vf09I2wnMXpZEhxEVOSPenZMii+g7pcDcTBt2MRugnoi8BwVSuCT2jfRXBUy+n1Zz/Yw==} engines: {node: '>=8'} @@ -236,8 +950,62 @@ packages: utf-8-validate: optional: true + xtend@4.0.2: + resolution: {integrity: sha512-LKYU1iAXJXUgAXn9URjiu+MWhyUXHsvfp7mcuYm9dSUKK0/CjtrUwFAxD82/mCWbtLsGjFIad0wIsod4zrTAEQ==} + engines: {node: '>=0.4'} + snapshots: + '@babel/helper-string-parser@7.27.1': {} + + '@babel/helper-validator-identifier@7.28.5': {} + + '@babel/parser@7.29.0': + dependencies: + '@babel/types': 7.29.0 + + '@babel/types@7.29.0': + dependencies: + '@babel/helper-string-parser': 7.27.1 + '@babel/helper-validator-identifier': 7.28.5 + + '@bcoe/v8-coverage@1.0.2': {} + + '@biomejs/biome@2.3.14': + optionalDependencies: + '@biomejs/cli-darwin-arm64': 2.3.14 + '@biomejs/cli-darwin-x64': 2.3.14 + '@biomejs/cli-linux-arm64': 2.3.14 + '@biomejs/cli-linux-arm64-musl': 2.3.14 + 
'@biomejs/cli-linux-x64': 2.3.14 + '@biomejs/cli-linux-x64-musl': 2.3.14 + '@biomejs/cli-win32-arm64': 2.3.14 + '@biomejs/cli-win32-x64': 2.3.14 + + '@biomejs/cli-darwin-arm64@2.3.14': + optional: true + + '@biomejs/cli-darwin-x64@2.3.14': + optional: true + + '@biomejs/cli-linux-arm64-musl@2.3.14': + optional: true + + '@biomejs/cli-linux-arm64@2.3.14': + optional: true + + '@biomejs/cli-linux-x64-musl@2.3.14': + optional: true + + '@biomejs/cli-linux-x64@2.3.14': + optional: true + + '@biomejs/cli-win32-arm64@2.3.14': + optional: true + + '@biomejs/cli-win32-x64@2.3.14': + optional: true + '@colors/colors@1.6.0': {} '@dabh/diagnostics@2.0.8': @@ -295,6 +1063,168 @@ snapshots: - bufferutil - utf-8-validate + '@esbuild/aix-ppc64@0.27.3': + optional: true + + '@esbuild/android-arm64@0.27.3': + optional: true + + '@esbuild/android-arm@0.27.3': + optional: true + + '@esbuild/android-x64@0.27.3': + optional: true + + '@esbuild/darwin-arm64@0.27.3': + optional: true + + '@esbuild/darwin-x64@0.27.3': + optional: true + + '@esbuild/freebsd-arm64@0.27.3': + optional: true + + '@esbuild/freebsd-x64@0.27.3': + optional: true + + '@esbuild/linux-arm64@0.27.3': + optional: true + + '@esbuild/linux-arm@0.27.3': + optional: true + + '@esbuild/linux-ia32@0.27.3': + optional: true + + '@esbuild/linux-loong64@0.27.3': + optional: true + + '@esbuild/linux-mips64el@0.27.3': + optional: true + + '@esbuild/linux-ppc64@0.27.3': + optional: true + + '@esbuild/linux-riscv64@0.27.3': + optional: true + + '@esbuild/linux-s390x@0.27.3': + optional: true + + '@esbuild/linux-x64@0.27.3': + optional: true + + '@esbuild/netbsd-arm64@0.27.3': + optional: true + + '@esbuild/netbsd-x64@0.27.3': + optional: true + + '@esbuild/openbsd-arm64@0.27.3': + optional: true + + '@esbuild/openbsd-x64@0.27.3': + optional: true + + '@esbuild/openharmony-arm64@0.27.3': + optional: true + + '@esbuild/sunos-x64@0.27.3': + optional: true + + '@esbuild/win32-arm64@0.27.3': + optional: true + + 
'@esbuild/win32-ia32@0.27.3': + optional: true + + '@esbuild/win32-x64@0.27.3': + optional: true + + '@jridgewell/resolve-uri@3.1.2': {} + + '@jridgewell/sourcemap-codec@1.5.5': {} + + '@jridgewell/trace-mapping@0.3.31': + dependencies: + '@jridgewell/resolve-uri': 3.1.2 + '@jridgewell/sourcemap-codec': 1.5.5 + + '@rollup/rollup-android-arm-eabi@4.57.1': + optional: true + + '@rollup/rollup-android-arm64@4.57.1': + optional: true + + '@rollup/rollup-darwin-arm64@4.57.1': + optional: true + + '@rollup/rollup-darwin-x64@4.57.1': + optional: true + + '@rollup/rollup-freebsd-arm64@4.57.1': + optional: true + + '@rollup/rollup-freebsd-x64@4.57.1': + optional: true + + '@rollup/rollup-linux-arm-gnueabihf@4.57.1': + optional: true + + '@rollup/rollup-linux-arm-musleabihf@4.57.1': + optional: true + + '@rollup/rollup-linux-arm64-gnu@4.57.1': + optional: true + + '@rollup/rollup-linux-arm64-musl@4.57.1': + optional: true + + '@rollup/rollup-linux-loong64-gnu@4.57.1': + optional: true + + '@rollup/rollup-linux-loong64-musl@4.57.1': + optional: true + + '@rollup/rollup-linux-ppc64-gnu@4.57.1': + optional: true + + '@rollup/rollup-linux-ppc64-musl@4.57.1': + optional: true + + '@rollup/rollup-linux-riscv64-gnu@4.57.1': + optional: true + + '@rollup/rollup-linux-riscv64-musl@4.57.1': + optional: true + + '@rollup/rollup-linux-s390x-gnu@4.57.1': + optional: true + + '@rollup/rollup-linux-x64-gnu@4.57.1': + optional: true + + '@rollup/rollup-linux-x64-musl@4.57.1': + optional: true + + '@rollup/rollup-openbsd-x64@4.57.1': + optional: true + + '@rollup/rollup-openharmony-arm64@4.57.1': + optional: true + + '@rollup/rollup-win32-arm64-msvc@4.57.1': + optional: true + + '@rollup/rollup-win32-ia32-msvc@4.57.1': + optional: true + + '@rollup/rollup-win32-x64-gnu@4.57.1': + optional: true + + '@rollup/rollup-win32-x64-msvc@4.57.1': + optional: true + '@sapphire/async-queue@1.5.5': {} '@sapphire/shapeshift@4.0.0': @@ -309,6 +1239,17 @@ snapshots: color: 5.0.3 text-hex: 1.0.0 + 
'@standard-schema/spec@1.1.0': {} + + '@types/chai@5.2.3': + dependencies: + '@types/deep-eql': 4.0.2 + assertion-error: 2.0.1 + + '@types/deep-eql@4.0.2': {} + + '@types/estree@1.0.8': {} + '@types/node@25.2.0': dependencies: undici-types: 7.16.0 @@ -319,10 +1260,73 @@ snapshots: dependencies: '@types/node': 25.2.0 + '@vitest/coverage-v8@4.0.18(vitest@4.0.18(@types/node@25.2.0))': + dependencies: + '@bcoe/v8-coverage': 1.0.2 + '@vitest/utils': 4.0.18 + ast-v8-to-istanbul: 0.3.11 + istanbul-lib-coverage: 3.2.2 + istanbul-lib-report: 3.0.1 + istanbul-reports: 3.2.0 + magicast: 0.5.2 + obug: 2.1.1 + std-env: 3.10.0 + tinyrainbow: 3.0.3 + vitest: 4.0.18(@types/node@25.2.0) + + '@vitest/expect@4.0.18': + dependencies: + '@standard-schema/spec': 1.1.0 + '@types/chai': 5.2.3 + '@vitest/spy': 4.0.18 + '@vitest/utils': 4.0.18 + chai: 6.2.2 + tinyrainbow: 3.0.3 + + '@vitest/mocker@4.0.18(vite@7.3.1(@types/node@25.2.0))': + dependencies: + '@vitest/spy': 4.0.18 + estree-walker: 3.0.3 + magic-string: 0.30.21 + optionalDependencies: + vite: 7.3.1(@types/node@25.2.0) + + '@vitest/pretty-format@4.0.18': + dependencies: + tinyrainbow: 3.0.3 + + '@vitest/runner@4.0.18': + dependencies: + '@vitest/utils': 4.0.18 + pathe: 2.0.3 + + '@vitest/snapshot@4.0.18': + dependencies: + '@vitest/pretty-format': 4.0.18 + magic-string: 0.30.21 + pathe: 2.0.3 + + '@vitest/spy@4.0.18': {} + + '@vitest/utils@4.0.18': + dependencies: + '@vitest/pretty-format': 4.0.18 + tinyrainbow: 3.0.3 + '@vladfrangu/async_event_emitter@2.4.7': {} + assertion-error@2.0.1: {} + + ast-v8-to-istanbul@0.3.11: + dependencies: + '@jridgewell/trace-mapping': 0.3.31 + estree-walker: 3.0.3 + js-tokens: 10.0.0 + async@3.2.6: {} + chai@6.2.2: {} + color-convert@3.1.3: dependencies: color-name: 2.1.0 @@ -359,12 +1363,53 @@ snapshots: - bufferutil - utf-8-validate - dotenv@17.2.3: {} + dotenv@17.2.4: {} enabled@2.0.0: {} + es-module-lexer@1.7.0: {} + + esbuild@0.27.3: + optionalDependencies: + '@esbuild/aix-ppc64': 0.27.3 + 
'@esbuild/android-arm': 0.27.3 + '@esbuild/android-arm64': 0.27.3 + '@esbuild/android-x64': 0.27.3 + '@esbuild/darwin-arm64': 0.27.3 + '@esbuild/darwin-x64': 0.27.3 + '@esbuild/freebsd-arm64': 0.27.3 + '@esbuild/freebsd-x64': 0.27.3 + '@esbuild/linux-arm': 0.27.3 + '@esbuild/linux-arm64': 0.27.3 + '@esbuild/linux-ia32': 0.27.3 + '@esbuild/linux-loong64': 0.27.3 + '@esbuild/linux-mips64el': 0.27.3 + '@esbuild/linux-ppc64': 0.27.3 + '@esbuild/linux-riscv64': 0.27.3 + '@esbuild/linux-s390x': 0.27.3 + '@esbuild/linux-x64': 0.27.3 + '@esbuild/netbsd-arm64': 0.27.3 + '@esbuild/netbsd-x64': 0.27.3 + '@esbuild/openbsd-arm64': 0.27.3 + '@esbuild/openbsd-x64': 0.27.3 + '@esbuild/openharmony-arm64': 0.27.3 + '@esbuild/sunos-x64': 0.27.3 + '@esbuild/win32-arm64': 0.27.3 + '@esbuild/win32-ia32': 0.27.3 + '@esbuild/win32-x64': 0.27.3 + + estree-walker@3.0.3: + dependencies: + '@types/estree': 1.0.8 + + expect-type@1.3.0: {} + fast-deep-equal@3.1.3: {} + fdir@6.5.0(picomatch@4.0.3): + optionalDependencies: + picomatch: 4.0.3 + fecha@4.2.3: {} file-stream-rotator@0.6.1: @@ -373,10 +1418,32 @@ snapshots: fn.name@1.1.0: {} + fsevents@2.3.3: + optional: true + + has-flag@4.0.0: {} + + html-escaper@2.0.2: {} + inherits@2.0.4: {} is-stream@2.0.1: {} + istanbul-lib-coverage@3.2.2: {} + + istanbul-lib-report@3.0.1: + dependencies: + istanbul-lib-coverage: 3.2.2 + make-dir: 4.0.0 + supports-color: 7.2.0 + + istanbul-reports@3.2.0: + dependencies: + html-escaper: 2.0.2 + istanbul-lib-report: 3.0.1 + + js-tokens@10.0.0: {} + kuler@2.0.0: {} lodash.snakecase@4.1.1: {} @@ -394,34 +1461,167 @@ snapshots: magic-bytes.js@1.13.0: {} + magic-string@0.30.21: + dependencies: + '@jridgewell/sourcemap-codec': 1.5.5 + + magicast@0.5.2: + dependencies: + '@babel/parser': 7.29.0 + '@babel/types': 7.29.0 + source-map-js: 1.2.1 + + make-dir@4.0.0: + dependencies: + semver: 7.7.4 + moment@2.30.1: {} ms@2.1.3: {} + nanoid@3.3.11: {} + object-hash@3.0.0: {} + obug@2.1.1: {} + one-time@1.0.0: dependencies: 
fn.name: 1.1.0 + pathe@2.0.3: {} + + pg-cloudflare@1.3.0: + optional: true + + pg-connection-string@2.11.0: {} + + pg-int8@1.0.1: {} + + pg-pool@3.11.0(pg@8.18.0): + dependencies: + pg: 8.18.0 + + pg-protocol@1.11.0: {} + + pg-types@2.2.0: + dependencies: + pg-int8: 1.0.1 + postgres-array: 2.0.0 + postgres-bytea: 1.0.1 + postgres-date: 1.0.7 + postgres-interval: 1.2.0 + + pg@8.18.0: + dependencies: + pg-connection-string: 2.11.0 + pg-pool: 3.11.0(pg@8.18.0) + pg-protocol: 1.11.0 + pg-types: 2.2.0 + pgpass: 1.0.5 + optionalDependencies: + pg-cloudflare: 1.3.0 + + pgpass@1.0.5: + dependencies: + split2: 4.2.0 + + picocolors@1.1.1: {} + + picomatch@4.0.3: {} + + postcss@8.5.6: + dependencies: + nanoid: 3.3.11 + picocolors: 1.1.1 + source-map-js: 1.2.1 + + postgres-array@2.0.0: {} + + postgres-bytea@1.0.1: {} + + postgres-date@1.0.7: {} + + postgres-interval@1.2.0: + dependencies: + xtend: 4.0.2 + readable-stream@3.6.2: dependencies: inherits: 2.0.4 string_decoder: 1.3.0 util-deprecate: 1.0.2 + rollup@4.57.1: + dependencies: + '@types/estree': 1.0.8 + optionalDependencies: + '@rollup/rollup-android-arm-eabi': 4.57.1 + '@rollup/rollup-android-arm64': 4.57.1 + '@rollup/rollup-darwin-arm64': 4.57.1 + '@rollup/rollup-darwin-x64': 4.57.1 + '@rollup/rollup-freebsd-arm64': 4.57.1 + '@rollup/rollup-freebsd-x64': 4.57.1 + '@rollup/rollup-linux-arm-gnueabihf': 4.57.1 + '@rollup/rollup-linux-arm-musleabihf': 4.57.1 + '@rollup/rollup-linux-arm64-gnu': 4.57.1 + '@rollup/rollup-linux-arm64-musl': 4.57.1 + '@rollup/rollup-linux-loong64-gnu': 4.57.1 + '@rollup/rollup-linux-loong64-musl': 4.57.1 + '@rollup/rollup-linux-ppc64-gnu': 4.57.1 + '@rollup/rollup-linux-ppc64-musl': 4.57.1 + '@rollup/rollup-linux-riscv64-gnu': 4.57.1 + '@rollup/rollup-linux-riscv64-musl': 4.57.1 + '@rollup/rollup-linux-s390x-gnu': 4.57.1 + '@rollup/rollup-linux-x64-gnu': 4.57.1 + '@rollup/rollup-linux-x64-musl': 4.57.1 + '@rollup/rollup-openbsd-x64': 4.57.1 + '@rollup/rollup-openharmony-arm64': 4.57.1 + 
'@rollup/rollup-win32-arm64-msvc': 4.57.1 + '@rollup/rollup-win32-ia32-msvc': 4.57.1 + '@rollup/rollup-win32-x64-gnu': 4.57.1 + '@rollup/rollup-win32-x64-msvc': 4.57.1 + fsevents: 2.3.3 + safe-buffer@5.2.1: {} safe-stable-stringify@2.5.0: {} + semver@7.7.4: {} + + siginfo@2.0.0: {} + + source-map-js@1.2.1: {} + + split2@4.2.0: {} + stack-trace@0.0.10: {} + stackback@0.0.2: {} + + std-env@3.10.0: {} + string_decoder@1.3.0: dependencies: safe-buffer: 5.2.1 + supports-color@7.2.0: + dependencies: + has-flag: 4.0.0 + text-hex@1.0.0: {} + tinybench@2.9.0: {} + + tinyexec@1.0.2: {} + + tinyglobby@0.2.15: + dependencies: + fdir: 6.5.0(picomatch@4.0.3) + picomatch: 4.0.3 + + tinyrainbow@3.0.3: {} + triple-beam@1.4.1: {} ts-mixer@6.0.4: {} @@ -434,6 +1634,60 @@ snapshots: util-deprecate@1.0.2: {} + vite@7.3.1(@types/node@25.2.0): + dependencies: + esbuild: 0.27.3 + fdir: 6.5.0(picomatch@4.0.3) + picomatch: 4.0.3 + postcss: 8.5.6 + rollup: 4.57.1 + tinyglobby: 0.2.15 + optionalDependencies: + '@types/node': 25.2.0 + fsevents: 2.3.3 + + vitest@4.0.18(@types/node@25.2.0): + dependencies: + '@vitest/expect': 4.0.18 + '@vitest/mocker': 4.0.18(vite@7.3.1(@types/node@25.2.0)) + '@vitest/pretty-format': 4.0.18 + '@vitest/runner': 4.0.18 + '@vitest/snapshot': 4.0.18 + '@vitest/spy': 4.0.18 + '@vitest/utils': 4.0.18 + es-module-lexer: 1.7.0 + expect-type: 1.3.0 + magic-string: 0.30.21 + obug: 2.1.1 + pathe: 2.0.3 + picomatch: 4.0.3 + std-env: 3.10.0 + tinybench: 2.9.0 + tinyexec: 1.0.2 + tinyglobby: 0.2.15 + tinyrainbow: 3.0.3 + vite: 7.3.1(@types/node@25.2.0) + why-is-node-running: 2.3.0 + optionalDependencies: + '@types/node': 25.2.0 + transitivePeerDependencies: + - jiti + - less + - lightningcss + - msw + - sass + - sass-embedded + - stylus + - sugarss + - terser + - tsx + - yaml + + why-is-node-running@2.3.0: + dependencies: + siginfo: 2.0.0 + stackback: 0.0.2 + winston-daily-rotate-file@5.0.0(winston@3.19.0): dependencies: file-stream-rotator: 0.6.1 @@ -463,3 +1717,5 @@ 
snapshots: winston-transport: 4.9.0 ws@8.19.0: {} + + xtend@4.0.2: {} diff --git a/src/commands/config.js b/src/commands/config.js index 9ce89236..8ca0cf78 100644 --- a/src/commands/config.js +++ b/src/commands/config.js @@ -3,8 +3,8 @@ * View, set, and reset bot configuration via slash commands */ -import { SlashCommandBuilder, EmbedBuilder } from 'discord.js'; -import { getConfig, setConfigValue, resetConfig } from '../modules/config.js'; +import { EmbedBuilder, SlashCommandBuilder } from 'discord.js'; +import { getConfig, resetConfig, setConfigValue } from '../modules/config.js'; /** * Escape backticks in user-provided strings to prevent breaking Discord inline code formatting. @@ -18,47 +18,49 @@ function escapeInlineCode(str) { export const data = new SlashCommandBuilder() .setName('config') .setDescription('View or manage bot configuration (Admin only)') - .addSubcommand(subcommand => + .addSubcommand((subcommand) => subcommand .setName('view') .setDescription('View current configuration') - .addStringOption(option => + .addStringOption((option) => option .setName('section') .setDescription('Specific config section to view') .setRequired(false) - .setAutocomplete(true) - ) + .setAutocomplete(true), + ), ) - .addSubcommand(subcommand => + .addSubcommand((subcommand) => subcommand .setName('set') .setDescription('Set a configuration value') - .addStringOption(option => + .addStringOption((option) => option .setName('path') .setDescription('Dot-notation path (e.g., ai.model, welcome.enabled)') .setRequired(true) - .setAutocomplete(true) + .setAutocomplete(true), ) - .addStringOption(option => + .addStringOption((option) => option .setName('value') - .setDescription('Value (auto-coerces true/false/null/numbers; use "\\"text\\"" for literal strings)') - .setRequired(true) - ) + .setDescription( + 'Value (auto-coerces true/false/null/numbers; use "\\"text\\"" for literal strings)', + ) + .setRequired(true), + ), ) - .addSubcommand(subcommand => + 
.addSubcommand((subcommand) => subcommand .setName('reset') .setDescription('Reset configuration to defaults from config.json') - .addStringOption(option => + .addStringOption((option) => option .setName('section') .setDescription('Section to reset (omit to reset all)') .setRequired(false) - .setAutocomplete(true) - ) + .setAutocomplete(true), + ), ); export const adminOnly = true; @@ -124,14 +126,14 @@ export async function autocomplete(interaction) { if (focusedOption.name === 'section') { // Autocomplete section names from live config choices = Object.keys(config) - .filter(s => s.toLowerCase().includes(focusedValue)) + .filter((s) => s.toLowerCase().includes(focusedValue)) .slice(0, 25) - .map(s => ({ name: s, value: s })); + .map((s) => ({ name: s, value: s })); } else { // Autocomplete dot-notation paths (leaf-only) const paths = collectConfigPaths(config); choices = paths - .filter(p => p.toLowerCase().includes(focusedValue)) + .filter((p) => p.toLowerCase().includes(focusedValue)) .sort((a, b) => { const aLower = a.toLowerCase(); const bLower = b.toLowerCase(); @@ -143,7 +145,7 @@ export async function autocomplete(interaction) { return aLower.localeCompare(bLower); }) .slice(0, 25) - .map(p => ({ name: p, value: p })); + .map((p) => ({ name: p, value: p })); } await interaction.respond(choices); @@ -169,7 +171,7 @@ export async function execute(interaction) { default: await interaction.reply({ content: `❌ Unknown subcommand: \`${subcommand}\``, - ephemeral: true + ephemeral: true, }); break; } @@ -187,9 +189,11 @@ async function handleView(interaction) { const section = interaction.options.getString('section'); const embed = new EmbedBuilder() - .setColor(0x5865F2) + .setColor(0x5865f2) .setTitle('⚙️ Bot Configuration') - .setFooter({ text: `${process.env.DATABASE_URL ? 'Stored in PostgreSQL' : 'Stored in memory (config.json)'} • Use /config set to modify` }) + .setFooter({ + text: `${process.env.DATABASE_URL ? 
'Stored in PostgreSQL' : 'Stored in memory (config.json)'} • Use /config set to modify`, + }) .setTimestamp(); if (section) { @@ -198,7 +202,7 @@ async function handleView(interaction) { const safeSection = escapeInlineCode(section); return await interaction.reply({ content: `❌ Section \`${safeSection}\` not found in config`, - ephemeral: true + ephemeral: true, }); } @@ -206,7 +210,10 @@ async function handleView(interaction) { const sectionJson = JSON.stringify(sectionData, null, 2); embed.addFields({ name: 'Settings', - value: '```json\n' + (sectionJson.length > 1000 ? sectionJson.slice(0, 997) + '...' : sectionJson) + '\n```' + value: + '```json\n' + + (sectionJson.length > 1000 ? `${sectionJson.slice(0, 997)}...` : sectionJson) + + '\n```', }); } else { embed.setDescription('Current bot configuration'); @@ -217,7 +224,7 @@ async function handleView(interaction) { for (const [key, value] of Object.entries(config)) { const jsonStr = JSON.stringify(value, null, 2); - const fieldValue = '```json\n' + (jsonStr.length > 1000 ? jsonStr.slice(0, 997) + '...' : jsonStr) + '\n```'; + const fieldValue = `\`\`\`json\n${jsonStr.length > 1000 ? 
`${jsonStr.slice(0, 997)}...` : jsonStr}\n\`\`\``; const fieldName = key.toUpperCase(); const fieldLength = fieldName.length + fieldValue.length; @@ -226,7 +233,7 @@ async function handleView(interaction) { embed.addFields({ name: '⚠️ Truncated', value: 'Use `/config view section:` to see remaining sections.', - inline: false + inline: false, }); truncated = true; break; @@ -236,20 +243,24 @@ async function handleView(interaction) { embed.addFields({ name: fieldName, value: fieldValue, - inline: false + inline: false, }); } if (truncated) { - embed.setFooter({ text: 'Some sections omitted • Use /config view section: for details' }); + embed.setFooter({ + text: 'Some sections omitted • Use /config view section: for details', + }); } } await interaction.reply({ embeds: [embed], ephemeral: true }); } catch (err) { + const safeMessage = + process.env.NODE_ENV === 'production' ? 'An internal error occurred.' : err.message; await interaction.reply({ - content: `❌ Failed to load config: ${err.message}`, - ephemeral: true + content: `❌ Failed to load config: ${safeMessage}`, + ephemeral: true, }); } } @@ -268,7 +279,7 @@ async function handleSet(interaction) { const safeSection = escapeInlineCode(section); return await interaction.reply({ content: `❌ Invalid section \`${safeSection}\`. Valid sections: ${validSections.join(', ')}`, - ephemeral: true + ephemeral: true, }); } @@ -278,24 +289,30 @@ async function handleSet(interaction) { const updatedSection = await setConfigValue(path, value); // Traverse to the actual leaf value for display - const leafValue = path.split('.').slice(1).reduce((obj, k) => obj?.[k], updatedSection); + const leafValue = path + .split('.') + .slice(1) + .reduce((obj, k) => obj?.[k], updatedSection); const displayValue = JSON.stringify(leafValue, null, 2) ?? value; - const truncatedValue = displayValue.length > 1000 ? displayValue.slice(0, 997) + '...' : displayValue; + const truncatedValue = + displayValue.length > 1000 ? 
`${displayValue.slice(0, 997)}...` : displayValue; const embed = new EmbedBuilder() - .setColor(0x57F287) + .setColor(0x57f287) .setTitle('✅ Config Updated') .addFields( - { name: 'Path', value: `\`${path}\``, inline: true }, - { name: 'New Value', value: `\`${truncatedValue}\``, inline: true } + { name: 'Path', value: `\`${escapeInlineCode(path)}\``, inline: true }, + { name: 'New Value', value: `\`${escapeInlineCode(truncatedValue)}\``, inline: true }, ) .setFooter({ text: 'Changes take effect immediately' }) .setTimestamp(); await interaction.editReply({ embeds: [embed] }); } catch (err) { - const content = `❌ Failed to set config: ${err.message}`; + const safeMessage = + process.env.NODE_ENV === 'production' ? 'An internal error occurred.' : err.message; + const content = `❌ Failed to set config: ${safeMessage}`; if (interaction.deferred) { await interaction.editReply({ content }); } else { @@ -316,19 +333,21 @@ async function handleReset(interaction) { await resetConfig(section || undefined); const embed = new EmbedBuilder() - .setColor(0xFEE75C) + .setColor(0xfee75c) .setTitle('🔄 Config Reset') .setDescription( section - ? `Section **${section}** has been reset to defaults from config.json.` - : 'All configuration has been reset to defaults from config.json.' + ? `Section **${escapeInlineCode(section)}** has been reset to defaults from config.json.` + : 'All configuration has been reset to defaults from config.json.', ) .setFooter({ text: 'Changes take effect immediately' }) .setTimestamp(); await interaction.editReply({ embeds: [embed] }); } catch (err) { - const content = `❌ Failed to reset config: ${err.message}`; + const safeMessage = + process.env.NODE_ENV === 'production' ? 'An internal error occurred.' 
: err.message; + const content = `❌ Failed to reset config: ${safeMessage}`; if (interaction.deferred) { await interaction.editReply({ content }); } else { diff --git a/src/commands/ping.js b/src/commands/ping.js index bc05f0a5..f982fef3 100644 --- a/src/commands/ping.js +++ b/src/commands/ping.js @@ -7,16 +7,12 @@ export const data = new SlashCommandBuilder() export async function execute(interaction) { const response = await interaction.reply({ content: 'Pinging...', - withResponse: true + withResponse: true, }); const sent = response.resource.message; const latency = sent.createdTimestamp - interaction.createdTimestamp; const apiLatency = Math.round(interaction.client.ws.ping); - await interaction.editReply( - `🏓 Pong!\n` + - `📡 Latency: ${latency}ms\n` + - `💓 API: ${apiLatency}ms` - ); + await interaction.editReply(`🏓 Pong!\n📡 Latency: ${latency}ms\n💓 API: ${apiLatency}ms`); } diff --git a/src/commands/status.js b/src/commands/status.js index 5073476a..2f54c229 100644 --- a/src/commands/status.js +++ b/src/commands/status.js @@ -5,16 +5,18 @@ * Admin mode (detailed: true) shows additional diagnostics */ -import { SlashCommandBuilder, EmbedBuilder, PermissionFlagsBits } from 'discord.js'; +import { EmbedBuilder, PermissionFlagsBits, SlashCommandBuilder } from 'discord.js'; +import { error as logError } from '../logger.js'; import { HealthMonitor } from '../utils/health.js'; export const data = new SlashCommandBuilder() .setName('status') .setDescription('Display bot health metrics and status') - .addBooleanOption(option => - option.setName('detailed') + .addBooleanOption((option) => + option + .setName('detailed') .setDescription('Show detailed diagnostics (admin only)') - .setRequired(false) + .setRequired(false), ); /** @@ -42,10 +44,14 @@ function formatRelativeTime(timestamp) { */ function getStatusEmoji(status) { switch (status) { - case 'ok': return '🟢'; - case 'error': return '🔴'; - case 'unknown': return '🟡'; - default: return '⚪'; + case 'ok': + return 
'🟢'; + case 'error': + return '🔴'; + case 'unknown': + return '🟡'; + default: + return '⚪'; } } @@ -62,7 +68,7 @@ export async function execute(interaction) { if (!interaction.memberPermissions?.has(PermissionFlagsBits.Administrator)) { await interaction.reply({ content: '❌ Detailed diagnostics are only available to administrators.', - ephemeral: true + ephemeral: true, }); return; } @@ -71,21 +77,33 @@ export async function execute(interaction) { const status = healthMonitor.getDetailedStatus(); const embed = new EmbedBuilder() - .setColor(0x5865F2) + .setColor(0x5865f2) .setTitle('🔍 Bot Status - Detailed Diagnostics') .addFields( { name: '⏱️ Uptime', value: status.uptimeFormatted, inline: true }, { name: '🧠 Memory', value: status.memory.formatted, inline: true }, - { name: '🌐 API', value: `${getStatusEmoji(status.api.status)} ${status.api.status}`, inline: true }, - { name: '🤖 Last AI Request', value: formatRelativeTime(status.lastAIRequest), inline: true }, + { + name: '🌐 API', + value: `${getStatusEmoji(status.api.status)} ${status.api.status}`, + inline: true, + }, + { + name: '🤖 Last AI Request', + value: formatRelativeTime(status.lastAIRequest), + inline: true, + }, { name: '📊 Process ID', value: `${status.process.pid}`, inline: true }, { name: '🖥️ Platform', value: status.process.platform, inline: true }, { name: '📦 Node Version', value: status.process.nodeVersion, inline: true }, - { name: '⚙️ Process Uptime', value: `${Math.floor(status.process.uptime)}s`, inline: true }, + { + name: '⚙️ Process Uptime', + value: `${Math.floor(status.process.uptime)}s`, + inline: true, + }, { name: '🔢 Heap Used', value: `${status.memory.heapUsed}MB`, inline: true }, { name: '💾 RSS', value: `${status.memory.rss}MB`, inline: true }, { name: '📡 External', value: `${status.memory.external}MB`, inline: true }, - { name: '🔢 Array Buffers', value: `${status.memory.arrayBuffers}MB`, inline: true } + { name: '🔢 Array Buffers', value: `${status.memory.arrayBuffers}MB`, inline: true 
}, ) .setTimestamp() .setFooter({ text: 'Detailed diagnostics mode' }); @@ -96,14 +114,22 @@ export async function execute(interaction) { const status = healthMonitor.getStatus(); const embed = new EmbedBuilder() - .setColor(0x57F287) + .setColor(0x57f287) .setTitle('📊 Bot Status') .setDescription('Current health and performance metrics') .addFields( { name: '⏱️ Uptime', value: status.uptimeFormatted, inline: true }, { name: '🧠 Memory', value: status.memory.formatted, inline: true }, - { name: '🌐 API Status', value: `${getStatusEmoji(status.api.status)} ${status.api.status.toUpperCase()}`, inline: true }, - { name: '🤖 Last AI Request', value: formatRelativeTime(status.lastAIRequest), inline: false } + { + name: '🌐 API Status', + value: `${getStatusEmoji(status.api.status)} ${status.api.status.toUpperCase()}`, + inline: true, + }, + { + name: '🤖 Last AI Request', + value: formatRelativeTime(status.lastAIRequest), + inline: false, + }, ) .setTimestamp() .setFooter({ text: 'Use /status detailed:true for more info' }); @@ -111,11 +137,11 @@ export async function execute(interaction) { await interaction.reply({ embeds: [embed] }); } } catch (err) { - console.error('Status command error:', err.message); + logError('Status command error', { error: err.message }); const reply = { - content: 'Sorry, I couldn\'t retrieve the status. Try again in a moment!', - ephemeral: true + content: "Sorry, I couldn't retrieve the status. 
Try again in a moment!", + ephemeral: true, }; if (interaction.replied || interaction.deferred) { diff --git a/src/deploy-commands.js b/src/deploy-commands.js new file mode 100644 index 00000000..2a1a5054 --- /dev/null +++ b/src/deploy-commands.js @@ -0,0 +1,53 @@ +/** + * Deploy slash commands to Discord + * + * Usage: + * pnpm deploy + * + * Environment: + * DISCORD_TOKEN (required) + * DISCORD_CLIENT_ID (required, fallback: CLIENT_ID) + * GUILD_ID (optional) + */ + +import { dirname, join } from 'node:path'; +import { fileURLToPath } from 'node:url'; +import { config as dotenvConfig } from 'dotenv'; +import { error as logError } from './logger.js'; +import { loadCommandsFromDirectory } from './utils/loadCommands.js'; +import { registerCommands } from './utils/registerCommands.js'; + +dotenvConfig(); + +const __dirname = dirname(fileURLToPath(import.meta.url)); + +const token = process.env.DISCORD_TOKEN; +const clientId = process.env.DISCORD_CLIENT_ID || process.env.CLIENT_ID; +const guildId = process.env.GUILD_ID || null; + +if (!token) { + logError('DISCORD_TOKEN is required'); + process.exit(1); +} + +if (!clientId) { + logError('DISCORD_CLIENT_ID (or legacy CLIENT_ID) is required'); + process.exit(1); +} + +async function loadCommands() { + return loadCommandsFromDirectory({ + commandsPath: join(__dirname, 'commands'), + logLoaded: false, + }); +} + +async function main() { + const commands = await loadCommands(); + await registerCommands(commands, clientId, token, guildId); +} + +main().catch((err) => { + logError('Command deployment failed', { error: err.message, stack: err.stack }); + process.exit(1); +}); diff --git a/src/index.js b/src/index.js index f1b42586..00cca6ee 100644 --- a/src/index.js +++ b/src/index.js @@ -11,19 +11,20 @@ * - Structured logging */ -import { Client, GatewayIntentBits, Collection } from 'discord.js'; +import { existsSync, mkdirSync, readFileSync, writeFileSync } from 'node:fs'; +import { dirname, join } from 'node:path'; +import 
{ fileURLToPath } from 'node:url'; +import { Client, Collection, Events, GatewayIntentBits } from 'discord.js'; import { config as dotenvConfig } from 'dotenv'; -import { readdirSync, writeFileSync, readFileSync, existsSync, mkdirSync } from 'fs'; -import { join, dirname } from 'path'; -import { fileURLToPath } from 'url'; -import { info, warn, error } from './logger.js'; -import { initDb, closeDb } from './db.js'; -import { loadConfig, getConfig } from './modules/config.js'; +import { closeDb, initDb } from './db.js'; +import { error, info, warn } from './logger.js'; +import { getConversationHistory, setConversationHistory } from './modules/ai.js'; +import { loadConfig } from './modules/config.js'; import { registerEventHandlers } from './modules/events.js'; import { HealthMonitor } from './utils/health.js'; +import { loadCommandsFromDirectory } from './utils/loadCommands.js'; +import { getPermissionError, hasPermission } from './utils/permissions.js'; import { registerCommands } from './utils/registerCommands.js'; -import { hasPermission, getPermissionError } from './utils/permissions.js'; -import { getConversationHistory, setConversationHistory } from './modules/ai.js'; // ES module dirname equivalent const __filename = fileURLToPath(import.meta.url); @@ -125,39 +126,25 @@ function loadState() { */ async function loadCommands() { const commandsPath = join(__dirname, 'commands'); - const commandFiles = readdirSync(commandsPath).filter(file => file.endsWith('.js')); - - for (const file of commandFiles) { - const filePath = join(commandsPath, file); - try { - const command = await import(filePath); - if (command.data && command.execute) { - client.commands.set(command.data.name, command); - info('Loaded command', { command: command.data.name }); - } else { - warn('Command missing data or execute export', { file }); - } - } catch (err) { - error('Failed to load command', { file, error: err.message }); - } - } + + await loadCommandsFromDirectory({ + commandsPath, + 
onCommandLoaded: (command) => { + client.commands.set(command.data.name, command); + }, + }); } // Event handlers are registered after config loads (see startup below) // Extend ready handler to register slash commands -client.once('clientReady', async () => { +client.once(Events.ClientReady, async () => { // Register slash commands with Discord try { const commands = Array.from(client.commands.values()); const guildId = process.env.GUILD_ID || null; - await registerCommands( - commands, - client.user.id, - process.env.DISCORD_TOKEN, - guildId - ); + await registerCommands(commands, client.user.id, process.env.DISCORD_TOKEN, guildId); } catch (err) { error('Command registration failed', { error: err.message }); } @@ -189,7 +176,7 @@ client.on('interactionCreate', async (interaction) => { if (!hasPermission(member, commandName, config)) { await interaction.reply({ content: getPermissionError(commandName), - ephemeral: true + ephemeral: true, }); warn('Permission denied', { user: interaction.user.tag, command: commandName }); return; @@ -200,7 +187,7 @@ client.on('interactionCreate', async (interaction) => { if (!command) { await interaction.reply({ content: '❌ Command not found.', - ephemeral: true + ephemeral: true, }); return; } @@ -212,7 +199,7 @@ client.on('interactionCreate', async (interaction) => { const errorMessage = { content: '❌ An error occurred while executing this command.', - ephemeral: true + ephemeral: true, }; if (interaction.replied || interaction.deferred) { @@ -236,8 +223,8 @@ async function gracefulShutdown(signal) { info('Waiting for pending requests', { count: pendingRequests.size }); const startTime = Date.now(); - while (pendingRequests.size > 0 && (Date.now() - startTime) < SHUTDOWN_TIMEOUT) { - await new Promise(resolve => setTimeout(resolve, 100)); + while (pendingRequests.size > 0 && Date.now() - startTime < SHUTDOWN_TIMEOUT) { + await new Promise((resolve) => setTimeout(resolve, 100)); } if (pendingRequests.size > 0) { @@ -277,7 +264,7 
@@ client.on('error', (err) => { error('Discord client error', { error: err.message, stack: err.stack, - code: err.code + code: err.code, }); }); @@ -285,7 +272,7 @@ process.on('unhandledRejection', (err) => { error('Unhandled promise rejection', { error: err?.message || String(err), stack: err?.stack, - type: typeof err + type: typeof err, }); }); diff --git a/src/logger.js b/src/logger.js index 7c092cff..ba5c2086 100644 --- a/src/logger.js +++ b/src/logger.js @@ -8,11 +8,11 @@ * - Console transport (file transport added in phase 3) */ +import { existsSync, mkdirSync, readFileSync } from 'node:fs'; +import { dirname, join } from 'node:path'; +import { fileURLToPath } from 'node:url'; import winston from 'winston'; import DailyRotateFile from 'winston-daily-rotate-file'; -import { readFileSync, existsSync, mkdirSync } from 'fs'; -import { join, dirname } from 'path'; -import { fileURLToPath } from 'url'; const __dirname = dirname(fileURLToPath(import.meta.url)); const configPath = join(__dirname, '..', 'config.json'); @@ -28,7 +28,7 @@ try { logLevel = process.env.LOG_LEVEL || config.logging?.level || 'info'; fileOutputEnabled = config.logging?.fileOutput || false; } -} catch (err) { +} catch (_err) { // Fallback to default if config can't be loaded logLevel = process.env.LOG_LEVEL || 'info'; } @@ -39,7 +39,7 @@ if (fileOutputEnabled) { if (!existsSync(logsDir)) { mkdirSync(logsDir, { recursive: true }); } - } catch (err) { + } catch (_err) { // Log directory creation failed, but continue without file logging fileOutputEnabled = false; } @@ -50,11 +50,12 @@ if (fileOutputEnabled) { */ const SENSITIVE_FIELDS = [ 'DISCORD_TOKEN', + 'OPENCLAW_API_KEY', 'OPENCLAW_TOKEN', 'token', 'password', 'apiKey', - 'authorization' + 'authorization', ]; /** @@ -70,15 +71,13 @@ function filterSensitiveData(obj) { } if (Array.isArray(obj)) { - return obj.map(item => filterSensitiveData(item)); + return obj.map((item) => filterSensitiveData(item)); } const filtered = {}; for (const 
[key, value] of Object.entries(obj)) { // Check if key matches any sensitive field (case-insensitive) - const isSensitive = SENSITIVE_FIELDS.some( - field => key.toLowerCase() === field.toLowerCase() - ); + const isSensitive = SENSITIVE_FIELDS.some((field) => key.toLowerCase() === field.toLowerCase()); if (isSensitive) { filtered[key] = '[REDACTED]'; @@ -101,10 +100,10 @@ const redactSensitiveData = winston.format((info) => { // Filter each property in the info object for (const key in info) { - if (Object.prototype.hasOwnProperty.call(info, key) && !reserved.includes(key)) { + if (Object.hasOwn(info, key) && !reserved.includes(key)) { // Check if this key is sensitive (case-insensitive) const isSensitive = SENSITIVE_FIELDS.some( - field => key.toLowerCase() === field.toLowerCase() + (field) => key.toLowerCase() === field.toLowerCase(), ); if (isSensitive) { @@ -126,7 +125,7 @@ const EMOJI_MAP = { error: '❌', warn: '⚠️', info: '✅', - debug: '🔍' + debug: '🔍', }; /** @@ -140,13 +139,15 @@ const preserveOriginalLevel = winston.format((info) => { /** * Custom format for console output with emoji prefixes */ -const consoleFormat = winston.format.printf(({ level, message, timestamp, originalLevel, ...meta }) => { - // Use originalLevel for emoji lookup since 'level' may contain ANSI color codes - const prefix = EMOJI_MAP[originalLevel] || '📝'; - const metaStr = Object.keys(meta).length > 0 ? ` ${JSON.stringify(meta)}` : ''; +const consoleFormat = winston.format.printf( + ({ level, message, timestamp, originalLevel, ...meta }) => { + // Use originalLevel for emoji lookup since 'level' may contain ANSI color codes + const prefix = EMOJI_MAP[originalLevel] || '📝'; + const metaStr = Object.keys(meta).length > 0 ? 
` ${JSON.stringify(meta)}` : ''; - return `${prefix} [${timestamp}] ${level.toUpperCase()}: ${message}${metaStr}`; -}); + return `${prefix} [${timestamp}] ${originalLevel.toUpperCase()}: ${message}${metaStr}`; + }, +); /** * Create winston logger instance @@ -158,9 +159,9 @@ const transports = [ preserveOriginalLevel, winston.format.colorize(), winston.format.timestamp({ format: 'YYYY-MM-DD HH:mm:ss' }), - consoleFormat - ) - }) + consoleFormat, + ), + }), ]; // Add file transport if enabled in config @@ -174,9 +175,9 @@ if (fileOutputEnabled) { format: winston.format.combine( redactSensitiveData, winston.format.timestamp({ format: 'YYYY-MM-DD HH:mm:ss' }), - winston.format.json() - ) - }) + winston.format.json(), + ), + }), ); // Separate transport for error-level logs only @@ -190,19 +191,16 @@ if (fileOutputEnabled) { format: winston.format.combine( redactSensitiveData, winston.format.timestamp({ format: 'YYYY-MM-DD HH:mm:ss' }), - winston.format.json() - ) - }) + winston.format.json(), + ), + }), ); } const logger = winston.createLogger({ level: logLevel, - format: winston.format.combine( - winston.format.errors({ stack: true }), - winston.format.splat() - ), - transports + format: winston.format.combine(winston.format.errors({ stack: true }), winston.format.splat()), + transports, }); /** @@ -239,5 +237,5 @@ export default { info, warn, error, - logger // Export winston logger instance for advanced usage + logger, // Export winston logger instance for advanced usage }; diff --git a/src/modules/ai.js b/src/modules/ai.js index 39dd6c80..cf8a1840 100644 --- a/src/modules/ai.js +++ b/src/modules/ai.js @@ -3,7 +3,7 @@ * Handles AI chat functionality powered by Claude via OpenClaw */ -import { info, warn } from '../logger.js'; +import { info, error as logError } from '../logger.js'; // Conversation history per channel (simple in-memory store) let conversationHistory = new Map(); @@ -25,9 +25,14 @@ export function setConversationHistory(history) { conversationHistory 
= history; } -// OpenClaw API endpoint (exported for shared use by other modules) -export const OPENCLAW_URL = process.env.OPENCLAW_URL || 'http://localhost:18789/v1/chat/completions'; -export const OPENCLAW_TOKEN = process.env.OPENCLAW_TOKEN || ''; +// OpenClaw API endpoint/token (exported for shared use by other modules) +// Preferred env vars: OPENCLAW_API_URL + OPENCLAW_API_KEY +// Backward-compatible aliases: OPENCLAW_URL + OPENCLAW_TOKEN +export const OPENCLAW_URL = + process.env.OPENCLAW_API_URL || + process.env.OPENCLAW_URL || + 'http://localhost:18789/v1/chat/completions'; +export const OPENCLAW_TOKEN = process.env.OPENCLAW_API_KEY || process.env.OPENCLAW_TOKEN || ''; /** * Get or create conversation history for a channel @@ -66,10 +71,18 @@ export function addToHistory(channelId, role, content) { * @param {Object} healthMonitor - Health monitor instance (optional) * @returns {Promise} AI response */ -export async function generateResponse(channelId, userMessage, username, config, healthMonitor = null) { +export async function generateResponse( + channelId, + userMessage, + username, + config, + healthMonitor = null, +) { const history = getHistory(channelId); - const systemPrompt = config.ai?.systemPrompt || `You are Volvox Bot, a helpful and friendly Discord bot for the Volvox developer community. + const systemPrompt = + config.ai?.systemPrompt || + `You are Volvox Bot, a helpful and friendly Discord bot for the Volvox developer community. You're witty, knowledgeable about programming and tech, and always eager to help. Keep responses concise and Discord-friendly (under 2000 chars). 
You can use Discord markdown formatting.`; @@ -78,7 +91,7 @@ You can use Discord markdown formatting.`; const messages = [ { role: 'system', content: systemPrompt }, ...history, - { role: 'user', content: `${username}: ${userMessage}` } + { role: 'user', content: `${username}: ${userMessage}` }, ]; // Log incoming AI request @@ -89,7 +102,7 @@ You can use Discord markdown formatting.`; method: 'POST', headers: { 'Content-Type': 'application/json', - ...(OPENCLAW_TOKEN && { 'Authorization': `Bearer ${OPENCLAW_TOKEN}` }) + ...(OPENCLAW_TOKEN && { Authorization: `Bearer ${OPENCLAW_TOKEN}` }), }, body: JSON.stringify({ model: config.ai?.model || 'claude-sonnet-4-20250514', @@ -106,7 +119,7 @@ You can use Discord markdown formatting.`; } const data = await response.json(); - const reply = data.choices?.[0]?.message?.content || "I got nothing. Try again?"; + const reply = data.choices?.[0]?.message?.content || 'I got nothing. Try again?'; // Log AI response info('AI response', { channelId, username, response: reply.substring(0, 500) }); @@ -123,7 +136,7 @@ You can use Discord markdown formatting.`; return reply; } catch (err) { - console.error('OpenClaw API error:', err.message); + logError('OpenClaw API error', { error: err.message }); if (healthMonitor) { healthMonitor.setAPIStatus('error'); } diff --git a/src/modules/chimeIn.js b/src/modules/chimeIn.js index 3d13b9f3..f823e15c 100644 --- a/src/modules/chimeIn.js +++ b/src/modules/chimeIn.js @@ -9,9 +9,9 @@ * - If NO → resets the counter but keeps the buffer for context continuity */ -import { info, warn, error as logError } from '../logger.js'; -import { OPENCLAW_URL, OPENCLAW_TOKEN } from './ai.js'; -import { splitMessage, needsSplitting } from '../utils/splitMessage.js'; +import { info, error as logError, warn } from '../logger.js'; +import { needsSplitting, splitMessage } from '../utils/splitMessage.js'; +import { OPENCLAW_TOKEN, OPENCLAW_URL } from './ai.js'; // ── Per-channel state 
────────────────────────────────────────────────────────── // Map, counter: number, lastActive: number, abortController: AbortController|null }> @@ -39,8 +39,7 @@ function evictInactiveChannels() { // If still over limit, evict oldest if (channelBuffers.size > MAX_TRACKED_CHANNELS) { - const entries = [...channelBuffers.entries()] - .sort((a, b) => a[1].lastActive - b[1].lastActive); + const entries = [...channelBuffers.entries()].sort((a, b) => a[1].lastActive - b[1].lastActive); const toEvict = entries.slice(0, channelBuffers.size - MAX_TRACKED_CHANNELS); for (const [channelId] of toEvict) { channelBuffers.delete(channelId); @@ -54,7 +53,12 @@ function evictInactiveChannels() { function getBuffer(channelId) { if (!channelBuffers.has(channelId)) { evictInactiveChannels(); - channelBuffers.set(channelId, { messages: [], counter: 0, lastActive: Date.now(), abortController: null }); + channelBuffers.set(channelId, { + messages: [], + counter: 0, + lastActive: Date.now(), + abortController: null, + }); } const buf = channelBuffers.get(channelId); buf.lastActive = Date.now(); @@ -85,9 +89,7 @@ async function shouldChimeIn(buffer, config, signal) { const systemPrompt = config.ai?.systemPrompt || 'You are a helpful Discord bot.'; // Format the buffered conversation with structured delimiters to prevent injection - const conversationText = buffer.messages - .map((m) => `${m.author}: ${m.content}`) - .join('\n'); + const conversationText = buffer.messages.map((m) => `${m.author}: ${m.content}`).join('\n'); // System instruction first (required by OpenAI-compatible proxies for Anthropic models) const messages = [ @@ -144,9 +146,7 @@ async function generateChimeInResponse(buffer, config, signal) { const model = config.ai?.model || 'claude-sonnet-4-20250514'; const maxTokens = config.ai?.maxTokens || 1024; - const conversationText = buffer.messages - .map((m) => `${m.author}: ${m.content}`) - .join('\n'); + const conversationText = buffer.messages.map((m) => `${m.author}: 
${m.content}`).join('\n'); const messages = [ { role: 'system', content: systemPrompt }, diff --git a/src/modules/config.js b/src/modules/config.js index fc173813..62c19eed 100644 --- a/src/modules/config.js +++ b/src/modules/config.js @@ -3,11 +3,11 @@ * Loads config from PostgreSQL with config.json as the seed/fallback */ -import { readFileSync, existsSync } from 'fs'; -import { join, dirname } from 'path'; -import { fileURLToPath } from 'url'; +import { existsSync, readFileSync } from 'node:fs'; +import { dirname, join } from 'node:path'; +import { fileURLToPath } from 'node:url'; import { getPool } from '../db.js'; -import { info, warn as logWarn, error as logError } from '../logger.js'; +import { info, error as logError, warn as logWarn } from '../logger.js'; const __dirname = dirname(fileURLToPath(import.meta.url)); const configPath = join(__dirname, '..', '..', 'config.json'); @@ -61,7 +61,9 @@ export async function loadConfig() { } catch { // DB not initialized — file config is our only option if (!fileConfig) { - throw new Error('No configuration source available: config.json is missing and database is not initialized'); + throw new Error( + 'No configuration source available: config.json is missing and database is not initialized', + ); } info('Database not available, using config.json'); configCache = structuredClone(fileConfig); @@ -73,7 +75,9 @@ export async function loadConfig() { if (rows.length === 0) { if (!fileConfig) { - throw new Error('No configuration source available: database is empty and config.json is missing'); + throw new Error( + 'No configuration source available: database is empty and config.json is missing', + ); } // Seed database from config.json inside a transaction info('No config in database, seeding from config.json'); @@ -83,14 +87,18 @@ export async function loadConfig() { for (const [key, value] of Object.entries(fileConfig)) { await client.query( 'INSERT INTO config (key, value) VALUES ($1, $2) ON CONFLICT (key) DO UPDATE 
SET value = $2, updated_at = NOW()', - [key, JSON.stringify(value)] + [key, JSON.stringify(value)], ); } await client.query('COMMIT'); info('Config seeded to database'); configCache = structuredClone(fileConfig); } catch (txErr) { - try { await client.query('ROLLBACK'); } catch { /* ignore rollback failure */ } + try { + await client.query('ROLLBACK'); + } catch { + /* ignore rollback failure */ + } throw txErr; } finally { client.release(); @@ -168,31 +176,34 @@ export async function setConfigValue(path, value) { try { await client.query('BEGIN'); // Lock the row (or prepare for INSERT if missing) - const { rows } = await client.query( - 'SELECT value FROM config WHERE key = $1 FOR UPDATE', - [section] - ); + const { rows } = await client.query('SELECT value FROM config WHERE key = $1 FOR UPDATE', [ + section, + ]); if (rows.length > 0) { // Row exists — merge change into the live DB value const dbSection = rows[0].value; setNestedValue(dbSection, nestedParts, parsedVal); - await client.query( - 'UPDATE config SET value = $1, updated_at = NOW() WHERE key = $2', - [JSON.stringify(dbSection), section] - ); + await client.query('UPDATE config SET value = $1, updated_at = NOW() WHERE key = $2', [ + JSON.stringify(dbSection), + section, + ]); } else { // New section — use ON CONFLICT to handle concurrent inserts safely await client.query( 'INSERT INTO config (key, value) VALUES ($1, $2) ON CONFLICT (key) DO UPDATE SET value = $2, updated_at = NOW()', - [section, JSON.stringify(sectionClone)] + [section, JSON.stringify(sectionClone)], ); } await client.query('COMMIT'); dbPersisted = true; } catch (txErr) { - try { await client.query('ROLLBACK'); } catch { /* ignore rollback failure */ } + try { + await client.query('ROLLBACK'); + } catch { + /* ignore rollback failure */ + } throw txErr; } finally { client.release(); @@ -200,7 +211,11 @@ export async function setConfigValue(path, value) { } // Update in-memory cache (mutate in-place for reference propagation) - if 
(!configCache[section] || typeof configCache[section] !== 'object' || Array.isArray(configCache[section])) { + if ( + !configCache[section] || + typeof configCache[section] !== 'object' || + Array.isArray(configCache[section]) + ) { configCache[section] = {}; } setNestedValue(configCache[section], nestedParts, parsedVal); @@ -221,7 +236,7 @@ export async function resetConfig(section) { } catch { throw new Error( 'Cannot reset configuration: config.json is not available. ' + - 'Reset requires the default config file as a baseline.' + 'Reset requires the default config file as a baseline.', ); } @@ -241,10 +256,13 @@ export async function resetConfig(section) { try { await pool.query( 'INSERT INTO config (key, value) VALUES ($1, $2) ON CONFLICT (key) DO UPDATE SET value = $2, updated_at = NOW()', - [section, JSON.stringify(fileConfig[section])] + [section, JSON.stringify(fileConfig[section])], ); } catch (err) { - logError('Database error during section reset — updating in-memory only', { section, error: err.message }); + logError('Database error during section reset — updating in-memory only', { + section, + error: err.message, + }); } } @@ -268,21 +286,24 @@ export async function resetConfig(section) { for (const [key, value] of Object.entries(fileConfig)) { await client.query( 'INSERT INTO config (key, value) VALUES ($1, $2) ON CONFLICT (key) DO UPDATE SET value = $2, updated_at = NOW()', - [key, JSON.stringify(value)] + [key, JSON.stringify(value)], ); } // Remove stale keys that exist in DB but not in config.json const fileKeys = Object.keys(fileConfig); if (fileKeys.length > 0) { - await client.query( - 'DELETE FROM config WHERE key != ALL($1::text[])', - [fileKeys] - ); + await client.query('DELETE FROM config WHERE key != ALL($1::text[])', [fileKeys]); } await client.query('COMMIT'); } catch (txErr) { - try { await client.query('ROLLBACK'); } catch { /* ignore rollback failure */ } - logError('Database error during full config reset — updating in-memory 
only', { error: txErr.message }); + try { + await client.query('ROLLBACK'); + } catch { + /* ignore rollback failure */ + } + logError('Database error during full config reset — updating in-memory only', { + error: txErr.message, + }); } finally { client.release(); } diff --git a/src/modules/events.js b/src/modules/events.js index 90b04318..dd30091c 100644 --- a/src/modules/events.js +++ b/src/modules/events.js @@ -3,11 +3,16 @@ * Handles Discord event listeners and handlers */ -import { sendWelcomeMessage, recordCommunityActivity } from './welcome.js'; -import { isSpam, sendSpamAlert } from './spam.js'; +import { Events } from 'discord.js'; +import { info, error as logError, warn } from '../logger.js'; +import { needsSplitting, splitMessage } from '../utils/splitMessage.js'; import { generateResponse } from './ai.js'; import { accumulate, resetCounter } from './chimeIn.js'; -import { splitMessage, needsSplitting } from '../utils/splitMessage.js'; +import { isSpam, sendSpamAlert } from './spam.js'; +import { recordCommunityActivity, sendWelcomeMessage } from './welcome.js'; + +/** @type {boolean} Guard against duplicate process-level handler registration */ +let processHandlersRegistered = false; /** * Register bot ready event handler @@ -16,9 +21,8 @@ import { splitMessage, needsSplitting } from '../utils/splitMessage.js'; * @param {Object} healthMonitor - Health monitor instance */ export function registerReadyHandler(client, config, healthMonitor) { - client.once('clientReady', () => { - console.log(`✅ ${client.user.tag} is online!`); - console.log(`📡 Serving ${client.guilds.cache.size} server(s)`); + client.once(Events.ClientReady, () => { + info(`${client.user.tag} is online`, { servers: client.guilds.cache.size }); // Record bot start time if (healthMonitor) { @@ -26,13 +30,13 @@ export function registerReadyHandler(client, config, healthMonitor) { } if (config.welcome?.enabled) { - console.log(`👋 Welcome messages → #${config.welcome.channelId}`); + 
info('Welcome messages enabled', { channelId: config.welcome.channelId }); } if (config.ai?.enabled) { - console.log(`🤖 AI chat enabled (${config.ai.model || 'claude-sonnet-4-20250514'})`); + info('AI chat enabled', { model: config.ai.model || 'claude-sonnet-4-20250514' }); } if (config.moderation?.enabled) { - console.log(`🛡️ Moderation enabled`); + info('Moderation enabled'); } }); } @@ -62,7 +66,7 @@ export function registerMessageCreateHandler(client, config, healthMonitor) { // Spam detection if (config.moderation?.enabled && isSpam(message.content)) { - console.log(`[SPAM] ${message.author.tag}: ${message.content.slice(0, 50)}...`); + warn('Spam detected', { userId: message.author.id, contentPreview: '[redacted]' }); await sendSpamAlert(message, client, config); return; } @@ -77,7 +81,8 @@ export function registerMessageCreateHandler(client, config, healthMonitor) { // Check if in allowed channel (if configured) const allowedChannels = config.ai?.channels || []; - const isAllowedChannel = allowedChannels.length === 0 || allowedChannels.includes(message.channel.id); + const isAllowedChannel = + allowedChannels.length === 0 || allowedChannels.includes(message.channel.id); if ((isMentioned || isReply) && isAllowedChannel) { // Reset chime-in counter so we don't double-respond @@ -100,7 +105,7 @@ export function registerMessageCreateHandler(client, config, healthMonitor) { cleanContent, message.author.username, config, - healthMonitor + healthMonitor, ); // Split long responses @@ -119,7 +124,7 @@ export function registerMessageCreateHandler(client, config, healthMonitor) { // Chime-in: accumulate message for organic participation (fire-and-forget) accumulate(message, config).catch((err) => { - console.error('ChimeIn accumulate error:', err.message); + logError('ChimeIn accumulate error', { error: err?.message }); }); }); } @@ -129,13 +134,16 @@ export function registerMessageCreateHandler(client, config, healthMonitor) { * @param {Object} client - Discord client 
*/ export function registerErrorHandlers(client) { - client.on('error', (error) => { - console.error('Discord error:', error); + client.on('error', (err) => { + logError('Discord error', { error: err.message, stack: err.stack }); }); - process.on('unhandledRejection', (error) => { - console.error('Unhandled rejection:', error); - }); + if (!processHandlersRegistered) { + process.on('unhandledRejection', (err) => { + logError('Unhandled rejection', { error: err?.message, stack: err?.stack }); + }); + processHandlersRegistered = true; + } } /** diff --git a/src/modules/spam.js b/src/modules/spam.js index f7a0edd9..2231aff7 100644 --- a/src/modules/spam.js +++ b/src/modules/spam.js @@ -24,7 +24,7 @@ const SPAM_PATTERNS = [ * @returns {boolean} True if spam detected */ export function isSpam(content) { - return SPAM_PATTERNS.some(pattern => pattern.test(content)); + return SPAM_PATTERNS.some((pattern) => pattern.test(content)); } /** @@ -36,17 +36,19 @@ export function isSpam(content) { export async function sendSpamAlert(message, client, config) { if (!config.moderation?.alertChannelId) return; - const alertChannel = await client.channels.fetch(config.moderation.alertChannelId).catch(() => null); + const alertChannel = await client.channels + .fetch(config.moderation.alertChannelId) + .catch(() => null); if (!alertChannel) return; const embed = new EmbedBuilder() - .setColor(0xFF6B6B) + .setColor(0xff6b6b) .setTitle('⚠️ Potential Spam Detected') .addFields( { name: 'Author', value: `<@${message.author.id}>`, inline: true }, { name: 'Channel', value: `<#${message.channel.id}>`, inline: true }, { name: 'Content', value: message.content.slice(0, 1000) || '*empty*' }, - { name: 'Link', value: `[Jump](${message.url})` } + { name: 'Link', value: `[Jump](${message.url})` }, ) .setTimestamp(); diff --git a/src/modules/welcome.js b/src/modules/welcome.js index a8787741..b383d41c 100644 --- a/src/modules/welcome.js +++ b/src/modules/welcome.js @@ -3,6 +3,8 @@ * Handles dynamic 
welcome messages for new members */ +import { info, error as logError } from '../logger.js'; + const guildActivity = new Map(); const DEFAULT_ACTIVITY_WINDOW_MINUTES = 45; const MAX_EVENTS_PER_CHANNEL = 250; @@ -88,13 +90,13 @@ export async function sendWelcomeMessage(member, client, config) { : renderWelcomeMessage( config.welcome.message || 'Welcome, {user}!', { id: member.id, username: member.user.username }, - { name: member.guild.name, memberCount: member.guild.memberCount } + { name: member.guild.name, memberCount: member.guild.memberCount }, ); await channel.send(message); - console.log(`[WELCOME] ${member.user.tag} joined ${member.guild.name}`); + info('Welcome message sent', { user: member.user.tag, guild: member.guild.name }); } catch (err) { - console.error('Welcome error:', err.message); + logError('Welcome error', { error: err.message, stack: err.stack }); } } @@ -154,7 +156,7 @@ function getCommunitySnapshot(guild, settings) { const channelCounts = []; for (const [channelId, timestamps] of activityMap.entries()) { - const recent = timestamps.filter(t => t >= cutoff); + const recent = timestamps.filter((t) => t >= cutoff); if (!recent.length) { activityMap.delete(channelId); @@ -176,16 +178,16 @@ function getCommunitySnapshot(guild, settings) { const topChannelIds = channelCounts .sort((a, b) => b.count - a.count) .slice(0, 3) - .map(entry => entry.channelId); + .map((entry) => entry.channelId); const activeVoiceChannels = guild.channels.cache.filter( - channel => channel?.isVoiceBased?.() && channel.members?.size > 0 + (channel) => channel?.isVoiceBased?.() && channel.members?.size > 0, ); const voiceChannels = activeVoiceChannels.size; const voiceParticipants = [...activeVoiceChannels.values()].reduce( (sum, channel) => sum + (channel.members?.size || 0), - 0 + 0, ); const level = getActivityLevel(messageCount, voiceParticipants); @@ -221,7 +223,7 @@ function getActivityLevel(messageCount, voiceParticipants) { * @returns {string} */ function 
buildVibeLine(snapshot, suggestedChannels) { - const topChannels = snapshot.topChannelIds.map(id => `<#${id}>`); + const topChannels = snapshot.topChannelIds.map((id) => `<#${id}>`); const channelList = (topChannels.length ? topChannels : suggestedChannels).slice(0, 2); const channelText = channelList.join(' + '); const hasChannels = channelList.length > 0; @@ -341,7 +343,8 @@ function getGreetingTemplates(timeOfDay) { ], afternoon: [ (ctx) => `👋 Welcome to **${ctx.server}**, <@${ctx.id}>!`, - (ctx) => `Nice timing, <@${ctx.id}> - welcome to the **${ctx.server}** corner of the internet.`, + (ctx) => + `Nice timing, <@${ctx.id}> - welcome to the **${ctx.server}** corner of the internet.`, (ctx) => `Hey <@${ctx.id}>! Glad you made it into **${ctx.server}**.`, ], evening: [ @@ -374,11 +377,10 @@ function getSuggestedChannels(member, config, snapshot) { const channelIds = [...new Set([...top, ...configured, ...legacy])] .filter(Boolean) - .filter(id => member.guild.channels.cache.has(id)) + .filter((id) => member.guild.channels.cache.has(id)) .slice(0, 3); - return channelIds - .map(id => `<#${id}>`); + return channelIds.map((id) => `<#${id}>`); } /** @@ -388,7 +390,7 @@ function getSuggestedChannels(member, config, snapshot) { */ function extractChannelIdsFromTemplate(template) { const matches = template.match(/<#(\d+)>/g) || []; - return matches.map(match => match.replace(/[^\d]/g, '')); + return matches.map((match) => match.replace(/[^\d]/g, '')); } /** diff --git a/src/utils/errors.js b/src/utils/errors.js index 80473917..5d0e40af 100644 --- a/src/utils/errors.js +++ b/src/utils/errors.js @@ -48,7 +48,7 @@ export function classifyError(error, context = {}) { const status = error.status || context.status || context.statusCode; // Network errors - if (code === 'ECONNREFUSED' || code === 'ENOTFOUND' || code === 'ETIMEDOUT') { + if (code === 'ECONNREFUSED' || code === 'ENOTFOUND') { return ErrorType.NETWORK; } if (code === 'ETIMEDOUT' || message.includes('timeout')) { 
@@ -115,31 +115,44 @@ export function getUserFriendlyMessage(error, context = {}) { const errorType = classifyError(error, context); const messages = { - [ErrorType.NETWORK]: "I'm having trouble connecting to my brain right now. Check if the AI service is running and try again!", + [ErrorType.NETWORK]: + "I'm having trouble connecting to my brain right now. Check if the AI service is running and try again!", - [ErrorType.TIMEOUT]: "That took too long to process. Try again with a shorter message, or wait a moment and retry!", + [ErrorType.TIMEOUT]: + 'That took too long to process. Try again with a shorter message, or wait a moment and retry!', - [ErrorType.API_RATE_LIMIT]: "Whoa, too many requests! Let's take a quick breather. Try again in a minute.", + [ErrorType.API_RATE_LIMIT]: + "Whoa, too many requests! Let's take a quick breather. Try again in a minute.", - [ErrorType.API_UNAUTHORIZED]: "I'm having authentication issues with the AI service. An admin needs to check the API credentials.", + [ErrorType.API_UNAUTHORIZED]: + "I'm having authentication issues with the AI service. An admin needs to check the API credentials.", - [ErrorType.API_NOT_FOUND]: "The AI service endpoint isn't responding. Please check if it's configured correctly.", + [ErrorType.API_NOT_FOUND]: + "The AI service endpoint isn't responding. Please check if it's configured correctly.", - [ErrorType.API_SERVER_ERROR]: "The AI service is having technical difficulties. It should recover automatically - try again in a moment!", + [ErrorType.API_SERVER_ERROR]: + 'The AI service is having technical difficulties. It should recover automatically - try again in a moment!', - [ErrorType.API_ERROR]: "Something went wrong with the AI service. Give it another shot in a moment!", + [ErrorType.API_ERROR]: + 'Something went wrong with the AI service. Give it another shot in a moment!', - [ErrorType.DISCORD_PERMISSION]: "I don't have permission to do that! 
An admin needs to check my role permissions.", + [ErrorType.DISCORD_PERMISSION]: + "I don't have permission to do that! An admin needs to check my role permissions.", - [ErrorType.DISCORD_CHANNEL_NOT_FOUND]: "I can't find that channel. It might have been deleted, or I don't have access to it.", + [ErrorType.DISCORD_CHANNEL_NOT_FOUND]: + "I can't find that channel. It might have been deleted, or I don't have access to it.", - [ErrorType.DISCORD_MISSING_ACCESS]: "I don't have access to that resource. Please check my permissions!", + [ErrorType.DISCORD_MISSING_ACCESS]: + "I don't have access to that resource. Please check my permissions!", - [ErrorType.CONFIG_MISSING]: "Configuration file not found! Please create a config.json file (you can copy from config.example.json).", + [ErrorType.CONFIG_MISSING]: + 'Configuration file not found! Please create a config.json file (you can copy from config.example.json).', - [ErrorType.CONFIG_INVALID]: "The configuration file has errors. Please check config.json for syntax errors or missing required fields.", + [ErrorType.CONFIG_INVALID]: + 'The configuration file has errors. Please check config.json for syntax errors or missing required fields.', - [ErrorType.UNKNOWN]: "Something unexpected happened. Try again, and if it keeps happening, check the logs for details.", + [ErrorType.UNKNOWN]: + 'Something unexpected happened. 
Try again, and if it keeps happening, check the logs for details.', }; return messages[errorType] || messages[ErrorType.UNKNOWN]; @@ -156,27 +169,34 @@ export function getSuggestedNextSteps(error, context = {}) { const errorType = classifyError(error, context); const suggestions = { - [ErrorType.NETWORK]: "Make sure the AI service (OpenClaw) is running and accessible.", + [ErrorType.NETWORK]: 'Make sure the AI service (OpenClaw) is running and accessible.', - [ErrorType.TIMEOUT]: "Try a shorter message or wait a moment before retrying.", + [ErrorType.TIMEOUT]: 'Try a shorter message or wait a moment before retrying.', - [ErrorType.API_RATE_LIMIT]: "Wait 60 seconds before trying again.", + [ErrorType.API_RATE_LIMIT]: 'Wait 60 seconds before trying again.', - [ErrorType.API_UNAUTHORIZED]: "Check the OPENCLAW_TOKEN environment variable and API credentials.", + [ErrorType.API_UNAUTHORIZED]: + 'Check the OPENCLAW_API_KEY environment variable (or legacy OPENCLAW_TOKEN) and API credentials.', - [ErrorType.API_NOT_FOUND]: "Verify the OPENCLAW_URL environment variable points to the correct endpoint.", + [ErrorType.API_NOT_FOUND]: + 'Verify OPENCLAW_API_URL (or legacy OPENCLAW_URL) points to the correct endpoint.', - [ErrorType.API_SERVER_ERROR]: "The service should recover automatically. If it persists, restart the AI service.", + [ErrorType.API_SERVER_ERROR]: + 'The service should recover automatically. 
If it persists, restart the AI service.', - [ErrorType.DISCORD_PERMISSION]: "Grant the bot appropriate permissions in Server Settings > Roles.", + [ErrorType.DISCORD_PERMISSION]: + 'Grant the bot appropriate permissions in Server Settings > Roles.', - [ErrorType.DISCORD_CHANNEL_NOT_FOUND]: "Update the channel ID in config.json or verify the channel exists.", + [ErrorType.DISCORD_CHANNEL_NOT_FOUND]: + 'Update the channel ID in config.json or verify the channel exists.', - [ErrorType.DISCORD_MISSING_ACCESS]: "Ensure the bot has access to the required channels and roles.", + [ErrorType.DISCORD_MISSING_ACCESS]: + 'Ensure the bot has access to the required channels and roles.', - [ErrorType.CONFIG_MISSING]: "Create config.json from config.example.json and fill in your settings.", + [ErrorType.CONFIG_MISSING]: + 'Create config.json from config.example.json and fill in your settings.', - [ErrorType.CONFIG_INVALID]: "Validate your config.json syntax using a JSON validator.", + [ErrorType.CONFIG_INVALID]: 'Validate your config.json syntax using a JSON validator.', }; return suggestions[errorType] || null; diff --git a/src/utils/loadCommands.js b/src/utils/loadCommands.js new file mode 100644 index 00000000..60fefaf6 --- /dev/null +++ b/src/utils/loadCommands.js @@ -0,0 +1,53 @@ +import { readdirSync } from 'node:fs'; +import { join } from 'node:path'; +import { error as logError, info as logInfo, warn as logWarn } from '../logger.js'; + +const defaultCommandLogger = { + info: logInfo, + warn: logWarn, + error: logError, +}; + +/** + * Load command modules from a directory. 
+ * + * @param {object} options + * @param {string} options.commandsPath - Absolute path to command files + * @param {(command: object) => void} [options.onCommandLoaded] - Optional callback for each loaded command + * @param {boolean} [options.logLoaded=true] - Whether to log each successfully loaded command + * @param {{info: Function, warn: Function, error: Function}} [options.commandLogger] - Logger implementation override (for tests) + * @returns {Promise} + */ +export async function loadCommandsFromDirectory({ + commandsPath, + onCommandLoaded = () => {}, + logLoaded = true, + commandLogger = defaultCommandLogger, +}) { + const commandFiles = readdirSync(commandsPath).filter((file) => file.endsWith('.js')); + const commands = []; + + for (const file of commandFiles) { + const filePath = join(commandsPath, file); + + try { + const command = await import(filePath); + + if (!command.data || !command.execute) { + commandLogger.warn('Command missing data or execute export', { file }); + continue; + } + + commands.push(command); + onCommandLoaded(command); + + if (logLoaded) { + commandLogger.info('Loaded command', { command: command.data.name }); + } + } catch (err) { + commandLogger.error('Failed to load command', { file, error: err.message }); + } + } + + return commands; +} diff --git a/src/utils/registerCommands.js b/src/utils/registerCommands.js index 65bae731..10160e2b 100644 --- a/src/utils/registerCommands.js +++ b/src/utils/registerCommands.js @@ -5,6 +5,7 @@ */ import { REST, Routes } from 'discord.js'; +import { info, error as logError } from '../logger.js'; /** * Register slash commands with Discord @@ -25,7 +26,7 @@ export async function registerCommands(commands, clientId, token, guildId = null } // Convert command modules to JSON for API - const commandData = commands.map(cmd => { + const commandData = commands.map((cmd) => { if (!cmd.data || typeof cmd.data.toJSON !== 'function') { throw new Error('Each command must have a .data property with 
toJSON() method'); } @@ -35,26 +36,24 @@ export async function registerCommands(commands, clientId, token, guildId = null const rest = new REST({ version: '10' }).setToken(token); try { - console.log(`🔄 Registering ${commandData.length} slash command(s)...`); + info(`Registering ${commandData.length} slash command(s)`); let data; if (guildId) { // Guild-specific commands (instant updates, good for development) - data = await rest.put( - Routes.applicationGuildCommands(clientId, guildId), - { body: commandData } - ); + data = await rest.put(Routes.applicationGuildCommands(clientId, guildId), { + body: commandData, + }); } else { // Global commands (can take up to 1 hour to update) - data = await rest.put( - Routes.applicationCommands(clientId), - { body: commandData } - ); + data = await rest.put(Routes.applicationCommands(clientId), { body: commandData }); } - console.log(`✅ Successfully registered ${data.length} slash command(s)${guildId ? ' (guild)' : ' (global)'}`); + info(`Successfully registered ${data.length} slash command(s)`, { + scope: guildId ? 'guild' : 'global', + }); } catch (err) { - console.error('❌ Failed to register commands:', err.message); + logError('Failed to register commands', { error: err.message, stack: err.stack }); throw err; } } diff --git a/src/utils/retry.js b/src/utils/retry.js index 1d14e187..bf2442f4 100644 --- a/src/utils/retry.js +++ b/src/utils/retry.js @@ -5,8 +5,8 @@ * exponential backoff and integration with error classification. 
*/ -import { isRetryable, classifyError } from './errors.js'; -import { warn, error, debug } from '../logger.js'; +import { debug, error, warn } from '../logger.js'; +import { classifyError, isRetryable } from './errors.js'; /** * Sleep for a specified duration @@ -14,7 +14,7 @@ import { warn, error, debug } from '../logger.js'; * @returns {Promise} */ function sleep(ms) { - return new Promise(resolve => setTimeout(resolve, ms)); + return new Promise((resolve) => setTimeout(resolve, ms)); } /** @@ -26,7 +26,7 @@ function sleep(ms) { */ function calculateBackoff(attempt, baseDelay, maxDelay) { // Exponential backoff: baseDelay * 2^attempt - const delay = baseDelay * Math.pow(2, attempt); + const delay = baseDelay * 2 ** attempt; // Cap at maxDelay return Math.min(delay, maxDelay); diff --git a/test-log-levels.js b/test-log-levels.js deleted file mode 100644 index ad73c9d4..00000000 --- a/test-log-levels.js +++ /dev/null @@ -1,64 +0,0 @@ -/** - * Log Level Verification Test - * - * This script tests that all log levels work correctly and filtering behaves as expected. 
- * - * Expected behavior: - * - debug level: shows debug, info, warn, error - * - info level: shows info, warn, error (no debug) - * - warn level: shows warn, error (no debug, info) - * - error level: shows only error - */ - -import { debug, info, warn, error } from './src/logger.js'; - -console.log('\n=== Log Level Verification Test ===\n'); -console.log(`Current LOG_LEVEL: ${process.env.LOG_LEVEL || 'info (default)'}`); -console.log('Testing all log levels...\n'); - -// Test all log levels with different types of messages -debug('DEBUG: This is a debug message', { test: 'debug-data', value: 1 }); -info('INFO: This is an info message', { test: 'info-data', value: 2 }); -warn('WARN: This is a warning message', { test: 'warn-data', value: 3 }); -error('ERROR: This is an error message', { test: 'error-data', value: 4 }); - -// Test with nested metadata -debug('DEBUG: Testing nested metadata', { - user: 'testUser', - context: { - channel: 'test-channel', - guild: 'test-guild' - } -}); - -info('INFO: Testing nested metadata', { - user: 'testUser', - context: { - channel: 'test-channel', - guild: 'test-guild' - } -}); - -warn('WARN: Testing nested metadata', { - user: 'testUser', - context: { - channel: 'test-channel', - guild: 'test-guild' - } -}); - -error('ERROR: Testing nested metadata', { - user: 'testUser', - context: { - channel: 'test-channel', - guild: 'test-guild' - } -}); - -console.log('\n=== Test Complete ==='); -console.log('\nExpected output based on LOG_LEVEL:'); -console.log('- debug: All 8 log messages (4 simple + 4 with nested metadata)'); -console.log('- info: 6 messages (info, warn, error × 2)'); -console.log('- warn: 4 messages (warn, error × 2)'); -console.log('- error: 2 messages (error × 2)'); -console.log('\n'); diff --git a/tests/commands.test.js b/tests/commands.test.js new file mode 100644 index 00000000..13834ba7 --- /dev/null +++ b/tests/commands.test.js @@ -0,0 +1,38 @@ +import { readdirSync } from 'node:fs'; +import { dirname, join } 
from 'node:path'; +import { fileURLToPath } from 'node:url'; +import { beforeAll, describe, expect, it } from 'vitest'; + +const __dirname = dirname(fileURLToPath(import.meta.url)); +const commandsDir = join(__dirname, '..', 'src', 'commands'); + +const commandFiles = readdirSync(commandsDir).filter((f) => f.endsWith('.js')); + +// NOTE: Dynamic imports may trigger module-level side effects (e.g., SlashCommandBuilder +// registration). Each file is imported once per describe block via beforeAll and the +// module cache is shared across tests within the same file. +describe('command files', () => { + it('should have at least one command', () => { + expect(commandFiles.length).toBeGreaterThan(0); + }); + + for (const file of commandFiles) { + describe(file, () => { + let mod; + + beforeAll(async () => { + mod = await import(join(commandsDir, file)); + }); + + it('should export data and execute', () => { + expect(mod.data).toBeDefined(); + expect(mod.data.name).toBeTruthy(); + expect(typeof mod.execute).toBe('function'); + }); + + it('should have a description on data', () => { + expect(mod.data.description).toBeTruthy(); + }); + }); + } +}); diff --git a/tests/commands/config.test.js b/tests/commands/config.test.js new file mode 100644 index 00000000..69234cc6 --- /dev/null +++ b/tests/commands/config.test.js @@ -0,0 +1,364 @@ +import { afterEach, describe, expect, it, vi } from 'vitest'; + +// Mock config module +vi.mock('../../src/modules/config.js', () => ({ + getConfig: vi.fn().mockReturnValue({ + ai: { enabled: true, model: 'test-model', maxTokens: 1024 }, + welcome: { enabled: false, channelId: '' }, + moderation: { enabled: false }, + }), + setConfigValue: vi.fn().mockResolvedValue({ enabled: true, model: 'new-model' }), + resetConfig: vi.fn().mockResolvedValue({}), +})); + +import { autocomplete, data, execute } from '../../src/commands/config.js'; +import { getConfig, resetConfig, setConfigValue } from '../../src/modules/config.js'; + +describe('config 
command', () => { + afterEach(() => { + vi.clearAllMocks(); + }); + + it('should export data with name', () => { + expect(data.name).toBe('config'); + }); + + it('should export adminOnly flag', async () => { + const mod = await import('../../src/commands/config.js'); + expect(mod.adminOnly).toBe(true); + }); + + describe('autocomplete', () => { + it('should autocomplete section names', async () => { + const mockRespond = vi.fn(); + const interaction = { + options: { + getFocused: vi.fn().mockReturnValue({ name: 'section', value: 'ai' }), + }, + respond: mockRespond, + }; + + await autocomplete(interaction); + expect(mockRespond).toHaveBeenCalled(); + const choices = mockRespond.mock.calls[0][0]; + expect(choices.length).toBeGreaterThan(0); + expect(choices[0].name).toBe('ai'); + }); + + it('should autocomplete dot-notation paths', async () => { + const mockRespond = vi.fn(); + const interaction = { + options: { + getFocused: vi.fn().mockReturnValue({ name: 'path', value: 'ai.' }), + }, + respond: mockRespond, + }; + + await autocomplete(interaction); + expect(mockRespond).toHaveBeenCalled(); + const choices = mockRespond.mock.calls[0][0]; + expect(choices.some((c) => c.value.startsWith('ai.'))).toBe(true); + }); + }); + + describe('execute', () => { + describe('view subcommand', () => { + it('should display all config sections', async () => { + const mockReply = vi.fn(); + const interaction = { + options: { + getSubcommand: vi.fn().mockReturnValue('view'), + getString: vi.fn().mockReturnValue(null), + }, + reply: mockReply, + }; + + await execute(interaction); + expect(mockReply).toHaveBeenCalledWith( + expect.objectContaining({ embeds: expect.any(Array), ephemeral: true }), + ); + }); + + it('should display specific section', async () => { + const mockReply = vi.fn(); + const interaction = { + options: { + getSubcommand: vi.fn().mockReturnValue('view'), + getString: vi.fn().mockReturnValue('ai'), + }, + reply: mockReply, + }; + + await execute(interaction); + 
expect(mockReply).toHaveBeenCalledWith( + expect.objectContaining({ embeds: expect.any(Array), ephemeral: true }), + ); + }); + + it('should error for unknown section', async () => { + const mockReply = vi.fn(); + const interaction = { + options: { + getSubcommand: vi.fn().mockReturnValue('view'), + getString: vi.fn().mockReturnValue('nonexistent'), + }, + reply: mockReply, + }; + + await execute(interaction); + expect(mockReply).toHaveBeenCalledWith( + expect.objectContaining({ + content: expect.stringContaining('not found'), + ephemeral: true, + }), + ); + }); + + it('should truncate when config exceeds embed char limit', async () => { + // Create a config with many large sections that exceed 6000 chars total + // Each section generates ~1023 chars in the embed (JSON truncated to 1000 + field name) + // Need 6+ sections to push past the 5800-char truncation threshold + const largeValue = 'x'.repeat(1500); + getConfig.mockReturnValueOnce({ + section1: { data: largeValue }, + section2: { data: largeValue }, + section3: { data: largeValue }, + section4: { data: largeValue }, + section5: { data: largeValue }, + section6: { data: largeValue }, + section7: { data: largeValue }, + }); + const mockReply = vi.fn(); + const interaction = { + options: { + getSubcommand: vi.fn().mockReturnValue('view'), + getString: vi.fn().mockReturnValue(null), + }, + reply: mockReply, + }; + + await execute(interaction); + expect(mockReply).toHaveBeenCalledWith( + expect.objectContaining({ embeds: expect.any(Array), ephemeral: true }), + ); + // The embed should contain a truncation notice + const embed = mockReply.mock.calls[0][0].embeds[0]; + const fields = embed.toJSON().fields; + const truncatedField = fields.find((f) => f.name === '⚠️ Truncated'); + expect(truncatedField).toBeDefined(); + }); + + it('should handle getConfig throwing an error', async () => { + getConfig.mockImplementationOnce(() => { + throw new Error('config error'); + }); + const mockReply = vi.fn(); + const 
interaction = { + options: { + getSubcommand: vi.fn().mockReturnValue('view'), + getString: vi.fn().mockReturnValue(null), + }, + reply: mockReply, + }; + + await execute(interaction); + expect(mockReply).toHaveBeenCalledWith( + expect.objectContaining({ + content: expect.stringContaining('Failed to load config'), + ephemeral: true, + }), + ); + }); + }); + + describe('set subcommand', () => { + it('should set a config value', async () => { + const mockEditReply = vi.fn(); + const interaction = { + options: { + getSubcommand: vi.fn().mockReturnValue('set'), + getString: vi.fn().mockImplementation((name) => { + if (name === 'path') return 'ai.model'; + if (name === 'value') return 'new-model'; + return null; + }), + }, + deferReply: vi.fn().mockResolvedValue(undefined), + editReply: mockEditReply, + }; + + await execute(interaction); + expect(setConfigValue).toHaveBeenCalledWith('ai.model', 'new-model'); + expect(mockEditReply).toHaveBeenCalled(); + }); + + it('should reject invalid section', async () => { + const mockReply = vi.fn(); + const interaction = { + options: { + getSubcommand: vi.fn().mockReturnValue('set'), + getString: vi.fn().mockImplementation((name) => { + if (name === 'path') return 'invalid.key'; + if (name === 'value') return 'value'; + return null; + }), + }, + reply: mockReply, + }; + + await execute(interaction); + expect(mockReply).toHaveBeenCalledWith( + expect.objectContaining({ + content: expect.stringContaining('Invalid section'), + ephemeral: true, + }), + ); + }); + + it('should handle setConfigValue error', async () => { + setConfigValue.mockRejectedValueOnce(new Error('DB error')); + const mockEditReply = vi.fn(); + const interaction = { + options: { + getSubcommand: vi.fn().mockReturnValue('set'), + getString: vi.fn().mockImplementation((name) => { + if (name === 'path') return 'ai.model'; + if (name === 'value') return 'bad'; + return null; + }), + }, + deferReply: vi.fn().mockResolvedValue(undefined), + deferred: true, + editReply: 
mockEditReply, + }; + + await execute(interaction); + expect(mockEditReply).toHaveBeenCalledWith( + expect.objectContaining({ content: expect.stringContaining('Failed to set config') }), + ); + }); + + // deferReply rejects (simulating a Discord API failure), so the error + // originates from the defer call — not from setConfigValue. The path + // 'ai.key' passes section validation because only the top-level + // section ('ai') is checked, making it reach the defer+set path. + it('should handle error when not deferred', async () => { + setConfigValue.mockRejectedValueOnce(new Error('error')); + const mockReply = vi.fn(); + const interaction = { + options: { + getSubcommand: vi.fn().mockReturnValue('set'), + getString: vi.fn().mockImplementation((name) => { + if (name === 'path') return 'ai.key'; + if (name === 'value') return 'val'; + return null; + }), + }, + deferReply: vi.fn().mockRejectedValue(new Error('defer failed')), + deferred: false, + reply: mockReply, + editReply: vi.fn(), + }; + + await execute(interaction); + expect(mockReply).toHaveBeenCalledWith( + expect.objectContaining({ + content: expect.stringContaining('Failed to set config'), + ephemeral: true, + }), + ); + }); + }); + + describe('reset subcommand', () => { + it('should reset specific section', async () => { + const mockEditReply = vi.fn(); + const interaction = { + options: { + getSubcommand: vi.fn().mockReturnValue('reset'), + getString: vi.fn().mockReturnValue('ai'), + }, + deferReply: vi.fn().mockResolvedValue(undefined), + editReply: mockEditReply, + }; + + await execute(interaction); + expect(resetConfig).toHaveBeenCalledWith('ai'); + expect(mockEditReply).toHaveBeenCalled(); + }); + + it('should reset all when no section specified', async () => { + const mockEditReply = vi.fn(); + const interaction = { + options: { + getSubcommand: vi.fn().mockReturnValue('reset'), + getString: vi.fn().mockReturnValue(null), + }, + deferReply: vi.fn().mockResolvedValue(undefined), + editReply: 
mockEditReply, + }; + + await execute(interaction); + expect(resetConfig).toHaveBeenCalledWith(undefined); + }); + + it('should handle reset error with deferred reply', async () => { + resetConfig.mockRejectedValueOnce(new Error('reset failed')); + const mockEditReply = vi.fn(); + const interaction = { + options: { + getSubcommand: vi.fn().mockReturnValue('reset'), + getString: vi.fn().mockReturnValue('ai'), + }, + deferReply: vi.fn().mockResolvedValue(undefined), + deferred: true, + editReply: mockEditReply, + }; + + await execute(interaction); + expect(mockEditReply).toHaveBeenCalledWith( + expect.objectContaining({ content: expect.stringContaining('Failed to reset config') }), + ); + }); + + it('should handle reset error when not deferred', async () => { + resetConfig.mockRejectedValueOnce(new Error('reset failed')); + const mockReply = vi.fn(); + const interaction = { + options: { + getSubcommand: vi.fn().mockReturnValue('reset'), + getString: vi.fn().mockReturnValue('ai'), + }, + deferReply: vi.fn().mockRejectedValue(new Error('defer failed')), + deferred: false, + reply: mockReply, + editReply: vi.fn(), + }; + + await execute(interaction); + expect(mockReply).toHaveBeenCalledWith( + expect.objectContaining({ + content: expect.stringContaining('Failed to reset config'), + ephemeral: true, + }), + ); + }); + }); + + it('should reply with error for unknown subcommand', async () => { + const mockReply = vi.fn(); + const interaction = { + options: { getSubcommand: vi.fn().mockReturnValue('unknown') }, + reply: mockReply, + }; + + await execute(interaction); + expect(mockReply).toHaveBeenCalledWith( + expect.objectContaining({ + content: expect.stringContaining('Unknown subcommand'), + ephemeral: true, + }), + ); + }); + }); +}); diff --git a/tests/commands/ping.test.js b/tests/commands/ping.test.js new file mode 100644 index 00000000..e53bef7c --- /dev/null +++ b/tests/commands/ping.test.js @@ -0,0 +1,67 @@ +import { describe, expect, it, vi } from 'vitest'; + +// 
Mock discord.js with proper class mocks +vi.mock('discord.js', () => { + class MockSlashCommandBuilder { + constructor() { + this.name = ''; + this.description = ''; + } + setName(name) { + this.name = name; + return this; + } + setDescription(desc) { + this.description = desc; + return this; + } + toJSON() { + return { name: this.name, description: this.description }; + } + } + return { SlashCommandBuilder: MockSlashCommandBuilder }; +}); + +import { data, execute } from '../../src/commands/ping.js'; + +/** + * Create a mock Discord interaction for ping command tests. + * @param {Object} overrides - Properties to override on the default mock + * @returns {Object} Mock interaction object + */ +function createMockInteraction(overrides = {}) { + return { + reply: vi.fn().mockResolvedValue({ + resource: { + message: { createdTimestamp: 1000 }, + }, + }), + createdTimestamp: 900, + client: { ws: { ping: 42 } }, + editReply: vi.fn(), + ...overrides, + }; +} + +describe('ping command', () => { + it('should export data with name and description', () => { + expect(data.name).toBe('ping'); + expect(data.description).toBeTruthy(); + }); + + it('should reply with pong and latency info', async () => { + const interaction = createMockInteraction(); + + await execute(interaction); + + expect(interaction.reply).toHaveBeenCalledWith({ + content: 'Pinging...', + withResponse: true, + }); + + expect(interaction.editReply).toHaveBeenCalledWith(expect.stringContaining('Pong')); + const editArg = interaction.editReply.mock.calls[0][0]; + expect(editArg).toContain('100ms'); // 1000 - 900 + expect(editArg).toContain('42ms'); + }); +}); diff --git a/tests/commands/status.test.js b/tests/commands/status.test.js new file mode 100644 index 00000000..5be91364 --- /dev/null +++ b/tests/commands/status.test.js @@ -0,0 +1,383 @@ +import { afterEach, describe, expect, it, vi } from 'vitest'; + +// Mock logger +vi.mock('../../src/logger.js', () => ({ + info: vi.fn(), + error: vi.fn(), + warn: 
vi.fn(), + debug: vi.fn(), +})); + +// Mock health monitor +const healthMocks = vi.hoisted(() => ({ + monitor: { + getStatus: vi.fn().mockReturnValue({ + uptime: 60000, + uptimeFormatted: '1m 0s', + memory: { + heapUsed: 50, + heapTotal: 100, + rss: 120, + external: 5, + formatted: '50MB / 100MB (RSS: 120MB)', + }, + api: { status: 'ok', lastCheck: Date.now() }, + lastAIRequest: Date.now() - 5000, + timestamp: Date.now(), + }), + getDetailedStatus: vi.fn().mockReturnValue({ + uptime: 60000, + uptimeFormatted: '1m 0s', + memory: { + heapUsed: 50, + heapTotal: 100, + rss: 120, + external: 5, + arrayBuffers: 2, + formatted: '50MB / 100MB (RSS: 120MB)', + }, + api: { status: 'ok', lastCheck: Date.now() }, + lastAIRequest: Date.now() - 5000, + timestamp: Date.now(), + process: { + pid: 1234, + platform: 'linux', + nodeVersion: 'v22.0.0', + uptime: 60, + }, + cpu: { user: 1000, system: 500 }, + }), + }, +})); + +vi.mock('../../src/utils/health.js', () => ({ + HealthMonitor: { + getInstance: vi.fn().mockReturnValue(healthMocks.monitor), + }, +})); + +import { data, execute } from '../../src/commands/status.js'; + +describe('status command', () => { + afterEach(() => { + vi.clearAllMocks(); + }); + + it('should export data with name', () => { + expect(data.name).toBe('status'); + }); + + it('should show basic status', async () => { + const mockReply = vi.fn(); + const interaction = { + options: { getBoolean: vi.fn().mockReturnValue(false) }, + reply: mockReply, + }; + + await execute(interaction); + expect(mockReply).toHaveBeenCalledWith(expect.objectContaining({ embeds: expect.any(Array) })); + + // Verify the 'ok' status produces the green emoji + const embed = mockReply.mock.calls[0][0].embeds[0]; + const apiField = embed.data.fields.find((f) => f.name.includes('API')); + expect(apiField.value).toContain('🟢'); + }); + + it('should deny non-admin from detailed view', async () => { + const mockReply = vi.fn(); + const interaction = { + options: { getBoolean: 
vi.fn().mockReturnValue(true) }, + memberPermissions: { has: vi.fn().mockReturnValue(false) }, + reply: mockReply, + }; + + await execute(interaction); + expect(mockReply).toHaveBeenCalledWith( + expect.objectContaining({ + content: expect.stringContaining('administrators'), + ephemeral: true, + }), + ); + }); + + it('should show detailed status for admin', async () => { + const mockReply = vi.fn(); + const interaction = { + options: { getBoolean: vi.fn().mockReturnValue(true) }, + memberPermissions: { has: vi.fn().mockReturnValue(true) }, + reply: mockReply, + }; + + await execute(interaction); + expect(mockReply).toHaveBeenCalledWith( + expect.objectContaining({ + embeds: expect.any(Array), + ephemeral: true, + }), + ); + }); + + it('should handle errors with reply', async () => { + const mockReply = vi.fn().mockResolvedValue(undefined); + const interaction = { + options: { + getBoolean: vi.fn().mockImplementation(() => { + throw new Error('test error'); + }), + }, + replied: false, + deferred: false, + reply: mockReply, + followUp: vi.fn(), + }; + + await execute(interaction); + expect(mockReply).toHaveBeenCalledWith( + expect.objectContaining({ + content: expect.stringContaining("couldn't retrieve"), + ephemeral: true, + }), + ); + }); + + it('should handle errors with followUp when already replied', async () => { + const mockFollowUp = vi.fn().mockResolvedValue(undefined); + const interaction = { + options: { + getBoolean: vi.fn().mockImplementation(() => { + throw new Error('test error'); + }), + }, + replied: true, + deferred: false, + reply: vi.fn(), + followUp: mockFollowUp, + }; + + await execute(interaction); + expect(mockFollowUp).toHaveBeenCalled(); + }); + + it('should handle null memberPermissions for detailed view', async () => { + const mockReply = vi.fn(); + const interaction = { + options: { getBoolean: vi.fn().mockReturnValue(true) }, + memberPermissions: null, + reply: mockReply, + }; + + await execute(interaction); + 
expect(mockReply).toHaveBeenCalledWith( + expect.objectContaining({ + content: expect.stringContaining('administrators'), + ephemeral: true, + }), + ); + }); + + describe('formatRelativeTime branches', () => { + /** Helper: extract the 'Last AI Request' field value from the reply embed */ + function getLastAIRequestField(mockReply) { + const embed = mockReply.mock.calls[0][0].embeds[0]; + const field = embed.data.fields.find((f) => f.name.includes('Last AI Request')); + return field?.value; + } + + it('should show "Never" when lastAIRequest is null', async () => { + healthMocks.monitor.getStatus.mockReturnValueOnce({ + uptime: 60000, + uptimeFormatted: '1m 0s', + memory: { heapUsed: 50, heapTotal: 100, rss: 120, external: 5, formatted: '50MB' }, + api: { status: 'ok', lastCheck: Date.now() }, + lastAIRequest: null, + timestamp: Date.now(), + }); + const mockReply = vi.fn(); + const interaction = { + options: { getBoolean: vi.fn().mockReturnValue(false) }, + reply: mockReply, + }; + await execute(interaction); + expect(mockReply).toHaveBeenCalled(); + expect(getLastAIRequestField(mockReply)).toBe('Never'); + }); + + it('should show "Just now" when lastAIRequest is within 1 second', async () => { + healthMocks.monitor.getStatus.mockReturnValueOnce({ + uptime: 60000, + uptimeFormatted: '1m 0s', + memory: { heapUsed: 50, heapTotal: 100, rss: 120, external: 5, formatted: '50MB' }, + api: { status: 'ok', lastCheck: Date.now() }, + lastAIRequest: Date.now(), + timestamp: Date.now(), + }); + const mockReply = vi.fn(); + const interaction = { + options: { getBoolean: vi.fn().mockReturnValue(false) }, + reply: mockReply, + }; + await execute(interaction); + expect(mockReply).toHaveBeenCalled(); + expect(getLastAIRequestField(mockReply)).toBe('Just now'); + }); + + it('should show minutes ago when lastAIRequest is minutes old', async () => { + healthMocks.monitor.getStatus.mockReturnValueOnce({ + uptime: 60000, + uptimeFormatted: '1m 0s', + memory: { heapUsed: 50, heapTotal: 
100, rss: 120, external: 5, formatted: '50MB' }, + api: { status: 'ok', lastCheck: Date.now() }, + lastAIRequest: Date.now() - 300000, // 5 minutes ago + timestamp: Date.now(), + }); + const mockReply = vi.fn(); + const interaction = { + options: { getBoolean: vi.fn().mockReturnValue(false) }, + reply: mockReply, + }; + await execute(interaction); + expect(mockReply).toHaveBeenCalled(); + expect(getLastAIRequestField(mockReply)).toBe('5m ago'); + }); + + it('should show hours ago when lastAIRequest is hours old', async () => { + healthMocks.monitor.getStatus.mockReturnValueOnce({ + uptime: 60000, + uptimeFormatted: '1m 0s', + memory: { heapUsed: 50, heapTotal: 100, rss: 120, external: 5, formatted: '50MB' }, + api: { status: 'ok', lastCheck: Date.now() }, + lastAIRequest: Date.now() - 7200000, // 2 hours ago + timestamp: Date.now(), + }); + const mockReply = vi.fn(); + const interaction = { + options: { getBoolean: vi.fn().mockReturnValue(false) }, + reply: mockReply, + }; + await execute(interaction); + expect(mockReply).toHaveBeenCalled(); + expect(getLastAIRequestField(mockReply)).toBe('2h ago'); + }); + + it('should show days ago when lastAIRequest is days old', async () => { + healthMocks.monitor.getStatus.mockReturnValueOnce({ + uptime: 60000, + uptimeFormatted: '1m 0s', + memory: { heapUsed: 50, heapTotal: 100, rss: 120, external: 5, formatted: '50MB' }, + api: { status: 'ok', lastCheck: Date.now() }, + lastAIRequest: Date.now() - 172800000, // 2 days ago + timestamp: Date.now(), + }); + const mockReply = vi.fn(); + const interaction = { + options: { getBoolean: vi.fn().mockReturnValue(false) }, + reply: mockReply, + }; + await execute(interaction); + expect(mockReply).toHaveBeenCalled(); + expect(getLastAIRequestField(mockReply)).toBe('2d ago'); + }); + }); + + describe('getStatusEmoji branches', () => { + /** Helper: extract the 'API Status' field value from the reply embed */ + function getAPIStatusField(mockReply) { + const embed = 
mockReply.mock.calls[0][0].embeds[0]; + const field = embed.data.fields.find((f) => f.name.includes('API')); + return field?.value; + } + + it('should show error emoji for error status', async () => { + healthMocks.monitor.getStatus.mockReturnValueOnce({ + uptime: 60000, + uptimeFormatted: '1m 0s', + memory: { heapUsed: 50, heapTotal: 100, rss: 120, external: 5, formatted: '50MB' }, + api: { status: 'error', lastCheck: Date.now() }, + lastAIRequest: Date.now() - 5000, + timestamp: Date.now(), + }); + const mockReply = vi.fn(); + const interaction = { + options: { getBoolean: vi.fn().mockReturnValue(false) }, + reply: mockReply, + }; + await execute(interaction); + expect(mockReply).toHaveBeenCalled(); + expect(getAPIStatusField(mockReply)).toContain('🔴'); + }); + + it('should show unknown emoji for unknown status', async () => { + healthMocks.monitor.getStatus.mockReturnValueOnce({ + uptime: 60000, + uptimeFormatted: '1m 0s', + memory: { heapUsed: 50, heapTotal: 100, rss: 120, external: 5, formatted: '50MB' }, + api: { status: 'unknown', lastCheck: Date.now() }, + lastAIRequest: Date.now() - 5000, + timestamp: Date.now(), + }); + const mockReply = vi.fn(); + const interaction = { + options: { getBoolean: vi.fn().mockReturnValue(false) }, + reply: mockReply, + }; + await execute(interaction); + expect(mockReply).toHaveBeenCalled(); + expect(getAPIStatusField(mockReply)).toContain('🟡'); + }); + + it('should show default emoji for unrecognized status', async () => { + healthMocks.monitor.getStatus.mockReturnValueOnce({ + uptime: 60000, + uptimeFormatted: '1m 0s', + memory: { heapUsed: 50, heapTotal: 100, rss: 120, external: 5, formatted: '50MB' }, + api: { status: 'maintenance', lastCheck: Date.now() }, + lastAIRequest: Date.now() - 5000, + timestamp: Date.now(), + }); + const mockReply = vi.fn(); + const interaction = { + options: { getBoolean: vi.fn().mockReturnValue(false) }, + reply: mockReply, + }; + await execute(interaction); + 
expect(mockReply).toHaveBeenCalled(); + expect(getAPIStatusField(mockReply)).toContain('⚪'); + }); + }); + + it('should handle error when followUp also fails', async () => { + const interaction = { + options: { + getBoolean: vi.fn().mockImplementation(() => { + throw new Error('test error'); + }), + }, + replied: true, + deferred: false, + reply: vi.fn(), + followUp: vi.fn().mockRejectedValue(new Error('followUp failed')), + }; + + // Should not throw even when followUp rejects + await execute(interaction); + expect(interaction.followUp).toHaveBeenCalled(); + }); + + it('should handle error when reply also fails', async () => { + const interaction = { + options: { + getBoolean: vi.fn().mockImplementation(() => { + throw new Error('test error'); + }), + }, + replied: false, + deferred: false, + reply: vi.fn().mockRejectedValue(new Error('reply failed')), + followUp: vi.fn(), + }; + + // Should not throw even when reply rejects + await execute(interaction); + expect(interaction.reply).toHaveBeenCalled(); + }); +}); diff --git a/tests/config.test.js b/tests/config.test.js new file mode 100644 index 00000000..d55c3344 --- /dev/null +++ b/tests/config.test.js @@ -0,0 +1,53 @@ +import { readFileSync } from 'node:fs'; +import { dirname, join } from 'node:path'; +import { fileURLToPath } from 'node:url'; +import { beforeAll, describe, expect, it } from 'vitest'; + +const __dirname = dirname(fileURLToPath(import.meta.url)); +const configPath = join(__dirname, '..', 'config.json'); + +describe('config.json', () => { + let config; + + beforeAll(() => { + const raw = readFileSync(configPath, 'utf-8'); + config = JSON.parse(raw); + }); + + it('should be valid JSON', () => { + expect(config).toBeDefined(); + expect(typeof config).toBe('object'); + }); + + it('should have an ai section', () => { + expect(config.ai).toBeDefined(); + expect(typeof config.ai.enabled).toBe('boolean'); + expect(typeof config.ai.model).toBe('string'); + expect(typeof 
config.ai.maxTokens).toBe('number'); + expect(typeof config.ai.systemPrompt).toBe('string'); + expect(Array.isArray(config.ai.channels)).toBe(true); + }); + + it('should have a welcome section', () => { + expect(config.welcome).toBeDefined(); + expect(typeof config.welcome.enabled).toBe('boolean'); + expect(typeof config.welcome.channelId).toBe('string'); + }); + + it('should have a moderation section', () => { + expect(config.moderation).toBeDefined(); + expect(typeof config.moderation.enabled).toBe('boolean'); + expect(typeof config.moderation.alertChannelId).toBe('string'); + }); + + it('should have a permissions section', () => { + expect(config.permissions).toBeDefined(); + expect(typeof config.permissions.enabled).toBe('boolean'); + expect(config.permissions.allowedCommands).toBeDefined(); + }); + + it('should have a logging section', () => { + expect(config.logging).toBeDefined(); + expect(typeof config.logging.level).toBe('string'); + }); +}); diff --git a/tests/db.test.js b/tests/db.test.js new file mode 100644 index 00000000..ca757a3a --- /dev/null +++ b/tests/db.test.js @@ -0,0 +1,193 @@ +import { afterEach, beforeEach, describe, expect, it, vi } from 'vitest'; + +const pgMocks = vi.hoisted(() => ({ + poolConfig: null, + poolQuery: vi.fn(), + poolOn: vi.fn(), + poolConnect: vi.fn(), + poolEnd: vi.fn(), + clientQuery: vi.fn(), + clientRelease: vi.fn(), +})); + +vi.mock('../src/logger.js', () => ({ + info: vi.fn(), + error: vi.fn(), + warn: vi.fn(), + debug: vi.fn(), +})); + +vi.mock('pg', () => { + class Pool { + constructor(config) { + pgMocks.poolConfig = config; + } + + query(...args) { + return pgMocks.poolQuery(...args); + } + + on(...args) { + return pgMocks.poolOn(...args); + } + + connect(...args) { + return pgMocks.poolConnect(...args); + } + + end(...args) { + return pgMocks.poolEnd(...args); + } + } + + return { default: { Pool } }; +}); + +describe('db module', () => { + let dbModule; + let originalDatabaseUrl; + let originalDatabaseSsl; + + 
beforeEach(async () => { + vi.resetModules(); + + // Save original env vars to restore after each test + originalDatabaseUrl = process.env.DATABASE_URL; + originalDatabaseSsl = process.env.DATABASE_SSL; + + pgMocks.poolConfig = null; + pgMocks.poolQuery.mockReset().mockResolvedValue({}); + pgMocks.poolOn.mockReset(); + pgMocks.poolConnect.mockReset(); + pgMocks.poolEnd.mockReset().mockResolvedValue(undefined); + pgMocks.clientQuery.mockReset().mockResolvedValue({}); + pgMocks.clientRelease.mockReset(); + + pgMocks.poolConnect.mockResolvedValue({ + query: pgMocks.clientQuery, + release: pgMocks.clientRelease, + }); + + process.env.DATABASE_URL = 'postgresql://test:test@localhost:5432/testdb'; + delete process.env.DATABASE_SSL; + + dbModule = await import('../src/db.js'); + }); + + afterEach(async () => { + try { + await dbModule.closeDb(); + } catch { + // ignore cleanup failures + } + + // Restore original env vars + if (originalDatabaseUrl !== undefined) { + process.env.DATABASE_URL = originalDatabaseUrl; + } else { + delete process.env.DATABASE_URL; + } + if (originalDatabaseSsl !== undefined) { + process.env.DATABASE_SSL = originalDatabaseSsl; + } else { + delete process.env.DATABASE_SSL; + } + vi.clearAllMocks(); + }); + + describe('initDb', () => { + it('should initialize database pool', async () => { + const pool = await dbModule.initDb(); + expect(pool).toBeDefined(); + expect(pgMocks.poolConnect).toHaveBeenCalled(); + expect(pgMocks.clientQuery).toHaveBeenCalledWith('SELECT NOW()'); + expect(pgMocks.clientRelease).toHaveBeenCalled(); + expect(pgMocks.poolQuery).toHaveBeenCalled(); + }); + + it('should return existing pool on second call', async () => { + const pool1 = await dbModule.initDb(); + const pool2 = await dbModule.initDb(); + expect(pool1).toBe(pool2); + expect(pgMocks.poolConnect).toHaveBeenCalledTimes(1); + }); + + it('should throw if DATABASE_URL is not set', async () => { + delete process.env.DATABASE_URL; + await 
expect(dbModule.initDb()).rejects.toThrow( + 'DATABASE_URL environment variable is not set', + ); + }); + + it('should clean up pool on connection test failure', async () => { + pgMocks.poolConnect.mockRejectedValueOnce(new Error('connection failed')); + await expect(dbModule.initDb()).rejects.toThrow('connection failed'); + expect(pgMocks.poolEnd).toHaveBeenCalled(); + }); + }); + + describe('getPool', () => { + it('should throw if pool not initialized', () => { + expect(() => dbModule.getPool()).toThrow('Database not initialized'); + }); + + it('should return pool after init', async () => { + await dbModule.initDb(); + expect(dbModule.getPool()).toBeDefined(); + }); + }); + + describe('closeDb', () => { + it('should close pool', async () => { + await dbModule.initDb(); + await dbModule.closeDb(); + expect(pgMocks.poolEnd).toHaveBeenCalled(); + }); + + it('should do nothing if pool not initialized', async () => { + await dbModule.closeDb(); + }); + + it('should handle close error gracefully', async () => { + await dbModule.initDb(); + pgMocks.poolEnd.mockRejectedValueOnce(new Error('close failed')); + await dbModule.closeDb(); + }); + }); + + describe('SSL configuration', () => { + it('should disable SSL for railway.internal connections', async () => { + process.env.DATABASE_URL = 'postgresql://test@postgres.railway.internal:5432/db'; + await dbModule.initDb(); + expect(pgMocks.poolConfig.ssl).toBe(false); + }); + + it('should disable SSL when DATABASE_SSL is "false"', async () => { + process.env.DATABASE_URL = 'postgresql://test@localhost/db'; + process.env.DATABASE_SSL = 'false'; + await dbModule.initDb(); + expect(pgMocks.poolConfig.ssl).toBe(false); + }); + + it('should disable SSL when DATABASE_SSL is "off"', async () => { + process.env.DATABASE_URL = 'postgresql://test@localhost/db'; + process.env.DATABASE_SSL = 'off'; + await dbModule.initDb(); + expect(pgMocks.poolConfig.ssl).toBe(false); + }); + + it('should use rejectUnauthorized: false for "no-verify"', 
async () => { + process.env.DATABASE_URL = 'postgresql://test@localhost/db'; + process.env.DATABASE_SSL = 'no-verify'; + await dbModule.initDb(); + expect(pgMocks.poolConfig.ssl).toEqual({ rejectUnauthorized: false }); + }); + + it('should use rejectUnauthorized: true by default', async () => { + process.env.DATABASE_URL = 'postgresql://test@localhost/db'; + delete process.env.DATABASE_SSL; + await dbModule.initDb(); + expect(pgMocks.poolConfig.ssl).toEqual({ rejectUnauthorized: true }); + }); + }); +}); diff --git a/tests/index.test.js b/tests/index.test.js new file mode 100644 index 00000000..084d4f10 --- /dev/null +++ b/tests/index.test.js @@ -0,0 +1,559 @@ +import { afterEach, beforeEach, describe, expect, it, vi } from 'vitest'; + +const mocks = vi.hoisted(() => ({ + client: null, + onHandlers: {}, + onceHandlers: {}, + processHandlers: {}, + + fs: { + existsSync: vi.fn(), + mkdirSync: vi.fn(), + readdirSync: vi.fn(), + readFileSync: vi.fn(), + writeFileSync: vi.fn(), + }, + + logger: { + info: vi.fn(), + warn: vi.fn(), + error: vi.fn(), + }, + + db: { + initDb: vi.fn(), + closeDb: vi.fn(), + }, + + ai: { + getConversationHistory: vi.fn(), + setConversationHistory: vi.fn(), + }, + + config: { + loadConfig: vi.fn(), + }, + + events: { + registerEventHandlers: vi.fn(), + }, + + health: { + instance: {}, + getInstance: vi.fn(), + }, + + permissions: { + hasPermission: vi.fn(), + getPermissionError: vi.fn(), + }, + + registerCommands: vi.fn(), + dotenvConfig: vi.fn(), +})); + +vi.mock('node:fs', () => ({ + existsSync: mocks.fs.existsSync, + mkdirSync: mocks.fs.mkdirSync, + readdirSync: mocks.fs.readdirSync, + readFileSync: mocks.fs.readFileSync, + writeFileSync: mocks.fs.writeFileSync, +})); + +vi.mock('discord.js', () => { + class Client { + constructor() { + this.user = { id: 'bot-user-id', tag: 'Bot#0001' }; + this.guilds = { cache: { size: 2 } }; + this.ws = { ping: 12 }; + this.commands = null; + this.login = vi.fn().mockResolvedValue('logged-in'); + 
this.destroy = vi.fn(); + mocks.client = this; + } + + once(event, cb) { + if (!mocks.onceHandlers[event]) mocks.onceHandlers[event] = []; + mocks.onceHandlers[event].push(cb); + } + + on(event, cb) { + if (!mocks.onHandlers[event]) mocks.onHandlers[event] = []; + mocks.onHandlers[event].push(cb); + } + } + + class Collection extends Map {} + + return { + Client, + Collection, + Events: { + ClientReady: 'clientReady', + }, + GatewayIntentBits: { + Guilds: 1, + GuildMessages: 2, + MessageContent: 3, + GuildMembers: 4, + GuildVoiceStates: 5, + }, + }; +}); + +vi.mock('dotenv', () => ({ + config: mocks.dotenvConfig, +})); + +vi.mock('../src/db.js', () => ({ + initDb: mocks.db.initDb, + closeDb: mocks.db.closeDb, +})); + +vi.mock('../src/logger.js', () => ({ + info: mocks.logger.info, + warn: mocks.logger.warn, + error: mocks.logger.error, +})); + +vi.mock('../src/modules/ai.js', () => ({ + getConversationHistory: mocks.ai.getConversationHistory, + setConversationHistory: mocks.ai.setConversationHistory, +})); + +vi.mock('../src/modules/config.js', () => ({ + loadConfig: mocks.config.loadConfig, +})); + +vi.mock('../src/modules/events.js', () => ({ + registerEventHandlers: mocks.events.registerEventHandlers, +})); + +vi.mock('../src/utils/health.js', () => ({ + HealthMonitor: { + getInstance: mocks.health.getInstance, + }, +})); + +vi.mock('../src/utils/permissions.js', () => ({ + hasPermission: mocks.permissions.hasPermission, + getPermissionError: mocks.permissions.getPermissionError, +})); + +vi.mock('../src/utils/registerCommands.js', () => ({ + registerCommands: mocks.registerCommands, +})); + +async function importIndex({ + token = 'test-token', + databaseUrl = 'postgres://db', + stateFile = false, + stateRaw = null, + readdirFiles = [], + loadConfigReject = null, + throwOnExit = true, +} = {}) { + vi.resetModules(); + + mocks.onHandlers = {}; + mocks.onceHandlers = {}; + mocks.processHandlers = {}; + + mocks.fs.existsSync.mockReset().mockImplementation((path) => 
{ + const p = String(path); + if (p.endsWith('state.json')) return stateFile; + return false; + }); + mocks.fs.mkdirSync.mockReset(); + mocks.fs.readdirSync.mockReset().mockReturnValue(readdirFiles); + mocks.fs.readFileSync + .mockReset() + .mockReturnValue( + stateRaw ?? + JSON.stringify({ conversationHistory: [['ch1', [{ role: 'user', content: 'hi' }]]] }), + ); + mocks.fs.writeFileSync.mockReset(); + + mocks.logger.info.mockReset(); + mocks.logger.warn.mockReset(); + mocks.logger.error.mockReset(); + + mocks.db.initDb.mockReset().mockResolvedValue(undefined); + mocks.db.closeDb.mockReset().mockResolvedValue(undefined); + + mocks.ai.getConversationHistory.mockReset().mockReturnValue(new Map()); + mocks.ai.setConversationHistory.mockReset(); + + mocks.config.loadConfig.mockReset().mockImplementation(() => { + if (loadConfigReject) { + return Promise.reject(loadConfigReject); + } + return Promise.resolve({ + ai: { enabled: true, channels: [] }, + welcome: { enabled: true, channelId: 'welcome-ch' }, + moderation: { enabled: true }, + permissions: { enabled: false, usePermissions: false }, + }); + }); + + mocks.events.registerEventHandlers.mockReset(); + mocks.health.getInstance.mockReset().mockReturnValue({}); + mocks.permissions.hasPermission.mockReset().mockReturnValue(true); + mocks.permissions.getPermissionError.mockReset().mockReturnValue('nope'); + mocks.registerCommands.mockReset().mockResolvedValue(undefined); + mocks.dotenvConfig.mockReset(); + + if (token == null) { + delete process.env.DISCORD_TOKEN; + } else { + process.env.DISCORD_TOKEN = token; + } + + if (databaseUrl == null) { + delete process.env.DATABASE_URL; + } else { + process.env.DATABASE_URL = databaseUrl; + } + + vi.spyOn(process, 'on').mockImplementation((event, cb) => { + mocks.processHandlers[event] = cb; + return process; + }); + + vi.spyOn(process, 'exit').mockImplementation((code) => { + if (throwOnExit) { + throw new Error(`process.exit:${code}`); + } + return code; + }); + + const mod 
= await import('../src/index.js'); + // Pragmatic workaround: settle async microtasks from startup(). + // The 3 hops (2x Promise.resolve + 1x setImmediate) are coupled to + // the current async hop count in startup(). If startup() gains more + // awaits, this settling sequence may need to be extended. + await Promise.resolve(); + await Promise.resolve(); + await new Promise((resolve) => setImmediate(resolve)); + return mod; +} + +describe('index.js', () => { + beforeEach(() => { + delete process.env.DISCORD_TOKEN; + delete process.env.DATABASE_URL; + }); + + afterEach(() => { + vi.restoreAllMocks(); + delete process.env.DISCORD_TOKEN; + delete process.env.DATABASE_URL; + }); + + it('should exit when DISCORD_TOKEN is missing', async () => { + await expect(importIndex({ token: null, databaseUrl: null })).rejects.toThrow('process.exit:1'); + expect(mocks.logger.error).toHaveBeenCalledWith('DISCORD_TOKEN not set'); + }); + + it('should initialize startup with database when DATABASE_URL is set', async () => { + await importIndex({ token: 'abc', databaseUrl: 'postgres://db' }); + + expect(mocks.db.initDb).toHaveBeenCalled(); + expect(mocks.config.loadConfig).toHaveBeenCalled(); + expect(mocks.events.registerEventHandlers).toHaveBeenCalled(); + expect(mocks.client.login).toHaveBeenCalledWith('abc'); + }); + + it('should warn and skip db init when DATABASE_URL is not set', async () => { + await importIndex({ token: 'abc', databaseUrl: null }); + + expect(mocks.db.initDb).not.toHaveBeenCalled(); + expect(mocks.logger.warn).toHaveBeenCalledWith( + 'DATABASE_URL not set — using config.json only (no persistence)', + ); + expect(mocks.client.login).toHaveBeenCalledWith('abc'); + }); + + it('should load state from disk when state file exists', async () => { + await importIndex({ token: 'abc', databaseUrl: null, stateFile: true }); + expect(mocks.ai.setConversationHistory).toHaveBeenCalled(); + }); + + it('should export pending request helpers', async () => { + const mod = await 
importIndex({ token: 'abc', databaseUrl: null }); + + const requestId = mod.registerPendingRequest(); + expect(typeof requestId).toBe('symbol'); + + // should not throw + mod.removePendingRequest(requestId); + }); + + it('should handle autocomplete interactions', async () => { + await importIndex({ token: 'abc', databaseUrl: null }); + + const autocomplete = vi.fn().mockResolvedValue(undefined); + mocks.client.commands.set('config', { autocomplete }); + + const interactionHandler = mocks.onHandlers.interactionCreate[0]; + const interaction = { + isAutocomplete: () => true, + commandName: 'config', + }; + + await interactionHandler(interaction); + expect(autocomplete).toHaveBeenCalledWith(interaction); + }); + + it('should handle autocomplete errors gracefully', async () => { + await importIndex({ token: 'abc', databaseUrl: null }); + + const autocomplete = vi.fn().mockRejectedValue(new Error('autocomplete fail')); + mocks.client.commands.set('config', { autocomplete }); + + const interactionHandler = mocks.onHandlers.interactionCreate[0]; + const interaction = { + isAutocomplete: () => true, + commandName: 'config', + }; + + await interactionHandler(interaction); + expect(mocks.logger.error).toHaveBeenCalledWith('Autocomplete error', { + command: 'config', + error: 'autocomplete fail', + }); + }); + + it('should ignore non-chat interactions', async () => { + await importIndex({ token: 'abc', databaseUrl: null }); + + const interactionHandler = mocks.onHandlers.interactionCreate[0]; + const interaction = { + isAutocomplete: () => false, + isChatInputCommand: () => false, + }; + + await interactionHandler(interaction); + // no crash = pass + }); + + it('should deny command when user lacks permission', async () => { + await importIndex({ token: 'abc', databaseUrl: null }); + mocks.permissions.hasPermission.mockReturnValue(false); + mocks.permissions.getPermissionError.mockReturnValue('denied'); + + const interactionHandler = mocks.onHandlers.interactionCreate[0]; + 
const interaction = { + isAutocomplete: () => false, + isChatInputCommand: () => true, + commandName: 'config', + member: {}, + user: { tag: 'user#1' }, + reply: vi.fn().mockResolvedValue(undefined), + }; + + await interactionHandler(interaction); + expect(interaction.reply).toHaveBeenCalledWith({ content: 'denied', ephemeral: true }); + }); + + it('should handle command not found', async () => { + await importIndex({ token: 'abc', databaseUrl: null }); + mocks.permissions.hasPermission.mockReturnValue(true); + + const interactionHandler = mocks.onHandlers.interactionCreate[0]; + const interaction = { + isAutocomplete: () => false, + isChatInputCommand: () => true, + commandName: 'missing', + member: {}, + user: { tag: 'user#1' }, + reply: vi.fn().mockResolvedValue(undefined), + }; + + await interactionHandler(interaction); + expect(interaction.reply).toHaveBeenCalledWith({ + content: '❌ Command not found.', + ephemeral: true, + }); + }); + + it('should execute command successfully', async () => { + await importIndex({ token: 'abc', databaseUrl: null }); + + const execute = vi.fn().mockResolvedValue(undefined); + mocks.client.commands.set('ping', { execute }); + + const interactionHandler = mocks.onHandlers.interactionCreate[0]; + const interaction = { + isAutocomplete: () => false, + isChatInputCommand: () => true, + commandName: 'ping', + member: {}, + user: { tag: 'user#1' }, + reply: vi.fn(), + }; + + await interactionHandler(interaction); + expect(execute).toHaveBeenCalledWith(interaction); + }); + + it('should handle command execution errors with reply', async () => { + await importIndex({ token: 'abc', databaseUrl: null }); + + const execute = vi.fn().mockRejectedValue(new Error('boom')); + mocks.client.commands.set('ping', { execute }); + + const interactionHandler = mocks.onHandlers.interactionCreate[0]; + const interaction = { + isAutocomplete: () => false, + isChatInputCommand: () => true, + commandName: 'ping', + member: {}, + user: { tag: 'user#1' }, + 
replied: false, + deferred: false, + reply: vi.fn().mockResolvedValue(undefined), + followUp: vi.fn(), + }; + + await interactionHandler(interaction); + expect(interaction.reply).toHaveBeenCalledWith({ + content: '❌ An error occurred while executing this command.', + ephemeral: true, + }); + }); + + it('should handle command execution errors with followUp when already replied', async () => { + await importIndex({ token: 'abc', databaseUrl: null }); + + const execute = vi.fn().mockRejectedValue(new Error('boom')); + mocks.client.commands.set('ping', { execute }); + + const interactionHandler = mocks.onHandlers.interactionCreate[0]; + const interaction = { + isAutocomplete: () => false, + isChatInputCommand: () => true, + commandName: 'ping', + member: {}, + user: { tag: 'user#1' }, + replied: true, + deferred: false, + reply: vi.fn(), + followUp: vi.fn().mockResolvedValue(undefined), + }; + + await interactionHandler(interaction); + expect(interaction.followUp).toHaveBeenCalledWith({ + content: '❌ An error occurred while executing this command.', + ephemeral: true, + }); + }); + + it('should register commands on clientReady', async () => { + await importIndex({ token: 'abc', databaseUrl: null }); + + mocks.client.commands.set('ping', { data: { name: 'ping' }, execute: vi.fn() }); + + await mocks.onceHandlers.clientReady[0](); + + expect(mocks.registerCommands).toHaveBeenCalledWith( + Array.from(mocks.client.commands.values()), + 'bot-user-id', + 'abc', + null, + ); + }); + + it('should handle command registration failure on ready', async () => { + await importIndex({ token: 'abc', databaseUrl: null }); + + mocks.registerCommands.mockRejectedValueOnce(new Error('register fail')); + + await mocks.onceHandlers.clientReady[0](); + + expect(mocks.logger.error).toHaveBeenCalledWith('Command registration failed', { + error: 'register fail', + }); + }); + + it('should run graceful shutdown on SIGINT', async () => { + await importIndex({ token: 'abc', databaseUrl: null }); + 
+ const sigintHandler = mocks.processHandlers.SIGINT; + await expect(sigintHandler()).rejects.toThrow('process.exit:0'); + + expect(mocks.fs.mkdirSync).toHaveBeenCalled(); + expect(mocks.fs.writeFileSync).toHaveBeenCalled(); + expect(mocks.db.closeDb).toHaveBeenCalled(); + expect(mocks.client.destroy).toHaveBeenCalled(); + }); + + it('should log save-state failure during shutdown', async () => { + await importIndex({ token: 'abc', databaseUrl: null }); + mocks.fs.writeFileSync.mockImplementationOnce(() => { + throw new Error('disk full'); + }); + + const sigintHandler = mocks.processHandlers.SIGINT; + await expect(sigintHandler()).rejects.toThrow('process.exit:0'); + + expect(mocks.logger.error).toHaveBeenCalledWith('Failed to save state', { + error: 'disk full', + }); + }); + + it('should log load-state failure for invalid JSON', async () => { + await importIndex({ + token: 'abc', + databaseUrl: null, + stateFile: true, + stateRaw: '{invalid-json', + }); + + expect(mocks.logger.error).toHaveBeenCalledWith('Failed to load state', { + error: expect.any(String), + }); + }); + + // Skipped: dynamic import() in vitest doesn't throw for missing files the same way Node does at runtime + it.skip('should continue startup when command import fails', () => {}); + + it('should log discord client error events', async () => { + await importIndex({ token: 'abc', databaseUrl: null }); + + mocks.onHandlers.error[0]({ message: 'discord broke', stack: 'stack', code: 500 }); + + expect(mocks.logger.error).toHaveBeenCalledWith('Discord client error', { + error: 'discord broke', + stack: 'stack', + code: 500, + }); + }); + + it('should log unhandledRejection events', async () => { + await importIndex({ token: 'abc', databaseUrl: null }); + + mocks.processHandlers.unhandledRejection(new Error('rejected')); + + expect(mocks.logger.error).toHaveBeenCalledWith('Unhandled promise rejection', { + error: 'rejected', + stack: expect.any(String), + type: 'object', + }); + }); + + it('should 
handle startup failure and exit', async () => { + await importIndex({ + token: 'abc', + databaseUrl: null, + loadConfigReject: new Error('config fail'), + throwOnExit: false, + }); + + expect(mocks.logger.error).toHaveBeenCalledWith('Startup failed', { + error: 'config fail', + stack: expect.any(String), + }); + expect(process.exit).toHaveBeenCalledWith(1); + }); +}); diff --git a/tests/logger.test.js b/tests/logger.test.js new file mode 100644 index 00000000..e75fbf42 --- /dev/null +++ b/tests/logger.test.js @@ -0,0 +1,131 @@ +import { afterEach, describe, expect, it, vi } from 'vitest'; + +// We need to test the logger module, but it reads config.json at import time. +// Mock fs to control what it reads. +vi.mock('node:fs', () => ({ + existsSync: vi.fn().mockReturnValue(false), + readFileSync: vi.fn().mockReturnValue('{}'), + mkdirSync: vi.fn(), +})); + +// Mock winston-daily-rotate-file +vi.mock('winston-daily-rotate-file', () => ({ + default: vi.fn().mockImplementation(() => ({ + on: vi.fn(), + })), +})); + +// NOTE: Logger module is cached after first import. Tests that need fresh +// module state use vi.resetModules() before re-importing. Tests sharing +// the same import get the same winston logger instance. 
+describe('logger module', () => { + afterEach(() => { + vi.restoreAllMocks(); + }); + + it('should export debug, info, warn, error functions', async () => { + const logger = await import('../src/logger.js'); + expect(typeof logger.debug).toBe('function'); + expect(typeof logger.info).toBe('function'); + expect(typeof logger.warn).toBe('function'); + expect(typeof logger.error).toBe('function'); + }); + + it('should export default object with all log functions', async () => { + const logger = await import('../src/logger.js'); + expect(typeof logger.default.debug).toBe('function'); + expect(typeof logger.default.info).toBe('function'); + expect(typeof logger.default.warn).toBe('function'); + expect(typeof logger.default.error).toBe('function'); + expect(logger.default.logger).toBeDefined(); + }); + + it('should call log functions without errors', async () => { + const logger = await import('../src/logger.js'); + // These should not throw + logger.debug('debug message', { key: 'value' }); + logger.info('info message', { key: 'value' }); + logger.warn('warn message', { key: 'value' }); + logger.error('error message', { key: 'value' }); + }); + + it('should call with empty meta', async () => { + const logger = await import('../src/logger.js'); + logger.debug('debug'); + logger.info('info'); + logger.warn('warn'); + logger.error('error'); + }); + + it('should redact sensitive fields', async () => { + const logger = await import('../src/logger.js'); + // Spy on console transport to capture actual output after redaction + const transport = logger.default.logger.transports[0]; + const writeSpy = vi.spyOn(transport, 'log').mockImplementation((_info, cb) => cb?.()); + + logger.info('test', { + token: 'secret-token', + DISCORD_TOKEN: 'secret', + password: 'pass', + apiKey: 'key', + nested: { + token: 'nested-secret', + safe: 'visible', + }, + }); + + expect(writeSpy).toHaveBeenCalled(); + const loggedInfo = writeSpy.mock.calls[0][0]; + 
expect(loggedInfo.token).toBe('[REDACTED]'); + expect(loggedInfo.DISCORD_TOKEN).toBe('[REDACTED]'); + expect(loggedInfo.password).toBe('[REDACTED]'); + expect(loggedInfo.apiKey).toBe('[REDACTED]'); + expect(loggedInfo.nested.token).toBe('[REDACTED]'); + expect(loggedInfo.nested.safe).toBe('visible'); + }); + + it('should handle array meta values in filter', async () => { + const logger = await import('../src/logger.js'); + logger.info('test', { + items: [{ token: 'secret', name: 'item1' }, { name: 'item2' }], + }); + }); + + it('should load with file output enabled config', async () => { + vi.resetModules(); + vi.mock('node:fs', () => ({ + existsSync: vi.fn().mockReturnValue(true), + readFileSync: vi.fn().mockReturnValue( + JSON.stringify({ + logging: { level: 'debug', fileOutput: true }, + }), + ), + mkdirSync: vi.fn(), + })); + vi.mock('winston-daily-rotate-file', () => ({ + default: vi.fn().mockImplementation(() => ({ + on: vi.fn(), + })), + })); + + const logger = await import('../src/logger.js'); + expect(typeof logger.info).toBe('function'); + }); + + it('should handle config parse errors gracefully', async () => { + vi.resetModules(); + vi.mock('node:fs', () => ({ + existsSync: vi.fn().mockReturnValue(true), + readFileSync: vi.fn().mockReturnValue('invalid json'), + mkdirSync: vi.fn(), + })); + vi.mock('winston-daily-rotate-file', () => ({ + default: vi.fn().mockImplementation(() => ({ + on: vi.fn(), + })), + })); + + const logger = await import('../src/logger.js'); + expect(typeof logger.info).toBe('function'); + }); +}); diff --git a/tests/modules/ai.test.js b/tests/modules/ai.test.js new file mode 100644 index 00000000..edb3ea1e --- /dev/null +++ b/tests/modules/ai.test.js @@ -0,0 +1,222 @@ +import { afterEach, beforeEach, describe, expect, it, vi } from 'vitest'; + +// Mock logger +vi.mock('../../src/logger.js', () => ({ + info: vi.fn(), + error: vi.fn(), + warn: vi.fn(), + debug: vi.fn(), +})); + +import { + addToHistory, + generateResponse, + 
getConversationHistory, + getHistory, + OPENCLAW_TOKEN, + OPENCLAW_URL, + setConversationHistory, +} from '../../src/modules/ai.js'; + +describe('ai module', () => { + beforeEach(() => { + // Reset conversation history before each test + setConversationHistory(new Map()); + }); + + afterEach(() => { + vi.restoreAllMocks(); + }); + + describe('getConversationHistory / setConversationHistory', () => { + it('should get and set conversation history', () => { + const history = new Map([['channel1', [{ role: 'user', content: 'hi' }]]]); + setConversationHistory(history); + expect(getConversationHistory()).toBe(history); + }); + }); + + describe('OPENCLAW_URL and OPENCLAW_TOKEN', () => { + it('should export URL and token constants', () => { + expect(typeof OPENCLAW_URL).toBe('string'); + expect(typeof OPENCLAW_TOKEN).toBe('string'); + }); + }); + + describe('getHistory', () => { + it('should create empty history for new channel', () => { + const history = getHistory('new-channel'); + expect(history).toEqual([]); + }); + + it('should return existing history for known channel', () => { + addToHistory('ch1', 'user', 'hello'); + const history = getHistory('ch1'); + expect(history.length).toBe(1); + expect(history[0]).toEqual({ role: 'user', content: 'hello' }); + }); + }); + + describe('addToHistory', () => { + it('should add messages to channel history', () => { + addToHistory('ch1', 'user', 'hello'); + addToHistory('ch1', 'assistant', 'hi there'); + const history = getHistory('ch1'); + expect(history.length).toBe(2); + }); + + it('should trim history beyond MAX_HISTORY (20)', () => { + for (let i = 0; i < 25; i++) { + addToHistory('ch1', 'user', `message ${i}`); + } + const history = getHistory('ch1'); + expect(history.length).toBe(20); + expect(history[0].content).toBe('message 5'); + }); + }); + + describe('generateResponse', () => { + it('should return AI response on success', async () => { + const mockResponse = { + ok: true, + json: vi.fn().mockResolvedValue({ + 
choices: [{ message: { content: 'Hello!' } }], + }), + }; + vi.spyOn(globalThis, 'fetch').mockResolvedValue(mockResponse); + + const config = { ai: { model: 'test-model', maxTokens: 512, systemPrompt: 'You are a bot' } }; + const result = await generateResponse('ch1', 'Hi', 'testuser', config); + + expect(result).toBe('Hello!'); + expect(globalThis.fetch).toHaveBeenCalled(); + }); + + it('should use default system prompt if not configured', async () => { + const mockResponse = { + ok: true, + json: vi.fn().mockResolvedValue({ + choices: [{ message: { content: 'Response' } }], + }), + }; + vi.spyOn(globalThis, 'fetch').mockResolvedValue(mockResponse); + + const config = { ai: {} }; + const result = await generateResponse('ch1', 'Hi', 'testuser', config); + + expect(result).toBe('Response'); + // Verify fetch was called with default model + const fetchCall = globalThis.fetch.mock.calls[0]; + const body = JSON.parse(fetchCall[1].body); + expect(body.model).toBe('claude-sonnet-4-20250514'); + expect(body.max_tokens).toBe(1024); + }); + + it('should handle empty choices gracefully', async () => { + const mockResponse = { + ok: true, + json: vi.fn().mockResolvedValue({ choices: [] }), + }; + vi.spyOn(globalThis, 'fetch').mockResolvedValue(mockResponse); + + const config = { ai: {} }; + const result = await generateResponse('ch1', 'Hi', 'testuser', config); + expect(result).toBe('I got nothing. 
Try again?'); + }); + + it('should return fallback on API error', async () => { + const mockResponse = { + ok: false, + status: 500, + statusText: 'Internal Server Error', + }; + vi.spyOn(globalThis, 'fetch').mockResolvedValue(mockResponse); + + const mockHealth = { setAPIStatus: vi.fn(), recordAIRequest: vi.fn() }; + const config = { ai: {} }; + const result = await generateResponse('ch1', 'Hi', 'testuser', config, mockHealth); + + expect(result).toContain('trouble thinking'); + expect(mockHealth.setAPIStatus).toHaveBeenCalledWith('error'); + }); + + it('should return fallback on fetch exception', async () => { + vi.spyOn(globalThis, 'fetch').mockRejectedValue(new Error('network failure')); + + const config = { ai: {} }; + const result = await generateResponse('ch1', 'Hi', 'testuser', config); + expect(result).toContain('trouble thinking'); + }); + + it('should update health monitor on success', async () => { + const mockResponse = { + ok: true, + json: vi.fn().mockResolvedValue({ + choices: [{ message: { content: 'OK' } }], + }), + }; + vi.spyOn(globalThis, 'fetch').mockResolvedValue(mockResponse); + + const mockHealth = { setAPIStatus: vi.fn(), recordAIRequest: vi.fn() }; + const config = { ai: {} }; + await generateResponse('ch1', 'Hi', 'testuser', config, mockHealth); + + expect(mockHealth.recordAIRequest).toHaveBeenCalled(); + expect(mockHealth.setAPIStatus).toHaveBeenCalledWith('ok'); + }); + + it('should update conversation history on success', async () => { + const mockResponse = { + ok: true, + json: vi.fn().mockResolvedValue({ + choices: [{ message: { content: 'Reply' } }], + }), + }; + vi.spyOn(globalThis, 'fetch').mockResolvedValue(mockResponse); + + const config = { ai: {} }; + await generateResponse('ch1', 'Hello', 'user1', config); + + const history = getHistory('ch1'); + expect(history.length).toBe(2); + expect(history[0].role).toBe('user'); + expect(history[0].content).toContain('user1: Hello'); + expect(history[1].role).toBe('assistant'); + 
expect(history[1].content).toBe('Reply'); + }); + + it('should include Authorization header when token is set', async () => { + vi.resetModules(); + process.env.OPENCLAW_API_KEY = 'test-key-123'; + + try { + vi.mock('../../src/logger.js', () => ({ + info: vi.fn(), + error: vi.fn(), + warn: vi.fn(), + debug: vi.fn(), + })); + + const { generateResponse: genResponse, setConversationHistory: setHistory } = await import( + '../../src/modules/ai.js' + ); + setHistory(new Map()); + + const mockResponse = { + ok: true, + json: vi.fn().mockResolvedValue({ + choices: [{ message: { content: 'OK' } }], + }), + }; + vi.spyOn(globalThis, 'fetch').mockResolvedValue(mockResponse); + + await genResponse('ch1', 'Hi', 'user', { ai: {} }); + + const fetchCall = globalThis.fetch.mock.calls[0]; + expect(fetchCall[1].headers.Authorization).toBe('Bearer test-key-123'); + } finally { + delete process.env.OPENCLAW_API_KEY; + } + }); + }); +}); diff --git a/tests/modules/chimeIn.test.js b/tests/modules/chimeIn.test.js new file mode 100644 index 00000000..db7908d3 --- /dev/null +++ b/tests/modules/chimeIn.test.js @@ -0,0 +1,324 @@ +import { afterEach, beforeEach, describe, expect, it, vi } from 'vitest'; + +// Mock logger +vi.mock('../../src/logger.js', () => ({ + info: vi.fn(), + error: vi.fn(), + warn: vi.fn(), + debug: vi.fn(), +})); + +// Mock ai exports +vi.mock('../../src/modules/ai.js', () => ({ + OPENCLAW_URL: 'http://mock-api/v1/chat/completions', + OPENCLAW_TOKEN: 'mock-token', +})); + +// Mock splitMessage +vi.mock('../../src/utils/splitMessage.js', () => ({ + needsSplitting: vi.fn().mockReturnValue(false), + splitMessage: vi.fn().mockReturnValue([]), +})); + +describe('chimeIn module', () => { + let chimeInModule; + + beforeEach(async () => { + vi.resetModules(); + // Re-apply mocks after resetModules + vi.mock('../../src/logger.js', () => ({ + info: vi.fn(), + error: vi.fn(), + warn: vi.fn(), + debug: vi.fn(), + })); + vi.mock('../../src/modules/ai.js', () => ({ + OPENCLAW_URL: 
'http://mock-api/v1/chat/completions', + OPENCLAW_TOKEN: 'mock-token', + })); + vi.mock('../../src/utils/splitMessage.js', () => ({ + needsSplitting: vi.fn().mockReturnValue(false), + splitMessage: vi.fn().mockReturnValue([]), + })); + + chimeInModule = await import('../../src/modules/chimeIn.js'); + }); + + afterEach(() => { + vi.restoreAllMocks(); + }); + + describe('accumulate', () => { + it('should do nothing if chimeIn is disabled', async () => { + const fetchSpy = vi.spyOn(globalThis, 'fetch'); + const message = { + channel: { id: 'c1' }, + content: 'hello', + author: { username: 'user' }, + }; + await chimeInModule.accumulate(message, { chimeIn: { enabled: false } }); + expect(fetchSpy).not.toHaveBeenCalled(); + }); + + it('should do nothing if chimeIn config is missing', async () => { + const fetchSpy = vi.spyOn(globalThis, 'fetch'); + const message = { + channel: { id: 'c1' }, + content: 'hello', + author: { username: 'user' }, + }; + await chimeInModule.accumulate(message, {}); + expect(fetchSpy).not.toHaveBeenCalled(); + }); + + it('should skip excluded channels', async () => { + const fetchSpy = vi.spyOn(globalThis, 'fetch'); + const message = { + channel: { id: 'excluded-ch' }, + content: 'hello', + author: { username: 'user' }, + }; + await chimeInModule.accumulate(message, { + chimeIn: { enabled: true, excludeChannels: ['excluded-ch'] }, + }); + expect(fetchSpy).not.toHaveBeenCalled(); + }); + + it('should skip empty messages', async () => { + const fetchSpy = vi.spyOn(globalThis, 'fetch'); + const message = { + channel: { id: 'c1' }, + content: '', + author: { username: 'user' }, + }; + await chimeInModule.accumulate(message, { chimeIn: { enabled: true } }); + expect(fetchSpy).not.toHaveBeenCalled(); + }); + + it('should skip whitespace-only messages', async () => { + const fetchSpy = vi.spyOn(globalThis, 'fetch'); + const message = { + channel: { id: 'c1' }, + content: ' ', + author: { username: 'user' }, + }; + await 
chimeInModule.accumulate(message, { chimeIn: { enabled: true } }); + expect(fetchSpy).not.toHaveBeenCalled(); + }); + + it('should accumulate messages without triggering eval below threshold', async () => { + const config = { chimeIn: { enabled: true, evaluateEvery: 5 } }; + for (let i = 0; i < 3; i++) { + const message = { + channel: { id: 'c-test' }, + content: `message ${i}`, + author: { username: 'user' }, + }; + await chimeInModule.accumulate(message, config); + } + // 3 < 5, so evaluation shouldn't trigger — just confirm no crash + }); + + it('should trigger evaluation when counter reaches evaluateEvery', async () => { + // Mock fetch for the evaluation call + const mockResponse = { + ok: true, + json: vi.fn().mockResolvedValue({ + choices: [{ message: { content: 'NO' } }], + }), + }; + vi.spyOn(globalThis, 'fetch').mockResolvedValue(mockResponse); + + const config = { chimeIn: { enabled: true, evaluateEvery: 2, channels: [] }, ai: {} }; + for (let i = 0; i < 2; i++) { + const message = { + channel: { id: 'c-eval', send: vi.fn(), sendTyping: vi.fn() }, + content: `message ${i}`, + author: { username: 'user' }, + }; + await chimeInModule.accumulate(message, config); + } + // fetch called for evaluation + expect(globalThis.fetch).toHaveBeenCalled(); + }); + + it('should send response when evaluation says YES', async () => { + const evalResponse = { + ok: true, + json: vi.fn().mockResolvedValue({ + choices: [{ message: { content: 'YES' } }], + }), + }; + const genResponse = { + ok: true, + json: vi.fn().mockResolvedValue({ + choices: [{ message: { content: 'Hey folks!' 
} }], + }), + }; + vi.spyOn(globalThis, 'fetch') + .mockResolvedValueOnce(evalResponse) + .mockResolvedValueOnce(genResponse); + + const mockSend = vi.fn().mockResolvedValue(undefined); + const mockSendTyping = vi.fn().mockResolvedValue(undefined); + + const config = { chimeIn: { enabled: true, evaluateEvery: 1, channels: [] }, ai: {} }; + const message = { + channel: { id: 'c-yes', send: mockSend, sendTyping: mockSendTyping }, + content: 'interesting discussion', + author: { username: 'user' }, + }; + await chimeInModule.accumulate(message, config); + expect(mockSend).toHaveBeenCalledWith('Hey folks!'); + }); + + it('should respect allowed channels list', async () => { + const fetchSpy = vi.spyOn(globalThis, 'fetch'); + const config = { + chimeIn: { enabled: true, evaluateEvery: 1, channels: ['allowed-ch'] }, + }; + const message = { + channel: { id: 'not-allowed' }, + content: 'hello', + author: { username: 'user' }, + }; + await chimeInModule.accumulate(message, config); + // Should not trigger any fetch since channel is not in the allowed list + expect(fetchSpy).not.toHaveBeenCalled(); + }); + + it('should handle evaluation API error gracefully', async () => { + vi.spyOn(globalThis, 'fetch').mockResolvedValue({ + ok: false, + status: 500, + }); + + const config = { chimeIn: { enabled: true, evaluateEvery: 1, channels: [] }, ai: {} }; + const message = { + channel: { id: 'c-err', send: vi.fn(), sendTyping: vi.fn() }, + content: 'test message', + author: { username: 'user' }, + }; + await chimeInModule.accumulate(message, config); + // Should not throw + }); + + it('should handle evaluation fetch exception', async () => { + vi.spyOn(globalThis, 'fetch').mockRejectedValue(new Error('network error')); + + const config = { chimeIn: { enabled: true, evaluateEvery: 1, channels: [] }, ai: {} }; + const message = { + channel: { id: 'c-fetch-err', send: vi.fn(), sendTyping: vi.fn() }, + content: 'test message', + author: { username: 'user' }, + }; + await 
chimeInModule.accumulate(message, config); + }); + + it('should not send empty chime-in responses', async () => { + const evalResponse = { + ok: true, + json: vi.fn().mockResolvedValue({ + choices: [{ message: { content: 'YES' } }], + }), + }; + const genResponse = { + ok: true, + json: vi.fn().mockResolvedValue({ + choices: [{ message: { content: ' ' } }], + }), + }; + vi.spyOn(globalThis, 'fetch') + .mockResolvedValueOnce(evalResponse) + .mockResolvedValueOnce(genResponse); + + const mockSend = vi.fn(); + const config = { chimeIn: { enabled: true, evaluateEvery: 1, channels: [] }, ai: {} }; + const message = { + channel: { id: 'c-empty', send: mockSend, sendTyping: vi.fn() }, + content: 'test', + author: { username: 'user' }, + }; + await chimeInModule.accumulate(message, config); + expect(mockSend).not.toHaveBeenCalled(); + }); + + it('should handle generation API error', async () => { + const evalResponse = { + ok: true, + json: vi.fn().mockResolvedValue({ + choices: [{ message: { content: 'YES' } }], + }), + }; + const genResponse = { ok: false, status: 500, statusText: 'Server Error' }; + vi.spyOn(globalThis, 'fetch') + .mockResolvedValueOnce(evalResponse) + .mockResolvedValueOnce(genResponse); + + const config = { chimeIn: { enabled: true, evaluateEvery: 1, channels: [] }, ai: {} }; + const message = { + channel: { id: 'c-gen-err', send: vi.fn(), sendTyping: vi.fn() }, + content: 'test', + author: { username: 'user' }, + }; + await chimeInModule.accumulate(message, config); + // Should not throw — error handled internally + }); + + it('should split long chime-in responses', async () => { + const { needsSplitting: mockNeedsSplitting, splitMessage: mockSplitMessage } = await import( + '../../src/utils/splitMessage.js' + ); + mockNeedsSplitting.mockReturnValueOnce(true); + mockSplitMessage.mockReturnValueOnce(['part1', 'part2']); + + const evalResponse = { + ok: true, + json: vi.fn().mockResolvedValue({ + choices: [{ message: { content: 'YES' } }], + }), + }; + 
const genResponse = { + ok: true, + json: vi.fn().mockResolvedValue({ + choices: [{ message: { content: 'a'.repeat(3000) } }], + }), + }; + vi.spyOn(globalThis, 'fetch') + .mockResolvedValueOnce(evalResponse) + .mockResolvedValueOnce(genResponse); + + const mockSend = vi.fn().mockResolvedValue(undefined); + const config = { chimeIn: { enabled: true, evaluateEvery: 1, channels: [] }, ai: {} }; + const message = { + channel: { id: 'c-split', send: mockSend, sendTyping: vi.fn() }, + content: 'test', + author: { username: 'user' }, + }; + await chimeInModule.accumulate(message, config); + expect(mockSend).toHaveBeenCalledWith('part1'); + expect(mockSend).toHaveBeenCalledWith('part2'); + }); + }); + + describe('resetCounter', () => { + it('should not throw for unknown channel', () => { + expect(() => chimeInModule.resetCounter('unknown-channel')).not.toThrow(); + }); + + it('should reset counter and abort evaluation', async () => { + // First accumulate some messages to create a buffer + const config = { chimeIn: { enabled: true, evaluateEvery: 100, channels: [] } }; + const message = { + channel: { id: 'c-reset' }, + content: 'hello', + author: { username: 'user' }, + }; + await chimeInModule.accumulate(message, config); + + // Now reset + chimeInModule.resetCounter('c-reset'); + // No crash = pass + }); + }); +}); diff --git a/tests/modules/config.test.js b/tests/modules/config.test.js new file mode 100644 index 00000000..cadabb3f --- /dev/null +++ b/tests/modules/config.test.js @@ -0,0 +1,504 @@ +import { afterEach, beforeEach, describe, expect, it, vi } from 'vitest'; + +// Mock logger +vi.mock('../../src/logger.js', () => ({ + info: vi.fn(), + error: vi.fn(), + warn: vi.fn(), + debug: vi.fn(), +})); + +// Mock db module +vi.mock('../../src/db.js', () => ({ + getPool: vi.fn(), +})); + +// Mock fs +vi.mock('node:fs', () => ({ + existsSync: vi.fn(), + readFileSync: vi.fn(), +})); + +describe('modules/config', () => { + let configModule; + + beforeEach(async () => { + 
vi.resetModules(); + // Re-mock all deps after resetModules + vi.mock('../../src/logger.js', () => ({ + info: vi.fn(), + error: vi.fn(), + warn: vi.fn(), + debug: vi.fn(), + })); + vi.mock('../../src/db.js', () => ({ + getPool: vi.fn(), + })); + vi.mock('node:fs', () => ({ + existsSync: vi.fn(), + readFileSync: vi.fn(), + })); + + // Default mock: config.json exists with test data + const { existsSync: mockExists, readFileSync: mockRead } = await import('node:fs'); + mockExists.mockReturnValue(true); + mockRead.mockReturnValue( + JSON.stringify({ + ai: { enabled: true, model: 'test-model' }, + welcome: { enabled: false }, + }), + ); + + configModule = await import('../../src/modules/config.js'); + }); + + afterEach(() => { + vi.restoreAllMocks(); + }); + + describe('loadConfigFromFile', () => { + it('should load and parse config.json', () => { + const config = configModule.loadConfigFromFile(); + expect(config).toBeDefined(); + expect(config.ai.enabled).toBe(true); + }); + + it('should throw if config.json does not exist', async () => { + vi.resetModules(); + vi.doMock('../../src/logger.js', () => ({ + info: vi.fn(), + error: vi.fn(), + warn: vi.fn(), + debug: vi.fn(), + })); + vi.doMock('../../src/db.js', () => ({ + getPool: vi.fn(), + })); + vi.doMock('node:fs', () => ({ + existsSync: vi.fn().mockReturnValue(false), + readFileSync: vi.fn(), + })); + + const mod = await import('../../src/modules/config.js'); + expect(() => mod.loadConfigFromFile()).toThrow('config.json not found'); + }); + + it('should throw on JSON parse error', async () => { + vi.resetModules(); + vi.doMock('../../src/logger.js', () => ({ + info: vi.fn(), + error: vi.fn(), + warn: vi.fn(), + debug: vi.fn(), + })); + vi.doMock('../../src/db.js', () => ({ + getPool: vi.fn(), + })); + vi.doMock('node:fs', () => ({ + existsSync: vi.fn().mockReturnValue(true), + readFileSync: vi.fn().mockReturnValue('invalid json{'), + })); + + const mod = await import('../../src/modules/config.js'); + expect(() => 
mod.loadConfigFromFile()).toThrow('Failed to load config.json'); + }); + }); + + describe('getConfig', () => { + it('should return current config cache', () => { + const config = configModule.getConfig(); + expect(typeof config).toBe('object'); + }); + }); + + describe('loadConfig', () => { + it('should fall back to config.json if DB not available', async () => { + const { getPool: mockGetPool } = await import('../../src/db.js'); + mockGetPool.mockImplementation(() => { + throw new Error('Database not initialized'); + }); + + const config = await configModule.loadConfig(); + expect(config.ai.enabled).toBe(true); + }); + + it('should seed DB from config.json if DB is empty', async () => { + const mockClient = { + query: vi.fn().mockResolvedValue({}), + release: vi.fn(), + }; + const mockPool = { + query: vi.fn().mockResolvedValue({ rows: [] }), + connect: vi.fn().mockResolvedValue(mockClient), + }; + const { getPool: mockGetPool } = await import('../../src/db.js'); + mockGetPool.mockReturnValue(mockPool); + + const config = await configModule.loadConfig(); + expect(config.ai.enabled).toBe(true); + expect(mockClient.query).toHaveBeenCalledWith('BEGIN'); + expect(mockClient.query).toHaveBeenCalledWith('COMMIT'); + }); + + it('should load config from DB when rows exist', async () => { + const mockPool = { + query: vi.fn().mockResolvedValue({ + rows: [ + { key: 'ai', value: { enabled: false, model: 'db-model' } }, + { key: 'welcome', value: { enabled: true } }, + ], + }), + }; + const { getPool: mockGetPool } = await import('../../src/db.js'); + mockGetPool.mockReturnValue(mockPool); + + const config = await configModule.loadConfig(); + expect(config.ai.enabled).toBe(false); + expect(config.ai.model).toBe('db-model'); + }); + + it('should handle DB error and fall back to config.json', async () => { + const mockPool = { + query: vi.fn().mockRejectedValue(new Error('DB connection failed')), + }; + const { getPool: mockGetPool } = await import('../../src/db.js'); + 
mockGetPool.mockReturnValue(mockPool); + + const config = await configModule.loadConfig(); + expect(config.ai.enabled).toBe(true); // Falls back to file + }); + + it('should handle rollback failure during seeding gracefully', async () => { + const mockClient = { + query: vi + .fn() + .mockResolvedValueOnce({}) // BEGIN + .mockRejectedValueOnce(new Error('INSERT failed')) // INSERT + .mockRejectedValueOnce(new Error('ROLLBACK also failed')), // ROLLBACK + release: vi.fn(), + }; + const mockPool = { + query: vi.fn().mockResolvedValue({ rows: [] }), + connect: vi.fn().mockResolvedValue(mockClient), + }; + const { getPool: mockGetPool } = await import('../../src/db.js'); + mockGetPool.mockReturnValue(mockPool); + + // Should fall back to config.json, not crash + const config = await configModule.loadConfig(); + expect(config.ai.enabled).toBe(true); + }); + }); + + describe('setConfigValue', () => { + it('should reject paths with less than 2 parts', async () => { + await expect(configModule.setConfigValue('ai', 'value')).rejects.toThrow( + 'Path must include section and key', + ); + }); + + it('should reject dangerous keys (__proto__)', async () => { + await expect(configModule.setConfigValue('__proto__.polluted', 'true')).rejects.toThrow( + 'reserved key', + ); + }); + + it('should reject dangerous keys (constructor)', async () => { + await expect(configModule.setConfigValue('ai.constructor', 'true')).rejects.toThrow( + 'reserved key', + ); + }); + + it('should reject dangerous keys (prototype)', async () => { + await expect(configModule.setConfigValue('ai.prototype', 'true')).rejects.toThrow( + 'reserved key', + ); + }); + + describe('in-memory only (no DB)', () => { + beforeEach(async () => { + const { getPool: mockGetPool } = await import('../../src/db.js'); + mockGetPool.mockImplementation(() => { + throw new Error('no db'); + }); + await configModule.loadConfig(); + }); + + it('should update in-memory only when DB not available', async () => { + const result = 
await configModule.setConfigValue('ai.model', 'new-model'); + expect(result.model).toBe('new-model'); + expect(configModule.getConfig().ai.model).toBe('new-model'); + }); + + it('should parse boolean values', async () => { + await configModule.setConfigValue('ai.enabled', 'false'); + expect(configModule.getConfig().ai.enabled).toBe(false); + + await configModule.setConfigValue('ai.enabled', 'true'); + expect(configModule.getConfig().ai.enabled).toBe(true); + }); + + it('should parse null values', async () => { + await configModule.setConfigValue('ai.model', 'null'); + expect(configModule.getConfig().ai.model).toBeNull(); + }); + + it('should parse numeric values', async () => { + await configModule.setConfigValue('ai.maxTokens', '512'); + expect(configModule.getConfig().ai.maxTokens).toBe(512); + }); + + it('should parse JSON array values', async () => { + await configModule.setConfigValue('ai.channels', '["ch1","ch2"]'); + expect(configModule.getConfig().ai.channels).toEqual(['ch1', 'ch2']); + }); + + it('should parse JSON string values', async () => { + await configModule.setConfigValue('ai.model', '"literal-string"'); + expect(configModule.getConfig().ai.model).toBe('literal-string'); + }); + + it('should create intermediate objects for nested paths', async () => { + await configModule.setConfigValue('ai.deep.nested.key', 'value'); + expect(configModule.getConfig().ai.deep.nested.key).toBe('value'); + }); + + it('should handle floats and keep precision', async () => { + await configModule.setConfigValue('ai.temperature', '0.7'); + expect(configModule.getConfig().ai.temperature).toBe(0.7); + }); + + it('should keep unsafe integers as strings', async () => { + await configModule.setConfigValue('ai.bigNum', '99999999999999999999'); + expect(configModule.getConfig().ai.bigNum).toBe('99999999999999999999'); + }); + + it('should keep invalid JSON parse attempts as strings', async () => { + await configModule.setConfigValue('ai.bad', '[invalid'); + 
expect(configModule.getConfig().ai.bad).toBe('[invalid'); + }); + + it('should parse JSON objects', async () => { + await configModule.setConfigValue('ai.obj', '{"key":"val"}'); + expect(configModule.getConfig().ai.obj).toEqual({ key: 'val' }); + }); + + it('should handle Infinity as string', async () => { + // Infinity doesn't match the numeric regex so stays as string + await configModule.setConfigValue('ai.val', 'Infinity'); + expect(configModule.getConfig().ai.val).toBe('Infinity'); + }); + + it('should handle non-string values passed directly', async () => { + await configModule.setConfigValue('ai.num', 42); + expect(configModule.getConfig().ai.num).toBe(42); + }); + }); + + it('should persist to database when available', async () => { + const mockClient = { + query: vi.fn().mockResolvedValue({ rows: [{ value: { enabled: true, model: 'old' } }] }), + release: vi.fn(), + }; + const mockPool = { + query: vi.fn().mockResolvedValue({ + rows: [ + { key: 'ai', value: { enabled: true, model: 'old' } }, + { key: 'welcome', value: { enabled: false } }, + ], + }), + connect: vi.fn().mockResolvedValue(mockClient), + }; + const { getPool: mockGetPool } = await import('../../src/db.js'); + mockGetPool.mockReturnValue(mockPool); + + await configModule.loadConfig(); + await configModule.setConfigValue('ai.model', 'new-model'); + + expect(mockClient.query).toHaveBeenCalledWith('BEGIN'); + expect(mockClient.query).toHaveBeenCalledWith('COMMIT'); + expect(mockClient.release).toHaveBeenCalled(); + }); + + it('should handle transaction rollback on error', async () => { + const mockClient = { + query: vi + .fn() + .mockResolvedValueOnce({}) // BEGIN + .mockResolvedValueOnce({ rows: [{ value: { enabled: true } }] }) // SELECT + .mockRejectedValueOnce(new Error('UPDATE failed')) // UPDATE + .mockResolvedValueOnce({}), // ROLLBACK + release: vi.fn(), + }; + const mockPool = { + query: vi.fn().mockResolvedValue({ + rows: [{ key: 'ai', value: { enabled: true, model: 'old' } }], + }), + 
connect: vi.fn().mockResolvedValue(mockClient), + }; + const { getPool: mockGetPool } = await import('../../src/db.js'); + mockGetPool.mockReturnValue(mockPool); + + await configModule.loadConfig(); + await expect(configModule.setConfigValue('ai.model', 'bad')).rejects.toThrow('UPDATE failed'); + }); + + it('should create new section if it does not exist', async () => { + const mockClient = { + query: vi + .fn() + .mockResolvedValueOnce({}) // BEGIN + .mockResolvedValueOnce({ rows: [] }) // SELECT (section doesn't exist) + .mockResolvedValueOnce({}) // INSERT + .mockResolvedValueOnce({}), // COMMIT + release: vi.fn(), + }; + const mockPool = { + query: vi.fn().mockResolvedValue({ + rows: [{ key: 'ai', value: { enabled: true } }], + }), + connect: vi.fn().mockResolvedValue(mockClient), + }; + const { getPool: mockGetPool } = await import('../../src/db.js'); + mockGetPool.mockReturnValue(mockPool); + + await configModule.loadConfig(); + await configModule.setConfigValue('newSection.key', 'value'); + expect(configModule.getConfig().newSection.key).toBe('value'); + }); + }); + + describe('resetConfig', () => { + it('should reset specific section to defaults', async () => { + const { getPool: mockGetPool } = await import('../../src/db.js'); + mockGetPool.mockImplementation(() => { + throw new Error('no db'); + }); + + await configModule.loadConfig(); + await configModule.setConfigValue('ai.model', 'changed'); + expect(configModule.getConfig().ai.model).toBe('changed'); + + await configModule.resetConfig('ai'); + expect(configModule.getConfig().ai.model).toBe('test-model'); + }); + + it('should reset all sections to defaults', async () => { + const { getPool: mockGetPool } = await import('../../src/db.js'); + mockGetPool.mockImplementation(() => { + throw new Error('no db'); + }); + + await configModule.loadConfig(); + await configModule.setConfigValue('ai.model', 'changed'); + + await configModule.resetConfig(); + 
expect(configModule.getConfig().ai.model).toBe('test-model'); + }); + + it('should throw if section not found in file defaults', async () => { + const { getPool: mockGetPool } = await import('../../src/db.js'); + mockGetPool.mockImplementation(() => { + throw new Error('no db'); + }); + + await configModule.loadConfig(); + await expect(configModule.resetConfig('nonexistent')).rejects.toThrow( + "Section 'nonexistent' not found", + ); + }); + + it('should reset with database persistence', async () => { + const mockPool = { + query: vi.fn().mockResolvedValue({ rows: [] }), + connect: vi.fn(), + }; + // First return rows for loadConfig + mockPool.query.mockResolvedValueOnce({ + rows: [ + { key: 'ai', value: { enabled: true, model: 'changed' } }, + { key: 'welcome', value: { enabled: true } }, + ], + }); + // Then for the reset + mockPool.query.mockResolvedValue({}); + const { getPool: mockGetPool } = await import('../../src/db.js'); + mockGetPool.mockReturnValue(mockPool); + + await configModule.loadConfig(); + await configModule.resetConfig('ai'); + expect(configModule.getConfig().ai.model).toBe('test-model'); + }); + + it('should handle full reset with database transaction', async () => { + const mockClient = { + query: vi.fn().mockResolvedValue({}), + release: vi.fn(), + }; + const mockPool = { + query: vi.fn().mockResolvedValue({ + rows: [ + { key: 'ai', value: { enabled: true, model: 'db-model' } }, + { key: 'welcome', value: { enabled: false } }, + ], + }), + connect: vi.fn().mockResolvedValue(mockClient), + }; + const { getPool: mockGetPool } = await import('../../src/db.js'); + mockGetPool.mockReturnValue(mockPool); + + await configModule.loadConfig(); + await configModule.resetConfig(); + + expect(mockClient.query).toHaveBeenCalledWith('BEGIN'); + expect(mockClient.query).toHaveBeenCalledWith('COMMIT'); + }); + + // NOTE: The following 3 tests directly mutate the getConfig() return value. 
+ // This works because getConfig() returns a live reference to the internal + // cache object. If the implementation changes to return a copy/clone, + // these tests will break and need to be updated. + + it('should remove stale keys from cache on full reset', async () => { + const { getPool: mockGetPool } = await import('../../src/db.js'); + mockGetPool.mockImplementation(() => { + throw new Error('no db'); + }); + + await configModule.loadConfig(); + // Directly mutates the live cache reference to inject a stale key + configModule.getConfig().staleKey = { foo: 'bar' }; + + await configModule.resetConfig(); + expect(configModule.getConfig().staleKey).toBeUndefined(); + }); + + it('should handle section reset where cache has non-object value', async () => { + const { getPool: mockGetPool } = await import('../../src/db.js'); + mockGetPool.mockImplementation(() => { + throw new Error('no db'); + }); + + await configModule.loadConfig(); + // Directly mutates the live cache reference to replace section with a non-object + configModule.getConfig().welcome = 'not-an-object'; + + await configModule.resetConfig('welcome'); + expect(configModule.getConfig().welcome).toEqual({ enabled: false }); + }); + + it('should handle full reset where some cache values are non-objects', async () => { + const { getPool: mockGetPool } = await import('../../src/db.js'); + mockGetPool.mockImplementation(() => { + throw new Error('no db'); + }); + + await configModule.loadConfig(); + // Directly mutates the live cache reference to replace section with a string + configModule.getConfig().ai = 'string-value'; + + await configModule.resetConfig(); + expect(configModule.getConfig().ai).toEqual({ enabled: true, model: 'test-model' }); + }); + }); +}); diff --git a/tests/modules/events.test.js b/tests/modules/events.test.js new file mode 100644 index 00000000..ad9292a7 --- /dev/null +++ b/tests/modules/events.test.js @@ -0,0 +1,336 @@ +import { afterEach, describe, expect, it, vi } from 'vitest'; 
+ +// Mock logger +vi.mock('../../src/logger.js', () => ({ + info: vi.fn(), + error: vi.fn(), + warn: vi.fn(), + debug: vi.fn(), +})); + +// Mock ai module +vi.mock('../../src/modules/ai.js', () => ({ + generateResponse: vi.fn().mockResolvedValue('AI response'), +})); + +// Mock chimeIn module +vi.mock('../../src/modules/chimeIn.js', () => ({ + accumulate: vi.fn().mockResolvedValue(undefined), + resetCounter: vi.fn(), +})); + +// Mock spam module +vi.mock('../../src/modules/spam.js', () => ({ + isSpam: vi.fn().mockReturnValue(false), + sendSpamAlert: vi.fn().mockResolvedValue(undefined), +})); + +// Mock welcome module +vi.mock('../../src/modules/welcome.js', () => ({ + sendWelcomeMessage: vi.fn().mockResolvedValue(undefined), + recordCommunityActivity: vi.fn(), +})); + +// Mock splitMessage +vi.mock('../../src/utils/splitMessage.js', () => ({ + needsSplitting: vi.fn().mockReturnValue(false), + splitMessage: vi.fn().mockReturnValue(['chunk1', 'chunk2']), +})); + +import { generateResponse } from '../../src/modules/ai.js'; +import { accumulate, resetCounter } from '../../src/modules/chimeIn.js'; +import { + registerErrorHandlers, + registerEventHandlers, + registerGuildMemberAddHandler, + registerMessageCreateHandler, + registerReadyHandler, +} from '../../src/modules/events.js'; +import { isSpam, sendSpamAlert } from '../../src/modules/spam.js'; +import { recordCommunityActivity, sendWelcomeMessage } from '../../src/modules/welcome.js'; +import { needsSplitting, splitMessage } from '../../src/utils/splitMessage.js'; + +describe('events module', () => { + afterEach(() => { + vi.clearAllMocks(); + }); + + describe('registerReadyHandler', () => { + it('should register clientReady event', () => { + const once = vi.fn(); + const client = { + once, + user: { tag: 'Bot#1234' }, + guilds: { cache: { size: 5 } }, + }; + const config = { + welcome: { enabled: true, channelId: 'ch1' }, + ai: { enabled: true }, + moderation: { enabled: true }, + }; + + 
registerReadyHandler(client, config, null); + expect(once).toHaveBeenCalledWith('clientReady', expect.any(Function)); + + // Trigger the callback + const callback = once.mock.calls[0][1]; + callback(); + }); + + it('should record start if healthMonitor provided', () => { + const once = vi.fn(); + const client = { + once, + user: { tag: 'Bot#1234' }, + guilds: { cache: { size: 1 } }, + }; + const config = {}; + const healthMonitor = { recordStart: vi.fn() }; + + registerReadyHandler(client, config, healthMonitor); + const callback = once.mock.calls[0][1]; + callback(); + + expect(healthMonitor.recordStart).toHaveBeenCalled(); + }); + }); + + describe('registerGuildMemberAddHandler', () => { + it('should register guildMemberAdd handler', () => { + const on = vi.fn(); + const client = { on }; + const config = {}; + + registerGuildMemberAddHandler(client, config); + expect(on).toHaveBeenCalledWith('guildMemberAdd', expect.any(Function)); + }); + + it('should call sendWelcomeMessage on member add', async () => { + const on = vi.fn(); + const client = { on }; + const config = {}; + + registerGuildMemberAddHandler(client, config); + const callback = on.mock.calls[0][1]; + const member = { user: { tag: 'User#1234' } }; + await callback(member); + + expect(sendWelcomeMessage).toHaveBeenCalledWith(member, client, config); + }); + }); + + describe('registerMessageCreateHandler', () => { + let onCallbacks; + let client; + let config; + + function setup(configOverrides = {}) { + onCallbacks = {}; + client = { + on: vi.fn((event, cb) => { + onCallbacks[event] = cb; + }), + user: { id: 'bot-user-id' }, + }; + config = { + ai: { enabled: true, channels: [] }, + moderation: { enabled: true }, + ...configOverrides, + }; + + registerMessageCreateHandler(client, config, null); + } + + it('should ignore bot messages', async () => { + setup(); + const message = { author: { bot: true }, guild: { id: 'g1' } }; + await onCallbacks.messageCreate(message); + 
expect(isSpam).not.toHaveBeenCalled(); + }); + + it('should ignore DMs', async () => { + setup(); + const message = { author: { bot: false }, guild: null }; + await onCallbacks.messageCreate(message); + expect(isSpam).not.toHaveBeenCalled(); + }); + + it('should detect and alert spam', async () => { + setup(); + isSpam.mockReturnValueOnce(true); + const message = { + author: { bot: false, tag: 'spammer#1234' }, + guild: { id: 'g1' }, + content: 'spam content', + channel: { id: 'c1' }, + }; + await onCallbacks.messageCreate(message); + expect(sendSpamAlert).toHaveBeenCalledWith(message, client, config); + }); + + it('should respond when bot is mentioned', async () => { + setup(); + const mockReply = vi.fn().mockResolvedValue(undefined); + const mockSendTyping = vi.fn().mockResolvedValue(undefined); + const message = { + author: { bot: false, username: 'user' }, + guild: { id: 'g1' }, + content: `<@bot-user-id> hello`, + channel: { id: 'c1', sendTyping: mockSendTyping, send: vi.fn() }, + mentions: { has: vi.fn().mockReturnValue(true), repliedUser: null }, + reference: null, + reply: mockReply, + }; + await onCallbacks.messageCreate(message); + expect(resetCounter).toHaveBeenCalledWith('c1'); + expect(mockReply).toHaveBeenCalledWith('AI response'); + }); + + it('should respond to replies to bot', async () => { + setup(); + const mockReply = vi.fn().mockResolvedValue(undefined); + const mockSendTyping = vi.fn().mockResolvedValue(undefined); + const message = { + author: { bot: false, username: 'user' }, + guild: { id: 'g1' }, + content: 'follow up', + channel: { id: 'c1', sendTyping: mockSendTyping, send: vi.fn() }, + mentions: { has: vi.fn().mockReturnValue(false), repliedUser: { id: 'bot-user-id' } }, + reference: { messageId: 'ref-123' }, + reply: mockReply, + }; + await onCallbacks.messageCreate(message); + expect(mockReply).toHaveBeenCalled(); + }); + + it('should handle empty mention content', async () => { + setup(); + const mockReply = 
vi.fn().mockResolvedValue(undefined); + const message = { + author: { bot: false, username: 'user' }, + guild: { id: 'g1' }, + content: `<@bot-user-id>`, + channel: { id: 'c1', sendTyping: vi.fn(), send: vi.fn() }, + mentions: { has: vi.fn().mockReturnValue(true), repliedUser: null }, + reference: null, + reply: mockReply, + }; + await onCallbacks.messageCreate(message); + expect(mockReply).toHaveBeenCalledWith("Hey! What's up?"); + }); + + it('should split long AI responses', async () => { + setup(); + needsSplitting.mockReturnValueOnce(true); + splitMessage.mockReturnValueOnce(['chunk1', 'chunk2']); + const mockSend = vi.fn().mockResolvedValue(undefined); + const message = { + author: { bot: false, username: 'user' }, + guild: { id: 'g1' }, + content: `<@bot-user-id> tell me a story`, + channel: { id: 'c1', sendTyping: vi.fn(), send: mockSend }, + mentions: { has: vi.fn().mockReturnValue(true), repliedUser: null }, + reference: null, + reply: vi.fn(), + }; + await onCallbacks.messageCreate(message); + expect(mockSend).toHaveBeenCalledWith('chunk1'); + expect(mockSend).toHaveBeenCalledWith('chunk2'); + }); + + it('should respect allowed channels', async () => { + setup({ ai: { enabled: true, channels: ['allowed-ch'] } }); + const mockReply = vi.fn(); + const message = { + author: { bot: false, username: 'user' }, + guild: { id: 'g1' }, + content: '<@bot-user-id> hello', + channel: { id: 'not-allowed-ch', sendTyping: vi.fn(), send: vi.fn() }, + mentions: { has: vi.fn().mockReturnValue(true), repliedUser: null }, + reference: null, + reply: mockReply, + }; + await onCallbacks.messageCreate(message); + // Should NOT respond (channel not in allowed list) + expect(generateResponse).not.toHaveBeenCalled(); + }); + + it('should accumulate messages for chimeIn', async () => { + setup({ ai: { enabled: false } }); + const message = { + author: { bot: false, username: 'user' }, + guild: { id: 'g1' }, + content: 'regular message', + channel: { id: 'c1', sendTyping: vi.fn(), 
send: vi.fn() }, + mentions: { has: vi.fn().mockReturnValue(false), repliedUser: null }, + reference: null, + }; + await onCallbacks.messageCreate(message); + expect(accumulate).toHaveBeenCalledWith(message, config); + }); + + it('should record community activity', async () => { + setup(); + const message = { + author: { bot: false, username: 'user' }, + guild: { id: 'g1' }, + content: 'regular message', + channel: { id: 'c1', sendTyping: vi.fn(), send: vi.fn() }, + mentions: { has: vi.fn().mockReturnValue(false), repliedUser: null }, + reference: null, + }; + await onCallbacks.messageCreate(message); + expect(recordCommunityActivity).toHaveBeenCalledWith(message, config); + }); + }); + + describe('registerErrorHandlers', () => { + it('should register error and unhandledRejection handlers', () => { + const on = vi.fn(); + const client = { on }; + + const processOnSpy = vi.spyOn(process, 'on').mockImplementation(() => process); + + registerErrorHandlers(client); + + expect(on).toHaveBeenCalledWith('error', expect.any(Function)); + expect(processOnSpy).toHaveBeenCalledWith('unhandledRejection', expect.any(Function)); + + // Trigger handlers to cover the logging code + const errorCallback = on.mock.calls[0][1]; + errorCallback(new Error('test error')); + + const rejectionCallback = processOnSpy.mock.calls.find( + (call) => call[0] === 'unhandledRejection', + )[1]; + rejectionCallback(new Error('rejection')); + + processOnSpy.mockRestore(); + }); + }); + + describe('registerEventHandlers', () => { + it('should register all handlers', () => { + const once = vi.fn(); + const on = vi.fn(); + const client = { + once, + on, + user: { id: 'bot', tag: 'Bot#1234' }, + guilds: { cache: { size: 1 } }, + }; + const config = {}; + + const processOnSpy = vi.spyOn(process, 'on').mockImplementation(() => process); + + registerEventHandlers(client, config, null); + + expect(once).toHaveBeenCalledWith('clientReady', expect.any(Function)); + 
expect(on).toHaveBeenCalledWith('guildMemberAdd', expect.any(Function)); + expect(on).toHaveBeenCalledWith('messageCreate', expect.any(Function)); + expect(on).toHaveBeenCalledWith('error', expect.any(Function)); + + processOnSpy.mockRestore(); + }); + }); +}); diff --git a/tests/modules/spam.test.js b/tests/modules/spam.test.js new file mode 100644 index 00000000..c8324fb0 --- /dev/null +++ b/tests/modules/spam.test.js @@ -0,0 +1,174 @@ +import { describe, expect, it, vi } from 'vitest'; + +import { isSpam, sendSpamAlert } from '../../src/modules/spam.js'; + +describe('isSpam', () => { + it('should detect "free crypto" spam', () => { + expect(isSpam('Get FREE CRYPTO now!')).toBe(true); + }); + + it('should detect "free bitcoin" spam', () => { + expect(isSpam('Free Bitcoin for everyone')).toBe(true); + }); + + it('should detect "free btc" spam', () => { + expect(isSpam('Free BTC giveaway')).toBe(true); + }); + + it('should detect "free eth" spam', () => { + expect(isSpam('Free ETH airdrop')).toBe(true); + }); + + it('should detect "free nft" spam', () => { + expect(isSpam('Free NFT mint now')).toBe(true); + }); + + it('should detect airdrop claim spam', () => { + expect(isSpam('Claim your airdrop claim now')).toBe(true); + }); + + it('should detect discord nitro free spam', () => { + expect(isSpam('Discord Nitro free giveaway')).toBe(true); + }); + + it('should detect nitro gift claim spam', () => { + expect(isSpam('Nitro gift please claim it')).toBe(true); + }); + + it('should detect click verify account spam', () => { + expect(isSpam('Click here to verify your account')).toBe(true); + }); + + it('should detect guaranteed profit spam', () => { + expect(isSpam('Guaranteed profit every day')).toBe(true); + }); + + it('should detect invest double money spam', () => { + expect(isSpam('Invest and double your money')).toBe(true); + }); + + it('should detect DM me for free spam', () => { + expect(isSpam('DM me for free crypto')).toBe(true); + }); + + it('should detect 
make money claims', () => { + expect(isSpam('Make $5000 daily with this')).toBe(true); + expect(isSpam('Make 10k+ weekly from home')).toBe(true); + expect(isSpam('Make $500 monthly easy')).toBe(true); + }); + + it('should NOT flag normal messages', () => { + expect(isSpam('Hello everyone!')).toBe(false); + expect(isSpam('Can someone help me with JavaScript?')).toBe(false); + expect(isSpam('What is the best programming language?')).toBe(false); + expect(isSpam('I made a new project')).toBe(false); + }); + + it('should NOT flag empty content', () => { + expect(isSpam('')).toBe(false); + }); +}); + +describe('sendSpamAlert', () => { + it('should not send alert if config.moderation is undefined', async () => { + const message = { + author: { id: '123' }, + channel: { id: '456' }, + content: 'spam', + url: 'http://test', + }; + const client = { channels: { fetch: vi.fn() } }; + const config = {}; + + await sendSpamAlert(message, client, config); + expect(client.channels.fetch).not.toHaveBeenCalled(); + }); + + it('should not send alert if no alertChannelId configured', async () => { + const message = { + author: { id: '123' }, + channel: { id: '456' }, + content: 'spam', + url: 'http://test', + }; + const client = { channels: { fetch: vi.fn() } }; + const config = { moderation: {} }; + + await sendSpamAlert(message, client, config); + expect(client.channels.fetch).not.toHaveBeenCalled(); + }); + + it('should not send alert if channel cannot be fetched', async () => { + const message = { + author: { id: '123' }, + channel: { id: '456' }, + content: 'spam', + url: 'http://test', + }; + const client = { channels: { fetch: vi.fn().mockRejectedValue(new Error('not found')) } }; + const config = { moderation: { alertChannelId: '789' } }; + + await sendSpamAlert(message, client, config); + // Should not throw + }); + + it('should send embed to alert channel', async () => { + const mockSend = vi.fn(); + const message = { + author: { id: '123' }, + channel: { id: '456' }, + 
content: 'spam content', + url: 'http://discord.com/msg', + delete: vi.fn(), + }; + const client = { + channels: { fetch: vi.fn().mockResolvedValue({ send: mockSend }) }, + }; + const config = { moderation: { alertChannelId: '789' } }; + + await sendSpamAlert(message, client, config); + expect(client.channels.fetch).toHaveBeenCalledWith('789'); + expect(mockSend).toHaveBeenCalledWith(expect.objectContaining({ embeds: expect.any(Array) })); + // autoDelete is not enabled, so message.delete should NOT be called + expect(message.delete).not.toHaveBeenCalled(); + }); + + it('should auto-delete message if autoDelete is enabled', async () => { + const mockDelete = vi.fn().mockResolvedValue(undefined); + const mockSend = vi.fn(); + const message = { + author: { id: '123' }, + channel: { id: '456' }, + content: 'spam', + url: 'http://test', + delete: mockDelete, + }; + const client = { + channels: { fetch: vi.fn().mockResolvedValue({ send: mockSend }) }, + }; + const config = { moderation: { alertChannelId: '789', autoDelete: true } }; + + await sendSpamAlert(message, client, config); + expect(mockDelete).toHaveBeenCalled(); + }); + + it('should not crash if auto-delete fails', async () => { + const mockDelete = vi.fn().mockRejectedValue(new Error('permission')); + const mockSend = vi.fn(); + const message = { + author: { id: '123' }, + channel: { id: '456' }, + content: 'spam', + url: 'http://test', + delete: mockDelete, + }; + const client = { + channels: { fetch: vi.fn().mockResolvedValue({ send: mockSend }) }, + }; + const config = { moderation: { alertChannelId: '789', autoDelete: true } }; + + await sendSpamAlert(message, client, config); + expect(mockDelete).toHaveBeenCalled(); + // Should not throw + }); +}); diff --git a/tests/modules/welcome.test.js b/tests/modules/welcome.test.js new file mode 100644 index 00000000..bb9874a2 --- /dev/null +++ b/tests/modules/welcome.test.js @@ -0,0 +1,421 @@ +import { describe, expect, it, vi } from 'vitest'; + +// Mock logger 
+vi.mock('../../src/logger.js', () => ({ + info: vi.fn(), + error: vi.fn(), + warn: vi.fn(), + debug: vi.fn(), +})); + +import { + recordCommunityActivity, + renderWelcomeMessage, + sendWelcomeMessage, +} from '../../src/modules/welcome.js'; + +describe('renderWelcomeMessage', () => { + it('should replace {user} with mention', () => { + const result = renderWelcomeMessage( + 'Hello {user}!', + { id: '123' }, + { name: 'Test', memberCount: 10 }, + ); + expect(result).toBe('Hello <@123>!'); + }); + + it('should replace {username} with username', () => { + const result = renderWelcomeMessage( + 'Hello {username}!', + { id: '123', username: 'testuser' }, + { name: 'Test', memberCount: 10 }, + ); + expect(result).toBe('Hello testuser!'); + }); + + it('should replace {server} with guild name', () => { + const result = renderWelcomeMessage( + 'Welcome to {server}!', + { id: '123' }, + { name: 'My Server', memberCount: 10 }, + ); + expect(result).toBe('Welcome to My Server!'); + }); + + it('should replace {memberCount}', () => { + const result = renderWelcomeMessage( + 'You are member #{memberCount}!', + { id: '123' }, + { name: 'Test', memberCount: 42 }, + ); + expect(result).toBe('You are member #42!'); + }); + + it('should handle multiple replacements', () => { + const result = renderWelcomeMessage( + 'Welcome {user} ({username}) to {server}! Member #{memberCount}', + { id: '123', username: 'bob' }, + { name: 'Cool Server', memberCount: 100 }, + ); + expect(result).toBe('Welcome <@123> (bob) to Cool Server! 
Member #100'); + }); + + it('should handle missing username', () => { + const result = renderWelcomeMessage( + '{username}', + { id: '123' }, + { name: 'Test', memberCount: 1 }, + ); + expect(result).toBe('Unknown'); + }); +}); + +describe('recordCommunityActivity', () => { + it('should not crash on null message', () => { + recordCommunityActivity(null, {}); + }); + + it('should not record for bot messages', () => { + const message = { + guild: { id: 'g1' }, + channel: { id: 'c1', isTextBased: () => true }, + author: { bot: true }, + }; + recordCommunityActivity(message, {}); + }); + + it('should not record for non-text channels', () => { + const message = { + guild: { id: 'g1' }, + channel: { id: 'c1', isTextBased: () => false }, + author: { bot: false }, + }; + recordCommunityActivity(message, {}); + }); + + it('should not record for excluded channels', () => { + const message = { + guild: { id: 'g1' }, + channel: { id: 'excluded-ch', isTextBased: () => true }, + author: { bot: false }, + }; + const config = { + welcome: { + dynamic: { excludeChannels: ['excluded-ch'] }, + }, + }; + recordCommunityActivity(message, config); + }); + + it('should record activity for valid messages', () => { + const message = { + guild: { id: 'g1' }, + channel: { id: 'c1', isTextBased: () => true }, + author: { bot: false }, + }; + const config = { welcome: { dynamic: {} } }; + // Should not throw + recordCommunityActivity(message, config); + }); + + it('should handle messages with no guild', () => { + const message = { + guild: null, + channel: { id: 'c1', isTextBased: () => true }, + author: { bot: false }, + }; + recordCommunityActivity(message, {}); + }); + + it('should handle empty dynamic config', () => { + const message = { + guild: { id: 'g1' }, + channel: { id: 'c1', isTextBased: () => true }, + author: { bot: false }, + }; + recordCommunityActivity(message, {}); + }); +}); + +describe('sendWelcomeMessage', () => { + it('should not send if welcome is disabled', async () => 
{ + const member = { user: { tag: 'user#1234' }, guild: { name: 'Test' } }; + const client = { channels: { fetch: vi.fn() } }; + const config = { welcome: { enabled: false } }; + await sendWelcomeMessage(member, client, config); + expect(client.channels.fetch).not.toHaveBeenCalled(); + }); + + it('should not send if no channelId configured', async () => { + const member = { user: { tag: 'user#1234' }, guild: { name: 'Test' } }; + const client = { channels: { fetch: vi.fn() } }; + const config = { welcome: { enabled: true } }; + await sendWelcomeMessage(member, client, config); + expect(client.channels.fetch).not.toHaveBeenCalled(); + }); + + it('should not send if channel cannot be fetched', async () => { + const member = { + id: '123', + user: { tag: 'user#1234', username: 'user' }, + guild: { name: 'Test', memberCount: 10 }, + }; + const client = { channels: { fetch: vi.fn().mockResolvedValue(null) } }; + const config = { welcome: { enabled: true, channelId: 'ch1' } }; + await sendWelcomeMessage(member, client, config); + }); + + it('should send static welcome message', async () => { + const mockSend = vi.fn(); + const member = { + id: '123', + user: { tag: 'user#1234', username: 'testuser' }, + guild: { name: 'Test Server', memberCount: 50 }, + }; + const client = { channels: { fetch: vi.fn().mockResolvedValue({ send: mockSend }) } }; + const config = { + welcome: { + enabled: true, + channelId: 'ch1', + message: 'Welcome {user} to {server}!', + }, + }; + await sendWelcomeMessage(member, client, config); + expect(mockSend).toHaveBeenCalledWith('Welcome <@123> to Test Server!'); + }); + + it('should send dynamic welcome message when enabled', async () => { + const mockSend = vi.fn(); + const member = { + id: '123', + user: { tag: 'user#1234', username: 'testuser' }, + guild: { + name: 'Test Server', + memberCount: 50, + channels: { + cache: { + filter: vi.fn().mockReturnValue({ size: 0, values: () => [] }), + has: vi.fn().mockReturnValue(false), + }, + }, + }, + 
}; + const client = { channels: { fetch: vi.fn().mockResolvedValue({ send: mockSend }) } }; + const config = { + welcome: { + enabled: true, + channelId: 'ch1', + dynamic: { + enabled: true, + timezone: 'America/New_York', + }, + }, + }; + await sendWelcomeMessage(member, client, config); + expect(mockSend).toHaveBeenCalled(); + const sentMessage = mockSend.mock.calls[0][0]; + expect(sentMessage).toContain('<@123>'); + }); + + it('should handle errors gracefully', async () => { + const member = { + id: '123', + user: { tag: 'user#1234', username: 'testuser' }, + guild: { name: 'Test', memberCount: 10 }, + }; + const client = { channels: { fetch: vi.fn().mockRejectedValue(new Error('channel error')) } }; + const config = { welcome: { enabled: true, channelId: 'ch1' } }; + + // Should not throw + await sendWelcomeMessage(member, client, config); + }); + + it('should send dynamic message with milestone', async () => { + const mockSend = vi.fn(); + const member = { + id: '123', + user: { tag: 'user#1234', username: 'testuser' }, + guild: { + name: 'Test Server', + memberCount: 100, // Notable milestone + channels: { + cache: { + filter: vi.fn().mockReturnValue({ size: 0, values: () => [] }), + has: vi.fn().mockReturnValue(false), + }, + }, + }, + }; + const client = { channels: { fetch: vi.fn().mockResolvedValue({ send: mockSend }) } }; + const config = { + welcome: { + enabled: true, + channelId: 'ch1', + dynamic: { + enabled: true, + timezone: 'UTC', + }, + }, + }; + await sendWelcomeMessage(member, client, config); + const sentMessage = mockSend.mock.calls[0][0]; + expect(sentMessage).toContain('#100'); + expect(sentMessage).toContain('milestone'); + }); + + it('should use default welcome message if not configured', async () => { + const mockSend = vi.fn(); + const member = { + id: '123', + user: { tag: 'user#1234', username: 'testuser' }, + guild: { name: 'Test', memberCount: 10 }, + }; + const client = { channels: { fetch: vi.fn().mockResolvedValue({ send: 
mockSend }) } }; + const config = { welcome: { enabled: true, channelId: 'ch1' } }; + await sendWelcomeMessage(member, client, config); + expect(mockSend).toHaveBeenCalledWith('Welcome, <@123>!'); + }); + + it('should send dynamic message with highlight channels', async () => { + const mockSend = vi.fn(); + const member = { + id: '123', + user: { tag: 'user#1234', username: 'testuser' }, + guild: { + name: 'Test Server', + memberCount: 51, + channels: { + cache: { + filter: vi.fn().mockReturnValue({ size: 0, values: () => [] }), + has: vi.fn().mockReturnValue(true), + }, + }, + }, + }; + const client = { channels: { fetch: vi.fn().mockResolvedValue({ send: mockSend }) } }; + const config = { + welcome: { + enabled: true, + channelId: 'ch1', + message: 'Hello <#111> and <#222>', + dynamic: { + enabled: true, + timezone: 'UTC', + highlightChannels: ['ch-intro', 'ch-general'], + }, + }, + }; + await sendWelcomeMessage(member, client, config); + expect(mockSend).toHaveBeenCalled(); + }); + + it('should send dynamic message with active voice channels', async () => { + const mockSend = vi.fn(); + const voiceChannel = { + isVoiceBased: () => true, + members: { size: 3 }, + }; + const member = { + id: '123', + user: { tag: 'user#1234', username: 'testuser' }, + guild: { + name: 'Test Server', + memberCount: 51, + channels: { + cache: { + filter: vi.fn().mockReturnValue({ + size: 1, + values: () => [voiceChannel][Symbol.iterator](), + }), + has: vi.fn().mockReturnValue(false), + }, + }, + }, + }; + const client = { channels: { fetch: vi.fn().mockResolvedValue({ send: mockSend }) } }; + const config = { + welcome: { + enabled: true, + channelId: 'ch1', + dynamic: { enabled: true, timezone: 'UTC' }, + }, + }; + + // Record some light activity to get "light" level with voice + for (let i = 0; i < 2; i++) { + recordCommunityActivity( + { + guild: { id: member.guild.id || 'test-guild' }, + channel: { id: `voice-test-ch-${i}`, isTextBased: () => true }, + author: { bot: false }, 
+ }, + config, + ); + } + + await sendWelcomeMessage(member, client, config); + expect(mockSend).toHaveBeenCalled(); + }); + + it('should send dynamic message with 3 suggested channels (3-channel CTA)', async () => { + const mockSend = vi.fn(); + const member = { + id: '123', + user: { tag: 'user#1234', username: 'testuser' }, + guild: { + name: 'Test Server', + memberCount: 51, + channels: { + cache: { + filter: vi.fn().mockReturnValue({ size: 0, values: () => [][Symbol.iterator]() }), + has: vi.fn().mockReturnValue(true), + }, + }, + }, + }; + const client = { channels: { fetch: vi.fn().mockResolvedValue({ send: mockSend }) } }; + const config = { + welcome: { + enabled: true, + channelId: 'ch1', + message: 'Hello <#111> and <#222> and <#333>', + dynamic: { + enabled: true, + timezone: 'UTC', + highlightChannels: ['ch-intro', 'ch-general', 'ch-projects'], + }, + }, + }; + await sendWelcomeMessage(member, client, config); + expect(mockSend).toHaveBeenCalled(); + }); + + it('should handle dynamic message with milestone interval', async () => { + const mockSend = vi.fn(); + const member = { + id: '123', + user: { tag: 'user#1234', username: 'testuser' }, + guild: { + name: 'Test', + memberCount: 75, // 75 % 25 === 0 → milestone + channels: { + cache: { + filter: vi.fn().mockReturnValue({ size: 0, values: () => [] }), + has: vi.fn().mockReturnValue(false), + }, + }, + }, + }; + const client = { channels: { fetch: vi.fn().mockResolvedValue({ send: mockSend }) } }; + const config = { + welcome: { + enabled: true, + channelId: 'ch1', + dynamic: { enabled: true, timezone: 'UTC', milestoneInterval: 25 }, + }, + }; + await sendWelcomeMessage(member, client, config); + const msg = mockSend.mock.calls[0][0]; + expect(msg).toContain('milestone'); + }); +}); diff --git a/tests/utils/errors.test.js b/tests/utils/errors.test.js new file mode 100644 index 00000000..fe45f808 --- /dev/null +++ b/tests/utils/errors.test.js @@ -0,0 +1,330 @@ +import { describe, expect, it } from 
'vitest'; +import { + classifyError, + ErrorType, + getSuggestedNextSteps, + getUserFriendlyMessage, + isRetryable, +} from '../../src/utils/errors.js'; + +describe('ErrorType', () => { + it('should export all error type constants', () => { + expect(ErrorType.NETWORK).toBe('network'); + expect(ErrorType.TIMEOUT).toBe('timeout'); + expect(ErrorType.API_ERROR).toBe('api_error'); + expect(ErrorType.API_RATE_LIMIT).toBe('api_rate_limit'); + expect(ErrorType.API_UNAUTHORIZED).toBe('api_unauthorized'); + expect(ErrorType.API_NOT_FOUND).toBe('api_not_found'); + expect(ErrorType.API_SERVER_ERROR).toBe('api_server_error'); + expect(ErrorType.DISCORD_PERMISSION).toBe('discord_permission'); + expect(ErrorType.DISCORD_CHANNEL_NOT_FOUND).toBe('discord_channel_not_found'); + expect(ErrorType.DISCORD_MISSING_ACCESS).toBe('discord_missing_access'); + expect(ErrorType.CONFIG_MISSING).toBe('config_missing'); + expect(ErrorType.CONFIG_INVALID).toBe('config_invalid'); + expect(ErrorType.UNKNOWN).toBe('unknown'); + }); +}); + +describe('classifyError', () => { + it('should return UNKNOWN for null/undefined error', () => { + expect(classifyError(null)).toBe(ErrorType.UNKNOWN); + expect(classifyError(undefined)).toBe(ErrorType.UNKNOWN); + }); + + it('should classify ECONNREFUSED as NETWORK', () => { + const err = new Error('connection refused'); + err.code = 'ECONNREFUSED'; + expect(classifyError(err)).toBe(ErrorType.NETWORK); + }); + + it('should classify ENOTFOUND as NETWORK', () => { + const err = new Error('not found'); + err.code = 'ENOTFOUND'; + expect(classifyError(err)).toBe(ErrorType.NETWORK); + }); + + it('should classify ETIMEDOUT as TIMEOUT', () => { + const err = new Error('timed out'); + err.code = 'ETIMEDOUT'; + // ETIMEDOUT removed from NETWORK codes — falls through to TIMEOUT check + expect(classifyError(err)).toBe(ErrorType.TIMEOUT); + }); + + it('should classify timeout message as TIMEOUT', () => { + const err = new Error('Request timeout exceeded'); + 
expect(classifyError(err)).toBe(ErrorType.TIMEOUT); + }); + + it('should classify "fetch failed" as NETWORK', () => { + const err = new Error('fetch failed'); + expect(classifyError(err)).toBe(ErrorType.NETWORK); + }); + + it('should classify "network" in message as NETWORK', () => { + const err = new Error('network error occurred'); + expect(classifyError(err)).toBe(ErrorType.NETWORK); + }); + + it('should classify HTTP 401 as API_UNAUTHORIZED', () => { + const err = new Error('unauthorized'); + expect(classifyError(err, { status: 401 })).toBe(ErrorType.API_UNAUTHORIZED); + }); + + it('should classify HTTP 403 as API_UNAUTHORIZED', () => { + const err = new Error('forbidden'); + expect(classifyError(err, { status: 403 })).toBe(ErrorType.API_UNAUTHORIZED); + }); + + it('should classify HTTP 404 as API_NOT_FOUND', () => { + const err = new Error('not found'); + expect(classifyError(err, { status: 404 })).toBe(ErrorType.API_NOT_FOUND); + }); + + it('should classify HTTP 429 as API_RATE_LIMIT', () => { + const err = new Error('too many requests'); + expect(classifyError(err, { status: 429 })).toBe(ErrorType.API_RATE_LIMIT); + }); + + it('should classify HTTP 500 as API_SERVER_ERROR', () => { + const err = new Error('internal server error'); + expect(classifyError(err, { status: 500 })).toBe(ErrorType.API_SERVER_ERROR); + }); + + it('should classify HTTP 503 as API_SERVER_ERROR', () => { + const err = new Error('service unavailable'); + expect(classifyError(err, { status: 503 })).toBe(ErrorType.API_SERVER_ERROR); + }); + + it('should classify HTTP 400 as API_ERROR', () => { + const err = new Error('bad request'); + expect(classifyError(err, { status: 400 })).toBe(ErrorType.API_ERROR); + }); + + it('should classify Discord code 50001 as DISCORD_MISSING_ACCESS', () => { + const err = new Error('missing access'); + err.code = 50001; + expect(classifyError(err)).toBe(ErrorType.DISCORD_MISSING_ACCESS); + }); + + it('should classify "missing access" message as 
DISCORD_MISSING_ACCESS', () => { + const err = new Error('Missing Access'); + expect(classifyError(err)).toBe(ErrorType.DISCORD_MISSING_ACCESS); + }); + + it('should classify Discord code 50013 as DISCORD_PERMISSION', () => { + const err = new Error('missing permissions'); + err.code = 50013; + expect(classifyError(err)).toBe(ErrorType.DISCORD_PERMISSION); + }); + + it('should classify "missing permissions" message as DISCORD_PERMISSION', () => { + const err = new Error('Missing Permissions'); + expect(classifyError(err)).toBe(ErrorType.DISCORD_PERMISSION); + }); + + it('should classify Discord code 10003 as DISCORD_CHANNEL_NOT_FOUND', () => { + const err = new Error('unknown channel'); + err.code = 10003; + expect(classifyError(err)).toBe(ErrorType.DISCORD_CHANNEL_NOT_FOUND); + }); + + it('should classify "unknown channel" message as DISCORD_CHANNEL_NOT_FOUND', () => { + const err = new Error('Unknown Channel'); + expect(classifyError(err)).toBe(ErrorType.DISCORD_CHANNEL_NOT_FOUND); + }); + + it('should classify "config.json not found" as CONFIG_MISSING', () => { + const err = new Error('config.json not found'); + expect(classifyError(err)).toBe(ErrorType.CONFIG_MISSING); + }); + + it('should classify ENOENT as CONFIG_MISSING', () => { + const err = new Error('ENOENT: no such file'); + expect(classifyError(err)).toBe(ErrorType.CONFIG_MISSING); + }); + + it('should classify "invalid config" as CONFIG_INVALID', () => { + const err = new Error('Invalid config file'); + expect(classifyError(err)).toBe(ErrorType.CONFIG_INVALID); + }); + + it('should classify "api error" message as API_ERROR', () => { + const err = new Error('API error occurred'); + expect(classifyError(err)).toBe(ErrorType.API_ERROR); + }); + + it('should classify isApiError context as API_ERROR', () => { + const err = new Error('something happened'); + expect(classifyError(err, { isApiError: true })).toBe(ErrorType.API_ERROR); + }); + + it('should use error.status directly', () => { + const err = new 
Error('error'); + err.status = 429; + expect(classifyError(err)).toBe(ErrorType.API_RATE_LIMIT); + }); + + it('should use context.statusCode', () => { + const err = new Error('error'); + expect(classifyError(err, { statusCode: 500 })).toBe(ErrorType.API_SERVER_ERROR); + }); + + it('should use context.code for network errors', () => { + const err = new Error('something'); + expect(classifyError(err, { code: 'ECONNREFUSED' })).toBe(ErrorType.NETWORK); + }); + + it('should return UNKNOWN for unrecognized errors', () => { + const err = new Error('some random error'); + expect(classifyError(err)).toBe(ErrorType.UNKNOWN); + }); +}); + +describe('getUserFriendlyMessage', () => { + it('should return appropriate message for NETWORK errors', () => { + const err = new Error('fetch failed'); + const msg = getUserFriendlyMessage(err); + expect(msg).toContain('trouble connecting'); + }); + + it('should return appropriate message for TIMEOUT errors', () => { + const err = new Error('timeout'); + const msg = getUserFriendlyMessage(err); + expect(msg).toContain('too long'); + }); + + it('should return appropriate message for rate limit errors', () => { + const err = new Error('rate limited'); + const msg = getUserFriendlyMessage(err, { status: 429 }); + expect(msg).toContain('too many requests'); + }); + + it('should return appropriate message for UNKNOWN errors', () => { + const err = new Error('unknown'); + const msg = getUserFriendlyMessage(err); + expect(msg).toContain('unexpected'); + }); + + it('should return default message for error with empty message', () => { + const err = new Error(); + const msg = getUserFriendlyMessage(err); + expect(typeof msg).toBe('string'); + expect(msg.length).toBeGreaterThan(0); + }); +}); + +describe('getSuggestedNextSteps', () => { + it('should return suggestion for NETWORK errors', () => { + const err = new Error('fetch failed'); + const steps = getSuggestedNextSteps(err); + expect(steps).toContain('AI service'); + }); + + it('should return 
suggestion for TIMEOUT errors', () => { + const err = new Error('timeout'); + const steps = getSuggestedNextSteps(err); + expect(steps).toContain('shorter'); + }); + + it('should return suggestion for API_RATE_LIMIT errors', () => { + const err = new Error('rate'); + const steps = getSuggestedNextSteps(err, { status: 429 }); + expect(steps).toContain('Wait'); + }); + + it('should return suggestion for API_UNAUTHORIZED errors', () => { + const err = new Error('unauth'); + const steps = getSuggestedNextSteps(err, { status: 401 }); + expect(steps).toContain('OPENCLAW_API_KEY'); + }); + + it('should return suggestion for API_NOT_FOUND errors', () => { + const err = new Error('not found'); + const steps = getSuggestedNextSteps(err, { status: 404 }); + expect(steps).toContain('OPENCLAW_API_URL'); + }); + + it('should return suggestion for API_SERVER_ERROR', () => { + const err = new Error('server'); + const steps = getSuggestedNextSteps(err, { status: 500 }); + expect(steps).toContain('recover'); + }); + + it('should return suggestion for DISCORD_PERMISSION', () => { + const err = new Error('Missing Permissions'); + const steps = getSuggestedNextSteps(err); + expect(steps).toContain('permissions'); + }); + + it('should return suggestion for DISCORD_CHANNEL_NOT_FOUND', () => { + const err = new Error('Unknown Channel'); + const steps = getSuggestedNextSteps(err); + expect(steps).toContain('channel'); + }); + + it('should return suggestion for DISCORD_MISSING_ACCESS', () => { + const err = new Error('Missing Access'); + const steps = getSuggestedNextSteps(err); + expect(steps).toContain('access'); + }); + + it('should return suggestion for CONFIG_MISSING', () => { + const err = new Error('config.json not found'); + const steps = getSuggestedNextSteps(err); + expect(steps).toContain('config.json'); + }); + + it('should return suggestion for CONFIG_INVALID', () => { + const err = new Error('Invalid config file'); + const steps = getSuggestedNextSteps(err); + 
expect(steps).toContain('syntax'); + }); + + it('should return null for UNKNOWN errors', () => { + const err = new Error('totally unknown'); + const steps = getSuggestedNextSteps(err); + expect(steps).toBeNull(); + }); +}); + +describe('isRetryable', () => { + it('should return true for NETWORK errors', () => { + const err = new Error('fetch failed'); + expect(isRetryable(err)).toBe(true); + }); + + it('should return true for TIMEOUT errors', () => { + const err = new Error('timeout'); + expect(isRetryable(err)).toBe(true); + }); + + it('should return true for API_SERVER_ERROR', () => { + const err = new Error('server error'); + expect(isRetryable(err, { status: 500 })).toBe(true); + }); + + it('should return true for API_RATE_LIMIT', () => { + const err = new Error('rate limit'); + expect(isRetryable(err, { status: 429 })).toBe(true); + }); + + it('should return false for API_UNAUTHORIZED', () => { + const err = new Error('unauthorized'); + expect(isRetryable(err, { status: 401 })).toBe(false); + }); + + it('should return false for CONFIG_MISSING', () => { + const err = new Error('config.json not found'); + expect(isRetryable(err)).toBe(false); + }); + + it('should return false for UNKNOWN errors', () => { + const err = new Error('unknown error'); + expect(isRetryable(err)).toBe(false); + }); + + it('should return false for DISCORD_PERMISSION', () => { + const err = new Error('Missing Permissions'); + expect(isRetryable(err)).toBe(false); + }); +}); diff --git a/tests/utils/health.test.js b/tests/utils/health.test.js new file mode 100644 index 00000000..108e76e5 --- /dev/null +++ b/tests/utils/health.test.js @@ -0,0 +1,141 @@ +import { afterEach, beforeEach, describe, expect, it, vi } from 'vitest'; + +describe('HealthMonitor', () => { + let HealthMonitor; + + beforeEach(async () => { + // Reset the module to clear the singleton between tests + vi.resetModules(); + const mod = await import('../../src/utils/health.js'); + HealthMonitor = mod.HealthMonitor; + // 
FRAGILE COUPLING: We directly set HealthMonitor.instance = null to reset + // the singleton between tests. This relies on the internal implementation + // detail that the singleton is stored as a static 'instance' property. + // A cleaner approach would be a static resetInstance() method, but that + // would add test-only code to production. If the singleton storage changes, + // these tests will need updating. + HealthMonitor.instance = null; + }); + + afterEach(() => { + vi.restoreAllMocks(); + }); + + it('should create singleton via getInstance', () => { + const instance1 = HealthMonitor.getInstance(); + const instance2 = HealthMonitor.getInstance(); + expect(instance1).toBe(instance2); + }); + + it('should throw if constructor called directly when instance exists', () => { + HealthMonitor.getInstance(); // Create first instance + expect(() => new HealthMonitor()).toThrow('Use HealthMonitor.getInstance()'); + }); + + it('should record start time', () => { + const monitor = HealthMonitor.getInstance(); + const before = Date.now(); + monitor.recordStart(); + const after = Date.now(); + expect(monitor.startTime).toBeGreaterThanOrEqual(before); + expect(monitor.startTime).toBeLessThanOrEqual(after); + }); + + it('should record AI request timestamp', () => { + const monitor = HealthMonitor.getInstance(); + expect(monitor.lastAIRequest).toBeNull(); + monitor.recordAIRequest(); + expect(monitor.lastAIRequest).toBeTruthy(); + expect(typeof monitor.lastAIRequest).toBe('number'); + }); + + it('should set API status', () => { + const monitor = HealthMonitor.getInstance(); + expect(monitor.apiStatus).toBe('unknown'); + monitor.setAPIStatus('ok'); + expect(monitor.apiStatus).toBe('ok'); + expect(monitor.lastAPICheck).toBeTruthy(); + + monitor.setAPIStatus('error'); + expect(monitor.apiStatus).toBe('error'); + }); + + it('should calculate uptime', () => { + const monitor = HealthMonitor.getInstance(); + monitor.startTime = Date.now() - 5000; + const uptime = 
monitor.getUptime(); + expect(uptime).toBeGreaterThanOrEqual(4900); + expect(uptime).toBeLessThanOrEqual(6000); + }); + + it('should format uptime as seconds', () => { + const monitor = HealthMonitor.getInstance(); + monitor.startTime = Date.now() - 30 * 1000; + const formatted = monitor.getFormattedUptime(); + expect(formatted).toMatch(/\d+s/); + }); + + it('should format uptime as minutes', () => { + const monitor = HealthMonitor.getInstance(); + monitor.startTime = Date.now() - 5 * 60 * 1000; + const formatted = monitor.getFormattedUptime(); + expect(formatted).toMatch(/\d+m \d+s/); + }); + + it('should format uptime as hours', () => { + const monitor = HealthMonitor.getInstance(); + monitor.startTime = Date.now() - 2 * 60 * 60 * 1000; + const formatted = monitor.getFormattedUptime(); + expect(formatted).toMatch(/\d+h \d+m \d+s/); + }); + + it('should format uptime as days', () => { + const monitor = HealthMonitor.getInstance(); + monitor.startTime = Date.now() - 3 * 24 * 60 * 60 * 1000; + const formatted = monitor.getFormattedUptime(); + expect(formatted).toMatch(/\d+d \d+h \d+m/); + }); + + it('should return memory usage stats', () => { + const monitor = HealthMonitor.getInstance(); + const mem = monitor.getMemoryUsage(); + expect(typeof mem.heapUsed).toBe('number'); + expect(typeof mem.heapTotal).toBe('number'); + expect(typeof mem.rss).toBe('number'); + expect(typeof mem.external).toBe('number'); + }); + + it('should return formatted memory string', () => { + const monitor = HealthMonitor.getInstance(); + const formatted = monitor.getFormattedMemory(); + expect(formatted).toMatch(/\d+MB \/ \d+MB \(RSS: \d+MB\)/); + }); + + it('should return complete status', () => { + const monitor = HealthMonitor.getInstance(); + monitor.recordAIRequest(); + monitor.setAPIStatus('ok'); + + const status = monitor.getStatus(); + expect(status.uptime).toBeGreaterThanOrEqual(0); + expect(status.uptimeFormatted).toBeTruthy(); + expect(status.memory.heapUsed).toBeDefined(); + 
expect(status.memory.formatted).toBeTruthy(); + expect(status.api.status).toBe('ok'); + expect(status.api.lastCheck).toBeTruthy(); + expect(status.lastAIRequest).toBeTruthy(); + expect(status.timestamp).toBeTruthy(); + }); + + it('should return detailed status with process info', () => { + const monitor = HealthMonitor.getInstance(); + const status = monitor.getDetailedStatus(); + + expect(status.process.pid).toBe(process.pid); + expect(status.process.platform).toBe(process.platform); + expect(status.process.nodeVersion).toBe(process.version); + expect(typeof status.process.uptime).toBe('number'); + expect(status.memory.arrayBuffers).toBeDefined(); + expect(status.cpu).toBeDefined(); + }); +}); diff --git a/tests/utils/loadCommands.test.js b/tests/utils/loadCommands.test.js new file mode 100644 index 00000000..ac0a4886 --- /dev/null +++ b/tests/utils/loadCommands.test.js @@ -0,0 +1,80 @@ +import { mkdtempSync, rmSync, writeFileSync } from 'node:fs'; +import { tmpdir } from 'node:os'; +import { join } from 'node:path'; +import { afterEach, describe, expect, it, vi } from 'vitest'; +import { loadCommandsFromDirectory } from '../../src/utils/loadCommands.js'; + +describe('loadCommandsFromDirectory', () => { + let tempDir = null; + + afterEach(() => { + if (tempDir) { + rmSync(tempDir, { recursive: true, force: true }); + tempDir = null; + } + + vi.clearAllMocks(); + }); + + it('loads valid commands and handles invalid/broken modules per file', async () => { + tempDir = mkdtempSync(join(tmpdir(), 'bill-bot-commands-')); + + writeFileSync( + join(tempDir, 'ping.js'), + "export const data = { name: 'ping' }; export async function execute() {}", + 'utf8', + ); + writeFileSync(join(tempDir, 'invalid.js'), "export const data = { name: 'invalid' };", 'utf8'); + writeFileSync(join(tempDir, 'broken.js'), "throw new Error('boom');", 'utf8'); + + const commandLogger = { + info: vi.fn(), + warn: vi.fn(), + error: vi.fn(), + }; + const onCommandLoaded = vi.fn(); + + const commands 
= await loadCommandsFromDirectory({ + commandsPath: tempDir, + onCommandLoaded, + commandLogger, + }); + + expect(commands).toHaveLength(1); + expect(commands[0].data.name).toBe('ping'); + expect(onCommandLoaded).toHaveBeenCalledTimes(1); + expect(commandLogger.info).toHaveBeenCalledWith('Loaded command', { command: 'ping' }); + expect(commandLogger.warn).toHaveBeenCalledWith('Command missing data or execute export', { + file: 'invalid.js', + }); + expect(commandLogger.error).toHaveBeenCalledWith( + 'Failed to load command', + expect.objectContaining({ file: 'broken.js', error: 'boom' }), + ); + }); + + it('supports disabling success logs', async () => { + tempDir = mkdtempSync(join(tmpdir(), 'bill-bot-commands-')); + + writeFileSync( + join(tempDir, 'status.js'), + "export const data = { name: 'status' }; export async function execute() {}", + 'utf8', + ); + + const commandLogger = { + info: vi.fn(), + warn: vi.fn(), + error: vi.fn(), + }; + + const commands = await loadCommandsFromDirectory({ + commandsPath: tempDir, + logLoaded: false, + commandLogger, + }); + + expect(commands).toHaveLength(1); + expect(commandLogger.info).not.toHaveBeenCalled(); + }); +}); diff --git a/tests/utils/permissions.test.js b/tests/utils/permissions.test.js new file mode 100644 index 00000000..8894004e --- /dev/null +++ b/tests/utils/permissions.test.js @@ -0,0 +1,169 @@ +import { describe, expect, it, vi } from 'vitest'; + +// Mock discord.js before importing the module +vi.mock('discord.js', () => ({ + PermissionFlagsBits: { + Administrator: 1n << 3n, + }, +})); + +import { getPermissionError, hasPermission, isAdmin } from '../../src/utils/permissions.js'; + +describe('isAdmin', () => { + it('should return false for null member or config', () => { + expect(isAdmin(null, {})).toBe(false); + expect(isAdmin({}, null)).toBe(false); + expect(isAdmin(null, null)).toBe(false); + }); + + it('should return true for members with Administrator permission', () => { + const member = { + 
permissions: { has: vi.fn().mockReturnValue(true) }, + roles: { cache: { has: vi.fn().mockReturnValue(false) } }, + }; + expect(isAdmin(member, {})).toBe(true); + }); + + it('should return true for members with admin role', () => { + const member = { + permissions: { has: vi.fn().mockReturnValue(false) }, + roles: { cache: { has: vi.fn().mockReturnValue(true) } }, + }; + const config = { permissions: { adminRoleId: '123456' } }; + expect(isAdmin(member, config)).toBe(true); + expect(member.roles.cache.has).toHaveBeenCalledWith('123456'); + }); + + it('should return false for regular members', () => { + const member = { + permissions: { has: vi.fn().mockReturnValue(false) }, + roles: { cache: { has: vi.fn().mockReturnValue(false) } }, + }; + const config = { permissions: { adminRoleId: '123456' } }; + expect(isAdmin(member, config)).toBe(false); + }); + + it('should return false when no adminRoleId configured and not Admin', () => { + const member = { + permissions: { has: vi.fn().mockReturnValue(false) }, + roles: { cache: { has: vi.fn() } }, + }; + expect(isAdmin(member, {})).toBe(false); + }); +}); + +describe('hasPermission', () => { + it('should return false for null member, commandName, or config', () => { + expect(hasPermission(null, 'ping', {})).toBe(false); + expect(hasPermission({}, null, {})).toBe(false); + expect(hasPermission({}, 'ping', null)).toBe(false); + }); + + it('should return true when permissions are disabled', () => { + const member = {}; + const config = { permissions: { enabled: false } }; + expect(hasPermission(member, 'ping', config)).toBe(true); + }); + + it('should return true when usePermissions is false', () => { + const member = {}; + const config = { permissions: { enabled: true, usePermissions: false } }; + expect(hasPermission(member, 'ping', config)).toBe(true); + }); + + it('should return true for "everyone" permission level', () => { + const member = {}; + const config = { + permissions: { + enabled: true, + usePermissions: 
true, + allowedCommands: { ping: 'everyone' }, + }, + }; + expect(hasPermission(member, 'ping', config)).toBe(true); + }); + + it('should check admin for "admin" permission level', () => { + const adminMember = { + permissions: { has: vi.fn().mockReturnValue(true) }, + roles: { cache: { has: vi.fn() } }, + }; + const config = { + permissions: { + enabled: true, + usePermissions: true, + allowedCommands: { config: 'admin' }, + }, + }; + expect(hasPermission(adminMember, 'config', config)).toBe(true); + }); + + it('should deny non-admin for "admin" permission level', () => { + const member = { + permissions: { has: vi.fn().mockReturnValue(false) }, + roles: { cache: { has: vi.fn().mockReturnValue(false) } }, + }; + const config = { + permissions: { + enabled: true, + usePermissions: true, + allowedCommands: { config: 'admin' }, + }, + }; + expect(hasPermission(member, 'config', config)).toBe(false); + }); + + it('should default to admin-only for unknown commands', () => { + const member = { + permissions: { has: vi.fn().mockReturnValue(false) }, + roles: { cache: { has: vi.fn().mockReturnValue(false) } }, + }; + const config = { + permissions: { + enabled: true, + usePermissions: true, + allowedCommands: {}, + }, + }; + expect(hasPermission(member, 'unknown', config)).toBe(false); + }); + + it('should grant admin access to unknown commands', () => { + const adminMember = { + permissions: { has: vi.fn().mockReturnValue(true) }, + roles: { cache: { has: vi.fn() } }, + }; + const config = { + permissions: { + enabled: true, + usePermissions: true, + allowedCommands: {}, + }, + }; + expect(hasPermission(adminMember, 'unknown', config)).toBe(true); + }); + + it('should deny for unknown permission level', () => { + const member = { + permissions: { has: vi.fn().mockReturnValue(false) }, + roles: { cache: { has: vi.fn().mockReturnValue(false) } }, + }; + const config = { + permissions: { + enabled: true, + usePermissions: true, + allowedCommands: { foo: 'moderator' }, + }, 
+ }; + expect(hasPermission(member, 'foo', config)).toBe(false); + }); +}); + +describe('getPermissionError', () => { + it('should return a formatted error message with command name', () => { + const msg = getPermissionError('config'); + expect(msg).toContain('/config'); + expect(msg).toContain('permission'); + expect(msg).toContain('administrator'); + }); +}); diff --git a/tests/utils/registerCommands.test.js b/tests/utils/registerCommands.test.js new file mode 100644 index 00000000..45754190 --- /dev/null +++ b/tests/utils/registerCommands.test.js @@ -0,0 +1,100 @@ +import { afterEach, describe, expect, it, vi } from 'vitest'; + +const discordMocks = vi.hoisted(() => ({ + put: vi.fn(), + setToken: vi.fn(), + routeGlobal: vi.fn((clientId) => `/applications/${clientId}/commands`), + routeGuild: vi.fn((clientId, guildId) => `/applications/${clientId}/guilds/${guildId}/commands`), +})); + +vi.mock('discord.js', () => { + class REST { + constructor() { + this.put = discordMocks.put; + } + + setToken(token) { + discordMocks.setToken(token); + return this; + } + } + + return { + REST, + Routes: { + applicationCommands: discordMocks.routeGlobal, + applicationGuildCommands: discordMocks.routeGuild, + }, + }; +}); + +// Mock logger +vi.mock('../../src/logger.js', () => ({ + info: vi.fn(), + error: vi.fn(), +})); + +import { registerCommands } from '../../src/utils/registerCommands.js'; + +describe('registerCommands', () => { + afterEach(() => { + vi.clearAllMocks(); + }); + + it('should throw if commands is not an array', async () => { + await expect(registerCommands(null, 'client-id', 'token')).rejects.toThrow( + 'Commands must be an array', + ); + }); + + it('should throw if clientId or token is missing', async () => { + await expect(registerCommands([], null, 'token')).rejects.toThrow( + 'Client ID and token are required', + ); + await expect(registerCommands([], 'client-id', null)).rejects.toThrow( + 'Client ID and token are required', + ); + }); + + it('should throw 
if command lacks .data.toJSON()', async () => { + const commands = [{ data: {} }]; + await expect(registerCommands(commands, 'client-id', 'token')).rejects.toThrow( + 'Each command must have a .data property with toJSON() method', + ); + }); + + it('should register global commands when no guildId', async () => { + const commands = [{ data: { toJSON: () => ({ name: 'ping', description: 'Ping' }) } }]; + discordMocks.put.mockResolvedValue([{ name: 'ping' }]); + + await registerCommands(commands, 'client-id', 'token'); + + expect(discordMocks.setToken).toHaveBeenCalledWith('token'); + expect(discordMocks.routeGlobal).toHaveBeenCalledWith('client-id'); + expect(discordMocks.put).toHaveBeenCalledWith('/applications/client-id/commands', { + body: [{ name: 'ping', description: 'Ping' }], + }); + }); + + it('should register guild commands when guildId is provided', async () => { + const commands = [{ data: { toJSON: () => ({ name: 'ping', description: 'Ping' }) } }]; + discordMocks.put.mockResolvedValue([{ name: 'ping' }]); + + await registerCommands(commands, 'client-id', 'token', 'guild-id'); + + expect(discordMocks.routeGuild).toHaveBeenCalledWith('client-id', 'guild-id'); + expect(discordMocks.put).toHaveBeenCalledWith( + '/applications/client-id/guilds/guild-id/commands', + { body: [{ name: 'ping', description: 'Ping' }] }, + ); + }); + + it('should throw on API failure', async () => { + const commands = [{ data: { toJSON: () => ({ name: 'ping', description: 'Ping' }) } }]; + discordMocks.put.mockRejectedValue(new Error('Discord API error')); + + await expect(registerCommands(commands, 'client-id', 'token')).rejects.toThrow( + 'Discord API error', + ); + }); +}); diff --git a/tests/utils/retry.test.js b/tests/utils/retry.test.js new file mode 100644 index 00000000..34673736 --- /dev/null +++ b/tests/utils/retry.test.js @@ -0,0 +1,135 @@ +import { afterEach, beforeEach, describe, expect, it, vi } from 'vitest'; + +// Mock logger before imports 
+vi.mock('../../src/logger.js', () => ({ + debug: vi.fn(), + info: vi.fn(), + warn: vi.fn(), + error: vi.fn(), +})); + +import { createRetryWrapper, withRetry } from '../../src/utils/retry.js'; + +describe('withRetry', () => { + beforeEach(() => { + vi.useFakeTimers(); + }); + + afterEach(() => { + vi.useRealTimers(); + vi.restoreAllMocks(); + }); + + it('should return result on first successful call', async () => { + const fn = vi.fn().mockResolvedValue('success'); + const result = await withRetry(fn); + expect(result).toBe('success'); + expect(fn).toHaveBeenCalledTimes(1); + }); + + it('should retry on retryable errors', async () => { + const fn = vi + .fn() + .mockRejectedValueOnce(Object.assign(new Error('timeout'), { code: 'ETIMEDOUT' })) + .mockResolvedValue('success'); + + const promise = withRetry(fn, { baseDelay: 100, maxRetries: 3 }); + + // Advance time past the first retry delay + await vi.advanceTimersByTimeAsync(200); + + const result = await promise; + expect(result).toBe('success'); + expect(fn).toHaveBeenCalledTimes(2); + }); + + it('should throw after max retries for retryable errors', async () => { + const err = Object.assign(new Error('timeout'), { code: 'ETIMEDOUT' }); + const fn = vi.fn().mockRejectedValue(err); + + const expectation = expect(withRetry(fn, { maxRetries: 2, baseDelay: 100 })).rejects.toThrow( + 'timeout', + ); + + // Advance timers for retry backoff delays + await vi.advanceTimersByTimeAsync(1000); + + await expectation; + expect(fn).toHaveBeenCalledTimes(3); // initial + 2 retries + }); + + it('should throw immediately for non-retryable errors', async () => { + const err = new Error('Missing Permissions'); + const fn = vi.fn().mockRejectedValue(err); + + await expect(withRetry(fn, { maxRetries: 3, baseDelay: 100 })).rejects.toThrow( + 'Missing Permissions', + ); + expect(fn).toHaveBeenCalledTimes(1); + }); + + it('should use custom shouldRetry function', async () => { + const err = new Error('custom error'); + const fn = 
vi.fn().mockRejectedValueOnce(err).mockResolvedValue('ok'); + + const shouldRetry = vi.fn().mockReturnValue(true); + const promise = withRetry(fn, { shouldRetry, baseDelay: 100, maxRetries: 3 }); + + await vi.advanceTimersByTimeAsync(200); + + const result = await promise; + expect(result).toBe('ok'); + expect(shouldRetry).toHaveBeenCalledWith(err, {}); + }); + + it('should respect maxDelay cap', async () => { + const fn = vi + .fn() + .mockRejectedValueOnce(Object.assign(new Error('timeout'), { code: 'ETIMEDOUT' })) + .mockRejectedValueOnce(Object.assign(new Error('timeout'), { code: 'ETIMEDOUT' })) + .mockResolvedValue('ok'); + + const promise = withRetry(fn, { + baseDelay: 10000, + maxDelay: 15000, + maxRetries: 3, + }); + + // The delay should be capped at maxDelay (15000ms) + await vi.advanceTimersByTimeAsync(16000); + await vi.advanceTimersByTimeAsync(16000); + + const result = await promise; + expect(result).toBe('ok'); + }); + + it('should pass context to shouldRetry', async () => { + const err = new Error('fail'); + const fn = vi.fn().mockRejectedValue(err); + const shouldRetry = vi.fn().mockReturnValue(false); + const context = { operation: 'test' }; + + await expect(withRetry(fn, { shouldRetry, context })).rejects.toThrow('fail'); + expect(shouldRetry).toHaveBeenCalledWith(err, context); + }); +}); + +describe('createRetryWrapper', () => { + afterEach(() => { + vi.restoreAllMocks(); + }); + + it('should create a wrapper with default options', async () => { + const wrapper = createRetryWrapper({ maxRetries: 0 }); + const fn = vi.fn().mockResolvedValue('result'); + const result = await wrapper(fn); + expect(result).toBe('result'); + }); + + it('should merge default and per-call options', async () => { + const wrapper = createRetryWrapper({ maxRetries: 0 }); + const fn = vi.fn().mockResolvedValue('ok'); + const result = await wrapper(fn, { context: { test: true } }); + expect(result).toBe('ok'); + }); +}); diff --git a/tests/utils/splitMessage.test.js 
b/tests/utils/splitMessage.test.js new file mode 100644 index 00000000..ee520122 --- /dev/null +++ b/tests/utils/splitMessage.test.js @@ -0,0 +1,91 @@ +import { describe, expect, it } from 'vitest'; +import { needsSplitting, splitMessage } from '../../src/utils/splitMessage.js'; + +describe('splitMessage', () => { + it('should return empty array for empty/null input', () => { + expect(splitMessage('')).toEqual([]); + expect(splitMessage(null)).toEqual([]); + expect(splitMessage(undefined)).toEqual([]); + }); + + it('should return single-element array for short messages', () => { + expect(splitMessage('hello')).toEqual(['hello']); + }); + + it('should not split messages at exactly the limit', () => { + const msg = 'a'.repeat(1990); + expect(splitMessage(msg)).toEqual([msg]); + }); + + it('should split messages longer than the limit', () => { + const msg = 'a'.repeat(2000); + const chunks = splitMessage(msg, 1000); + expect(chunks.length).toBe(2); + expect(chunks[0].length).toBe(1000); + expect(chunks[1].length).toBe(1000); + }); + + it('should split on word boundaries when possible', () => { + // Create a message with spaces — split should happen at a space + const msg = 'hello world foo bar baz qux'; + const chunks = splitMessage(msg, 11); + expect(chunks.length).toBeGreaterThan(1); + // Each chunk should be <= maxLength + for (const chunk of chunks) { + expect(chunk.length).toBeLessThanOrEqual(11); + } + }); + + it('should force split when no space found', () => { + const msg = 'a'.repeat(3000); + const chunks = splitMessage(msg, 1000); + expect(chunks.length).toBe(3); + expect(chunks[0].length).toBe(1000); + expect(chunks[1].length).toBe(1000); + expect(chunks[2].length).toBe(1000); + }); + + it('should handle custom maxLength', () => { + const msg = 'hello world foo bar'; + const chunks = splitMessage(msg, 11); + expect(chunks.length).toBeGreaterThan(1); + for (const chunk of chunks) { + expect(chunk.length).toBeLessThanOrEqual(11); + } + }); + + it('should trim 
leading whitespace on subsequent chunks', () => { + const msg = 'aaaa bbbb cccc'; + const chunks = splitMessage(msg, 5); + for (const chunk of chunks) { + expect(chunk).not.toMatch(/^\s/); + } + }); + + it('should handle messages with only spaces gracefully', () => { + const msg = ' '.repeat(3000); + const chunks = splitMessage(msg, 1000); + // After trim, remaining chunks may be empty; just ensure no crash + expect(Array.isArray(chunks)).toBe(true); + }); +}); + +describe('needsSplitting', () => { + it('should return false for short messages', () => { + expect(needsSplitting('hello')).toBe(false); + }); + + it('should return false for exactly 2000 chars', () => { + expect(needsSplitting('a'.repeat(2000))).toBe(false); + }); + + it('should return true for messages over 2000 chars', () => { + expect(needsSplitting('a'.repeat(2001))).toBe(true); + }); + + it('should return falsy for null/empty', () => { + expect(needsSplitting('')).toBeFalsy(); + expect(needsSplitting(null)).toBeFalsy(); + expect(needsSplitting(undefined)).toBeFalsy(); + }); +}); diff --git a/verify-contextual-logging.js b/verify-contextual-logging.js deleted file mode 100644 index 05c5cf66..00000000 --- a/verify-contextual-logging.js +++ /dev/null @@ -1,321 +0,0 @@ -/** - * Verification Script: Contextual Logging for Discord Events - * - * This script verifies that Discord events include proper context - * in their log output (channel, user, guild) and that the format - * is consistent and parseable. 
- */ - -import { readFileSync, existsSync, readdirSync } from 'fs'; -import { join, dirname } from 'path'; -import { fileURLToPath } from 'url'; - -const __dirname = dirname(fileURLToPath(import.meta.url)); -const logsDir = join(__dirname, 'logs'); - -console.log('='.repeat(70)); -console.log('CONTEXTUAL LOGGING VERIFICATION'); -console.log('='.repeat(70)); -console.log(); - -// Expected context fields for each event type -const expectedContextFields = { - 'Welcome message': ['user', 'userId', 'guild', 'guildId', 'channel', 'channelId'], - 'Spam detected': ['user', 'userId', 'channel', 'channelId', 'guild', 'guildId', 'contentPreview'], - 'AI chat': ['channelId', 'username'] // AI chat context is minimal but present in error logs -}; - -let passed = 0; -let failed = 0; -let warnings = 0; - -function pass(message) { - console.log(`✅ PASS: ${message}`); - passed++; -} - -function fail(message) { - console.log(`❌ FAIL: ${message}`); - failed++; -} - -function warn(message) { - console.log(`⚠️ WARN: ${message}`); - warnings++; -} - -// 1. Check if logs directory exists -console.log('1. Checking logs directory...'); -if (!existsSync(logsDir)) { - fail('Logs directory does not exist. Run the bot with fileOutput enabled first.'); - console.log('\nSKIPPING remaining tests - no log files to analyze\n'); - process.exit(1); -} else { - pass('Logs directory exists'); -} -console.log(); - -// 2. Find and read log files -console.log('2. Reading log files...'); -const logFiles = readdirSync(logsDir).filter(f => f.startsWith('combined-') && f.endsWith('.log')); - -if (logFiles.length === 0) { - fail('No combined log files found. Run the bot with fileOutput enabled first.'); - console.log('\nSKIPPING remaining tests - no log files to analyze\n'); - process.exit(1); -} - -console.log(` Found ${logFiles.length} log file(s):`); -logFiles.forEach(f => console.log(` - ${f}`)); -pass('Log files found'); -console.log(); - -// 3. Parse and analyze log entries -console.log('3. 
Analyzing log entries for contextual data...'); -const allLogEntries = []; -let parseErrors = 0; - -for (const file of logFiles) { - const content = readFileSync(join(logsDir, file), 'utf-8'); - const lines = content.trim().split('\n').filter(l => l.trim()); - - for (const line of lines) { - try { - const entry = JSON.parse(line); - allLogEntries.push(entry); - } catch (err) { - parseErrors++; - fail(`Failed to parse log line: ${line.slice(0, 50)}...`); - } - } -} - -if (parseErrors === 0) { - pass(`All ${allLogEntries.length} log entries are valid JSON`); -} else { - fail(`${parseErrors} log entries failed to parse`); -} -console.log(); - -// 4. Verify timestamp presence -console.log('4. Verifying timestamps...'); -const entriesWithTimestamp = allLogEntries.filter(e => e.timestamp); -if (entriesWithTimestamp.length === allLogEntries.length) { - pass('All log entries include timestamps'); -} else { - fail(`${allLogEntries.length - entriesWithTimestamp.length} entries missing timestamps`); -} -console.log(); - -// 5. Check for welcome message context -console.log('5. Checking Welcome Message context...'); -const welcomeLogs = allLogEntries.filter(e => - e.message && e.message.includes('Welcome message') -); - -if (welcomeLogs.length === 0) { - warn('No welcome message logs found. Trigger a user join to test this.'); -} else { - console.log(` Found ${welcomeLogs.length} welcome message log(s)`); - - let contextComplete = true; - for (const log of welcomeLogs) { - const missing = expectedContextFields['Welcome message'].filter( - field => !log[field] && log[field] !== 0 - ); - - if (missing.length > 0) { - fail(`Welcome message log missing context: ${missing.join(', ')}`); - contextComplete = false; - } - } - - if (contextComplete) { - pass('Welcome message logs include all expected context fields'); - console.log(' Context fields:', expectedContextFields['Welcome message'].join(', ')); - } -} -console.log(); - -// 6. Check for spam detection context -console.log('6. 
Checking Spam Detection context...'); -const spamLogs = allLogEntries.filter(e => - e.message && e.message.includes('Spam detected') -); - -if (spamLogs.length === 0) { - warn('No spam detection logs found. Post a spam message to test this.'); -} else { - console.log(` Found ${spamLogs.length} spam detection log(s)`); - - let contextComplete = true; - for (const log of spamLogs) { - const missing = expectedContextFields['Spam detected'].filter( - field => !log[field] && log[field] !== 0 - ); - - if (missing.length > 0) { - fail(`Spam detection log missing context: ${missing.join(', ')}`); - contextComplete = false; - } - } - - if (contextComplete) { - pass('Spam detection logs include all expected context fields'); - console.log(' Context fields:', expectedContextFields['Spam detected'].join(', ')); - } -} -console.log(); - -// 7. Check for AI chat context (in error logs) -console.log('7. Checking AI Chat context...'); -const aiLogs = allLogEntries.filter(e => - e.message && (e.message.includes('OpenClaw API') || e.message.includes('AI')) -); - -if (aiLogs.length === 0) { - warn('No AI chat logs found. Mention the bot to trigger AI chat.'); -} else { - console.log(` Found ${aiLogs.length} AI-related log(s)`); - - // AI chat logs should include channelId and username in error cases - const aiErrorLogs = aiLogs.filter(e => e.level === 'error'); - if (aiErrorLogs.length > 0) { - let contextComplete = true; - for (const log of aiErrorLogs) { - if (!log.channelId || !log.username) { - fail('AI error log missing context (channelId or username)'); - contextComplete = false; - } - } - - if (contextComplete) { - pass('AI error logs include channelId and username context'); - } - } else { - warn('No AI error logs found (this is good - no errors occurred)'); - } -} -console.log(); - -// 8. Verify log format consistency -console.log('8. 
Verifying log format consistency...'); -const requiredFields = ['level', 'message', 'timestamp']; -let formatConsistent = true; - -for (const entry of allLogEntries) { - const missing = requiredFields.filter(field => !entry[field]); - if (missing.length > 0) { - fail(`Log entry missing required fields: ${missing.join(', ')}`); - formatConsistent = false; - break; - } -} - -if (formatConsistent) { - pass('All log entries have consistent format (level, message, timestamp)'); -} -console.log(); - -// 9. Check log levels -console.log('9. Verifying log levels...'); -const levels = new Set(allLogEntries.map(e => e.level)); -console.log(` Found log levels: ${Array.from(levels).join(', ')}`); - -const validLevels = ['debug', 'info', 'warn', 'error']; -const invalidLevels = Array.from(levels).filter(l => !validLevels.includes(l)); - -if (invalidLevels.length === 0) { - pass('All log entries use valid log levels'); -} else { - fail(`Invalid log levels found: ${invalidLevels.join(', ')}`); -} -console.log(); - -// 10. Verify Discord event context patterns -console.log('10. 
Verifying Discord event context patterns...'); - -// Events that should include guild context -const guildEvents = allLogEntries.filter(e => - e.message && ( - e.message.includes('Welcome message') || - e.message.includes('Spam detected') - ) -); - -if (guildEvents.length > 0) { - const withGuildContext = guildEvents.filter(e => e.guild && e.guildId); - if (withGuildContext.length === guildEvents.length) { - pass('All guild events include guild and guildId context'); - } else { - fail(`${guildEvents.length - withGuildContext.length} guild events missing guild context`); - } -} - -// Events that should include channel context -const channelEvents = allLogEntries.filter(e => - e.message && ( - e.message.includes('Welcome message') || - e.message.includes('Spam detected') || - e.message.includes('enabled') && e.channelId - ) -); - -if (channelEvents.length > 0) { - const withChannelContext = channelEvents.filter(e => e.channelId); - if (withChannelContext.length === channelEvents.length) { - pass('All channel events include channelId context'); - } else { - fail(`${channelEvents.length - withChannelContext.length} channel events missing channelId`); - } -} - -// Events that should include user context -const userEvents = allLogEntries.filter(e => - e.message && ( - e.message.includes('Welcome message') || - e.message.includes('Spam detected') - ) -); - -if (userEvents.length > 0) { - const withUserContext = userEvents.filter(e => e.user && e.userId); - if (withUserContext.length === userEvents.length) { - pass('All user events include user and userId context'); - } else { - fail(`${userEvents.length - withUserContext.length} user events missing user context`); - } -} -console.log(); - -// Summary -console.log('='.repeat(70)); -console.log('VERIFICATION SUMMARY'); -console.log('='.repeat(70)); -console.log(`Total log entries analyzed: ${allLogEntries.length}`); -console.log(`✅ Passed: ${passed}`); -console.log(`❌ Failed: ${failed}`); -console.log(`⚠️ Warnings: 
${warnings}`); -console.log(); - -if (failed === 0 && warnings <= 3) { - console.log('✅ VERIFICATION PASSED - Contextual logging is working correctly!'); - console.log(); - console.log('Notes:'); - console.log('- All log entries are properly formatted with timestamps'); - console.log('- Discord events include appropriate context (channel, user, guild)'); - console.log('- Log format is consistent and parseable as JSON'); - console.log('- Warnings are expected if not all event types were triggered'); - process.exit(0); -} else if (failed === 0) { - console.log('⚠️ VERIFICATION PASSED WITH WARNINGS'); - console.log(); - console.log('To fully verify, trigger the following events:'); - if (welcomeLogs.length === 0) console.log('- User join (welcome message)'); - if (spamLogs.length === 0) console.log('- Spam message (spam detection)'); - if (aiLogs.length === 0) console.log('- Mention bot (AI chat)'); - process.exit(0); -} else { - console.log('❌ VERIFICATION FAILED - Issues found with contextual logging'); - process.exit(1); -} diff --git a/verify-file-output.js b/verify-file-output.js deleted file mode 100644 index 31b615de..00000000 --- a/verify-file-output.js +++ /dev/null @@ -1,180 +0,0 @@ -/** - * Verification script for file output and rotation configuration - * Tests that logger creates log files with proper JSON format - */ - -import { debug, info, warn, error } from './src/logger.js'; -import { existsSync, readFileSync, readdirSync } from 'fs'; -import { join, dirname } from 'path'; -import { fileURLToPath } from 'url'; - -const __dirname = dirname(fileURLToPath(import.meta.url)); -const logsDir = join(__dirname, 'logs'); - -console.log('\n🧪 Starting file output verification...\n'); - -// Generate test logs at different levels -info('File output verification started'); -debug('This is a debug message for testing', { testId: 1, service: 'verification' }); -info('This is an info message for testing', { testId: 2, channel: 'test-channel' }); -warn('This is a 
warning message for testing', { testId: 3, user: 'test-user' }); -error('This is an error message for testing', { testId: 4, code: 'TEST_ERROR' }); - -// Log with sensitive data to verify redaction -info('Testing sensitive data redaction', { - DISCORD_TOKEN: 'this-should-be-redacted', - username: 'safe-to-log', - password: 'this-should-also-be-redacted' -}); - -console.log('✅ Test logs generated\n'); - -// Wait a moment for file writes to complete -setTimeout(() => { - console.log('🔍 Verifying log files...\n'); - - // Check 1: Logs directory exists - if (!existsSync(logsDir)) { - console.error('❌ FAIL: logs directory was not created'); - process.exit(1); - } - console.log('✅ PASS: logs directory exists'); - - // Check 2: List files in logs directory - const logFiles = readdirSync(logsDir); - console.log(`\n📁 Files in logs directory: ${logFiles.join(', ')}`); - - // Check 3: Combined log file exists - const combinedLog = logFiles.find(f => f.startsWith('combined-')); - if (!combinedLog) { - console.error('❌ FAIL: combined log file not found'); - process.exit(1); - } - console.log(`✅ PASS: combined log file exists (${combinedLog})`); - - // Check 4: Error log file exists - const errorLog = logFiles.find(f => f.startsWith('error-')); - if (!errorLog) { - console.error('❌ FAIL: error log file not found'); - process.exit(1); - } - console.log(`✅ PASS: error log file exists (${errorLog})`); - - // Check 5: Combined log contains valid JSON - console.log('\n📄 Verifying combined log format...'); - const combinedPath = join(logsDir, combinedLog); - const combinedContent = readFileSync(combinedPath, 'utf-8'); - const combinedLines = combinedContent.trim().split('\n').filter(line => line.trim()); - - console.log(`\nCombined log entries: ${combinedLines.length}`); - - let validJsonCount = 0; - let hasInfoLevel = false; - let hasWarnLevel = false; - let hasErrorLevel = false; - let sensitiveDataRedacted = false; - - for (const line of combinedLines) { - try { - const entry = 
JSON.parse(line); - validJsonCount++; - - // Verify required fields - if (!entry.timestamp || !entry.level || !entry.message) { - console.error(`❌ FAIL: Log entry missing required fields: ${line}`); - process.exit(1); - } - - // Track log levels - if (entry.level === 'info') hasInfoLevel = true; - if (entry.level === 'warn') hasWarnLevel = true; - if (entry.level === 'error') hasErrorLevel = true; - - // Check for sensitive data redaction - if (entry.message.includes('sensitive data')) { - if (entry.DISCORD_TOKEN === '[REDACTED]' && entry.password === '[REDACTED]') { - sensitiveDataRedacted = true; - } else { - console.error('❌ FAIL: Sensitive data was not redacted properly'); - console.error('Entry:', JSON.stringify(entry, null, 2)); - process.exit(1); - } - } - - // Display sample entry - if (validJsonCount === 1) { - console.log('\nSample log entry:'); - console.log(JSON.stringify(entry, null, 2)); - } - } catch (err) { - console.error(`❌ FAIL: Invalid JSON in combined log: ${line}`); - console.error('Parse error:', err.message); - process.exit(1); - } - } - - console.log(`\n✅ PASS: All ${validJsonCount} entries are valid JSON`); - console.log(`✅ PASS: Timestamps present in all entries`); - console.log(`✅ PASS: Log levels present - info: ${hasInfoLevel}, warn: ${hasWarnLevel}, error: ${hasErrorLevel}`); - console.log(`✅ PASS: Sensitive data redacted: ${sensitiveDataRedacted}`); - - // Check 6: Error log contains only error-level entries - console.log('\n📄 Verifying error log format...'); - const errorPath = join(logsDir, errorLog); - const errorContent = readFileSync(errorPath, 'utf-8'); - const errorLines = errorContent.trim().split('\n').filter(line => line.trim()); - - console.log(`\nError log entries: ${errorLines.length}`); - - for (const line of errorLines) { - try { - const entry = JSON.parse(line); - - if (entry.level !== 'error') { - console.error(`❌ FAIL: Non-error level found in error log: ${entry.level}`); - process.exit(1); - } - - // Display sample 
error entry - if (errorLines.indexOf(line) === 0) { - console.log('\nSample error entry:'); - console.log(JSON.stringify(entry, null, 2)); - } - } catch (err) { - console.error(`❌ FAIL: Invalid JSON in error log: ${line}`); - console.error('Parse error:', err.message); - process.exit(1); - } - } - - console.log(`\n✅ PASS: All error log entries are error-level only`); - console.log(`✅ PASS: Error log format is valid JSON`); - - // Check 7: Verify rotation configuration - console.log('\n🔄 Verifying rotation configuration...'); - console.log('Expected: Daily rotation with YYYY-MM-DD pattern'); - console.log('Expected: Max size 20MB, max files 14 days'); - - const datePattern = /\d{4}-\d{2}-\d{2}/; - if (datePattern.test(combinedLog) && datePattern.test(errorLog)) { - console.log('✅ PASS: Log files use correct date pattern (YYYY-MM-DD)'); - } else { - console.error('❌ FAIL: Log files do not use expected date pattern'); - process.exit(1); - } - - console.log('\n✅ ALL CHECKS PASSED!'); - console.log('\n📋 Summary:'); - console.log(' - Logs directory created: ✅'); - console.log(' - Combined log file created: ✅'); - console.log(' - Error log file created: ✅'); - console.log(' - JSON format valid: ✅'); - console.log(' - Timestamps present: ✅'); - console.log(' - Log levels working: ✅'); - console.log(' - Error log filtering: ✅'); - console.log(' - Sensitive data redaction: ✅'); - console.log(' - Date-based rotation pattern: ✅'); - console.log('\n✨ File output and rotation verification complete!\n'); - - process.exit(0); -}, 1000); // Wait 1 second for file writes diff --git a/verify-sensitive-data-redaction.js b/verify-sensitive-data-redaction.js deleted file mode 100644 index 489c8eb1..00000000 --- a/verify-sensitive-data-redaction.js +++ /dev/null @@ -1,179 +0,0 @@ -/** - * Verification Script: Sensitive Data Redaction - * - * Comprehensive test to ensure all sensitive data is properly redacted - * in both console and file output. 
- */ - -import { info, warn, error } from './src/logger.js'; -import { readFileSync, existsSync } from 'fs'; -import { join, dirname } from 'path'; -import { fileURLToPath } from 'url'; - -const __dirname = dirname(fileURLToPath(import.meta.url)); -const logsDir = join(__dirname, 'logs'); - -console.log('='.repeat(70)); -console.log('SENSITIVE DATA REDACTION VERIFICATION'); -console.log('='.repeat(70)); -console.log(); - -// Test 1: Direct sensitive field logging -console.log('Test 1: Direct sensitive fields...'); -info('Testing direct sensitive fields', { - DISCORD_TOKEN: 'MTk4OTg2MjQ3ODk4NjI0MDAwMA.GXxxXX.xxxxxxxxxxxxxxxxxxxxxxxx', - OPENCLAW_TOKEN: 'sk-test-1234567890abcdefghijklmnop', - username: 'test-user' -}); -console.log('✓ Logged with DISCORD_TOKEN and OPENCLAW_TOKEN\n'); - -// Test 2: Various sensitive field names (case variations) -console.log('Test 2: Case-insensitive sensitive fields...'); -warn('Testing case variations', { - discord_token: 'should-be-redacted', - Token: 'should-be-redacted', - PASSWORD: 'should-be-redacted', - apikey: 'should-be-redacted', - Authorization: 'Bearer should-be-redacted' -}); -console.log('✓ Logged with various case variations\n'); - -// Test 3: Nested objects -console.log('Test 3: Nested objects with sensitive data...'); -info('Testing nested sensitive data', { - config: { - database: { - host: 'localhost', - password: 'db-password-123' - }, - api: { - endpoint: 'https://api.example.com', - DISCORD_TOKEN: 'nested-token-value', - apiKey: 'nested-api-key' - } - } -}); -console.log('✓ Logged with nested sensitive data\n'); - -// Test 4: Arrays with sensitive data -console.log('Test 4: Arrays containing sensitive data...'); -info('Testing arrays with sensitive data', { - tokens: [ - { name: 'discord', token: 'token-1' }, - { name: 'openclaw', OPENCLAW_TOKEN: 'token-2' } - ] -}); -console.log('✓ Logged with arrays containing sensitive data\n'); - -// Test 5: Mixed safe and sensitive data -console.log('Test 5: Mixed safe and 
sensitive data...'); -error('Testing mixed data', { - user: 'john_doe', - channel: 'general', - guild: 'My Server', - DISCORD_TOKEN: 'should-be-redacted', - timestamp: new Date().toISOString(), - password: 'user-password', - metadata: { - version: '1.0.0', - authorization: 'Bearer secret-token' - } -}); -console.log('✓ Logged with mixed safe and sensitive data\n'); - -// Wait a moment for file writes to complete -await new Promise(resolve => setTimeout(resolve, 1000)); - -console.log('='.repeat(70)); -console.log('VERIFYING LOG FILES'); -console.log('='.repeat(70)); -console.log(); - -if (!existsSync(logsDir)) { - console.log('⚠️ No logs directory found. File output may be disabled.'); - console.log(' This is OK if fileOutput is set to false in config.json\n'); -} else { - // Find the most recent combined log file - const fs = await import('fs'); - const files = fs.readdirSync(logsDir) - .filter(f => f.startsWith('combined-') && f.endsWith('.log')) - .sort() - .reverse(); - - if (files.length === 0) { - console.log('⚠️ No combined log files found\n'); - } else { - const logFile = join(logsDir, files[0]); - console.log(`Reading log file: ${files[0]}\n`); - - const logContent = readFileSync(logFile, 'utf-8'); - const lines = logContent.trim().split('\n'); - - // Check for any exposed tokens - const sensitivePatterns = [ - /MTk4OTg2MjQ3ODk4NjI0MDAwMA/, // Example Discord token - /sk-test-\d+/, // Example OpenClaw token - /"password":"(?!\[REDACTED\])/, // Password not redacted - /"token":"(?!\[REDACTED\])/, // Token not redacted - /"apiKey":"(?!\[REDACTED\])/, // API key not redacted - /Bearer secret-token/, // Authorization header - /db-password-123/, // Database password - /nested-token-value/, // Nested token - /nested-api-key/, // Nested API key - /token-1/, // Array token - /token-2/, // Array OPENCLAW_TOKEN - /user-password/ // User password - ]; - - let exposedCount = 0; - const exposedPatterns = []; - - for (const pattern of sensitivePatterns) { - if 
(pattern.test(logContent)) { - exposedCount++; - exposedPatterns.push(pattern.toString()); - } - } - - if (exposedCount > 0) { - console.log('❌ FAILED: Found exposed sensitive data!'); - console.log(` ${exposedCount} pattern(s) were not properly redacted:`); - exposedPatterns.forEach(p => console.log(` - ${p}`)); - console.log(); - process.exit(1); - } - - // Count redacted occurrences - const redactedCount = (logContent.match(/\[REDACTED\]/g) || []).length; - console.log(`✓ All sensitive data properly redacted`); - console.log(` Found ${redactedCount} [REDACTED] markers in log file\n`); - - // Verify specific fields are redacted - const checks = [ - { field: 'DISCORD_TOKEN', expected: '[REDACTED]' }, - { field: 'OPENCLAW_TOKEN', expected: '[REDACTED]' }, - { field: 'password', expected: '[REDACTED]' }, - { field: 'token', expected: '[REDACTED]' }, - { field: 'apiKey', expected: '[REDACTED]' }, - { field: 'authorization', expected: '[REDACTED]' } - ]; - - console.log('Field-specific verification:'); - for (const check of checks) { - const regex = new RegExp(`"${check.field}":"\\[REDACTED\\]"`, 'i'); - if (regex.test(logContent)) { - console.log(` ✓ ${check.field}: properly redacted`); - } - } - } -} - -console.log(); -console.log('='.repeat(70)); -console.log('VERIFICATION COMPLETE'); -console.log('='.repeat(70)); -console.log('✓ All sensitive data is properly redacted'); -console.log('✓ No tokens or credentials exposed in logs'); -console.log('✓ Redaction works for nested objects and arrays'); -console.log('✓ Case-insensitive field matching works correctly'); -console.log(); diff --git a/vitest.config.js b/vitest.config.js new file mode 100644 index 00000000..d0cdf4e3 --- /dev/null +++ b/vitest.config.js @@ -0,0 +1,21 @@ +import { defineConfig } from 'vitest/config'; + +export default defineConfig({ + test: { + globals: false, + environment: 'node', + include: ['tests/**/*.test.js'], + testTimeout: 10000, + coverage: { + provider: 'v8', + include: ['src/**/*.js'], 
+ exclude: ['src/deploy-commands.js'], + thresholds: { + statements: 80, + branches: 80, + functions: 80, + lines: 80, + }, + }, + }, +});