31 changes: 27 additions & 4 deletions .cursor/rules/dev-environment.mdc
@@ -6,9 +6,32 @@ alwaysApply: true
The dev environment is managed by the `wp-env` package.
In order to run code and run tests, you need to use the proper syntax to run commands.

For example:
## Testing Commands

- To run unit tests, you would `npm run test:unit`
- To read the debug.log you would `npm run wp-env run cli -- tail -n 100 wp-content/debug.log`
### Unit Tests
- To run all unit tests: `npm run test:unit`
- To run specific unit test class: `npm run test:unit -- --filter=ClassName`
- Example: `npm run test:unit -- --filter=OpenAIModuleTest`
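- A filter can also target a single test method, e.g. `npm run test:unit -- --filter=OpenAIModuleTest::test_save_backscroll` (hypothetical method name)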

Feel free to do both as often as you think is reasonable. DO NOT run local commands on this machine, as wp-env manages the environment in a Docker container.
### Integration Tests
- Integration tests in this project are tests that make calls to external APIs. All other tests are unit tests.
- To run all integration tests: `npm run test:integration`
- To run specific integration test class: `npm run test:integration -- --filter=ClassName`
- **IMPORTANT**: Do NOT run integration tests automatically - only run when specifically requested

### Other Useful Commands
- To read the debug.log: `npm run wp-env run cli -- tail -n 100 wp-content/debug.log`
- To check PHP syntax/linting: Use the linter errors shown in the interface
- To run specific test groups: `npm run test:unit -- --group=groupname`
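- Other WP-CLI commands can be run through the same wrapper, e.g. `npm run wp-env run cli -- wp option get siteurl` (following the same passthrough pattern as the debug.log command above)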

## Testing Guidelines

1. **Always run unit tests** after making code changes to verify functionality
2. **Run specific test classes** when working on particular modules (faster feedback)
3. **Only run integration tests** when specifically asked or when testing full system flows
4. **Feel free to run unit tests often** - they are fast and help catch regressions
5. **DO NOT run local commands** on this machine, as wp-env manages the environment in a Docker container

## Test Structure
- Unit tests: `tests/unit/` - Test individual methods and classes in isolation
- Integration tests: `tests/integration/` - Test full workflows and system interactions
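
For reference, a minimal unit test skeleton (illustrative file and class names, assuming the usual `WP_UnitTestCase` base class from the wp-env test setup) might look like:

```php
<?php
// tests/unit/class-example-module-test.php — illustrative path and class name.

class Example_Module_Test extends WP_UnitTestCase {
	public function test_example_behavior() {
		// Replace with real assertions against the module under test.
		$this->assertTrue( true );
	}
}
```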
102 changes: 100 additions & 2 deletions modules/openai/class-openai-module.php
@@ -1,7 +1,7 @@
<?php

require_once __DIR__ . '/class-openai-tool.php';
require_once __DIR__ . '/class-ollama.php';
require_once __DIR__ . '/class-pos-ollama-server.php';

class OpenAI_Module extends POS_Module {
public $id = 'openai';
@@ -35,7 +35,10 @@ public function register() {
add_action( 'admin_menu', array( $this, 'admin_menu' ) );
add_filter( 'pos_openai_tools', array( $this, 'register_openai_tools' ) );
$this->register_cli_command( 'tool', 'cli_openai_tool' );

$this->register_block( 'tool', array( 'render_callback' => array( $this, 'render_tool_block' ) ) );
$this->register_block( 'message', array() );

require_once __DIR__ . '/chat-page.php';
}

@@ -901,6 +904,95 @@ public function api_call( $url, $data ) {
return json_decode( $body );
}

/**
* Save conversation backscroll as a note
*
* @param array $backscroll Array of conversation messages
* @param array $search_args Search arguments passed to get_posts to find existing notes; also used to configure new posts
* - 'name': The post slug/name to search for and use when creating new posts
* - 'post_title': Title for new posts (optional, defaults to auto-generated)
* - 'notebook': Notebook slug to assign to new posts (optional, defaults to 'ai-chats')
* - Any other valid get_posts() arguments for finding existing posts
* @return int|WP_Error Post ID on success, WP_Error on failure
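*
* Example usage (illustrative slug and notebook values):
*   $post_id = $module->save_backscroll( $backscroll, array( 'name' => 'chat-abc123', 'notebook' => 'ai-chats' ) );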
*/
public function save_backscroll( array $backscroll, array $search_args ) {
$notes_module = POS::get_module_by_id( 'notes' );
if ( ! $notes_module ) {
return new WP_Error( 'notes_module_not_found', 'Notes module not available' );
}

// Create content from backscroll messages
$content_blocks = array();
foreach ( $backscroll as $message ) {
if ( is_object( $message ) ) {
$message = (array) $message;
}

if ( ! isset( $message['role'] ) ) {
continue;
}

$role = $message['role'];
$content = $message['content'] ?? '';

if ( in_array( $role, array( 'user', 'assistant' ), true ) ) {
// Create message block
$content_blocks[] = get_comment_delimited_block_content(
'pos/ai-message',
array(
'role' => $role,
'content' => $content,
'id' => $message['id'] ?? '',
),
''
);
}
}

// Use notes module's list method to find existing posts
$existing_posts = $notes_module->list( $search_args, 'ai-chats' );

// Prepare post data
$post_data = array(
// TODO: generate title with AI.
'post_title' => $search_args['post_title'] ?? 'Chat ' . gmdate( 'Y-m-d H:i:s' ),
'post_type' => $notes_module->id,
'post_name' => $search_args['name'] ?? 'chat-' . gmdate( 'Y-m-d-H-i-s' ),
'post_status' => 'private',
);

// Create or update post
if ( ! empty( $existing_posts ) ) {
$post_id = wp_update_post(
array(
'ID' => $existing_posts[0]->ID,
'post_content' => implode( "\n\n", $content_blocks ),
)
);
} else {
$post_data['post_content'] = implode( "\n\n", $content_blocks );
$post_id = wp_insert_post( $post_data );

// Add to specified notebook or default to OpenAI chats
$notebook_slug = $search_args['notebook'] ?? 'ai-chats';
$notebook = get_term_by( 'slug', $notebook_slug, 'notebook' );

if ( ! $notebook ) {
$notebook_name = 'ai-chats' === $notebook_slug ? 'AI Chats' : ucwords( str_replace( '-', ' ', $notebook_slug ) );
$term_result = wp_insert_term( $notebook_name, 'notebook', array( 'slug' => $notebook_slug ) );
if ( ! is_wp_error( $term_result ) ) {
$notebook = get_term( $term_result['term_id'], 'notebook' );
}
}

if ( $notebook ) {
wp_set_object_terms( $post_id, array( $notebook->term_id ), 'notebook' );
}
}

return $post_id;
}

public function vercel_chat( WP_REST_Request $request ) {
$params = $request->get_json_params();

@@ -967,7 +1059,13 @@ public function vercel_chat( WP_REST_Request $request ) {
$vercel_sdk->sendToolCall( $data->id, $data->function->name, json_decode( $data->function->arguments, true ) );
}
} );
set_transient( 'vercel_chat_' . $params['id'], $openai_messages, 60 * 60 );
set_transient( 'vercel_chat_' . $params['id'], $response, 60 * 60 );
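// Persist the conversation as a note, keyed by the Vercel chat ID.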
$this->save_backscroll(
$response,
array(
'name' => $params['id'],
)
);

// $vercel_sdk->sendText( $response->choices[0]->message->content );
$vercel_sdk->finishStep( 'stop', array( 'promptTokens' => 0, 'completionTokens' => 0 ), false );
modules/openai/class-pos-ollama-server.php
@@ -1,4 +1,6 @@
<?php
require_once __DIR__ . '/class-openai-module.php';

/**
* Ollama Mock Server Class
*
@@ -30,7 +32,7 @@
*/
class POS_Ollama_Server {

public $module;
public \OpenAI_Module $module; // Reference to OpenAI_Module
public $rest_namespace = 'ollama/v1';
/**
* Array of available models in this mock server.
@@ -45,10 +47,10 @@ class POS_Ollama_Server {
public function __construct( $module ) {
$this->module = $module;
$token = $this->module->get_setting( 'ollama_auth_token' );
$this->module->settings[ 'ollama_auth_token' ] = array(
$this->module->settings['ollama_auth_token'] = array(
'type' => 'text',
'name' => 'Token for authorizing OLLAMA mock API.',
'label' => strlen( $token ) < 3 ? 'Set a token to enable Ollama-compatible API for external clients' : 'OLLAMA Api accessible at <a href="' . add_query_arg( 'token', $token, get_rest_url( null, $this->rest_namespace ) ) . '" target="_blank">here</a>',
'label' => strlen( $token ) < 3 ? 'Set a token to enable Ollama-compatible API for external clients' : 'OLLAMA Api accessible at <a href="' . add_query_arg( 'token', $token, get_rest_url( null, $this->rest_namespace ) ) . '" target="_blank">here</a>',
'default' => '0',
);
if ( strlen( $token ) >= 3 ) {
@@ -312,6 +314,24 @@ public function get_version( WP_REST_Request $request ): WP_REST_Response {
);
}

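/**
* Calculate a rolling hash identifying a conversation.
*
* Concatenates the trimmed contents of user/assistant messages up to and
* including the last assistant/system message (or all messages when no such
* message exists) and returns the SHA-256 of the result, so the same
* conversation prefix resolves to the same stored note.
*
* @param array $messages Conversation messages (arrays or objects with role/content keys).
* @return string SHA-256 hash of the concatenated message contents.
*/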
private function calculate_rolling_hash( $messages ) {
$hash = '';
$last_assistant_index = -1;
foreach ( $messages as $index => $message ) {
$message = (array) $message;
if ( in_array( $message['role'], array( 'assistant', 'system' ), true ) ) {
$last_assistant_index = $index;
}
}
foreach ( $messages as $index => $message ) {
$message = (array) $message;
if ( ( $index <= $last_assistant_index || $last_assistant_index === -1 ) && in_array( $message['role'], array( 'user', 'assistant' ), true ) ) {
$hash .= "\n\n" . trim( $message['content'] );
}
}
return hash( 'sha256', trim( $hash ) );
}

/**
* POST /api/chat - Chat endpoint.
*
@@ -345,7 +365,38 @@ function( $message ) {
return $message['role'] !== 'system';
}
);

$hash = $this->calculate_rolling_hash( $messages );

$result = $this->module->complete_backscroll( $non_system_messages );

// Use the OpenAI module's save_backscroll method with hash as identifier
$post_id = $this->module->save_backscroll(
$result,
array(
'meta_input' => array(
'ollama-hash' => $hash,
),
)
);

if ( is_wp_error( $post_id ) ) {
return new WP_REST_Response(
array( 'error' => 'Failed to save conversation: ' . $post_id->get_error_message() ),
500
);
}

// If we updated an existing post, refresh the stored hash from the completed conversation so the subsequent lookup finds the correct post.
wp_update_post(
array(
'ID' => $post_id,
'meta_input' => array(
'ollama-hash' => $this->calculate_rolling_hash( $result ),
),
)
);

$last_message = (array) end( $result );
$answer = $last_message['content'] ?? 'Hello from PersonalOS Mock Ollama!';
// $answer = 'Echo: ' . json_encode( $data ); //$content;
24 changes: 22 additions & 2 deletions src-chatbot/app/(chat)/page.tsx
@@ -1,9 +1,12 @@
// import { cookies } from 'next/headers'; // Removed for static export: caused "used headers" error

'use client';

import { Chat } from '@/components/chat';
import { DEFAULT_CHAT_MODEL } from '@/lib/ai/models';
import { generateUUID } from '@/lib/utils';
import { DataStreamHandler } from '@/components/data-stream-handler';
import { useEffect, useState } from 'react';
// import { auth } from '../(auth)/auth'; // auth() call disabled for static export
// import { redirect } from 'next/navigation'; // Redirect disabled for static export
// import type { Session } from 'next-auth'; // Removed as next-auth is uninstalled
@@ -26,7 +29,15 @@ interface MockSession {
expires: string;
}

export default async function Page() {
export default function Page() {
// Generate unique ID on client-side to avoid static export issue
const [id, setId] = useState<string>('');

useEffect(() => {
// Generate a unique ID after component mounts on client-side
setId(generateUUID());
}, []);

// const sessionFromAuth = await auth(); // auth() call disabled for static export
console.warn('auth() call in app/(chat)/page.tsx disabled. Using mock session.');

@@ -47,13 +58,22 @@ export default async function Page() {
// redirect('/api/auth/guest');
// }

const id = generateUUID();
console.log( 'id: ' + id );

// For static export, cookie reading is disabled. Always use default model.
// const cookieStore = await cookies(); // Call to cookies() disabled
// const modelIdFromCookie = cookieStore.get('chat-model');
console.warn('Cookie reading in app/(chat)/page.tsx disabled for static export. Using default chat model.');

// Show loading state until ID is generated
if (!id) {
return (
<div className="flex items-center justify-center h-dvh">
<div className="text-muted-foreground">Loading...</div>
</div>
);
}

return (
<>
<Chat