From 6cfb257d591d496c97f1da05e93e8e6d04fabf9c Mon Sep 17 00:00:00 2001 From: Shaw Date: Tue, 17 Jun 2025 11:58:13 -0700 Subject: [PATCH 1/5] frontend tests --- .gitignore | 7 +- cypress.config.ts | 17 + cypress/e2e/simple-test.cy.ts | 7 + cypress/e2e/ui-components-simple.cy.ts | 79 + cypress/e2e/ui-components.cy.ts | 170 ++ cypress/support/commands.ts | 84 + cypress/support/e2e.ts | 59 + images/README.md | 24 - package.json | 24 +- scripts/test-server.js | 130 + src/__tests__/e2e/knowledge-e2e.test.ts | 167 ++ src/__tests__/e2e/startup-loading.test.ts | 211 ++ .../__tests__/unit}/action.test.ts | 163 +- .../unit/document-repository.test.ts | 247 ++ .../unit/fragment-repository.test.ts | 403 +++ src/__tests__/unit/schema.test.ts | 109 + src/actions.ts | 183 +- src/config.ts | 285 +-- src/ctx-embeddings.ts | 152 +- src/docs-loader.ts | 10 +- src/document-processor.ts | 26 +- src/frontend/index.tsx | 2 +- src/frontend/test-components.html | 315 +++ src/frontend/ui/badge.tsx | 4 +- src/frontend/ui/button.tsx | 5 +- src/frontend/ui/card.tsx | 24 +- src/frontend/ui/knowledge-tab.tsx | 2182 +++++++++-------- src/frontend/ui/memory-graph.tsx | 569 +++-- src/index.ts | 18 +- src/provider.ts | 24 +- src/repositories/document-repository.ts | 147 ++ src/repositories/fragment-repository.ts | 212 ++ src/repositories/index.ts | 6 + src/routes.ts | 26 +- src/schema.ts | 105 + src/service.ts | 22 +- src/tests.ts | 444 ++-- src/types.ts | 119 +- tsconfig.json | 6 +- tsup.config.ts | 1 + vitest.config.ts | 27 + 41 files changed, 4797 insertions(+), 2018 deletions(-) create mode 100644 cypress.config.ts create mode 100644 cypress/e2e/simple-test.cy.ts create mode 100644 cypress/e2e/ui-components-simple.cy.ts create mode 100644 cypress/e2e/ui-components.cy.ts create mode 100644 cypress/support/commands.ts create mode 100644 cypress/support/e2e.ts delete mode 100644 images/README.md create mode 100644 scripts/test-server.js create mode 100644 src/__tests__/e2e/knowledge-e2e.test.ts create mode 100644 src/__tests__/e2e/startup-loading.test.ts rename {__tests__ => src/__tests__/unit}/action.test.ts (55%) create mode 100644 src/__tests__/unit/document-repository.test.ts create mode 100644 src/__tests__/unit/fragment-repository.test.ts create mode 100644 src/__tests__/unit/schema.test.ts create mode 100644 src/frontend/test-components.html create mode 100644 src/repositories/document-repository.ts create mode 100644 src/repositories/fragment-repository.ts create mode 100644 src/repositories/index.ts create mode 100644 src/schema.ts create mode 100644 vitest.config.ts diff --git a/.gitignore b/.gitignore index f0ad109..97fc338 100644 --- a/.gitignore +++ b/.gitignore @@ -2,5 +2,10 @@ dist node_modules +.eliza .elizadb -.env \ No newline at end of file +.elizadb* +.env +.cursor +coverage +cypress/screenshots \ No newline at end of file diff --git a/cypress.config.ts b/cypress.config.ts new file mode 100644 index 0000000..d5e5631 --- /dev/null +++ b/cypress.config.ts @@ -0,0 +1,17 @@ +import { defineConfig } from 'cypress'; + +export default defineConfig({ + e2e: { + baseUrl: 'http://localhost:3000', + supportFile: 'cypress/support/e2e.ts', + specPattern: 'cypress/e2e/**/*.cy.{js,jsx,ts,tsx}', + video: false, + screenshotOnRunFailure: true, + viewportWidth: 1280, + viewportHeight: 720, + defaultCommandTimeout: 10000, + requestTimeout: 10000, + responseTimeout: 10000, + chromeWebSecurity: false, + }, +}); diff --git a/cypress/e2e/simple-test.cy.ts b/cypress/e2e/simple-test.cy.ts new file mode 100644 index 
0000000..053932e --- /dev/null +++ b/cypress/e2e/simple-test.cy.ts @@ -0,0 +1,7 @@ +describe('Simple Test', () => { + it('should visit the Cypress example page', () => { + cy.visit('https://example.cypress.io', { failOnStatusCode: false }); + cy.contains('type').click(); + cy.url().should('include', '/commands/actions'); + }); +}); \ No newline at end of file diff --git a/cypress/e2e/ui-components-simple.cy.ts b/cypress/e2e/ui-components-simple.cy.ts new file mode 100644 index 0000000..6e53b52 --- /dev/null +++ b/cypress/e2e/ui-components-simple.cy.ts @@ -0,0 +1,79 @@ +describe('UI Components - Simple Tests', () => { + beforeEach(() => { + cy.visit('/test-components'); + }); + + describe('Badge Component', () => { + it('should render badges with all variants', () => { + cy.get('[data-testid="badge-default"]').should('exist').and('be.visible'); + cy.get('[data-testid="badge-outline"]').should('exist').and('be.visible'); + cy.get('[data-testid="badge-secondary"]').should('exist').and('be.visible'); + cy.get('[data-testid="badge-destructive"]').should('exist').and('be.visible'); + }); + + it('should display correct text content', () => { + cy.get('[data-testid="badge-default"]').should('contain', 'Test Badge'); + }); + }); + + describe('Button Component', () => { + it('should render buttons with all variants', () => { + cy.get('[data-testid="button-default"]').should('exist').and('be.visible'); + cy.get('[data-testid="button-outline"]').should('exist').and('be.visible'); + cy.get('[data-testid="button-ghost"]').should('exist').and('be.visible'); + cy.get('[data-testid="button-destructive"]').should('exist').and('be.visible'); + }); + + it('should handle click events', () => { + cy.get('[data-testid="button-clickable"]').click(); + cy.get('[data-testid="click-count"]').should('contain', '1'); + + cy.get('[data-testid="button-clickable"]').click(); + cy.get('[data-testid="click-count"]').should('contain', '2'); + }); + + it('should be disabled when disabled prop is true', () => { + cy.get('[data-testid="button-disabled"]').should('be.disabled'); + }); + }); + + describe('Card Component', () => { + it('should render card with all sections', () => { + cy.get('[data-testid="card"]').should('exist').and('be.visible'); + cy.get('[data-testid="card-header"]').should('exist'); + cy.get('[data-testid="card-title"]').should('contain', 'Test Card Title'); + cy.get('[data-testid="card-description"]').should('contain', 'Test Description'); + cy.get('[data-testid="card-content"]').should('contain', 'Test Content'); + cy.get('[data-testid="card-footer"]').should('contain', 'Test Footer'); + }); + }); + + describe('Input Component', () => { + it('should render different input types', () => { + cy.get('[data-testid="input-default"]').should('have.attr', 'type', 'text'); + cy.get('[data-testid="input-file"]').should('have.attr', 'type', 'file'); + }); + + it('should handle text input', () => { + cy.get('[data-testid="input-controlled"]') + .clear() + .type('Hello Cypress') + .should('have.value', 'Hello Cypress'); + }); + + it('should show placeholder text', () => { + cy.get('[data-testid="input-placeholder"]') + .should('have.attr', 'placeholder', 'Enter text...'); + }); + }); + + describe('Table Component', () => { + it('should render table structure', () => { + cy.get('[data-testid="table"]').should('exist'); + cy.get('[data-testid="table-header"]').should('exist'); + cy.get('[data-testid="table-body"]').should('exist'); + cy.get('[data-testid="table-footer"]').should('exist'); + 
cy.get('[data-testid="table-caption"]').should('contain', 'Test Caption'); + }); + }); +}); \ No newline at end of file diff --git a/cypress/e2e/ui-components.cy.ts b/cypress/e2e/ui-components.cy.ts new file mode 100644 index 0000000..c77b2ed --- /dev/null +++ b/cypress/e2e/ui-components.cy.ts @@ -0,0 +1,170 @@ +describe('UI Components', () => { + beforeEach(() => { + // Visit a test page that includes all components + cy.visit('/test-components'); + }); + + describe('Badge Component', () => { + it('should render with default variant', () => { + cy.get('[data-testid="badge-default"]').should('exist'); + cy.get('[data-testid="badge-default"]').should('have.class', 'bg-primary'); + }); + + it('should render with all variants', () => { + const variants = ['default', 'outline', 'secondary', 'destructive']; + variants.forEach(variant => { + cy.get(`[data-testid="badge-${variant}"]`).should('exist'); + }); + }); + + it('should display children content', () => { + cy.get('[data-testid="badge-default"]').should('contain', 'Test Badge'); + }); + + it('should apply custom className', () => { + cy.get('[data-testid="badge-custom"]').should('have.class', 'custom-class'); + }); + }); + + describe('Button Component', () => { + it('should render with default props', () => { + cy.get('[data-testid="button-default"]').should('exist'); + cy.get('[data-testid="button-default"]').should('have.attr', 'type', 'button'); + }); + + it('should handle click events', () => { + cy.get('[data-testid="button-clickable"]').click(); + cy.get('[data-testid="click-count"]').should('contain', '1'); + }); + + it('should be disabled when disabled prop is true', () => { + cy.get('[data-testid="button-disabled"]').should('be.disabled'); + cy.get('[data-testid="button-disabled"]').should('have.class', 'disabled:opacity-50'); + }); + + it('should render all variants', () => { + const variants = ['default', 'outline', 'ghost', 'destructive']; + variants.forEach(variant => { + cy.get(`[data-testid="button-${variant}"]`).should('exist'); + }); + }); + + it('should render all sizes', () => { + const sizes = ['default', 'sm', 'lg', 'icon']; + sizes.forEach(size => { + cy.get(`[data-testid="button-size-${size}"]`).should('exist'); + }); + }); + + it('should show title on hover', () => { + cy.get('[data-testid="button-with-title"]').trigger('mouseenter'); + cy.get('[data-testid="button-with-title"]').should('have.attr', 'title', 'Test Title'); + }); + }); + + describe('Card Components', () => { + it('should render Card with all sub-components', () => { + cy.get('[data-testid="card"]').should('exist'); + cy.get('[data-testid="card-header"]').should('exist'); + cy.get('[data-testid="card-title"]').should('exist'); + cy.get('[data-testid="card-description"]').should('exist'); + cy.get('[data-testid="card-content"]').should('exist'); + cy.get('[data-testid="card-footer"]').should('exist'); + }); + + it('should apply proper styling to Card', () => { + cy.get('[data-testid="card"]').should('have.class', 'rounded-lg'); + cy.get('[data-testid="card"]').should('have.class', 'border'); + cy.get('[data-testid="card"]').should('have.class', 'bg-card'); + }); + + it('should render content in each section', () => { + cy.get('[data-testid="card-title"]').should('contain', 'Test Card Title'); + cy.get('[data-testid="card-description"]').should('contain', 'Test Description'); + cy.get('[data-testid="card-content"]').should('contain', 'Test Content'); + cy.get('[data-testid="card-footer"]').should('contain', 'Test Footer'); + }); + }); + + 
describe('Input Component', () => { + it('should render with default type text', () => { + cy.get('[data-testid="input-default"]').should('have.attr', 'type', 'text'); + }); + + it('should handle value changes', () => { + cy.get('[data-testid="input-controlled"]').type('Hello World'); + cy.get('[data-testid="input-controlled"]').should('have.value', 'Hello World'); + }); + + it('should show placeholder', () => { + cy.get('[data-testid="input-placeholder"]').should('have.attr', 'placeholder', 'Enter text...'); + }); + + it('should be disabled when disabled prop is true', () => { + cy.get('[data-testid="input-disabled"]').should('be.disabled'); + }); + + it('should handle file input with multiple files', () => { + cy.get('[data-testid="input-file"]').should('have.attr', 'type', 'file'); + cy.get('[data-testid="input-file"]').should('have.attr', 'multiple'); + cy.get('[data-testid="input-file"]').should('have.attr', 'accept', '.pdf,.txt'); + }); + + it('should apply custom className', () => { + cy.get('[data-testid="input-custom"]').should('have.class', 'custom-input-class'); + }); + }); + + describe('Table Components', () => { + it('should render table with all sub-components', () => { + cy.get('[data-testid="table"]').should('exist'); + cy.get('[data-testid="table-header"]').should('exist'); + cy.get('[data-testid="table-body"]').should('exist'); + cy.get('[data-testid="table-footer"]').should('exist'); + }); + + it('should render table rows and cells', () => { + cy.get('[data-testid="table-row"]').should('have.length.at.least', 1); + cy.get('[data-testid="table-head"]').should('exist'); + cy.get('[data-testid="table-cell"]').should('exist'); + }); + + it('should have hover effect on rows', () => { + cy.get('[data-testid="table-row"]').first().trigger('mouseenter', { force: true }); + cy.get('[data-testid="table-row"]').first().should('have.class', 'hover:bg-muted/50'); + }); + + it('should render table caption', () => { + cy.get('[data-testid="table-caption"]').should('exist'); + cy.get('[data-testid="table-caption"]').should('contain', 'Test Caption'); + }); + }); + + describe('Tabs Components', () => { + it('should render tabs with all sub-components', () => { + cy.get('[data-testid="tabs"]').should('exist'); + cy.get('[data-testid="tabs-list"]').should('exist'); + cy.get('[data-testid="tabs-trigger-1"]').should('exist'); + cy.get('[data-testid="tabs-trigger-2"]').should('exist'); + cy.get('[data-testid="tabs-content-1"]').should('exist'); + }); + + it('should switch between tabs', () => { + // First tab should be active by default + cy.get('[data-testid="tabs-trigger-1"]').should('have.attr', 'data-state', 'active'); + cy.get('[data-testid="tabs-content-1"]').should('be.visible'); + + // Second tab should be inactive + cy.get('[data-testid="tabs-trigger-2"]').should('have.attr', 'data-state', 'inactive'); + }); + + it('should handle keyboard navigation', () => { + // Since this is static HTML, we'll just verify the tabs can be focused + cy.get('[data-testid="tabs-trigger-1"]').focus(); + cy.get('[data-testid="tabs-trigger-1"]').should('have.focus'); + + cy.get('[data-testid="tabs-trigger-2"]').focus(); + cy.get('[data-testid="tabs-trigger-2"]').should('have.focus'); + }); + }); +}); \ No newline at end of file diff --git a/cypress/support/commands.ts b/cypress/support/commands.ts new file mode 100644 index 0000000..8070fe1 --- /dev/null +++ b/cypress/support/commands.ts @@ -0,0 +1,84 @@ +// *********************************************** +// This example commands.ts shows you how to +// 
create various custom commands and overwrite +// existing commands. +// +// For more comprehensive examples of custom +// commands please read more here: +// https://on.cypress.io/custom-commands +// *********************************************** + +// Custom command to upload files +Cypress.Commands.add('uploadFile', (selector: string, fileName: string, fileContent: string, mimeType: string = 'text/plain') => { + cy.get(selector).selectFile({ + contents: Cypress.Buffer.from(fileContent), + fileName: fileName, + mimeType: mimeType + }, { force: true }); +}); + +// Custom command to wait for API response +Cypress.Commands.add('waitForApi', (alias: string, timeout: number = 10000) => { + cy.wait(alias, { timeout }); +}); + +// Declare custom commands for TypeScript +declare global { + namespace Cypress { + interface Chainable { + uploadFile(selector: string, fileName: string, fileContent: string, mimeType?: string): Chainable; + waitForApi(alias: string, timeout?: number): Chainable; + } + } +} + +// Custom Cypress commands for Knowledge plugin + +Cypress.Commands.add('visitKnowledgePanel', () => { + cy.visit('/plugins/knowledge/display'); + cy.get('[data-testid="knowledge-panel"]', { timeout: 10000 }).should('be.visible'); +}); + +Cypress.Commands.add( + 'uploadKnowledgeFile', + (fileName: string, content: string, mimeType = 'text/plain') => { + // Create a file blob + const blob = new Blob([content], { type: mimeType }); + const file = new File([blob], fileName, { type: mimeType }); + + // Find file input and upload + cy.get('[data-testid="file-upload-input"]').selectFile( + { + contents: Cypress.Buffer.from(content), + fileName: fileName, + mimeType: mimeType, + }, + { force: true } + ); + + // Wait for upload to complete + cy.get('[data-testid="upload-success"]', { timeout: 10000 }).should('be.visible'); + } +); + +Cypress.Commands.add('searchKnowledge', (query: string) => { + cy.get('[data-testid="knowledge-search-input"]').clear().type(query); + cy.get('[data-testid="knowledge-search-button"]').click(); + + // Wait for search results + cy.get('[data-testid="search-results"]', { timeout: 5000 }).should('be.visible'); +}); + +Cypress.Commands.add('deleteDocument', (title: string) => { + // Find document by title + cy.contains('[data-testid="document-item"]', title).find('[data-testid="delete-button"]').click(); + + // Confirm deletion + cy.get('[data-testid="confirm-delete"]').click(); + + // Verify document is removed + cy.contains('[data-testid="document-item"]', title).should('not.exist'); +}); + +// Prevent TypeScript errors +export {}; diff --git a/cypress/support/e2e.ts b/cypress/support/e2e.ts new file mode 100644 index 0000000..9273879 --- /dev/null +++ b/cypress/support/e2e.ts @@ -0,0 +1,59 @@ +// *********************************************************** +// This example support/e2e.ts is processed and +// loaded automatically before your test files. +// +// This is a great place to put global configuration and +// behavior that modifies Cypress. +// +// You can change the location of this file or turn off +// automatically serving support files with the +// 'supportFile' configuration option. 
+// +// You can read more here: +// https://on.cypress.io/configuration +// *********************************************************** + +// Cypress support file for Knowledge plugin tests + +// Import commands.ts +import './commands'; + +// Alternatively you can use CommonJS syntax: +// require('./commands') + +// Custom commands for Knowledge plugin testing +declare global { + namespace Cypress { + interface Chainable { + /** + * Navigate to the knowledge panel + */ + visitKnowledgePanel(): Chainable; + + /** + * Upload a file to the knowledge base + */ + uploadKnowledgeFile(fileName: string, content: string, mimeType?: string): Chainable; + + /** + * Search for knowledge + */ + searchKnowledge(query: string): Chainable; + + /** + * Delete a document by title + */ + deleteDocument(title: string): Chainable; + } + } +} + +// Prevent TypeScript errors +export {}; + +// Disable uncaught exception handling for React development warnings +Cypress.on('uncaught:exception', (err, runnable) => { + // Returning false here prevents Cypress from failing the test + // on uncaught exceptions, which is useful for React development warnings + return false; +}); diff --git a/images/README.md b/images/README.md deleted file mode 100644 index 050b354..0000000 --- a/images/README.md +++ /dev/null @@ -1,24 +0,0 @@ -# Required Images for ElizaOS Plugins - -Please add the following required images to this directory: - -## logo.jpg - -- **Size**: 400x400px square -- **Max size**: 500KB -- **Purpose**: Main logo for your plugin displayed in the registry and UI - -## banner.jpg - -- **Size**: 1280x640px (2:1 aspect ratio) -- **Max size**: 1MB -- **Purpose**: Banner image for your plugin displayed in the registry - -## Guidelines - -- Use clear, high-resolution images -- Keep file sizes optimized -- Follow the ElizaOS brand guidelines -- Include alt text in your documentation for accessibility - -These files are required for registry submission. Your plugin submission will not be accepted without these images. 
diff --git a/package.json b/package.json index 457dccf..83c20d0 100644 --- a/package.json +++ b/package.json @@ -31,9 +31,13 @@ "@ai-sdk/google": "^1.2.18", "@ai-sdk/openai": "^1.3.22", "@elizaos/core": "^1.0.0", + "@elizaos/plugin-anthropic": "^1.0.0", "@openrouter/ai-sdk-provider": "^0.4.5", "@tanstack/react-query": "^5.51.1", + "@types/d3-shape": "^3.1.7", + "@types/mdx": "^2.0.13", "@types/multer": "^1.4.13", + "@types/node-forge": "^1.3.11", "@vitejs/plugin-react-swc": "^3.10.0", "ai": "^4.3.15", "clsx": "^2.1.1", @@ -59,16 +63,32 @@ "tailwindcss": "^4.1.0", "tailwindcss-animate": "^1.0.7", "postcss": "^8.5.3", - "autoprefixer": "^10.4.19" + "autoprefixer": "^10.4.19", + "vitest": "^2.0.0", + "cypress": "^13.0.0", + "start-server-and-test": "^2.0.0", + "@types/node": "^20.0.0", + "drizzle-orm": "^0.36.0", + "uuid": "^10.0.0" }, "scripts": { "dev": "tsup --watch", "build": "vite build && tsup", "lint": "prettier --write ./src", "test": "elizaos test", + "test:all": "elizaos test && bun run test:cypress:ci", + "test:unit": "vitest run", + "test:unit:watch": "vitest", + "test:e2e": "elizaos test --e2e", + "test:cypress": "cypress run", + "test:cypress:open": "cypress open", + "test:cypress:ci": "start-server-and-test 'bun run dev:server' http://localhost:3000 'cypress run'", + "dev:server": "node scripts/test-server.js", "format": "prettier --write ./src", "format:check": "prettier --check ./src", - "clean": "rm -rf dist .turbo node_modules .turbo-tsconfig.json tsconfig.tsbuildinfo" + "clean": "rm -rf dist .turbo node_modules .turbo-tsconfig.json tsconfig.tsbuildinfo", + "lint:ci": "eslint --cache .", + "check-types": "tsc --noEmit" }, "publishConfig": { "access": "public" diff --git a/scripts/test-server.js b/scripts/test-server.js new file mode 100644 index 0000000..71129f4 --- /dev/null +++ b/scripts/test-server.js @@ -0,0 +1,130 @@ +import { createServer } from 'http'; +import { readFileSync } from 'fs'; +import { join, dirname } from 'path'; +import { fileURLToPath } from 'url'; + +const __dirname = dirname(fileURLToPath(import.meta.url)); +const PORT = process.env.PORT || 3000; + +const server = createServer((req, res) => { + console.log(`Request: ${req.method} ${req.url}`); + + // Handle test components page + if (req.url === '/test-components') { + const testPagePath = join(__dirname, '..', 'src', 'frontend', 'test-components.html'); + try { + const html = readFileSync(testPagePath, 'utf8'); + res.writeHead(200, { 'Content-Type': 'text/html' }); + res.end(html); + } catch (error) { + res.writeHead(404); + res.end('Test components page not found'); + } + return; + } + + // Handle knowledge page and plugin display routes + if (req.url.startsWith('/knowledge') || req.url.includes('/plugins/knowledge/display')) { + const agentId = 'test-agent-123'; + const html = ` + + + + + + Knowledge - Test + + + + +
+        <!-- knowledge panel: headings "Knowledge Management" and "Knowledge";
+             search, upload, and document-list controls; document items
+             "Document 1" and "Document 2" -->
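+        <!-- Minimal sketch of the panel body. The tag structure below is an
+             assumption; the data-testid hooks are taken from the selectors
+             that cypress/support/commands.ts in this patch queries. -->
+        <div data-testid="knowledge-panel">
+          <h1>Knowledge Management</h1>
+          <h2>Knowledge</h2>
+          <input data-testid="knowledge-search-input" type="text" placeholder="Search knowledge..." />
+          <button data-testid="knowledge-search-button">Search</button>
+          <div data-testid="search-results"></div>
+          <input data-testid="file-upload-input" type="file" multiple accept=".pdf,.txt,.md" />
+          <div data-testid="document-item">
+            Document 1
+            <button data-testid="delete-button">Delete</button>
+          </div>
+          <div data-testid="document-item">
+            Document 2
+            <button data-testid="delete-button">Delete</button>
+          </div>
+          <button data-testid="confirm-delete">Confirm</button>
+        </div>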
+ +`; + res.writeHead(200, { 'Content-Type': 'text/html' }); + res.end(html); + return; + } + + // Handle CSS + if (req.url === '/index.css') { + const cssPath = join(__dirname, '..', 'src', 'frontend', 'index.css'); + try { + const css = readFileSync(cssPath, 'utf8'); + res.writeHead(200, { 'Content-Type': 'text/css' }); + res.end(css); + } catch (error) { + res.writeHead(404); + res.end('CSS not found'); + } + return; + } + + // Mock API endpoints + if (req.url.startsWith('/api/')) { + res.writeHead(200, { 'Content-Type': 'application/json' }); + + if (req.url.includes('/documents')) { + res.end(JSON.stringify({ + data: { + memories: [ + { + id: 'doc-1', + content: { text: 'Test document' }, + metadata: { + type: 'document', + title: 'Test Document 1', + filename: 'test1.md', + fileExt: 'md' + }, + createdAt: Date.now() + } + ] + } + })); + } else if (req.url.includes('/knowledges')) { + res.end(JSON.stringify({ + data: { + chunks: [] + } + })); + } else { + res.end(JSON.stringify({ success: true })); + } + return; + } + + // Default response + res.writeHead(404); + res.end('Not found'); +}); + +server.listen(PORT, () => { + console.log(`Test server running at http://localhost:${PORT}`); +}); \ No newline at end of file diff --git a/src/__tests__/e2e/knowledge-e2e.test.ts b/src/__tests__/e2e/knowledge-e2e.test.ts new file mode 100644 index 0000000..84b111d --- /dev/null +++ b/src/__tests__/e2e/knowledge-e2e.test.ts @@ -0,0 +1,167 @@ +import { TestCase, IAgentRuntime, UUID } from '@elizaos/core'; +import { KnowledgeService } from '../../service'; +import path from 'path'; +import fs from 'fs/promises'; + +/** + * E2E test case for the Knowledge plugin + * Tests document loading, processing, and retrieval + */ +const knowledgeE2ETest: TestCase = { + name: 'Knowledge Plugin E2E Test', + + async fn(runtime: IAgentRuntime): Promise { + console.log('Starting Knowledge Plugin E2E Tests...\n'); + + // Test 1: Service initialization + const service = runtime.getService('knowledge') as KnowledgeService; + if (!service) { + throw new Error('Knowledge service not found'); + } + console.log('✓ Knowledge service initialized'); + + // Test 2: Create test documents + const docsPath = path.join(process.cwd(), 'test-docs'); + await fs.mkdir(docsPath, { recursive: true }); + + const testDoc = { + filename: 'test-knowledge.md', + content: `# Test Knowledge Document + +This is a test document for the knowledge service. +It contains information about testing. + +## Important Section +This section contains critical information that should be retrievable. +The knowledge service should index and chunk this content properly. 
+ +## Another Section +Additional test content to ensure proper document processing.`, + }; + + await fs.writeFile(path.join(docsPath, testDoc.filename), testDoc.content); + console.log('✓ Created test document'); + + // Test 3: Load documents + try { + // Set the path for document loading + const originalPath = process.env.KNOWLEDGE_PATH; + process.env.KNOWLEDGE_PATH = docsPath; + + const { loadDocsFromPath } = await import('../../docs-loader'); + const loadResult = await loadDocsFromPath(service, runtime.agentId); + + if (loadResult.successful === 0) { + throw new Error('No documents were loaded'); + } + console.log(`✓ Loaded ${loadResult.successful} document(s)`); + + // Restore original path + if (originalPath) { + process.env.KNOWLEDGE_PATH = originalPath; + } else { + delete process.env.KNOWLEDGE_PATH; + } + } catch (error) { + console.error('Failed to load documents:', error); + throw error; + } + + // Test 4: Verify document in database + const documents = await service.getMemories({ + tableName: 'documents', + count: 100, + }); + + const testDocument = documents.find( + (d) => (d.metadata as any)?.originalFilename === testDoc.filename + ); + + if (!testDocument) { + throw new Error('Test document not found in database'); + } + console.log('✓ Document stored in database'); + + // Test 5: Verify fragments were created + const fragments = await service.getMemories({ + tableName: 'knowledge', + count: 100, + }); + + const documentFragments = fragments.filter( + (f) => (f.metadata as any)?.documentId === testDocument.id + ); + + if (documentFragments.length === 0) { + throw new Error('No fragments found for test document'); + } + console.log(`✓ Created ${documentFragments.length} fragments`); + + // Test 6: Test knowledge retrieval + const testMessage = { + id: 'test-msg-1' as UUID, + content: { text: 'Tell me about the important section' }, + agentId: runtime.agentId, + roomId: runtime.agentId, + createdAt: Date.now(), + }; + + const knowledgeItems = await service.getKnowledge(testMessage as any); + + if (knowledgeItems.length === 0) { + throw new Error('No knowledge items retrieved'); + } + console.log(`✓ Retrieved ${knowledgeItems.length} knowledge items`); + + // Test 7: Verify relevance + const relevantItems = knowledgeItems.filter( + (item) => + item.content.text?.toLowerCase().includes('important') || + item.content.text?.toLowerCase().includes('critical') + ); + + if (relevantItems.length === 0) { + throw new Error('Retrieved items are not relevant to query'); + } + console.log(`✓ Found ${relevantItems.length} relevant items`); + + // Test 8: Test document deletion with cascade + if (testDocument.id) { + await service.deleteMemory(testDocument.id); + + // Verify document is deleted + const remainingDocs = await service.getMemories({ + tableName: 'documents', + count: 100, + }); + + if (remainingDocs.find((d) => d.id === testDocument.id)) { + throw new Error('Document was not deleted'); + } + console.log('✓ Document deleted successfully'); + + // Verify fragments are cascade deleted + const remainingFragments = await service.getMemories({ + tableName: 'knowledge', + count: 100, + }); + + const orphanedFragments = remainingFragments.filter( + (f) => (f.metadata as any)?.documentId === testDocument.id + ); + + if (orphanedFragments.length > 0) { + throw new Error('Fragments were not cascade deleted'); + } + console.log('✓ Fragments cascade deleted'); + } + + // Cleanup + await fs.rm(docsPath, { recursive: true, force: true }); + console.log('✓ Cleaned up test files'); + + 
console.log('\n✅ All Knowledge Plugin E2E tests passed!'); + }, +}; + +export default knowledgeE2ETest; diff --git a/src/__tests__/e2e/startup-loading.test.ts b/src/__tests__/e2e/startup-loading.test.ts new file mode 100644 index 0000000..90bcbf5 --- /dev/null +++ b/src/__tests__/e2e/startup-loading.test.ts @@ -0,0 +1,211 @@ +import { TestCase, IAgentRuntime, UUID } from '@elizaos/core'; +import { KnowledgeService } from '../../service'; +import path from 'path'; +import fs from 'fs/promises'; +import { DocumentRepository, FragmentRepository } from '../../repositories'; +import { v4 as uuidv4 } from 'uuid'; + +const testCase: TestCase = { + name: 'Knowledge Service Startup Loading', + + async fn(runtime: IAgentRuntime): Promise { + // Test 1: Service initialization + const service = runtime.getService('knowledge') as KnowledgeService; + if (!service) { + throw new Error('Knowledge service not found'); + } + console.log('✓ Knowledge service initialized'); + + // Test 2: Check if new tables are being used + const useNewTables = runtime.getSetting('KNOWLEDGE_USE_NEW_TABLES') === 'true'; + console.log(`✓ Using new tables: ${useNewTables}`); + + // Test 3: Create test documents directory + const docsPath = path.join(process.cwd(), 'docs'); + await fs.mkdir(docsPath, { recursive: true }); + console.log('✓ Created docs directory'); + + // Test 4: Create test documents + const testDocs = [ + { + filename: 'test-document-1.md', + content: `# Test Document 1 + +This is a test document for the knowledge service. +It contains multiple paragraphs to test chunking. + +## Section 1 +This section tests how the system handles markdown headers. +It should properly extract and chunk this content. + +## Section 2 +Another section with different content. +This helps test the fragment creation process.`, + }, + { + filename: 'test-document-2.txt', + content: `Plain text document for testing. + +This document doesn't have markdown formatting. +It should still be processed correctly by the knowledge service. 
+ +The system should handle both markdown and plain text files.`, + }, + ]; + + for (const doc of testDocs) { + await fs.writeFile(path.join(docsPath, doc.filename), doc.content); + } + console.log('✓ Created test documents'); + + // Test 5: Wait for initial document loading (if enabled) + const loadDocsOnStartup = runtime.getSetting('LOAD_DOCS_ON_STARTUP') !== 'false'; + if (loadDocsOnStartup) { + // Give the service time to load documents + await new Promise((resolve) => setTimeout(resolve, 2000)); + console.log('✓ Waited for startup document loading'); + } + + // Test 6: Manually trigger document loading + const { loadDocsFromPath } = await import('../../docs-loader'); + const loadResult = await loadDocsFromPath(service, runtime.agentId); + + if (loadResult.successful !== testDocs.length) { + throw new Error( + `Expected ${testDocs.length} documents to be loaded, but got ${loadResult.successful}` + ); + } + console.log(`✓ Loaded ${loadResult.successful} documents`); + + // Test 7: Verify documents in database + const documents = await service.getMemories({ + tableName: 'documents', + count: 100, + }); + + const loadedDocs = documents.filter((d) => + testDocs.some((td) => (d.metadata as any)?.originalFilename === td.filename) + ); + + if (loadedDocs.length !== testDocs.length) { + throw new Error( + `Expected ${testDocs.length} documents in database, but found ${loadedDocs.length}` + ); + } + console.log(`✓ Found ${loadedDocs.length} documents in database`); + + // Test 8: Verify fragments were created + const fragments = await service.getMemories({ + tableName: 'knowledge', + count: 100, + }); + + const relatedFragments = fragments.filter((f) => + loadedDocs.some((d) => (f.metadata as any)?.documentId === d.id) + ); + + if (relatedFragments.length === 0) { + throw new Error('No fragments found for loaded documents'); + } + console.log(`✓ Found ${relatedFragments.length} fragments for documents`); + + // Test 9: Test knowledge retrieval + const testMessage = { + id: 'test-message-1', + content: { text: 'Tell me about markdown headers' }, + agentId: runtime.agentId, + roomId: runtime.agentId, + createdAt: Date.now(), + }; + + const knowledgeItems = await service.getKnowledge(testMessage as any); + + if (knowledgeItems.length === 0) { + throw new Error('No knowledge items retrieved for test query'); + } + console.log(`✓ Retrieved ${knowledgeItems.length} knowledge items`); + + // Test 10: Verify relevance - should find content about markdown headers + const relevantItems = knowledgeItems.filter( + (item) => + item.content.text?.toLowerCase().includes('markdown') || + item.content.text?.toLowerCase().includes('header') + ); + + if (relevantItems.length === 0) { + throw new Error('Retrieved knowledge items are not relevant to the query'); + } + console.log(`✓ Found ${relevantItems.length} relevant knowledge items`); + + // Test 11: Test document deletion + const docToDelete = loadedDocs[0]; + await service.deleteMemory(docToDelete.id as UUID); + + const remainingDocs = await service.getMemories({ + tableName: 'documents', + count: 100, + }); + + const deletedDoc = remainingDocs.find((d) => d.id === docToDelete.id); + if (deletedDoc) { + throw new Error('Document was not deleted'); + } + console.log('✓ Successfully deleted document'); + + // Test 12: Verify cascade delete - fragments should be deleted too + const remainingFragments = await service.getMemories({ + tableName: 'knowledge', + count: 100, + }); + + const orphanedFragments = remainingFragments.filter( + (f) => (f.metadata as 
any)?.documentId === docToDelete.id + ); + + if (orphanedFragments.length > 0) { + throw new Error('Fragments were not cascade deleted with document'); + } + console.log('✓ Fragments were cascade deleted'); + + // Test 13: Test adding knowledge via API + const apiKnowledge = { + clientDocumentId: uuidv4() as UUID, + contentType: 'text/plain', + originalFilename: 'api-test.txt', + worldId: runtime.agentId as UUID, + roomId: runtime.agentId as UUID, + entityId: runtime.agentId as UUID, + content: 'This is content added via the API. It should be processed and stored correctly.', + metadata: { source: 'api' }, + }; + + const apiResult = await service.addKnowledge(apiKnowledge); + + if (!apiResult.storedDocumentMemoryId) { + throw new Error('Failed to add knowledge via API'); + } + console.log(`✓ Added knowledge via API, ${apiResult.fragmentCount} fragments created`); + + // Test 14: Verify API-added document exists + const apiDoc = await runtime.getMemoryById(apiResult.storedDocumentMemoryId); + if (!apiDoc) { + throw new Error('API-added document not found in database'); + } + console.log('✓ API-added document verified in database'); + + // Test 15: Test duplicate prevention + const duplicateResult = await service.addKnowledge(apiKnowledge); + + if (duplicateResult.storedDocumentMemoryId !== apiResult.storedDocumentMemoryId) { + throw new Error('Duplicate document was created instead of returning existing'); + } + console.log('✓ Duplicate prevention working correctly'); + + // Cleanup + await fs.rm(docsPath, { recursive: true, force: true }); + console.log('✓ Cleaned up test documents'); + console.log('All knowledge service startup loading tests passed!'); + }, +}; + +export default testCase; diff --git a/__tests__/action.test.ts b/src/__tests__/unit/action.test.ts similarity index 55% rename from __tests__/action.test.ts rename to src/__tests__/unit/action.test.ts index 293b665..6943443 100644 --- a/__tests__/action.test.ts +++ b/src/__tests__/unit/action.test.ts @@ -1,13 +1,13 @@ -import { describe, it, expect, beforeEach, vi, Mock } from "vitest"; -import { processKnowledgeAction } from "../src/actions"; -import { KnowledgeService } from "../src/service"; -import type { IAgentRuntime, Memory, Content, State, UUID } from "@elizaos/core"; -import * as fs from "fs"; -import * as path from "path"; +import { describe, it, expect, beforeEach, vi, Mock } from 'vitest'; +import { processKnowledgeAction } from '../../actions'; +import { KnowledgeService } from '../../service'; +import type { IAgentRuntime, Memory, Content, State, UUID } from '@elizaos/core'; +import * as fs from 'fs'; +import * as path from 'path'; // Mock @elizaos/core logger and createUniqueUuid -vi.mock("@elizaos/core", async () => { - const actual = await vi.importActual("@elizaos/core"); +vi.mock('@elizaos/core', async () => { + const actual = await vi.importActual('@elizaos/core'); return { ...actual, logger: { @@ -24,75 +24,74 @@ vi.mock("@elizaos/core", async () => { }); // Mock fs and path -vi.mock("fs"); -vi.mock("path"); +vi.mock('fs'); +vi.mock('path'); -describe("processKnowledgeAction", () => { +describe('processKnowledgeAction', () => { let mockRuntime: IAgentRuntime; let mockKnowledgeService: KnowledgeService; let mockCallback: Mock; let mockState: State; - const generateMockUuid = (suffix: string | number): UUID => `00000000-0000-0000-0000-${String(suffix).padStart(12, "0")}` as UUID; + const generateMockUuid = (suffix: string | number): UUID => + `00000000-0000-0000-0000-${String(suffix).padStart(12, '0')}` as 
UUID; beforeEach(() => { mockKnowledgeService = { addKnowledge: vi.fn(), getKnowledge: vi.fn(), - serviceType: "knowledge-service", + serviceType: 'knowledge-service', } as unknown as KnowledgeService; mockRuntime = { - agentId: "test-agent" as UUID, + agentId: 'test-agent' as UUID, getService: vi.fn().mockReturnValue(mockKnowledgeService), } as unknown as IAgentRuntime; mockCallback = vi.fn(); mockState = { - values: {}, - data: {}, - text: "", + values: {}, + data: {}, + text: '', }; vi.clearAllMocks(); }); - describe("handler", () => { + describe('handler', () => { beforeEach(() => { - // Reset and re-mock fs/path functions for each handler test - (fs.existsSync as Mock).mockReset(); - (fs.readFileSync as Mock).mockReset(); - (path.basename as Mock).mockReset(); - (path.extname as Mock).mockReset(); + // Reset and re-mock fs/path functions for each handler test + (fs.existsSync as Mock).mockReset(); + (fs.readFileSync as Mock).mockReset(); + (path.basename as Mock).mockReset(); + (path.extname as Mock).mockReset(); }); - it("should process a file when a valid path is provided", async () => { + it('should process a file when a valid path is provided', async () => { const message: Memory = { id: generateMockUuid(1), content: { - text: "Process the document at /path/to/document.pdf", + text: 'Process the document at /path/to/document.pdf', }, entityId: generateMockUuid(2), roomId: generateMockUuid(3), }; (fs.existsSync as Mock).mockReturnValue(true); - (fs.readFileSync as Mock).mockReturnValue( - Buffer.from("file content") - ); - (path.basename as Mock).mockReturnValue("document.pdf"); - (path.extname as Mock).mockReturnValue(".pdf"); + (fs.readFileSync as Mock).mockReturnValue(Buffer.from('file content')); + (path.basename as Mock).mockReturnValue('document.pdf'); + (path.extname as Mock).mockReturnValue('.pdf'); (mockKnowledgeService.addKnowledge as Mock).mockResolvedValue({ fragmentCount: 5 }); await processKnowledgeAction.handler?.(mockRuntime, message, mockState, {}, mockCallback); - expect(fs.existsSync).toHaveBeenCalledWith("/path/to/document.pdf"); - expect(fs.readFileSync).toHaveBeenCalledWith("/path/to/document.pdf"); + expect(fs.existsSync).toHaveBeenCalledWith('/path/to/document.pdf'); + expect(fs.readFileSync).toHaveBeenCalledWith('/path/to/document.pdf'); expect(mockKnowledgeService.addKnowledge).toHaveBeenCalledWith({ - clientDocumentId: "mocked-uuid-document.pdf" as UUID, - contentType: "application/pdf", - originalFilename: "document.pdf", - worldId: "test-agent" as UUID, - content: Buffer.from("file content").toString("base64"), + clientDocumentId: 'mocked-uuid-document.pdf' as UUID, + contentType: 'application/pdf', + originalFilename: 'document.pdf', + worldId: 'test-agent' as UUID, + content: Buffer.from('file content').toString('base64'), roomId: message.roomId, entityId: message.entityId, }); @@ -101,11 +100,11 @@ describe("processKnowledgeAction", () => { }); }); - it("should return a message if the file path is provided but file does not exist", async () => { + it('should return a message if the file path is provided but file does not exist', async () => { const message: Memory = { id: generateMockUuid(4), content: { - text: "Process the document at /non/existent/file.txt", + text: 'Process the document at /non/existent/file.txt', }, entityId: generateMockUuid(5), roomId: generateMockUuid(6), @@ -115,7 +114,7 @@ describe("processKnowledgeAction", () => { await processKnowledgeAction.handler?.(mockRuntime, message, mockState, {}, mockCallback); - 
expect(fs.existsSync).toHaveBeenCalledWith("/non/existent/file.txt"); + expect(fs.existsSync).toHaveBeenCalledWith('/non/existent/file.txt'); expect(fs.readFileSync).not.toHaveBeenCalled(); expect(mockKnowledgeService.addKnowledge).not.toHaveBeenCalled(); expect(mockCallback).toHaveBeenCalledWith({ @@ -123,27 +122,27 @@ describe("processKnowledgeAction", () => { }); }); - it("should process direct text content when no file path is provided", async () => { + it('should process direct text content when no file path is provided', async () => { const message: Memory = { id: generateMockUuid(7), content: { - text: "Add this to your knowledge: The capital of France is Paris.", + text: 'Add this to your knowledge: The capital of France is Paris.', }, entityId: generateMockUuid(8), roomId: generateMockUuid(9), }; - (mockKnowledgeService.addKnowledge as Mock).mockResolvedValue({}); + (mockKnowledgeService.addKnowledge as Mock).mockResolvedValue({}); await processKnowledgeAction.handler?.(mockRuntime, message, mockState, {}, mockCallback); expect(fs.existsSync).not.toHaveBeenCalled(); expect(mockKnowledgeService.addKnowledge).toHaveBeenCalledWith({ - clientDocumentId: "mocked-uuid-user-knowledge" as UUID, - contentType: "text/plain", - originalFilename: "user-knowledge.txt", - worldId: "test-agent" as UUID, - content: "to your knowledge: The capital of France is Paris.", + clientDocumentId: 'mocked-uuid-user-knowledge' as UUID, + contentType: 'text/plain', + originalFilename: 'user-knowledge.txt', + worldId: 'test-agent' as UUID, + content: 'to your knowledge: The capital of France is Paris.', roomId: message.roomId, entityId: message.entityId, }); @@ -152,11 +151,11 @@ describe("processKnowledgeAction", () => { }); }); - it("should return a message if no file path and no text content is provided", async () => { + it('should return a message if no file path and no text content is provided', async () => { const message: Memory = { id: generateMockUuid(10), content: { - text: "add this:", + text: 'add this:', }, entityId: generateMockUuid(11), roomId: generateMockUuid(12), @@ -167,110 +166,90 @@ describe("processKnowledgeAction", () => { expect(fs.existsSync).not.toHaveBeenCalled(); expect(mockKnowledgeService.addKnowledge).not.toHaveBeenCalled(); expect(mockCallback).toHaveBeenCalledWith({ - text: "I need some content to add to my knowledge base. Please provide text or a file path.", + text: 'I need some content to add to my knowledge base. 
Please provide text or a file path.', }); }); - it("should handle errors gracefully", async () => { + it('should handle errors gracefully', async () => { const message: Memory = { id: generateMockUuid(13), content: { - text: "Process /path/to/error.txt", + text: 'Process /path/to/error.txt', }, entityId: generateMockUuid(14), roomId: generateMockUuid(15), }; (fs.existsSync as Mock).mockReturnValue(true); - (fs.readFileSync as Mock).mockReturnValue(Buffer.from("error content")); - (path.basename as Mock).mockReturnValue("error.txt"); - (path.extname as Mock).mockReturnValue(".txt"); - (mockKnowledgeService.addKnowledge as Mock).mockRejectedValue( - new Error("Service error") - ); + (fs.readFileSync as Mock).mockReturnValue(Buffer.from('error content')); + (path.basename as Mock).mockReturnValue('error.txt'); + (path.extname as Mock).mockReturnValue('.txt'); + (mockKnowledgeService.addKnowledge as Mock).mockRejectedValue(new Error('Service error')); await processKnowledgeAction.handler?.(mockRuntime, message, mockState, {}, mockCallback); expect(mockCallback).toHaveBeenCalledWith({ - text: "I encountered an error while processing the knowledge: Service error", + text: 'I encountered an error while processing the knowledge: Service error', }); }); }); - describe("validate", () => { + describe('validate', () => { beforeEach(() => { (mockRuntime.getService as Mock).mockReturnValue(mockKnowledgeService); }); - it("should return true if knowledge keywords are present and service is available", async () => { + it('should return true if knowledge keywords are present and service is available', async () => { const message: Memory = { id: generateMockUuid(16), content: { - text: "add this to your knowledge base", + text: 'add this to your knowledge base', }, entityId: generateMockUuid(17), roomId: generateMockUuid(18), }; - const isValid = await processKnowledgeAction.validate?.( - mockRuntime, - message, - mockState - ); + const isValid = await processKnowledgeAction.validate?.(mockRuntime, message, mockState); expect(isValid).toBe(true); - expect(mockRuntime.getService).toHaveBeenCalledWith( - KnowledgeService.serviceType - ); + expect(mockRuntime.getService).toHaveBeenCalledWith(KnowledgeService.serviceType); }); - it("should return true if a file path is present and service is available", async () => { + it('should return true if a file path is present and service is available', async () => { const message: Memory = { id: generateMockUuid(19), content: { - text: "process /path/to/doc.pdf", + text: 'process /path/to/doc.pdf', }, entityId: generateMockUuid(20), roomId: generateMockUuid(21), }; - const isValid = await processKnowledgeAction.validate?.( - mockRuntime, - message, - mockState - ); + const isValid = await processKnowledgeAction.validate?.(mockRuntime, message, mockState); expect(isValid).toBe(true); }); - it("should return false if service is not available", async () => { + it('should return false if service is not available', async () => { (mockRuntime.getService as Mock).mockReturnValue(null); const message: Memory = { id: generateMockUuid(22), content: { - text: "add this to your knowledge base", + text: 'add this to your knowledge base', }, entityId: generateMockUuid(23), roomId: generateMockUuid(24), }; - const isValid = await processKnowledgeAction.validate?.( - mockRuntime, - message, - mockState - ); + const isValid = await processKnowledgeAction.validate?.(mockRuntime, message, mockState); expect(isValid).toBe(false); }); - it("should return false if no relevant keywords or 
path are present", async () => { + it('should return false if no relevant keywords or path are present', async () => { const message: Memory = { id: generateMockUuid(25), content: { - text: "hello there", + text: 'hello there', }, entityId: generateMockUuid(26), roomId: generateMockUuid(27), }; - const isValid = await processKnowledgeAction.validate?.( - mockRuntime, - message, - mockState - ); + const isValid = await processKnowledgeAction.validate?.(mockRuntime, message, mockState); expect(isValid).toBe(false); }); }); diff --git a/src/__tests__/unit/document-repository.test.ts b/src/__tests__/unit/document-repository.test.ts new file mode 100644 index 0000000..9401334 --- /dev/null +++ b/src/__tests__/unit/document-repository.test.ts @@ -0,0 +1,247 @@ +import { describe, it, expect, beforeEach, vi } from 'vitest'; +import { DocumentRepository } from '../../repositories/document-repository'; +import { v4 as uuidv4 } from 'uuid'; +import type { UUID } from '@elizaos/core'; +import type { Document } from '../../types'; + +describe('DocumentRepository', () => { + let mockDb: any; + let repository: DocumentRepository; + + beforeEach(() => { + // Create mock database methods + mockDb = { + insert: vi.fn().mockReturnThis(), + values: vi.fn().mockReturnThis(), + returning: vi.fn().mockResolvedValue([]), + select: vi.fn().mockReturnThis(), + from: vi.fn().mockReturnThis(), + where: vi.fn().mockReturnThis(), + orderBy: vi.fn().mockReturnThis(), + limit: vi.fn().mockReturnThis(), + offset: vi.fn().mockReturnThis(), + update: vi.fn().mockReturnThis(), + set: vi.fn().mockReturnThis(), + delete: vi.fn().mockReturnThis(), + }; + + repository = new DocumentRepository(mockDb); + }); + + describe('create', () => { + it('should create a new document', async () => { + const testDoc = { + agentId: uuidv4() as UUID, + worldId: uuidv4() as UUID, + roomId: uuidv4() as UUID, + entityId: uuidv4() as UUID, + originalFilename: 'test.pdf', + contentType: 'application/pdf', + content: 'base64content', + fileSize: 1024, + title: 'Test Document', + }; + + const expectedResult = { + id: uuidv4() as UUID, + ...testDoc, + sourceUrl: undefined, + metadata: {}, + createdAt: new Date(), + updatedAt: new Date(), + }; + + mockDb.returning.mockResolvedValue([expectedResult]); + + const result = await repository.create(testDoc); + + expect(mockDb.insert).toHaveBeenCalled(); + expect(mockDb.values).toHaveBeenCalledWith( + expect.objectContaining({ + ...testDoc, + createdAt: expect.any(Date), + updatedAt: expect.any(Date), + }) + ); + expect(result).toEqual(expectedResult); + }); + }); + + describe('findById', () => { + it('should find a document by ID', async () => { + const id = uuidv4() as UUID; + const expectedDoc: Document = { + id, + agentId: uuidv4() as UUID, + worldId: uuidv4() as UUID, + roomId: uuidv4() as UUID, + entityId: uuidv4() as UUID, + originalFilename: 'test.pdf', + contentType: 'application/pdf', + content: 'base64content', + fileSize: 1024, + title: 'Test Document', + sourceUrl: undefined, + metadata: {}, + createdAt: new Date(), + updatedAt: new Date(), + }; + + mockDb.limit.mockResolvedValue([expectedDoc]); + + const result = await repository.findById(id); + + expect(mockDb.select).toHaveBeenCalled(); + expect(mockDb.from).toHaveBeenCalled(); + expect(mockDb.where).toHaveBeenCalled(); + expect(mockDb.limit).toHaveBeenCalledWith(1); + expect(result).toEqual(expectedDoc); + }); + + it('should return null if document not found', async () => { + mockDb.limit.mockResolvedValue([]); + + const result = await 
repository.findById(uuidv4() as UUID); + + expect(result).toBeNull(); + }); + }); + + describe('findByAgent', () => { + it('should find documents by agent ID', async () => { + const agentId = uuidv4() as UUID; + const docs = [createMockDocument(), createMockDocument()]; + + mockDb.offset.mockResolvedValue(docs); + + const result = await repository.findByAgent(agentId, 10, 0); + + expect(mockDb.select).toHaveBeenCalled(); + expect(mockDb.where).toHaveBeenCalled(); + expect(mockDb.orderBy).toHaveBeenCalled(); + expect(mockDb.limit).toHaveBeenCalledWith(10); + expect(mockDb.offset).toHaveBeenCalledWith(0); + expect(result).toEqual(docs); + }); + }); + + describe('update', () => { + it('should update a document', async () => { + const id = uuidv4() as UUID; + const updates = { + title: 'Updated Title', + fileSize: 2048, + }; + + const updatedDoc = { + ...createMockDocument(), + ...updates, + updatedAt: new Date(), + }; + + mockDb.returning.mockResolvedValue([updatedDoc]); + + const result = await repository.update(id, updates); + + expect(mockDb.update).toHaveBeenCalled(); + expect(mockDb.set).toHaveBeenCalledWith( + expect.objectContaining({ + ...updates, + updatedAt: expect.any(Date), + }) + ); + expect(result).toEqual(updatedDoc); + }); + + it('should return null if document not found', async () => { + mockDb.returning.mockResolvedValue([]); + + const result = await repository.update(uuidv4() as UUID, { title: 'New' }); + + expect(result).toBeNull(); + }); + }); + + describe('delete', () => { + it('should delete a document', async () => { + const id = uuidv4() as UUID; + mockDb.returning.mockResolvedValue([{ id }]); + + const result = await repository.delete(id); + + expect(mockDb.delete).toHaveBeenCalled(); + expect(mockDb.where).toHaveBeenCalled(); + expect(result).toBe(true); + }); + + it('should return false if document not found', async () => { + mockDb.returning.mockResolvedValue([]); + + const result = await repository.delete(uuidv4() as UUID); + + expect(result).toBe(false); + }); + }); + + describe('exists', () => { + it('should return true if document exists', async () => { + mockDb.limit.mockResolvedValue([{ id: uuidv4() }]); + + const result = await repository.exists(uuidv4() as UUID); + + expect(result).toBe(true); + }); + + it('should return false if document does not exist', async () => { + mockDb.limit.mockResolvedValue([]); + + const result = await repository.exists(uuidv4() as UUID); + + expect(result).toBe(false); + }); + }); + + describe('findBySourceUrl', () => { + it('should find document by source URL', async () => { + const sourceUrl = 'https://example.com/doc.pdf'; + const agentId = uuidv4() as UUID; + const doc = createMockDocument({ sourceUrl }); + + mockDb.limit.mockResolvedValue([doc]); + + const result = await repository.findBySourceUrl(sourceUrl, agentId); + + expect(mockDb.where).toHaveBeenCalled(); + expect(result).toEqual(doc); + }); + + it('should return null if not found', async () => { + mockDb.limit.mockResolvedValue([]); + + const result = await repository.findBySourceUrl('https://example.com', uuidv4() as UUID); + + expect(result).toBeNull(); + }); + }); +}); + +// Helper function to create mock documents +function createMockDocument(overrides?: Partial): Document { + return { + id: uuidv4() as UUID, + agentId: uuidv4() as UUID, + worldId: uuidv4() as UUID, + roomId: uuidv4() as UUID, + entityId: uuidv4() as UUID, + originalFilename: 'test.pdf', + contentType: 'application/pdf', + content: 'base64content', + fileSize: 1024, + title: 'Test Document', + 
sourceUrl: undefined, + metadata: {}, + createdAt: new Date(), + updatedAt: new Date(), + ...overrides, + }; +} diff --git a/src/__tests__/unit/fragment-repository.test.ts b/src/__tests__/unit/fragment-repository.test.ts new file mode 100644 index 0000000..dd4e586 --- /dev/null +++ b/src/__tests__/unit/fragment-repository.test.ts @@ -0,0 +1,403 @@ +import { describe, it, expect, beforeEach, vi } from 'vitest'; +import { FragmentRepository } from '../../repositories/fragment-repository'; +import type { KnowledgeFragment } from '../../types'; +import type { UUID } from '@elizaos/core'; + +// Mock the schema +vi.mock('../../schema', () => ({ + knowledgeFragmentsTable: 'knowledge_fragments_table_mock', +})); + +// Mock drizzle +vi.mock('drizzle-orm', () => ({ + eq: vi.fn((field, value) => ({ field, value })), + sql: vi.fn((strings, ...values) => ({ query: strings.join(''), values })), + asc: vi.fn((field) => ({ field, order: 'asc' })), + desc: vi.fn((field) => ({ field, order: 'desc' })), + and: vi.fn((...conditions) => ({ type: 'and', conditions })), + or: vi.fn((...conditions) => ({ type: 'or', conditions })), + relations: vi.fn((table, callback) => ({ + table, + relations: callback({ many: vi.fn(), one: vi.fn() }), + })), + cosineDistance: vi.fn((field, embedding) => ({ field, embedding })), +})); + +describe('FragmentRepository', () => { + let mockDb: any; + let repository: FragmentRepository; + + const mockFragment: KnowledgeFragment = { + id: 'fragment-123' as UUID, + documentId: 'doc-123' as UUID, + agentId: 'agent-123' as UUID, + worldId: 'world-123' as UUID, + roomId: 'room-123' as UUID, + entityId: 'entity-123' as UUID, + content: 'This is test fragment content', + embedding: Array(1536).fill(0.1), + position: 0, + createdAt: new Date('2025-01-01T00:00:00Z'), + metadata: { custom: 'data' }, + }; + + beforeEach(() => { + // Reset all mocks + vi.clearAllMocks(); + + // Create mock database with proper chaining + const mockChain: any = { + insert: vi.fn(), + values: vi.fn(), + returning: vi.fn(), + select: vi.fn(), + from: vi.fn(), + where: vi.fn(), + update: vi.fn(), + set: vi.fn(), + delete: vi.fn(), + orderBy: vi.fn(), + limit: vi.fn(), + execute: vi.fn(), + as: vi.fn(), + $with: vi.fn(), + }; + + // Setup method chaining - each method returns mockChain + Object.keys(mockChain).forEach((key) => { + mockChain[key].mockReturnValue(mockChain); + }); + + // Override returning() to return promise-like chain + mockChain.returning.mockImplementation(() => { + // returning() should return a promise when awaited + return Promise.resolve(mockChain._returnValue || []); + }); + + mockDb = mockChain; + repository = new FragmentRepository(mockDb as any); + }); + + describe('create', () => { + it('should create a fragment successfully', async () => { + const expectedFragment = { ...mockFragment }; + mockDb._returnValue = [expectedFragment]; + + const result = await repository.create(mockFragment); + + expect(mockDb.insert).toHaveBeenCalledWith('knowledge_fragments_table_mock'); + expect(mockDb.values).toHaveBeenCalled(); + expect(mockDb.returning).toHaveBeenCalled(); + expect(result).toMatchObject({ + id: mockFragment.id, + content: mockFragment.content, + documentId: mockFragment.documentId, + }); + }); + + it('should handle creation errors', async () => { + // Simulate a database error by throwing when returning is called + mockDb.returning.mockImplementation(() => { + return Promise.reject(new Error('Database constraint violation')); + }); + + await 
expect(repository.create(mockFragment)).rejects.toThrow( + 'Database constraint violation' + ); + }); + }); + + describe('createBatch', () => { + it('should create multiple fragments in batch', async () => { + const fragments = [ + { ...mockFragment, id: 'fragment-1' as UUID, position: 0 }, + { ...mockFragment, id: 'fragment-2' as UUID, position: 1 }, + { ...mockFragment, id: 'fragment-3' as UUID, position: 2 }, + ]; + + mockDb._returnValue = fragments; + + const result = await repository.createBatch(fragments); + + expect(mockDb.insert).toHaveBeenCalledWith('knowledge_fragments_table_mock'); + expect(mockDb.values).toHaveBeenCalled(); + expect(mockDb.returning).toHaveBeenCalled(); + expect(result).toHaveLength(3); + expect(result[0].id).toBe('fragment-1'); + expect(result[2].position).toBe(2); + }); + + it('should handle empty batch', async () => { + const result = await repository.createBatch([]); + + expect(mockDb.insert).toHaveBeenCalledWith('knowledge_fragments_table_mock'); + expect(result).toEqual([]); + }); + }); + + describe('findById', () => { + it('should find fragment by id', async () => { + // For select queries, we need to override the chain differently + const selectChain = { + select: vi.fn().mockReturnThis(), + from: vi.fn().mockReturnThis(), + where: vi.fn().mockReturnThis(), + limit: vi.fn().mockReturnThis(), + then: vi.fn((resolve) => resolve([mockFragment])), + }; + + mockDb.select.mockReturnValue(selectChain); + + const result = await repository.findById(mockFragment.id); + + expect(mockDb.select).toHaveBeenCalled(); + expect(selectChain.where).toHaveBeenCalled(); + expect(selectChain.limit).toHaveBeenCalledWith(1); + expect(result).toMatchObject({ + id: mockFragment.id, + content: mockFragment.content, + }); + }); + + it('should return null when fragment not found', async () => { + const selectChain = { + select: vi.fn().mockReturnThis(), + from: vi.fn().mockReturnThis(), + where: vi.fn().mockReturnThis(), + limit: vi.fn().mockReturnThis(), + then: vi.fn((resolve) => resolve([])), + }; + + mockDb.select.mockReturnValue(selectChain); + + const result = await repository.findById('non-existent' as UUID); + + expect(result).toBeNull(); + }); + }); + + describe('findByDocument', () => { + it('should find fragments by document ID', async () => { + const fragments = [ + { ...mockFragment, position: 0 }, + { ...mockFragment, position: 1 }, + { ...mockFragment, position: 2 }, + ]; + + const selectChain = { + select: vi.fn().mockReturnThis(), + from: vi.fn().mockReturnThis(), + where: vi.fn().mockReturnThis(), + orderBy: vi.fn().mockReturnThis(), + then: vi.fn((resolve) => resolve(fragments)), + }; + + mockDb.select.mockReturnValue(selectChain); + + const result = await repository.findByDocument(mockFragment.documentId); + + expect(mockDb.select).toHaveBeenCalled(); + expect(selectChain.where).toHaveBeenCalled(); + expect(selectChain.orderBy).toHaveBeenCalled(); + expect(result).toHaveLength(3); + expect(result[0].position).toBe(0); + expect(result[2].position).toBe(2); + }); + + it('should return empty array when no fragments found', async () => { + const selectChain = { + select: vi.fn().mockReturnThis(), + from: vi.fn().mockReturnThis(), + where: vi.fn().mockReturnThis(), + orderBy: vi.fn().mockReturnThis(), + then: vi.fn((resolve) => resolve([])), + }; + + mockDb.select.mockReturnValue(selectChain); + + const result = await repository.findByDocument('non-existent' as UUID); + + expect(result).toEqual([]); + }); + }); + + describe('searchByEmbedding', () => { + it('should 
search fragments by embedding similarity', async () => { + const embedding = Array(1536).fill(0.5); + const searchResults = [ + { ...mockFragment, embedding: Array(1536).fill(0.9) }, + { ...mockFragment, id: 'fragment-2' as UUID, embedding: Array(1536).fill(0.7) }, + { ...mockFragment, id: 'fragment-3' as UUID, embedding: Array(1536).fill(0.5) }, + ]; + + const selectChain = { + select: vi.fn().mockReturnThis(), + from: vi.fn().mockReturnThis(), + where: vi.fn().mockReturnThis(), + orderBy: vi.fn().mockReturnThis(), + limit: vi.fn().mockReturnThis(), + then: vi.fn((resolve) => resolve(searchResults)), + }; + + mockDb.select.mockReturnValue(selectChain); + + const result = await repository.searchByEmbedding(embedding, { + agentId: mockFragment.agentId, + limit: 10, + threshold: 0.7, + }); + + expect(mockDb.select).toHaveBeenCalled(); + expect(selectChain.where).toHaveBeenCalled(); + expect(selectChain.orderBy).toHaveBeenCalled(); + expect(selectChain.limit).toHaveBeenCalledWith(10); + expect(result).toHaveLength(3); + // The similarity should be calculated by the repository + expect(result[0]).toHaveProperty('similarity'); + expect(result[0].similarity).toBeGreaterThan(0); + }); + + it('should apply optional filters', async () => { + const embedding = Array(1536).fill(0.5); + + const selectChain = { + select: vi.fn().mockReturnThis(), + from: vi.fn().mockReturnThis(), + where: vi.fn().mockReturnThis(), + orderBy: vi.fn().mockReturnThis(), + limit: vi.fn().mockReturnThis(), + then: vi.fn((resolve) => resolve([])), + }; + + mockDb.select.mockReturnValue(selectChain); + + await repository.searchByEmbedding(embedding, { + agentId: mockFragment.agentId, + roomId: 'room-456' as UUID, + worldId: 'world-456' as UUID, + entityId: 'entity-456' as UUID, + limit: 5, + threshold: 0.8, + }); + + expect(selectChain.where).toHaveBeenCalled(); + expect(selectChain.limit).toHaveBeenCalledWith(5); + }); + }); + + describe('updateEmbedding', () => { + it('should update fragment embedding', async () => { + const newEmbedding = Array(1536).fill(0.8); + const updatedFragment = { ...mockFragment, embedding: newEmbedding }; + + mockDb._returnValue = [updatedFragment]; + + const result = await repository.updateEmbedding(mockFragment.id, newEmbedding); + + expect(mockDb.update).toHaveBeenCalledWith('knowledge_fragments_table_mock'); + expect(mockDb.set).toHaveBeenCalledWith({ embedding: newEmbedding }); + expect(mockDb.where).toHaveBeenCalled(); + expect(mockDb.returning).toHaveBeenCalled(); + expect(result?.embedding).toEqual(newEmbedding); + }); + + it('should return null when fragment not found', async () => { + mockDb._returnValue = []; + + const result = await repository.updateEmbedding('non-existent' as UUID, Array(1536).fill(0)); + + expect(result).toBeNull(); + }); + }); + + describe('delete', () => { + it('should delete fragment by id', async () => { + mockDb._returnValue = [{ id: mockFragment.id }]; + + const result = await repository.delete(mockFragment.id); + + expect(mockDb.delete).toHaveBeenCalledWith('knowledge_fragments_table_mock'); + expect(mockDb.where).toHaveBeenCalled(); + expect(mockDb.returning).toHaveBeenCalled(); + expect(result).toBe(true); + }); + + it('should return false when fragment not found', async () => { + mockDb._returnValue = []; + + const result = await repository.delete(mockFragment.id); + + expect(result).toBe(false); + }); + }); + + describe('deleteByDocument', () => { + it('should delete all fragments for a document', async () => { + mockDb._returnValue = [{ id: '1' }, { id: 
'2' }, { id: '3' }]; + + const result = await repository.deleteByDocument(mockFragment.documentId); + + expect(mockDb.delete).toHaveBeenCalledWith('knowledge_fragments_table_mock'); + expect(mockDb.where).toHaveBeenCalled(); + expect(mockDb.returning).toHaveBeenCalled(); + expect(result).toBe(3); + }); + + it('should return 0 when no fragments deleted', async () => { + mockDb._returnValue = []; + + const result = await repository.deleteByDocument(mockFragment.documentId); + + expect(result).toBe(0); + }); + }); + + describe('countByDocument', () => { + it('should count fragments for a document', async () => { + const selectChain = { + select: vi.fn().mockReturnThis(), + from: vi.fn().mockReturnThis(), + where: vi.fn().mockReturnThis(), + then: vi.fn((resolve) => resolve([{ count: 5 }])), + }; + + mockDb.select.mockReturnValue(selectChain); + + const result = await repository.countByDocument(mockFragment.documentId); + + expect(mockDb.select).toHaveBeenCalled(); + expect(selectChain.where).toHaveBeenCalled(); + expect(result).toBe(5); + }); + + it('should return 0 when no fragments exist', async () => { + const selectChain = { + select: vi.fn().mockReturnThis(), + from: vi.fn().mockReturnThis(), + where: vi.fn().mockReturnThis(), + then: vi.fn((resolve) => resolve([{ count: 0 }])), + }; + + mockDb.select.mockReturnValue(selectChain); + + const result = await repository.countByDocument('non-existent' as UUID); + + expect(result).toBe(0); + }); + + it('should handle null count result', async () => { + const selectChain = { + select: vi.fn().mockReturnThis(), + from: vi.fn().mockReturnThis(), + where: vi.fn().mockReturnThis(), + then: vi.fn((resolve) => resolve([{ count: null }])), + }; + + mockDb.select.mockReturnValue(selectChain); + + const result = await repository.countByDocument(mockFragment.documentId); + + expect(result).toBe(0); + }); + }); +}); diff --git a/src/__tests__/unit/schema.test.ts b/src/__tests__/unit/schema.test.ts new file mode 100644 index 0000000..3ea0e63 --- /dev/null +++ b/src/__tests__/unit/schema.test.ts @@ -0,0 +1,109 @@ +import { describe, it, expect } from 'vitest'; +import { documentsTable, knowledgeFragmentsTable, knowledgeSchema } from '../../schema'; +import { v4 as uuidv4 } from 'uuid'; +import type { UUID } from '@elizaos/core'; + +describe('Knowledge Schema', () => { + describe('Schema Structure', () => { + it('should export documents table', () => { + expect(documentsTable).toBeDefined(); + expect(documentsTable.id).toBeDefined(); + expect(documentsTable.agentId).toBeDefined(); + expect(documentsTable.originalFilename).toBeDefined(); + expect(documentsTable.content).toBeDefined(); + }); + + it('should export knowledge fragments table', () => { + expect(knowledgeFragmentsTable).toBeDefined(); + expect(knowledgeFragmentsTable.id).toBeDefined(); + expect(knowledgeFragmentsTable.documentId).toBeDefined(); + expect(knowledgeFragmentsTable.content).toBeDefined(); + expect(knowledgeFragmentsTable.embedding).toBeDefined(); + }); + + it('should export complete schema', () => { + expect(knowledgeSchema).toBeDefined(); + expect(knowledgeSchema.documentsTable).toBe(documentsTable); + expect(knowledgeSchema.knowledgeFragmentsTable).toBe(knowledgeFragmentsTable); + }); + }); + + describe('Table Columns', () => { + it('documents table should have all required columns', () => { + // Check that columns exist + expect(documentsTable.id).toBeDefined(); + expect(documentsTable.agentId).toBeDefined(); + expect(documentsTable.worldId).toBeDefined(); + 
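// worldId, roomId, and entityId scope a document the same way AddKnowledgeOptions scopes an upload
+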
expect(documentsTable.roomId).toBeDefined(); + expect(documentsTable.entityId).toBeDefined(); + expect(documentsTable.originalFilename).toBeDefined(); + expect(documentsTable.contentType).toBeDefined(); + expect(documentsTable.content).toBeDefined(); + expect(documentsTable.fileSize).toBeDefined(); + expect(documentsTable.title).toBeDefined(); + expect(documentsTable.sourceUrl).toBeDefined(); + expect(documentsTable.createdAt).toBeDefined(); + expect(documentsTable.updatedAt).toBeDefined(); + expect(documentsTable.metadata).toBeDefined(); + }); + + it('knowledge_fragments table should have all required columns', () => { + // Check that columns exist + expect(knowledgeFragmentsTable.id).toBeDefined(); + expect(knowledgeFragmentsTable.documentId).toBeDefined(); + expect(knowledgeFragmentsTable.agentId).toBeDefined(); + expect(knowledgeFragmentsTable.worldId).toBeDefined(); + expect(knowledgeFragmentsTable.roomId).toBeDefined(); + expect(knowledgeFragmentsTable.entityId).toBeDefined(); + expect(knowledgeFragmentsTable.content).toBeDefined(); + expect(knowledgeFragmentsTable.embedding).toBeDefined(); + expect(knowledgeFragmentsTable.position).toBeDefined(); + expect(knowledgeFragmentsTable.createdAt).toBeDefined(); + expect(knowledgeFragmentsTable.metadata).toBeDefined(); + }); + }); + + describe('Foreign Key Relationships', () => { + it('knowledge_fragments should have documentId column', () => { + // Just check that the column exists + expect(knowledgeFragmentsTable.documentId).toBeDefined(); + }); + }); + + describe('Table Structure', () => { + it('should define valid document structure', () => { + // Test that all fields map to columns + expect(documentsTable.id).toBeDefined(); + expect(documentsTable.agentId).toBeDefined(); + expect(documentsTable.worldId).toBeDefined(); + expect(documentsTable.roomId).toBeDefined(); + expect(documentsTable.entityId).toBeDefined(); + expect(documentsTable.originalFilename).toBeDefined(); + expect(documentsTable.contentType).toBeDefined(); + expect(documentsTable.content).toBeDefined(); + expect(documentsTable.fileSize).toBeDefined(); + expect(documentsTable.title).toBeDefined(); + expect(documentsTable.createdAt).toBeDefined(); + expect(documentsTable.updatedAt).toBeDefined(); + }); + + it('should define valid fragment structure', () => { + // Test that all fields map to columns + expect(knowledgeFragmentsTable.id).toBeDefined(); + expect(knowledgeFragmentsTable.documentId).toBeDefined(); + expect(knowledgeFragmentsTable.agentId).toBeDefined(); + expect(knowledgeFragmentsTable.worldId).toBeDefined(); + expect(knowledgeFragmentsTable.roomId).toBeDefined(); + expect(knowledgeFragmentsTable.entityId).toBeDefined(); + expect(knowledgeFragmentsTable.content).toBeDefined(); + expect(knowledgeFragmentsTable.embedding).toBeDefined(); + expect(knowledgeFragmentsTable.position).toBeDefined(); + expect(knowledgeFragmentsTable.createdAt).toBeDefined(); + }); + + it('should have documentId foreign key column', () => { + // Just verify the column exists + expect(knowledgeFragmentsTable.documentId).toBeDefined(); + }); + }); +}); diff --git a/src/actions.ts b/src/actions.ts index a976a31..bafacb1 100644 --- a/src/actions.ts +++ b/src/actions.ts @@ -6,88 +6,83 @@ import type { Memory, State, UUID, -} from "@elizaos/core"; -import { logger, createUniqueUuid } from "@elizaos/core"; -import * as fs from "fs"; -import * as path from "path"; -import { KnowledgeService } from "./service.ts"; -import { AddKnowledgeOptions } from "./types.ts"; +} from '@elizaos/core'; +import { 
logger, createUniqueUuid } from '@elizaos/core'; +import * as fs from 'fs'; +import * as path from 'path'; +import { KnowledgeService } from './service.ts'; +import { AddKnowledgeOptions } from './types.ts'; /** * Action to process knowledge from files or text */ export const processKnowledgeAction: Action = { - name: "PROCESS_KNOWLEDGE", + name: 'PROCESS_KNOWLEDGE', description: - "Process and store knowledge from a file path or text content into the knowledge base", + 'Process and store knowledge from a file path or text content into the knowledge base', similes: [], examples: [ [ { - name: "user", + name: 'user', content: { - text: "Process the document at /path/to/document.pdf", + text: 'Process the document at /path/to/document.pdf', }, }, { - name: "assistant", + name: 'assistant', content: { text: "I'll process the document at /path/to/document.pdf and add it to my knowledge base.", - actions: ["PROCESS_KNOWLEDGE"], + actions: ['PROCESS_KNOWLEDGE'], }, }, ], [ { - name: "user", + name: 'user', content: { - text: "Add this to your knowledge: The capital of France is Paris.", + text: 'Add this to your knowledge: The capital of France is Paris.', }, }, { - name: "assistant", + name: 'assistant', content: { text: "I'll add that information to my knowledge base.", - actions: ["PROCESS_KNOWLEDGE"], + actions: ['PROCESS_KNOWLEDGE'], }, }, ], ], validate: async (runtime: IAgentRuntime, message: Memory, state?: State) => { - const text = message.content.text?.toLowerCase() || ""; + const text = message.content.text?.toLowerCase() || ''; // Check if the message contains knowledge-related keywords const knowledgeKeywords = [ - "process", - "add", - "upload", - "document", - "knowledge", - "learn", - "remember", - "store", - "ingest", - "file", + 'process', + 'add', + 'upload', + 'document', + 'knowledge', + 'learn', + 'remember', + 'store', + 'ingest', + 'file', ]; - const hasKeyword = knowledgeKeywords.some((keyword) => - text.includes(keyword) - ); + const hasKeyword = knowledgeKeywords.some((keyword) => text.includes(keyword)); // Check if there's a file path mentioned - const pathPattern = - /(?:\/[\w.-]+)+|(?:[a-zA-Z]:[\\/][\w\s.-]+(?:[\\/][\w\s.-]+)*)/; + const pathPattern = /(?:\/[\w.-]+)+|(?:[a-zA-Z]:[\\/][\w\s.-]+(?:[\\/][\w\s.-]+)*)/; const hasPath = pathPattern.test(text); // Check if service is available const service = runtime.getService(KnowledgeService.serviceType); if (!service) { - logger.warn( - "Knowledge service not available for PROCESS_KNOWLEDGE action" - ); + logger.warn('Knowledge service not available for PROCESS_KNOWLEDGE action'); return false; } @@ -102,18 +97,15 @@ export const processKnowledgeAction: Action = { callback?: HandlerCallback ) => { try { - const service = runtime.getService( - KnowledgeService.serviceType - ); + const service = runtime.getService(KnowledgeService.serviceType); if (!service) { - throw new Error("Knowledge service not available"); + throw new Error('Knowledge service not available'); } - const text = message.content.text || ""; + const text = message.content.text || ''; // Extract file path from message - const pathPattern = - /(?:\/[\w.-]+)+|(?:[a-zA-Z]:[\\/][\w\s.-]+(?:[\\/][\w\s.-]+)*)/; + const pathPattern = /(?:\/[\w.-]+)+|(?:[a-zA-Z]:[\\/][\w\s.-]+(?:[\\/][\w\s.-]+)*)/; const pathMatch = text.match(pathPattern); let response: Content; @@ -140,14 +132,13 @@ export const processKnowledgeAction: Action = { const fileExt = path.extname(filePath).toLowerCase(); // Determine content type - let contentType = "text/plain"; - if (fileExt 
=== ".pdf") contentType = "application/pdf"; - else if (fileExt === ".docx") - contentType = - "application/vnd.openxmlformats-officedocument.wordprocessingml.document"; - else if (fileExt === ".doc") contentType = "application/msword"; - else if ([".txt", ".md", ".tson", ".xml", ".csv"].includes(fileExt)) - contentType = "text/plain"; + let contentType = 'text/plain'; + if (fileExt === '.pdf') contentType = 'application/pdf'; + else if (fileExt === '.docx') + contentType = 'application/vnd.openxmlformats-officedocument.wordprocessingml.document'; + else if (fileExt === '.doc') contentType = 'application/msword'; + else if (['.txt', '.md', '.tson', '.xml', '.csv'].includes(fileExt)) + contentType = 'text/plain'; // Prepare knowledge options const knowledgeOptions: AddKnowledgeOptions = { @@ -155,7 +146,7 @@ export const processKnowledgeAction: Action = { contentType, originalFilename: fileName, worldId: runtime.agentId, - content: fileBuffer.toString("base64"), + content: fileBuffer.toString('base64'), roomId: message.roomId, entityId: message.entityId, }; @@ -169,15 +160,12 @@ export const processKnowledgeAction: Action = { } else { // Process direct text content const knowledgeContent = text - .replace( - /^(add|store|remember|process|learn)\s+(this|that|the following)?:?\s*/i, - "" - ) + .replace(/^(add|store|remember|process|learn)\s+(this|that|the following)?:?\s*/i, '') .trim(); if (!knowledgeContent) { response = { - text: "I need some content to add to my knowledge base. Please provide text or a file path.", + text: 'I need some content to add to my knowledge base. Please provide text or a file path.', }; if (callback) { @@ -188,9 +176,12 @@ export const processKnowledgeAction: Action = { // Prepare knowledge options for text const knowledgeOptions: AddKnowledgeOptions = { - clientDocumentId: createUniqueUuid(runtime.agentId + "text" + Date.now(), "user-knowledge"), - contentType: "text/plain", - originalFilename: "user-knowledge.txt", + clientDocumentId: createUniqueUuid( + runtime.agentId + 'text' + Date.now(), + 'user-knowledge' + ), + contentType: 'text/plain', + originalFilename: 'user-knowledge.txt', worldId: runtime.agentId, content: knowledgeContent, roomId: message.roomId, @@ -209,10 +200,10 @@ export const processKnowledgeAction: Action = { await callback(response); } } catch (error) { - logger.error("Error in PROCESS_KNOWLEDGE action:", error); + logger.error('Error in PROCESS_KNOWLEDGE action:', error); const errorResponse: Content = { - text: `I encountered an error while processing the knowledge: ${error instanceof Error ? error.message : "Unknown error"}`, + text: `I encountered an error while processing the knowledge: ${error instanceof Error ? 
error.message : 'Unknown error'}`, }; if (callback) { @@ -226,60 +217,45 @@ export const processKnowledgeAction: Action = { * Action to search the knowledge base */ export const searchKnowledgeAction: Action = { - name: "SEARCH_KNOWLEDGE", - description: "Search the knowledge base for specific information", + name: 'SEARCH_KNOWLEDGE', + description: 'Search the knowledge base for specific information', similes: [ - "search knowledge", - "find information", - "look up", - "query knowledge base", - "search documents", - "find in knowledge", + 'search knowledge', + 'find information', + 'look up', + 'query knowledge base', + 'search documents', + 'find in knowledge', ], examples: [ [ { - name: "user", + name: 'user', content: { - text: "Search your knowledge for information about quantum computing", + text: 'Search your knowledge for information about quantum computing', }, }, { - name: "assistant", + name: 'assistant', content: { text: "I'll search my knowledge base for information about quantum computing.", - actions: ["SEARCH_KNOWLEDGE"], + actions: ['SEARCH_KNOWLEDGE'], }, }, ], ], validate: async (runtime: IAgentRuntime, message: Memory, state?: State) => { - const text = message.content.text?.toLowerCase() || ""; + const text = message.content.text?.toLowerCase() || ''; // Check if the message contains search-related keywords - const searchKeywords = [ - "search", - "find", - "look up", - "query", - "what do you know about", - ]; - const knowledgeKeywords = [ - "knowledge", - "information", - "document", - "database", - ]; + const searchKeywords = ['search', 'find', 'look up', 'query', 'what do you know about']; + const knowledgeKeywords = ['knowledge', 'information', 'document', 'database']; - const hasSearchKeyword = searchKeywords.some((keyword) => - text.includes(keyword) - ); - const hasKnowledgeKeyword = knowledgeKeywords.some((keyword) => - text.includes(keyword) - ); + const hasSearchKeyword = searchKeywords.some((keyword) => text.includes(keyword)); + const hasKnowledgeKeyword = knowledgeKeywords.some((keyword) => text.includes(keyword)); // Check if service is available const service = runtime.getService(KnowledgeService.serviceType); @@ -298,26 +274,21 @@ export const searchKnowledgeAction: Action = { callback?: HandlerCallback ) => { try { - const service = runtime.getService( - KnowledgeService.serviceType - ); + const service = runtime.getService(KnowledgeService.serviceType); if (!service) { - throw new Error("Knowledge service not available"); + throw new Error('Knowledge service not available'); } - const text = message.content.text || ""; + const text = message.content.text || ''; // Extract search query const query = text - .replace( - /^(search|find|look up|query)\s+(your\s+)?knowledge\s+(base\s+)?(for\s+)?/i, - "" - ) + .replace(/^(search|find|look up|query)\s+(your\s+)?knowledge\s+(base\s+)?(for\s+)?/i, '') .trim(); if (!query) { const response: Content = { - text: "What would you like me to search for in my knowledge base?", + text: 'What would you like me to search for in my knowledge base?', }; if (callback) { @@ -348,7 +319,7 @@ export const searchKnowledgeAction: Action = { const formattedResults = results .slice(0, 3) // Top 3 results .map((item, index) => `${index + 1}. 
${item.content.text}`) - .join("\n\n"); + .join('\n\n'); response = { text: `Here's what I found about "${query}":\n\n${formattedResults}`, @@ -359,10 +330,10 @@ export const searchKnowledgeAction: Action = { await callback(response); } } catch (error) { - logger.error("Error in SEARCH_KNOWLEDGE action:", error); + logger.error('Error in SEARCH_KNOWLEDGE action:', error); const errorResponse: Content = { - text: `I encountered an error while searching the knowledge base: ${error instanceof Error ? error.message : "Unknown error"}`, + text: `I encountered an error while searching the knowledge base: ${error instanceof Error ? error.message : 'Unknown error'}`, }; if (callback) { diff --git a/src/config.ts b/src/config.ts index 447efa7..2c88fb1 100644 --- a/src/config.ts +++ b/src/config.ts @@ -1,218 +1,101 @@ -import { ModelConfig, ModelConfigSchema, ProviderRateLimits } from './types.ts'; -import z from 'zod'; -import { logger, IAgentRuntime } from '@elizaos/core'; - /** - * Validates the model configuration using runtime settings - * @param runtime The agent runtime to get settings from - * @returns The validated configuration or throws an error + * Configuration validation for the Knowledge plugin */ -export function validateModelConfig(runtime?: IAgentRuntime): ModelConfig { - try { - // Helper function to get setting from runtime or fallback to process.env - const getSetting = (key: string, defaultValue?: string) => { - if (runtime) { - return runtime.getSetting(key) || defaultValue; - } - return process.env[key] || defaultValue; - }; - - // Determine if contextual Knowledge is enabled - const ctxKnowledgeEnabled = getSetting('CTX_KNOWLEDGE_ENABLED') === 'true'; - logger.debug(`Configuration: CTX_KNOWLEDGE_ENABLED=${ctxKnowledgeEnabled}`); - - // If EMBEDDING_PROVIDER is not provided, assume we're using plugin-openai - const embeddingProvider = getSetting('EMBEDDING_PROVIDER'); - const assumePluginOpenAI = !embeddingProvider; - - if (assumePluginOpenAI) { - const openaiApiKey = getSetting('OPENAI_API_KEY'); - const openaiEmbeddingModel = getSetting('OPENAI_EMBEDDING_MODEL'); - - if (openaiApiKey && openaiEmbeddingModel) { - logger.info('EMBEDDING_PROVIDER not specified, using configuration from plugin-openai'); - } else { - logger.warn( - 'EMBEDDING_PROVIDER not specified, but plugin-openai configuration incomplete. Check OPENAI_API_KEY and OPENAI_EMBEDDING_MODEL.' 
- ); - } - } - - // Set embedding provider defaults based on plugin-openai if EMBEDDING_PROVIDER is not set - const finalEmbeddingProvider = embeddingProvider || 'openai'; - const textEmbeddingModel = - getSetting('TEXT_EMBEDDING_MODEL') || - getSetting('OPENAI_EMBEDDING_MODEL') || - 'text-embedding-3-small'; - const embeddingDimension = - getSetting('EMBEDDING_DIMENSION') || getSetting('OPENAI_EMBEDDING_DIMENSIONS') || '1536'; - - // Use OpenAI API key from runtime settings - const openaiApiKey = getSetting('OPENAI_API_KEY'); - - const config = ModelConfigSchema.parse({ - EMBEDDING_PROVIDER: finalEmbeddingProvider, - TEXT_PROVIDER: getSetting('TEXT_PROVIDER'), - - OPENAI_API_KEY: openaiApiKey, - ANTHROPIC_API_KEY: getSetting('ANTHROPIC_API_KEY'), - OPENROUTER_API_KEY: getSetting('OPENROUTER_API_KEY'), - GOOGLE_API_KEY: getSetting('GOOGLE_API_KEY'), - - OPENAI_BASE_URL: getSetting('OPENAI_BASE_URL'), - ANTHROPIC_BASE_URL: getSetting('ANTHROPIC_BASE_URL'), - OPENROUTER_BASE_URL: getSetting('OPENROUTER_BASE_URL'), - GOOGLE_BASE_URL: getSetting('GOOGLE_BASE_URL'), - - TEXT_EMBEDDING_MODEL: textEmbeddingModel, - TEXT_MODEL: getSetting('TEXT_MODEL'), - - MAX_INPUT_TOKENS: getSetting('MAX_INPUT_TOKENS', '4000'), - MAX_OUTPUT_TOKENS: getSetting('MAX_OUTPUT_TOKENS', '4096'), - - EMBEDDING_DIMENSION: embeddingDimension, - - CTX_KNOWLEDGE_ENABLED: ctxKnowledgeEnabled, - }); - - validateConfigRequirements(config, assumePluginOpenAI); - return config; - } catch (error) { - if (error instanceof z.ZodError) { - const issues = error.issues - .map((issue) => `${issue.path.join('.')}: ${issue.message}`) - .join(', '); - throw new Error(`Model configuration validation failed: ${issues}`); - } - throw error; - } +import type { IAgentRuntime } from '@elizaos/core'; +import { logger } from '@elizaos/core'; + +export interface ValidatedModelConfig { + CTX_KNOWLEDGE_ENABLED: boolean; + LOAD_DOCS_ON_STARTUP: boolean; + MAX_INPUT_TOKENS?: number; + MAX_OUTPUT_TOKENS?: number; + EMBEDDING_PROVIDER: string; + TEXT_PROVIDER?: string; + TEXT_EMBEDDING_MODEL: string; } /** - * Validates the required API keys and configuration based on the selected mode - * @param config The model configuration to validate - * @param assumePluginOpenAI Whether we're assuming plugin-openai is being used - * @throws Error if a required configuration value is missing + * Validates the model configuration for the Knowledge plugin + * @param runtime The agent runtime instance + * @returns Validated configuration object */ -function validateConfigRequirements(config: ModelConfig, assumePluginOpenAI: boolean): void { - // Skip validation for embedding provider if we're using plugin-openai's configuration - if (!assumePluginOpenAI) { - // Only validate embedding provider if not using plugin-openai - if (config.EMBEDDING_PROVIDER === 'openai' && !config.OPENAI_API_KEY) { - throw new Error('OPENAI_API_KEY is required when EMBEDDING_PROVIDER is set to "openai"'); - } - if (config.EMBEDDING_PROVIDER === 'google' && !config.GOOGLE_API_KEY) { - throw new Error('GOOGLE_API_KEY is required when EMBEDDING_PROVIDER is set to "google"'); - } - } else { - // If we're assuming plugin-openai, make sure we have the required values - if (!config.OPENAI_API_KEY) { - throw new Error('OPENAI_API_KEY is required when using plugin-openai configuration'); - } - if (!config.TEXT_EMBEDDING_MODEL) { - throw new Error('OPENAI_EMBEDDING_MODEL is required when using plugin-openai configuration'); - } +export function validateModelConfig(runtime?: IAgentRuntime): 
ValidatedModelConfig { + // Check if CTX_KNOWLEDGE_ENABLED is set + const ctxKnowledgeEnabled = + runtime?.getSetting('CTX_KNOWLEDGE_ENABLED') === 'true' || + process.env.CTX_KNOWLEDGE_ENABLED === 'true' || + false; + + // Check if docs should be loaded on startup + const loadDocsOnStartup = + runtime?.getSetting('LOAD_DOCS_ON_STARTUP') !== 'false' && + process.env.LOAD_DOCS_ON_STARTUP !== 'false'; + + // Get token limits + const maxInputTokens = parseInt( + runtime?.getSetting('MAX_INPUT_TOKENS') || process.env.MAX_INPUT_TOKENS || '4000' + ); + const maxOutputTokens = parseInt( + runtime?.getSetting('MAX_OUTPUT_TOKENS') || process.env.MAX_OUTPUT_TOKENS || '4096' + ); + + // Get embedding provider configuration + let embeddingProvider = + runtime?.getSetting('EMBEDDING_PROVIDER') || process.env.EMBEDDING_PROVIDER || ''; + let textEmbeddingModel = + runtime?.getSetting('TEXT_EMBEDDING_MODEL') || process.env.TEXT_EMBEDDING_MODEL || ''; + + // Auto-detect from plugin-openai if not explicitly set + if (!embeddingProvider && runtime) { + // Since getModel returns a function, we can't check provider directly + // Instead, just default to openai if not set + embeddingProvider = 'openai'; + textEmbeddingModel = textEmbeddingModel || 'text-embedding-3-small'; + logger.info('Defaulting to OpenAI provider for embeddings'); } - // If Contextual Knowledge is enabled, we need additional validations - if (config.CTX_KNOWLEDGE_ENABLED) { - logger.info('Contextual Knowledge is enabled. Validating text generation settings...'); - - // Validate API keys based on the text provider - if (config.TEXT_PROVIDER === 'openai' && !config.OPENAI_API_KEY) { - throw new Error('OPENAI_API_KEY is required when TEXT_PROVIDER is set to "openai"'); - } - if (config.TEXT_PROVIDER === 'anthropic' && !config.ANTHROPIC_API_KEY) { - throw new Error('ANTHROPIC_API_KEY is required when TEXT_PROVIDER is set to "anthropic"'); - } - if (config.TEXT_PROVIDER === 'openrouter' && !config.OPENROUTER_API_KEY) { - throw new Error('OPENROUTER_API_KEY is required when TEXT_PROVIDER is set to "openrouter"'); - } - if (config.TEXT_PROVIDER === 'google' && !config.GOOGLE_API_KEY) { - throw new Error('GOOGLE_API_KEY is required when TEXT_PROVIDER is set to "google"'); - } + // Get text generation provider configuration (only needed if CTX_KNOWLEDGE_ENABLED) + let textProvider: string | undefined; + if (ctxKnowledgeEnabled) { + textProvider = runtime?.getSetting('TEXT_PROVIDER') || process.env.TEXT_PROVIDER || ''; - // If using OpenRouter with Claude or Gemini models, check for additional recommended configurations - if (config.TEXT_PROVIDER === 'openrouter') { - const modelName = config.TEXT_MODEL?.toLowerCase() || ''; - if (modelName.includes('claude') || modelName.includes('gemini')) { - logger.info( - `Using ${modelName} with OpenRouter. This configuration supports document caching for improved performance.` - ); - } - } - } else { - // Log appropriate message based on where embedding config came from - if (assumePluginOpenAI) { - logger.info( - 'Contextual Knowledge is disabled. Using embedding configuration from plugin-openai.' - ); - } else { - logger.info('Contextual Knowledge is disabled. 
Using basic embedding-only configuration.'); + // Auto-detect text provider if not set + if (!textProvider && runtime) { + // Default to openai if not set + textProvider = 'openai'; + logger.info('Defaulting to OpenAI provider for text generation'); } } -} - -/** - * Returns rate limit information for the configured providers - * - * @param runtime The agent runtime to get settings from - * @returns Rate limit configuration for the current providers - */ -export async function getProviderRateLimits(runtime?: IAgentRuntime): Promise { - const config = validateModelConfig(runtime); - // Helper function to get setting from runtime or fallback to process.env - const getSetting = (key: string, defaultValue: string) => { - if (runtime) { - return runtime.getSetting(key) || defaultValue; - } - return process.env[key] || defaultValue; - }; - - // Get rate limit values from runtime settings or use defaults - const maxConcurrentRequests = parseInt(getSetting('MAX_CONCURRENT_REQUESTS', '30'), 10); - const requestsPerMinute = parseInt(getSetting('REQUESTS_PER_MINUTE', '60'), 10); - const tokensPerMinute = parseInt(getSetting('TOKENS_PER_MINUTE', '150000'), 10); - - // Provider-specific rate limits - switch (config.EMBEDDING_PROVIDER) { - case 'openai': - // OpenAI typically allows 150,000 tokens per minute for embeddings - // and up to 3,000 RPM for Tier 4+ accounts - return { - maxConcurrentRequests, - requestsPerMinute: Math.min(requestsPerMinute, 3000), - tokensPerMinute: Math.min(tokensPerMinute, 150000), - provider: 'openai', - }; + // Validate required configurations + if (!embeddingProvider) { + throw new Error( + 'Knowledge plugin requires an embedding provider. ' + + 'Please set EMBEDDING_PROVIDER environment variable or ensure plugin-openai is loaded.' + ); + } - case 'google': - // Google's default is 60 requests per minute - return { - maxConcurrentRequests, - requestsPerMinute: Math.min(requestsPerMinute, 60), - tokensPerMinute: Math.min(tokensPerMinute, 100000), - provider: 'google', - }; + if (!textEmbeddingModel) { + throw new Error( + 'Knowledge plugin requires TEXT_EMBEDDING_MODEL to be set. ' + + 'Example: TEXT_EMBEDDING_MODEL=text-embedding-3-small' + ); + } - default: - // Use default values for unknown providers - return { - maxConcurrentRequests, - requestsPerMinute, - tokensPerMinute, - provider: config.EMBEDDING_PROVIDER, - }; + if (ctxKnowledgeEnabled && !textProvider) { + throw new Error( + 'When CTX_KNOWLEDGE_ENABLED=true, TEXT_PROVIDER must be set. ' + + 'Example: TEXT_PROVIDER=openai' + ); } -} -/** - * Helper function to get integer value from environment variables - * @param envVar The environment variable name - * @param defaultValue The default value if not present - * @returns The parsed integer value - */ -function getEnvInt(envVar: string, defaultValue: number): number { - return process.env[envVar] ? parseInt(process.env[envVar]!, 10) : defaultValue; + return { + CTX_KNOWLEDGE_ENABLED: ctxKnowledgeEnabled, + LOAD_DOCS_ON_STARTUP: loadDocsOnStartup, + MAX_INPUT_TOKENS: maxInputTokens, + MAX_OUTPUT_TOKENS: maxOutputTokens, + EMBEDDING_PROVIDER: embeddingProvider, + TEXT_PROVIDER: textProvider, + TEXT_EMBEDDING_MODEL: textEmbeddingModel, + }; } diff --git a/src/ctx-embeddings.ts b/src/ctx-embeddings.ts index 3491c86..7ffd22c 100644 --- a/src/ctx-embeddings.ts +++ b/src/ctx-embeddings.ts @@ -48,24 +48,24 @@ export const CONTEXT_TARGETS = { * This system prompt is more concise and focused on the specific task. 
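 * The same text is reused as SYSTEM_PROMPTS.DEFAULT below.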
*/ export const SYSTEM_PROMPT = - "You are a precision text augmentation tool. Your task is to expand a given text chunk with its direct context from a larger document. You must: 1) Keep the original chunk intact; 2) Add critical context from surrounding text; 3) Never summarize or rephrase the original chunk; 4) Create contextually rich output for improved semantic retrieval."; + 'You are a precision text augmentation tool. Your task is to expand a given text chunk with its direct context from a larger document. You must: 1) Keep the original chunk intact; 2) Add critical context from surrounding text; 3) Never summarize or rephrase the original chunk; 4) Create contextually rich output for improved semantic retrieval.'; /** * System prompts optimized for different content types with caching support */ export const SYSTEM_PROMPTS = { DEFAULT: - "You are a precision text augmentation tool. Your task is to expand a given text chunk with its direct context from a larger document. You must: 1) Keep the original chunk intact; 2) Add critical context from surrounding text; 3) Never summarize or rephrase the original chunk; 4) Create contextually rich output for improved semantic retrieval.", + 'You are a precision text augmentation tool. Your task is to expand a given text chunk with its direct context from a larger document. You must: 1) Keep the original chunk intact; 2) Add critical context from surrounding text; 3) Never summarize or rephrase the original chunk; 4) Create contextually rich output for improved semantic retrieval.', - CODE: "You are a precision code augmentation tool. Your task is to expand a given code chunk with necessary context from the larger codebase. You must: 1) Keep the original code chunk intact with exact syntax and indentation; 2) Add relevant imports, function signatures, or class definitions; 3) Include critical surrounding code context; 4) Create contextually rich output that maintains correct syntax.", + CODE: 'You are a precision code augmentation tool. Your task is to expand a given code chunk with necessary context from the larger codebase. You must: 1) Keep the original code chunk intact with exact syntax and indentation; 2) Add relevant imports, function signatures, or class definitions; 3) Include critical surrounding code context; 4) Create contextually rich output that maintains correct syntax.', PDF: "You are a precision document augmentation tool. Your task is to expand a given PDF text chunk with its direct context from the larger document. You must: 1) Keep the original chunk intact; 2) Add section headings, references, or figure captions; 3) Include text that immediately precedes and follows the chunk; 4) Create contextually rich output that maintains the document's original structure.", MATH_PDF: - "You are a precision mathematical content augmentation tool. Your task is to expand a given mathematical text chunk with essential context. You must: 1) Keep original mathematical notations and expressions exactly as they appear; 2) Add relevant definitions, theorems, or equations from elsewhere in the document; 3) Preserve all LaTeX or mathematical formatting; 4) Create contextually rich output for improved mathematical comprehension.", + 'You are a precision mathematical content augmentation tool. Your task is to expand a given mathematical text chunk with essential context. 
You must: 1) Keep original mathematical notations and expressions exactly as they appear; 2) Add relevant definitions, theorems, or equations from elsewhere in the document; 3) Preserve all LaTeX or mathematical formatting; 4) Create contextually rich output for improved mathematical comprehension.', TECHNICAL: - "You are a precision technical documentation augmentation tool. Your task is to expand a technical document chunk with critical context. You must: 1) Keep the original chunk intact including all technical terminology; 2) Add relevant configuration examples, parameter definitions, or API references; 3) Include any prerequisite information; 4) Create contextually rich output that maintains technical accuracy.", + 'You are a precision technical documentation augmentation tool. Your task is to expand a technical document chunk with critical context. You must: 1) Keep the original chunk intact including all technical terminology; 2) Add relevant configuration examples, parameter definitions, or API references; 3) Include any prerequisite information; 4) Create contextually rich output that maintains technical accuracy.', }; /** @@ -281,10 +281,8 @@ export function getContextualizationPrompt( promptTemplate = CONTEXTUAL_CHUNK_ENRICHMENT_PROMPT_TEMPLATE ): string { if (!docContent || !chunkContent) { - console.warn( - "Document content or chunk content is missing for contextualization." - ); - return "Error: Document or chunk content missing."; + console.warn('Document content or chunk content is missing for contextualization.'); + return 'Error: Document or chunk content missing.'; } // Estimate if the chunk is already large relative to our target size @@ -298,10 +296,10 @@ export function getContextualizationPrompt( } return promptTemplate - .replace("{doc_content}", docContent) - .replace("{chunk_content}", chunkContent) - .replace("{min_tokens}", minTokens.toString()) - .replace("{max_tokens}", maxTokens.toString()); + .replace('{doc_content}', docContent) + .replace('{chunk_content}', chunkContent) + .replace('{min_tokens}', minTokens.toString()) + .replace('{max_tokens}', maxTokens.toString()); } /** @@ -321,9 +319,9 @@ export function getCachingContextualizationPrompt( maxTokens = CONTEXT_TARGETS.DEFAULT.MAX_TOKENS ): { prompt: string; systemPrompt: string } { if (!chunkContent) { - console.warn("Chunk content is missing for contextualization."); + console.warn('Chunk content is missing for contextualization.'); return { - prompt: "Error: Chunk content missing.", + prompt: 'Error: Chunk content missing.', systemPrompt: SYSTEM_PROMPTS.DEFAULT, }; } @@ -344,16 +342,16 @@ export function getCachingContextualizationPrompt( if (contentType) { if ( - contentType.includes("javascript") || - contentType.includes("typescript") || - contentType.includes("python") || - contentType.includes("java") || - contentType.includes("c++") || - contentType.includes("code") + contentType.includes('javascript') || + contentType.includes('typescript') || + contentType.includes('python') || + contentType.includes('java') || + contentType.includes('c++') || + contentType.includes('code') ) { promptTemplate = CACHED_CODE_CHUNK_PROMPT_TEMPLATE; systemPrompt = SYSTEM_PROMPTS.CODE; - } else if (contentType.includes("pdf")) { + } else if (contentType.includes('pdf')) { if (containsMathematicalContent(chunkContent)) { promptTemplate = CACHED_MATH_PDF_PROMPT_TEMPLATE; systemPrompt = SYSTEM_PROMPTS.MATH_PDF; @@ -361,8 +359,8 @@ export function getCachingContextualizationPrompt( systemPrompt = SYSTEM_PROMPTS.PDF; } 
} else if ( - contentType.includes("markdown") || - contentType.includes("text/html") || + contentType.includes('markdown') || + contentType.includes('text/html') || isTechnicalDocumentation(chunkContent) ) { promptTemplate = CACHED_TECHNICAL_PROMPT_TEMPLATE; @@ -371,9 +369,9 @@ export function getCachingContextualizationPrompt( } const formattedPrompt = promptTemplate - .replace("{chunk_content}", chunkContent) - .replace("{min_tokens}", minTokens.toString()) - .replace("{max_tokens}", maxTokens.toString()); + .replace('{chunk_content}', chunkContent) + .replace('{min_tokens}', minTokens.toString()) + .replace('{max_tokens}', maxTokens.toString()); return { prompt: formattedPrompt, @@ -399,48 +397,42 @@ export function getPromptForMimeType( let promptTemplate = CONTEXTUAL_CHUNK_ENRICHMENT_PROMPT_TEMPLATE; // Determine document type and apply appropriate settings - if (mimeType.includes("pdf")) { + if (mimeType.includes('pdf')) { // Check if PDF contains mathematical content if (containsMathematicalContent(docContent)) { minTokens = CONTEXT_TARGETS.MATH_PDF.MIN_TOKENS; maxTokens = CONTEXT_TARGETS.MATH_PDF.MAX_TOKENS; promptTemplate = MATH_PDF_PROMPT_TEMPLATE; - console.debug("Using mathematical PDF prompt template"); + console.debug('Using mathematical PDF prompt template'); } else { minTokens = CONTEXT_TARGETS.PDF.MIN_TOKENS; maxTokens = CONTEXT_TARGETS.PDF.MAX_TOKENS; - console.debug("Using standard PDF settings"); + console.debug('Using standard PDF settings'); } } else if ( - mimeType.includes("javascript") || - mimeType.includes("typescript") || - mimeType.includes("python") || - mimeType.includes("java") || - mimeType.includes("c++") || - mimeType.includes("code") + mimeType.includes('javascript') || + mimeType.includes('typescript') || + mimeType.includes('python') || + mimeType.includes('java') || + mimeType.includes('c++') || + mimeType.includes('code') ) { minTokens = CONTEXT_TARGETS.CODE.MIN_TOKENS; maxTokens = CONTEXT_TARGETS.CODE.MAX_TOKENS; promptTemplate = CODE_PROMPT_TEMPLATE; - console.debug("Using code prompt template"); + console.debug('Using code prompt template'); } else if ( isTechnicalDocumentation(docContent) || - mimeType.includes("markdown") || - mimeType.includes("text/html") + mimeType.includes('markdown') || + mimeType.includes('text/html') ) { minTokens = CONTEXT_TARGETS.TECHNICAL.MIN_TOKENS; maxTokens = CONTEXT_TARGETS.TECHNICAL.MAX_TOKENS; promptTemplate = TECHNICAL_PROMPT_TEMPLATE; - console.debug("Using technical documentation prompt template"); + console.debug('Using technical documentation prompt template'); } - return getContextualizationPrompt( - docContent, - chunkContent, - minTokens, - maxTokens, - promptTemplate - ); + return getContextualizationPrompt(docContent, chunkContent, minTokens, maxTokens, promptTemplate); } /** @@ -459,7 +451,7 @@ export function getCachingPromptForMimeType( let maxTokens = CONTEXT_TARGETS.DEFAULT.MAX_TOKENS; // Determine appropriate token targets based on content type - if (mimeType.includes("pdf")) { + if (mimeType.includes('pdf')) { if (containsMathematicalContent(chunkContent)) { minTokens = CONTEXT_TARGETS.MATH_PDF.MIN_TOKENS; maxTokens = CONTEXT_TARGETS.MATH_PDF.MAX_TOKENS; @@ -468,30 +460,25 @@ export function getCachingPromptForMimeType( maxTokens = CONTEXT_TARGETS.PDF.MAX_TOKENS; } } else if ( - mimeType.includes("javascript") || - mimeType.includes("typescript") || - mimeType.includes("python") || - mimeType.includes("java") || - mimeType.includes("c++") || - mimeType.includes("code") + 
mimeType.includes('javascript') || + mimeType.includes('typescript') || + mimeType.includes('python') || + mimeType.includes('java') || + mimeType.includes('c++') || + mimeType.includes('code') ) { minTokens = CONTEXT_TARGETS.CODE.MIN_TOKENS; maxTokens = CONTEXT_TARGETS.CODE.MAX_TOKENS; } else if ( isTechnicalDocumentation(chunkContent) || - mimeType.includes("markdown") || - mimeType.includes("text/html") + mimeType.includes('markdown') || + mimeType.includes('text/html') ) { minTokens = CONTEXT_TARGETS.TECHNICAL.MIN_TOKENS; maxTokens = CONTEXT_TARGETS.TECHNICAL.MAX_TOKENS; } - return getCachingContextualizationPrompt( - chunkContent, - mimeType, - minTokens, - maxTokens - ); + return getCachingContextualizationPrompt(chunkContent, mimeType, minTokens, maxTokens); } /** @@ -541,24 +528,22 @@ function containsMathematicalContent(content: string): boolean { // Keyword analysis const mathKeywords = [ - "theorem", - "lemma", - "proof", - "equation", - "function", - "derivative", - "integral", - "matrix", - "vector", - "algorithm", - "constraint", - "coefficient", + 'theorem', + 'lemma', + 'proof', + 'equation', + 'function', + 'derivative', + 'integral', + 'matrix', + 'vector', + 'algorithm', + 'constraint', + 'coefficient', ]; const contentLower = content.toLowerCase(); - const mathKeywordCount = mathKeywords.filter((keyword) => - contentLower.includes(keyword) - ).length; + const mathKeywordCount = mathKeywords.filter((keyword) => contentLower.includes(keyword)).length; // If multiple math keywords are present, it likely contains math return mathKeywordCount >= 2; @@ -619,21 +604,16 @@ function isTechnicalDocumentation(content: string): boolean { * @param generatedContext - The contextual enrichment generated by the LLM. * @returns The enriched chunk, or the original chunkContent if the enrichment is empty. */ -export function getChunkWithContext( - chunkContent: string, - generatedContext: string -): string { - if (!generatedContext || generatedContext.trim() === "") { - console.warn( - "Generated context is empty. Falling back to original chunk content." - ); +export function getChunkWithContext(chunkContent: string, generatedContext: string): string { + if (!generatedContext || generatedContext.trim() === '') { + console.warn('Generated context is empty. Falling back to original chunk content.'); return chunkContent; } // Verify that the generated context contains the original chunk if (!generatedContext.includes(chunkContent)) { console.warn( - "Generated context does not contain the original chunk. Appending original to ensure data integrity." + 'Generated context does not contain the original chunk. Appending original to ensure data integrity.' 
); return `${generatedContext.trim()}\n\n${chunkContent}`; } diff --git a/src/docs-loader.ts b/src/docs-loader.ts index 5461727..dd111e1 100644 --- a/src/docs-loader.ts +++ b/src/docs-loader.ts @@ -1,8 +1,8 @@ -import { logger, UUID, createUniqueUuid } from "@elizaos/core"; -import * as fs from "fs"; -import * as path from "path"; -import { KnowledgeService } from "./service.ts"; -import { AddKnowledgeOptions } from "./types.ts"; +import { logger, UUID, createUniqueUuid } from '@elizaos/core'; +import * as fs from 'fs'; +import * as path from 'path'; +import { KnowledgeService } from './service.ts'; +import { AddKnowledgeOptions } from './types.ts'; import { isBinaryContentType } from './utils.ts'; /** diff --git a/src/document-processor.ts b/src/document-processor.ts index da3bff1..7890701 100644 --- a/src/document-processor.ts +++ b/src/document-processor.ts @@ -9,7 +9,7 @@ import { } from '@elizaos/core'; import { Buffer } from 'node:buffer'; import { v4 as uuidv4 } from 'uuid'; -import { getProviderRateLimits, validateModelConfig } from './config.ts'; +import { validateModelConfig } from './config.ts'; import { DEFAULT_CHARS_PER_TOKEN, DEFAULT_CHUNK_OVERLAP_TOKENS, @@ -82,8 +82,13 @@ export async function processFragmentsSynchronously({ logger.info(`Split content into ${chunks.length} chunks for document ${documentId}`); - // Get provider limits for rate limiting - const providerLimits = await getProviderRateLimits(); + // Use default rate limits + const providerLimits = { + maxConcurrentRequests: 30, + requestsPerMinute: 60, + tokensPerMinute: 150000, + provider: 'default', + }; const CONCURRENCY_LIMIT = Math.min(30, providerLimits.maxConcurrentRequests || 30); const rateLimiter = createRateLimiter(providerLimits.requestsPerMinute || 60); @@ -487,16 +492,19 @@ async function generateContextsInBatch( return []; } - const providerLimits = await getProviderRateLimits(); + const providerLimits = { + maxConcurrentRequests: 30, + requestsPerMinute: 60, + tokensPerMinute: 150000, + provider: 'default', + }; const rateLimiter = createRateLimiter(providerLimits.requestsPerMinute || 60); // Get active provider from validateModelConfig const config = validateModelConfig(); const isUsingOpenRouter = config.TEXT_PROVIDER === 'openrouter'; - const isUsingCacheCapableModel = - isUsingOpenRouter && - (config.TEXT_MODEL?.toLowerCase().includes('claude') || - config.TEXT_MODEL?.toLowerCase().includes('gemini')); + // For now, assume no cache capable model since TEXT_MODEL is not in our simplified config + const isUsingCacheCapableModel = false; // For now custom TEXT_PROVIDER is not supported. 
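// (Document caching previously applied when TEXT_PROVIDER was openrouter and TEXT_MODEL named a Claude or Gemini model.)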
// logger.info(
@@ -553,7 +561,7 @@ async function generateContextsInBatch(
        `context generation for chunk ${item.originalIndex}`
      );

-      const generatedContext = llmResponse.text;
+      const generatedContext = (llmResponse as any).text || llmResponse;
      const contextualizedText = getChunkWithContext(item.chunkText, generatedContext);

      logger.debug(
diff --git a/src/frontend/index.tsx b/src/frontend/index.tsx
index b6868bd..3a48019 100644
--- a/src/frontend/index.tsx
+++ b/src/frontend/index.tsx
@@ -2,7 +2,7 @@ import { QueryClient, QueryClientProvider } from '@tanstack/react-query';
 import { createRoot } from 'react-dom/client';
 import './index.css';
 import React from 'react';
-import { KnowledgeTab } from './ui/knowledge-tab.tsx';
+import { KnowledgeTab } from './ui/knowledge-tab.js';
 import type { UUID } from '@elizaos/core';

 const queryClient = new QueryClient();
diff --git a/src/frontend/test-components.html b/src/frontend/test-components.html
new file mode 100644
index 0000000..d60e36b
--- /dev/null
+++ b/src/frontend/test-components.html
@@ -0,0 +1,315 @@
+<!-- [markup not recoverable; the 315-line page "UI Components Test Page" renders data-testid fixtures for the Cypress suites: Badge Components (Test Badge, Outline, Secondary, Destructive, and Custom badges); Button Components (variant and size buttons, a clickable button with a "Click count: 0" readout, and a disabled button); Card Components (Test Card Title, Test Description, Test Content, Test Footer); Input Components (text, file, controlled, and placeholder inputs); Table Components (Test Caption, headers Column 1 and Column 2, cells Cell 1 and Cell 2, footers Footer 1 and Footer 2); Tabs Components (two tab triggers and "Content for Tab 1"); and a KnowledgeTab Component mount point] -->
diff --git a/src/frontend/ui/badge.tsx b/src/frontend/ui/badge.tsx
index bacc5b2..fa6c356 100644
--- a/src/frontend/ui/badge.tsx
+++ b/src/frontend/ui/badge.tsx
@@ -17,8 +17,6 @@ export function Badge({ children, variant = 'default', className = '' }: BadgePr
   };

   return (
-
-      {children}
-
+    {children}
   );
 }
diff --git a/src/frontend/ui/button.tsx b/src/frontend/ui/button.tsx
index ff388ee..617700a 100644
--- a/src/frontend/ui/button.tsx
+++ b/src/frontend/ui/button.tsx
@@ -19,9 +19,10 @@ export function Button({
   onClick,
   disabled = false,
   title,
-  type = 'button'
+  type = 'button',
 }: ButtonProps) {
-  const baseClasses = 'inline-flex items-center justify-center rounded-md font-medium transition-colors focus-visible:outline-none focus-visible:ring-2 focus-visible:ring-ring focus-visible:ring-offset-2 disabled:opacity-50 disabled:pointer-events-none';
+  const baseClasses =
+    'inline-flex items-center justify-center rounded-md font-medium transition-colors focus-visible:outline-none focus-visible:ring-2 focus-visible:ring-ring focus-visible:ring-offset-2 disabled:opacity-50 disabled:pointer-events-none';

   const variantClasses = {
     default: 'bg-primary text-primary-foreground hover:bg-primary/90',
diff --git a/src/frontend/ui/card.tsx b/src/frontend/ui/card.tsx
index 935123a..cf569fa 100644
--- a/src/frontend/ui/card.tsx
+++ b/src/frontend/ui/card.tsx
@@ -14,19 +14,11 @@ export function Card({ children, className = '' }: CardProps) {
 }

 export function CardHeader({ children, className = '' }: CardProps) {
-  return (
-
- {children} -
- ); + return
{children}
; } export function CardFooter({ children, className = '' }: CardProps) { - return ( -
- {children} -
- ); + return
{children}
; } export function CardTitle({ children, className = '' }: CardProps) { @@ -38,17 +30,9 @@ export function CardTitle({ children, className = '' }: CardProps) { } export function CardDescription({ children, className = '' }: CardProps) { - return ( -

- {children} -

- ); + return

{children}

; } export function CardContent({ children, className = '' }: CardProps) { - return ( -
- {children} -
- ); + return
{children}
; } diff --git a/src/frontend/ui/knowledge-tab.tsx b/src/frontend/ui/knowledge-tab.tsx index fa44141..9614035 100644 --- a/src/frontend/ui/knowledge-tab.tsx +++ b/src/frontend/ui/knowledge-tab.tsx @@ -1,6 +1,16 @@ import React from 'react'; import type { UUID, Memory } from '@elizaos/core'; -import { Book, Clock, File, FileText, LoaderIcon, Trash2, Upload, List, Network } from 'lucide-react'; +import { + Book, + Clock, + File, + FileText, + LoaderIcon, + Trash2, + Upload, + List, + Network, +} from 'lucide-react'; import { useCallback, useEffect, useRef, useState } from 'react'; import { useQuery, useMutation, useQueryClient } from '@tanstack/react-query'; import { ExtendedMemoryMetadata } from '../../types'; @@ -16,1055 +26,1279 @@ import { MemoryGraph } from './memory-graph'; // Local utility function instead of importing from client const cn = (...classes: (string | undefined | null | false)[]) => { - return classes.filter(Boolean).join(' '); + return classes.filter(Boolean).join(' '); }; // Temporary toast implementation const useToast = () => ({ - toast: ({ title, description, variant }: { title: string; description: string; variant?: string }) => { - console.log(`Toast: ${title} - ${description} (${variant || 'default'})`); - // TODO: Implement proper toast functionality - } + toast: ({ + title, + description, + variant, + }: { + title: string; + description: string; + variant?: string; + }) => { + console.log(`Toast: ${title} - ${description} (${variant || 'default'})`); + // TODO: Implement proper toast functionality + }, }); // Simple Dialog components for now -const Dialog = ({ open, onOpenChange, children }: { open: boolean; onOpenChange: (open: boolean) => void; children: React.ReactNode }) => { - if (!open) return null; - return ( -
    <div className={…} onClick={() => onOpenChange(false)}>
-      <div className={…} onClick={(e) => e.stopPropagation()}>
-        {children}
-      </div>
-    </div>
-  );
-};
+const Dialog = ({
+  open,
+  onOpenChange,
+  children,
+}: {
+  open: boolean;
+  onOpenChange: (open: boolean) => void;
+  children: React.ReactNode;
+}) => {
+  if (!open) return null;
+  return (
+    <div
+      className={…}
+      onClick={() => onOpenChange(false)}
+    >
+      <div
+        className={…}
+        onClick={(e) => e.stopPropagation()}
+      >
+        {children}
+      </div>
+    </div>
+  );
+};

-const DialogContent = ({ className, children }: { className?: string; children: React.ReactNode }) => (
-  <div className={…}>
-    {children}
-  </div>
-);
-
-const DialogHeader = ({ className, children }: { className?: string; children: React.ReactNode }) => (
-  <div className={…}>
-    {children}
-  </div>
-);
-
-const DialogTitle = ({ className, children }: { className?: string; children: React.ReactNode }) => (
-  <h2 className={…}>
-    {children}
-  </h2>
-);
-
-const DialogDescription = ({ className, children }: { className?: string; children: React.ReactNode }) => (
-  <p className={…}>
-    {children}
-  </p>
-);
-
-const DialogFooter = ({ className, children }: { className?: string; children: React.ReactNode }) => (
-  <div className={…}>
-    {children}
-  </div>
-);
+const DialogContent = ({
+  className,
+  children,
+}: {
+  className?: string;
+  children: React.ReactNode;
+}) => <div className={…}>{children}</div>;
+
+const DialogHeader = ({
+  className,
+  children,
+}: {
+  className?: string;
+  children: React.ReactNode;
+}) => <div className={…}>{children}</div>;
+
+const DialogTitle = ({
+  className,
+  children,
+}: {
+  className?: string;
+  children: React.ReactNode;
+}) => <h2 className={…}>{children}</h2>;
+
+const DialogDescription = ({
+  className,
+  children,
+}: {
+  className?: string;
+  children: React.ReactNode;
+}) => <p className={…}>{children}</p>;
+
+const DialogFooter = ({
+  className,
+  children,
+}: {
+  className?: string;
+  children: React.ReactNode;
+}) => <div className={…}>{children}</div>
; const ITEMS_PER_PAGE = 10; interface UploadResultItem { - status: string; - id?: UUID; - filename?: string; + status: string; + id?: UUID; + filename?: string; } // Helper function to get correct MIME type based on file extension const getCorrectMimeType = (file: File): string => { - const filename = file.name.toLowerCase(); - const ext = filename.split('.').pop() || ''; - - // Map common text file extensions to text/plain - const textExtensions = [ - 'ts', 'tsx', 'js', 'jsx', 'mjs', 'cjs', - 'py', 'pyw', 'pyi', 'java', 'c', 'cpp', 'cc', 'cxx', 'h', 'hpp', - 'cs', 'php', 'rb', 'go', 'rs', 'swift', 'kt', 'kts', 'scala', - 'clj', 'cljs', 'ex', 'exs', 'r', 'R', 'm', 'mm', 'sh', 'bash', - 'zsh', 'fish', 'ps1', 'bat', 'cmd', 'sql', 'lua', 'pl', 'pm', - 'dart', 'hs', 'elm', 'ml', 'fs', 'fsx', 'vb', 'pas', 'd', 'nim', - 'zig', 'jl', 'tcl', 'awk', 'sed', 'vue', 'svelte', 'astro', - 'gitignore', 'dockerignore', 'editorconfig', 'env', 'cfg', 'conf', - 'ini', 'log', 'txt' - ]; - - const markdownExtensions = ['md', 'markdown']; - const jsonExtensions = ['json']; - const xmlExtensions = ['xml']; - const htmlExtensions = ['html', 'htm']; - const cssExtensions = ['css', 'scss', 'sass', 'less']; - const csvExtensions = ['csv', 'tsv']; - const yamlExtensions = ['yaml', 'yml']; - - // Check extensions and return appropriate MIME type - if (textExtensions.includes(ext)) { - return 'text/plain'; - } else if (markdownExtensions.includes(ext)) { - return 'text/markdown'; - } else if (jsonExtensions.includes(ext)) { - return 'application/json'; - } else if (xmlExtensions.includes(ext)) { - return 'application/xml'; - } else if (htmlExtensions.includes(ext)) { - return 'text/html'; - } else if (cssExtensions.includes(ext)) { - return 'text/css'; - } else if (csvExtensions.includes(ext)) { - return 'text/csv'; - } else if (yamlExtensions.includes(ext)) { - return 'text/yaml'; - } else if (ext === 'pdf') { - return 'application/pdf'; - } else if (ext === 'doc') { - return 'application/msword'; - } else if (ext === 'docx') { - return 'application/vnd.openxmlformats-officedocument.wordprocessingml.document'; - } - - // Return the original MIME type if not recognized - return file.type || 'application/octet-stream'; + const filename = file.name.toLowerCase(); + const ext = filename.split('.').pop() || ''; + + // Map common text file extensions to text/plain + const textExtensions = [ + 'ts', + 'tsx', + 'js', + 'jsx', + 'mjs', + 'cjs', + 'py', + 'pyw', + 'pyi', + 'java', + 'c', + 'cpp', + 'cc', + 'cxx', + 'h', + 'hpp', + 'cs', + 'php', + 'rb', + 'go', + 'rs', + 'swift', + 'kt', + 'kts', + 'scala', + 'clj', + 'cljs', + 'ex', + 'exs', + 'r', + 'R', + 'm', + 'mm', + 'sh', + 'bash', + 'zsh', + 'fish', + 'ps1', + 'bat', + 'cmd', + 'sql', + 'lua', + 'pl', + 'pm', + 'dart', + 'hs', + 'elm', + 'ml', + 'fs', + 'fsx', + 'vb', + 'pas', + 'd', + 'nim', + 'zig', + 'jl', + 'tcl', + 'awk', + 'sed', + 'vue', + 'svelte', + 'astro', + 'gitignore', + 'dockerignore', + 'editorconfig', + 'env', + 'cfg', + 'conf', + 'ini', + 'log', + 'txt', + ]; + + const markdownExtensions = ['md', 'markdown']; + const jsonExtensions = ['json']; + const xmlExtensions = ['xml']; + const htmlExtensions = ['html', 'htm']; + const cssExtensions = ['css', 'scss', 'sass', 'less']; + const csvExtensions = ['csv', 'tsv']; + const yamlExtensions = ['yaml', 'yml']; + + // Check extensions and return appropriate MIME type + if (textExtensions.includes(ext)) { + return 'text/plain'; + } else if (markdownExtensions.includes(ext)) { + return 'text/markdown'; + } else 
if (jsonExtensions.includes(ext)) { + return 'application/json'; + } else if (xmlExtensions.includes(ext)) { + return 'application/xml'; + } else if (htmlExtensions.includes(ext)) { + return 'text/html'; + } else if (cssExtensions.includes(ext)) { + return 'text/css'; + } else if (csvExtensions.includes(ext)) { + return 'text/csv'; + } else if (yamlExtensions.includes(ext)) { + return 'text/yaml'; + } else if (ext === 'pdf') { + return 'application/pdf'; + } else if (ext === 'doc') { + return 'application/msword'; + } else if (ext === 'docx') { + return 'application/vnd.openxmlformats-officedocument.wordprocessingml.document'; + } + + // Return the original MIME type if not recognized + return file.type || 'application/octet-stream'; }; const apiClient = { - getKnowledgeDocuments: async (agentId: UUID, options?: { limit?: number; before?: number; includeEmbedding?: boolean }) => { - const params = new URLSearchParams(); - params.append('agentId', agentId); - if (options?.limit) params.append('limit', options.limit.toString()); - if (options?.before) params.append('before', options.before.toString()); - if (options?.includeEmbedding) params.append('includeEmbedding', 'true'); - - const response = await fetch(`/api/documents?${params.toString()}`); - if (!response.ok) { - const errorText = await response.text(); - throw new Error(`Failed to fetch knowledge documents: ${response.status} ${errorText}`); - } - return await response.json(); - }, - - getKnowledgeChunks: async (agentId: UUID, options?: { limit?: number; before?: number; documentId?: UUID }) => { - const params = new URLSearchParams(); - params.append('agentId', agentId); - if (options?.limit) params.append('limit', options.limit.toString()); - if (options?.before) params.append('before', options.before.toString()); - if (options?.documentId) params.append('documentId', options.documentId); - - const response = await fetch(`/api/knowledges?${params.toString()}`); - if (!response.ok) { - const errorText = await response.text(); - throw new Error(`Failed to fetch knowledge chunks: ${response.status} ${errorText}`); - } - return await response.json(); - }, - - deleteKnowledgeDocument: async (agentId: UUID, knowledgeId: UUID) => { - const params = new URLSearchParams(); - params.append('agentId', agentId); + getKnowledgeDocuments: async ( + agentId: UUID, + options?: { limit?: number; before?: number; includeEmbedding?: boolean } + ) => { + const params = new URLSearchParams(); + params.append('agentId', agentId); + if (options?.limit) params.append('limit', options.limit.toString()); + if (options?.before) params.append('before', options.before.toString()); + if (options?.includeEmbedding) params.append('includeEmbedding', 'true'); + + const response = await fetch(`/api/documents?${params.toString()}`); + if (!response.ok) { + const errorText = await response.text(); + throw new Error(`Failed to fetch knowledge documents: ${response.status} ${errorText}`); + } + return await response.json(); + }, + + getKnowledgeChunks: async ( + agentId: UUID, + options?: { limit?: number; before?: number; documentId?: UUID } + ) => { + const params = new URLSearchParams(); + params.append('agentId', agentId); + if (options?.limit) params.append('limit', options.limit.toString()); + if (options?.before) params.append('before', options.before.toString()); + if (options?.documentId) params.append('documentId', options.documentId); + + const response = await fetch(`/api/knowledges?${params.toString()}`); + if (!response.ok) { + const errorText = await 
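
// A condensed sketch (not from the patch) of the extension-to-MIME lookup
// above, using a Record table instead of the if/else chain; the fallback to
// file.type and then application/octet-stream matches the patch's behavior.
const MIME_BY_EXT: Record<string, string> = {
  md: 'text/markdown',
  markdown: 'text/markdown',
  json: 'application/json',
  xml: 'application/xml',
  html: 'text/html',
  css: 'text/css',
  csv: 'text/csv',
  yaml: 'text/yaml',
  yml: 'text/yaml',
  pdf: 'application/pdf',
  ts: 'text/plain', // code and config extensions all map to plain text
  py: 'text/plain',
};

function guessMimeType(file: File): string {
  const ext = file.name.toLowerCase().split('.').pop() ?? '';
  return MIME_BY_EXT[ext] ?? (file.type || 'application/octet-stream');
}
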
response.text(); + throw new Error(`Failed to fetch knowledge chunks: ${response.status} ${errorText}`); + } + return await response.json(); + }, - const response = await fetch(`/api/documents/${knowledgeId}?${params.toString()}`, { - method: 'DELETE', - }); - if (!response.ok) { - const errorText = await response.text(); - throw new Error(`Failed to delete knowledge document: ${response.status} ${errorText}`); - } - if (response.status === 204) return; - return await response.json(); - }, + deleteKnowledgeDocument: async (agentId: UUID, knowledgeId: UUID) => { + const params = new URLSearchParams(); + params.append('agentId', agentId); - uploadKnowledge: async (agentId: string, files: File[]) => { - const formData = new FormData(); - for (const file of files) { - // Create a new Blob with the correct MIME type - const correctedMimeType = getCorrectMimeType(file); - const blob = new Blob([file], { type: correctedMimeType }); - // Append as a file with the original name - formData.append('files', blob, file.name); - } - formData.append('agentId', agentId); + const response = await fetch(`/api/documents/${knowledgeId}?${params.toString()}`, { + method: 'DELETE', + }); + if (!response.ok) { + const errorText = await response.text(); + throw new Error(`Failed to delete knowledge document: ${response.status} ${errorText}`); + } + if (response.status === 204) return; + return await response.json(); + }, + + uploadKnowledge: async (agentId: string, files: File[]) => { + const formData = new FormData(); + for (const file of files) { + // Create a new Blob with the correct MIME type + const correctedMimeType = getCorrectMimeType(file); + const blob = new Blob([file], { type: correctedMimeType }); + // Append as a file with the original name + formData.append('files', blob, file.name); + } + formData.append('agentId', agentId); - const response = await fetch(`/api/documents`, { - method: 'POST', - body: formData, - }); - if (!response.ok) { - const errorText = await response.text(); - throw new Error(`Failed to upload knowledge: ${response.status} ${errorText}`); - } - return await response.json(); + const response = await fetch(`/api/documents`, { + method: 'POST', + body: formData, + }); + if (!response.ok) { + const errorText = await response.text(); + throw new Error(`Failed to upload knowledge: ${response.status} ${errorText}`); } + return await response.json(); + }, }; -const useKnowledgeDocuments = (agentId: UUID, enabled: boolean = true, includeEmbedding: boolean = false) => { - return useQuery({ - queryKey: ['agents', agentId, 'knowledge', 'documents', { includeEmbedding }], - queryFn: async () => { - const response = await apiClient.getKnowledgeDocuments(agentId, { includeEmbedding }); - return response.data.memories || []; - }, - enabled, - }); +const useKnowledgeDocuments = ( + agentId: UUID, + enabled: boolean = true, + includeEmbedding: boolean = false +) => { + return useQuery({ + queryKey: ['agents', agentId, 'knowledge', 'documents', { includeEmbedding }], + queryFn: async () => { + const response = await apiClient.getKnowledgeDocuments(agentId, { includeEmbedding }); + return response.data.memories || []; + }, + enabled, + }); }; const useKnowledgeChunks = (agentId: UUID, enabled: boolean = true, documentIdFilter?: UUID) => { - // Query to get fragments (chunks) - const { - data: chunks = [], - isLoading: chunksLoading, - error: chunksError, - } = useQuery({ - queryKey: ['agents', agentId, 'knowledge', 'chunks', { documentIdFilter }], - queryFn: async () => { - const response = 
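
// The apiClient methods above repeat one pattern: build a query string, fetch,
// throw with status and body text on a non-OK response, and parse JSON. A
// generic helper (illustrative, not in the patch) could capture it:
async function getJson<T>(url: string, init?: RequestInit): Promise<T> {
  const response = await fetch(url, init);
  if (!response.ok) {
    const errorText = await response.text();
    throw new Error(`Request failed: ${response.status} ${errorText}`);
  }
  return (await response.json()) as T;
}
// e.g. const body = await getJson<{ data: { memories: Memory[] } }>(`/api/documents?agentId=${agentId}`);
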
await apiClient.getKnowledgeChunks(agentId, { documentId: documentIdFilter }); - return response.data.chunks || []; - }, - enabled, - }); - - // Query to get documents - const { - data: documents = [], - isLoading: documentsLoading, - error: documentsError, - } = useQuery({ - queryKey: ['agents', agentId, 'knowledge', 'documents-for-graph'], - queryFn: async () => { - const response = await apiClient.getKnowledgeDocuments(agentId, { includeEmbedding: false }); - return response.data.memories || []; - }, - enabled, - }); - - // Combine documents and fragments - const allMemories = [...documents, ...chunks]; - const isLoading = chunksLoading || documentsLoading; - const error = chunksError || documentsError; - - console.log(`Documents: ${documents.length}, Fragments: ${chunks.length}, Total: ${allMemories.length}`); - - return { - data: allMemories, - isLoading, - error, - }; + // Query to get fragments (chunks) + const { + data: chunks = [], + isLoading: chunksLoading, + error: chunksError, + } = useQuery({ + queryKey: ['agents', agentId, 'knowledge', 'chunks', { documentIdFilter }], + queryFn: async () => { + const response = await apiClient.getKnowledgeChunks(agentId, { + documentId: documentIdFilter, + }); + return response.data.chunks || []; + }, + enabled, + }); + + // Query to get documents + const { + data: documents = [], + isLoading: documentsLoading, + error: documentsError, + } = useQuery({ + queryKey: ['agents', agentId, 'knowledge', 'documents-for-graph'], + queryFn: async () => { + const response = await apiClient.getKnowledgeDocuments(agentId, { includeEmbedding: false }); + return response.data.memories || []; + }, + enabled, + }); + + // Combine documents and fragments + const allMemories = [...documents, ...chunks]; + const isLoading = chunksLoading || documentsLoading; + const error = chunksError || documentsError; + + console.log( + `Documents: ${documents.length}, Fragments: ${chunks.length}, Total: ${allMemories.length}` + ); + + return { + data: allMemories, + isLoading, + error, + }; }; // Hook for deleting knowledge documents const useDeleteKnowledgeDocument = (agentId: UUID) => { - const queryClient = useQueryClient(); - return useMutation< - void, - Error, - { knowledgeId: UUID } - >({ - mutationFn: async ({ knowledgeId }) => { - await apiClient.deleteKnowledgeDocument(agentId, knowledgeId); - }, - onSuccess: () => { - queryClient.invalidateQueries({ - queryKey: ['agents', agentId, 'knowledge', 'documents'], - }); - }, - }); + const queryClient = useQueryClient(); + return useMutation({ + mutationFn: async ({ knowledgeId }) => { + await apiClient.deleteKnowledgeDocument(agentId, knowledgeId); + }, + onSuccess: () => { + queryClient.invalidateQueries({ + queryKey: ['agents', agentId, 'knowledge', 'documents'], + }); + }, + }); }; export function KnowledgeTab({ agentId }: { agentId: UUID }) { - const [viewingContent, setViewingContent] = useState(null); - const [isUploading, setIsUploading] = useState(false); - const [visibleItems, setVisibleItems] = useState(ITEMS_PER_PAGE); - const [loadingMore, setLoadingMore] = useState(false); - const [viewMode, setViewMode] = useState<'list' | 'graph'>('list'); - const [selectedMemory, setSelectedMemory] = useState(null); - const [documentIdFilter, setDocumentIdFilter] = useState(undefined); - const [pdfZoom, setPdfZoom] = useState(1.0); - const [showUrlDialog, setShowUrlDialog] = useState(false); - const [urlInput, setUrlInput] = useState(''); - const [isUrlUploading, setIsUrlUploading] = useState(false); - const [urlError, 
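
// Sketch of how the query keys above pair with invalidation. Keys are arrays
// scoped broad-to-narrow, and react-query matches by prefix by default, so
// invalidating ['agents', agentId, 'knowledge', 'documents'] also refetches
// the variants that append an options object such as { includeEmbedding }.
import { QueryClient } from '@tanstack/react-query';

function invalidateKnowledgeDocuments(queryClient: QueryClient, agentId: string) {
  // Pass exact: true instead to skip prefix matching.
  return queryClient.invalidateQueries({
    queryKey: ['agents', agentId, 'knowledge', 'documents'],
  });
}
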
setUrlError] = useState(null); - const [urls, setUrls] = useState([]); - - const fileInputRef = useRef(null); - const scrollContainerRef = useRef(null); - const { toast } = useToast(); - const queryClient = useQueryClient(); - - // List mode: use useKnowledgeDocuments to get only documents - const { - data: documentsOnly = [], - isLoading: documentsLoading, - error: documentsError, - } = useKnowledgeDocuments(agentId, viewMode === 'list', false); - - // Graph mode: use useKnowledgeChunks to get documents and fragments - const { - data: graphMemories = [], - isLoading: graphLoading, - error: graphError, - } = useKnowledgeChunks(agentId, viewMode === 'graph', documentIdFilter); - - // Use the appropriate data based on the mode - const isLoading = viewMode === 'list' ? documentsLoading : graphLoading; - const error = viewMode === 'list' ? documentsError : graphError; - const memories = viewMode === 'list' ? documentsOnly : graphMemories; - - const { mutate: deleteKnowledgeDoc } = useDeleteKnowledgeDocument(agentId); - - const handleScroll = useCallback(() => { - if (!scrollContainerRef.current || loadingMore || visibleItems >= memories.length) { - return; - } - const { scrollTop, scrollHeight, clientHeight } = scrollContainerRef.current; - const scrolledToBottom = scrollTop + clientHeight >= scrollHeight - 100; - if (scrolledToBottom) { - setLoadingMore(true); - setTimeout(() => { - setVisibleItems((prev) => Math.min(prev + ITEMS_PER_PAGE, memories.length)); - setLoadingMore(false); - }, 300); - } - }, [loadingMore, visibleItems, memories.length]); - - useEffect(() => { - setVisibleItems(ITEMS_PER_PAGE); - }, []); + const [viewingContent, setViewingContent] = useState(null); + const [isUploading, setIsUploading] = useState(false); + const [visibleItems, setVisibleItems] = useState(ITEMS_PER_PAGE); + const [loadingMore, setLoadingMore] = useState(false); + const [viewMode, setViewMode] = useState<'list' | 'graph'>('list'); + const [selectedMemory, setSelectedMemory] = useState(null); + const [documentIdFilter, setDocumentIdFilter] = useState(undefined); + const [pdfZoom, setPdfZoom] = useState(1.0); + const [showUrlDialog, setShowUrlDialog] = useState(false); + const [urlInput, setUrlInput] = useState(''); + const [isUrlUploading, setIsUrlUploading] = useState(false); + const [urlError, setUrlError] = useState(null); + const [urls, setUrls] = useState([]); + + const fileInputRef = useRef(null); + const scrollContainerRef = useRef(null); + const { toast } = useToast(); + const queryClient = useQueryClient(); + + // List mode: use useKnowledgeDocuments to get only documents + const { + data: documentsOnly = [], + isLoading: documentsLoading, + error: documentsError, + } = useKnowledgeDocuments(agentId, viewMode === 'list', false); + + // Graph mode: use useKnowledgeChunks to get documents and fragments + const { + data: graphMemories = [], + isLoading: graphLoading, + error: graphError, + } = useKnowledgeChunks(agentId, viewMode === 'graph', documentIdFilter); + + // Use the appropriate data based on the mode + const isLoading = viewMode === 'list' ? documentsLoading : graphLoading; + const error = viewMode === 'list' ? documentsError : graphError; + const memories = viewMode === 'list' ? 
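
// Sketch of the enabled-flag gating used by the two hooks above: only the
// hook matching the current view mode fetches, the other stays idle, so
// switching between list and graph views never fires both request sets.
import { useQuery } from '@tanstack/react-query';

function useGatedQuery<T>(queryKey: unknown[], queryFn: () => Promise<T>, enabled: boolean) {
  // With enabled: false, react-query neither fetches nor refetches this query.
  return useQuery({ queryKey, queryFn, enabled });
}
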
documentsOnly : graphMemories; + + const { mutate: deleteKnowledgeDoc } = useDeleteKnowledgeDocument(agentId); + + const handleScroll = useCallback(() => { + if (!scrollContainerRef.current || loadingMore || visibleItems >= memories.length) { + return; + } + const { scrollTop, scrollHeight, clientHeight } = scrollContainerRef.current; + const scrolledToBottom = scrollTop + clientHeight >= scrollHeight - 100; + if (scrolledToBottom) { + setLoadingMore(true); + setTimeout(() => { + setVisibleItems((prev) => Math.min(prev + ITEMS_PER_PAGE, memories.length)); + setLoadingMore(false); + }, 300); + } + }, [loadingMore, visibleItems, memories.length]); - useEffect(() => { - const scrollContainer = scrollContainerRef.current; - if (scrollContainer) { - scrollContainer.addEventListener('scroll', handleScroll); - return () => scrollContainer.removeEventListener('scroll', handleScroll); - } - }, [handleScroll]); + useEffect(() => { + setVisibleItems(ITEMS_PER_PAGE); + }, []); - if (isLoading && (!memories || memories.length === 0)) { - return ( -
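
// The scroll handler above reduces to one predicate (sketch): load more when
// the viewport is within 100px of the container bottom; the 300ms timeout in
// the patch then reveals another ITEMS_PER_PAGE items.
function isNearBottom(
  el: { scrollTop: number; scrollHeight: number; clientHeight: number },
  thresholdPx = 100
): boolean {
  return el.scrollTop + el.clientHeight >= el.scrollHeight - thresholdPx;
}
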
Loading knowledge documents...
- ); + useEffect(() => { + const scrollContainer = scrollContainerRef.current; + if (scrollContainer) { + scrollContainer.addEventListener('scroll', handleScroll); + return () => scrollContainer.removeEventListener('scroll', handleScroll); } + }, [handleScroll]); - if (error) { - return ( -
- Error loading knowledge documents: {error.message} -
- ); + if (isLoading && (!memories || memories.length === 0)) { + return ( +
Loading knowledge documents...
+ ); + } + + if (error) { + return ( +
+ Error loading knowledge documents: {error.message} +
+ ); + } + + const formatDate = (timestamp: number) => { + const date = new Date(timestamp); + return `${date.toLocaleDateString()} ${date.toLocaleTimeString()}`; + }; + + const getFileIcon = (fileName: string) => { + const ext = fileName.split('.').pop()?.toLowerCase(); + switch (ext) { + case 'md': + return ; + case 'js': + case 'ts': + case 'jsx': + case 'tsx': + return ; + case 'json': + return ; + case 'pdf': + return ; + default: + return ; } + }; - const formatDate = (timestamp: number) => { - const date = new Date(timestamp); - return `${date.toLocaleDateString()} ${date.toLocaleTimeString()}`; - }; - - const getFileIcon = (fileName: string) => { - const ext = fileName.split('.').pop()?.toLowerCase(); - switch (ext) { - case 'md': return ; - case 'js': case 'ts': case 'jsx': case 'tsx': return ; - case 'json': return ; - case 'pdf': return ; - default: return ; + const handleDelete = (knowledgeId: string) => { + if (knowledgeId && window.confirm('Are you sure you want to delete this document?')) { + deleteKnowledgeDoc({ knowledgeId: knowledgeId as UUID }); + setViewingContent(null); + } + }; + + const handleUploadClick = () => { + if (fileInputRef.current) fileInputRef.current.click(); + }; + + const handleUrlUploadClick = () => { + setShowUrlDialog(true); + setUrlInput(''); + setUrls([]); + setUrlError(null); + }; + + const addUrlToList = () => { + try { + const url = new URL(urlInput); + if (!url.protocol.startsWith('http')) { + setUrlError('URL must start with http:// or https://'); + return; + } + + if (urls.includes(urlInput)) { + setUrlError('This URL is already in the list'); + return; + } + + setUrls([...urls, urlInput]); + setUrlInput(''); + setUrlError(null); + } catch (e) { + setUrlError('Invalid URL'); + } + }; + + const removeUrl = (urlToRemove: string) => { + setUrls(urls.filter((url) => url !== urlToRemove)); + }; + + const handleUrlSubmit = async () => { + // Check if there's a URL in the input field that hasn't been added to the list + if (urlInput.trim()) { + try { + const url = new URL(urlInput); + if (url.protocol.startsWith('http') && !urls.includes(urlInput)) { + setUrls([...urls, urlInput]); } - }; + } catch (e) { + // If the input is not a valid URL, just ignore it + } + } - const handleDelete = (knowledgeId: string) => { - if (knowledgeId && window.confirm('Are you sure you want to delete this document?')) { - deleteKnowledgeDoc({ knowledgeId: knowledgeId as UUID }); - setViewingContent(null); - } - }; - - const handleUploadClick = () => { - if (fileInputRef.current) fileInputRef.current.click(); - }; - - const handleUrlUploadClick = () => { - setShowUrlDialog(true); - setUrlInput(''); - setUrls([]); - setUrlError(null); - }; - - const addUrlToList = () => { - try { - const url = new URL(urlInput); - if (!url.protocol.startsWith('http')) { - setUrlError('URL must start with http:// or https://'); - return; - } + // If no URLs to process, show error + if (urls.length === 0) { + setUrlError('Please add at least one valid URL'); + return; + } - if (urls.includes(urlInput)) { - setUrlError('This URL is already in the list'); - return; - } + setIsUrlUploading(true); + setUrlError(null); - setUrls([...urls, urlInput]); - setUrlInput(''); - setUrlError(null); - } catch (e) { - setUrlError('Invalid URL'); - } - }; - - const removeUrl = (urlToRemove: string) => { - setUrls(urls.filter(url => url !== urlToRemove)); - }; - - const handleUrlSubmit = async () => { - // Check if there's a URL in the input field that hasn't been added to the list - if (urlInput.trim()) { 
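
// The URL checks in addUrlToList above, as a pure function (sketch). new URL()
// throws on malformed input, and url.protocol is 'http:' or 'https:', so the
// startsWith('http') test from the patch covers both schemes.
function validateUrlForImport(input: string, existing: string[]): string | null {
  try {
    const url = new URL(input);
    if (!url.protocol.startsWith('http')) return 'URL must start with http:// or https://';
    if (existing.includes(input)) return 'This URL is already in the list';
    return null; // valid
  } catch {
    return 'Invalid URL';
  }
}
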
- try { - const url = new URL(urlInput); - if (url.protocol.startsWith('http') && !urls.includes(urlInput)) { - setUrls([...urls, urlInput]); - } - } catch (e) { - // If the input is not a valid URL, just ignore it - } - } + try { + const result = await fetch(`/api/documents`, { + method: 'POST', + headers: { + 'Content-Type': 'application/json', + }, + body: JSON.stringify({ fileUrls: urls, agentId }), + }); - // If no URLs to process, show error - if (urls.length === 0) { - setUrlError('Please add at least one valid URL'); - return; - } + if (!result.ok) { + const error = await result.text(); + throw new Error(error); + } - setIsUrlUploading(true); - setUrlError(null); - - try { - const result = await fetch(`/api/documents`, { - method: 'POST', - headers: { - 'Content-Type': 'application/json', - }, - body: JSON.stringify({ fileUrls: urls, agentId }), - }); - - if (!result.ok) { - const error = await result.text(); - throw new Error(error); - } + const data = await result.json(); - const data = await result.json(); - - if (data.success) { - toast({ - title: 'URLs imported', - description: `Successfully imported ${urls.length} document(s)`, - }); - setShowUrlDialog(false); - queryClient.invalidateQueries({ - queryKey: ['agents', agentId, 'knowledge', 'documents'], - }); - } else { - setUrlError(data.error?.message || 'Error importing documents from URLs'); - } - } catch (error: any) { - setUrlError(error.message || 'Error importing documents from URLs'); - toast({ - title: 'Error', - description: 'Failed to import documents from URLs', - variant: 'destructive', - }); - } finally { - setIsUrlUploading(false); - } - }; - - const handleFileChange = async (e: React.ChangeEvent) => { - const files = e.target.files; - if (!files || files.length === 0) return; - setIsUploading(true); - try { - const fileArray = Array.from(files); - // Use direct fetch instead of apiClient until it's updated - const formData = new FormData(); - for (const file of fileArray) { - // Create a new Blob with the correct MIME type - const correctedMimeType = getCorrectMimeType(file); - const blob = new Blob([file], { type: correctedMimeType }); - // Append as a file with the original name - formData.append('files', blob, file.name); - } - formData.append('agentId', agentId); + if (data.success) { + toast({ + title: 'URLs imported', + description: `Successfully imported ${urls.length} document(s)`, + }); + setShowUrlDialog(false); + queryClient.invalidateQueries({ + queryKey: ['agents', agentId, 'knowledge', 'documents'], + }); + } else { + setUrlError(data.error?.message || 'Error importing documents from URLs'); + } + } catch (error: any) { + setUrlError(error.message || 'Error importing documents from URLs'); + toast({ + title: 'Error', + description: 'Failed to import documents from URLs', + variant: 'destructive', + }); + } finally { + setIsUrlUploading(false); + } + }; + + const handleFileChange = async (e: React.ChangeEvent) => { + const files = e.target.files; + if (!files || files.length === 0) return; + setIsUploading(true); + try { + const fileArray = Array.from(files); + // Use direct fetch instead of apiClient until it's updated + const formData = new FormData(); + for (const file of fileArray) { + // Create a new Blob with the correct MIME type + const correctedMimeType = getCorrectMimeType(file); + const blob = new Blob([file], { type: correctedMimeType }); + // Append as a file with the original name + formData.append('files', blob, file.name); + } + formData.append('agentId', agentId); + + const 
response = await fetch('/api/documents', { + method: 'POST', + body: formData, + }); + + if (!response.ok) { + throw new Error(`Upload failed: ${response.statusText}`); + } + + const result = await response.json(); + + // The actual array of upload outcomes is in result.data + const uploadOutcomes: UploadResultItem[] = result.data || []; + + if ( + Array.isArray(uploadOutcomes) && + uploadOutcomes.every((r: UploadResultItem) => r.status === 'success') + ) { + toast({ + title: 'Knowledge Uploaded', + description: `Successfully uploaded ${fileArray.length} file(s)`, + }); + queryClient.invalidateQueries({ + queryKey: ['agents', agentId, 'knowledge', 'documents'], + }); + } else { + const successfulUploads = uploadOutcomes.filter( + (r: UploadResultItem) => r.status === 'success' + ).length; + const failedUploads = fileArray.length - successfulUploads; + toast({ + title: failedUploads > 0 ? 'Upload Partially Failed' : 'Upload Issues', + description: `Uploaded ${successfulUploads} file(s). ${failedUploads} file(s) failed. Check console for details.`, + variant: failedUploads > 0 ? 'destructive' : 'default', + }); + console.error('Upload results:', uploadOutcomes); + } + } catch (uploadError: any) { + toast({ + title: 'Upload Failed', + description: + uploadError instanceof Error ? uploadError.message : 'Failed to upload knowledge files', + variant: 'destructive', + }); + console.error('Upload error:', uploadError); + } finally { + setIsUploading(false); + if (fileInputRef.current) { + fileInputRef.current.value = ''; + } + } + }; - const response = await fetch('/api/documents', { - method: 'POST', - body: formData, - }); + const visibleMemories = memories.slice(0, visibleItems); + const hasMoreToLoad = visibleItems < memories.length; - if (!response.ok) { - throw new Error(`Upload failed: ${response.statusText}`); - } + const LoadingIndicator = () => ( +
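
// The upload path above in isolation (sketch; uploadDocuments and getMime are
// illustrative names). Each File is re-wrapped in a Blob carrying the corrected
// MIME type, since FormData would otherwise send the browser-supplied (often
// empty or wrong) file.type.
async function uploadDocuments(agentId: string, files: File[], getMime: (f: File) => string) {
  const formData = new FormData();
  for (const file of files) {
    const blob = new Blob([file], { type: getMime(file) });
    formData.append('files', blob, file.name); // keep the original filename
  }
  formData.append('agentId', agentId);
  const response = await fetch('/api/documents', { method: 'POST', body: formData });
  if (!response.ok) throw new Error(`Upload failed: ${response.statusText}`);
  return response.json(); // per the patch: { data: UploadResultItem[] }
}
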
+ {loadingMore ? ( +
+ + Loading more... +
+ ) : ( + + )} +
+ ); + + const EmptyState = () => ( +
+ +

No Knowledge Documents

+

No Knowledge Documents found.

+ +
+ ); + + const KnowledgeCard = ({ memory, index }: { memory: Memory; index: number }) => { + const metadata = (memory.metadata as MemoryMetadata) || {}; + const title = metadata.title || memory.id || 'Unknown Document'; + const filename = metadata.filename || 'Unknown Document'; + const fileExt = metadata.fileExt || filename.split('.').pop()?.toLowerCase() || ''; + const displayName = title || filename; + const subtitle = metadata.path || filename; - const result = await response.json(); - - // The actual array of upload outcomes is in result.data - const uploadOutcomes: UploadResultItem[] = result.data || []; - - if (Array.isArray(uploadOutcomes) && uploadOutcomes.every((r: UploadResultItem) => r.status === 'success')) { - toast({ - title: 'Knowledge Uploaded', - description: `Successfully uploaded ${fileArray.length} file(s)`, - }); - queryClient.invalidateQueries({ - queryKey: ['agents', agentId, 'knowledge', 'documents'], - }); - } else { - const successfulUploads = uploadOutcomes.filter((r: UploadResultItem) => r.status === 'success').length; - const failedUploads = fileArray.length - successfulUploads; - toast({ - title: failedUploads > 0 ? 'Upload Partially Failed' : 'Upload Issues', - description: `Uploaded ${successfulUploads} file(s). ${failedUploads} file(s) failed. Check console for details.`, - variant: failedUploads > 0 ? 'destructive' : 'default', - }); - console.error('Upload results:', uploadOutcomes); - } - } catch (uploadError: any) { - toast({ - title: 'Upload Failed', - description: uploadError instanceof Error ? uploadError.message : 'Failed to upload knowledge files', - variant: 'destructive', - }); - console.error('Upload error:', uploadError); - } finally { - setIsUploading(false); - if (fileInputRef.current) { - fileInputRef.current.value = ''; - } - } - }; + return ( + + )} + + + + + + ); + }; + + // Add a function to handle the filtering of chunks by document + const handleDocumentFilter = (docId?: UUID) => { + setDocumentIdFilter(docId === documentIdFilter ? undefined : docId); + }; - const visibleMemories = memories.slice(0, visibleItems); - const hasMoreToLoad = visibleItems < memories.length; + // Component to display the details of a fragment or document + const MemoryDetails = ({ memory }: { memory: Memory }) => { + const metadata = memory.metadata as MemoryMetadata; + const isFragment = metadata?.type === 'fragment'; + const isDocument = metadata?.type === 'document'; - const LoadingIndicator = () => ( -
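
// The toggle semantics of handleDocumentFilter above, isolated (sketch):
// clicking the currently filtered document clears the filter, clicking a
// different one replaces it.
function toggleDocumentFilter(current: string | undefined, clicked: string): string | undefined {
  return clicked === current ? undefined : clicked;
}
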
- {loadingMore ? ( -
- - Loading more... + return ( +
+
+
+

+ {isFragment ? ( + +
+ Fragment +
+ ) : ( + +
+ Document +
+ )} + + {metadata?.title || memory.id?.substring(0, 8)} + +

+ +
+
+ ID: {memory.id} +
+ + {isFragment && metadata.documentId && ( +
+ Parent Document:{' '} + {metadata.documentId}
- ) : ( - - )} + )} + + {isFragment && metadata.position !== undefined && ( +
Position: {metadata.position}
+ )} + + {metadata.source &&
Source: {metadata.source}
} + +
Created on: {formatDate(memory.createdAt || 0)}
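
// Sketch of the document/fragment discrimination used by MemoryDetails and the
// graph click handler, assuming the metadata shape this patch reads (type,
// documentId, position, title, source).
type KnowledgeNodeMetadata = {
  type?: string;
  documentId?: string; // present on fragments, absent on standalone documents
  position?: number;
  title?: string;
  source?: string;
};

function isFragment(metadata?: KnowledgeNodeMetadata): boolean {
  return metadata?.type === 'fragment';
}

function isStandaloneDocument(metadata?: KnowledgeNodeMetadata): boolean {
  return (metadata?.type || '').toLowerCase() === 'document' && !('documentId' in (metadata ?? {}));
}
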
+
+
+ +
- ); - const EmptyState = () => ( -
- -

No Knowledge Documents

-

No Knowledge Documents found.

- +
+
+
+              {memory.content?.text || 'No content available'}
+            
+
+ + {memory.embedding && ( +
+ + EMBEDDING + + Vector with {memory.embedding.length} dimensions +
+ )}
+
); + }; + + return ( +
+
+
+

Knowledge

+

+ {viewMode === 'list' + ? 'Viewing documents only' + : 'Viewing documents and their fragments'} +

+
+
+ + {viewMode === 'graph' && documentIdFilter && ( + + )} +
+ + +
+
+
+ + {/* Dialog for URL upload */} + {showUrlDialog && ( + + + + Import from URL + + Enter one or more URLs of PDF, text, or other files to import into the knowledge + base. + + + +
+
+ setUrlInput(e.target.value)} + disabled={isUrlUploading} + className="flex-1" + onKeyDown={(e: React.KeyboardEvent) => { + if (e.key === 'Enter' && urlInput.trim()) { + e.preventDefault(); + addUrlToList(); + } + }} + /> + +
- const KnowledgeCard = ({ memory, index }: { memory: Memory; index: number }) => { - const metadata = (memory.metadata as MemoryMetadata) || {}; - const title = metadata.title || memory.id || 'Unknown Document'; - const filename = metadata.filename || 'Unknown Document'; - const fileExt = metadata.fileExt || filename.split('.').pop()?.toLowerCase() || ''; - const displayName = title || filename; - const subtitle = metadata.path || filename; - - return ( - - )} -
-
- - - - ); - }; - - // Add a function to handle the filtering of chunks by document - const handleDocumentFilter = (docId?: UUID) => { - setDocumentIdFilter(docId === documentIdFilter ? undefined : docId); - }; - - // Component to display the details of a fragment or document - const MemoryDetails = ({ memory }: { memory: Memory }) => { - const metadata = memory.metadata as MemoryMetadata; - const isFragment = metadata?.type === 'fragment'; - const isDocument = metadata?.type === 'document'; - - return ( -
-
-
-

- {isFragment ? ( - -
- Fragment -
- ) : ( - -
- Document -
- )} - - {metadata?.title || memory.id?.substring(0, 8)} - -

- -
-
ID: {memory.id}
- - {isFragment && metadata.documentId && ( -
- Parent Document: {metadata.documentId} -
- )} - - {isFragment && metadata.position !== undefined && ( -
Position: {metadata.position}
- )} - - {metadata.source && ( -
Source: {metadata.source}
- )} - -
Created on: {formatDate(memory.createdAt || 0)}
-
-
- - + {urlError && ( +
+ {urlError}
+ )} + + {urls.length > 0 && ( +
+

URLs to import ({urls.length})

+
+ {urls.map((url, index) => ( +
+ {url} + +
+ ))} +
+
+ )} +
-
-
-
-                            {memory.content?.text || 'No content available'}
-                        
-
- - {memory.embedding && ( -
- EMBEDDING - Vector with {memory.embedding.length} dimensions -
- )} + + + + + + + )} + + {/* Existing input for file upload */} + + +
+ {memories.length === 0 ? ( + + ) : viewMode === 'graph' ? ( +
+
+ { + setSelectedMemory(memory); + // If this is a document, filter to show only its chunks + if ( + memory.metadata && + typeof memory.metadata === 'object' && + 'type' in memory.metadata && + (memory.metadata.type || '').toLowerCase() === 'document' && + !('documentId' in memory.metadata) + ) { + handleDocumentFilter(memory.id as UUID); + } + }} + selectedMemoryId={selectedMemory?.id} + /> + {documentIdFilter && ( +
+ + + + + Filtering by document ID:{' '} + {documentIdFilter.substring(0, 8)}... +
+ )}
- ); - }; - return ( -
-
-
-

Knowledge

-

- {viewMode === 'list' - ? 'Viewing documents only' - : 'Viewing documents and their fragments'} -

+ {/* Display details of selected node */} + {selectedMemory && ( +
+ +
+ )} +
+ ) : ( +
+
+ {visibleMemories.map((memory, index) => ( + + ))} +
+ {hasMoreToLoad && } +
+ )} +
+ + {viewingContent && ( + setViewingContent(null)}> + + +
+
+ + {(viewingContent.metadata as MemoryMetadata)?.title || 'Document Content'} + + + {(viewingContent.metadata as MemoryMetadata)?.filename || 'Knowledge document'} +
-
- - {viewMode === 'graph' && documentIdFilter && ( + {(() => { + const metadata = viewingContent.metadata as MemoryMetadata; + const contentType = metadata?.contentType || ''; + const fileExt = metadata?.fileExt?.toLowerCase() || ''; + const isPdf = contentType === 'application/pdf' || fileExt === 'pdf'; + + if (isPdf) { + return ( +
- )} -
+ + {Math.round(pdfZoom * 100)}% + - -
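
// A clamped zoom step that could drive the percentage label above (sketch;
// the 0.5x-3x bounds and 0.1 step are assumptions, not taken from the patch).
function stepZoom(current: number, delta: number, min = 0.5, max = 3): number {
  const next = Math.round((current + delta) * 10) / 10; // keep one decimal place
  return Math.min(max, Math.max(min, next));
}
// e.g. setPdfZoom((z) => stepZoom(z, 0.1)) or setPdfZoom((z) => stepZoom(z, -0.1))
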
-
-
- - {/* Dialog for URL upload */} - {showUrlDialog && ( - - - - Import from URL - - Enter one or more URLs of PDF, text, or other files to import into the knowledge base. - - - -
-
- setUrlInput(e.target.value)} - disabled={isUrlUploading} - className="flex-1" - onKeyDown={(e: React.KeyboardEvent) => { - if (e.key === 'Enter' && urlInput.trim()) { - e.preventDefault(); - addUrlToList(); - } - }} - /> - -
- - {urlError && ( -
{urlError}
- )} - - {urls.length > 0 && ( -
-

URLs to import ({urls.length})

-
- {urls.map((url, index) => ( -
- {url} - -
- ))} -
-
- )} -
- - - - - -
-
- )} - - {/* Existing input for file upload */} - - -
- {memories.length === 0 ? ( - - ) : viewMode === 'graph' ? ( -
-
- { - setSelectedMemory(memory); - // If this is a document, filter to show only its chunks - if (memory.metadata && - typeof memory.metadata === 'object' && - ('type' in memory.metadata) && - ((memory.metadata.type || '').toLowerCase() === 'document') && - !('documentId' in memory.metadata)) { - handleDocumentFilter(memory.id as UUID); - } - }} - selectedMemoryId={selectedMemory?.id} +
+ ); + } + return null; + })()} +
+ +
+ {(() => { + const metadata = viewingContent.metadata as MemoryMetadata; + const contentType = metadata?.contentType || ''; + const fileExt = metadata?.fileExt?.toLowerCase() || ''; + const isPdf = contentType === 'application/pdf' || fileExt === 'pdf'; + + if (isPdf && viewingContent.content?.text) { + // For PDFs, the content.text contains base64 data + // Validate base64 content before creating data URL + const base64Content = viewingContent.content.text.trim(); + + if (!base64Content) { + // Show error message if no content available + return ( +
+
+ + - {documentIdFilter && ( -
- - - - - Filtering by document ID: {documentIdFilter.substring(0, 8)}... - -
- )} + +

PDF Content Unavailable

+

The PDF content could not be loaded.

- - {/* Display details of selected node */} - {selectedMemory && ( -
- -
- )} +
+ ); + } + + // Create a data URL for the PDF + const pdfDataUrl = `data:application/pdf;base64,${base64Content}`; + + return ( +
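
// The base64-to-data-URL step above, isolated (sketch). content.text is
// expected to hold base64-encoded PDF bytes; an empty string returns null so
// the caller can render the "PDF Content Unavailable" state instead.
function pdfDataUrlFromBase64(raw: string): string | null {
  const base64Content = raw.trim();
  if (!base64Content) return null;
  return `data:application/pdf;base64,${base64Content}`;
}
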
+
1 ? `${100 / pdfZoom}%` : '100%', + }} + > +