diff --git a/.changeset/config.json b/.changeset/config.json
index 302e184b..68c93e76 100644
--- a/.changeset/config.json
+++ b/.changeset/config.json
@@ -12,6 +12,7 @@
"example-astro",
"example-nextjs",
"example-nodejs",
- "example-remix"
+ "example-remix",
+ "testing"
]
}
diff --git a/DEPLOY b/DEPLOY
index 2cab3b28..44a20416 100644
--- a/DEPLOY
+++ b/DEPLOY
@@ -1 +1 @@
-Deployment at: Thu Oct 31 22:47:48 CET 2024
+Deployment at: Sun Sep 29 14:38:57 PDT 2024
diff --git a/README.md b/README.md
index 4e1efd3d..09a2051f 100644
--- a/README.md
+++ b/README.md
@@ -1,9 +1,9 @@
-> BaseAI is now archived in the favor of [Langbase AI Primitives](https://langbase.com/docs). The more we built BaseAI the more we realized frameworks are a bad idea in AI engineering. This space is moving fast and frameworks become blockers. Instead you should be using AI primitives, like [memory, pipes, agents](https://langbase.com/docs) that work with any language as APIs and with TypeScript and Python SDKs. You can use any coding agent, like [CommandCode](https://commandcode.ai) to make your own AI framework with these AI primitives. BaseAI is a great example how.
-
-
-
+
+
+
+
BaseAI
@@ -15,196 +15,11 @@
## Getting Started
-BaseAI is the AI framework for building serverless and composable AI agents with memory and tools. It allows you to develop AI agent pipes on your local machine with integrated agentic tools and memory (RAG). Visit our [BaseAI.dev/learn](https://baseai.dev/learn) guide to start with BaseAI.
-
-### Documentation (recommended)
-
-Please check [BaseAI.dev/docs](https://baseai.dev/docs) and [BaseAI.dev/learn](https://baseai.dev/learn) to get started with full documentation.
-
-
-### 1. Initialize a new BaseAI project
-
-BaseAI is a TypeScript-first framework. To create a new BaseAI project, run the following command in your project:
-
-```bash
-npx baseai@latest init
-```
-
-This command will create a `baseai` directory in your project. This is what the directory structure looks like:
-
-```
-ROOT (of your app)
-├── baseai
-| ├── baseai.config.ts
-| ├── memory
-| ├── pipes
-| └── tools
-├── .env (your env file)
-└── package.json
-```
-
-### 2. Add API keys
-
-Copy the following in your `.env` file and add appropriate LLM API keys:
-
-```bash
-# !! SERVER SIDE ONLY !!
-# Keep all your API keys secret — use only on the server side.
-
-# TODO: ADD: Both in your production and local env files.
-# Langbase API key for your User or Org account.
-# How to get this API key https://langbase.com/docs/api-reference/api-keys
-LANGBASE_API_KEY=
-
-# TODO: ADD: LOCAL ONLY. Add only to local env files.
-# Following keys are needed for local pipe runs. For providers you are using.
-# For Langbase, please add the key to your LLM keysets.
-# Read more: Langbase LLM Keysets https://langbase.com/docs/features/keysets
-OPENAI_API_KEY=
-ANTHROPIC_API_KEY=
-COHERE_API_KEY=
-FIREWORKS_API_KEY=
-GOOGLE_API_KEY=
-GROQ_API_KEY=
-MISTRAL_API_KEY=
-PERPLEXITY_API_KEY=
-TOGETHER_API_KEY=
-XAI_API_KEY=
-```
-
-### 3. Create a new AI agent
-
-Pipe is your custom-built AI agent as an API. It's the fastest way to ship AI features/apps. Let's create a new pipe:
-
-```bash
-npx baseai@latest pipe
-```
-
-It will ask you for the name, description, and other details of the pipe step-by-step. Once done, a pipe will be created inside the `/baseai/pipes` directory. You can now edit the system prompt, change model params, and more. Here is what a pipe code looks like:
-
-```ts
-import { PipeI } from '@baseai/core';
-
-const pipeSummary = (): PipeI => ({
- // Replace with your API key https://langbase.com/docs/api-reference/api-keys
- apiKey: process.env.LANGBASE_API_KEY!,
- name: 'summary',
- description: 'AI Summary agent',
- status: 'public',
- model: 'openai:gpt-4o-mini',
- stream: true,
- json: false,
- store: true,
- moderate: true,
- top_p: 1,
- max_tokens: 1000,
- temperature: 0.7,
- presence_penalty: 1,
- frequency_penalty: 1,
- stop: [],
- tool_choice: 'auto',
- parallel_tool_calls: true,
- messages: [
- {
- role: 'system',
- content: `You are a helpful AI agent. Make everything Less wordy.`
- }
- ],
- variables: [],
- memory: [],
- tools: []
-});
-
-export default pipeSummary;
-```
-
-### 4. Integrate pipe in your app
-
-Let's create a new `index.ts` file in your project root. Now we need to do the following:
-
-1. Import the pipe config we created.
-2. Create a new pipe instance with the pipe config.
-3. Run the pipe with a user message.
-4. Listen to the stream events.
-
-Here is what the code looks like:
-
-```ts
-import { Pipe, getRunner } from '@baseai/core';
-import pipeSummarizer from './baseai/pipes/summary';
-
-const pipe = new Pipe(pipeSummarizer());
-
-const userMsg = `
-Langbase studio is your playground to build, collaborate, and deploy AI. It allows you to experiment with your pipes in real-time, with real data, store messages, version your prompts, and truly helps you take your idea from building prototypes to deployed in production with LLMOps on usage, cost, and quality.
-A complete AI developers platform.
-- Collaborate: Invite all team members to collaborate on the pipe. Build AI together.
-- Developers & Stakeholders: All your R&D team, engineering, product, GTM (marketing and sales), literally invlove every stakeholder can collaborate on the same pipe. It's like a powerful version of GitHub x Google Docs for AI. A complete AI developers platform.
-`;
-
-async function main() {
- const { stream } = await pipe.run({
- messages: [{ role: 'user', content: userMsg }],
- stream: true,
- });
-
- const runner = getRunner(stream);
-
- // Method 1: Using event listeners
- runner.on('connect', () => {
- console.log('Stream started.\n');
- });
-
- runner.on('content', content => {
- process.stdout.write(content);
- });
-
- runner.on('end', () => {
- console.log('\nStream ended.');
- });
-
- runner.on('error', error => {
- console.error('Error:', error);
- });
-}
-
-main();
-```
-
-Make sure to install and import `dotenv` at the top if you are using Node.js:
-
-```ts
-import 'dotenv/config';
-```
-
-### 5. Run the AI agent
-
-To run the pipe locally, you need to start the BaseAI server. Run the following command in your terminal:
-
-```bash
-npx baseai@latest dev
-```
-
-Now, run the `index.ts` file in your terminal:
-
-```bash
-npx tsx index.ts
-```
-
-You should see the following output in your terminal:
-
-```md
-Stream started.
+BaseAI is the AI framework for building declarative and composable AI-powered LLM products. It allows you to develop AI agent pipes on your local machine with integrated agentic tools and memory (RAG). Visit our [learn](https://baseai.dev/learn) guide to get started with BaseAI.
-Langbase Studio is your AI development playground. Experiment in real-time with real data, store messages, and version prompts to move from prototype to production seamlessly.
+## Documentation
-Key Features:
-- **Collaborate**: Invite team members to build AI together.
-- **Inclusive Teams**: Engage all stakeholders—R&D, engineering, product, and marketing—in a shared space. It’s like GitHub combined with Google Docs for AI development.
-Stream ended.
-```
-> [!TIP]
-> You can also run RAG locally with BaseAI. Check out the memory agent quickstart [guide](https://baseai.dev/docs/memory/quickstart) for more details.
+Visit [baseai.dev/docs](https://baseai.dev/docs) for the full documentation.
## Contributing
diff --git a/apps/baseai.dev/content/docs/api-reference/pipe-run.mdx b/apps/baseai.dev/content/docs/api-reference/pipe-run.mdx
index 29df14f3..f1402e95 100644
--- a/apps/baseai.dev/content/docs/api-reference/pipe-run.mdx
+++ b/apps/baseai.dev/content/docs/api-reference/pipe-run.mdx
@@ -83,7 +83,6 @@ The BaseAI core package provides a `pipe.run()` function that you can use in you
```ts {{title: 'RunOptions Object'}}
interface RunOptions {
messages?: Message[];
- runTools?: boolean;
variables?: Variable[];
threadId?: string;
rawResponse?: boolean;
@@ -156,14 +155,6 @@ The BaseAI core package provides a `pipe.run()` function that you can use in you
---
- ### runTools
-
-
-
- Enable if you want BaseAI to automically run tools**.**
-
-
-
### variables
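+
+For reference, here is what a `pipe.run()` call looks like after this change. A minimal sketch using only the documented options; the pipe import path is illustrative:
+
+```ts
+import { Pipe } from '@baseai/core';
+import pipeSummary from './baseai/pipes/summary';
+
+const pipe = new Pipe(pipeSummary());
+
+async function main() {
+	// Only documented RunOptions fields are passed here.
+	const { completion } = await pipe.run({
+		messages: [{ role: 'user', content: 'Summarize this repo.' }],
+		stream: false
+	});
+	console.log(completion);
+}
+
+main();
+```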
diff --git a/apps/baseai.dev/content/docs/docs/index.mdx b/apps/baseai.dev/content/docs/docs/index.mdx
index a0160e34..f565e29e 100644
--- a/apps/baseai.dev/content/docs/docs/index.mdx
+++ b/apps/baseai.dev/content/docs/docs/index.mdx
@@ -9,13 +9,13 @@ modified: 2024-09-24
---
-
+
BaseAI Docs
- BaseAI is the first web AI framework for building Serverless AI agents with Node.js and TypeScript.
+ BaseAI is the first web AI framework built for web developers.
- **OPEN**: BaseAI is **free and open-source**
- **LOCAL**: world-class **local developer experience**
@@ -27,7 +27,7 @@ modified: 2024-09-24
- BaseAI is the first web AI framework for building Serverless AI agents with Node.js and TypeScript.
+ The first Web AI Framework for web developers.
- **OPEN**: BaseAI is **free and open-source**
- **LOCAL**: world-class **local dev experience**
diff --git a/apps/baseai.dev/content/docs/docs/supported-models-and-providers.mdx b/apps/baseai.dev/content/docs/docs/supported-models-and-providers.mdx
index 5ffae1a5..1222ae78 100644
--- a/apps/baseai.dev/content/docs/docs/supported-models-and-providers.mdx
+++ b/apps/baseai.dev/content/docs/docs/supported-models-and-providers.mdx
@@ -29,7 +29,6 @@ We currently support the following LLM providers.
- Fireworks AI
- Perplexity
- Mistral AI
-- xAI
- Ollama *(local-only)*
You can use any of these providers to build your Pipe, by adding your provider's key. Please feel free to request any specific provider you would like to use.
@@ -76,7 +75,6 @@ Learn more about [using Ollama models](/docs/guides/using-ollama-models) in Base
| Model | Provider | Owner | Context | Cost* |
|---------------------------------------------------------------------------------------------------------------------------|----------|------------|---------|----------------------------------------------|
-| Llama-3.3-70B-Instruct-Turbo ID: | Together | Meta | 131,072 | $0.88 prompt $0.88 completion |
| Llama-3.1-405B-Instruct-Turbo ID: | Together | Meta | 4,096 | $5 prompt $5 completion |
| Llama-3.1-70B-Instruct-Turbo ID: | Together | Meta | 8,192 | $0.88 prompt $0.88 completion |
| Llama-3.1-8B-Instruct-Turbo ID: | Together | Meta | 8,192 | $0.18 prompt $0.18 completion |
@@ -96,14 +94,12 @@ Learn more about [using Ollama models](/docs/guides/using-ollama-models) in Base
### Anthropic
-| Model | Provider | Owner | Context | Cost* |
-|------------------------------------------------------------------------------------------------------|-----------|-----------|---------|---------------------------------------------|
-| claude-3.5-sonnet-latest ID: | Anthropic | Anthropic | 200K | $3 prompt $15 completion |
-| claude-3.5-sonnet-20240620 ID: | Anthropic | Anthropic | 200K | $3 prompt $15 completion |
-| claude-3-opus ID: | Anthropic | Anthropic | 200K | $15 prompt $75 completion |
-| claude-3-sonnet ID: | Anthropic | Anthropic | 200K | $3 prompt $15 completion |
-| claude-3-haiku ID: | Anthropic | Anthropic | 200K | $0.25 prompt $1.25 completion |
-
+| Model | Provider | Owner | Context | Cost* |
+|---------------------------------------------------------------------------------------------|-----------|-----------|---------|---------------------------------------------|
+| claude-3.5-sonnet ID: | Anthropic | Anthropic | 200K | $3 prompt $15 completion |
+| claude-3-opus ID: | Anthropic | Anthropic | 200K | $15 prompt $75 completion |
+| claude-3-sonnet ID: | Anthropic | Anthropic | 200K | $3 prompt $15 completion |
+| claude-3-haiku ID: | Anthropic | Anthropic | 200K | $0.25 prompt $1.25 completion |
* USD per Million tokens
@@ -122,7 +118,6 @@ Learn more about [using Ollama models](/docs/guides/using-ollama-models) in Base
| Model | Provider | Owner | Context | Cost* |
|-----------------------------------------------------------------------------------------|----------|---------|---------|---------------------------------------------|
-| Llama-3.3-70b-versatile ID: | Groq | Meta | 128,000 | $0.59 prompt $0.79 completion |
| Llama-3.1-70b-versatile ID: | Groq | Meta | 131,072 | $0.59 prompt $0.79 completion |
| Llama-3.1-8b-instant ID: | Groq | Meta | 131,072 | $0.59 prompt $0.79 completion |
| Llama-3-70b ID: | Groq | Meta | 8,192 | $0.59 prompt $0.79 completion |
@@ -138,7 +133,6 @@ Learn more about [using Ollama models](/docs/guides/using-ollama-models) in Base
| Model | Provider | Owner | Context | Cost* |
|------------------------------------------------------------------------------|--------------|-------|---------|--------------------------------------------|
| Llama-3.2-3b ID: | Fireworks AI | Meta | 131,072 | $0.1 prompt $0.1 completion |
-| Llama 3.3 70B Instruct ID: | Fireworks AI | Meta | 131,072 | $0.9 prompt $0.9 completion |
| Llama-3.2-1b ID: | Fireworks AI | Meta | 131,072 | $0.1 prompt $0.1 completion |
| Llama-3.1-405b ID: | Fireworks AI | Meta | 131,072 | $3 prompt $3 completion |
| Llama-3.1-70b ID: | Fireworks AI | Meta | 131,072 | $0.9 prompt $0.9 completion |
@@ -179,16 +173,6 @@ Learn more about [using Ollama models](/docs/guides/using-ollama-models) in Base
* USD per Million tokens
-### xAI
-
-
-| Model | Provider | Owner | Context | Cost* |
-|---------------------------------------------------------------------------|----------|--------|---------|-------------------------------------------|
-| groq-beta ID: | xAI | xAI | 131K | $5 prompt $15 completion |
-
-* USD per Million tokens
-
-
## JSON Mode Support
See the [list of models that support JSON mode](/features/json-mode) and how to use it in your Pipe.
@@ -197,61 +181,6 @@ See the [list of models that support JSON mode](/features/json-mode) and how to
Completion and Prompt costs are based on the provider's pricing. Langbase does not charge on top of the provider's costs.
-## Tool Support
-
-The following models support tool calls in BaseAI.
-
-### OpenAI
-
-| Model | Parallel Tool Call Support | Tool Choice Support |
-|-------------------------------------------------------------------------------------|----------|--------|
-| o1-preview ID: | `true` | `true` |
-| o1-mini ID: | `true` | `true` |
-| gpt-4o ID: | `true` | `true` |
-| gpt-4o-2024-08-06 ID: | `true` | `true` |
-| gpt-4o-mini ID: | `true` | `true` |
-| gpt-4-turbo ID: | `true` | `true` |
-| gpt-4-turbo-preview ID: | `true` | `true` |
-| gpt-4-0125-preview ID: | `true` | `true` |
-| gpt-4-1106-preview ID: | `true` | `true` |
-| gpt-4 ID: | `true` | `true` |
-| gpt-4-0613 ID: | `true` | `true` |
-| gpt-4-32k ID: | `true` | `true` |
-| gpt-3.5-turbo-0125 ID: | `true` | `true` |
-| gpt-3.5-turbo-1106 ID: | `true` | `true` |
-| gpt-3.5-turbo ID: | `true` | `true` |
-| gpt-3.5-turbo-16k ID: | `true` | `true` |
-
-### Google
-
-| Model | Parallel Tool Call Support | Tool Choice Support |
-|--------------------------------------------------------------------------------------|----------|--------|
-| gemini-1.5-pro ID: | `true` | `true`|
-| gemini-1.5-flash ID: | `true` | `true`|
-| gemini-1.5-flash-8b ID: | `true` | `true`|
-| gemini-1.0-pro ID: | `false` | `false` |
-
-### Anthropic
-
-| Model | Parallel Tool Call Support | Tool Choice Support |
-|------------------------------------------------------------------------------------------------------|-----------|-----------|
-| claude-3.5-sonnet-latest ID: | `true` | `true` |
-| claude-3.5-sonnet-20240620 ID: | `true` | `true` |
-| claude-3-opus ID: | `true` | `true` |
-| claude-3-sonnet ID: | `true` | `true` |
-| claude-3-haiku ID: | `true` | `true` |
-
-### Together AI
-
-| Model | Parallel Tool Call Support | Tool Choice Support |
-|---------------------------------------------------------------------------------------------------------------------------|----------|------------|
-| Llama-3.1-405B-Instruct-Turbo ID: | `false` | `true` |
-| Llama-3.1-70B-Instruct-Turbo ID: | `false` | `true` |
-| Llama-3.1-8B-Instruct-Turbo ID: | `false` | `true` |
-| 7B-Instruct-v0.1 ID: | `false` | `true` |
-| Mixtral-8x7B-Instruct-v0.1 ID: | `false` | `true` |
-
-
## Deprecated Models
The following models are deprecated and no longer available for use in pipes. It is recommended to switch to a supported model.
diff --git a/apps/baseai.dev/content/docs/guides/memory-from-git.mdx b/apps/baseai.dev/content/docs/guides/memory-from-git.mdx
index 8023709a..4361a838 100644
--- a/apps/baseai.dev/content/docs/guides/memory-from-git.mdx
+++ b/apps/baseai.dev/content/docs/guides/memory-from-git.mdx
@@ -8,7 +8,7 @@ tags:
- langbase
section: 'Memory'
published: 2024-10-08
-modified: 2024-11-26
+modified: 2024-10-08
---
# Create a Memory from git Repository
@@ -34,37 +34,27 @@ It will also prompt you if you want to create a memory from the current project
Do you want to create memory from current project git repository? (yes/no) yes
```
-It will create a memory at `baseai/memory/chat-with-repo` and track files in the current git repository. It prints the path of the memory created.
+## Step #3 Provide directory
-Open that file in your editor, it looks like this:
+Next, it will ask you which directory or subdirectory you want to use for the memory. You can select the current directory or any subdirectory.
-```ts
-import {MemoryI} from '@baseai/core';
+```
+Enter the path to the directory to track (relative to current directory):
+```
-const chatWithRepoMemory = (): MemoryI => ({
- name: 'chat-with-repo',
- description: "My list of docs as memory for an AI agent pipe",
- git: {
- enabled: true,
- include: ['**/*'],
- gitignore: true,
- deployedAt: '',
- embeddedAt: ''
- }
-});
+Provide the path, relative to the project root, of the directory you want to use for the memory. E.g., `src/content/docs` to use the `docs` directory inside `src/content`.
-export default chatWithRepoMemory;
-```
+## Step #4 Provide file extensions
-Below is the explanation of the fields in the memory file:
+Next, it will ask you which file extensions you want to track. You can provide a comma-separated list of file extensions, e.g., `.mdx,.md` to track Markdown files. Alternatively, you can provide `*` to track all files.
-- `enabled`: Set to `true` to enable tracking of git repository.
-- `include`: Follows glob pattern to include files from the git repository. You can change the pattern to include only specific files or directories.
-- `gitignore`: Set to `true` to include `.gitignore` file in the memory.
-- `deployedAt`: Set to the commit hash where the memory was last deployed. It is used to track the changes in the memory for deployment. Try to avoid changing this field manually.
-- `embeddedAt`: Set to the commit hash where the memory was last embedded. It is used to track the changes in the memory for local development. Try to avoid changing this field manually.
+```
+Enter file extensions to track (use * for all, or comma-separated list, e.g., .md,.mdx)
+```
-## Step #3 Deploy the memory
+That's it! It creates a memory at `baseai/memory/chat-with-repo` in your current directory that tracks the directory and file extensions you provided in the git repository.
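+
+For reference, the generated memory file reflects these choices. A sketch, assuming the `MemoryI` git fields this guide previously listed; the exact glob pattern the CLI writes may differ:
+
+```ts
+import { MemoryI } from '@baseai/core';
+
+const chatWithRepoMemory = (): MemoryI => ({
+	name: 'chat-with-repo',
+	description: 'Docs as memory for an AI agent pipe',
+	git: {
+		enabled: true,
+		// Hypothetical pattern built from the directory and extensions chosen above.
+		include: ['src/content/docs/**/*.{md,mdx}'],
+		gitignore: true,
+		deployedAt: '',
+		embeddedAt: ''
+	}
+});
+
+export default chatWithRepoMemory;
+```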
+
+## Step #5 Deploy the memory
Commit all the changes to git and deploy.
@@ -72,13 +62,12 @@ Commit all the changes to git and deploy.
npx baseai@latest deploy -m chat-with-repo
```
----
+Next time you want to update the memory with the latest changes from the git repository, you can run the `deploy` command again. Make sure to commit all the changes before deploying.
-## Tracking changes from Git Repository
+---
-That's it! Next time you make changes to the git repository, you can run the `deploy` command again to update the memory. Make sure to commit all the changes before deploying.
+## Running the memory locally
-Similarly, if you are using memory locally, make sure to `embed` the memory again after commit before running the AI agent pipe. As a result, it automatically updates the memory with the latest changes and record the hashes in the memory file.
+You can embed the memory locally using the `embed` command, just like any other memory, and use it with a Pipe. Follow the [quickstart guide](/docs/memory/quickstart) to see how to embed and use the memory.
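+
+For example, assuming the `embed` command accepts the same `-m` flag as `deploy`:
+
+```bash
+npx baseai@latest embed -m chat-with-repo
+```
+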
---
-
diff --git a/apps/baseai.dev/content/docs/tools/faqs.mdx b/apps/baseai.dev/content/docs/tools/faqs.mdx
index ffda7df1..22411ad1 100644
--- a/apps/baseai.dev/content/docs/tools/faqs.mdx
+++ b/apps/baseai.dev/content/docs/tools/faqs.mdx
@@ -44,31 +44,9 @@ You can learn more about it [here](/docs/tool/quickstart).
---
-## What providers in BaseAI support tool calling?
+## What models in BaseAI support tool calling?
-We support tool calling for the following providerss:
-
-### OpenAI models
-
-We support tool calling in all models provided by OpenAI.
-
-### Anthropic models
-
-We support tool calling in all models provided by Anthropic. However, it is in beta at the moment.
-
-### Google
-
-We support tool calling in all models provided by Google. However, it is in beta at the moment.
-
-### Together AI
-
-We support tool calling in the following models provided by Together AI. However, it is in beta at the moment.
-
-- meta-llama/Meta-Llama-3.1-405B-Instruct-Turbo
-- meta-llama/Meta-Llama-3.1-70B-Instruct-Turbo
-- meta-llama/Meta-Llama-3.1-8B-Instruct-Turbo
-- mistralai/Mistral-7B-Instruct-v0.1
-- mistralai/Mixtral-8x7B-Instruct-v0.1
+Currently, all the OpenAI models, such as GPT-4o, GPT-4, and GPT-3.5 Turbo, support tool calling in BaseAI.
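+
+For example, here is a tool such a pipe can call. A minimal sketch, assuming the `ToolI` shape scaffolded by `npx baseai@latest tool`; the name, parameters, and `run` signature here are illustrative:
+
+```ts
+import { ToolI } from '@baseai/core';
+
+// Illustrative stub: assumed to receive the parsed tool-call arguments.
+export async function getCurrentWeather({ location }: { location: string }) {
+	return `It is sunny in ${location}.`;
+}
+
+const toolGetCurrentWeather = (): ToolI => ({
+	run: getCurrentWeather,
+	type: 'function',
+	function: {
+		name: 'getCurrentWeather',
+		description: 'Get the current weather of a given location',
+		parameters: {
+			type: 'object',
+			properties: {
+				location: {
+					type: 'string',
+					description: 'The city, e.g. San Francisco'
+				}
+			},
+			required: ['location']
+		}
+	}
+});
+
+export default toolGetCurrentWeather;
+```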
---
diff --git a/apps/baseai.dev/content/learn/learn/index.mdx b/apps/baseai.dev/content/learn/learn/index.mdx
index 3da1d907..f2a7409d 100644
--- a/apps/baseai.dev/content/learn/learn/index.mdx
+++ b/apps/baseai.dev/content/learn/learn/index.mdx
@@ -46,7 +46,7 @@ In these learn guides, you will learn how to locally:
Create a directory in your local machine and navigate to it. Run the following command in the terminal:
```bash
-mkdir my-ai-project && cd my-ai-project
+mkdir my-ai-project
+cd my-ai-project
npm init -y
npm install dotenv
```
diff --git a/apps/baseai.dev/next.config.mjs b/apps/baseai.dev/next.config.mjs
index 9564050d..fc785992 100644
--- a/apps/baseai.dev/next.config.mjs
+++ b/apps/baseai.dev/next.config.mjs
@@ -7,7 +7,6 @@ const nextConfig = {
images: {
domains: ['raw.githubusercontent.com/']
},
- transpilePackages: ['next-mdx-remote'],
async redirects() {
return [];
}
diff --git a/apps/baseai.dev/package.json b/apps/baseai.dev/package.json
index c400ce7e..0a4c0a7c 100644
--- a/apps/baseai.dev/package.json
+++ b/apps/baseai.dev/package.json
@@ -41,12 +41,11 @@
"flexsearch": "^0.7.31",
"framer-motion": "^10.18.0",
"gray-matter": "^4.0.3",
- "html2canvas": "^1.4.1",
"lucide-react": "^0.378.0",
"mdast-util-to-string": "^4.0.0",
"mdx-annotations": "^0.1.1",
"mxcn": "^2.0.0",
- "next": "14.2.35",
+ "next": "^14.0.4",
"next-mdx-remote": "^5.0.0",
"next-themes": "^0.2.1",
"react": "^18.2.0",
diff --git a/apps/baseai.dev/public/arrow.svg b/apps/baseai.dev/public/arrow.svg
index df75ca25..fde1f997 100644
--- a/apps/baseai.dev/public/arrow.svg
+++ b/apps/baseai.dev/public/arrow.svg
@@ -7,4 +7,4 @@
-
+
diff --git a/apps/baseai.dev/public/baseai.svg b/apps/baseai.dev/public/baseai.svg
index 33d64298..f202bf48 100644
--- a/apps/baseai.dev/public/baseai.svg
+++ b/apps/baseai.dev/public/baseai.svg
@@ -1,7 +1,7 @@
+ stroke="white" stroke-width="6" stroke-linecap="round" stroke-linejoin="round" />
diff --git a/apps/baseai.dev/public/favicon/2024/android-chrome-192x192.png b/apps/baseai.dev/public/favicon/2024/android-chrome-192x192.png
deleted file mode 100644
index ed7c8c99..00000000
Binary files a/apps/baseai.dev/public/favicon/2024/android-chrome-192x192.png and /dev/null differ
diff --git a/apps/baseai.dev/public/favicon/2024/android-chrome-512x512.png b/apps/baseai.dev/public/favicon/2024/android-chrome-512x512.png
deleted file mode 100644
index 2de91b20..00000000
Binary files a/apps/baseai.dev/public/favicon/2024/android-chrome-512x512.png and /dev/null differ
diff --git a/apps/baseai.dev/public/favicon/2024/apple-touch-icon.png b/apps/baseai.dev/public/favicon/2024/apple-touch-icon.png
deleted file mode 100644
index f3607b30..00000000
Binary files a/apps/baseai.dev/public/favicon/2024/apple-touch-icon.png and /dev/null differ
diff --git a/apps/baseai.dev/public/favicon/2024/browserconfig.xml b/apps/baseai.dev/public/favicon/2024/browserconfig.xml
deleted file mode 100644
index b9639caf..00000000
--- a/apps/baseai.dev/public/favicon/2024/browserconfig.xml
+++ /dev/null
@@ -1,9 +0,0 @@
-
-
-
-
-
- #000000
-
-
-
diff --git a/apps/baseai.dev/public/favicon/2024/favicon-16x16.png b/apps/baseai.dev/public/favicon/2024/favicon-16x16.png
deleted file mode 100644
index d96c6e05..00000000
Binary files a/apps/baseai.dev/public/favicon/2024/favicon-16x16.png and /dev/null differ
diff --git a/apps/baseai.dev/public/favicon/2024/favicon-32x32.png b/apps/baseai.dev/public/favicon/2024/favicon-32x32.png
deleted file mode 100644
index 2afa3606..00000000
Binary files a/apps/baseai.dev/public/favicon/2024/favicon-32x32.png and /dev/null differ
diff --git a/apps/baseai.dev/public/favicon/2024/favicon.ico b/apps/baseai.dev/public/favicon/2024/favicon.ico
deleted file mode 100644
index 26fbba19..00000000
Binary files a/apps/baseai.dev/public/favicon/2024/favicon.ico and /dev/null differ
diff --git a/apps/baseai.dev/public/favicon/2024/mstile-144x144.png b/apps/baseai.dev/public/favicon/2024/mstile-144x144.png
deleted file mode 100644
index 055941f8..00000000
Binary files a/apps/baseai.dev/public/favicon/2024/mstile-144x144.png and /dev/null differ
diff --git a/apps/baseai.dev/public/favicon/2024/mstile-150x150.png b/apps/baseai.dev/public/favicon/2024/mstile-150x150.png
deleted file mode 100644
index 8f748f87..00000000
Binary files a/apps/baseai.dev/public/favicon/2024/mstile-150x150.png and /dev/null differ
diff --git a/apps/baseai.dev/public/favicon/2024/mstile-310x150.png b/apps/baseai.dev/public/favicon/2024/mstile-310x150.png
deleted file mode 100644
index 05bd230f..00000000
Binary files a/apps/baseai.dev/public/favicon/2024/mstile-310x150.png and /dev/null differ
diff --git a/apps/baseai.dev/public/favicon/2024/mstile-310x310.png b/apps/baseai.dev/public/favicon/2024/mstile-310x310.png
deleted file mode 100644
index f493e2ca..00000000
Binary files a/apps/baseai.dev/public/favicon/2024/mstile-310x310.png and /dev/null differ
diff --git a/apps/baseai.dev/public/favicon/2024/mstile-70x70.png b/apps/baseai.dev/public/favicon/2024/mstile-70x70.png
deleted file mode 100644
index 46fab254..00000000
Binary files a/apps/baseai.dev/public/favicon/2024/mstile-70x70.png and /dev/null differ
diff --git a/apps/baseai.dev/public/favicon/2024/safari-pinned-tab.svg b/apps/baseai.dev/public/favicon/2024/safari-pinned-tab.svg
deleted file mode 100644
index 8bb66010..00000000
--- a/apps/baseai.dev/public/favicon/2024/safari-pinned-tab.svg
+++ /dev/null
@@ -1,42 +0,0 @@
-
-
-
-
-Created by potrace 1.14, written by Peter Selinger 2001-2017
-
-
-
-
-
-
-
-
-
-
diff --git a/apps/baseai.dev/public/favicon/2024/site.webmanifest b/apps/baseai.dev/public/favicon/2024/site.webmanifest
deleted file mode 100644
index 1b51ab61..00000000
--- a/apps/baseai.dev/public/favicon/2024/site.webmanifest
+++ /dev/null
@@ -1,19 +0,0 @@
-{
- "name": "LANGBASE",
- "short_name": "LANGBASE",
- "icons": [
- {
- "src": "/android-chrome-192x192.png",
- "sizes": "192x192",
- "type": "image/png"
- },
- {
- "src": "/android-chrome-512x512.png",
- "sizes": "512x512",
- "type": "image/png"
- }
- ],
- "theme_color": "#000000",
- "background_color": "#000000",
- "display": "standalone"
-}
diff --git a/apps/baseai.dev/public/favicon/old/android-chrome-192x192.png b/apps/baseai.dev/public/favicon/old/android-chrome-192x192.png
deleted file mode 100644
index 1131a116..00000000
Binary files a/apps/baseai.dev/public/favicon/old/android-chrome-192x192.png and /dev/null differ
diff --git a/apps/baseai.dev/public/favicon/old/android-chrome-512x512.png b/apps/baseai.dev/public/favicon/old/android-chrome-512x512.png
deleted file mode 100644
index b953c267..00000000
Binary files a/apps/baseai.dev/public/favicon/old/android-chrome-512x512.png and /dev/null differ
diff --git a/apps/baseai.dev/public/favicon/old/apple-touch-icon.png b/apps/baseai.dev/public/favicon/old/apple-touch-icon.png
deleted file mode 100644
index 819ab604..00000000
Binary files a/apps/baseai.dev/public/favicon/old/apple-touch-icon.png and /dev/null differ
diff --git a/apps/baseai.dev/public/favicon/old/browserconfig.xml b/apps/baseai.dev/public/favicon/old/browserconfig.xml
deleted file mode 100644
index b9639caf..00000000
--- a/apps/baseai.dev/public/favicon/old/browserconfig.xml
+++ /dev/null
@@ -1,9 +0,0 @@
-
-
-
-
-
- #000000
-
-
-
diff --git a/apps/baseai.dev/public/favicon/old/favicon-16x16.png b/apps/baseai.dev/public/favicon/old/favicon-16x16.png
deleted file mode 100644
index abd1f229..00000000
Binary files a/apps/baseai.dev/public/favicon/old/favicon-16x16.png and /dev/null differ
diff --git a/apps/baseai.dev/public/favicon/old/favicon-32x32.png b/apps/baseai.dev/public/favicon/old/favicon-32x32.png
deleted file mode 100644
index 3f8fe83c..00000000
Binary files a/apps/baseai.dev/public/favicon/old/favicon-32x32.png and /dev/null differ
diff --git a/apps/baseai.dev/public/favicon/old/favicon.ico b/apps/baseai.dev/public/favicon/old/favicon.ico
deleted file mode 100644
index 7c6d4a8a..00000000
Binary files a/apps/baseai.dev/public/favicon/old/favicon.ico and /dev/null differ
diff --git a/apps/baseai.dev/public/favicon/old/mstile-150x150.png b/apps/baseai.dev/public/favicon/old/mstile-150x150.png
deleted file mode 100644
index edd66071..00000000
Binary files a/apps/baseai.dev/public/favicon/old/mstile-150x150.png and /dev/null differ
diff --git a/apps/baseai.dev/public/favicon/old/safari-pinned-tab.svg b/apps/baseai.dev/public/favicon/old/safari-pinned-tab.svg
deleted file mode 100644
index 030832dd..00000000
--- a/apps/baseai.dev/public/favicon/old/safari-pinned-tab.svg
+++ /dev/null
@@ -1,46 +0,0 @@
-
-
-
-
-Created by potrace 1.14, written by Peter Selinger 2001-2017
-
-
-
-
-
-
-
-
-
-
diff --git a/apps/baseai.dev/public/favicon/old/site.webmanifest b/apps/baseai.dev/public/favicon/old/site.webmanifest
deleted file mode 100644
index 30ca3c44..00000000
--- a/apps/baseai.dev/public/favicon/old/site.webmanifest
+++ /dev/null
@@ -1,19 +0,0 @@
-{
- "name": "Langbase.com",
- "short_name": "Langbase.com",
- "icons": [
- {
- "src": "/android-chrome-192x192.png",
- "sizes": "192x192",
- "type": "image/png"
- },
- {
- "src": "/android-chrome-512x512.png",
- "sizes": "512x512",
- "type": "image/png"
- }
- ],
- "theme_color": "#ffffff",
- "background_color": "#ffffff",
- "display": "standalone"
-}
diff --git a/apps/baseai.dev/public/favicon/svg-black/favicon.png b/apps/baseai.dev/public/favicon/svg-black/favicon.png
deleted file mode 100644
index 0bd8dd5f..00000000
Binary files a/apps/baseai.dev/public/favicon/svg-black/favicon.png and /dev/null differ
diff --git a/apps/baseai.dev/public/favicon/svg-black/favicon.svg b/apps/baseai.dev/public/favicon/svg-black/favicon.svg
deleted file mode 100644
index c31c3778..00000000
--- a/apps/baseai.dev/public/favicon/svg-black/favicon.svg
+++ /dev/null
@@ -1,6 +0,0 @@
-
-
-
-
-
\ No newline at end of file
diff --git a/apps/baseai.dev/public/favicon/svg-white/favicon.png b/apps/baseai.dev/public/favicon/svg-white/favicon.png
deleted file mode 100644
index 535f661e..00000000
Binary files a/apps/baseai.dev/public/favicon/svg-white/favicon.png and /dev/null differ
diff --git a/apps/baseai.dev/public/favicon/svg-white/favicon.svg b/apps/baseai.dev/public/favicon/svg-white/favicon.svg
deleted file mode 100644
index e02b9289..00000000
--- a/apps/baseai.dev/public/favicon/svg-white/favicon.svg
+++ /dev/null
@@ -1,6 +0,0 @@
-
-
-
-
-
\ No newline at end of file
diff --git a/apps/baseai.dev/public/texture/panoenv9.jpg b/apps/baseai.dev/public/texture/panoenv9.jpg
deleted file mode 100644
index d5a32bfe..00000000
Binary files a/apps/baseai.dev/public/texture/panoenv9.jpg and /dev/null differ
diff --git a/apps/baseai.dev/src/app/layout.tsx b/apps/baseai.dev/src/app/layout.tsx
index f2542e5d..d0166555 100644
--- a/apps/baseai.dev/src/app/layout.tsx
+++ b/apps/baseai.dev/src/app/layout.tsx
@@ -1,5 +1,4 @@
import { Providers } from '@/app/providers';
-import SupportButton from '@/components/support-button';
import '@/styles/tailwind.css';
import { Inter } from 'next/font/google';
const inter = Inter({ subsets: ['latin'] });
@@ -14,9 +13,7 @@ export async function generateMetadata() {
openGraph: {
title: 'BaseAI - The first Web AI Framework',
description: `BaseAI is the first web AI framework. Deployable with Langbase the composable serverless AI cloud. Built with a focus on simplicity and composability. Helping developers build AI agents with memory (RAG), and deploy serverless. It's composable by design and offers a simple API to build and deploy any AI agents (AI features).`,
- images: [
- 'https://raw.githubusercontent.com/LangbaseInc/docs-images/refs/heads/main/baseai/baseai-ogg.jpg'
- ],
+ images: ['https://raw.githubusercontent.com/LangbaseInc/docs-images/refs/heads/main/baseai/baseai-ogg.jpg'],
siteName: 'BaseAI'
},
twitter: {
@@ -24,9 +21,7 @@ export async function generateMetadata() {
title: 'BaseAI - The first Web AI Framework',
creator: '@LangbaseInc',
description: `BaseAI is the first web AI framework. Deployable with Langbase the composable serverless AI cloud. Built with a focus on simplicity and composability. Helping developers build AI agents with memory (RAG), and deploy serverless. It's composable by design and offers a simple API to build and deploy any AI agents (AI features).`,
- images: [
- 'https://raw.githubusercontent.com/LangbaseInc/docs-images/refs/heads/main/baseai/baseai-ogg.jpg'
- ]
+ images: ['https://raw.githubusercontent.com/LangbaseInc/docs-images/refs/heads/main/baseai/baseai-ogg.jpg']
},
authors: [{ name: 'Langbase, Inc.' }],
robots: {
@@ -34,7 +29,7 @@ export async function generateMetadata() {
follow: true,
googleBot: {
index: true,
- follow: true
+ follow: true,
}
},
keywords: 'BaseAI, Web AI framework',
@@ -52,10 +47,7 @@ export default async function RootLayout({
return (
-
-
- {children}
-
+ {children}
);
diff --git a/apps/baseai.dev/src/components/Footer.tsx b/apps/baseai.dev/src/components/Footer.tsx
index 481ba427..66b48707 100644
--- a/apps/baseai.dev/src/components/Footer.tsx
+++ b/apps/baseai.dev/src/components/Footer.tsx
@@ -6,10 +6,6 @@ import { usePathname } from 'next/navigation';
import { Button } from '@/components/Button';
import navigationData, { navLearn } from '@/data/navigation';
-import { GitHubIcon } from './icons/GitHubIcon';
-import { TwitterIcon } from './icons/TwitterIcon';
-import { DiscordIcon } from './icons/DiscordIcon';
-
function PageLink({
label,
page,
@@ -47,7 +43,7 @@ function PageNavigation() {
const isLearnPath = pathname.startsWith('/learn');
const navLinks = isLearnPath ? navLearn : navigationData;
- let allPages = navLinks.flatMap(group => group.links);
+ let allPages = navLinks.flatMap(group => group.links);
let currentPageIndex = allPages.findIndex(page => page.href === pathname);
if (currentPageIndex === -1) {
@@ -77,46 +73,68 @@ function PageNavigation() {
);
}
-function Socials() {
+function XIcon(props: React.ComponentPropsWithoutRef<'svg'>) {
return (
- <>
-
-
-
-
-
-
-
-
-
- >
+
+
+
+ );
+}
+
+function GitHubIcon(props: React.ComponentPropsWithoutRef<'svg'>) {
+ return (
+
+
+
+ );
+}
+
+function DiscordIcon(props: React.ComponentPropsWithoutRef<'svg'>) {
+ return (
+
+
+
+ );
+}
+
+function SocialLink({
+ href,
+ icon: Icon,
+ children
+}: {
+ href: string;
+ icon: React.ComponentType<{ className?: string }>;
+ children: React.ReactNode;
+}) {
+ return (
+
+ {children}
+
+
);
}
function SmallPrint() {
return (
-
+
© Copyright {new Date().getFullYear()}. All rights
reserved.
-
+
+ Follow us on X
+
+
+ Follow us on GitHub
+
{/*
Join our Discord server
*/}
@@ -127,7 +145,7 @@ function SmallPrint() {
export function Footer() {
return (
-
+
diff --git a/apps/baseai.dev/src/components/Header.tsx b/apps/baseai.dev/src/components/Header.tsx
index 7fc46b80..ab15620b 100644
--- a/apps/baseai.dev/src/components/Header.tsx
+++ b/apps/baseai.dev/src/components/Header.tsx
@@ -15,9 +15,6 @@ import { ThemeToggle } from '@/components/ThemeToggle';
import BaseAILogo from './baseai-logo';
import { Anchor } from './ui/anchor';
import { IconDocs } from './ui/iconists/icon-docs';
-import { GitHubIcon } from './icons/GitHubIcon';
-import { TwitterIcon } from './icons/TwitterIcon';
-import { DiscordIcon } from './icons/DiscordIcon';
/**
* Retrieves the section title based on the provided pathname.
@@ -70,6 +67,14 @@ function HeaderLinks() {
return (
<>
+
+ ★ BaseAI
+
{text}
-
-
- Star us on GitHub
-
- >
- );
-}
-
-function Socials() {
- return (
- <>
-
-
-
-
-
-
-
-
-
>
);
}
@@ -140,9 +105,9 @@ export const Header = forwardRef<
ref={ref}
className={clsx(
className,
- 'fixed inset-0 inset-x-0 top-0 z-50 flex h-14 px-4 md:static md:mx-5 md:my-6 md:flex md:h-auto md:items-center md:justify-between md:gap-0 md:px-0 md:transition lg:left-72 lg:z-30 xl:left-80',
+ 'fixed inset-0 inset-x-0 top-0 z-50 flex h-14 px-4 md:static md:mx-5 md:my-6 md:flex md:h-auto md:items-center md:justify-between md:gap-12 md:px-0 md:transition lg:left-72 lg:z-30 xl:left-80',
!isInsideMobileNavigation &&
- 'backdrop-blur-sm lg:left-72 xl:left-80 dark:backdrop-blur',
+ 'backdrop-blur-sm lg:left-72 xl:left-80 dark:backdrop-blur',
isInsideMobileNavigation ? 'xbg-background' : 'xbg-background'
)}
style={
@@ -152,44 +117,36 @@ export const Header = forwardRef<
} as React.CSSProperties
}
>
-
+
-
-
+
+
-
-
-
- {currentTitle}
+ {currentTitle}
+
+
+
+
+
+
-
+
-
-
-
-
-
-
-
diff --git a/apps/baseai.dev/src/components/Layout.tsx b/apps/baseai.dev/src/components/Layout.tsx
index 90365d91..8b704316 100644
--- a/apps/baseai.dev/src/components/Layout.tsx
+++ b/apps/baseai.dev/src/components/Layout.tsx
@@ -27,8 +27,11 @@ export function Layout({
className="contents lg:pointer-events-none lg:fixed lg:inset-0 lg:z-40 lg:flex"
>
-
-
+
+
@@ -38,7 +41,7 @@ export function Layout({
-
+
{children}
diff --git a/apps/baseai.dev/src/components/MobileNavigation.tsx b/apps/baseai.dev/src/components/MobileNavigation.tsx
index d8c96717..551cd59e 100644
--- a/apps/baseai.dev/src/components/MobileNavigation.tsx
+++ b/apps/baseai.dev/src/components/MobileNavigation.tsx
@@ -13,6 +13,7 @@ import { Dialog, Transition } from '@headlessui/react';
import { motion } from 'framer-motion';
import { create } from 'zustand';
+import { Header } from '@/components/Header';
import { Navigation } from '@/components/Navigation';
function MenuIcon(props: React.ComponentPropsWithoutRef<'svg'>) {
@@ -40,7 +41,6 @@ function XIcon(props: React.ComponentPropsWithoutRef<'svg'>) {
>
-
);
}
@@ -67,20 +67,6 @@ function MobileNavigationDialog({
}
}, [pathname, searchParams, close, initialPathname, initialSearchParams]);
- useEffect(() => {
- const removeInertFromTopDiv = () => {
- const topDiv = document.querySelector('div');
-
- if (topDiv?.hasAttribute('inert')) {
- topDiv.removeAttribute('inert');
- }
- };
-
- const timer = setTimeout(removeInertFromTopDiv, 50);
-
- return () => clearTimeout(timer);
- }, [isOpen]);
-
function onClickDialog(event: React.MouseEvent) {
if (!(event.target instanceof HTMLElement)) {
return;
@@ -103,7 +89,7 @@ function MobileNavigationDialog({
-
+
+
+
+
+
@@ -156,7 +154,7 @@ export const useMobileNavigationStore = create<{
toggle: () => set(state => ({ isOpen: !state.isOpen }))
}));
-export function MobileNavigation(props: any) {
+export function MobileNavigation() {
let isInsideMobileNavigation = useIsInsideMobileNavigation();
let { isOpen, toggle, close } = useMobileNavigationStore();
let ToggleIcon = isOpen ? XIcon : MenuIcon;
@@ -169,14 +167,13 @@ export function MobileNavigation(props: any) {
aria-label="Toggle navigation"
onClick={toggle}
>
-
+
{!isInsideMobileNavigation && (
)}
-
);
}
diff --git a/apps/baseai.dev/src/components/Search.tsx b/apps/baseai.dev/src/components/Search.tsx
index 9af05ef4..25a4bfe7 100644
--- a/apps/baseai.dev/src/components/Search.tsx
+++ b/apps/baseai.dev/src/components/Search.tsx
@@ -470,19 +470,19 @@ export function Search() {
}, []);
return (
-
+
-
- Find something...
-
+
+ Find something...
+
{modifierKey}
K
-
+
@@ -494,17 +494,17 @@ export function MobileSearch() {
let { buttonProps, dialogProps } = useSearchProps();
return (
-
+
-
+
);
diff --git a/apps/baseai.dev/src/components/home/hero.tsx b/apps/baseai.dev/src/components/home/hero.tsx
index 0d2978ae..e52e7538 100644
--- a/apps/baseai.dev/src/components/home/hero.tsx
+++ b/apps/baseai.dev/src/components/home/hero.tsx
@@ -7,13 +7,12 @@ import '../../styles/global.css';
import { Anchor } from '../ui/anchor';
import { IconDocs } from '../ui/iconists/icon-docs';
import WebGLInitializer from './webgl';
-import Link from 'next/link';
const inter = Inter({ subsets: ['latin'] });
-export default function Hero({ }) {
+export default function Hero({}) {
return (
-
-
+
+
@@ -23,16 +22,19 @@ export default function Hero({ }) {
function Content() {
return (
-
-
-
+
+
+
-
-
+
deploy serverless
@@ -40,61 +42,40 @@ function Content() {
-
-
-
-
-
-
-
-
-
- BaseAI
-
-
- The first Web AI Framework.
+
+
+
+
+
+
+
+
+ Base AI
+ {' '}
+
+ The first Web AI Framework.
+
-
+
+
+ The easiest way to build serverless autonomous
+ AI agents with memory. Start building
+ local-first, agentic pipes, tools, and memory.
+ Deploy serverless with one command.
+
-
- The easiest way to build serverless autonomous AI
- agents with memory. Start building local-first,
- agentic pipes, tools, and memory. Deploy serverless
- with one command.
-
-
-
-
-
- Get Started
-
-
- Learn BaseAI
-
-
-
-
-
-
-
-
-
-
-
-
+
+
agentic{' '}
( {' '}
pipes{' '}
@@ -106,10 +87,30 @@ function Content() {
-
-
-
+
+
+
+
+ Get Started
+
+
+ Learn BaseAI
+
+
+
);
@@ -119,64 +120,6 @@ interface CopyableCommandProps {
command: string;
}
-function Socials() {
- return (
- <>
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
- >
- );
-}
-
function CopyableCommand({ command }: CopyableCommandProps) {
const [copied, setCopied] = useState(false);
@@ -201,7 +144,7 @@ function CopyableCommand({ command }: CopyableCommandProps) {
aria-label={`Copy command: ${command}`}
>
-
+
⌘
{' '}
@@ -245,4 +188,4 @@ function CopyableCommand({ command }: CopyableCommandProps) {
);
-}
\ No newline at end of file
+}
diff --git a/apps/baseai.dev/src/components/home/webgl.tsx b/apps/baseai.dev/src/components/home/webgl.tsx
index c6d649ee..e3750224 100644
--- a/apps/baseai.dev/src/components/home/webgl.tsx
+++ b/apps/baseai.dev/src/components/home/webgl.tsx
@@ -2,11 +2,12 @@
import { useEffect, useRef, useState } from 'react';
import * as THREE from 'three';
-import html2canvas from 'html2canvas';
-import '../../styles/webgl.css';
const WebGLInitializer = () => {
const mountRef = useRef
(null);
+ const [mousePosition, setMousePosition] = useState(
+ new THREE.Vector2(0, 0)
+ );
useEffect(() => {
const scene = new THREE.Scene();
@@ -25,80 +26,52 @@ const WebGLInitializer = () => {
mountRef.current.appendChild(renderer.domElement);
}
- const canvas = document.createElement('canvas');
- canvas.width = window.innerWidth;
- canvas.height = window.innerHeight;
-
- const textDiv = document.createElement('div');
- textDiv.style.position = 'absolute';
- textDiv.style.left = '0';
- textDiv.style.top = '0';
- textDiv.style.width = '100%';
- textDiv.style.height = '100%';
- textDiv.style.fontWeight = 'bold';
- textDiv.style.fontFamily = 'Grotesk';
- textDiv.style.color = 'rgba(255,255,255,1)';
- textDiv.style.display = 'flex';
- textDiv.style.justifyContent = 'center';
- textDiv.style.alignItems = 'center';
- textDiv.textContent = 'BASE AI';
- textDiv.style.zIndex = '-1';
-
- const PIXEL_RATIO = 2;
- const createHighResBackgroundTexture = async (
- width: number,
- height: number
- ) => {
- const scale = PIXEL_RATIO;
- textDiv.style.width = `${width}px`;
- textDiv.style.height = `${height}px`;
- textDiv.style.fontSize = `${width * 0.192}px`;
-
- await document.fonts.ready;
- document.body.appendChild(textDiv);
-
- const lineHeight = window.getComputedStyle(textDiv).lineHeight;
- const y = parseFloat(lineHeight);
-
- const canvas = await html2canvas(textDiv, {
- backgroundColor: '#000000',
- scale: scale,
- width: width,
- height: height,
- logging: false,
- y: y * 0,
- x: 0,
- onclone: document => {
- Array.from(document.querySelectorAll('*')).forEach(e => {
- let existingStyle = e.getAttribute('style') || '';
- e.setAttribute(
- 'style',
- existingStyle +
- '; font-family: Grotesk, sans-serif !important'
- );
- });
- }
- });
+ // Create a background texture with sharper text
+ const createBackgroundTexture = (width: number, height: number) => {
+ const canvas = document.createElement('canvas');
+ const ctx = canvas.getContext('2d');
- const texture = new THREE.CanvasTexture(canvas);
- texture.wrapS = THREE.RepeatWrapping;
- texture.wrapT = THREE.RepeatWrapping;
- return texture;
- };
+ // Increase canvas size for higher resolution
+ const scale = 2; // You can adjust this value for even higher resolution
+ canvas.width = width * scale;
+ canvas.height = height * scale;
- const createInitialTexture = async () => {
- const texture = await createHighResBackgroundTexture(
- window.innerWidth,
- window.innerHeight
- );
- scene.background = texture;
- if (material.uniforms && material.uniforms.u_background) {
- material.uniforms.u_background.value = texture;
+ if (ctx) {
+ ctx.scale(scale, scale); // Scale the context to match the increased canvas size
+ ctx.fillStyle = '#000000';
+ ctx.fillRect(0, 0, width, height);
+
+ // Calculate font size based on screen dimensions
+ const baseFontSize = width * 0.19; // 19% of the viewport width
+ ctx.font = `bold ${baseFontSize}px Grotesk`;
+
+ ctx.fillStyle = '#ffffff';
+ ctx.textAlign = 'center';
+ ctx.textBaseline = 'middle';
+
+ // Use crisp edges for text rendering
+ ctx.imageSmoothingEnabled = false;
+
+ // Draw the text
+ ctx.fillText('BASE AI', width / 2, height / 2);
}
+
+ const bgTexture = new THREE.CanvasTexture(canvas);
+ bgTexture.minFilter = THREE.LinearFilter;
+ bgTexture.magFilter = THREE.LinearFilter;
+ bgTexture.wrapS = THREE.RepeatWrapping;
+ bgTexture.wrapT = THREE.RepeatWrapping;
+ return bgTexture;
};
- createInitialTexture();
+ // Initial background texture creation
+ let bgTexture = createBackgroundTexture(
+ window.innerWidth,
+ window.innerHeight
+ );
+ scene.background = bgTexture;
+ // Create a sphere geometry
const geometry = new THREE.SphereGeometry(0.75, 256, 256);
const textureLoader = new THREE.TextureLoader();
@@ -116,6 +89,7 @@ const WebGLInitializer = () => {
}
);
+ // Custom shader material for the enhanced liquid wavy effect
const material = new THREE.ShaderMaterial({
transparent: true,
uniforms: {
@@ -126,7 +100,7 @@ const WebGLInitializer = () => {
window.innerHeight
)
},
- u_background: { value: null },
+ u_background: { value: bgTexture },
u_viewVector: { value: camera.position },
envMap: { value: envMap },
roughness: { value: 0.0 },
@@ -391,6 +365,7 @@ const WebGLInitializer = () => {
`
});
+ // Create a mesh with the geometry and material
const sphere = new THREE.Mesh(geometry, material);
scene.add(sphere);
@@ -399,7 +374,7 @@ const WebGLInitializer = () => {
function calculateCameraZ(screenWidth: number, screenHeight: number) {
let cameraZ;
-
+ // Breakpoints based on screen width and height
if (screenWidth <= 768) {
if (screen.availWidth < screen.availHeight) {
cameraZ = 4.5;
@@ -408,41 +383,55 @@ const WebGLInitializer = () => {
}
} else if (screenWidth > 768 && screenWidth <= 1920) {
if (screenHeight <= 1080) {
- cameraZ = 2;
+ cameraZ = 2; // Full HD screens (1920x1080)
} else {
- cameraZ = 1.9;
+ cameraZ = 1.9; // Higher aspect ratio or larger height
}
} else if (screenWidth > 1920 && screenWidth <= 2440) {
if (screenHeight <= 1080) {
- cameraZ = 1.75;
+ cameraZ = 1.75; // Wide screens with Full HD height
} else {
- cameraZ = 1.65;
+ cameraZ = 1.65; // Taller screens with higher resolutions
}
} else if (screenWidth > 2440) {
if (screenHeight <= 1440) {
- cameraZ = 1.5;
+ cameraZ = 1.5; // Ultra-wide or larger 2K displays
} else {
- cameraZ = 1.4;
+ cameraZ = 1.4; // 4K and above
}
}
return cameraZ;
}
+ // Get screen width and height
const screenWidth = window.innerWidth;
const screenHeight = window.innerHeight;
+ // Calculate camera Z position based on breakpoints
const cameraZ = calculateCameraZ(screenWidth, screenHeight);
if (cameraZ) camera.position.z = cameraZ;
+ // Raycaster setup
const raycaster = new THREE.Raycaster();
const mouse = new THREE.Vector2();
+ // Mouse move event handler
+ const onMouseMove = (event: MouseEvent) => {
+ mouse.x = (event.clientX / window.innerWidth) * 2 - 1;
+ mouse.y = -(event.clientY / window.innerHeight) * 2 + 1;
+ setMousePosition(new THREE.Vector2(mouse.x, mouse.y));
+ };
+
+ window.addEventListener('mousemove', onMouseMove);
+
+ // Animation loop
const animate = () => {
requestAnimationFrame(animate);
- material.uniforms.u_time.value += 0.02;
+ material.uniforms.u_time.value += 0.02; // Update time for animation
material.uniforms.u_viewVector.value = camera.position;
+ // Update mouse position in the shader
raycaster.setFromCamera(mouse, camera);
const intersects = raycaster.intersectObject(sphere);
if (intersects.length > 0) {
@@ -477,18 +466,20 @@ const WebGLInitializer = () => {
updateCameraPosition();
- createHighResBackgroundTexture(width, height).then(texture => {
- scene.background = texture;
- if (material.uniforms && material.uniforms.u_background) {
- material.uniforms.u_background.value = texture;
- }
- });
+ // Update background texture with new dimensions
+ bgTexture = createBackgroundTexture(width, height);
+ scene.background = bgTexture;
+
+ if (material.uniforms && material.uniforms.u_background) {
+ material.uniforms.u_background.value = bgTexture;
+ }
};
window.addEventListener('resize', onWindowResize);
return () => {
window.removeEventListener('resize', onWindowResize);
+ window.removeEventListener('mousemove', onMouseMove);
if (mountRef.current) {
mountRef.current.removeChild(renderer.domElement);
}
@@ -498,4 +489,4 @@ const WebGLInitializer = () => {
return
;
};
-export default WebGLInitializer;
\ No newline at end of file
+export default WebGLInitializer;
diff --git a/apps/baseai.dev/src/components/icons/DiscordIcon.tsx b/apps/baseai.dev/src/components/icons/DiscordIcon.tsx
deleted file mode 100644
index f119a18c..00000000
--- a/apps/baseai.dev/src/components/icons/DiscordIcon.tsx
+++ /dev/null
@@ -1,16 +0,0 @@
-export function DiscordIcon(props: React.ComponentPropsWithoutRef<'svg'>) {
- return (
-
-
-
- );
-}
diff --git a/apps/baseai.dev/src/components/icons/GitHubIcon.tsx b/apps/baseai.dev/src/components/icons/GitHubIcon.tsx
deleted file mode 100644
index f2d4d544..00000000
--- a/apps/baseai.dev/src/components/icons/GitHubIcon.tsx
+++ /dev/null
@@ -1,16 +0,0 @@
-export function GitHubIcon(props: React.ComponentPropsWithoutRef<'svg'>) {
- return (
-
-
-
- );
-}
diff --git a/apps/baseai.dev/src/components/icons/OpenLink.tsx b/apps/baseai.dev/src/components/icons/OpenLink.tsx
deleted file mode 100644
index ef5a72cb..00000000
--- a/apps/baseai.dev/src/components/icons/OpenLink.tsx
+++ /dev/null
@@ -1,19 +0,0 @@
-export function OpenLink(props: React.ComponentPropsWithoutRef<'svg'>) {
- return (
-
-
-
- );
-}
diff --git a/apps/baseai.dev/src/components/icons/TwitterIcon.tsx b/apps/baseai.dev/src/components/icons/TwitterIcon.tsx
deleted file mode 100644
index 223e689b..00000000
--- a/apps/baseai.dev/src/components/icons/TwitterIcon.tsx
+++ /dev/null
@@ -1,16 +0,0 @@
-export function TwitterIcon(props: React.ComponentPropsWithoutRef<'svg'>) {
- return (
-
-
-
- );
-}
diff --git a/apps/baseai.dev/src/components/mdx/InlineCodeCopy.tsx b/apps/baseai.dev/src/components/mdx/InlineCodeCopy.tsx
index a39ba5e1..6cfd7f7e 100644
--- a/apps/baseai.dev/src/components/mdx/InlineCodeCopy.tsx
+++ b/apps/baseai.dev/src/components/mdx/InlineCodeCopy.tsx
@@ -6,7 +6,6 @@ import {
} from '@heroicons/react/24/solid';
import { Button } from '../ui/button';
import { useCopyToClipboard } from '@/hooks/use-copy-to-clipboard';
-import cn from 'mxcn';
export function InlineCopy({
content,
@@ -16,7 +15,6 @@ export function InlineCopy({
children: React.ReactNode;
}) {
const { isCopied, copyToClipboard } = useCopyToClipboard({ timeout: 2000 });
- const totalChars = content.length;
const onCopy = () => {
navigator.clipboard.writeText(content);
@@ -26,7 +24,7 @@ export function InlineCopy({
return (
- 25 && 'w-[50%] sm:w-full overflow-scroll')}>{content}
+ {content}
{
},
table: (props: any) => {
return (
-
+
);
},
h2: (props: any) => {
diff --git a/apps/baseai.dev/src/components/support-button.tsx b/apps/baseai.dev/src/components/support-button.tsx
deleted file mode 100644
index 370acac5..00000000
--- a/apps/baseai.dev/src/components/support-button.tsx
+++ /dev/null
@@ -1,35 +0,0 @@
-'use client';
-
-import {
- Tooltip,
- TooltipContent,
- TooltipProvider,
- TooltipTrigger
-} from '@/components/ui/tooltip';
-
-export default function SupportButton() {
- return (
-
-
-
-
-
-
-
-
- Contact Support
-
-
-
- );
-}
\ No newline at end of file
diff --git a/apps/baseai.dev/src/mdx/languages.mjs b/apps/baseai.dev/src/mdx/languages.mjs
index 8074789c..2000aef5 100644
--- a/apps/baseai.dev/src/mdx/languages.mjs
+++ b/apps/baseai.dev/src/mdx/languages.mjs
@@ -1,23 +1,20 @@
-import { createRequire } from 'module';
-
-const require = createRequire(import.meta.url);
-const langGraphQL = require('shiki/languages/graphql.tmLanguage.json');
-const langJS = require('shiki/languages/javascript.tmLanguage.json');
-const langJSX = require('shiki/languages/jsx.tmLanguage.json');
-const langJSON = require('shiki/languages/json.tmLanguage.json');
-const langXML = require('shiki/languages/xml.tmLanguage.json');
-const langYAML = require('shiki/languages/yaml.tmLanguage.json');
-const langPHP = require('shiki/languages/php.tmLanguage.json');
-const langHTML = require('shiki/languages/html.tmLanguage.json');
-const langCSS = require('shiki/languages/css.tmLanguage.json');
-const langSCSS = require('shiki/languages/scss.tmLanguage.json');
-const langSASS = require('shiki/languages/sass.tmLanguage.json');
-const langLESS = require('shiki/languages/less.tmLanguage.json');
-const langMarkdown = require('shiki/languages/markdown.tmLanguage.json');
-const langTS = require('shiki/languages/typescript.tmLanguage.json');
-const langTSX = require('shiki/languages/tsx.tmLanguage.json');
-const langShell = require('shiki/languages/shellscript.tmLanguage.json');
-const langPy = require('shiki/languages/python.tmLanguage.json');
+import langGraphQL from 'shiki/languages/graphql.tmLanguage.json' assert { type: 'json' };
+import langJS from 'shiki/languages/javascript.tmLanguage.json' assert { type: 'json' };
+import langJSX from 'shiki/languages/jsx.tmLanguage.json' assert { type: 'json' };
+import langJSON from 'shiki/languages/json.tmLanguage.json' assert { type: 'json' };
+import langXML from 'shiki/languages/xml.tmLanguage.json' assert { type: 'json' };
+import langYAML from 'shiki/languages/yaml.tmLanguage.json' assert { type: 'json' };
+import langPHP from 'shiki/languages/php.tmLanguage.json' assert { type: 'json' };
+import langHTML from 'shiki/languages/html.tmLanguage.json' assert { type: 'json' };
+import langCSS from 'shiki/languages/css.tmLanguage.json' assert { type: 'json' };
+import langSCSS from 'shiki/languages/scss.tmLanguage.json' assert { type: 'json' };
+import langSASS from 'shiki/languages/sass.tmLanguage.json' assert { type: 'json' };
+import langLESS from 'shiki/languages/less.tmLanguage.json' assert { type: 'json' };
+import langMarkdown from 'shiki/languages/markdown.tmLanguage.json' assert { type: 'json' };
+import langTS from 'shiki/languages/typescript.tmLanguage.json' assert { type: 'json' };
+import langTSX from 'shiki/languages/tsx.tmLanguage.json' assert { type: 'json' };
+import langShell from 'shiki/languages/shellscript.tmLanguage.json' assert { type: 'json' };
+import langPy from 'shiki/languages/python.tmLanguage.json' assert { type: 'json' };
const lang = [
{
diff --git a/apps/baseai.dev/src/mdx/rehype.mjs b/apps/baseai.dev/src/mdx/rehype.mjs
index 34874934..ab15be45 100644
--- a/apps/baseai.dev/src/mdx/rehype.mjs
+++ b/apps/baseai.dev/src/mdx/rehype.mjs
@@ -1,17 +1,14 @@
-import { createRequire } from 'module';
import { slugifyWithCounter } from '@sindresorhus/slugify';
import * as acorn from 'acorn';
import { toString } from 'mdast-util-to-string';
import { mdxAnnotations } from 'mdx-annotations';
import shiki from 'shiki';
import { visit } from 'unist-util-visit';
+import theme from './themes/shades-of-purple.json' assert { type: 'json' };
import lang from './languages.mjs';
-const require = createRequire(import.meta.url);
-const theme = require('./themes/shades-of-purple.json');
-
export function rehypeParseCodeBlocks() {
- return tree => {
+ return (tree) => {
visit(tree, 'element', (node, _nodeIndex, parentNode) => {
if (node.tagName === 'code' && node.properties.className) {
parentNode.properties.language =
@@ -24,10 +21,10 @@ export function rehypeParseCodeBlocks() {
let highlighter;
export function rehypeShiki() {
- return async tree => {
+ return async (tree) => {
highlighter =
highlighter ??
- (await shiki.getHighlighter({ theme: theme, langs: lang }));
+ (await shiki.getHighlighter({ theme, langs: lang }));
visit(tree, 'element', node => {
if (
@@ -59,7 +56,7 @@ export function rehypeShiki() {
}
export function rehypeSlugify() {
- return tree => {
+ return (tree) => {
let slugify = slugifyWithCounter();
visit(tree, 'element', node => {
if (node.tagName === 'h2' && !node.properties.id) {
@@ -70,7 +67,7 @@ export function rehypeSlugify() {
}
export function rehypeAddMDXExports(getExports) {
- return tree => {
+ return (tree) => {
let exports = Object.entries(getExports(tree));
for (let [name, value] of exports) {
@@ -126,7 +123,7 @@ export const rehypePlugins = [
rehypeSlugify,
[
rehypeAddMDXExports,
- tree => ({
+ (tree) => ({
sections: `[${getSections(tree).join()}]`
})
]
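
`rehypeShiki` above creates the highlighter lazily and caches it in a module-level variable so every MDX file shares one instance. A minimal sketch of that memoization pattern, assuming shiki's classic v0.x `getHighlighter` API; the theme name is illustrative:

```ts
import shiki, { type Highlighter } from 'shiki';

let highlighter: Highlighter | undefined;

async function getSharedHighlighter(): Promise<Highlighter> {
	// Building a highlighter loads the theme and every grammar, which is
	// expensive, so the instance is cached after the first call.
	highlighter =
		highlighter ?? (await shiki.getHighlighter({ theme: 'nord' }));
	return highlighter;
}
```
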
diff --git a/apps/baseai.dev/src/mdx/themes/index.mjs b/apps/baseai.dev/src/mdx/themes/index.mjs
index 90995857..98477e32 100644
--- a/apps/baseai.dev/src/mdx/themes/index.mjs
+++ b/apps/baseai.dev/src/mdx/themes/index.mjs
@@ -1,3 +1,3 @@
-import shadesOfPurple from './shades-of-purple.json' assert { type: 'json' };
+import shadesOfPurple from './shades-of-purple.json' assert { type: 'json' }
-export default { shadesOfPurple };
+export default { shadesOfPurple }
diff --git a/apps/baseai.dev/src/mdx/themes/index.ts b/apps/baseai.dev/src/mdx/themes/index.ts
index 90995857..98477e32 100644
--- a/apps/baseai.dev/src/mdx/themes/index.ts
+++ b/apps/baseai.dev/src/mdx/themes/index.ts
@@ -1,3 +1,3 @@
-import shadesOfPurple from './shades-of-purple.json' assert { type: 'json' };
+import shadesOfPurple from './shades-of-purple.json' assert { type: 'json' }
-export default { shadesOfPurple };
+export default { shadesOfPurple }
diff --git a/apps/baseai.dev/src/styles/global.css b/apps/baseai.dev/src/styles/global.css
index 7b656b4d..eb9ec71a 100644
--- a/apps/baseai.dev/src/styles/global.css
+++ b/apps/baseai.dev/src/styles/global.css
@@ -111,23 +111,16 @@ html {
}
@font-face {
- font-family: "Grotesk";
- src: url("/AlteHaasGroteskBold.ttf") format("truetype");
+ font-family: 'Grotesk';
+ src: url('/AlteHaasGroteskBold.ttf') format('truetype');
font-weight: normal;
font-style: normal;
}
-/* Crisp Chat Widget Style Overrides */
-#crisp-chatbox .cc-1d4mk.cc-8mq05 {
- background-color: black !important;
+.hero-content {
+ font-family: 'Grotesk';
}
-/* Additional selector for better specificity */
-#crisp-chatbox [data-id="chat_opened"] .cc-1d4mk {
- background-color: black !important;
-}
-
-/* Target the chat bubble background */
-.crisp-client .cc-1d4mk {
- background-color: black !important;
+.helvetica {
+ font-family: 'Helvetica';
}
diff --git a/apps/baseai.dev/src/styles/webgl.css b/apps/baseai.dev/src/styles/webgl.css
deleted file mode 100644
index edc49551..00000000
--- a/apps/baseai.dev/src/styles/webgl.css
+++ /dev/null
@@ -1,8 +0,0 @@
-@tailwind base;
-@layer base {
- img {
- @apply inline-block;
- }
-}
-@tailwind components;
-@tailwind utilities;
diff --git a/examples/agents/it-systems-triage-agent/baseai/baseai.config.ts b/examples/agents/it-systems-triage-agent/baseai/baseai.config.ts
deleted file mode 100644
index 9c028a3a..00000000
--- a/examples/agents/it-systems-triage-agent/baseai/baseai.config.ts
+++ /dev/null
@@ -1,18 +0,0 @@
-import type {BaseAIConfig} from 'baseai';
-
-export const config: BaseAIConfig = {
- log: {
- isEnabled: false,
- logSensitiveData: false,
- pipe: true,
- 'pipe.completion': true,
- 'pipe.request': false,
- 'pipe.response': false,
- tool: false,
- memory: false,
- },
- memory: {
- useLocalEmbeddings: false,
- },
- envFilePath: '.env',
-};
diff --git a/examples/agents/it-systems-triage-agent/baseai/pipes/it-systems-triage-agent.ts b/examples/agents/it-systems-triage-agent/baseai/pipes/it-systems-triage-agent.ts
deleted file mode 100644
index 4d2363b3..00000000
--- a/examples/agents/it-systems-triage-agent/baseai/pipes/it-systems-triage-agent.ts
+++ /dev/null
@@ -1,35 +0,0 @@
-import {PipeI} from '@baseai/core';
-
-const pipeItSystemsTriageAgent = (): PipeI => ({
- // Replace with your API key https://langbase.com/docs/api-reference/api-keys
- apiKey: process.env.LANGBASE_API_KEY!,
- name: `it-systems-triage-agent`,
- description: `IT Systems Triage Agent that provides first-contact support for IT issues in your organization. It can then be extended with a network of agents for tailored use cases.`,
- status: `private`,
- model: `openai:gpt-4o-mini`,
- stream: true,
- json: false,
- store: true,
- moderate: true,
- top_p: 1,
- max_tokens: 1000,
- temperature: 0.7,
- presence_penalty: 0,
- frequency_penalty: 0,
- stop: [],
- tool_choice: 'auto',
- parallel_tool_calls: true,
- messages: [
- {
- role: 'system',
- content:
- 'You are an AI-powered IT support agent for a company that uses Windows, Mac, and Linux operating systems. Your primary role is to classify incoming IT issues and assign them appropriate priorities for the support team to handle. Your objective is to accurately categorize issues and prioritize them based on urgency, but you do not provide technical resolutions.\n\n## Issue Categories:\n1. **Hardware Issues**: Problems with physical components like monitors, keyboards, printers, etc.\n2. **Software Issues**: Issues with applications, operating systems, or software updates.\n3. **Network Issues**: Problems related to internet connectivity, VPN, internal network access.\n4. **Account/Access Issues**: Problems related to login credentials, password resets, or access permissions.\n5. **Security Issues**: Concerns about data breaches, unauthorized access, or phishing attempts.\n6. **Data/Backup Issues**: Problems related to data recovery, backup failures, or data corruption.\n7. **Printing Issues**: Problems with printers or printing functionality.\n8. **Mobile Device Issues**: Issues with company mobile devices, including syncing or app errors.\n9. **Video Conferencing Issues**: Problems with platforms like Zoom, Teams, or other video conferencing tools.\n10. **Other/Uncategorized**: Any issues that don\'t fall into the above categories.\n\n## Priority Levels:\n1. **Critical (P1)**: Major business impact; entire department or company affected. Immediate attention is required.\n2. **High (P2)**: Significant impact on user productivity, preventing completion of important tasks. Needs urgent attention.\n3. **Medium (P3)**: Moderate impact on a user or small team’s productivity; should be addressed in a timely manner.\n4. **Low (P4)**: Minor impact with no immediate effect on business operations; can be scheduled for later.\n\n## Your Tasks:\n1. **Greet the user**: Ask for a brief description of the issue, acknowledging their problem empathetically.\n2. **Issue Classification**: Based on the user’s description, assign the issue to one of the predefined categories. Ensure that you’re considering details such as the operating system and type of issue.\n3. **Priority Assignment**: Determine the priority level (Critical, High, Medium, Low) by evaluating the severity of the issue and its impact on business operations or the user\'s productivity.\n4. **Follow-up Questions**: If necessary, ask clarifying questions to gather more information to ensure accurate classification and prioritization. For example, inquire about error messages, the scope of the impact (one user vs. department), or the device in use.\n5. **Provide the User with Feedback**: Inform the user of the category and priority assigned, explaining why the issue is categorized in that way. \n6. **Log the Issue**: Confirm that the issue has been logged and inform the user that the IT support team will address it according to the assigned priority.\n\n## Example Interaction:\n\n**User**: "I can\'t access my email, and I have an important client meeting in an hour."\n\n**Agent**: "Thank you for reporting this issue. Based on your description, I’ve classified it as an **Account/Access Issue** with a **High (P2) priority**. Email access is critical for your business communication, especially with an upcoming meeting, so our IT team will prioritize it. Is there any other information you\'d like to provide about this issue?"\n\n---\n\n**User**: "My computer has been running slow for two days, and it’s hard to get any work done. I’m using Windows 10."\n\n**Agent**: "I see. I’ve classified this as a **Software Issue** under **Medium (P3) priority** because it’s affecting your ability to work, but it doesn’t appear to be business-critical. I’ve logged your issue, and our IT team will address it soon. Is there anything else you’d like to add?"\n\n---\n\n**User**: "I’m trying to print some documents from my MacBook, but the printer isn\'t responding."\n\n**Agent**: "Thank you for sharing this. I’ve categorized this as a **Printing Issue** with **Low (P4) priority**, as it seems to be isolated to your device. I’ve logged it, and the team will look into it as soon as possible. Let me know if you need any further assistance."\n',
- },
- {name: 'safety', role: 'system', content: ''},
- ],
- variables: [],
- tools: [],
- memory: [],
-});
-
-export default pipeItSystemsTriageAgent;
diff --git a/examples/agents/it-systems-triage-agent/index.ts b/examples/agents/it-systems-triage-agent/index.ts
deleted file mode 100644
index 7dd4c29e..00000000
--- a/examples/agents/it-systems-triage-agent/index.ts
+++ /dev/null
@@ -1,72 +0,0 @@
-import 'dotenv/config';
-import {Message, Pipe} from '@baseai/core';
-import inquirer from 'inquirer';
-import ora from 'ora';
-import chalk from 'chalk';
-import pipeItSystemsTriageAgent from './baseai/pipes/it-systems-triage-agent';
-
-const pipe = new Pipe(pipeItSystemsTriageAgent());
-
-async function main() {
- const initialSpinner = ora(
- 'Connecting to it-systems-triage-agent...',
- ).start();
- // Messages array for keeping track of the conversation
- const messages: Message[] = [
- // Initial message to the agent
- {role: 'user', content: 'Hello, how can I use your services?'},
- ];
-
- try {
- const {completion} = await pipe.run({
- messages,
- });
-
- // Add the agent response to the messages array
- messages.push({role: 'assistant', content: completion});
-
- initialSpinner.stop();
- console.log(chalk.cyan('Agent response...'));
- console.log(completion);
- } catch (error) {
- initialSpinner.stop();
- console.error(chalk.red('Error processing initial request:'), error);
- }
-
- while (true) {
- const {userMsg} = await inquirer.prompt([
- {
- type: 'input',
- name: 'userMsg',
- message: chalk.blue(
- 'Enter your query (or type "exit" to quit):',
- ),
- },
- ]);
-
- if (userMsg.toLowerCase() === 'exit') {
- console.log(chalk.green('Goodbye!'));
- break;
- }
-
- const spinner = ora('Processing your request...').start();
- messages.push({role: 'user', content: userMsg});
- try {
- const {completion: itSystemsTriageAgentResponse} = await pipe.run({
- messages,
- });
- messages.push({
- role: 'assistant',
- content: itSystemsTriageAgentResponse,
- });
- spinner.stop();
- console.log(chalk.cyan('Agent:'));
- console.log(itSystemsTriageAgentResponse);
- } catch (error) {
- spinner.stop();
- console.error(chalk.red('Error processing your request:'), error);
- }
- }
-}
-
-main();
diff --git a/examples/agents/it-systems-triage-agent/package.json b/examples/agents/it-systems-triage-agent/package.json
deleted file mode 100644
index b3cfe5a4..00000000
--- a/examples/agents/it-systems-triage-agent/package.json
+++ /dev/null
@@ -1,21 +0,0 @@
-{
- "name": "it-systems-triage-agent",
- "version": "1.0.0",
- "main": "index.js",
- "scripts": {
- "baseai": "baseai"
- },
- "keywords": [],
- "author": "",
- "license": "ISC",
- "description": "",
- "dependencies": {
- "@baseai/core": "^0.9.19",
- "dotenv": "^16.4.5",
- "inquirer": "^12.0.0",
- "ora": "^8.1.0"
- },
- "devDependencies": {
- "baseai": "^0.9.19"
- }
-}
diff --git a/examples/agents/it-systems-triage-agent/.env.baseai.example b/examples/agents/marketing-campaign-tailoring-agent/.env.baseai.example
similarity index 100%
rename from examples/agents/it-systems-triage-agent/.env.baseai.example
rename to examples/agents/marketing-campaign-tailoring-agent/.env.baseai.example
diff --git a/examples/agents/it-systems-triage-agent/.gitignore b/examples/agents/marketing-campaign-tailoring-agent/.gitignore
similarity index 98%
rename from examples/agents/it-systems-triage-agent/.gitignore
rename to examples/agents/marketing-campaign-tailoring-agent/.gitignore
index 9b5994f6..c256453b 100644
--- a/examples/agents/it-systems-triage-agent/.gitignore
+++ b/examples/agents/marketing-campaign-tailoring-agent/.gitignore
@@ -6,4 +6,3 @@ package-lock.json
pnpm-lock.yaml
# env file
.env
-
diff --git a/examples/agents/it-systems-triage-agent/README.md b/examples/agents/marketing-campaign-tailoring-agent/README.md
similarity index 61%
rename from examples/agents/it-systems-triage-agent/README.md
rename to examples/agents/marketing-campaign-tailoring-agent/README.md
index 72770af4..96395fea 100644
--- a/examples/agents/it-systems-triage-agent/README.md
+++ b/examples/agents/marketing-campaign-tailoring-agent/README.md
@@ -1,38 +1,14 @@
-![IT Systems Triage Agent by ⌘ BaseAI][cover]
+![Marketing Campaign Tailoring Agent by ⌘ BaseAI][cover]
![License: MIT][mit] [![Fork on ⌘ Langbase][fork]][pipe]
-## Build an IT Systems Triage Agent with BaseAI framework — ⌘ Langbase
+## Build a Marketing Campaign Tailoring Agent with BaseAI framework — ⌘ Langbase
-This **IT Triage Agent** is designed to streamline the resolution process by assigning issue priority and category based on a user’s description of their IT system problem. Built as a CLI application, it relies on a BaseAI pipe call and features a central entry point in `index.ts`. Additionally, the logging is configured to be switched off by default in the `baseai.config.ts` file.
-
-This AI Agent is built using the BaseAI framework. It leverages an agentic pipe that integrates over 30+ LLMs (including OpenAI, Gemini, Mistral, Llama, Gemma, etc.) and can handle any data, with context sizes of up to 10M+ tokens, supported by memory. The framework is compatible with any front-end framework (such as React, Remix, Astro, Next.js), giving you, as a developer, the freedom to tailor your AI application exactly as you envision.
-
-## How to use
-
-Navigate to `examples/agents/it-systems-triage-agent` and run the following commands:
-
-```sh
-# Navigate to baseai/examples/agents/it-systems-triage-agent
-cd examples/agents/it-systems-triage-agent
-
-# Install the dependencies
-npm install
-
-# Make sure to copy .env.baseai.example file and
-# create .env file and add all the relevant API keys in it
-cp .env.baseai.example .env
-
-# Run the local baseai dev server to test the examples (uses localhost:9000 port)
-npx baseai dev
-
-# Run the agent
-tsx index.ts
-```
+This AI Agent is built using the BaseAI framework. It leverages an agentic pipe that integrates 30+ LLMs (including OpenAI, Gemini, Mistral, Llama, Gemma, etc.) and can handle any data, with context sizes of up to 10M+ tokens, supported by memory. The framework is compatible with any front-end framework (such as React, Remix, Astro, Next.js), giving you, as a developer, the freedom to tailor your AI application exactly as you envision.
## Features
-- IT Systems Triage Agent — Built with [BaseAI framework and agentic Pipe ⌘ ][qs].
+- Marketing Campaign Tailoring Agent — Built with [BaseAI framework and agentic Pipe ⌘ ][qs].
- Composable Agents — build and compose agents with BaseAI.
- Add and sync a deployed pipe on Langbase locally with `npx baseai@latest add` ([see the Code button][pipe]).
@@ -57,11 +33,12 @@ This project is created by [Langbase][lb] team members, with contributions from:
- Muhammad-Ali Danish - Software Engineer, [Langbase][lb]
**_Built by ⌘ [Langbase.com][lb] — Ship hyper-personalized AI assistants with memory!_**
+
[lb]: https://langbase.com
-[pipe]: https://langbase.com/examples/it-systems-triage-agent
-[gh]: https://github.com/LangbaseInc/baseai/tree/main/examples/agents/it-systems-triage-agent
+[pipe]: https://langbase.com/examples/marketing-campaign-tailoring-agent
+[gh]: https://github.com/LangbaseInc/baseai/tree/main/examples/agents/marketing-campaign-tailoring-agent
[cover]:https://raw.githubusercontent.com/LangbaseInc/docs-images/main/baseai/baseai-cover.png
-[download]:https://download-directory.github.io/?url=https://github.com/LangbaseInc/baseai/tree/main/examples/it-systems-triage-agent
+[download]:https://download-directory.github.io/?url=https://github.com/LangbaseInc/baseai/tree/main/examples/agents/marketing-campaign-tailoring-agent
[learn]:https://baseai.dev/learn
[memory]:https://baseai.dev/docs/memory/quickstart
[toolcalls]:https://baseai.dev/docs/tools/quickstart
@@ -73,4 +50,4 @@ This project is created by [Langbase][lb] team members, with contributions from:
[xab]:https://x.com/AhmadBilalDev
[local]:http://localhost:9000
[mit]: https://img.shields.io/badge/license-MIT-blue.svg?style=for-the-badge&color=%23000000
-[fork]: https://img.shields.io/badge/FORK%20ON-%E2%8C%98%20Langbase-000000.svg?style=for-the-badge&logo=%E2%8C%98%20Langbase&logoColor=000000
+[fork]: https://img.shields.io/badge/FORK%20ON-%E2%8C%98%20Langbase-000000.svg?style=for-the-badge&logo=%E2%8C%98%20Langbase&logoColor=000000
\ No newline at end of file
diff --git a/examples/agents/readme-writer-agent/baseai/baseai.config.ts b/examples/agents/marketing-campaign-tailoring-agent/baseai/baseai.config.ts
similarity index 66%
rename from examples/agents/readme-writer-agent/baseai/baseai.config.ts
rename to examples/agents/marketing-campaign-tailoring-agent/baseai/baseai.config.ts
index 4e65d3f5..3c0328bc 100644
--- a/examples/agents/readme-writer-agent/baseai/baseai.config.ts
+++ b/examples/agents/marketing-campaign-tailoring-agent/baseai/baseai.config.ts
@@ -1,4 +1,4 @@
-import type {BaseAIConfig} from 'baseai';
+import type { BaseAIConfig } from 'baseai';
export const config: BaseAIConfig = {
log: {
@@ -9,10 +9,10 @@ export const config: BaseAIConfig = {
'pipe.request': true,
'pipe.response': true,
tool: true,
- memory: true,
+ memory: true
},
memory: {
- useLocalEmbeddings: false,
+ useLocalEmbeddings: false
},
- envFilePath: '.env',
+ envFilePath: '.env'
};
diff --git a/examples/agents/marketing-campaign-tailoring-agent/baseai/pipes/marketing-campaign-tailoring-agent.ts b/examples/agents/marketing-campaign-tailoring-agent/baseai/pipes/marketing-campaign-tailoring-agent.ts
new file mode 100644
index 00000000..f4b12e59
--- /dev/null
+++ b/examples/agents/marketing-campaign-tailoring-agent/baseai/pipes/marketing-campaign-tailoring-agent.ts
@@ -0,0 +1,42 @@
+import { PipeI } from '@baseai/core';
+
+const pipeMarketingCampaignTailoringAgent = (): PipeI => ({
+ // Replace with your API key https://langbase.com/docs/api-reference/api-keys
+ apiKey: process.env.LANGBASE_API_KEY!,
+ name: `marketing-campaign-tailoring-agent`,
+ description: `MCT Agent helps you tailor your marketing campaign to popular social media platforms, with trackable CTAs tied to your campaign strategy.`,
+ status: `private`,
+ model: `openai:gpt-4o-mini`,
+ stream: true,
+ json: false,
+ store: true,
+ moderate: true,
+ top_p: 1,
+ max_tokens: 1000,
+ temperature: 0.7,
+ presence_penalty: 0,
+ frequency_penalty: 0,
+ stop: [],
+ tool_choice: 'auto',
+ parallel_tool_calls: true,
+ messages: [
+ {
+ role: 'system',
+ content:
+ 'You are an AI assistant specializing in creating tailored marketing campaigns for various popular platforms like X (formerly Twitter), Facebook, TikTok, LinkedIn, and WhatsApp. Your role is to help users create and adapt marketing messages, product descriptions, or content specifically designed for different audiences and platforms. Follow the guidelines below to generate platform-appropriate campaigns:\n\n## Guidelines:\n\n1. **Understand the Product**: Analyze the product description or content provided by the user. The user may describe the product, attach details, or specify a narrative they want to convey. Ask clarifying questions if needed to get a clearer understanding of the product specifics.\n\n2. **Audience Segmentation**: Help the user define or refine different audience categories they want to target (e.g., professionals on LinkedIn, Gen Z on TikTok, casual users on Facebook, etc.). Understand the needs, behaviors, and preferences of each audience segment.\n\n3. **Platform Customization**: Tailor the core message or content to match the unique tone, style, and format of each platform. Use platform-specific best practices:\n - **X (Twitter)**: Short, concise messaging with hashtags and mentions for maximum engagement.\n - **Facebook**: Friendly, engaging narratives, and visual-heavy content, leveraging groups or ads.\n - **TikTok**: Highly visual, trend-based, and engaging short-form video content that is informal and fun.\n - **LinkedIn**: Professional, informative, and solution-focused content, often geared towards B2B interactions.\n - **WhatsApp**: Personalized, direct, and conversational messaging for one-on-one or group interactions.\n\n4. **Message Tailoring**: Adapt the product’s narrative to resonate with each audience segment. Consider the following factors:\n - **Tone**: Adjust the tone to suit the platform (e.g., casual on TikTok, professional on LinkedIn).\n - **Content Length**: Short-form content for platforms like X and TikTok; longer, detailed posts for LinkedIn.\n - **Visuals and Media**: Ensure the right type of media (images, videos, GIFs) is suggested or included, optimized for each platform.\n\n5. **Call to Action (CTA)**: Help the user create compelling CTAs for each platform. Adapt the CTA based on the audience and platform (e.g., "Swipe up to learn more" on TikTok, "Download our whitepaper" on LinkedIn).\n\n6. **Multi-Channel Consistency**: Ensure that while the messaging is tailored for each platform, the core theme of the campaign remains consistent across channels.\n\n7. **Provide Recommendations**: Offer suggestions on message style, platform-specific keywords, hashtags, or even content schedules based on platform norms and audience behaviors.\n\n8. **Menu for Platform Selection**: Ask the user if they would like to focus on specific platforms (e.g., X, Facebook, LinkedIn) or if they want to tailor the message for all platforms. Provide a list of options for easy selection:\n - Option 1: X (formerly Twitter)\n - Option 2: Facebook\n - Option 3: TikTok\n - Option 4: LinkedIn\n - Option 5: WhatsApp\n - Option 6: All of the above\n\n9. **Feedback Loop**: Ask the user for feedback on the tailored messages and refine them if necessary. Adjust tone, content, or format based on user inputs.\n\n## When the user provides a message or product description, respond with:\n\n1. **Platform-Specific Campaign Suggestions**: Provide tailored versions of the user\'s message/content for each specified platform (X, Facebook, TikTok, LinkedIn, WhatsApp).\n2. **Audience-Specific Targeting Tips**: Suggest audience segmentation and best practices for each platform (e.g., appealing to Gen Z on TikTok, professionals on LinkedIn).\n3. **Media Recommendations**: Offer suggestions on visual or media formats for each platform (e.g., vertical video for TikTok, infographics for LinkedIn).\n4. **Call to Action**: Provide customized calls to action that best fit each platform and audience segment.\n\n5. **Menu for Platform Focus**: Offer a selection of platforms based on user preference to target one or multiple channels.\n\n**Example Input**: "We want to launch a new eco-friendly water bottle. Our key message is its sustainable design and durability. We want to target young professionals on LinkedIn, eco-conscious consumers on Facebook, and Gen Z on TikTok."\n\n**Example Output**:\n\n- **LinkedIn**: "Introducing our new eco-friendly water bottle – designed for the professional on the go. Durable, sleek, and sustainable. Join the movement towards a greener workplace. #Sustainability #EcoFriendlyWorkplace"\n- **Facebook**: "Our new water bottle isn\'t just another bottle – it\'s a commitment to sustainability. Durable, eco-friendly, and built to last, it’s perfect for those who care about the planet. Learn more and make a difference today!"\n- **TikTok**: [30-second video suggestion]: A quick demonstration of the bottle’s features with popular music. "Why settle for ordinary when you can go green? 🌍♻️ Check out our eco-friendly water bottle that\'s as tough as it is sustainable. #EcoVibes #Sustainability #ForThePlanet"\n\n---\n\n**Is there a specific platform you\'d like to focus on, or should I provide tailored suggestions for all of them?** \nPlease choose one of the following options:\n1. X (formerly Twitter)\n2. Facebook\n3. TikTok\n4. LinkedIn\n5. WhatsApp\n6. All of the above\n'
+ },
+ { name: 'json', role: 'system', content: '' },
+ { name: 'safety', role: 'system', content: '' },
+ {
+ name: 'opening',
+ role: 'system',
+ content: 'Welcome to Langbase. Prompt away!'
+ },
+ { name: 'rag', role: 'system', content: '' }
+ ],
+ variables: [],
+ tools: [],
+ memory: []
+});
+
+export default pipeMarketingCampaignTailoringAgent;
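
Because the pipe above sets `stream: true`, it can also be consumed as a stream. A minimal sketch using the `getRunner` and `getTextContent` helpers from `@baseai/core` (the same helpers the removed readme-writer agent used); error handling is omitted for brevity:

```ts
import 'dotenv/config';
import { getRunner, getTextContent, Pipe } from '@baseai/core';
import pipeMarketingCampaignTailoringAgent from './baseai/pipes/marketing-campaign-tailoring-agent';

const pipe = new Pipe(pipeMarketingCampaignTailoringAgent());

async function streamOnce(content: string) {
	// Run the pipe in streaming mode and print chunks as they arrive.
	const { stream } = await pipe.run({
		messages: [{ role: 'user', content }],
		stream: true,
	});

	for await (const chunk of getRunner(stream)) {
		process.stdout.write(getTextContent(chunk));
	}
}

await streamOnce('Tailor a launch post for LinkedIn.');
```
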
diff --git a/examples/agents/marketing-campaign-tailoring-agent/index.ts b/examples/agents/marketing-campaign-tailoring-agent/index.ts
new file mode 100644
index 00000000..43e73263
--- /dev/null
+++ b/examples/agents/marketing-campaign-tailoring-agent/index.ts
@@ -0,0 +1,57 @@
+import 'dotenv/config';
+import { Pipe } from '@baseai/core';
+import inquirer from 'inquirer';
+import ora from 'ora';
+import chalk from 'chalk';
+import pipeMarketingCampaignTailoringAgent from './baseai/pipes/marketing-campaign-tailoring-agent';
+
+
+const pipe = new Pipe(pipeMarketingCampaignTailoringAgent());
+
+async function main() {
+
+ const initialSpinner = ora('Connecting to marketing-campaign-tailoring-agent...').start();
+ try {
+ const { completion: initialMctAgentResponse } = await pipe.run({
+ messages: [{ role: 'user', content: 'Hello, how can I use your services?' }],
+ });
+ initialSpinner.stop();
+ console.log(chalk.cyan('Agent response...'));
+ console.log(initialMctAgentResponse);
+ } catch (error) {
+ initialSpinner.stop();
+ console.error(chalk.red('Error processing initial request:'), error);
+ }
+
+ while (true) {
+ const { userMsg } = await inquirer.prompt([
+ {
+ type: 'input',
+ name: 'userMsg',
+ message: chalk.blue('Enter your query (or type "exit" to quit):'),
+ },
+ ]);
+
+ if (userMsg.toLowerCase() === 'exit') {
+ console.log(chalk.green('Goodbye!'));
+ break;
+ }
+
+ const spinner = ora('Processing your request...').start();
+
+ try {
+ const { completion: mctAgentResponse } = await pipe.run({
+ messages: [{ role: 'user', content: userMsg }],
+ });
+
+ spinner.stop();
+ console.log(chalk.cyan('Agent:'));
+ console.log(mctAgentResponse);
+ } catch (error) {
+ spinner.stop();
+ console.error(chalk.red('Error processing your request:'), error);
+ }
+ }
+}
+
+main();
\ No newline at end of file
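
Note that this entry point sends only the latest user message on each turn, so the agent sees no conversation history. A minimal sketch of carrying history across turns, mirroring the removed it-systems-triage-agent and reusing the `Message` type from `@baseai/core`:

```ts
import { Message, Pipe } from '@baseai/core';
import pipeMarketingCampaignTailoringAgent from './baseai/pipes/marketing-campaign-tailoring-agent';

const pipe = new Pipe(pipeMarketingCampaignTailoringAgent());
const messages: Message[] = [];

async function chat(userMsg: string): Promise<string> {
	messages.push({ role: 'user', content: userMsg });
	const { completion } = await pipe.run({ messages });
	// Keep the assistant reply so the next turn has full context.
	messages.push({ role: 'assistant', content: completion });
	return completion;
}
```
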
diff --git a/examples/agents/marketing-campaign-tailoring-agent/package.json b/examples/agents/marketing-campaign-tailoring-agent/package.json
new file mode 100644
index 00000000..7d582341
--- /dev/null
+++ b/examples/agents/marketing-campaign-tailoring-agent/package.json
@@ -0,0 +1,22 @@
+{
+ "name": "marketing-campaign-tailoring-agent",
+ "version": "1.0.0",
+ "main": "index.js",
+ "scripts": {
+ "test": "echo \"Error: no test specified\" && exit 1",
+ "baseai": "baseai"
+ },
+ "keywords": [],
+ "author": "",
+ "license": "ISC",
+ "description": "",
+ "dependencies": {
+ "@baseai/core": "^0.9.3",
+ "dotenv": "^16.4.5",
+ "inquirer": "^12.0.0",
+ "ora": "^8.1.0"
+ },
+ "devDependencies": {
+ "baseai": "^0.9.3"
+ }
+}
diff --git a/examples/agents/readme-writer-agent/.env.baseai.example b/examples/agents/readme-writer-agent/.env.baseai.example
deleted file mode 100644
index b0d9e992..00000000
--- a/examples/agents/readme-writer-agent/.env.baseai.example
+++ /dev/null
@@ -1,22 +0,0 @@
-# !! SERVER SIDE ONLY !!
-# Keep all your API keys secret — use only on the server side.
-
-# TODO: ADD: Both in your production and local env files.
-# Langbase API key for your User or Org account.
-# How to get this API key https://langbase.com/docs/api-reference/api-keys
-LANGBASE_API_KEY=
-
-# TODO: ADD: LOCAL ONLY. Add only to local env files.
-# Following keys are needed for local pipe runs. For providers you are using.
-# For Langbase, please add the key to your LLM keysets.
-# Read more: Langbase LLM Keysets https://langbase.com/docs/features/keysets
-OPENAI_API_KEY=
-ANTHROPIC_API_KEY=
-COHERE_API_KEY=
-FIREWORKS_API_KEY=
-GOOGLE_API_KEY=
-GROQ_API_KEY=
-MISTRAL_API_KEY=
-PERPLEXITY_API_KEY=
-TOGETHER_API_KEY=
-XAI_API_KEY=
diff --git a/examples/agents/readme-writer-agent/.gitignore b/examples/agents/readme-writer-agent/.gitignore
deleted file mode 100644
index 49056616..00000000
--- a/examples/agents/readme-writer-agent/.gitignore
+++ /dev/null
@@ -1,5 +0,0 @@
-# baseai
-**/.baseai/
-# env file
-.env
-/baseai/memory/code-files/documents
diff --git a/examples/agents/readme-writer-agent/baseai/memory/code-files/index.ts b/examples/agents/readme-writer-agent/baseai/memory/code-files/index.ts
deleted file mode 100644
index 83215427..00000000
--- a/examples/agents/readme-writer-agent/baseai/memory/code-files/index.ts
+++ /dev/null
@@ -1,15 +0,0 @@
-import {MemoryI} from '@baseai/core';
-
-const memoryCodeFiles = (): MemoryI => ({
- name: 'code-files',
- description: 'Memory that contains project files',
- git: {
- enabled: false,
- include: ['documents/**/*'],
- gitignore: true,
- deployedAt: '',
- embeddedAt: '',
- },
-});
-
-export default memoryCodeFiles;
diff --git a/examples/agents/readme-writer-agent/baseai/pipes/readme-writer.ts b/examples/agents/readme-writer-agent/baseai/pipes/readme-writer.ts
deleted file mode 100644
index d71520f3..00000000
--- a/examples/agents/readme-writer-agent/baseai/pipes/readme-writer.ts
+++ /dev/null
@@ -1,47 +0,0 @@
-import {PipeI} from '@baseai/core';
-import memoryCodeFiles from '../memory/code-files';
-
-const pipeReadmeWriter = (): PipeI => ({
- // Replace with your API key https://langbase.com/docs/api-reference/api-keys
- apiKey: process.env.LANGBASE_API_KEY!,
- name: `readme-writer`,
- description: ``,
- status: `public`,
- model: `openai:gpt-4o-mini`,
- stream: true,
- json: false,
- store: true,
- moderate: true,
- top_p: 1,
- max_tokens: 1000,
- temperature: 0.7,
- presence_penalty: 0,
- frequency_penalty: 0,
- stop: [],
- tool_choice: 'auto',
- parallel_tool_calls: true,
- messages: [
- {
- role: 'system',
- content:
- 'Write a {{level}} README file for an open-source project that effectively communicates its purpose, usage, installation instructions, and contribution guidelines.\n\nThe README should include the following sections:\n\n- **Project Title**: A clear and concise title of the project.\n- **Description**: A brief overview of what the project does and its significance.\n- **Installation Instructions**: Step-by-step guidance on how to install the project.\n- **Usage**: Examples demonstrating how to use the project.\n- **Contributing**: Guidelines for contributing to the project, including how to report issues and submit pull requests.\n- **License**: Information about the project\'s license.\n\n# Output Format\n\nThe output should be structured as a Markdown document with the appropriate headings for each section. Aim for a length of approximately 500-800 words.\n\n# Examples\n\n**Example 1:**\n\n**Input:** Project Title: "WeatherApp"\n**Output:**\n\n# WeatherApp\n\n## Description\nWeatherApp is a simple application that provides real-time weather updates for any location. It uses data from various weather APIs to fetch and display the latest weather information.\n\n## Installation Instructions\n1. Clone the repository: \\`git clone https://github.com/user/weatherapp.git\\`\n2. Navigate to the project directory: \\`cd weatherapp\\`\n3. Install dependencies: \\`npm install\\`\n\n## Usage\nTo run the application, use the command: \\`npm start\\`. Open your browser and go to \\`http://localhost:3000\\`.\n\n## Contributing\nWe welcome contributions! Please fork the repository and submit a pull request for any changes.\n\n## License\nThis project is licensed under the MIT License.\n\n**Example 2:**\n\n**Input:** Project Title: "TaskTracker"\n**Output:**\n\n# TaskTracker\n\n## Description\nTaskTracker is a web application designed to help users manage their tasks efficiently. It offers features like task creation, categorization, and progress tracking.\n\n## Installation Instructions\n1. Clone the repository: \\`git clone https://github.com/user/tasktracker.git\\`\n2. Install the required packages: \\`pip install -r requirements.txt\\`\n3. Run the application: \\`python app.py\\`\n\n## Usage\nOnce the application is running, navigate to \\`http://localhost:5000\\` to access the TaskTracker interface.\n\n## Contributing\nTo contribute, please read our contribution guidelines in the \\`CONTRIBUTING.md\\` file.\n\n## License\nThis project is licensed under the GPL-3.0 License.\n\n(Examples should include detailed project descriptions, installation steps, and usage instructions as they would appear for real open-source projects, using appropriate placeholders for project-specific details.)',
- },
- {name: 'json', role: 'system', content: ''},
- {name: 'safety', role: 'system', content: ''},
- {
- name: 'opening',
- role: 'system',
- content: 'Welcome to Langbase. Prompt away!',
- },
- {
- name: 'rag',
- role: 'system',
- content: `Below is some CONTEXT for you to answer the questions. ONLY generate readme from the CONTEXT. CONTEXT consists of multiple information chunks. `,
- },
- ],
- variables: [{name: 'level', value: ''}],
- tools: [],
- memory: [memoryCodeFiles()],
-});
-
-export default pipeReadmeWriter;
diff --git a/examples/agents/readme-writer-agent/index.ts b/examples/agents/readme-writer-agent/index.ts
deleted file mode 100644
index 02636e99..00000000
--- a/examples/agents/readme-writer-agent/index.ts
+++ /dev/null
@@ -1,47 +0,0 @@
-#!/usr/bin/env node
-import 'dotenv/config';
-import {init} from './utils/init';
-import {questions} from './utils/questions';
-import {startBaseAIDevServer} from './utils/start-baseai-server';
-import {copyProjectFiles} from './utils/copy-project-files';
-import {generateReadme} from './utils/generate-readme';
-import {exitServer} from './utils/exit-server';
-import {exit} from './utils/exit';
-import {generateEmbeddings} from './utils/generate-embeddings';
-import {dirName} from './utils/get-dirname';
-import {askOpenAIKey} from './utils/ask-openai-key';
-
-(async function () {
- // Show the welcome message
- init({
- title: `readme-writer-agent`,
- tagLine: `by Saad Irfan`,
- description: `An AI agent to help you write README files for open-source projects.`,
- version: `0.1.0`,
- clear: true,
- });
-
- // Ask for the OpenAI key if it doesn't exist
- await askOpenAIKey({dirName});
-
- // Ask for the readme level
- const {level} = await questions();
-
- // Start the baseAI server
- await startBaseAIDevServer();
-
- // Copy project files in the memory
- await copyProjectFiles({dirName});
-
- // Generate embeddings
- await generateEmbeddings({dirName});
-
- // Generate the readme
- const {path} = await generateReadme({level});
-
- // Exit the baseAI server
- await exitServer();
-
- // Exit the process
- exit({path});
-})();
diff --git a/examples/agents/readme-writer-agent/package.json b/examples/agents/readme-writer-agent/package.json
deleted file mode 100644
index e24cad5b..00000000
--- a/examples/agents/readme-writer-agent/package.json
+++ /dev/null
@@ -1,44 +0,0 @@
-{
- "name": "readme-writer-agent",
- "version": "0.1.0",
- "description": "An AI agent to help you write README files for open-source projects.",
- "type": "module",
- "main": "./dist/index.js",
- "module": "./dist/index.mjs",
- "types": "./dist/index.d.ts",
- "bin": {
- "write-readme": "dist/index.js"
- },
- "files": [
- "dist/**",
- "baseai"
- ],
- "scripts": {
- "build": "tsup",
- "dev": "tsup --watch",
- "write-readme": "NODE_NO_WARNINGS=1 npx tsx index.ts",
- "baseai": "baseai"
- },
- "repository": {
- "type": "git",
- "url": "git+https://github.com/LangbaseInc/baseai.git"
- },
- "keywords": [],
- "author": {
- "name": "Saad Irfan",
- "url": "https://github.com/msaaddev"
- },
- "license": "MIT",
- "dependencies": {
- "@baseai/core": "^0.9.20",
- "@clack/prompts": "^0.7.0",
- "chalk": "5.6.0",
- "clear-any-console": "^1.16.2",
- "figures": "^6.1.0",
- "picocolors": "^1.1.0",
- "tsup": "^8.3.0"
- },
- "devDependencies": {
- "baseai": "^0.9.20"
- }
-}
diff --git a/examples/agents/readme-writer-agent/readme.md b/examples/agents/readme-writer-agent/readme.md
deleted file mode 100644
index 71c26861..00000000
--- a/examples/agents/readme-writer-agent/readme.md
+++ /dev/null
@@ -1,59 +0,0 @@
- ![README Writer Agent by ⌘ BaseAI][cover]
-
- ![License: MIT][mit] [![Fork on ⌘ Langbase][fork]][pipe]
-
-## Description
-The `readme-writer-agent` is an AI-powered tool designed to assist developers in creating comprehensive README files for their open-source projects. This tool simplifies the process of structuring and writing documentation, ensuring that important information is communicated effectively.
-
-This AI Agent is built using the BaseAI framework. It leverages an agentic pipe that integrates over 30+ LLMs (including OpenAI, Gemini, Mistral, Llama, Gemma, etc.) and can handle any data, with context sizes of up to 10M+ tokens, supported by memory. The framework is compatible with any front-end framework (such as React, Remix, Astro, Next.js), giving you, as a developer, the freedom to tailor your AI application exactly as you envision.
-
-## How to use
-
-Navigate to `examples/agents/readme-writer-agent` and run the following commands:
-
-```sh
-# Navigate to baseai/examples/agents/readme-writer-agent
-cd examples/agents/readme-writer-agent
-
-# Install the dependencies
-npm install
-
-# Run the agent
-npm run write-readme
-```
-## Learn more
-
-1. Check the [Learning path to build an agentic AI pipe with ⌘ BaseAI][learn]
-2. Read the [source code on GitHub][gh] for this agent example
-3. Go through Documentation: [Pipe Quick Start][qs]
-4. Learn more about [Memory features in ⌘ BaseAI][memory]
-5. Learn more about [Tool calls support in ⌘ BaseAI][toolcalls]
-
-
-> NOTE:
-> This is a BaseAI project; you can deploy BaseAI pipes, memory, and tool calls on Langbase.
-
----
-
-## Authors
-
-This project is created by [Langbase][lb] team members, with contributions from:
-
-- [Saad Irfan](https://x.com/mrsaadirfan) - Founding Engineer, [Langbase][lb]
-
-
-
-
-[lb]: https://langbase.com
-[pipe]: https://langbase.com/saadirfan/readme-writer
-[gh]: https://github.com/LangbaseInc/baseai/tree/main/examples/agents/readme-writer-agent
-[cover]:https://raw.githubusercontent.com/LangbaseInc/docs-images/main/baseai/baseai-cover.png
-[learn]:https://baseai.dev/learn
-[memory]:https://baseai.dev/docs/memory/quickstart
-[toolcalls]:https://baseai.dev/docs/tools/quickstart
-[deploy]:https://baseai.dev/docs/deployment/authentication
-[signup]: https://langbase.fyi/io
-[qs]:https://baseai.dev/docs/pipe/quickstart
-[docs]:https://baseai.dev/docs
-[mit]: https://img.shields.io/badge/license-MIT-blue.svg?style=for-the-badge&color=%23000000
-[fork]: https://img.shields.io/badge/FORK%20ON-%E2%8C%98%20Langbase-000000.svg?style=for-the-badge&logo=%E2%8C%98%20Langbase&logoColor=000000
diff --git a/examples/agents/readme-writer-agent/tsup.config.ts b/examples/agents/readme-writer-agent/tsup.config.ts
deleted file mode 100644
index 48baa386..00000000
--- a/examples/agents/readme-writer-agent/tsup.config.ts
+++ /dev/null
@@ -1,16 +0,0 @@
-import {defineConfig} from 'tsup';
-
-export default defineConfig({
- clean: true,
- dts: true,
- entry: ['index.ts'],
- format: ['esm'],
- sourcemap: true,
- // target: 'esnext',
- target: 'node16',
- outDir: 'dist',
- splitting: false,
- bundle: true,
- minify: true,
- external: ['react', 'svelte', 'vue'],
-});
diff --git a/examples/agents/readme-writer-agent/utils/ask-openai-key.ts b/examples/agents/readme-writer-agent/utils/ask-openai-key.ts
deleted file mode 100644
index 6acfe9cd..00000000
--- a/examples/agents/readme-writer-agent/utils/ask-openai-key.ts
+++ /dev/null
@@ -1,37 +0,0 @@
-import * as p from '@clack/prompts';
-import path from 'path';
-import fs from 'fs';
-
-export async function askOpenAIKey({dirName}: {dirName: string}) {
- const envPath = path.join(dirName, '.env');
- const hasEnv = fs.existsSync(envPath);
-
- if (hasEnv) {
- const envContent = fs.readFileSync(envPath, 'utf-8');
- const hasOpenAIKey = envContent
- .replace('OPENAI_API_KEY=', '')
- .trim()
- .includes('sk-');
- if (hasOpenAIKey) return;
- }
-
- const openai = await p.group(
- {
- key: () =>
- p.password({
- message: 'Enter your OpenAI API key',
- }),
- },
- {
- onCancel: () => {
- p.cancel('Operation cancelled.');
- process.exit(0);
- },
- },
- );
-
- fs.writeFileSync(envPath, `OPENAI_API_KEY="${openai.key.trim()}"\n`);
- p.log.success('OpenAI API key saved successfully.');
- p.log.info('Now you can run the agent.');
- process.exit(0);
-}
diff --git a/examples/agents/readme-writer-agent/utils/copy-project-files.ts b/examples/agents/readme-writer-agent/utils/copy-project-files.ts
deleted file mode 100644
index 5253ddc0..00000000
--- a/examples/agents/readme-writer-agent/utils/copy-project-files.ts
+++ /dev/null
@@ -1,27 +0,0 @@
-import * as p from '@clack/prompts';
-import path from 'path';
-import {execAsync} from './exec-sync';
-import {handleError} from './handle-error';
-
-export async function copyProjectFiles({dirName}: {dirName: string}) {
- const spinner = p.spinner();
- spinner.start('Copying project files...');
-
- const source = process.cwd();
- const destination = path.join(
- dirName,
- 'baseai',
- 'memory',
- 'code-files',
- 'documents',
- );
-
- try {
- await execAsync(`rm -rf ${destination}`);
- await execAsync(`mkdir -p ${destination}`);
- await execAsync(`cp -rp ${source}/* ${destination}`);
- spinner.stop('Project files copied successfully.');
- } catch (error) {
- handleError({spinner, error});
- }
-}
diff --git a/examples/agents/readme-writer-agent/utils/exec-sync.ts b/examples/agents/readme-writer-agent/utils/exec-sync.ts
deleted file mode 100644
index 5180e65a..00000000
--- a/examples/agents/readme-writer-agent/utils/exec-sync.ts
+++ /dev/null
@@ -1,3 +0,0 @@
-import {promisify} from 'util';
-import {exec} from 'child_process';
-export const execAsync = promisify(exec);
diff --git a/examples/agents/readme-writer-agent/utils/exit-server.ts b/examples/agents/readme-writer-agent/utils/exit-server.ts
deleted file mode 100644
index 44aafdd9..00000000
--- a/examples/agents/readme-writer-agent/utils/exit-server.ts
+++ /dev/null
@@ -1,19 +0,0 @@
-import * as p from '@clack/prompts';
-import {spawn} from 'child_process';
-
-export async function exitServer() {
- const spinner = p.spinner();
- spinner.start('Stopping AI server...');
- // Spawn the server process detached from the parent
- const serverProcess = spawn('npx', ['kill-port', '9000'], {
- // Detach the process so it runs independently
- detached: true,
- // Pipe stdout/stderr to files or ignore them
- stdio: 'ignore',
- shell: process.platform === 'win32',
- });
-
- // Unref the process so it won't keep the parent alive
- serverProcess.unref();
- spinner.stop('AI server stopped.');
-}
diff --git a/examples/agents/readme-writer-agent/utils/exit.ts b/examples/agents/readme-writer-agent/utils/exit.ts
deleted file mode 100644
index 8efee409..00000000
--- a/examples/agents/readme-writer-agent/utils/exit.ts
+++ /dev/null
@@ -1,15 +0,0 @@
-import pc from 'picocolors';
-import figures from 'figures';
-import * as p from '@clack/prompts';
-import {heading} from './heading';
-
-export async function exit({path}: {path: string}) {
- p.outro(
- heading({
- text: 'readme.md',
- sub: `instructions written in \n ${pc.dim(figures.pointer)} ${pc.italic(pc.dim(path))}`,
- green: true,
- }),
- );
- process.exit(0);
-}
diff --git a/examples/agents/readme-writer-agent/utils/generate-embeddings.ts b/examples/agents/readme-writer-agent/utils/generate-embeddings.ts
deleted file mode 100644
index 8fd7c79f..00000000
--- a/examples/agents/readme-writer-agent/utils/generate-embeddings.ts
+++ /dev/null
@@ -1,18 +0,0 @@
-import * as p from '@clack/prompts';
-import {execAsync} from './exec-sync';
-import {handleError} from './handle-error';
-
-export async function generateEmbeddings({dirName}: {dirName: string}) {
- const spinner = p.spinner();
- spinner.start('Understanding your project codebase...');
-
- try {
- await execAsync(`npx baseai@latest embed -m code-files`, {
- cwd: dirName,
- });
-
- spinner.stop('Developed understanding of your project codebase.');
- } catch (error) {
- handleError({spinner, error});
- }
-}
diff --git a/examples/agents/readme-writer-agent/utils/generate-readme.ts b/examples/agents/readme-writer-agent/utils/generate-readme.ts
deleted file mode 100644
index 7df57105..00000000
--- a/examples/agents/readme-writer-agent/utils/generate-readme.ts
+++ /dev/null
@@ -1,54 +0,0 @@
-import {getRunner, getTextContent, Pipe} from '@baseai/core';
-import {handleError} from './handle-error';
-import pipeReadmeWriter from '../baseai/pipes/readme-writer';
-import path from 'path';
-import fs from 'fs';
-import * as p from '@clack/prompts';
-import {execAsync} from './exec-sync';
-
-export async function generateReadme({level}: {level: string}) {
- const spinner = p.spinner();
- spinner.start('AI is thinking...');
-
- try {
- const pipe = new Pipe(pipeReadmeWriter());
- let readmeContent = '';
-
- const {stream} = await pipe.run({
- messages: [
- {
- role: 'user',
- content:
- 'Generate a carefully tailored readme that contains all the necessary information to get started with the project.',
- },
- ],
- variables: [{name: 'level', value: level}],
- stream: true,
- });
-
- // Convert the stream to a stream runner.
- const runner = getRunner(stream);
- spinner.stop(`Let's write the readme docs...`);
-
- const readmePath = path.join(process.cwd(), 'readme.md');
-
- const hasReadme = fs.existsSync(readmePath);
- if (hasReadme) {
- await execAsync(`rm ${readmePath}`);
- }
-
- spinner.start('Writing readme docs in project readme.md file...');
-
- for await (const chunk of runner) {
- const textPart = getTextContent(chunk);
- readmeContent += textPart;
- fs.writeFileSync(readmePath, readmeContent);
- }
-
- spinner.stop('Readme docs written successfully.');
- return {content: readmeContent, path: readmePath};
- } catch (error) {
- handleError({spinner, error});
- process.exit(1);
- }
-}
diff --git a/examples/agents/readme-writer-agent/utils/get-dirname.ts b/examples/agents/readme-writer-agent/utils/get-dirname.ts
deleted file mode 100644
index cb5491f7..00000000
--- a/examples/agents/readme-writer-agent/utils/get-dirname.ts
+++ /dev/null
@@ -1,9 +0,0 @@
-import {dirname} from 'path';
-import {fileURLToPath} from 'url';
-import path from 'path';
-
-const __filename = fileURLToPath(import.meta.url);
-let dirName = dirname(__filename);
-dirName = path.join(dirName, '..');
-
-export {dirName};
diff --git a/examples/agents/readme-writer-agent/utils/handle-error.ts b/examples/agents/readme-writer-agent/utils/handle-error.ts
deleted file mode 100644
index f92124c4..00000000
--- a/examples/agents/readme-writer-agent/utils/handle-error.ts
+++ /dev/null
@@ -1,14 +0,0 @@
-import * as p from '@clack/prompts';
-type Spinner = ReturnType<typeof p.spinner>;
-
-export async function handleError({
- spinner,
- error,
-}: {
- spinner: Spinner;
- error: Error;
-}) {
- spinner.stop();
- p.log.error(`ERROR: ${(error as Error).message}`);
- process.exit(1);
-}
diff --git a/examples/agents/readme-writer-agent/utils/heading.ts b/examples/agents/readme-writer-agent/utils/heading.ts
deleted file mode 100644
index f5afebf4..00000000
--- a/examples/agents/readme-writer-agent/utils/heading.ts
+++ /dev/null
@@ -1,21 +0,0 @@
-import color from 'picocolors';
-
-export function heading({
- text,
- sub,
- dim,
- green,
-}: {
- text: string;
- sub?: string;
- dim?: boolean;
- green?: boolean;
-}) {
- if (green) {
- return `${color.bgGreen(color.black(` ${text} `))} ${sub && sub}`;
- }
- if (dim) {
- return `${color.bgBlack(color.white(` ${text} `))} ${sub && sub}`;
- }
- return `${color.bold(color.bgCyan(color.black(` ${text} `)))} ${sub && sub}`;
-}
diff --git a/examples/agents/readme-writer-agent/utils/init.ts b/examples/agents/readme-writer-agent/utils/init.ts
deleted file mode 100644
index 513c7281..00000000
--- a/examples/agents/readme-writer-agent/utils/init.ts
+++ /dev/null
@@ -1,16 +0,0 @@
-import chalk from 'chalk';
-import clearConsole from 'clear-any-console';
-
-export function init({clear, title, version, tagLine, description}) {
- clear && clearConsole();
- const bg = chalk.hex('#6CC644').inverse.bold;
- const clr = chalk.hex(`#000000`).bold;
-
- console.log();
- console.log(
- `${clr(`${bg(` ${title} `)}`)} v${version} ${chalk.dim(tagLine)}\n${chalk.dim(
- description,
- )}`,
- );
- console.log();
-}
diff --git a/examples/agents/readme-writer-agent/utils/questions.ts b/examples/agents/readme-writer-agent/utils/questions.ts
deleted file mode 100644
index 93cdc9b0..00000000
--- a/examples/agents/readme-writer-agent/utils/questions.ts
+++ /dev/null
@@ -1,28 +0,0 @@
-import * as p from '@clack/prompts';
-
-export async function questions() {
- const readme = await p.group(
- {
- level: () =>
- p.select({
- message:
- 'Choose the level of detail you want in the README.',
- options: [
- {label: 'Simple', value: 'simple' as unknown as any},
- {
- label: 'Detailed',
- value: 'detailed' as unknown as any,
- },
- ],
- }),
- },
- {
- onCancel: () => {
- p.cancel('Operation cancelled.');
- process.exit(0);
- },
- },
- );
-
- return {level: readme.level};
-}
diff --git a/examples/agents/readme-writer-agent/utils/start-baseai-server.ts b/examples/agents/readme-writer-agent/utils/start-baseai-server.ts
deleted file mode 100644
index f905bcb4..00000000
--- a/examples/agents/readme-writer-agent/utils/start-baseai-server.ts
+++ /dev/null
@@ -1,26 +0,0 @@
-import {exec, spawn} from 'child_process';
-import * as p from '@clack/prompts';
-
-export async function startBaseAIDevServer() {
- const spinner = p.spinner();
- spinner.start('Starting AI server...');
- // Spawn the server process detached from the parent
- const serverProcess = spawn('npx', ['baseai', 'dev'], {
- // Detach the process so it runs independently
- detached: true,
- // Pipe stdout/stderr to files or ignore them
- stdio: 'ignore',
- shell: process.platform === 'win32',
- });
-
- // Unref the process so it won't keep the parent alive
- serverProcess.unref();
-
- // Wait a bit for the server to start
- return new Promise(resolve => {
- setTimeout(() => {
- spinner.stop('AI server started.');
- resolve(true);
- }, 2000);
- });
-}
diff --git a/examples/astro/baseai/memory/chat-with-docs/index.ts b/examples/astro/baseai/memory/chat-with-docs/index.ts
index 651e715f..3c9c53b3 100644
--- a/examples/astro/baseai/memory/chat-with-docs/index.ts
+++ b/examples/astro/baseai/memory/chat-with-docs/index.ts
@@ -1,14 +1,13 @@
import type {MemoryI} from '@baseai/core';
+import path from 'path';
const buidMemory = (): MemoryI => ({
name: 'chat-with-docs',
description: 'Chat with given docs',
- git: {
- enabled: false,
- include: ['documents/**/*'],
- gitignore: true,
- deployedAt: '',
- embeddedAt: '',
+ config: {
+ useGitRepo: false,
+ dirToTrack: path.posix.join(''),
+ extToTrack: ['*'],
},
});
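
The hunk above replaces the `git` block with a `config` block (`useGitRepo`, `dirToTrack`, `extToTrack`), matching the `@baseai/core` version pinned below. A minimal sketch of a memory that tracks only a documents folder under this shape; the directory and extension list are illustrative assumptions:

```ts
import type { MemoryI } from '@baseai/core';
import path from 'path';

const buildDocsMemory = (): MemoryI => ({
	name: 'chat-with-docs',
	description: 'Chat with given docs',
	config: {
		useGitRepo: false,
		// Track the documents directory instead of the project root.
		dirToTrack: path.posix.join('documents'),
		// Restrict embedding to markdown sources (assumed extension format).
		extToTrack: ['.md', '.mdx'],
	},
});

export default buildDocsMemory;
```
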
diff --git a/examples/astro/package.json b/examples/astro/package.json
index 95c957d2..cfdf9a91 100644
--- a/examples/astro/package.json
+++ b/examples/astro/package.json
@@ -17,7 +17,7 @@
"@astrojs/react": "^3.6.2",
"@astrojs/tailwind": "^5.1.1",
"@astrojs/vercel": "^7.8.1",
- "@baseai/core": "^0.9.43",
+ "@baseai/core": "^0.9.15",
"@radix-ui/react-slot": "^1.1.0",
"@types/react": "^18.3.9",
"@types/react-dom": "^18.3.0",
@@ -33,6 +33,6 @@
"typescript": "^5.6.2"
},
"devDependencies": {
- "baseai": "^0.9.44"
+ "baseai": "^0.9.15"
}
}
diff --git a/examples/nextjs/.env.baseai.example b/examples/nextjs/.env.baseai.example
index b0d9e992..8c643651 100644
--- a/examples/nextjs/.env.baseai.example
+++ b/examples/nextjs/.env.baseai.example
@@ -19,4 +19,3 @@ GROQ_API_KEY=
MISTRAL_API_KEY=
PERPLEXITY_API_KEY=
TOGETHER_API_KEY=
-XAI_API_KEY=
diff --git a/examples/nextjs/app/api/langbase/pipes/run-stream/route.ts b/examples/nextjs/app/api/langbase/pipes/run-stream/route.ts
index a3f83d45..74390289 100644
--- a/examples/nextjs/app/api/langbase/pipes/run-stream/route.ts
+++ b/examples/nextjs/app/api/langbase/pipes/run-stream/route.ts
@@ -9,27 +9,14 @@ export async function POST(req: NextRequest) {
const pipe = new Pipe(pipeSummary());
// 2. Run the Pipe.
- try {
- const {stream, threadId} = await pipe.run(runOptions);
- // 3. Return the ReadableStream directly with the threadId in the headers
- // to be used on the client side to maintain a single chat thread.
- return new Response(stream, {
- status: 200,
- headers: {
- 'lb-thread-id': threadId ?? '',
- },
- });
- } catch (error: any) {
- return new Response(
- JSON.stringify({
- error,
- }),
- {
- status: error.status || 500,
- headers: {
- 'Content-Type': 'application/json',
- },
- },
- );
- }
+ const {stream, threadId} = await pipe.run(runOptions);
+
+ // 3. Return the ReadableStream directly with the threadId in the headers
+ // to be used on the client side to maintain a single chat thread.
+ return new Response(stream, {
+ status: 200,
+ headers: {
+ 'lb-thread-id': threadId ?? '',
+ },
+ });
}
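
The route above surfaces the thread id in an `lb-thread-id` response header so the client can keep a single chat thread. A minimal client-side sketch of that handshake; the endpoint path follows this example app, and echoing `threadId` back in the run options is an assumption based on the comment above:

```ts
let threadId: string | null = null;

export async function sendMessage(content: string): Promise<Response> {
	const res = await fetch('/api/langbase/pipes/run-stream', {
		method: 'POST',
		headers: { 'Content-Type': 'application/json' },
		body: JSON.stringify({
			messages: [{ role: 'user', content }],
			// Assumed run option: re-send the thread id so every turn
			// joins the same thread.
			...(threadId ? { threadId } : {}),
		}),
	});
	threadId = res.headers.get('lb-thread-id') ?? threadId;
	return res;
}
```
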
diff --git a/examples/nextjs/app/api/langbase/pipes/run-tool-stream/route.ts b/examples/nextjs/app/api/langbase/pipes/run-tool-stream/route.ts
deleted file mode 100644
index 16d72972..00000000
--- a/examples/nextjs/app/api/langbase/pipes/run-tool-stream/route.ts
+++ /dev/null
@@ -1,24 +0,0 @@
-import pipeWithToolsStream from '@/baseai/pipes/pipe-with-tool-stream';
-import {Pipe, RunResponseStream} from '@baseai/core';
-import {NextRequest} from 'next/server';
-
-export async function POST(req: NextRequest) {
- const runOptions = await req.json();
-
- // 1. Initiate the Pipe.
- const pipe = new Pipe(pipeWithToolsStream());
-
- // 2. Run the pipe with user messages and other run options.
- let {stream, threadId} = (await pipe.run({
- ...runOptions,
- stream: true,
- })) as unknown as RunResponseStream;
-
- // 3. Stream the response.
- return new Response(stream, {
- status: 200,
- headers: {
- 'lb-thread-id': threadId ?? '',
- },
- });
-}
diff --git a/examples/nextjs/app/api/langbase/pipes/run/route.ts b/examples/nextjs/app/api/langbase/pipes/run/route.ts
index 342d100a..3bac1b6e 100644
--- a/examples/nextjs/app/api/langbase/pipes/run/route.ts
+++ b/examples/nextjs/app/api/langbase/pipes/run/route.ts
@@ -9,24 +9,8 @@ export async function POST(req: NextRequest) {
const pipe = new Pipe(pipeSummary());
// 2. Run the pipe
- try {
- const result = await pipe.run(runOptions);
+ const result = await pipe.run(runOptions);
- // 3. Return the response stringified.
- return new Response(JSON.stringify(result));
- } catch (error: any) {
- // 4. Return the error response
-
- return new Response(
- JSON.stringify({
- error,
- }),
- {
- status: error.status || 500,
- headers: {
- 'Content-Type': 'application/json',
- },
- },
- );
- }
+ // 3. Return the response stringified.
+ return new Response(JSON.stringify(result));
}
diff --git a/examples/nextjs/app/demo/tool-calling-stream/page.tsx b/examples/nextjs/app/demo/tool-calling-stream/page.tsx
deleted file mode 100644
index 6194ca9a..00000000
--- a/examples/nextjs/app/demo/tool-calling-stream/page.tsx
+++ /dev/null
@@ -1,18 +0,0 @@
-import PipeRunToolStreamExample from '@/components/pipe-run-with-tool-stream';
-import GoHome from '@/components/ui/go-home';
-
-export default function Page() {
- return (
-
-
-
-
- AI Agent Pipes: Tool Calling
-
-
- Run a pipe with tool calling.
-
-
-
- );
-}
diff --git a/examples/nextjs/app/page.tsx b/examples/nextjs/app/page.tsx
index 8e52d8c7..521cbe12 100644
--- a/examples/nextjs/app/page.tsx
+++ b/examples/nextjs/app/page.tsx
@@ -2,17 +2,16 @@ import Link from 'next/link';
export default function Page() {
const examples = [
- { title: 'Pipe Run', href: '/demo/pipe-run' },
- { title: 'Pipe Run Stream', href: '/demo/pipe-run-stream' },
- { title: 'Chat Simple', href: '/demo/chat-simple' },
- { title: 'Chat Advanced', href: '/demo/chat-advanced' },
- { title: 'Tool Calling', href: '/demo/tool-calling' },
- { title: 'Tool Calling Stream', href: '/demo/tool-calling-stream' },
+ {title: 'Pipe Run', href: '/demo/pipe-run'},
+ {title: 'Pipe Run Stream', href: '/demo/pipe-run-stream'},
+ {title: 'Chat Simple', href: '/demo/chat-simple'},
+ {title: 'Chat Advanced', href: '/demo/chat-advanced'},
+ {title: 'Tool Calling', href: '/demo/tool-calling'},
{
title: 'Tool Calling: Pipes as Tools',
href: '/demo/pipe-run-pipes-as-tools',
},
- { title: 'Memory', href: '/demo/memory' },
+ {title: 'Memory', href: '/demo/memory'},
];
return (
diff --git a/examples/nextjs/baseai/memory/chat-with-docs/index.ts b/examples/nextjs/baseai/memory/chat-with-docs/index.ts
index a2140495..58f08569 100644
--- a/examples/nextjs/baseai/memory/chat-with-docs/index.ts
+++ b/examples/nextjs/baseai/memory/chat-with-docs/index.ts
@@ -1,14 +1,13 @@
import {MemoryI} from '@baseai/core';
+import path from 'path';
const buidMemory = (): MemoryI => ({
name: 'chat-with-docs',
description: 'Chat with given docs',
- git: {
- enabled: false,
- include: ['documents/**/*'],
- gitignore: true,
- deployedAt: '',
- embeddedAt: '',
+ config: {
+ useGitRepo: false,
+ dirToTrack: path.posix.join(''),
+ extToTrack: ['*'],
},
});
diff --git a/examples/nextjs/baseai/pipes/pipe-with-tool-stream.ts b/examples/nextjs/baseai/pipes/pipe-with-tool-stream.ts
deleted file mode 100644
index 2d082f64..00000000
--- a/examples/nextjs/baseai/pipes/pipe-with-tool-stream.ts
+++ /dev/null
@@ -1,28 +0,0 @@
-import {PipeI} from '@baseai/core';
-import toolCalculator from '../tools/calculator';
-import toolGetWeather from '../tools/weather';
-
-const pipeWithToolsStream = (): PipeI => ({
- apiKey: process.env.LANGBASE_API_KEY!,
- name: 'pipe-with-tool',
- description: 'An AI agent pipe that can call tools',
- status: 'public',
- model: 'openai:gpt-4o-mini',
- stream: true,
- json: false,
- store: true,
- moderate: true,
- top_p: 1,
- max_tokens: 1000,
- temperature: 0.7,
- presence_penalty: 1,
- frequency_penalty: 1,
- stop: [],
- tool_choice: 'auto',
- parallel_tool_calls: true,
- messages: [{role: 'system', content: `You are a helpful AI assistant.`}],
- variables: [],
- memory: [],
- tools: [toolGetWeather(), toolCalculator()],
-});
-export default pipeWithToolsStream;
diff --git a/examples/nextjs/baseai/pipes/pipe-with-tool.ts b/examples/nextjs/baseai/pipes/pipe-with-tool.ts
index 6abb7c69..b208ac3c 100644
--- a/examples/nextjs/baseai/pipes/pipe-with-tool.ts
+++ b/examples/nextjs/baseai/pipes/pipe-with-tool.ts
@@ -8,7 +8,7 @@ const pipeWithTools = (): PipeI => ({
description: 'An AI agent pipe that can call tools',
status: 'public',
model: 'openai:gpt-4o-mini',
- stream: false,
+ stream: true,
json: false,
store: true,
moderate: true,
diff --git a/examples/nextjs/components/pipe-run-with-tool-stream.tsx b/examples/nextjs/components/pipe-run-with-tool-stream.tsx
deleted file mode 100644
index 171a0377..00000000
--- a/examples/nextjs/components/pipe-run-with-tool-stream.tsx
+++ /dev/null
@@ -1,75 +0,0 @@
-'use client';
-
-import { Button } from '@/components/ui/button';
-import { Input } from '@/components/ui/input';
-import { getRunner, getTextContent } from '@baseai/core';
-import { useState } from 'react';
-
-export default function PipeRunToolStreamExample() {
- const [prompt, setPrompt] = useState(
- 'What is the weather in SF. Square root of 9 and then add 7?',
- );
- const [completion, setCompletion] = useState('');
- const [loading, setLoading] = useState(false);
-
- const handleSubmit = async (e: any) => {
- e.preventDefault();
- if (!prompt.trim()) return;
-
- setLoading(true);
- try {
- const response = await fetch('/api/langbase/pipes/run-tool-stream', {
- method: 'POST',
- headers: { 'Content-Type': 'application/json' },
- body: JSON.stringify({
- messages: [{ role: 'user', content: prompt }],
- }),
- });
-
- if (!response.ok) {
- throw new Error('Network response was not ok');
- }
-
- const runner = getRunner(response.body as ReadableStream);
-
- let localCompletion = '';
- for await (const chunk of runner) {
- const textPart = getTextContent(chunk);
- localCompletion += textPart;
- setCompletion(localCompletion);
- }
- } catch (error) {
- console.error('Error:', error);
- setCompletion('An error occurred while generating the completion.');
- } finally {
- setLoading(false);
- }
- };
-
-	return (
-		<div>
-			<form onSubmit={handleSubmit}>
-				<Input
-					value={prompt}
-					onChange={e => setPrompt(e.target.value)}
-				/>
-				<Button type="submit" disabled={loading}>
-					{loading ? 'AI is thinking...' : 'Ask AI'}
-				</Button>
-			</form>
-			{!loading && completion && (
-				<p>
-					AI: {completion}
-				</p>
-			)}
-		</div>
-	);
-}
diff --git a/examples/nextjs/components/pipe-run.tsx b/examples/nextjs/components/pipe-run.tsx
index 16b7f3c3..239a92db 100644
--- a/examples/nextjs/components/pipe-run.tsx
+++ b/examples/nextjs/components/pipe-run.tsx
@@ -25,19 +25,15 @@ export default function PipeRunExample() {
});
if (!response.ok) {
- const res = await response.json();
- throw new Error(res.error.error.message);
+ throw new Error('Network response was not ok');
}
// Parse the JSON response.
const data = await response.json();
setCompletion(data.completion);
- } catch (error: any) {
- if (error.message) setCompletion(error.message);
- else
- setCompletion(
- 'An error occurred while generating the completion.',
- );
+ } catch (error) {
+ console.error('Error:', error);
+ setCompletion('An error occurred while generating the completion.');
} finally {
setLoading(false);
}
diff --git a/examples/nextjs/package.json b/examples/nextjs/package.json
index ca90550b..6d1bcc70 100644
--- a/examples/nextjs/package.json
+++ b/examples/nextjs/package.json
@@ -11,14 +11,14 @@
"baseai": "baseai"
},
"dependencies": {
- "@baseai/core": "^0.9.43",
+ "@baseai/core": "^0.9.15",
"@radix-ui/react-slot": "^1.1.0",
"class-variance-authority": "^0.7.0",
"clsx": "^2.1.1",
"lucide-react": "^0.416.0",
"mathjs": "^13.1.1",
"mxcn": "^2.0.0",
- "next": "14.2.35",
+ "next": "14.2.5",
"openai": "^4.53.0",
"react": "^18",
"react-dom": "^18",
@@ -29,7 +29,7 @@
"@types/node": "^20",
"@types/react": "^18",
"@types/react-dom": "^18",
- "baseai": "^0.9.44",
+ "baseai": "^0.9.15",
"eslint": "^8",
"eslint-config-next": "14.2.5",
"mini-css-extract-plugin": "^2.9.0",
diff --git a/examples/nodejs/baseai/memory/ai-agent-memory/index.ts b/examples/nodejs/baseai/memory/ai-agent-memory/index.ts
deleted file mode 100644
index 312ef85e..00000000
--- a/examples/nodejs/baseai/memory/ai-agent-memory/index.ts
+++ /dev/null
@@ -1,25 +0,0 @@
-import {MemoryI} from '@baseai/core';
-
-const memoryAiAgentMemory = (): MemoryI => ({
- name: 'ai-agent-memory',
- description: 'My list of docs as memory for an AI agent pipe',
- git: {
- enabled: true,
- include: ['**/*'],
- gitignore: true,
- deployedAt: '',
- embeddedAt: '',
- },
- documents: {
- meta: doc => {
- // generate a URL for each document
- const url = `https://example.com/${doc.path}`;
- return {
- url,
- name: doc.name,
- };
- },
- },
-});
-
-export default memoryAiAgentMemory;
diff --git a/examples/nodejs/baseai/memory/chat-with-docs/index.ts b/examples/nodejs/baseai/memory/chat-with-docs/index.ts
index 8f013d10..78711b34 100644
--- a/examples/nodejs/baseai/memory/chat-with-docs/index.ts
+++ b/examples/nodejs/baseai/memory/chat-with-docs/index.ts
@@ -1,14 +1,13 @@
import {MemoryI} from '@baseai/core';
+import path from 'path';
const buildMemory = (): MemoryI => ({
name: 'chat-with-docs',
description: 'Chat with docs',
- git: {
- enabled: false,
- include: ['documents/**/*'],
- gitignore: true,
- deployedAt: '',
- embeddedAt: '',
+ config: {
+ useGitRepo: false,
+ dirToTrack: path.posix.join(''),
+ extToTrack: ['*'],
},
});
diff --git a/examples/nodejs/baseai/memory/chat-with-repo/index.ts b/examples/nodejs/baseai/memory/chat-with-repo/index.ts
index 60baccc4..c9faa3e7 100644
--- a/examples/nodejs/baseai/memory/chat-with-repo/index.ts
+++ b/examples/nodejs/baseai/memory/chat-with-repo/index.ts
@@ -1,15 +1,14 @@
-import {MemoryI} from '@baseai/core';
+import { MemoryI } from '@baseai/core';
+import path from 'path';
const memoryChatWithRepo = (): MemoryI => ({
- name: 'chat-with-repo',
- description: '',
- git: {
- enabled: true,
- include: ['examples/**/*'],
- gitignore: true,
- deployedAt: '',
- embeddedAt: '',
- },
+ name: 'chat-with-repo',
+ description: '',
+ config: {
+ useGitRepo: true,
+ dirToTrack: path.posix.join('examples'),
+ extToTrack: ["*"]
+ }
});
export default memoryChatWithRepo;
diff --git a/examples/nodejs/package.json b/examples/nodejs/package.json
index 5be76560..4edc1147 100644
--- a/examples/nodejs/package.json
+++ b/examples/nodejs/package.json
@@ -17,11 +17,11 @@
"author": "Ahmad Awais (https://twitter.com/MrAhmadAwais)",
"license": "UNLICENSED",
"dependencies": {
- "@baseai/core": "^0.9.43",
+ "@baseai/core": "^0.9.15",
"dotenv": "^16.4.5"
},
"devDependencies": {
- "baseai": "^0.9.44",
+ "baseai": "^0.9.15",
"tsx": "^4.19.0"
}
}
diff --git a/examples/remix/baseai/memory/chat-with-docs/index.ts b/examples/remix/baseai/memory/chat-with-docs/index.ts
index 925380b1..58f08569 100644
--- a/examples/remix/baseai/memory/chat-with-docs/index.ts
+++ b/examples/remix/baseai/memory/chat-with-docs/index.ts
@@ -1,10 +1,14 @@
import {MemoryI} from '@baseai/core';
+import path from 'path';
const buidMemory = (): MemoryI => ({
name: 'chat-with-docs',
description: 'Chat with given docs',
- useGit: false,
- include: ['documents/**/*'],
+ config: {
+ useGitRepo: false,
+ dirToTrack: path.posix.join(''),
+ extToTrack: ['*'],
+ },
});
export default buidMemory;
diff --git a/examples/remix/package.json b/examples/remix/package.json
index 494b703c..d8115f71 100644
--- a/examples/remix/package.json
+++ b/examples/remix/package.json
@@ -13,7 +13,7 @@
"baseai": "baseai"
},
"dependencies": {
- "@baseai/core": "^0.9.43",
+ "@baseai/core": "^0.9.15",
"@radix-ui/react-slot": "^1.1.0",
"@remix-run/node": "2.12.0",
"@remix-run/react": "2.12.0",
@@ -35,7 +35,7 @@
"@typescript-eslint/parser": "^6.7.4",
"@vercel/remix": "2.12.0",
"autoprefixer": "^10.4.20",
- "baseai": "^0.9.44",
+ "baseai": "^0.9.15",
"eslint": "^8.38.0",
"eslint-import-resolver-typescript": "^3.6.1",
"eslint-plugin-import": "^2.28.1",
diff --git a/packages/baseai/CHANGELOG.md b/packages/baseai/CHANGELOG.md
index d1b20baa..421d2394 100644
--- a/packages/baseai/CHANGELOG.md
+++ b/packages/baseai/CHANGELOG.md
@@ -1,181 +1,5 @@
# baseai
-## 0.9.44
-
-### Patch Changes
-
-- 👌 IMPROVE: Pinned chalk version
-
-## 0.9.43
-
-### Patch Changes
-
-- Fix moderation
-
-## 0.9.42
-
-### Patch Changes
-
-- 📦 NEW: LB-LLM-Key header support
-
-## 0.9.41
-
-### Patch Changes
-
-- 🐛 FIX: Google stream
-
-## 0.9.40
-
-### Patch Changes
-
-- 📦 NEW: meta-llama/Llama-3.3-70B-Instruct-Turbo model
-
-## 0.9.39
-
-### Patch Changes
-
-- 📦 NEW: tools support in pipe.run()
-
-## 0.9.38
-
-### Patch Changes
-
-- 📦 NEW: .env file based BaseAI auth
-
-## 0.9.37
-
-### Patch Changes
-
-- 👌 IMPROVE: Remove unused type
-
-## 0.9.36
-
-### Patch Changes
-
-- 📦 NEW: Dynamically set document metadata
-
-## 0.9.35
-
-### Patch Changes
-
-- 📦 NEW: Pipe API key support in pipe.run()
-
-## 0.9.34
-
-### Patch Changes
-
-- 👌 IMPROVE: Memory config with new features and better UX
-
-## 0.9.33
-
-### Patch Changes
-
-- 📦 NEW: Params for pipe.run() sdk support
-
-## 0.9.32
-
-### Patch Changes
-
-- 👌 IMPROVE: Error handling in usePipe
-
-## 0.9.31
-
-### Patch Changes
-
-- 98f2d7c: 🐛 FIX: Local development server
-- 👌 IMPROVE: Local development server
-
-## 0.9.30
-
-### Patch Changes
-
-- 📦 NEW: Request production AI agent pipe
-
-## 0.9.29
-
-### Patch Changes
-
-- 🐛 FIX: execAsync breaking paths in Windows
-
-## 0.9.28
-
-### Patch Changes
-
-- 📦 NEW: Pipe v1 support
-
-## 0.9.27
-
-### Patch Changes
-
-- 🐛 FIX: Broken pipes and tools build paths in Windows
-
-## 0.9.26
-
-### Patch Changes
-
-- 📦 NEW: Allow empty submit with no message
-
-## 0.9.25
-
-### Patch Changes
-
-- 🐛 FIX: Request timeout and special characters in description
-
-## 0.9.24
-
-### Patch Changes
-
-- 📦 NEW: claude 3.5 Haiku
-
-## 0.9.23
-
-### Patch Changes
-
-- 📦 NEW: setThreadId function in usePipe
-
-## 0.9.22
-
-### Patch Changes
-
-- 🐛 FIX: Anthropic streaming
-- 84d789c: 🐛 FIX: Anthropic streaming
-
-## 0.9.21
-
-### Patch Changes
-
-- 👌 IMPROVE: Redact LLM API key
-
-## 0.9.20
-
-### Patch Changes
-
-- 👌 IMPROVE: logs
-
-## 0.9.19
-
-### Patch Changes
-
-- 🐛 FIX: BaseAI deploy spinner not stopping
-
-## 0.9.18
-
-### Patch Changes
-
-- 📦 NEW: Export setInput and handleResponseStream functions
-
-## 0.9.17
-
-### Patch Changes
-
-- 📦 NEW: Add claude-3.5-sonnet-latest
-
-## 0.9.16
-
-### Patch Changes
-
-- 📦 NEW: XAI models support
-
## 0.9.15
### Patch Changes
diff --git a/packages/baseai/package.json b/packages/baseai/package.json
index fb30c983..ed546477 100644
--- a/packages/baseai/package.json
+++ b/packages/baseai/package.json
@@ -1,7 +1,7 @@
{
"name": "baseai",
"description": "The Web AI Framework Dev - BaseAI.dev",
- "version": "0.9.44",
+ "version": "0.9.15",
"license": "UNLICENSED",
"type": "module",
"main": "./dist/index.js",
@@ -52,7 +52,7 @@
"@hono/zod-openapi": "^0.16.0",
"@sindresorhus/slugify": "^2.2.1",
"camelcase": "^8.0.0",
- "chalk": "5.6.0",
+ "chalk": "^5.3.0",
"cli-alerts": "^2.0.0",
"cli-handle-error": "^4.4.0",
"cli-handle-unhandled": "^1.1.1",
@@ -60,11 +60,11 @@
"cli-table3": "^0.6.5",
"cli-welcome": "^3.0.0",
"compute-cosine-similarity": "^1.1.0",
+ "conf": "^13.0.1",
"cosmiconfig": "^9.0.0",
"cosmiconfig-typescript-loader": "^5.0.0",
"dotenv": "^16.4.5",
"execa": "^9.4.0",
- "fast-glob": "^3.3.2",
"figures": "^6.1.0",
"get-package-json-file": "^2.0.0",
"hono": "^4.5.11",
diff --git a/packages/baseai/src/add/index.ts b/packages/baseai/src/add/index.ts
index 27720abb..46fd3fb3 100644
--- a/packages/baseai/src/add/index.ts
+++ b/packages/baseai/src/add/index.ts
@@ -1,10 +1,10 @@
+import { getStoredAuth } from '@/auth';
import { dim, dimItalic } from '@/utils/formatting';
import { getAvailablePipes } from '@/utils/get-available-pipes';
import { getAvailableTools } from '@/utils/get-available-tools';
import { heading } from '@/utils/heading';
import icons from '@/utils/icons';
import { isToolPresent } from '@/utils/is-tool-present';
-import { retrieveAuthentication } from '@/utils/retrieve-credentials';
import { formatCode } from '@/utils/ts-format-code';
import * as p from '@clack/prompts';
import slugify from '@sindresorhus/slugify';
@@ -43,6 +43,37 @@ function extractLoginName(loginAndPipe: string) {
};
}
+/**
+ * Represents an account with login credentials and an API key.
+ */
+interface Account {
+ login: string;
+ apiKey: string;
+}
+
+/**
+ * Retrieves the stored authentication account.
+ *
+ * This function attempts to retrieve the stored authentication account
+ * asynchronously. If the account is found, it is returned. If no account
+ * is found or an error occurs during retrieval, `null` is returned.
+ *
+ * @returns {Promise<Account | null>} A promise that resolves to the stored
+ * authentication account, or `null` if no account is found or an error occurs.
+ */
+async function retrieveAuthentication(): Promise<Account | null> {
+ try {
+ const account = await getStoredAuth();
+ if (!account) return null;
+
+ return account;
+ } catch (error) {
+ p.log.error(
+ `Error retrieving stored auth: ${(error as Error).message}`
+ );
+ return null;
+ }
+}
/**
* Fetches a pipe from Langbase using the provided login and name.
@@ -62,17 +93,9 @@ async function getPipe({
name: string;
spinner: Spinner;
}) {
+ spinner.start('Fetching pipe from Langbase');
try {
- const account = await retrieveAuthentication({ spinner });
- if (!account) {
- p.log.error(
- 'Authentication failed. Please run "npx baseai auth" to authenticate.'
- );
- return;
- }
-
- spinner.start('Fetching pipe from Langbase');
-
+ const account = await retrieveAuthentication();
const API_URL = `https://api.langbase.com/v1/pipes/${login}/${name}`;
const createResponse = await fetch(API_URL, {
diff --git a/packages/baseai/src/auth/index.ts b/packages/baseai/src/auth/index.ts
index d042df4f..1309548b 100644
--- a/packages/baseai/src/auth/index.ts
+++ b/packages/baseai/src/auth/index.ts
@@ -9,11 +9,21 @@ import {
outro,
password
} from '@clack/prompts';
-import fs from 'fs/promises';
+import Conf from 'conf';
+import fs from 'fs';
import open from 'open';
import path from 'path';
import color from 'picocolors';
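+// Conf stores the accounts list as JSON in the platform's per-user config
+// directory; the exact file location is printed below via `config.path`.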
+const config = new Conf({
+ projectName: 'baseai'
+});
+
+interface Account {
+ login: string;
+ apiKey: string;
+}
+
export async function auth() {
p.intro(
heading({
@@ -62,6 +72,17 @@ export async function auth() {
process.exit(1);
}
+ // Store in Conf (old functionality)
+ const newAccount: Account = { login, apiKey };
+ const existingAccounts = (config.get('accounts') as Account[]) || [];
+ const updatedAccounts = [...existingAccounts, newAccount];
+ config.set('accounts', updatedAccounts);
+
+ // Store in .env file (new functionality)
+ // const envKeyName = apiKey.startsWith('user_')
+ // ? 'LANGBASE_USER_API_KEY'
+ // : 'LANGBASE_ORG_API_KEY';
+
const envKeyName = 'LANGBASE_API_KEY';
const envContent = `\n# Langbase API key for https://langbase.com/${login}\n${envKeyName}=${apiKey}\n\n`;
@@ -78,45 +99,37 @@ export async function auth() {
const baiConfig = await loadConfig();
let envFile = baiConfig.envFilePath || '.env';
- const envFileContent = await fs.readFile(envFile, 'utf-8');
-
- const oldKey = envFileContent
- .split('\n')
- .reverse() // Reverse to get the latest key if there are multiple
- .find(line => line.includes('LANGBASE_API_KEY'))
- ?.split('=')[1];
-
- if (oldKey) {
- const shouldOverwrite = await confirm({
- message: `API key found in ${envFile}. Overwrite?`
- });
-
- if (isCancel(shouldOverwrite)) {
- cancel('Operation cancelled.');
- process.exit(0);
- }
-
- if (!shouldOverwrite) {
- outro(
- color.yellow('API key is not overwritten.')
- );
- process.exit(0);
- }
-
- const newEnvContent = envFileContent.replace(
- new RegExp(`LANGBASE_API_KEY=${oldKey}`),
- envContent.trim()
- );
-
- await fs.writeFile(path.join(process.cwd(), envFile), newEnvContent);
- } else {
- await fs.appendFile(path.join(process.cwd(), envFile), envContent);
- }
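+	// Append unconditionally; any existing LANGBASE_API_KEY entries in the
+	// env file are left in place rather than detected and overwritten.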
+ fs.appendFileSync(path.join(process.cwd(), envFile), envContent);
outro(
color.green(
- `Authentication successful. API key is stored in ${envFile}`
+ `Authentication successful. Credentials stored in config and ${envFile}`
)
);
+ console.log(color.dim(`Config file location: ${config.path}`));
process.exit(0);
}
+
+export function getStoredAuth(): Account | undefined {
+ const accounts = (config.get('accounts') as Account[]) || [];
+ const currentLogin = config.get('currentAccount') as string | undefined;
+
+ if (currentLogin) {
+ return accounts.find(account => account.login === currentLogin);
+ }
+
+ return accounts[0]; // Return the first account if no current account is set
+}
+
+export function getStoredAccounts(): Account[] {
+ return (config.get('accounts') as Account[]) || [];
+}
+
+export function setCurrentAccount(login: string): boolean {
+ const accounts = getStoredAccounts();
+ if (accounts.some(account => account.login === login)) {
+ config.set('currentAccount', login);
+ return true;
+ }
+ return false;
+}
diff --git a/packages/baseai/src/build/index.ts b/packages/baseai/src/build/index.ts
index 4db3337b..e6f213ae 100644
--- a/packages/baseai/src/build/index.ts
+++ b/packages/baseai/src/build/index.ts
@@ -32,7 +32,7 @@ const buildPipes = async () => {
p.intro(heading({ text: 'PIPES', sub: '', dim: true }));
const sourcePath = path.join(process.cwd(), 'baseai', 'pipes');
- const outputPath = path.join(process.cwd(), '.baseai', 'pipes');
+ const outputPath = path.join(process.cwd(), '.baseai/pipes');
const builtPipes = await buildTypeScriptFiles(
sourcePath,
@@ -47,7 +47,7 @@ const buildTools = async () => {
p.intro(heading({ text: 'TOOLS', sub: '', dim: true }));
const sourcePath = path.join(process.cwd(), 'baseai', 'tools');
- const outputPath = path.join(process.cwd(), '.baseai', 'tools');
+ const outputPath = path.join(process.cwd(), '.baseai/tools');
const builtTools = await buildTypeScriptFiles(
sourcePath,
@@ -109,7 +109,7 @@ export const buildMemory = async ({
const displayName = path.dirname(file); // This is the last directory name
try {
const { stdout } = await execAsync(
- `npx tsx -e "import memoryConfig from '${JSON.stringify(inputFile)}'; console.log(JSON.stringify(memoryConfig()))"`
+ `npx tsx -e "import memoryConfig from '${inputFile}'; console.log(JSON.stringify(memoryConfig()))"`
);
await fs.writeFile(outputFile, stdout);
@@ -160,7 +160,7 @@ const buildTypeScriptFiles = async (
try {
const { stdout } = await execAsync(
- `npx tsx -e "import config from '${JSON.stringify(inputFile)}'; console.log(JSON.stringify(config()))"`
+ `npx tsx -e "import config from '${inputFile}'; console.log(JSON.stringify(config()))"`
);
// Parse the JSON output
diff --git a/packages/baseai/src/data/models.ts b/packages/baseai/src/data/models.ts
index d8c4e055..4edd98e7 100644
--- a/packages/baseai/src/data/models.ts
+++ b/packages/baseai/src/data/models.ts
@@ -90,8 +90,6 @@ export const PERPLEXITY: string = 'Perplexity';
export const DEEPINFRA: string = 'deepinfra';
export const BEDROCK: string = 'bedrock';
export const AZURE_OPEN_AI: string = 'azure-openai';
-export const X_AI: string = 'xAI';
-export const OLLAMA: string = 'ollama';
interface Model {
id: string;
@@ -253,12 +251,6 @@ export const modelsByProvider: ModelsByProviderInclCosts = {
}
],
[TOGETHER_AI]: [
- {
- id: 'meta-llama/Llama-3.3-70B-Instruct-Turbo',
- provider: TOGETHER_AI,
- promptCost: 0.88,
- completionCost: 0.88,
- },
{
id: 'meta-llama/Meta-Llama-3.1-405B-Instruct-Turbo',
provider: TOGETHER_AI,
@@ -371,16 +363,6 @@ export const modelsByProvider: ModelsByProviderInclCosts = {
}
],
[ANTHROPIC]: [
- {
- id: 'claude-3-5-sonnet-latest',
- provider: ANTHROPIC,
- promptCost: 3,
- completionCost: 15,
- toolSupport: {
- toolChoice: true,
- parallelToolCalls: true
- }
- },
{
id: 'claude-3-5-sonnet-20240620',
provider: ANTHROPIC,
@@ -420,25 +402,9 @@ export const modelsByProvider: ModelsByProviderInclCosts = {
toolChoice: true,
parallelToolCalls: true
}
- },
- {
- id: 'claude-3-5-haiku-20241022',
- provider: ANTHROPIC,
- promptCost: 1,
- completionCost: 5,
- toolSupport: {
- toolChoice: true,
- parallelToolCalls: true
- }
}
],
[GROQ]: [
- {
- id: 'llama-3.3-70b-versatile',
- provider: GROQ,
- promptCost: 0.59,
- completionCost: 0.79,
- },
{
id: 'llama-3.1-70b-versatile',
provider: GROQ,
@@ -539,12 +505,6 @@ export const modelsByProvider: ModelsByProviderInclCosts = {
}
],
[FIREWORKS_AI]: [
- {
- id: 'llama-v3p3-70b-instruct',
- provider: FIREWORKS_AI,
- promptCost: 0.88,
- completionCost: 0.88,
- },
{
id: 'llama-v3p1-405b-instruct',
provider: FIREWORKS_AI,
@@ -610,18 +570,6 @@ export const modelsByProvider: ModelsByProviderInclCosts = {
promptCost: 0.2,
completionCost: 0.2
}
- ],
- [X_AI]: [
- {
- id: 'grok-beta',
- provider: X_AI,
- promptCost: 5,
- completionCost: 15,
- toolSupport: {
- toolChoice: true,
- parallelToolCalls: false
- }
- }
]
};
diff --git a/packages/baseai/src/deploy/document.ts b/packages/baseai/src/deploy/document.ts
index 484adbca..6692d3c9 100644
--- a/packages/baseai/src/deploy/document.ts
+++ b/packages/baseai/src/deploy/document.ts
@@ -7,7 +7,9 @@ import {
handleError,
handleInvalidConfig,
listMemoryDocuments,
+ retrieveAuthentication,
uploadDocumentsToMemory,
+ type Account
} from '.';
import path from 'path';
import fs from 'fs/promises';
@@ -19,7 +21,6 @@ import {
} from '@/utils/memory/load-memory-files';
import type { MemoryI } from 'types/memory';
import { compareDocumentLists } from '@/utils/memory/compare-docs-list';
-import { retrieveAuthentication, type Account } from '@/utils/retrieve-credentials';
type Spinner = ReturnType<typeof p.spinner>;
diff --git a/packages/baseai/src/deploy/index.ts b/packages/baseai/src/deploy/index.ts
index 8e2a64ba..14543640 100644
--- a/packages/baseai/src/deploy/index.ts
+++ b/packages/baseai/src/deploy/index.ts
@@ -18,19 +18,17 @@ import path from 'path';
import color from 'picocolors';
import { type MemoryI } from 'types/memory';
import type { Pipe, PipeOld } from 'types/pipe';
+import { getStoredAuth } from './../auth/index';
import {
handleGitSyncMemories,
updateDeployedCommitHash
} from '@/utils/memory/git-sync/handle-git-sync-memories';
import { handleSingleDocDeploy } from './document';
-import {
- generateUpgradeInstructions,
- isOldMemoryConfigFormat
-} from '@/utils/memory/handle-old-memory-config';
-import {
- retrieveAuthentication,
- type Account
-} from '@/utils/retrieve-credentials';
+
+export interface Account {
+ login: string;
+ apiKey: string;
+}
interface ErrorResponse {
error?: { message: string };
@@ -159,6 +157,26 @@ async function readToolsDirectory({
}
}
+export async function retrieveAuthentication({
+ spinner
+}: {
+ spinner: Spinner;
+}): Promise<Account | null> {
+ spinner.start('Retrieving stored authentication');
+ try {
+ const account = await getStoredAuth();
+ if (!account) {
+ handleNoAccountFound({ spinner });
+ return null;
+ }
+ spinner.stop(`Deploying as ${color.cyan(account.login)}`);
+ return account;
+ } catch (error) {
+ handleAuthError({ spinner, error });
+ return null;
+ }
+}
+
async function deployPipes({
spinner,
pipes,
@@ -335,6 +353,23 @@ function handleDirectoryReadError({
}
}
+function handleNoAccountFound({ spinner }: { spinner: Spinner }): void {
+ spinner.stop('No account found');
+ p.log.warn('No account found. Please authenticate first.');
+ p.log.info(`Run: ${color.green('npx baseai auth')}`);
+}
+
+function handleAuthError({
+ spinner,
+ error
+}: {
+ spinner: Spinner;
+ error: unknown;
+}): void {
+ spinner.stop('Failed to retrieve authentication');
+ p.log.error(`Error retrieving stored auth: ${(error as Error).message}`);
+}
+
export function handleInvalidConfig({
spinner,
name,
@@ -387,7 +422,6 @@ export async function readMemoryDirectory({
spinner.start('Reading memory directory');
try {
const memory = await fs.readdir(memoryDir);
- spinner.stop();
return memory;
} catch (error) {
handleDirectoryReadError({ spinner, dir: memoryDir, error });
@@ -447,27 +481,19 @@ export async function deployMemory({
p.log.step(`Processing documents for memory: ${memoryNameWithoutExt}`);
- if (isOldMemoryConfigFormat(memoryObject)) {
- p.note(generateUpgradeInstructions(memoryObject));
- p.cancel(
- 'Deployment cancelled. Please update your memory config file to the new format.'
- );
- process.exit(1);
- }
-
let filesToDeploy: string[] = [];
let filesToDelete: string[] = [];
let memoryDocs: MemoryDocumentI[] = [];
// Git sync memories
- if (memoryObject.git.enabled) {
+ if (memoryObject.config?.useGitRepo) {
// Get names of files to deploy, i.e., changed or new files
const {
filesToDeploy: gitFilesToDeploy,
filesToDelete: gitFilesToDelete
} = await handleGitSyncMemories({
memoryName: memoryNameWithoutExt,
- config: memoryObject,
+ config: memoryObject.config,
account
});
@@ -503,7 +529,7 @@ export async function deployMemory({
documents: memoryDocs,
account,
overwrite,
- isGitSync: memoryObject.git.enabled,
+ isGitSync: memoryObject.config?.useGitRepo,
docsToDelete: filesToDelete
});
spinner.stop(`Deployment finished memory: ${memoryObject.name}`);
@@ -539,6 +565,7 @@ export async function upsertMemory({
docsToDelete?: string[];
}): Promise<void> {
const { createMemory } = getMemoryApiUrls({
+ account,
memoryName: memory.name
});
@@ -650,8 +677,7 @@ export async function uploadDocumentsToMemory({
const signedUrl = await getSignedUploadUrl({
documentName: doc.name,
memoryName: name,
- account,
- meta: doc.meta
+ account
});
const uploadResponse = await uploadDocument(signedUrl, doc.blob);
@@ -845,6 +871,7 @@ export async function listMemoryDocuments({
memoryName: string;
}) {
const { listDocuments } = getMemoryApiUrls({
+ account,
memoryName: memoryName
});
@@ -873,26 +900,30 @@ export async function listMemoryDocuments({
);
}
- const res = (await listResponse.json()) as { name: string }[];
- const documents = res.map((doc: { name: string }) => doc.name);
+ const res = (await listResponse.json()) as { docs: { name: string }[] };
+ const documents = res.docs.map((doc: { name: string }) => doc.name);
return documents;
}
async function getSignedUploadUrl({
documentName,
memoryName,
- account,
- meta
+ account
}: {
documentName: string;
memoryName: string;
account: Account;
-	meta: Record<string, string>;
}): Promise<string> {
const { uploadDocument } = getMemoryApiUrls({
+ account,
memoryName
});
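+	// Org API keys are assumed to take the form '<orgLogin>:<key>'; user keys
+	// contain no colon, so the account login is used as the owner instead.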
+ const isOrgAccount = account.apiKey.includes(':');
+
+ const ownerLogin = isOrgAccount
+ ? account.apiKey.split(':')[0]
+ : account.login;
try {
const response = await fetch(uploadDocument, {
method: 'POST',
@@ -901,8 +932,8 @@ async function getSignedUploadUrl({
Authorization: `Bearer ${account.apiKey}`
},
body: JSON.stringify({
- meta,
memoryName,
+ ownerLogin,
fileName: documentName
})
});
@@ -936,6 +967,7 @@ async function deleteDocument({
account: Account;
}) {
const { deleteDocument } = getMemoryApiUrls({
+ account,
memoryName,
documentName
});
@@ -1000,36 +1032,44 @@ async function uploadDocument(signedUrl: string, document: Blob) {
}
export function getMemoryApiUrls({
+ account,
memoryName,
documentName
}: {
+ account: Account;
memoryName: string;
documentName?: string;
}) {
- // Base URL
- const baseUrl = `https://api.langbase.com/v1`;
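+	// Same owner derivation as in getSignedUploadUrl: org keys are assumed to
+	// be '<orgLogin>:<key>', while user accounts fall back to account.login.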
+ const isOrgAccount = account.apiKey.includes(':');
+ const ownerLogin = isOrgAccount
+ ? account.apiKey.split(':')[0]
+ : account.login;
+ const baseUrl = `https://api.langbase.com/beta`;
+ const baseUrlV1 = `https://api.langbase.com/v1`;
// Create memory URL
- const createMemory = `${baseUrl}/memory`;
-
- // Delete memory URL
- const deleteMemory = `${baseUrl}/memory/${memoryName}`;
+ const createUrlOrg = `${baseUrl}/org/${ownerLogin}/memorysets`;
+ const createUrlUser = `${baseUrl}/user/memorysets`;
// Upload document URL
- const uploadDocument = `${baseUrl}/memory/documents`;
+ const uploadDocumentOrg = `${baseUrl}/org/${ownerLogin}/memorysets/documents`;
+ const uploadDocumentUser = `${baseUrl}/user/memorysets/documents`;
// List documents URL
- const listDocuments = `${baseUrl}/memory/${memoryName}/documents`;
+ const listDocuments = `${baseUrl}/memorysets/${ownerLogin}/${memoryName}/documents`;
+
+ // Delete memory URL
+ const deleteMemory = `${baseUrl}/memorysets/${ownerLogin}/${memoryName}`;
// Delete document URL
- const deleteDocument = `${baseUrl}/memory/${memoryName}/documents/${documentName}`;
+ const deleteDocument = `${baseUrlV1}/memory/${memoryName}/documents/${documentName}`;
return {
listDocuments,
deleteMemory,
deleteDocument,
- createMemory,
- uploadDocument
+ createMemory: isOrgAccount ? createUrlOrg : createUrlUser,
+ uploadDocument: isOrgAccount ? uploadDocumentOrg : uploadDocumentUser
};
}
@@ -1049,6 +1089,7 @@ async function overwriteMemory({
// Delete old memory.
dlog(`Deleting old memory: ${memory.name}`);
const { deleteMemory } = getMemoryApiUrls({
+ account,
memoryName: memory.name
});
@@ -1139,10 +1180,10 @@ export async function deploySingleMemory({
// Retrieve authentication
const account = await retrieveAuthentication({ spinner });
if (!account) {
- p.outro(
- `No account found. Skipping deployment. \n Run: ${cyan('npx baseai@latest auth')}`
+ p.log.error(
+ 'Authentication failed. Please run "npx baseai auth" to authenticate.'
);
- process.exit(1);
+ return;
}
// Call deployMemory function
@@ -1155,7 +1196,7 @@ export async function deploySingleMemory({
});
p.outro(`Successfully deployed memory: ${memoryName}`);
- process.exit(0);
+ process.exit(1);
} catch (error) {
if (error instanceof Error) {
if ((error as NodeJS.ErrnoException).code === 'ENOENT') {
diff --git a/packages/baseai/src/dev/data/models.ts b/packages/baseai/src/dev/data/models.ts
index b82060c5..e1f28e9e 100644
--- a/packages/baseai/src/dev/data/models.ts
+++ b/packages/baseai/src/dev/data/models.ts
@@ -11,7 +11,6 @@ export const DEEPINFRA: string = 'deepinfra';
export const BEDROCK: string = 'bedrock';
export const AZURE_OPEN_AI: string = 'azure-openai';
export const OLLAMA: string = 'ollama';
-export const X_AI: string = 'xAI';
interface Model {
id: string;
@@ -173,12 +172,6 @@ export const modelsByProvider: ModelsByProviderInclCosts = {
}
],
[TOGETHER_AI]: [
- {
- id: 'meta-llama/Llama-3.3-70B-Instruct-Turbo',
- provider: TOGETHER_AI,
- promptCost: 0.88,
- completionCost: 0.88,
- },
{
id: 'meta-llama/Meta-Llama-3.1-405B-Instruct-Turbo',
provider: TOGETHER_AI,
@@ -291,16 +284,6 @@ export const modelsByProvider: ModelsByProviderInclCosts = {
}
],
[ANTHROPIC]: [
- {
- id: 'claude-3-5-sonnet-latest',
- provider: ANTHROPIC,
- promptCost: 3,
- completionCost: 15,
- toolSupport: {
- toolChoice: true,
- parallelToolCalls: true
- }
- },
{
id: 'claude-3-5-sonnet-20240620',
provider: ANTHROPIC,
@@ -340,25 +323,9 @@ export const modelsByProvider: ModelsByProviderInclCosts = {
toolChoice: true,
parallelToolCalls: true
}
- },
- {
- id: 'claude-3-5-haiku-20241022',
- provider: ANTHROPIC,
- promptCost: 1,
- completionCost: 5,
- toolSupport: {
- toolChoice: true,
- parallelToolCalls: true
- }
}
],
[GROQ]: [
- {
- id: 'llama-3.3-70b-versatile',
- provider: GROQ,
- promptCost: 0.59,
- completionCost: 0.79,
- },
{
id: 'llama-3.1-70b-versatile',
provider: GROQ,
@@ -459,12 +426,6 @@ export const modelsByProvider: ModelsByProviderInclCosts = {
}
],
[FIREWORKS_AI]: [
- {
- id: 'llama-v3p3-70b-instruct',
- provider: FIREWORKS_AI,
- promptCost: 0.88,
- completionCost: 0.88,
- },
{
id: 'llama-v3p1-405b-instruct',
provider: FIREWORKS_AI,
@@ -550,14 +511,6 @@ export const modelsByProvider: ModelsByProviderInclCosts = {
promptCost: 1,
completionCost: 3
}
- ],
- [X_AI]: [
- {
- id: 'grok-beta',
- provider: X_AI,
- promptCost: 5,
- completionCost: 15
- }
]
};
diff --git a/packages/baseai/src/dev/index.ts b/packages/baseai/src/dev/index.ts
index eb2bee78..3c7b0433 100644
--- a/packages/baseai/src/dev/index.ts
+++ b/packages/baseai/src/dev/index.ts
@@ -13,7 +13,7 @@ import { customCors } from './middleware/custom-cors';
import { poweredBy } from './middleware/powered-by';
import { preFlight } from './middleware/pre-flight';
import { registerRoot } from './routes/base';
-import { registerV1PipesRun } from './routes/v1/pipes/run';
+import { registerBetaPipesRun } from './routes/beta/pipes/run';
export async function runBaseServer() {
const app = new Hono();
@@ -29,7 +29,7 @@ export async function runBaseServer() {
// Routes.
registerRoot(app);
- registerV1PipesRun(app);
+ registerBetaPipesRun(app);
const port = 9000;
diff --git a/packages/baseai/src/dev/llms/call-anthropic.ts b/packages/baseai/src/dev/llms/call-anthropic.ts
index 432b78d9..62b10c3b 100644
--- a/packages/baseai/src/dev/llms/call-anthropic.ts
+++ b/packages/baseai/src/dev/llms/call-anthropic.ts
@@ -4,26 +4,23 @@ import { handleProviderRequest } from '../utils/provider-handlers/provider-reque
import { ANTHROPIC } from '../data/models';
import { handleLlmError } from './utils';
import type { ModelParams } from 'types/providers';
-import type { Message, Pipe } from 'types/pipe';
+import type { Message } from 'types/pipe';
import { addToolsToParams } from '../utils/add-tools-to-params';
-import type { PipeTool } from 'types/tools';
export async function callAnthropic({
pipe,
messages,
llmApiKey,
- stream,
- paramsTools
+ stream
}: {
- pipe: Pipe;
+ pipe: any;
llmApiKey: string;
stream: boolean;
messages: Message[];
- paramsTools: PipeTool[] | undefined;
}) {
try {
const modelParams = buildModelParams(pipe, stream, messages);
- addToolsToParams(modelParams, pipe, paramsTools);
+ addToolsToParams(modelParams, pipe);
// Transform params according to provider's format
const transformedRequestParams = transformToProviderRequest({
@@ -47,28 +44,14 @@ export async function callAnthropic({
}
function buildModelParams(
- pipe: Pipe,
+ pipe: any,
stream: boolean,
messages: Message[]
): ModelParams {
- const model = pipe.model.split(':')[1];
- const {
- top_p,
- max_tokens,
- temperature,
- presence_penalty,
- frequency_penalty,
- stop
- } = pipe;
return {
messages,
stream,
- model,
- top_p,
- max_tokens,
- temperature,
- presence_penalty,
- frequency_penalty,
- stop
+ model: pipe.model.name,
+ ...pipe.model.params
};
}
diff --git a/packages/baseai/src/dev/llms/call-cohere.ts b/packages/baseai/src/dev/llms/call-cohere.ts
index feded453..9b639aaa 100644
--- a/packages/baseai/src/dev/llms/call-cohere.ts
+++ b/packages/baseai/src/dev/llms/call-cohere.ts
@@ -4,7 +4,7 @@ import { dlog } from '../utils/dlog';
import { COHERE } from '../data/models';
import { handleLlmError } from './utils';
import type { ModelParams } from 'types/providers';
-import type { Message, Pipe } from 'types/pipe';
+import type { Message } from 'types/pipe';
export async function callCohere({
pipe,
@@ -12,7 +12,7 @@ export async function callCohere({
llmApiKey,
stream
}: {
- pipe: Pipe;
+ pipe: any;
llmApiKey: string;
messages: Message[];
stream: boolean;
@@ -42,28 +42,14 @@ export async function callCohere({
}
function buildModelParams(
- pipe: Pipe,
+ pipe: any,
stream: boolean,
messages: Message[]
): ModelParams {
- const model = pipe.model.split(':')[1];
- const {
- top_p,
- max_tokens,
- temperature,
- presence_penalty,
- frequency_penalty,
- stop
- } = pipe;
return {
messages,
stream,
- model,
- top_p,
- max_tokens,
- temperature,
- presence_penalty,
- frequency_penalty,
- stop
+ model: pipe.model.name,
+ ...pipe.model.params
};
}
diff --git a/packages/baseai/src/dev/llms/call-fireworks.ts b/packages/baseai/src/dev/llms/call-fireworks.ts
index e7f1f457..85dc920e 100644
--- a/packages/baseai/src/dev/llms/call-fireworks.ts
+++ b/packages/baseai/src/dev/llms/call-fireworks.ts
@@ -5,7 +5,7 @@ import { FIREWORKS_AI } from '../data/models';
import { handleLlmError } from './utils';
import type { ModelParams } from 'types/providers';
-import type { Message, Pipe } from 'types/pipe';
+import type { Message } from 'types/pipe';
export async function callFireworks({
pipe,
@@ -13,7 +13,7 @@ export async function callFireworks({
llmApiKey,
stream
}: {
- pipe: Pipe;
+ pipe: any;
llmApiKey: string;
stream: boolean;
messages: Message[];
@@ -30,7 +30,7 @@ export async function callFireworks({
dlog('Fireworks request params', transformedRequestParams);
// Fireworks llama-3.1 405b behaves weirdly with stop value. Bug on their side. Omitting it.
- if (modelParams?.model === 'llama-v3p1-405b-instruct')
+ if (pipe.model.name === 'llama-v3p1-405b-instruct')
delete transformedRequestParams['stop'];
const providerOptions = { provider: FIREWORKS_AI, llmApiKey };
@@ -47,33 +47,19 @@ export async function callFireworks({
}
function buildModelParams(
- pipe: Pipe,
+ pipe: any,
stream: boolean,
messages: Message[]
): ModelParams {
// Create model strings for Fireworks AI
- const pipeModel = pipe.model.split(':')[1];
- const model =
- pipeModel === 'yi-large'
+ const modelString =
+ pipe.model.name === 'yi-large'
? 'accounts/yi-01-ai/models/yi-large'
- : `accounts/fireworks/models/${pipeModel}`;
- const {
- top_p,
- max_tokens,
- temperature,
- presence_penalty,
- frequency_penalty,
- stop
- } = pipe;
+ : `accounts/fireworks/models/${pipe.model.name}`;
return {
messages,
stream,
- model,
- top_p,
- max_tokens,
- temperature,
- presence_penalty,
- frequency_penalty,
- stop
+ model: modelString,
+ ...pipe.model.params
};
}
diff --git a/packages/baseai/src/dev/llms/call-google.ts b/packages/baseai/src/dev/llms/call-google.ts
index 19ba6792..4188e295 100644
--- a/packages/baseai/src/dev/llms/call-google.ts
+++ b/packages/baseai/src/dev/llms/call-google.ts
@@ -4,26 +4,23 @@ import { handleProviderRequest } from '../utils/provider-handlers/provider-reque
import { GOOGLE } from '../data/models';
import { applyJsonModeIfEnabledForGoogle, handleLlmError } from './utils';
import type { ModelParams } from 'types/providers';
-import type { Message, Pipe } from 'types/pipe';
+import type { Message } from 'types/pipe';
import { addToolsToParams } from '../utils/add-tools-to-params';
-import type { PipeTool } from 'types/tools';
export async function callGoogle({
pipe,
messages,
llmApiKey,
- stream,
- paramsTools
+ stream
}: {
- pipe: Pipe;
+ pipe: any;
stream: boolean;
llmApiKey: string;
messages: Message[];
- paramsTools: PipeTool[] | undefined;
}) {
try {
const modelParams = buildModelParams(pipe, stream, messages);
- addToolsToParams(modelParams, pipe, paramsTools);
+ addToolsToParams(modelParams, pipe);
// Transform params according to provider's format
const transformedRequestParams = transformToProviderRequest({
@@ -51,29 +48,15 @@ export async function callGoogle({
}
function buildModelParams(
- pipe: Pipe,
+ pipe: any,
stream: boolean,
messages: Message[]
): ModelParams {
- const model = pipe.model.split(':')[1];
- const {
- top_p,
- max_tokens,
- temperature,
- presence_penalty,
- frequency_penalty,
- stop
- } = pipe;
return {
messages,
stream,
- model,
- top_p,
- max_tokens,
- temperature,
- presence_penalty,
- frequency_penalty,
- stop
+ model: pipe.model.name,
+ ...pipe.model.params
};
}
diff --git a/packages/baseai/src/dev/llms/call-groq.ts b/packages/baseai/src/dev/llms/call-groq.ts
index 9f967402..78089721 100644
--- a/packages/baseai/src/dev/llms/call-groq.ts
+++ b/packages/baseai/src/dev/llms/call-groq.ts
@@ -4,7 +4,7 @@ import { GROQ } from '../data/models';
import transformToProviderRequest from '../utils/provider-handlers/transfrom-to-provider-request';
import { applyJsonModeIfEnabled, handleLlmError } from './utils';
import type { ModelParams } from 'types/providers';
-import type { Message, Pipe } from 'types/pipe';
+import type { Message } from 'types/pipe';
export async function callGroq({
pipe,
@@ -12,7 +12,7 @@ export async function callGroq({
llmApiKey,
stream
}: {
- pipe: Pipe;
+ pipe: any;
llmApiKey: string;
stream: boolean;
messages: Message[];
@@ -42,28 +42,14 @@ export async function callGroq({
}
function buildModelParams(
- pipe: Pipe,
+ pipe: any,
stream: boolean,
messages: Message[]
): ModelParams {
- const model = pipe.model.split(':')[1];
- const {
- top_p,
- max_tokens,
- temperature,
- presence_penalty,
- frequency_penalty,
- stop
- } = pipe;
return {
messages,
stream,
- model,
- top_p,
- max_tokens,
- temperature,
- presence_penalty,
- frequency_penalty,
- stop
+ model: pipe.model.name,
+ ...pipe.model.params
};
}
diff --git a/packages/baseai/src/dev/llms/call-llm.ts b/packages/baseai/src/dev/llms/call-llm.ts
index dcbf9b74..11439e08 100644
--- a/packages/baseai/src/dev/llms/call-llm.ts
+++ b/packages/baseai/src/dev/llms/call-llm.ts
@@ -7,13 +7,13 @@ import {
OLLAMA,
OPEN_AI,
PERPLEXITY,
- TOGETHER_AI,
- X_AI
+ TOGETHER_AI
} from '@/dev/data/models';
import { addContextFromMemory } from '@/utils/memory/lib';
-import type { Message, Pipe, VariablesI } from 'types/pipe';
+import type { Message, VariablesI } from 'types/pipe';
import { ApiError } from '../hono/errors';
+import type { Pipe } from '../routes/beta/pipes/run';
import { dlog } from '../utils/dlog';
import { getRunThread } from '../utils/thread/get-run-thread';
import { callAnthropic } from './call-anthropic';
@@ -25,35 +25,28 @@ import { callOllama } from './call-ollama';
import { callOpenAI } from './call-openai';
import { callPerplexity } from './call-perplexity';
import { callTogether } from './call-together';
-import { callXAI } from './call-xai';
-import { getProvider } from '../utils/get-provider';
-import type { PipeTool } from 'types/tools';
export async function callLLM({
pipe,
stream,
messages,
llmApiKey,
- variables,
- paramsTools
+ variables
}: {
pipe: Pipe;
stream: boolean;
llmApiKey: string;
messages: Message[];
variables?: VariablesI;
- paramsTools: PipeTool[] | undefined;
}) {
try {
- // Get the model provider from the pipe.
- const providerString = pipe.model.split(':')[0];
- const modelProvider = getProvider(providerString);
-
- const memoryNames = pipe.memory.map(memory => memory.name);
+ // Get the model provider from the pipe config.
+ const modelProvider = pipe.model.provider;
const similarChunks = await addContextFromMemory({
+ pipe,
messages,
- memoryNames
+ memoryNames: pipe.memorysets
});
// Process the messages to be sent to the model provider.
@@ -73,8 +66,7 @@ export async function callLLM({
pipe,
stream,
messages,
- llmApiKey,
- paramsTools
+ llmApiKey
});
}
@@ -82,10 +74,9 @@ export async function callLLM({
dlog('ANTHROPIC', '✅');
return await callAnthropic({
pipe,
- stream,
messages,
llmApiKey,
- paramsTools
+ stream
});
}
@@ -93,10 +84,9 @@ export async function callLLM({
dlog('TOGETHER_AI', '✅');
return await callTogether({
pipe,
- stream,
messages,
llmApiKey,
- paramsTools,
+ stream
});
}
@@ -116,19 +106,7 @@ export async function callLLM({
pipe,
messages,
llmApiKey,
- stream,
- paramsTools
- });
- }
-
- if (modelProvider === X_AI) {
- dlog('XAI', '✅');
- return await callXAI({
- pipe,
- messages,
- llmApiKey,
- stream,
- paramsTools
+ stream
});
}
diff --git a/packages/baseai/src/dev/llms/call-ollama.ts b/packages/baseai/src/dev/llms/call-ollama.ts
index 1eb0c098..972a9cc2 100644
--- a/packages/baseai/src/dev/llms/call-ollama.ts
+++ b/packages/baseai/src/dev/llms/call-ollama.ts
@@ -4,7 +4,7 @@ import { handleProviderRequest } from '../utils/provider-handlers/provider-reque
import { OLLAMA } from '../data/models';
import { handleLlmError } from './utils';
-import type { Message, Pipe } from 'types/pipe';
+import type { Message } from 'types/pipe';
import type { ModelParams } from 'types/providers';
export async function callOllama({
@@ -13,7 +13,7 @@ export async function callOllama({
llmApiKey,
stream
}: {
- pipe: Pipe;
+ pipe: any;
llmApiKey: string;
stream: boolean;
messages: Message[];
@@ -42,28 +42,14 @@ export async function callOllama({
}
function buildModelParams(
- pipe: Pipe,
+ pipe: any,
stream: boolean,
messages: Message[]
): ModelParams {
- const model = pipe.model.split(':')[1];
- const {
- top_p,
- max_tokens,
- temperature,
- presence_penalty,
- frequency_penalty,
- stop
- } = pipe;
return {
messages,
stream,
- model,
- top_p,
- max_tokens,
- temperature,
- presence_penalty,
- frequency_penalty,
- stop
+ model: pipe.model.name,
+ ...pipe.model.params
};
}
diff --git a/packages/baseai/src/dev/llms/call-openai.ts b/packages/baseai/src/dev/llms/call-openai.ts
index 00afca25..be1eba9e 100644
--- a/packages/baseai/src/dev/llms/call-openai.ts
+++ b/packages/baseai/src/dev/llms/call-openai.ts
@@ -4,31 +4,28 @@ import { dlog } from '../utils/dlog';
import { moderate } from '../utils/moderate';
import { OPEN_AI } from '../data/models';
import { applyJsonModeIfEnabled, handleLlmError } from './utils';
-import type { Message, Pipe } from 'types/pipe';
+import type { Message } from 'types/pipe';
import type { ModelParams } from 'types/providers';
import { addToolsToParams } from '../utils/add-tools-to-params';
-import type { PipeTool } from 'types/tools';
export async function callOpenAI({
pipe,
stream,
llmApiKey,
- messages,
- paramsTools
+ messages
}: {
- pipe: Pipe;
+ pipe: any;
stream: boolean;
llmApiKey: string;
messages: Message[];
- paramsTools: PipeTool[] | undefined;
}) {
try {
validateInput(pipe, messages);
const openai = new OpenAI({ apiKey: llmApiKey });
- await moderateContent(openai, messages, pipe.moderate);
+ await moderateContent(openai, messages, pipe.meta.moderate);
const modelParams = buildModelParams(pipe, stream, messages);
- addToolsToParams(modelParams, pipe, paramsTools);
+ addToolsToParams(modelParams, pipe);
applyJsonModeIfEnabled(modelParams, pipe);
dlog('modelParams', modelParams);
@@ -38,7 +35,7 @@ export async function callOpenAI({
}
}
-function validateInput(pipe: Pipe, messages: Message[]) {
+function validateInput(pipe: any, messages: Message[]) {
if (!pipe || !pipe.model || !messages || messages.length === 0) {
throw new ApiError({
code: 'BAD_REQUEST',
@@ -68,28 +65,14 @@ async function moderateContent(
}
function buildModelParams(
- pipe: Pipe,
+ pipe: any,
stream: boolean,
messages: Message[]
): ModelParams {
- const model = pipe.model.split(':')[1];
- const {
- top_p,
- max_tokens,
- temperature,
- presence_penalty,
- frequency_penalty,
- stop
- } = pipe;
return {
messages,
stream,
- model: model || 'gpt-4o-mini',
- top_p,
- max_tokens,
- temperature,
- presence_penalty,
- frequency_penalty,
- stop
+ model: pipe.model.name || 'gpt-4o-mini',
+ ...pipe.model.params
};
}
diff --git a/packages/baseai/src/dev/llms/call-perplexity.ts b/packages/baseai/src/dev/llms/call-perplexity.ts
index 5be492d7..3596ed9e 100644
--- a/packages/baseai/src/dev/llms/call-perplexity.ts
+++ b/packages/baseai/src/dev/llms/call-perplexity.ts
@@ -4,7 +4,7 @@ import { handleProviderRequest } from '../utils/provider-handlers/provider-reque
import { PERPLEXITY } from '../data/models';
import { handleLlmError } from './utils';
-import type { Message, Pipe } from 'types/pipe';
+import type { Message } from 'types/pipe';
import type { ModelParams } from 'types/providers';
export async function callPerplexity({
@@ -13,7 +13,7 @@ export async function callPerplexity({
llmApiKey,
stream
}: {
- pipe: Pipe;
+ pipe: any;
llmApiKey: string;
stream: boolean;
messages: Message[];
@@ -42,28 +42,14 @@ export async function callPerplexity({
}
function buildModelParams(
- pipe: Pipe,
+ pipe: any,
stream: boolean,
messages: Message[]
): ModelParams {
- const model = pipe.model.split(':')[1];
- const {
- top_p,
- max_tokens,
- temperature,
- presence_penalty,
- frequency_penalty,
- stop
- } = pipe;
return {
messages,
stream,
- model,
- top_p,
- max_tokens,
- temperature,
- presence_penalty,
- frequency_penalty,
- stop
+ model: pipe.model.name,
+ ...pipe.model.params
};
}
diff --git a/packages/baseai/src/dev/llms/call-together.ts b/packages/baseai/src/dev/llms/call-together.ts
index 76baeb0d..77880db7 100644
--- a/packages/baseai/src/dev/llms/call-together.ts
+++ b/packages/baseai/src/dev/llms/call-together.ts
@@ -2,23 +2,20 @@ import OpenAI from 'openai';
import { dlog } from '../utils/dlog';
import { GROQ } from '../data/models';
import { applyJsonModeIfEnabled, handleLlmError } from './utils';
-import type { Message, Pipe } from 'types/pipe';
+import type { Message } from 'types/pipe';
import type { ModelParams } from 'types/providers';
import { addToolsToParams } from '../utils/add-tools-to-params';
-import type { PipeTool } from 'types/tools';
export async function callTogether({
pipe,
messages,
llmApiKey,
- stream,
- paramsTools
+ stream
}: {
- pipe: Pipe;
+ pipe: any;
llmApiKey: string;
stream: boolean;
messages: Message[];
- paramsTools: PipeTool[] | undefined;
}) {
try {
const modelParams = buildModelParams(pipe, stream, messages);
@@ -32,7 +29,7 @@ export async function callTogether({
// Together behaves weirdly with stop value. Omitting it.
delete modelParams['stop'];
applyJsonModeIfEnabled(modelParams, pipe);
- addToolsToParams(modelParams, pipe, paramsTools);
+ addToolsToParams(modelParams, pipe);
dlog('modelParams', modelParams);
return await together.chat.completions.create(modelParams as any);
@@ -42,28 +39,14 @@ export async function callTogether({
}
function buildModelParams(
- pipe: Pipe,
+ pipe: any,
stream: boolean,
messages: Message[]
): ModelParams {
- const model = pipe.model.split(':')[1];
- const {
- top_p,
- max_tokens,
- temperature,
- presence_penalty,
- frequency_penalty,
- stop
- } = pipe;
return {
messages,
stream,
- model,
- top_p,
- max_tokens,
- temperature,
- presence_penalty,
- frequency_penalty,
- stop
+ model: pipe.model.name,
+ ...pipe.model.params
};
}
diff --git a/packages/baseai/src/dev/llms/call-xai.ts b/packages/baseai/src/dev/llms/call-xai.ts
deleted file mode 100644
index d10e605d..00000000
--- a/packages/baseai/src/dev/llms/call-xai.ts
+++ /dev/null
@@ -1,67 +0,0 @@
-import OpenAI from 'openai';
-import { dlog } from '../utils/dlog';
-import { X_AI } from '../data/models';
-import { handleLlmError } from './utils';
-import type { Message, Pipe } from 'types/pipe';
-import type { ModelParams } from 'types/providers';
-import { addToolsToParams } from '../utils/add-tools-to-params';
-import type { PipeTool } from 'types/tools';
-
-export async function callXAI({
- pipe,
- stream,
- llmApiKey,
- messages,
- paramsTools
-}: {
- pipe: Pipe;
- stream: boolean;
- llmApiKey: string;
- messages: Message[];
- paramsTools: PipeTool[] | undefined;
-}) {
- try {
- const modelParams = buildModelParams(pipe, stream, messages);
-
- // LLM.
- const groq = new OpenAI({
- apiKey: llmApiKey,
- baseURL: 'https://api.x.ai/v1'
- });
-
- // Add tools (functions) to modelParams
- addToolsToParams(modelParams, pipe, paramsTools);
- dlog('modelParams', modelParams);
-
- return await groq.chat.completions.create(modelParams as any);
- } catch (error: any) {
- handleLlmError({ error, provider: X_AI });
- }
-}
-
-function buildModelParams(
- pipe: Pipe,
- stream: boolean,
- messages: Message[]
-): ModelParams {
- const model = pipe.model.split(':')[1];
- const {
- top_p,
- max_tokens,
- temperature,
- presence_penalty,
- frequency_penalty,
- stop
- } = pipe;
- return {
- messages,
- stream,
- model,
- top_p,
- max_tokens,
- temperature,
- presence_penalty,
- frequency_penalty,
- stop
- };
-}
diff --git a/packages/baseai/src/dev/llms/utils.ts b/packages/baseai/src/dev/llms/utils.ts
index 7870afd1..ce28d054 100644
--- a/packages/baseai/src/dev/llms/utils.ts
+++ b/packages/baseai/src/dev/llms/utils.ts
@@ -2,7 +2,6 @@ import type { ModelParams } from 'types/providers';
import { ApiError } from '../hono/errors';
import { dlog } from '../utils/dlog';
import { isJsonModeOn } from '../utils/is-json-mode';
-import type { Pipe } from 'types/pipe';
export function handleLlmError({
error,
@@ -18,10 +17,10 @@ export function handleLlmError({
});
}
-export function applyJsonModeIfEnabled(modelParams: ModelParams, pipe: Pipe) {
+export function applyJsonModeIfEnabled(modelParams: ModelParams, pipe: any) {
const hasJsonMode = isJsonModeOn({
- currentModel: modelParams.model as string,
- jsonMode: pipe.json || false
+ currentModel: pipe.model.name,
+ jsonMode: pipe.meta.json || false
});
if (hasJsonMode) {
@@ -31,12 +30,11 @@ export function applyJsonModeIfEnabled(modelParams: ModelParams, pipe: Pipe) {
export function applyJsonModeIfEnabledForGoogle(
transformedRequestParams: any,
- pipe: Pipe
+ pipe: any
) {
- const currentModel = pipe.model.split(':')[1];
const hasJsonMode = isJsonModeOn({
- currentModel,
- jsonMode: pipe.json || false
+ currentModel: pipe.model.name,
+ jsonMode: pipe.meta.json || false
});
if (hasJsonMode) {
diff --git a/packages/baseai/src/dev/providers/anthropic/chatComplete.ts b/packages/baseai/src/dev/providers/anthropic/chatComplete.ts
index c0173ee6..86b4439f 100644
--- a/packages/baseai/src/dev/providers/anthropic/chatComplete.ts
+++ b/packages/baseai/src/dev/providers/anthropic/chatComplete.ts
@@ -465,7 +465,6 @@ export const AnthropicChatCompleteStreamChunkTransform: (
choices: [
{
delta: {
- role: 'assistant',
content: ''
},
index: 0,
diff --git a/packages/baseai/src/dev/providers/google/chatComplete.ts b/packages/baseai/src/dev/providers/google/chatComplete.ts
index d5b45b34..3b90512e 100644
--- a/packages/baseai/src/dev/providers/google/chatComplete.ts
+++ b/packages/baseai/src/dev/providers/google/chatComplete.ts
@@ -439,7 +439,7 @@ export const GoogleChatCompleteStreamChunkTransform: (
model: '',
provider: 'google',
choices:
- parsedChunk.candidates?.map((generation, index) => {
+ parsedChunk.candidates?.map(generation => {
let message: ProviderMessage = {
role: 'assistant',
content: ''
@@ -473,7 +473,7 @@ export const GoogleChatCompleteStreamChunkTransform: (
}
return {
delta: message,
- index: generation.index ?? index,
+ index: generation.index,
finish_reason: generation.finishReason
};
}) ?? []
diff --git a/packages/baseai/src/dev/routes/beta/pipes/run.ts b/packages/baseai/src/dev/routes/beta/pipes/run.ts
index ddb88673..5f4b3cc4 100644
--- a/packages/baseai/src/dev/routes/beta/pipes/run.ts
+++ b/packages/baseai/src/dev/routes/beta/pipes/run.ts
@@ -115,15 +115,7 @@ const handleGenerateError = (c: any, error: unknown) => {
const handleRun = async (c: any) => {
try {
const body = await c.req.json();
-
- const llmKey = (body.llmApiKey as string) || '';
- const hiddenChars = new Array(45).fill('*').join('');
- const redactedKey = llmKey.length
- ? llmKey.slice(0, 8) + hiddenChars
- : '';
-
- const logData = { ...body, llmApiKey: redactedKey };
- logger('pipe.request', logData, 'Pipe Request Body');
+ logger('pipe.request', body, 'Pipe Request Body');
const validatedBody = validateRequestBody(body);
diff --git a/packages/baseai/src/dev/routes/v1/pipes/run.ts b/packages/baseai/src/dev/routes/v1/pipes/run.ts
deleted file mode 100644
index c58d0db2..00000000
--- a/packages/baseai/src/dev/routes/v1/pipes/run.ts
+++ /dev/null
@@ -1,155 +0,0 @@
-import { ApiError, ApiErrorZod } from '@/dev/hono/errors';
-import { callLLM } from '@/dev/llms/call-llm';
-import { dlog } from '@/dev/utils/dlog';
-import { handleStreamingResponse } from '@/dev/utils/provider-handlers/streaming-response-handler';
-import { logger } from '@/utils/logger-utils';
-import { Hono } from 'hono';
-import {
- schemaMessage,
- toolChoiceSchema,
- VariablesSchema,
- type PipeModelT
-} from 'types/pipe';
-import { pipeToolSchema } from 'types/tools';
-import { z } from 'zod';
-
-// Schema definitions
-const PipeSchema = z.object({
- name: z.string(),
- description: z.string(),
- status: z.enum(['public', 'private']),
- model: z.string(),
- stream: z.boolean(),
- json: z.boolean(),
- store: z.boolean(),
- moderate: z.boolean(),
- top_p: z.number(),
- max_tokens: z.number(),
- temperature: z.number(),
- presence_penalty: z.number(),
- frequency_penalty: z.number(),
- stop: z.array(z.string()),
- tool_choice: z
- .union([z.enum(['auto', 'required', 'none']), toolChoiceSchema])
- .default('auto'),
- parallel_tool_calls: z.boolean(),
- messages: z.array(schemaMessage),
- variables: VariablesSchema,
- tools: z.array(pipeToolSchema).default([]),
- memory: z.array(z.object({ name: z.string().trim().min(1) })).default([])
-});
-
-const RequestBodySchema = z.object({
- pipe: PipeSchema,
- stream: z.boolean(),
- messages: z.array(schemaMessage),
- llmApiKey: z.string(),
- tools: z.array(pipeToolSchema).optional(),
- variables: VariablesSchema.optional()
-});
-
-type RequestBody = z.infer<typeof RequestBodySchema>;
-
-// Helper functions
-const validateRequestBody = (body: unknown): RequestBody => {
- const result = RequestBodySchema.safeParse(body);
- if (!result.success) {
- throw new ApiErrorZod({
- code: 'BAD_REQUEST',
- validationResult: result,
- customMessage: 'Invalid request body'
- });
- }
- return result.data;
-};
-
-const processLlmResponse = (c: any, body: RequestBody, rawLlmResponse: any) => {
- const isStreaming = body.stream;
-
- // Non-streaming
- if (!isStreaming && rawLlmResponse?.choices?.length > 0) {
- const completion = rawLlmResponse.choices[0]?.message?.content ?? '';
- const toolCalls = rawLlmResponse.choices[0]?.message?.tool_calls ?? [];
- const isToolCall = toolCalls.length > 0;
-
- logger('tool', isToolCall, 'Tool calls found');
- logger('tool.calls', toolCalls);
- logger('pipe.completion', completion, 'Pipe completion');
- logger('pipe.response', rawLlmResponse, 'type: (non-streaming)');
-
- return c.json({ completion, ...rawLlmResponse });
- }
-
- // Streaming
- if (isStreaming) {
- logger('pipe.response', rawLlmResponse, 'type: (streaming)');
- return handleStreamingResponse({
- response: rawLlmResponse,
- headers: {},
- c
- });
- }
- return c.json({ body });
-};
-
-const handleGenerateError = (c: any, error: unknown) => {
- if (error instanceof ApiErrorZod) {
- throw error;
- }
-
- const errorMessage =
- error instanceof Error
- ? error.message
- : 'Unexpected error occurred in /pipe/v1/run';
-
- dlog('Error /pipe/v1/run.ts:', error);
-
- throw new ApiError({
- status: error instanceof ApiError ? error.status : 500,
- code: error instanceof ApiError ? error.code : 'INTERNAL_SERVER_ERROR',
- message: errorMessage,
- docs: error instanceof ApiError ? error.docs : undefined
- });
-};
-
-// Main endpoint handler
-const handleRun = async (c: any) => {
- try {
- const body = await c.req.json();
-
- const llmKey = (body.llmApiKey as string) || '';
- const hiddenChars = new Array(45).fill('*').join('');
- const redactedKey = llmKey.length
- ? llmKey.slice(0, 8) + hiddenChars
- : '';
-
- const logData = { ...body, llmApiKey: redactedKey };
- logger('pipe.request', logData, 'Pipe Request Body');
-
- const validatedBody = validateRequestBody(body);
-
- const { pipe, messages, llmApiKey, stream, variables } = validatedBody;
- const model = pipe.model as PipeModelT;
-
- const rawLlmResponse = await callLLM({
- pipe: {
- ...pipe,
- model
- },
- messages,
- llmApiKey,
- stream,
- variables,
- paramsTools: validatedBody.tools
- });
-
- return processLlmResponse(c, validatedBody, rawLlmResponse);
- } catch (error: unknown) {
- return handleGenerateError(c, error);
- }
-};
-
-// Register the endpoint
-export const registerV1PipesRun = (app: Hono) => {
- app.post('/v1/pipes/run', handleRun);
-};
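For reference, a hedged sketch of a request body the removed `RequestBodySchema` would have accepted; every concrete value is illustrative, and the message/variable shapes are assumptions based on the schema names:

```ts
// Illustrative only: a body the removed /v1/pipes/run schema would parse.
const exampleBody = {
	pipe: {
		name: 'summary-agent',
		description: 'Summarizes user text',
		status: 'private',
		model: 'openai:gpt-4o-mini',
		stream: true,
		json: false,
		store: true,
		moderate: true,
		top_p: 1,
		max_tokens: 1000,
		temperature: 0.7,
		presence_penalty: 0,
		frequency_penalty: 0,
		stop: [],
		tool_choice: 'auto',
		parallel_tool_calls: true,
		messages: [], // schemaMessage items; exact shape assumed
		variables: [], // VariablesSchema; exact shape assumed
		tools: [],
		memory: [{ name: 'docs' }]
	},
	stream: true,
	messages: [{ role: 'user', content: 'Summarize this.' }],
	llmApiKey: process.env.OPENAI_API_KEY ?? ''
};
```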
diff --git a/packages/baseai/src/dev/utils/add-tools-to-params.ts b/packages/baseai/src/dev/utils/add-tools-to-params.ts
index 83816e61..6cbef242 100644
--- a/packages/baseai/src/dev/utils/add-tools-to-params.ts
+++ b/packages/baseai/src/dev/utils/add-tools-to-params.ts
@@ -1,53 +1,30 @@
-import type { Pipe, ToolCall } from 'types/pipe';
-import { getProvider } from './get-provider';
import { getSupportedToolSettings, hasToolSupport } from './has-tool-support';
import type { ModelParams } from 'types/providers';
-import type { PipeTool } from 'types/tools';
-export function addToolsToParams(
- modelParams: ModelParams,
- pipe: Pipe,
- paramsTools: PipeTool[] | undefined
-) {
- const pipeTools = pipe.tools as unknown as string[];
- const hasParamsTools = paramsTools && paramsTools.length > 0;
-
- // 1. If no tools are provided, return the modelParams as is
- if (!hasParamsTools && !pipeTools.length) return modelParams;
-
- const [providerString, modelName] = pipe.model.split(':');
- const provider = getProvider(providerString);
+export function addToolsToParams(modelParams: ModelParams, pipe: any) {
+ if (!pipe.functions.length) return;
// Check if the model supports tool calls
const hasToolCallSupport = hasToolSupport({
- modelName,
- provider
+ modelName: pipe.model.name,
+ provider: pipe.model.provider
});
- // 2. If the model does not support tool calls, return the modelParams as is
- if (!hasToolCallSupport) return modelParams;
+ if (hasToolCallSupport) {
+ const { hasParallelToolCallSupport, hasToolChoiceSupport } =
+ getSupportedToolSettings({
+ modelName: pipe.model.name,
+ provider: pipe.model.provider
+ });
- // If tools are provided in request param, prioritize and use them
- if (hasParamsTools) {
- modelParams.tools = paramsTools as ToolCall[];
- }
+ if (hasParallelToolCallSupport) {
+ modelParams.parallel_tool_calls = pipe.model.parallel_tool_calls;
+ }
- // If tools are not provided in request param, use the tools from the pipe config
- if (!hasParamsTools && pipeTools.length) {
- modelParams.tools = pipe.tools as ToolCall[];
- }
-
- const { hasParallelToolCallSupport, hasToolChoiceSupport } =
- getSupportedToolSettings({
- modelName,
- provider
- });
-
- if (hasParallelToolCallSupport) {
- modelParams.parallel_tool_calls = pipe.parallel_tool_calls;
- }
+ if (hasToolChoiceSupport) {
+ modelParams.tool_choice = pipe.model.tool_choice;
+ }
- if (hasToolChoiceSupport) {
- modelParams.tool_choice = pipe.tool_choice;
+ modelParams.tools = pipe.functions;
}
}
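A hedged sketch of how the reshaped `addToolsToParams` is driven. The `model.*` and `functions` fields are inferred from the property accesses above, and unlike the removed version, the function now mutates `modelParams` in place and returns nothing:

```ts
import type { ModelParams } from 'types/providers';

// Fields inferred from the new implementation above; values illustrative.
const pipe = {
	model: {
		name: 'gpt-4o-mini',
		provider: 'OpenAI', // whatever provider value hasToolSupport expects
		parallel_tool_calls: true,
		tool_choice: 'auto'
	},
	functions: [
		{ type: 'function', function: { name: 'getWeather', parameters: {} } }
	]
};

const modelParams = {} as ModelParams;
addToolsToParams(modelParams, pipe);
// modelParams now carries tools / tool_choice / parallel_tool_calls when the
// model supports them; otherwise it is left untouched.
```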
diff --git a/packages/baseai/src/dev/utils/get-llm-api-key.ts b/packages/baseai/src/dev/utils/get-llm-api-key.ts
index ad45792c..523a84a0 100644
--- a/packages/baseai/src/dev/utils/get-llm-api-key.ts
+++ b/packages/baseai/src/dev/utils/get-llm-api-key.ts
@@ -6,8 +6,7 @@ import {
GROQ,
OPEN_AI,
PERPLEXITY,
- TOGETHER_AI,
- X_AI
+ TOGETHER_AI
} from '@/dev/data/models';
export function getLLMApiKey(modelProvider: string): string {
@@ -28,8 +27,6 @@ export function getLLMApiKey(modelProvider: string): string {
return process.env.FIREWORKS_API_KEY || '';
case modelProvider.includes(PERPLEXITY):
return process.env.PERPLEXITY_API_KEY || '';
- case modelProvider.includes(X_AI):
- return process.env.XAI_API_KEY || '';
default:
throw new Error(`Unsupported model provider: ${modelProvider}`);
}
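Usage stays a single lookup; the provider constant below is assumed to be the string exported from `@/dev/data/models`:

```ts
// Falls back to '' when the env var is unset; unknown providers throw.
const apiKey = getLLMApiKey(OPEN_AI); // reads process.env.OPENAI_API_KEY
```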
diff --git a/packages/baseai/src/dev/utils/get-provider.ts b/packages/baseai/src/dev/utils/get-provider.ts
deleted file mode 100644
index d434b7e0..00000000
--- a/packages/baseai/src/dev/utils/get-provider.ts
+++ /dev/null
@@ -1,50 +0,0 @@
-import {
- ANTHROPIC,
- COHERE,
- FIREWORKS_AI,
- GOOGLE,
- GROQ,
- OLLAMA,
- OPEN_AI,
- PERPLEXITY,
- TOGETHER_AI,
- X_AI
-} from '@/data/models';
-
-type Provider =
- | typeof OPEN_AI
- | typeof ANTHROPIC
- | typeof TOGETHER_AI
- | typeof GOOGLE
- | typeof GROQ
- | typeof COHERE
- | typeof FIREWORKS_AI
- | typeof PERPLEXITY;
-
-/**
- * Retrieves the provider based on the given provider string.
- *
- * @param providerString - The provider string.
- * @returns The corresponding provider object.
- * @throws Error if the provider is unknown.
- */
-export function getProvider(providerString: string): Provider {
- const providerMap: { [key: string]: Provider } = {
- openai: OPEN_AI,
- anthropic: ANTHROPIC,
- together: TOGETHER_AI,
- google: GOOGLE,
- groq: GROQ,
- cohere: COHERE,
- fireworks: FIREWORKS_AI,
- perplexity: PERPLEXITY,
- ollama: OLLAMA,
- xai: X_AI
- };
-
- const provider = providerMap[providerString.toLowerCase()];
- if (!provider) {
- throw new Error(`Unknown provider: ${providerString}`);
- }
- return provider;
-}
diff --git a/packages/baseai/src/dev/utils/moderate.ts b/packages/baseai/src/dev/utils/moderate.ts
index 739cdde7..36b0dc0d 100644
--- a/packages/baseai/src/dev/utils/moderate.ts
+++ b/packages/baseai/src/dev/utils/moderate.ts
@@ -29,10 +29,7 @@ export async function moderate({
}
// Perform moderation on the constructed prompt text
- const moderation = await openai.moderations.create({
- model: 'omni-moderation-latest',
- input: promptText
- });
+ const moderation = await openai.moderations.create({ input: promptText });
const result = moderation?.results[0];
// dlog('moderation:', result);
diff --git a/packages/baseai/src/dev/utils/thread/add-json-mode.ts b/packages/baseai/src/dev/utils/thread/add-json-mode.ts
index 2740e351..9bd6b86e 100644
--- a/packages/baseai/src/dev/utils/thread/add-json-mode.ts
+++ b/packages/baseai/src/dev/utils/thread/add-json-mode.ts
@@ -8,7 +8,7 @@
import { jsonModeModels } from '@/data/models';
import { defaultJsonPrompt } from '@/dev/data/globals';
-import type { Pipe } from 'types/pipe';
+import type { Pipe } from '@/dev/routes/beta/pipes/run';
export function addJsonMode({
pipe,
@@ -18,11 +18,9 @@ export function addJsonMode({
systemPrompt: string;
}) {
// Return the system prompt if JSON mode is not enabled
- if (!pipe?.json) return systemPrompt;
-
- const modelName = pipe.model.split(':')[1];
+ if (!pipe.meta?.json) return systemPrompt;
// Return the system prompt if JSON mode is not supported by the current model
- if (!jsonModeModels.includes(modelName)) return systemPrompt;
+ if (!jsonModeModels.includes(pipe.model.name)) return systemPrompt;
const jsonModePrompt = getJsonPrompt(pipe);
diff --git a/packages/baseai/src/dev/utils/thread/get-few-shot-messages.ts b/packages/baseai/src/dev/utils/thread/get-few-shot-messages.ts
index f7adfcfb..aec9773b 100644
--- a/packages/baseai/src/dev/utils/thread/get-few-shot-messages.ts
+++ b/packages/baseai/src/dev/utils/thread/get-few-shot-messages.ts
@@ -1,4 +1,5 @@
-import type { Message, Pipe } from 'types/pipe';
+import type { Pipe } from '@/dev/routes/beta/pipes/run';
+import type { Message } from 'types/pipe';
export function getPipeFewShotsMessages(pipe: Pipe): Message[] {
const fewShotMessages: Message[] = pipe.messages.filter(
diff --git a/packages/baseai/src/dev/utils/thread/get-run-thread.ts b/packages/baseai/src/dev/utils/thread/get-run-thread.ts
index d4c224f5..0a335a7e 100644
--- a/packages/baseai/src/dev/utils/thread/get-run-thread.ts
+++ b/packages/baseai/src/dev/utils/thread/get-run-thread.ts
@@ -1,6 +1,7 @@
import { ApiError } from '@/dev/hono/errors';
+import type { Pipe } from '@/dev/routes/beta/pipes/run';
import type { SimilarChunk } from '@/utils/memory/db/lib';
-import type { Message, Pipe, VariablesI } from 'types/pipe';
+import type { Message, VariablesI } from 'types/pipe';
import { dlog } from '../dlog';
import { getPipeFewShotsMessages } from './get-few-shot-messages';
import { getSystemPromptMessage } from './get-system-prompt';
diff --git a/packages/baseai/src/dev/utils/thread/get-system-prompt.ts b/packages/baseai/src/dev/utils/thread/get-system-prompt.ts
index f8a5aa6b..f2e0ee7d 100644
--- a/packages/baseai/src/dev/utils/thread/get-system-prompt.ts
+++ b/packages/baseai/src/dev/utils/thread/get-system-prompt.ts
@@ -1,6 +1,7 @@
+import type { Pipe } from '@/dev/routes/beta/pipes/run';
import { defaultRagPrompt } from '@/utils/memory/constants';
import type { SimilarChunk } from '@/utils/memory/db/lib';
-import type { Message, Pipe } from 'types/pipe';
+import type { Message } from 'types/pipe';
import { addJsonMode } from './add-json-mode';
export function getSystemPromptMessage({
diff --git a/packages/baseai/src/dev/utils/thread/process-messages.ts b/packages/baseai/src/dev/utils/thread/process-messages.ts
index 4bd0fc7b..11686743 100644
--- a/packages/baseai/src/dev/utils/thread/process-messages.ts
+++ b/packages/baseai/src/dev/utils/thread/process-messages.ts
@@ -1,4 +1,5 @@
-import type { Message, Pipe, VariableI, VariablesI } from 'types/pipe';
+import type { Pipe } from '@/dev/routes/beta/pipes/run';
+import type { Message, VariableI, VariablesI } from 'types/pipe';
import { dlog } from '../dlog';
/**
diff --git a/packages/baseai/src/init/index.ts b/packages/baseai/src/init/index.ts
index 4e6e795d..89fca02d 100644
--- a/packages/baseai/src/init/index.ts
+++ b/packages/baseai/src/init/index.ts
@@ -292,7 +292,6 @@ GROQ_API_KEY=
MISTRAL_API_KEY=
PERPLEXITY_API_KEY=
TOGETHER_API_KEY=
-XAI_API_KEY=
`;
try {
diff --git a/packages/baseai/src/memory/create.ts b/packages/baseai/src/memory/create.ts
index 68131a6a..c45d3416 100644
--- a/packages/baseai/src/memory/create.ts
+++ b/packages/baseai/src/memory/create.ts
@@ -20,7 +20,7 @@ const defaultConfig = {
};
const MEMORY_CONSTANTS = {
- documentsDir: 'documents'
+ documentsDir: 'documents' // Path to store documents
};
export async function createMemory() {
@@ -48,7 +48,7 @@ export async function createMemory() {
message: 'Description of the memory',
placeholder: defaultConfig.description
}),
- useGit: () =>
+ useGitRepo: () =>
p.confirm({
message:
'Do you want to create memory from current project git repository?',
@@ -63,72 +63,115 @@ export async function createMemory() {
}
);
- const memoryNameSlugified = slugify(memoryInfo.name);
- const memoryNameCamelCase = camelCase('memory-' + memoryNameSlugified);
- const baseDir = path.join(process.cwd(), 'baseai', 'memory');
- const memoryDir = path.join(baseDir, memoryNameSlugified);
- const filePath = path.join(memoryDir, 'index.ts');
- const dbDir = path.join(process.cwd(), '.baseai', 'db');
+ let memoryFilesDir = '.';
+ let fileExtensions: string[] = ['*'];
- if (memoryInfo.useGit) {
+ if (memoryInfo.useGitRepo) {
+ // Check if the current directory is a Git repository
try {
await execAsync('git rev-parse --is-inside-work-tree');
} catch (error) {
p.cancel('The current directory is not a Git repository.');
process.exit(1);
}
+
+ memoryFilesDir = (await p.text({
+ message:
+ 'Enter the path to the directory to track (relative to current directory):',
+ initialValue: '.',
+ validate: value => {
+ if (!value.trim()) {
+ return 'The path cannot be empty.';
+ }
+ const fullPath = path.resolve(process.cwd(), value);
+ if (!fs.existsSync(fullPath)) {
+ return 'The specified path does not exist.';
+ }
+ if (!fs.lstatSync(fullPath).isDirectory()) {
+ return 'The specified path is not a directory.';
+ }
+ return;
+ }
+ })) as string;
+
+ const extensionsInput = (await p.text({
+ message:
+ 'Enter file extensions to track (use * for all, or comma-separated list, e.g., .md,.mdx):',
+ validate: value => {
+ if (value.trim() === '') {
+ return 'Please enter at least one file extension or *';
+ }
+ if (value !== '*') {
+ const extensions = value.split(',').map(ext => ext.trim());
+ const invalidExtensions = extensions.filter(
+ ext => !/^\.\w+$/.test(ext)
+ );
+ if (invalidExtensions.length > 0) {
+ return `Invalid extension(s): ${invalidExtensions.join(', ')}. Extensions should start with a dot followed by alphanumeric characters.`;
+ }
+ }
+ return;
+ }
+ })) as string;
+
+ fileExtensions =
+ extensionsInput === '*'
+ ? ['*']
+ : extensionsInput.split(',').map(ext => ext.trim());
}
- const memoryContent = `import {MemoryI} from '@baseai/core';
+ const memoryNameSlugified = slugify(memoryInfo.name);
+ const memoryNameCamelCase = camelCase('memory-' + memoryNameSlugified);
+
+ const baseDir = path.join(process.cwd(), 'baseai', 'memory');
+ const memoryDir = path.join(baseDir, memoryNameSlugified);
+ const filePath = path.join(memoryDir, 'index.ts');
+ const memoryDocumentsPath = path.join(
+ memoryDir,
+ MEMORY_CONSTANTS.documentsDir
+ );
+ const dbDir = path.join(process.cwd(), '.baseai', 'db');
+
+ const memoryContent = `import { MemoryI } from '@baseai/core';
+import path from 'path';
const ${memoryNameCamelCase} = (): MemoryI => ({
- name: '${memoryNameSlugified}',
- description: ${JSON.stringify(memoryInfo.description || '')},
- git: {
- enabled: ${memoryInfo.useGit},${
- memoryInfo.useGit
- ? `
- include: ['**/*'],
- gitignore: true,`
- : `
- include: ['${MEMORY_CONSTANTS.documentsDir}/**/*'],
- gitignore: false,`
- }
- deployedAt: '',
- embeddedAt: ''
- }
+ name: '${memoryNameSlugified}',
+ description: '${memoryInfo.description || ''}',
+ config: {
+ useGitRepo: ${memoryInfo.useGitRepo},
+ dirToTrack: path.posix.join(${memoryFilesDir
+ .split(path.sep)
+ .map(segment => `'${segment}'`)
+ .join(', ')}),
+ extToTrack: ${JSON.stringify(fileExtensions)}
+ }
});
-export default ${memoryNameCamelCase};`;
+export default ${memoryNameCamelCase};
+`;
try {
await fs.promises.mkdir(baseDir, { recursive: true });
await fs.promises.mkdir(memoryDir, { recursive: true });
await fs.promises.writeFile(filePath, memoryContent);
await fs.promises.mkdir(dbDir, { recursive: true });
+ await createDb(memoryNameSlugified);
- if (!memoryInfo.useGit) {
- const memoryDocumentsPath = path.join(
- memoryDir,
- MEMORY_CONSTANTS.documentsDir
- );
+ if (!memoryInfo.useGitRepo) {
await fs.promises.mkdir(memoryDocumentsPath, { recursive: true });
p.note(
`Add documents in baseai/memory/${memoryNameSlugified}/${cyan(`documents`)} to use them in the memory.`
);
} else {
+ const extensionsMsg = fileExtensions.includes('*')
+ ? 'all file types'
+ : `files with extensions: ${cyan(fileExtensions.join(', '))}`;
p.note(
- [
- 'All files in this Git repository will be tracked by default.',
- '',
- `To modify which files are being tracked, update the config at:`,
- cyan(filePath)
- ].join('\n')
+ `All ${extensionsMsg} under ${cyan(memoryFilesDir)} will be tracked and used in the memory.`
);
}
- await createDb(memoryNameSlugified);
-
p.outro(
heading({
text: memoryNameCamelCase,
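With these prompts in place, a generated `baseai/memory/<name>/index.ts` would come out roughly like this (name, directory, and extensions are illustrative):

```ts
import { MemoryI } from '@baseai/core';
import path from 'path';

const memoryDocs = (): MemoryI => ({
	name: 'docs',
	description: 'Project documentation memory',
	config: {
		useGitRepo: true,
		dirToTrack: path.posix.join('content', 'docs'),
		extToTrack: ['.md', '.mdx']
	}
});

export default memoryDocs;
```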
diff --git a/packages/baseai/src/memory/embed.ts b/packages/baseai/src/memory/embed.ts
index 7fa4aa6b..73a35bd0 100644
--- a/packages/baseai/src/memory/embed.ts
+++ b/packages/baseai/src/memory/embed.ts
@@ -58,7 +58,7 @@ export async function embedMemory({
let filesToEmbed: string[] = [];
let filesToDelete: string[] = [];
- if (memoryConfig.git.enabled) {
+ if (memoryConfig?.useGitRepo) {
const { filesToDeploy, filesToDelete: gitFilesToDelete } =
await handleGitSyncMemories({
memoryName: memoryName,
@@ -78,7 +78,7 @@ export async function embedMemory({
let embedResult = 'Embeddings updated.';
if (memoryFiles && memoryFiles.length > 0) {
s.message('Generating embeddings...');
- const shouldOverwrite = memoryConfig.git.enabled ? true : overwrite;
+ const shouldOverwrite = memoryConfig?.useGitRepo ? true : overwrite;
embedResult = await generateEmbeddings({
memoryFiles,
memoryName,
@@ -87,7 +87,7 @@ export async function embedMemory({
});
}
- if (memoryConfig.git.enabled) {
+ if (memoryConfig?.useGitRepo) {
if (filesToDelete.length > 0) {
await deleteDocumentsFromDB({
memoryName,
diff --git a/packages/baseai/src/pipe/index.ts b/packages/baseai/src/pipe/index.ts
index 48b3ec81..0ba6cb93 100644
--- a/packages/baseai/src/pipe/index.ts
+++ b/packages/baseai/src/pipe/index.ts
@@ -38,7 +38,7 @@ export async function createPipe() {
name: () =>
p.text({
message: 'Name of the pipe',
- placeholder: 'ai-agent-pipe',
+ placeholder: 'AI Pipe Agent',
validate: value => {
const result = pipeNameSchema.safeParse(value);
if (!result.success) {
@@ -135,7 +135,7 @@ const ${pipeNameCamelCase} = (): PipeI => ({
// Replace with your API key https://langbase.com/docs/api-reference/api-keys
apiKey: process.env.LANGBASE_API_KEY!,
name: '${pipeNameSlugified}',
- description: ${JSON.stringify(pipeInfo.description) || ''},
+ description: '${pipeInfo.description || ''}',
status: '${pipeInfo.status}',
model: 'openai:gpt-4o-mini',
stream: true,
diff --git a/packages/baseai/src/tool/index.ts b/packages/baseai/src/tool/index.ts
index d461bd8f..4239a9ee 100644
--- a/packages/baseai/src/tool/index.ts
+++ b/packages/baseai/src/tool/index.ts
@@ -110,7 +110,7 @@ const ${camelCaseNameToolName} = (): ToolI => ({
type: 'function' as const,
function: {
name: '${camelCaseNameToolName}',
- description: ${JSON.stringify(description) || ''},
+ description: '${description}',
parameters: {},
},
});
diff --git a/packages/baseai/src/utils/memory/git-sync/get-changed-files-between-commits.ts b/packages/baseai/src/utils/memory/git-sync/get-changed-files-between-commits.ts
index a128f386..f8d582bf 100644
--- a/packages/baseai/src/utils/memory/git-sync/get-changed-files-between-commits.ts
+++ b/packages/baseai/src/utils/memory/git-sync/get-changed-files-between-commits.ts
@@ -1,23 +1,23 @@
import { execSync } from 'child_process';
/**
- * Retrieves the list of changed and deleted files between two Git commits matching specified glob patterns.
+ * Retrieves the list of changed and deleted files between two Git commits within a specified directory.
*
* @param {Object} params - The parameters for the function.
* @param {string} params.oldCommit - The old commit reference to compare from.
* @param {string} [params.latestCommit='HEAD'] - The latest commit reference to compare to. Defaults to 'HEAD'.
- * @param {string[]} params.include - Array of glob patterns to track for changes.
+ * @param {string} params.dirToTrack - The directory to track for changes.
* @returns {Promise<{ changedFiles: string[]; deletedFiles: string[] }>} - A promise that resolves to an object containing arrays of changed and deleted files.
* @throws {Error} - Throws an error if the Git command execution fails or if the commit references are invalid.
*/
export async function getChangedAndDeletedFilesBetweenCommits({
oldCommit,
latestCommit = 'HEAD',
- include
+ dirToTrack
}: {
oldCommit: string;
latestCommit: string;
- include: string[];
+ dirToTrack: string;
}): Promise<{ changedFiles: string[]; deletedFiles: string[] }> {
try {
// Validate inputs
@@ -25,53 +25,30 @@ export async function getChangedAndDeletedFilesBetweenCommits({
throw new Error('Invalid commit references');
}
- if (!Array.isArray(include) || include.length === 0) {
- throw new Error('Include patterns must be a non-empty array');
- }
-
const repoPath = process.cwd();
- // Execute the Git commands for changed and deleted files
- const changedResult = execSync(
- constructGitCommand({
- include,
- oldCommit,
- diffFilter: 'ACMRT',
- latestCommit
- }),
- {
- encoding: 'utf-8',
- cwd: repoPath
- }
- ).trim();
+ // Construct the Git commands to get changed and deleted files in the specified directory
+ const changedCommand = `git diff --diff-filter=ACMRT --name-only ${oldCommit} ${latestCommit} -- ${dirToTrack}`;
+ const deletedCommand = `git diff --diff-filter=D --name-only ${oldCommit} ${latestCommit} -- ${dirToTrack}`;
+
+ // Execute the Git commands
+ const changedResult = execSync(changedCommand, {
+ encoding: 'utf-8',
+ cwd: repoPath
+ }).trim();
- const deletedResult = execSync(
- constructGitCommand({
- include,
- oldCommit,
- diffFilter: 'D',
- latestCommit
- }),
- {
- encoding: 'utf-8',
- cwd: repoPath
- }
- ).trim();
+ const deletedResult = execSync(deletedCommand, {
+ encoding: 'utf-8',
+ cwd: repoPath
+ }).trim();
// Process the results
- const changedFiles = changedResult
- ? changedResult
- .split('\n')
- .filter(Boolean)
- .map(file => file.replace(/\//g, '-'))
- : [];
+ let changedFiles = changedResult.split('\n').filter(Boolean);
+ let deletedFiles = deletedResult.split('\n').filter(Boolean);
- const deletedFiles = deletedResult
- ? deletedResult
- .split('\n')
- .filter(Boolean)
- .map(file => file.replace(/\//g, '-'))
- : [];
+ // Normalize paths: replace '/' with '-' to match stored document names
+ changedFiles = changedFiles.map(file => file.replace(/\//g, '-'));
+ deletedFiles = deletedFiles.map(file => file.replace(/\//g, '-'));
return { changedFiles, deletedFiles };
} catch (error) {
@@ -79,27 +56,3 @@ export async function getChangedAndDeletedFilesBetweenCommits({
throw error;
}
}
-
-// Helper function to construct the Git command for changed files
-const constructGitCommand = ({
- include,
- oldCommit,
- diffFilter,
- latestCommit
-}: {
- include: string[];
- oldCommit: string;
- diffFilter: 'ACMRT' | 'D';
- latestCommit: string;
-}) => {
- const baseCommand = `git diff --diff-filter=${diffFilter} --name-only ${oldCommit} ${latestCommit}`;
-
- // If there's only one pattern, use it directly
- if (include.length === 1) {
- return `${baseCommand} -- "${include[0]}"`;
- }
-
- // For multiple patterns, use brace expansion
- const patterns = include.map(pattern => `"${pattern}"`).join(' ');
- return `${baseCommand} -- ${patterns}`;
-};
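A usage sketch for the simplified helper; the commit hash and directory are placeholders:

```ts
// Inside an async context. Returned paths have '/' rewritten to '-',
// matching the stored document names.
const { changedFiles, deletedFiles } =
	await getChangedAndDeletedFilesBetweenCommits({
		oldCommit: 'abc1234',
		latestCommit: 'HEAD',
		dirToTrack: 'content/docs'
	});
console.log({ changedFiles, deletedFiles });
```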
diff --git a/packages/baseai/src/utils/memory/git-sync/handle-git-sync-memories.ts b/packages/baseai/src/utils/memory/git-sync/handle-git-sync-memories.ts
index 5bb638ee..207b5bb9 100644
--- a/packages/baseai/src/utils/memory/git-sync/handle-git-sync-memories.ts
+++ b/packages/baseai/src/utils/memory/git-sync/handle-git-sync-memories.ts
@@ -72,13 +72,13 @@ export async function handleGitSyncMemories({
// If there's no deployedCommitHash, user is deploying for the first time
// Deploy all files in the directory
const lastHashUsed = isEmbed
- ? config.git?.embeddedAt
- : config.git?.deployedAt;
+ ? config.embeddedCommitHash
+ : config.deployedCommitHash;
if (!lastHashUsed) {
filesToDeploy = allFiles;
p.log.info(
- `Found no previous ${isEmbed ? 'deployed' : 'embedded'} commit. ${isEmbed ? 'Deploying' : 'Embedding'} all ${filesToDeploy.length} files in memory "${memoryName}":`
+ `Found no previous deployed commit. Deploying all ${filesToDeploy.length} files in memory "${memoryName}":`
);
}
// Step 2.2: Otherwise, get changed files between commits
@@ -87,7 +87,7 @@ export async function handleGitSyncMemories({
await getChangedAndDeletedFilesBetweenCommits({
oldCommit: lastHashUsed,
latestCommit: 'HEAD',
- include: config.git.include
+ dirToTrack: config.dirToTrack
});
filesToDeploy = changedFiles;
diff --git a/packages/baseai/src/utils/memory/git-sync/save-deployed-commit-in-config.ts b/packages/baseai/src/utils/memory/git-sync/save-deployed-commit-in-config.ts
index 6c344f44..a8ddff82 100644
--- a/packages/baseai/src/utils/memory/git-sync/save-deployed-commit-in-config.ts
+++ b/packages/baseai/src/utils/memory/git-sync/save-deployed-commit-in-config.ts
@@ -19,108 +19,27 @@ export async function saveDeployedCommitHashInMemoryConfig({
const indexFilePath = path.join(memoryDir, 'index.ts');
let fileContents = await fs.readFile(indexFilePath, 'utf-8');
- // Check if the git block exists
- if (fileContents.includes('git:')) {
- // Find the git block including its indentation
- const gitBlockMatch = fileContents.match(/(\t*)git:\s*{[^}]*?}/);
- if (gitBlockMatch) {
- const [fullMatch, outerIndent] = gitBlockMatch;
- const innerIndent = outerIndent + '\t';
-
- // Parse existing content
- const contentMatch = fullMatch.match(
- /{\s*\n?\s*(.*?)\s*\n?\s*}/s
- );
- let existingContent = contentMatch ? contentMatch[1] : '';
-
- let contentLines = existingContent
- .split('\n')
- .map(line => line.trim().replace(/,\s*$/, '')) // Remove trailing commas
- .filter(Boolean);
-
- let newGitContent: string;
-
- // If deployedAt exists, update it while preserving formatting
- if (existingContent.includes('deployedAt:')) {
- contentLines = contentLines.map(line => {
- if (line.includes('deployedAt:')) {
- return `deployedAt: '${deployedCommitHash}'`;
- }
- return line;
- });
- } else {
- // Add deployedAt to existing content
- contentLines.push(`deployedAt: '${deployedCommitHash}'`);
- }
-
- // Add commas between lines but not after the last line
- newGitContent = contentLines
- .map((line, index) => {
- const isLast = index === contentLines.length - 1;
- return `${innerIndent}${line}${isLast ? '' : ','}`;
- })
- .join('\n');
-
- // Replace the old git block with the new one
- fileContents = fileContents.replace(
- /(\t*)git:\s*{[^}]*?}/,
- `${outerIndent}git: {\n${newGitContent}\n${outerIndent}}`
- );
- }
+ // Check if the deployedCommitHash already exists in the config
+ if (fileContents.includes('deployedCommitHash:')) {
+ // Update the existing deployedCommitHash
+ fileContents = fileContents.replace(
+ /deployedCommitHash:\s*['"].*['"]/,
+ `deployedCommitHash: '${deployedCommitHash}'`
+ );
} else {
- // Add new git config block
- const match = fileContents.match(
- /(?:const\s+\w+\s*=\s*\(\s*\)\s*(?::\s*\w+)?\s*=>\s*\({[\s\S]*?)(}\))/
+ // Add the deployedCommitHash to the config
+ fileContents = fileContents.replace(
+ /config:\s*{/,
+ `config: {\n deployedCommitHash: '${deployedCommitHash}',`
);
-
- if (match) {
- // Insert before the closing parenthesis
- const insertPosition =
- match.index! + match[0].length - match[1].length;
- const prefix = fileContents.slice(0, insertPosition);
- const suffix = fileContents.slice(insertPosition);
-
- // Match the indentation of nearby properties
- const indentMatch = prefix.match(/\n(\t+)[^\n]+\n\s*$/);
- const baseIndent = indentMatch ? indentMatch[1] : '\t';
- const innerIndent = baseIndent + '\t';
-
- const lines = [
- 'enabled: false',
- "include: ['**/*']",
- 'gitignore: false',
- `deployedAt: '${deployedCommitHash}'`
- ];
-
- const gitConfig = lines
- .map((line, index) => {
- const isLast = index === lines.length - 1;
- return `${innerIndent}${line}${isLast ? '' : ','}`;
- })
- .join('\n');
-
- fileContents = `${prefix},\n${baseIndent}git: {\n${gitConfig}\n${baseIndent}}${suffix}`;
- } else {
- throw new Error(
- 'Could not find appropriate location to insert git config'
- );
- }
}
// Write the updated contents back to the file
await fs.writeFile(indexFilePath, fileContents, 'utf-8');
- p.log.success(`Updated deployedAt hash for memory '${memoryName}'.`);
+ p.log.success(`Updated deployedCommitHash for memory '${memoryName}'.`);
} catch (error) {
- if (error instanceof Error) {
- p.cancel(
- `Failed to save deployedAt hash for memory '${memoryName}': ${error.message}`
- );
- } else {
- p.cancel(
- `Failed to save deployedAt hash for memory '${memoryName}': Unknown error`
- );
- }
+ console.error(`Error saving deployed commit hash: ${error}`);
throw error;
}
}
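A minimal demo of the fallback branch's string surgery, assuming a config block with no existing hash (the hash is a placeholder):

```ts
// Mirrors the replace above: inject deployedCommitHash right after 'config: {'.
const before = `config: {
        useGitRepo: true,`;
const after = before.replace(
	/config:\s*{/,
	`config: {\n        deployedCommitHash: 'abc1234',`
);
// after:
// config: {
//         deployedCommitHash: 'abc1234',
//         useGitRepo: true,
```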
diff --git a/packages/baseai/src/utils/memory/git-sync/save-embedded-commit-in-config.ts b/packages/baseai/src/utils/memory/git-sync/save-embedded-commit-in-config.ts
index 52051b12..edef22d2 100644
--- a/packages/baseai/src/utils/memory/git-sync/save-embedded-commit-in-config.ts
+++ b/packages/baseai/src/utils/memory/git-sync/save-embedded-commit-in-config.ts
@@ -19,108 +19,27 @@ export async function saveEmbeddedCommitHashInMemoryConfig({
const indexFilePath = path.join(memoryDir, 'index.ts');
let fileContents = await fs.readFile(indexFilePath, 'utf-8');
- // Check if the git block exists
- if (fileContents.includes('git:')) {
- // Find the git block including its indentation
- const gitBlockMatch = fileContents.match(/(\t*)git:\s*{[^}]*?}/);
- if (gitBlockMatch) {
- const [fullMatch, outerIndent] = gitBlockMatch;
- const innerIndent = outerIndent + '\t';
-
- // Parse existing content
- const contentMatch = fullMatch.match(
- /{\s*\n?\s*(.*?)\s*\n?\s*}/s
- );
- let existingContent = contentMatch ? contentMatch[1] : '';
-
- let contentLines = existingContent
- .split('\n')
- .map(line => line.trim().replace(/,\s*$/, '')) // Remove trailing commas
- .filter(Boolean);
-
- let newGitContent: string;
-
- // If embeddedAt exists, update it while preserving formatting
- if (existingContent.includes('embeddedAt:')) {
- contentLines = contentLines.map(line => {
- if (line.includes('embeddedAt:')) {
- return `embeddedAt: '${embeddedCommitHash}'`;
- }
- return line;
- });
- } else {
- // Add embeddedAt to existing content
- contentLines.push(`embeddedAt: '${embeddedCommitHash}'`);
- }
-
- // Add commas between lines but not after the last line
- newGitContent = contentLines
- .map((line, index) => {
- const isLast = index === contentLines.length - 1;
- return `${innerIndent}${line}${isLast ? '' : ','}`;
- })
- .join('\n');
-
- // Replace the old git block with the new one
- fileContents = fileContents.replace(
- /(\t*)git:\s*{[^}]*?}/,
- `${outerIndent}git: {\n${newGitContent}\n${outerIndent}}`
- );
- }
+ // Check if the embeddedCommitHash already exists in the config
+ if (fileContents.includes('embeddedCommitHash:')) {
+ // Update the existing embeddedCommitHash
+ fileContents = fileContents.replace(
+ /embeddedCommitHash:\s*['"].*['"]/,
+ `embeddedCommitHash: '${embeddedCommitHash}'`
+ );
} else {
- // Add new git config block
- const match = fileContents.match(
- /(?:const\s+\w+\s*=\s*\(\s*\)\s*(?::\s*\w+)?\s*=>\s*\({[\s\S]*?)(}\))/
+ // Add the embeddedCommitHash to the config
+ fileContents = fileContents.replace(
+ /config:\s*{/,
+ `config: {\n embeddedCommitHash: '${embeddedCommitHash}',`
);
-
- if (match) {
- // Insert before the closing parenthesis
- const insertPosition =
- match.index! + match[0].length - match[1].length;
- const prefix = fileContents.slice(0, insertPosition);
- const suffix = fileContents.slice(insertPosition);
-
- // Match the indentation of nearby properties
- const indentMatch = prefix.match(/\n(\t+)[^\n]+\n\s*$/);
- const baseIndent = indentMatch ? indentMatch[1] : '\t';
- const innerIndent = baseIndent + '\t';
-
- const lines = [
- 'enabled: false',
- "include: ['**/*']",
- 'gitignore: false',
- `embeddedAt: '${embeddedCommitHash}'`
- ];
-
- const gitConfig = lines
- .map((line, index) => {
- const isLast = index === lines.length - 1;
- return `${innerIndent}${line}${isLast ? '' : ','}`;
- })
- .join('\n');
-
- fileContents = `${prefix},\n${baseIndent}git: {\n${gitConfig}\n${baseIndent}}${suffix}`;
- } else {
- throw new Error(
- 'Could not find appropriate location to insert git config'
- );
- }
}
// Write the updated contents back to the file
await fs.writeFile(indexFilePath, fileContents, 'utf-8');
- p.log.success(`Updated embeddedAt hash for memory '${memoryName}'.`);
+ p.log.success(`Updated embeddedCommitHash for memory '${memoryName}'.`);
} catch (error) {
- if (error instanceof Error) {
- p.cancel(
- `Failed to save embeddedAt hash for memory '${memoryName}': ${error.message}`
- );
- } else {
- p.cancel(
- `Failed to save embeddedAt hash for memory '${memoryName}': Unknown error`
- );
- }
+ console.error(`Error saving embedded commit hash: ${error}`);
throw error;
}
}
diff --git a/packages/baseai/src/utils/memory/handle-old-memory-config.ts b/packages/baseai/src/utils/memory/handle-old-memory-config.ts
deleted file mode 100644
index e9bd9b4d..00000000
--- a/packages/baseai/src/utils/memory/handle-old-memory-config.ts
+++ /dev/null
@@ -1,107 +0,0 @@
-/**
- * Represents the old memory configuration format for backward compatibility.
- */
-export interface OldMemoryConfig {
- name: string;
- description?: string;
- config?: OldConfigObject;
-}
-
-interface OldConfigObject {
- useGitRepo: boolean;
- dirToTrack: string;
- extToTrack: string[] | ['*'];
- deployedCommitHash?: string;
- embeddedCommitHash?: string;
-}
-
-/**
- * Type guard to check if an object is of type `OldConfigObject`.
- *
- * @param obj - The object to check.
- * @returns `true` if the object is an `OldConfigObject`, otherwise `false`.
- */
-function isOldConfigObject(obj: unknown): obj is OldConfigObject {
- return (
- typeof obj === 'object' &&
- obj !== null &&
- 'useGitRepo' in obj &&
- typeof (obj as OldConfigObject).useGitRepo === 'boolean' &&
- 'dirToTrack' in obj &&
- typeof (obj as OldConfigObject).dirToTrack === 'string' &&
- 'extToTrack' in obj &&
- Array.isArray((obj as OldConfigObject).extToTrack)
- );
-}
-
-/**
- * Checks if an object conforms to the old memory configuration format.
- *
- * @param obj - The object to check.
- * @returns `true` if the object is in the old memory configuration format, otherwise `false`.
- */
-export function isOldMemoryConfigFormat(obj: unknown): boolean {
- if (
- typeof obj !== 'object' ||
- obj === null ||
- !('name' in obj) ||
- !('config' in obj)
- ) {
- return false;
- }
-
- const typedObj = obj as { name: unknown; config: unknown };
-
- return (
- typeof typedObj.name === 'string' &&
- (typedObj.config === undefined || isOldConfigObject(typedObj.config))
- );
-}
-
-/**
- * Generates upgrade instructions for converting an old memory configuration to the new format.
- *
- * @param oldConfig - The old memory configuration.
- * @returns A string containing the upgrade instructions.
- */
-export function generateUpgradeInstructions(
- oldConfig: OldMemoryConfig
-): string {
- if (!oldConfig.config) {
- return 'Invalid memory config.';
- }
-
- const newConfigExample = {
- name: oldConfig.name,
- description: oldConfig.description || 'Your memory description',
- git: {
- enabled: oldConfig.config.useGitRepo,
- include:
- oldConfig.config.extToTrack[0] === '*'
- ? [`${oldConfig.config.dirToTrack}/**/*`]
- : oldConfig.config.extToTrack.map(
- ext => `${oldConfig.config?.dirToTrack}/**/*${ext}`
- ),
- gitignore: true,
- deployedAt: oldConfig.config.deployedCommitHash || '',
- embeddedAt: oldConfig.config.embeddedCommitHash || ''
- }
- };
-
- return `
-Your memory config is using an outdated format in baseai/memory/${oldConfig.name}/index.ts. Please update the file to this new format:
-
-${JSON.stringify(newConfigExample, null, 2)}
-
-Key changes:
-- Removed nested 'config' object structure
-- Git-related fields are now grouped under a 'git' object
-- 'useGitRepo' is now 'git.enabled'
-- 'dirToTrack' and 'extToTrack' are combined into 'git.include' glob patterns
-- 'deployedCommitHash' is now 'git.deployedAt'
-- 'embeddedCommitHash' is now 'git.embeddedAt'
-- Added new 'git.gitignore' field (defaults to true)
-
-For more information, refer to the documentation: https://baseai.dev/docs/guides/memory-from-git
-`;
-}
diff --git a/packages/baseai/src/utils/memory/lib.ts b/packages/baseai/src/utils/memory/lib.ts
index a567ceb0..516878c0 100644
--- a/packages/baseai/src/utils/memory/lib.ts
+++ b/packages/baseai/src/utils/memory/lib.ts
@@ -7,6 +7,7 @@ import * as p from '@clack/prompts';
import fs from 'fs';
import type { Message } from 'types/pipe';
import { fromZodError } from 'zod-validation-error';
+import type { Pipe } from '../../dev/routes/beta/pipes/run';
import { defaultRagPrompt, MEMORYSETS } from './constants';
import {
cosineSimilaritySearch,
@@ -134,9 +135,11 @@ export const getAugmentedContext = ({
};
export const addContextFromMemory = async ({
+ pipe,
messages,
memoryNames
}: {
+ pipe: Pipe;
messages: Message[];
memoryNames: string[];
}) => {
diff --git a/packages/baseai/src/utils/memory/load-memory-config.ts b/packages/baseai/src/utils/memory/load-memory-config.ts
index 2022d83c..8c29d07e 100644
--- a/packages/baseai/src/utils/memory/load-memory-config.ts
+++ b/packages/baseai/src/utils/memory/load-memory-config.ts
@@ -2,60 +2,77 @@ import fs from 'fs/promises';
import path from 'path';
import * as p from '@clack/prompts';
import { memoryConfigSchema, type MemoryConfigI } from 'types/memory';
-import {
- generateUpgradeInstructions,
- isOldMemoryConfigFormat,
- type OldMemoryConfig
-} from './handle-old-memory-config';
-function extractConfigObject(fileContents: string): unknown {
- try {
- // Remove import statements and exports
- const cleanedContent = fileContents
- .replace(/import\s+.*?['"];?\s*/g, '')
- .replace(/export\s+default\s+/, '');
-
- // First try to match a function that returns an object directly with parentheses
- let match = cleanedContent.match(
- /(?:const\s+)?(\w+)\s*=\s*\(\s*\)\s*(?::\s*\w+)?\s*=>\s*\(({[\s\S]*?})\)/
- );
+function parsePathJoin(joinArgs: string): string {
+ // Remove any quotes, split by comma, and trim each argument
+ const args = joinArgs
+ .split(',')
+ .map(arg => arg.trim().replace(/['"]/g, ''));
+ // Join all arguments to preserve the complete path
+ return path.join(...args);
+}
- // If no direct parentheses match, try to match function with return statement
- if (!match) {
- match = cleanedContent.match(
- /(?:const\s+)?(\w+)\s*=\s*\(\s*\)\s*(?::\s*\w+)?\s*=>\s*\{[\s\S]*?return\s+({[\s\S]*?})\s*;\s*\}/
- );
- }
+function parseConfig(configString: string): MemoryConfigI {
+ // Remove all whitespace that's not inside quotes
+ const cleanConfig = configString.replace(
+ /\s+(?=(?:(?:[^"]*"){2})*[^"]*$)/g,
+ ''
+ );
- // If still no match, try to match direct object assignment
- if (!match) {
- match = cleanedContent.match(
- /(?:const\s+)?(?:memory|\w+)\s*=\s*({[\s\S]*?});?$/m
- );
- }
+ const useGitRepoMatch = cleanConfig.match(/useGitRepo:(true|false)/);
+ const dirToTrackMatch = cleanConfig.match(
+ /dirToTrack:(?:path\.(?:posix\.)?join\((.*?)\)|['"](.+?)['"])/
+ );
+ const extToTrackMatch = cleanConfig.match(/extToTrack:(\[.*?\])/);
+ const deployedCommitHashMatch = cleanConfig.match(
+ /deployedCommitHash:['"](.+?)['"]/
+ );
+ const embeddedCommitHashMatch = cleanConfig.match(
+ /embeddedCommitHash:['"](.+?)['"]/
+ );
- if (!match) {
- throw new Error('Unable to find memory object definition');
- }
+ if (!useGitRepoMatch || !dirToTrackMatch || !extToTrackMatch) {
+ throw new Error('Unable to parse config structure');
+ }
- // The object literal will be in the last capture group
- const memoryObjStr = match[match.length - 1];
+ const useGitRepo = useGitRepoMatch[1] === 'true';
+ const dirToTrack = dirToTrackMatch[2]
+ ? dirToTrackMatch[2]
+ : parsePathJoin(dirToTrackMatch[1]);
+ const extToTrack = JSON.parse(extToTrackMatch[1].replace(/'/g, '"'));
+ const deployedCommitHash = deployedCommitHashMatch
+ ? deployedCommitHashMatch[1]
+ : undefined;
+ const embeddedCommitHash = embeddedCommitHashMatch
+ ? embeddedCommitHashMatch[1]
+ : undefined;
- // Create a new Function that returns the object literal
- const fn = new Function(`return ${memoryObjStr}`);
- return fn();
- } catch (error) {
- console.error('Parsing error:', error);
- console.error('File contents:', fileContents);
- throw new Error(
- `Failed to extract config: ${error instanceof Error ? error.message : 'Unknown error'}`
- );
+ const config: MemoryConfigI = {
+ useGitRepo,
+ dirToTrack,
+ extToTrack
+ };
+
+ if (deployedCommitHash) {
+ config.deployedCommitHash = deployedCommitHash;
}
+
+ if (embeddedCommitHash) {
+ config.embeddedCommitHash = embeddedCommitHash;
+ }
+
+ // Validate the parsed config against the schema
+ const result = memoryConfigSchema.safeParse(config);
+ if (!result.success) {
+ throw new Error(`Invalid config: ${result.error.message}`);
+ }
+
+ return config;
}
export default async function loadMemoryConfig(
memoryName: string
-): Promise<MemoryConfigI> {
+): Promise<MemoryConfigI | null> {
try {
const memoryDir = path.join(
process.cwd(),
@@ -65,35 +82,46 @@ export default async function loadMemoryConfig(
);
const indexFilePath = path.join(memoryDir, 'index.ts');
+ // Check if the directory exists
+ await fs.access(memoryDir);
+
+ // Check if the index.ts file exists
await fs.access(indexFilePath);
+
+ // Read the file contents
const fileContents = await fs.readFile(indexFilePath, 'utf-8');
- const configObj = extractConfigObject(fileContents);
- // Try to parse with new schema first
+ // Extract the config object, allowing for any amount of whitespace
+ const configMatch = fileContents.match(/config\s*:\s*({[\s\S]*?})/);
+ if (!configMatch) {
+ return null;
+ }
+
+ // Parse the config
try {
- return memoryConfigSchema.parse(configObj);
- } catch (parseError) {
- if (!configObj) throw parseError;
-
- // If parsing fails, check if it's an old format
- if (isOldMemoryConfigFormat(configObj)) {
- p.note(
- generateUpgradeInstructions(configObj as OldMemoryConfig)
+ const config = parseConfig(configMatch[1]);
+ return config;
+ } catch (error) {
+ if (error instanceof Error) {
+ p.cancel(
+ `Unable to read config in '${memoryName}/index.ts': ${error.message}`
);
+ } else {
p.cancel(
- 'Deployment cancelled. Please update your memory config file to the new format.'
+ `Unable to read config in '${memoryName}/index.ts': Unknown error occurred`
);
- process.exit(1);
}
-
- // If it's neither new nor old format, throw the original error
- throw parseError;
+ process.exit(1);
}
} catch (error) {
if (error instanceof Error) {
- p.cancel(`Failed to load memory '${memoryName}': ${error.message}`);
+ p.cancel(
+ `Memory '${memoryName}' does not exist or could not be loaded: ${error.message}`
+ );
} else {
- p.cancel(`Failed to load memory '${memoryName}': Unknown error`);
+ p.cancel(
+ `Memory '${memoryName}' does not exist or could not be loaded: Unknown error occurred`
+ );
}
process.exit(1);
}
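The regexes in `parseConfig` are written against a config block of this shape; the sample below is what `configMatch[1]` would capture (values illustrative):

```ts
// Whitespace outside double quotes is stripped before matching.
const captured = `{
	useGitRepo: true,
	dirToTrack: path.posix.join('content', 'docs'),
	extToTrack: ['.md', '.mdx'],
	deployedCommitHash: 'abc1234'
}`;
// parseConfig(captured) would yield roughly:
// { useGitRepo: true, dirToTrack: 'content/docs',
//   extToTrack: ['.md', '.mdx'], deployedCommitHash: 'abc1234' }
```

Note that the non-greedy `{[\s\S]*?}` capture stops at the first closing brace, so this extraction assumes the config object contains no nested braces.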
diff --git a/packages/baseai/src/utils/memory/load-memory-files.ts b/packages/baseai/src/utils/memory/load-memory-files.ts
index e643309e..ebaafe24 100644
--- a/packages/baseai/src/utils/memory/load-memory-files.ts
+++ b/packages/baseai/src/utils/memory/load-memory-files.ts
@@ -5,21 +5,14 @@ import { allSupportedExtensions, MEMORYSETS } from './constants';
import { getDocumentContent } from './get-document-content';
import { formatDocSize } from './lib';
import loadMemoryConfig from './load-memory-config';
-import {
- memoryConfigSchema,
- type DocumentConfigI,
- type MemoryConfigI
-} from 'types/memory';
+import { memoryConfigSchema, type MemoryConfigI } from 'types/memory';
import { execSync } from 'child_process';
-import fg from 'fast-glob';
export interface MemoryDocumentI {
name: string;
size: string;
content: string;
blob: Blob;
- path: string;
- meta: Record<string, string>;
}
export const loadMemoryFiles = async (
@@ -29,12 +22,11 @@ export const loadMemoryFiles = async (
const memoryConfig = await checkMemoryConfig(memoryName);
// useDocumentsDir
- const useDocumentsDir = !memoryConfig || !memoryConfig.git.enabled;
- const documentConfig = memoryConfig?.documents;
+ const useDocumentsDir = !memoryConfig || !memoryConfig?.useGitRepo;
// Load files from documents directory.
if (useDocumentsDir) {
- return await loadMemoryFilesFromDocsDir({ memoryName, documentConfig });
+ return await loadMemoryFilesFromDocsDir(memoryName);
}
// Load files from the repo.
@@ -58,52 +50,46 @@ export const loadMemoryFilesFromCustomDir = async ({
memoryName: string;
memoryConfig: MemoryConfigI;
}): Promise<MemoryDocumentI[]> => {
- const includePatterns = memoryConfig.git.include;
+ const memoryFilesPath = memoryConfig.dirToTrack;
- if (!Array.isArray(includePatterns) || includePatterns.length === 0) {
- p.cancel(`No include patterns specified for memory '${memoryName}'`);
+ try {
+ await fs.access(memoryFilesPath);
+ } catch (error) {
+ p.cancel(
+ `Documents directory for memory '${memoryName}' does not exist.`
+ );
process.exit(1);
}
console.log('Reading documents in memory...');
- // Get all files that match the glob patterns and are tracked by git
let allFiles: string[];
try {
- // First get all git tracked files
- const gitFiles = new Set([
- ...execSync('git ls-files', { encoding: 'utf-8' })
- .split('\n')
- .filter(Boolean),
- ...execSync('git ls-files --others --exclude-standard', {
- encoding: 'utf-8'
- })
- .split('\n')
- .filter(Boolean),
- ...execSync('git diff --name-only', { encoding: 'utf-8' })
- .split('\n')
- .filter(Boolean)
- ]);
-
- // Then match against glob patterns
- const matchedFiles = await fg(includePatterns, {
- ignore: ['node_modules/**'],
- dot: true,
- gitignore: memoryConfig.git.gitignore || true
- });
-
- // Only keep files that are both tracked by git and match the patterns
- allFiles = matchedFiles.filter((file: string) => gitFiles.has(file));
+ allFiles = execSync(`git ls-files ${memoryFilesPath}`, {
+ encoding: 'utf-8'
+ })
+ .split('\n')
+ .filter(Boolean);
} catch (error) {
p.cancel(`Failed to read documents in memory '${memoryName}'.`);
process.exit(1);
}
+ // Check if all extensions are allowed.
+ const allExtensionsAllowed = memoryConfig.extToTrack[0] === '*';
+
+ // Filter files based on allowed extensions.
+ const extensionsToUse = allExtensionsAllowed
+ ? allSupportedExtensions
+ : memoryConfig.extToTrack.filter(ext =>
+ allSupportedExtensions.includes(ext)
+ );
+
const memoryFilesContent = await Promise.all(
allFiles.map(async filePath => {
- // Check if the file is allowed
- const isSupportedExtension = allSupportedExtensions.some(
- extension => filePath.endsWith(extension)
+ // Check if the file is allowed.
+ const isSupportedExtension = extensionsToUse.some(extension =>
+ filePath.endsWith(extension)
);
if (!isSupportedExtension) {
@@ -128,21 +114,12 @@ export const loadMemoryFilesFromCustomDir = async ({
return null;
}
- const memoryFile = {
- path: filePath,
+ return {
name: path.basename(filePath.replace(/\//g, '-')),
size: formatDocSize(fileContentBlob.size),
content: await getDocumentContent(fileContentBlob),
blob: fileContentBlob
};
-
- let meta = {};
-
- if (memoryConfig?.documents?.meta) {
- meta = memoryConfig.documents.meta(memoryFile) || {};
- }
-
- return { ...memoryFile, meta };
})
);
@@ -175,13 +152,9 @@ export const loadMemoryFilesFromCustomDir = async ({
* - Have unsupported file extensions.
* 5. Returns an array of `MemoryDocumentI` objects representing the valid memory files.
*/
-export const loadMemoryFilesFromDocsDir = async ({
- memoryName,
- documentConfig
-}: {
- memoryName: string;
- documentConfig?: DocumentConfigI;
-}): Promise<MemoryDocumentI[]> => {
+export const loadMemoryFilesFromDocsDir = async (
+ memoryName: string
+): Promise<MemoryDocumentI[]> => {
const memoryDir = path.join(process.cwd(), 'baseai', 'memory', memoryName);
const memoryFilesPath = path.join(memoryDir, 'documents');
@@ -234,21 +207,12 @@ export const loadMemoryFilesFromDocsDir = async ({
return null;
}
- const memoryFile = {
+ return {
name: file,
- path: filePath,
size: formatDocSize(fileContentBlob.size),
content: await getDocumentContent(fileContentBlob),
blob: fileContentBlob
};
-
- let meta = {};
-
- if (documentConfig?.meta) {
- meta = documentConfig.meta(memoryFile) || {};
- }
-
- return { ...memoryFile, meta };
})
);
@@ -299,6 +263,28 @@ async function checkMemoryConfig(
};
}
+/**
+ * Recursively traverses a directory and returns a list of all file paths.
+ *
+ * @param dir - The directory to traverse.
+ * @returns A promise that resolves to an array of file paths.
+ */
+const traverseDirectory = async (dir: string): Promise<string[]> => {
+ const files: string[] = [];
+ const entries = await fs.readdir(dir, { withFileTypes: true });
+
+ for (const entry of entries) {
+ const fullPath = path.join(dir, entry.name);
+ if (entry.isDirectory()) {
+ files.push(...(await traverseDirectory(fullPath)));
+ } else {
+ files.push(fullPath);
+ }
+ }
+
+ return files;
+};
+
export const getMemoryFileNames = async (
memoryName: string
): Promise<string[]> => {
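A usage sketch for the new `traverseDirectory` helper, from within this module (path is illustrative; assumes the promise-based `fs` API the helper already relies on):

```ts
// Inside an async context: recursively list every file under a
// memory's documents directory.
const allPaths = await traverseDirectory(
	path.join(process.cwd(), 'baseai', 'memory', 'docs', 'documents')
);
// e.g. ['.../documents/intro.md', '.../documents/guides/setup.md', ...]
```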
diff --git a/packages/baseai/src/utils/retrieve-credentials.ts b/packages/baseai/src/utils/retrieve-credentials.ts
deleted file mode 100644
index accd30c7..00000000
--- a/packages/baseai/src/utils/retrieve-credentials.ts
+++ /dev/null
@@ -1,59 +0,0 @@
-import { loadConfig } from './config/config-handler';
-import fs from 'fs/promises';
-import * as p from '@clack/prompts';
-import color from 'picocolors';
-
-export interface Account {
- apiKey: string;
-}
-
-type Spinner = ReturnType<typeof p.spinner>;
-
-function handleNoAccountFound({ spinner }: { spinner: Spinner }): void {
- spinner.stop('No account found');
- p.log.warn('No account found. Please authenticate first.');
- p.log.info(`Run: ${color.green('npx baseai auth')}`);
-}
-function handleAuthError({
- spinner,
- error
-}: {
- spinner: Spinner;
- error: unknown;
-}): void {
- spinner.stop('Failed to retrieve authentication');
- p.log.error(`Error retrieving stored auth: ${(error as Error).message}`);
-}
-
-export async function retrieveAuthentication({
- spinner
-}: {
- spinner: Spinner;
-}): Promise<Account | null> {
- spinner.start('Retrieving stored authentication');
- try {
- const baiConfig = await loadConfig();
- let envFile = baiConfig.envFilePath || '.env';
- const envFileContent = await fs.readFile(envFile, 'utf-8');
-
- const apiKey = envFileContent
- .split('\n')
- .reverse()
- .find(line => line.includes('LANGBASE_API_KEY='))
- ?.split('=')[1];
-
- if (!apiKey) {
- handleNoAccountFound({ spinner });
- return null;
- }
-
- spinner.stop('Retrieved stored authentication');
-
- return {
- apiKey
- };
- } catch (error) {
- handleAuthError({ spinner, error });
- return null;
- }
-}
diff --git a/packages/baseai/src/utils/to-old-pipe-format.ts b/packages/baseai/src/utils/to-old-pipe-format.ts
index 35568f42..c7972761 100644
--- a/packages/baseai/src/utils/to-old-pipe-format.ts
+++ b/packages/baseai/src/utils/to-old-pipe-format.ts
@@ -1,3 +1,4 @@
+import { OLLAMA } from '@/dev/data/models';
import type { Pipe, PipeOld } from './../../types/pipe';
import {
ANTHROPIC,
@@ -5,11 +6,9 @@ import {
FIREWORKS_AI,
GOOGLE,
GROQ,
- OLLAMA,
OPEN_AI,
PERPLEXITY,
- TOGETHER_AI,
- X_AI
+ TOGETHER_AI
} from './../data/models';
type Provider =
@@ -103,8 +102,7 @@ function getProvider(providerString: string): Provider {
cohere: COHERE,
fireworks: FIREWORKS_AI,
perplexity: PERPLEXITY,
- ollama: OLLAMA,
- xai: X_AI
+ ollama: OLLAMA
};
const provider = providerMap[providerString.toLowerCase()];
diff --git a/packages/baseai/types/memory.ts b/packages/baseai/types/memory.ts
index 2716493b..cf7936f8 100644
--- a/packages/baseai/types/memory.ts
+++ b/packages/baseai/types/memory.ts
@@ -1,5 +1,11 @@
import { z } from 'zod';
+export interface MemoryI {
+ name: string;
+ description?: string;
+ config?: MemoryConfigI;
+}
+
export const memoryNameSchema = z
.string()
.min(3, 'Memory name must be at least 3 characters long')
@@ -16,43 +22,28 @@ export const memoryDocSchema = z.object({
documentName: docNameSchema
});
-export const gitConfigSchema = z.object({
- enabled: z.boolean(),
- include: z
- .array(z.string().trim().min(1, 'Include pattern must not be empty'))
- .min(1, 'At least one include pattern must be specified')
- .describe('Glob patterns to include files in the memory'),
- gitignore: z.boolean().optional().default(true),
- deployedAt: z.string().trim().optional().default(''),
- embeddedAt: z.string().trim().optional().default('')
-});
-
-export const documentSchema = z.object({
- meta: z
- .function()
- .args(
- z.object({
- name: z.string(),
- size: z.string(),
- content: z.string(),
- blob: z.instanceof(Blob),
- path: z.string(),
- })
- )
- .returns(z.record(z.string()))
- .optional()
-});
-
export const memoryConfigSchema = z.object({
- name: z.string(),
- description: z.string().optional(),
- git: gitConfigSchema,
- documents: documentSchema.optional()
+ useGitRepo: z.boolean(),
+ dirToTrack: z
+ .string()
+ .trim()
+ .min(1, 'Directory to track must not be empty'),
+ extToTrack: z.union([
+ z.tuple([z.literal('*')]),
+ z
+ .array(
+ z
+ .string()
+ .trim()
+ .regex(
+ /^\.\w+$/,
+ 'File extension must start with a dot followed by alphanumeric characters'
+ )
+ )
+ .min(1, 'At least one file extension must be specified')
+ ]),
+ deployedCommitHash: z.string().optional(),
+ embeddedCommitHash: z.string().optional()
});
-export type GitConfigI = z.infer<typeof gitConfigSchema>;
-
export type MemoryConfigI = z.infer<typeof memoryConfigSchema>;
-export type DocumentConfigI = z.infer<typeof documentSchema>;
-
-export type MemoryI = MemoryConfigI;
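A quick validation sketch against the restored schema; `extToTrack` accepts either the literal `['*']` tuple or a non-empty list of dot-prefixed extensions:

```ts
// Valid: wildcard tuple.
memoryConfigSchema.parse({
	useGitRepo: false,
	dirToTrack: 'documents',
	extToTrack: ['*']
});

// Valid: explicit extensions; the optional hashes are added later by the CLI.
const result = memoryConfigSchema.safeParse({
	useGitRepo: true,
	dirToTrack: 'content/docs',
	extToTrack: ['.md', '.mdx'],
	deployedCommitHash: 'abc1234'
});
if (!result.success) console.error(result.error.message);
```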
diff --git a/packages/baseai/types/model.ts b/packages/baseai/types/model.ts
index 4ec94b0f..669b6626 100644
--- a/packages/baseai/types/model.ts
+++ b/packages/baseai/types/model.ts
@@ -29,16 +29,13 @@ export type TogetherModels =
| 'together:mistralai/Mistral-7B-Instruct-v0.2'
| 'together:mistralai/Mixtral-8x7B-Instruct-v0.1'
| 'together:mistralai/Mixtral-8x22B-Instruct-v0.1'
- | 'together:databricks/dbrx-instruct'
- | 'together:meta-llama/Llama-3.3-70B-Instruct-Turbo';
+ | 'together:databricks/dbrx-instruct';
export type AnthropicModels =
- | 'anthropic:claude-3-5-sonnet-latest'
| 'anthropic:claude-3-5-sonnet-20240620'
| 'anthropic:claude-3-opus-20240229'
| 'anthropic:claude-3-sonnet-20240229'
- | 'anthropic:claude-3-haiku-20240307'
- | 'anthropic:claude-3-5-haiku-20241022';
+ | 'anthropic:claude-3-haiku-20240307';
export type GroqModels =
| 'groq:llama-3.1-70b-versatile'
@@ -47,8 +44,7 @@ export type GroqModels =
| 'groq:llama3-8b-8192'
| 'groq:mixtral-8x7b-32768'
| 'groq:gemma2-9b-it'
- | 'groq:gemma-7b-it'
- | 'groq:llama-3.3-70b-versatile';
+ | 'groq:gemma-7b-it';
export type GoogleModels =
| 'google:gemini-1.5-pro-latest'
@@ -63,8 +59,7 @@ export type FireworksAIModels =
| 'fireworks:llama-v3p1-8b-instruct'
| 'fireworks:llama-v3p1-70b-instruct'
| 'fireworks:llama-v3-70b-instruct'
- | 'fireworks:yi-large'
- | 'fireworks:llama-v3p3-70b-instruct';
+ | 'fireworks:yi-large';
export type PerplexityModels =
| 'perplexity:llama-3.1-sonar-huge-128k-online'
@@ -78,6 +73,4 @@ export type MistralAIModels =
| 'mistral:open-mistral-nemo'
| 'mistral:codestral-latest';
-export type XAIModels = 'xai:grok-beta';
-
export type OllamaModels = `ollama:${string}`;
diff --git a/packages/baseai/types/pipe.ts b/packages/baseai/types/pipe.ts
index bc18a65a..b51e37a0 100644
--- a/packages/baseai/types/pipe.ts
+++ b/packages/baseai/types/pipe.ts
@@ -153,14 +153,3 @@ export interface ToolCall {
arguments: string;
};
}
-
-const functionNameRegex = /^[a-zA-Z_$][a-zA-Z0-9_$]*$/;
-
-export const toolChoiceSchema = z
- .object({
- type: z.enum(['function']).default('function'),
- function: z.object({
- name: z.string().refine(value => functionNameRegex.test(value))
- })
- })
- .optional();
diff --git a/packages/baseai/types/tools.ts b/packages/baseai/types/tools.ts
index 7536bc86..eee73a84 100644
--- a/packages/baseai/types/tools.ts
+++ b/packages/baseai/types/tools.ts
@@ -1,5 +1,3 @@
-import { z } from 'zod';
-
export interface Tool {
run: (...args: any[]) => Promise<any> | any;
type: 'function';
@@ -10,13 +8,11 @@ export interface Tool {
};
}
-export const pipeToolSchema = z.object({
- type: z.literal('function'),
- function: z.object({
- name: z.string(),
- description: z.string().optional(),
- parameters: z.record(z.any()).optional()
- })
-});
-
-export type PipeTool = z.infer<typeof pipeToolSchema>;
+export interface PipeTool {
+ type: 'function';
+ function: {
+ name: string;
+ description?: string;
+ parameters?: Record<string, any>;
+ };
+}
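For reference, a hypothetical object satisfying the plain `PipeTool` interface that replaces the zod schema here — the function name and JSON-schema parameters are illustrative:

```ts
const weatherTool: PipeTool = {
	type: 'function',
	function: {
		name: 'getCurrentWeather',
		description: 'Get the current weather for a given location',
		parameters: {
			type: 'object',
			properties: { location: { type: 'string' } },
			required: ['location'],
		},
	},
};
```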
diff --git a/packages/core/CHANGELOG.md b/packages/core/CHANGELOG.md
index 7d6cc9b1..625d85e9 100644
--- a/packages/core/CHANGELOG.md
+++ b/packages/core/CHANGELOG.md
@@ -1,175 +1,5 @@
# `baseai` SDK
-## 0.9.43
-
-### Patch Changes
-
-- Fix moderation
-
-## 0.9.42
-
-### Patch Changes
-
-- 📦 NEW: LB-LLM-Key header support
-
-## 0.9.41
-
-### Patch Changes
-
-- 🐛 FIX: Google stream
-
-## 0.9.40
-
-### Patch Changes
-
-- 📦 NEW: meta-llama/Llama-3.3-70B-Instruct-Turbo model
-
-## 0.9.39
-
-### Patch Changes
-
-- 📦 NEW: tools support in pipe.run()
-
-## 0.9.38
-
-### Patch Changes
-
-- 📦 NEW: .env file based BaseAI auth
-
-## 0.9.37
-
-### Patch Changes
-
-- 👌 IMPROVE: Remove unused type
-
-## 0.9.36
-
-### Patch Changes
-
-- 📦 NEW: Dynamically set document metadata
-
-## 0.9.35
-
-### Patch Changes
-
-- 📦 NEW: Pipe API key support in pipe.run()
-
-## 0.9.34
-
-### Patch Changes
-
-- 👌 IMPROVE: Memory config with new features and better UX
-
-## 0.9.33
-
-### Patch Changes
-
-- 📦 NEW: Params for pipe.run() sdk support
-
-## 0.9.32
-
-### Patch Changes
-
-- 👌 IMPROVE: Error handling in usePipe
-
-## 0.9.31
-
-### Patch Changes
-
-- 98f2d7c: 🐛 FIX: Local development server
-- 👌 IMPROVE: Local development server
-
-## 0.9.30
-
-### Patch Changes
-
-- 📦 NEW: Request production AI agent pipe
-
-## 0.9.29
-
-### Patch Changes
-
-- 🐛 FIX: execAsync breaking paths in Windows
-
-## 0.9.28
-
-### Patch Changes
-
-- 📦 NEW: Pipe v1 support
-
-## 0.9.27
-
-### Patch Changes
-
-- 🐛 FIX: Broken pipes and tools build paths in Windows
-
-## 0.9.26
-
-### Patch Changes
-
-- 📦 NEW: Allow empty submit with no message
-
-## 0.9.25
-
-### Patch Changes
-
-- 🐛 FIX: Request timeout and special characters in description
-
-## 0.9.24
-
-### Patch Changes
-
-- 📦 NEW: claude 3.5 Haiku
-
-## 0.9.23
-
-### Patch Changes
-
-- 📦 NEW: setThreadId function in usePipe
-
-## 0.9.22
-
-### Patch Changes
-
-- 🐛 FIX: Anthropic streaming
-- 84d789c: 🐛 FIX: Anthropic streaming
-
-## 0.9.21
-
-### Patch Changes
-
-- 👌 IMPROVE: Redact LLM API key
-
-## 0.9.20
-
-### Patch Changes
-
-- 👌 IMPROVE: logs
-
-## 0.9.19
-
-### Patch Changes
-
-- 🐛 FIX: BaseAI deploy spinner not stopping
-
-## 0.9.18
-
-### Patch Changes
-
-- 📦 NEW: Export setInput and handleResponseStream functions
-
-## 0.9.17
-
-### Patch Changes
-
-- 📦 NEW: Add claude-3.5-sonnet-latest
-
-## 0.9.16
-
-### Patch Changes
-
-- 📦 NEW: XAI models support
-
## 0.9.15
### Patch Changes
diff --git a/packages/core/package.json b/packages/core/package.json
index 6d986085..bae9f1da 100644
--- a/packages/core/package.json
+++ b/packages/core/package.json
@@ -1,7 +1,7 @@
{
"name": "@baseai/core",
"description": "The Web AI Framework's core - BaseAI.dev",
- "version": "0.9.43",
+ "version": "0.9.15",
"license": "Apache-2.0",
"sideEffects": false,
"main": "./dist/index.js",
@@ -119,4 +119,4 @@
"langbase.com",
"generative AI"
]
-}
\ No newline at end of file
+}
diff --git a/packages/core/src/common/request.ts b/packages/core/src/common/request.ts
index 9b0edff5..26ee2e99 100644
--- a/packages/core/src/common/request.ts
+++ b/packages/core/src/common/request.ts
@@ -1,4 +1,4 @@
-import {handleResponseStream} from 'src/helpers';
+import {Stream} from 'openai/streaming';
import {APIConnectionError, APIError} from './errors';
interface RequestOptions {
@@ -14,7 +14,6 @@ interface RequestConfig {
apiKey?: string;
baseUrl: string;
timeout?: number;
- llmKey?: string;
}
interface SendOptions extends RequestOptions {
@@ -65,7 +64,7 @@ export class Request {
const threadId = response.headers.get('lb-thread-id');
if (options.body?.stream) {
- return handleResponseStream({
+ return this.handleRunResponseStream({
response,
rawResponse: options.body.rawResponse,
}) as T;
@@ -91,7 +90,6 @@ export class Request {
return {
'Content-Type': 'application/json',
Authorization: `Bearer ${this.config.apiKey}`,
- 'LB-LLM-Key': this.config.llmKey ?? '',
...headers,
};
}
@@ -101,13 +99,12 @@ export class Request {
options,
headers,
}: MakeRequestParams): Promise<Response> {
+ // console.log(' =================== REQUEST ===================');
const resp = await fetch(url, {
method: options.method,
headers,
body: JSON.stringify(options.body),
- ...(this.config.timeout && {
- signal: AbortSignal.timeout(this.config.timeout),
- }),
+ signal: AbortSignal.timeout(this.config.timeout || 30000),
});
return resp;
}
@@ -131,6 +128,42 @@ export class Request {
);
}
+ private handleRunResponseStream({
+ response,
+ rawResponse,
+ }: {
+ response: Response;
+ rawResponse?: boolean;
+ }): {
+ stream: any;
+ threadId: string | null;
+ rawResponse?: {
+ headers: Record<string, string>;
+ };
+ } {
+ const controller = new AbortController();
+ // const stream = Stream.fromSSEResponse(response, controller);
+ const streamSSE = Stream.fromSSEResponse(response, controller);
+ const stream = streamSSE.toReadableStream();
+
+ const result: {
+ stream: ReadableStream;
+ threadId: string | null;
+ rawResponse?: {
+ headers: Record<string, string>;
+ };
+ } = {
+ stream,
+ threadId: response.headers.get('lb-thread-id'),
+ };
+ if (rawResponse) {
+ result.rawResponse = {
+ headers: Object.fromEntries(response.headers.entries()),
+ };
+ }
+ return result;
+ }
+
private async handleRunResponse({
response,
isChat,
@@ -161,8 +194,8 @@ export class Request {
}
async post<T>(options: Omit<SendOptions, 'method'>): Promise<T> {
- // logger('Request.post.options');
- // logger(options, {depth: null, colors: true});
+ console.log('Request.post.options');
+ console.dir(options, {depth: null, colors: true});
return this.send({...options, method: 'POST'});
}
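`handleRunResponseStream` above wraps the fetch `Response` with the openai SDK's `Stream.fromSSEResponse` and hands back a web `ReadableStream`. A hedged sketch of consuming that stream on the caller side, assuming the openai `ChatCompletionStream` helper already used elsewhere in this package:

```ts
import { ChatCompletionStream } from 'openai/lib/ChatCompletionStream';

// Re-hydrate the ReadableStream produced by toReadableStream() and
// print content deltas as they arrive.
async function printRun(stream: ReadableStream) {
	const runner = ChatCompletionStream.fromReadableStream(stream);
	runner.on('content', delta => process.stdout.write(delta));
	await runner.done();
}
```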
diff --git a/packages/core/src/data/models.ts b/packages/core/src/data/models.ts
index be39bcd2..22d50890 100644
--- a/packages/core/src/data/models.ts
+++ b/packages/core/src/data/models.ts
@@ -11,7 +11,6 @@ export const DEEPINFRA: string = 'deepinfra';
export const BEDROCK: string = 'bedrock';
export const AZURE_OPEN_AI: string = 'azure-openai';
export const OLLAMA: string = 'ollama';
-export const X_AI: string = 'xAI';
interface Model {
id: string;
@@ -113,12 +112,6 @@ export const modelsByProvider: ModelsByProviderInclCosts = {
},
],
[TOGETHER_AI]: [
- {
- id: 'meta-llama/Llama-3.3-70B-Instruct-Turbo',
- provider: TOGETHER_AI,
- promptCost: 0.88,
- completionCost: 0.88,
- },
{
id: 'meta-llama/Meta-Llama-3.1-405B-Instruct-Turbo',
provider: TOGETHER_AI,
@@ -211,12 +204,6 @@ export const modelsByProvider: ModelsByProviderInclCosts = {
},
],
[ANTHROPIC]: [
- {
- id: 'claude-3-5-sonnet-latest',
- provider: ANTHROPIC,
- promptCost: 3,
- completionCost: 15,
- },
{
id: 'claude-3-5-sonnet-20240620',
provider: ANTHROPIC,
@@ -241,20 +228,8 @@ export const modelsByProvider: ModelsByProviderInclCosts = {
promptCost: 0.25,
completionCost: 1.25,
},
- {
- id: 'claude-3-5-haiku-20241022',
- provider: ANTHROPIC,
- promptCost: 1,
- completionCost: 5,
- },
],
[GROQ]: [
- {
- id: 'llama-3.3-70b-versatile',
- provider: GROQ,
- promptCost: 0.59,
- completionCost: 0.79,
- },
{
id: 'llama-3.1-70b-versatile',
provider: GROQ,
@@ -339,12 +314,6 @@ export const modelsByProvider: ModelsByProviderInclCosts = {
},
],
[FIREWORKS_AI]: [
- {
- id: 'llama-v3p3-70b-instruct',
- provider: FIREWORKS_AI,
- promptCost: 0.88,
- completionCost: 0.88,
- },
{
id: 'llama-v3p1-405b-instruct',
provider: FIREWORKS_AI,
@@ -431,14 +400,6 @@ export const modelsByProvider: ModelsByProviderInclCosts = {
completionCost: 3,
},
],
- [X_AI]: [
- {
- id: 'grok-beta',
- provider: X_AI,
- promptCost: 5,
- completionCost: 15,
- },
- ],
};
export const jsonModeModels = [
diff --git a/packages/core/src/helpers/stream.ts b/packages/core/src/helpers/stream.ts
index 64593011..38a74977 100644
--- a/packages/core/src/helpers/stream.ts
+++ b/packages/core/src/helpers/stream.ts
@@ -1,7 +1,5 @@
import {ChatCompletionStream} from 'openai/lib/ChatCompletionStream';
import {ChunkStream} from 'src/pipes';
-import {Stream} from 'openai/streaming';
-import {ToolCallResult} from 'types/pipes';
export interface Runner extends ChatCompletionStream {}
@@ -34,65 +32,3 @@ export const getRunner = (readableStream: ReadableStream) => {
export const getTextPart = (chunk: ChunkStream) => {
return chunk.choices[0]?.delta?.content || '';
};
-
-/**
- * Handles the response stream from a given `Response` object.
- *
- * @param {Object} params - The parameters for handling the response stream.
- * @param {Response} params.response - The API response to handle.
- * @param {boolean} params.rawResponse - Optional flag to include raw response headers.
- *
- * @returns {Object} An object containing the processed stream, thread ID, and optionally raw response headers.
- * @returns {ReadableStream} return.stream - The readable stream created from the response.
- * @returns {string | null} return.threadId - The thread ID extracted from the response headers.
- * @returns {Object} [return.rawResponse] - Optional raw response headers.
- * @returns {Record<string, string>} return.rawResponse.headers - The headers from the raw response.
- */
-export function handleResponseStream({
- response,
- rawResponse,
-}: {
- response: Response;
- rawResponse?: boolean;
-}): {
- stream: any;
- threadId: string | null;
- rawResponse?: {
- headers: Record<string, string>;
- };
-} {
- const controller = new AbortController();
- const streamSSE = Stream.fromSSEResponse(response, controller);
- const stream = streamSSE.toReadableStream();
-
- const result: {
- stream: ReadableStream;
- threadId: string | null;
- rawResponse?: {
- headers: Record<string, string>;
- };
- } = {
- stream,
- threadId: response.headers.get('lb-thread-id'),
- };
- if (rawResponse) {
- result.rawResponse = {
- headers: Object.fromEntries(response.headers.entries()),
- };
- }
- return result;
-}
-
-/**
- * Retrieves tool calls from a given readable stream.
- *
- * @param stream - The readable stream from which to extract tool calls.
- * @returns A promise that resolves to an array of `ToolCall` objects.
- */
-export async function getToolsFromStream(
- stream: ReadableStream,
-): Promise<ToolCallResult[]> {
- let run = getRunner(stream);
- const {choices} = await run.finalChatCompletion();
- return choices[0].message.tool_calls;
-}
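Callers of the `getToolsFromStream` helper removed here (see the `pipes.ts` hunk below) tee the response stream so one branch can be drained for tool calls while the other is handed back to the consumer unread. A self-contained sketch of that pattern — the wrapper name is hypothetical:

```ts
import { getToolsFromStream } from 'src/helpers';

// Split a streamed run: inspect tool calls on one branch, pass the
// other branch on untouched.
async function peekToolCalls(stream: ReadableStream) {
	const [forToolCalls, forReturn] = stream.tee();
	const toolCalls = await getToolsFromStream(forToolCalls);
	return { toolCalls, stream: forReturn };
}
```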
diff --git a/packages/core/src/pipes/pipes.ts b/packages/core/src/pipes/pipes.ts
index 410d74d4..f5a501e4 100644
--- a/packages/core/src/pipes/pipes.ts
+++ b/packages/core/src/pipes/pipes.ts
@@ -1,18 +1,10 @@
import type {Runner} from 'src/helpers';
-import {
- Message,
- MessageRole,
- Pipe as PipeI,
- ToolCallResult,
- Tools,
-} from '../../types/pipes';
+import {Message, MessageRole, Pipe as PipeI, ToolCall} from '../../types/pipes';
import {Request} from '../common/request';
import {getLLMApiKey} from '../utils/get-llm-api-key';
import {getApiUrl, isProd} from '../utils/is-prod';
+import {toOldPipeFormat} from '../utils/to-old-pipe-format';
import {isLocalServerRunning} from 'src/utils/local-server-running';
-import {getToolsFromStream} from 'src/helpers';
-import {ANTHROPIC} from 'src/data/models';
-import {getProvider} from 'src/utils/get-provider';
export interface Variable {
name: string;
@@ -24,15 +16,10 @@ export interface RunOptions {
variables?: Variable[];
threadId?: string;
rawResponse?: boolean;
- runTools?: boolean;
- tools?: Tools[];
- name?: string; // Pipe name for SDK,
- apiKey?: string; // pipe level key for SDK
- llmKey?: string; // LLM API key
}
export interface RunOptionsStream extends RunOptions {
- stream: boolean;
+ stream: true;
}
export interface Usage {
@@ -66,7 +53,6 @@ export interface RunResponseStream {
export interface PipeOptions extends PipeI {
maxCalls?: number;
- prod?: boolean;
}
interface ChoiceGenerate {
@@ -91,22 +77,11 @@ export class Pipe {
private tools: Record<string, (...args: any[]) => Promise<any>>;
private maxCalls: number;
private hasTools: boolean;
- private prod: boolean;
- private baseUrl: string;
- private entityApiKey?: string;
constructor(options: PipeOptions) {
- this.prod = options.prod ?? isProd();
- this.baseUrl = getApiUrl(this.prod);
-
- this.request = new Request({
- apiKey: options.apiKey,
- baseUrl: this.baseUrl,
- });
- this.pipe = options;
- this.entityApiKey = options.apiKey;
-
- delete this.pipe.prod;
+ const baseUrl = getApiUrl();
+ this.request = new Request({apiKey: options.apiKey, baseUrl});
+ this.pipe = toOldPipeFormat(options);
delete this.pipe.apiKey;
this.tools = this.getToolsFromPipe(this.pipe);
@@ -115,7 +90,7 @@ export class Pipe {
}
private getToolsFromPipe(
- pipe: Pipe,
+ pipe: any,
): Record<string, (...args: any[]) => Promise<any>> {
const tools: Record<string, (...args: any[]) => Promise<any>> = {};
if (pipe.tools && Array.isArray(pipe.tools)) {
@@ -126,16 +101,14 @@ export class Pipe {
return tools;
}
- private async runTools(toolCalls: ToolCallResult[]): Promise<Message[]> {
- const toolPromises = toolCalls.map(async (toolCall: ToolCallResult) => {
+ private async runTools(toolCalls: ToolCall[]): Promise<Message[]> {
+ const toolPromises = toolCalls.map(async (toolCall: ToolCall) => {
const toolName = toolCall.function.name;
const toolParameters = JSON.parse(toolCall.function.arguments);
const toolFunction = this.tools[toolName];
if (!toolFunction) {
- throw new Error(
- `Tool ${toolName} not found. If this is intentional, please set runTools to false to disable tool execution by default.`,
- );
+ throw new Error(`Tool '${toolName}' not found`);
}
const toolResponse = await toolFunction(toolParameters);
@@ -160,7 +133,7 @@ export class Pipe {
responseMessage: Message,
toolResults: Message[],
): Message[] {
- return this.prod
+ return isProd()
? toolResults
: [...messages, responseMessage, ...toolResults];
}
@@ -172,137 +145,26 @@ export class Pipe {
private warnIfToolsWithStream(requestedStream: boolean): void {
if (this.hasTools && requestedStream) {
console.warn(
- 'Warning: Streaming is not yet supported in Anthropic models when tools are present in the pipe. Falling back to non-streaming mode.',
+ 'Warning: Streaming is not yet supported when tools are present in the pipe. Falling back to non-streaming mode.',
);
}
}
- private async handleStreamResponse(
- options: RunOptionsStream,
- response: RunResponseStream,
- ): Promise {
- const endpoint = '/v1/pipes/run';
- const stream = this.isStreamRequested(options);
- const body = {...options, stream};
-
- const [streamForToolCall, streamForReturn] = response.stream.tee();
- const tools = await getToolsFromStream(streamForToolCall);
-
- if (tools.length) {
- let messages = options.messages || [];
-
- let currentResponse: RunResponseStream = {
- stream: streamForReturn,
- threadId: response.threadId,
- rawResponse: response.rawResponse,
- };
-
- let callCount = 0;
-
- while (callCount < this.maxCalls) {
- const [streamForToolCall, streamForReturn] =
- currentResponse.stream.tee();
-
- const tools = await getToolsFromStream(streamForToolCall);
-
- if (tools.length === 0) {
- return {
- stream: streamForReturn,
- threadId: currentResponse.threadId,
- rawResponse: response.rawResponse,
- };
- }
-
- const toolResults = await this.runTools(tools);
-
- const responseMessage = {
- role: 'assistant',
- content: null,
- tool_calls: tools,
- } as Message;
-
- messages = this.getMessagesToSend(
- messages,
- responseMessage,
- toolResults,
- );
-
- currentResponse = await this.createRequest(
- endpoint,
- {
- ...body,
- messages,
- threadId: currentResponse.threadId,
- },
- );
-
- callCount++;
- }
- }
-
- return {
- ...response,
- stream: streamForReturn,
- } as RunResponseStream;
- }
-
public async run(options: RunOptionsStream): Promise<RunResponseStream>;
public async run(options: RunOptions): Promise<RunResponse>;
public async run(
options: RunOptions | RunOptionsStream,
): Promise<RunResponse | RunResponseStream> {
- // logger('pipe.run', this.pipe.name, 'RUN');
-
- const endpoint = '/v1/pipes/run';
- // logger('pipe.run.baseUrl.endpoint', getApiUrl() + endpoint);
- // logger('pipe.run.options');
- // logger(options, {depth: null, colors: true});
-
- const providerString = this.pipe.model.split(':')[0];
- const modelProvider = getProvider(providerString);
- const isAnthropic = modelProvider === ANTHROPIC;
- const hasTools = this.pipe.tools.length > 0;
-
- // For SDK
- // Run the given pipe name
- if (options.name) {
- this.pipe = {...this.pipe, name: options.name};
- }
+ console.log('pipe.run', this.pipe.name, 'RUN');
- // For SDK
- // Run the pipe against the given Pipe API key
- if (options.apiKey) {
- this.request = new Request({
- apiKey: options.apiKey,
- baseUrl: this.baseUrl,
- ...((options.llmKey && {llmKey: options.llmKey}) || {}),
- });
- }
-
- if (options.llmKey && !options.apiKey) {
- this.request = new Request({
- apiKey: this.entityApiKey,
- baseUrl: this.baseUrl,
- llmKey: options.llmKey,
- });
- }
-
- let stream = this.isStreamRequested(options);
-
- // Anthropic models don't support streaming with tools.
- if (isAnthropic && hasTools && stream) {
- this.warnIfToolsWithStream(stream);
- stream = false;
- }
+ const endpoint = '/beta/pipes/run';
+ console.log('pipe.run.baseUrl.endpoint', getApiUrl() + endpoint);
+ console.log('pipe.run.options');
+ console.dir(options, {depth: null, colors: true});
- let runTools = options.runTools ?? true;
-
- // Do not run tools if they are explicitly provided in the options.
- if (options.tools && options.tools?.length) {
- runTools = false;
- }
-
- delete options.runTools;
+ const requestedStream = this.isStreamRequested(options);
+ const stream = this.hasTools ? false : requestedStream;
+ this.warnIfToolsWithStream(requestedStream);
const body = {...options, stream};
@@ -313,22 +175,13 @@ export class Pipe {
return {} as RunResponse | RunResponseStream;
}
- if (!runTools) {
- if (!stream) {
- return response as RunResponse;
- }
-
- return response as RunResponseStream;
- }
+ console.log('pipe.run.response');
+ console.dir(response, {depth: null, colors: true});
if (stream) {
- return await this.handleStreamResponse(
- options as RunOptionsStream,
- response as RunResponseStream,
- );
+ return response as RunResponseStream;
}
- // STREAM IS OFF
let messages = options.messages || [];
let currentResponse = response as RunResponse;
let callCount = 0;
@@ -337,21 +190,21 @@ export class Pipe {
const responseMessage = currentResponse.choices[0].message;
if (this.hasNoToolCalls(responseMessage)) {
- // logger('No more tool calls. Returning final response.');
+ console.log('No more tool calls. Returning final response.');
return currentResponse;
}
- // logger('\npipe.run.response.toolCalls');
- // logger(responseMessage.tool_calls, {
- // depth: null,
- // colors: true,
- // });
+ console.log('\npipe.run.response.toolCalls');
+ console.dir(responseMessage.tool_calls, {
+ depth: null,
+ colors: true,
+ });
const toolResults = await this.runTools(
- responseMessage.tool_calls as ToolCallResult[],
+ responseMessage.tool_calls as ToolCall[],
);
- // logger('\npipe.run.toolResults');
- // logger(toolResults, {depth: null, colors: true});
+ console.log('\npipe.run.toolResults');
+ console.dir(toolResults, {depth: null, colors: true});
messages = this.getMessagesToSend(
messages,
@@ -373,9 +226,9 @@ export class Pipe {
// Explicitly check if the new response has no tool calls
if (this.hasNoToolCalls(currentResponse.choices[0].message)) {
- // logger(
- // 'New response has no tool calls. Returning final response.',
- // );
+ console.log(
+ 'New response has no tool calls. Returning final response.',
+ );
return currentResponse;
}
}
@@ -387,7 +240,6 @@ export class Pipe {
}
private async createRequest<T>(endpoint: string, body: any): Promise<T> {
- const isProdEnv = this.prod;
const prodOptions = {
endpoint,
body: {
@@ -395,21 +247,17 @@ export class Pipe {
name: this.pipe.name,
},
};
+ const localOptions = {
+ endpoint,
+ body: {
+ ...body,
+ pipe: this.pipe,
+ llmApiKey: getLLMApiKey(this.pipe.model.provider),
+ },
+ };
- let localOptions = {} as any;
-
+ const isProdEnv = isProd();
if (!isProdEnv) {
- const providerString = this.pipe.model.split(':')[0];
- const modelProvider = getProvider(providerString);
- localOptions = {
- endpoint,
- body: {
- ...body,
- pipe: this.pipe,
- llmApiKey: getLLMApiKey(modelProvider),
- },
- };
-
const isServerRunning = await isLocalServerRunning();
if (!isServerRunning) return {} as T;
}
@@ -449,7 +297,7 @@ interface ContentChunk {
interface ToolCallChunk {
type: 'toolCall';
- toolCall: ToolCallResult;
+ toolCall: ToolCall;
}
interface ChoiceStream {
@@ -462,7 +310,7 @@ interface ChoiceStream {
interface Delta {
role?: MessageRole;
content?: string;
- tool_calls?: ToolCallResult[];
+ tool_calls?: ToolCall[];
}
interface UnknownChunk {
@@ -481,7 +329,7 @@ export interface ChunkStream {
export interface Chunk {
type: 'content' | 'toolCall' | 'unknown';
content?: string;
- toolCall?: ToolCallResult;
+ toolCall?: ToolCall;
rawChunk?: ChunkStream;
}
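With streaming off, `run()` loops: execute every tool call from the response, append the results as `tool` messages, and re-request until the model stops calling tools or `maxCalls` is reached. The core of `runTools` can be restated as a standalone sketch (shapes simplified from the class above):

```ts
type ToolFn = (...args: any[]) => Promise<any>;

interface Call {
	id: string;
	function: { name: string; arguments: string };
}

// Map each tool call to its registered function and return the results
// as `tool` role messages the next request can consume.
async function runToolCalls(registry: Record<string, ToolFn>, calls: Call[]) {
	return Promise.all(
		calls.map(async call => {
			const fn = registry[call.function.name];
			if (!fn) throw new Error(`Tool '${call.function.name}' not found`);
			const result = await fn(JSON.parse(call.function.arguments));
			return {
				role: 'tool' as const,
				name: call.function.name,
				tool_call_id: call.id,
				content: String(result),
			};
		}),
	);
}
```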
diff --git a/packages/core/src/react/use-pipe.ts b/packages/core/src/react/use-pipe.ts
index 2e3864ae..2f743d89 100644
--- a/packages/core/src/react/use-pipe.ts
+++ b/packages/core/src/react/use-pipe.ts
@@ -24,7 +24,6 @@ interface UsePipeOptions {
}
const uuidSchema = z.string().uuid();
-const externalThreadIdSchema = uuidSchema.optional();
export function usePipe({
apiRoute = '/langbase/pipes/run-stream',
@@ -42,9 +41,7 @@ export function usePipe({
const [error, setError] = useState<Error | null>(null);
const abortControllerRef = useRef<AbortController | null>(null);
- const threadIdRef = useRef<string | undefined>(
- initialThreadId || undefined,
- );
+ const threadIdRef = useRef<string | null>(initialThreadId || null);
const messagesRef = useRef<Message[]>(initialMessages);
const isFirstRequestRef = useRef(true);
@@ -90,17 +87,6 @@ export function usePipe({
[updateMessages, onResponse, onFinish],
);
- const setThreadId = useCallback((newThreadId: string | undefined) => {
- const isValidThreadId =
- externalThreadIdSchema.safeParse(newThreadId).success;
-
- if (isValidThreadId) {
- threadIdRef.current = newThreadId;
- } else {
- throw new Error('Invalid thread ID');
- }
- }, []);
-
const getMessagesToSend = useCallback(
(updatedMessages: Message[]): [Message[], boolean] => {
const isInitialRequest = isFirstRequestRef.current;
@@ -155,8 +141,8 @@ export function usePipe({
const [messagesToSend, lastMessageOnly] =
getMessagesToSend(updatedMessages);
- // Ensure there's at least one message to send if not allowing empty submit
- if (messagesToSend.length === 0 && !options.allowEmptySubmit) {
+ // Ensure there's at least one message to send
+ if (messagesToSend.length === 0) {
throw new Error(
'At least one message or initial message is required',
);
@@ -186,7 +172,7 @@ export function usePipe({
signal,
});
- if (!response.ok) await processErrorResponse(response);
+ if (!response.ok) throw new Error('Failed to send message');
const newThreadId = response.headers.get('lb-thread-id');
if (newThreadId) threadIdRef.current = newThreadId;
@@ -197,12 +183,10 @@ export function usePipe({
const result: RunResponse = await response.json();
processNonStreamResponse(result);
}
- } catch (err: any) {
+ } catch (err) {
if (err instanceof Error && err.name !== 'AbortError') {
setError(err);
onError?.(err);
- } else if (err.name !== 'AbortError') {
- throw new Error('Failed to send message');
}
} finally {
setIsLoading(false);
@@ -266,16 +250,6 @@ export function usePipe({
setIsLoading(false);
}, []);
- const processErrorResponse = async (response: Response) => {
- const res = await response.json();
- if (res.error.error) {
- // Throw error object if it exists
- throw new Error(res.error.error.message);
- } else {
- throw new Error('Failed to send message');
- }
- };
-
return useMemo(
() => ({
messages,
@@ -289,8 +263,6 @@ export function usePipe({
setMessages: updateMessages,
threadId: threadIdRef.current,
sendMessage,
- setInput,
- setThreadId,
}),
[
messages,
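After this change the hook no longer exposes `setInput` or `setThreadId`, so callers own the input state themselves. A hedged usage sketch — the import path and the `sendMessage(content)` signature are assumptions, not confirmed by this diff:

```ts
import { useState } from 'react';
import { usePipe } from '@baseai/core/react';

// Thin wrapper hook: keep the input locally, let usePipe manage the rest.
export function useChat() {
	const [input, setInput] = useState('');
	const { messages, sendMessage, isLoading, error } = usePipe({
		apiRoute: '/langbase/pipes/run-stream',
	});
	const submit = () => sendMessage(input);
	return { input, setInput, messages, submit, isLoading, error };
}
```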
diff --git a/packages/core/src/utils/get-llm-api-key.ts b/packages/core/src/utils/get-llm-api-key.ts
index d99c5a88..936cf70f 100644
--- a/packages/core/src/utils/get-llm-api-key.ts
+++ b/packages/core/src/utils/get-llm-api-key.ts
@@ -8,7 +8,6 @@ import {
OPEN_AI,
PERPLEXITY,
TOGETHER_AI,
- X_AI,
} from '../data/models';
export function getLLMApiKey(modelProvider: string): string {
@@ -31,8 +30,6 @@ export function getLLMApiKey(modelProvider: string): string {
return process.env.PERPLEXITY_API_KEY || '';
case modelProvider.includes(OLLAMA):
return process.env.OLLAMA_API_KEY || '';
- case modelProvider.includes(X_AI):
- return process.env.XAI_API_KEY || '';
default:
throw new Error(`Unsupported model provider: ${modelProvider}`);
diff --git a/packages/core/src/utils/get-provider.ts b/packages/core/src/utils/get-provider.ts
deleted file mode 100644
index 5ffb2209..00000000
--- a/packages/core/src/utils/get-provider.ts
+++ /dev/null
@@ -1,52 +0,0 @@
-import {
- ANTHROPIC,
- COHERE,
- FIREWORKS_AI,
- GOOGLE,
- GROQ,
- MISTRAL_AI,
- OLLAMA,
- OPEN_AI,
- PERPLEXITY,
- TOGETHER_AI,
- X_AI,
-} from '../data/models';
-
-type Provider =
- | typeof OPEN_AI
- | typeof ANTHROPIC
- | typeof TOGETHER_AI
- | typeof GOOGLE
- | typeof GROQ
- | typeof COHERE
- | typeof FIREWORKS_AI
- | typeof PERPLEXITY;
-
-/**
- * Retrieves the provider based on the given provider string.
- *
- * @param providerString - The provider string.
- * @returns The corresponding provider object.
- * @throws Error if the provider is unknown.
- */
-export function getProvider(providerString: string): Provider {
- const providerMap: {[key: string]: Provider} = {
- openai: OPEN_AI,
- anthropic: ANTHROPIC,
- together: TOGETHER_AI,
- google: GOOGLE,
- groq: GROQ,
- cohere: COHERE,
- fireworks: FIREWORKS_AI,
- perplexity: PERPLEXITY,
- ollama: OLLAMA,
- xai: X_AI,
- mistral: MISTRAL_AI,
- };
-
- const provider = providerMap[providerString.toLowerCase()];
- if (!provider) {
- throw new Error(`Unknown provider: ${providerString}`);
- }
- return provider;
-}
diff --git a/packages/core/src/utils/is-prod.ts b/packages/core/src/utils/is-prod.ts
index 179331bf..226bd5bd 100644
--- a/packages/core/src/utils/is-prod.ts
+++ b/packages/core/src/utils/is-prod.ts
@@ -10,11 +10,8 @@ export function isLocal() {
return process.env.NODE_ENV !== 'production';
}
-export function getApiUrl(prod?: boolean) {
- if (prod) return 'https://api.langbase.com';
- else return 'http://localhost:9000';
-
+export function getApiUrl() {
// TODO: Make local port configurable.
- // return isProd() ? 'https://api.langbase.com' : 'http://localhost:9000';
+ return isProd() ? 'https://api.langbase.com' : 'http://localhost:9000';
// return isProd() ? 'http://localhost:8787' : 'http://localhost:9000';
}
diff --git a/packages/core/src/utils/local-server-running.ts b/packages/core/src/utils/local-server-running.ts
index 210724fd..1dfcf960 100644
--- a/packages/core/src/utils/local-server-running.ts
+++ b/packages/core/src/utils/local-server-running.ts
@@ -1,9 +1,8 @@
-import {getApiUrl, isProd} from './is-prod';
+import {getApiUrl} from './is-prod';
export async function isLocalServerRunning(): Promise<boolean> {
try {
- const prod = isProd();
- const endpoint = getApiUrl(prod);
+ const endpoint = getApiUrl();
const response = await fetch(endpoint, {
mode: 'no-cors',
diff --git a/packages/core/src/utils/to-old-pipe-format.ts b/packages/core/src/utils/to-old-pipe-format.ts
new file mode 100644
index 00000000..7ffbd9fd
--- /dev/null
+++ b/packages/core/src/utils/to-old-pipe-format.ts
@@ -0,0 +1,91 @@
+import type {Pipe, PipeOld} from '../../types/pipes';
+
+import {
+ ANTHROPIC,
+ COHERE,
+ FIREWORKS_AI,
+ GOOGLE,
+ GROQ,
+ OLLAMA,
+ OPEN_AI,
+ PERPLEXITY,
+ TOGETHER_AI,
+} from '../data/models';
+
+type Provider =
+ | typeof OPEN_AI
+ | typeof ANTHROPIC
+ | typeof TOGETHER_AI
+ | typeof GOOGLE
+ | typeof GROQ
+ | typeof COHERE
+ | typeof FIREWORKS_AI
+ | typeof PERPLEXITY;
+
+/**
+ * Converts a new pipe format to an old pipe format.
+ *
+ * @param newFormat - The new pipe format to convert.
+ * @returns The converted old pipe format.
+ */
+export function toOldPipeFormat(newFormat: Pipe): PipeOld {
+ const [providerString, modelName] = newFormat.model.split(':');
+
+ return {
+ name: newFormat.name,
+ description: newFormat.description || '',
+ status: newFormat.status,
+ meta: {
+ stream: newFormat.stream,
+ json: newFormat.json,
+ store: newFormat.store,
+ moderate: newFormat.moderate,
+ },
+ model: {
+ name: modelName,
+ provider: getProvider(providerString),
+ params: {
+ top_p: newFormat.top_p,
+ max_tokens: newFormat.max_tokens,
+ temperature: newFormat.temperature,
+ presence_penalty: newFormat.presence_penalty,
+ frequency_penalty: newFormat.frequency_penalty,
+ stop: newFormat.stop,
+ },
+ tool_choice: newFormat.tool_choice,
+ parallel_tool_calls: newFormat.parallel_tool_calls,
+ },
+ messages: newFormat.messages,
+ variables: newFormat.variables,
+ tools: newFormat.tools,
+ functions: newFormat.tools,
+ memorysets: newFormat.memory.map(memory => memory.name),
+ };
+}
+
+/**
+ * Retrieves the provider based on the given provider string.
+ *
+ * @param providerString - The provider string.
+ * @returns The corresponding provider object.
+ * @throws Error if the provider is unknown.
+ */
+function getProvider(providerString: string): Provider {
+ const providerMap: {[key: string]: Provider} = {
+ openai: OPEN_AI,
+ anthropic: ANTHROPIC,
+ together: TOGETHER_AI,
+ google: GOOGLE,
+ groq: GROQ,
+ cohere: COHERE,
+ fireworks: FIREWORKS_AI,
+ perplexity: PERPLEXITY,
+ ollama: OLLAMA,
+ };
+
+ const provider = providerMap[providerString.toLowerCase()];
+ if (!provider) {
+ throw new Error(`Unknown provider: ${providerString}`);
+ }
+ return provider;
+}
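A hedged sketch of the mapping `toOldPipeFormat` performs: the flat new-format pipe becomes the nested old format, with `model.name` and `model.provider` split out of the `provider:model` string. Every field value below is hypothetical, and the cast papers over any fields this diff does not show:

```ts
import { toOldPipeFormat } from './to-old-pipe-format';
import type { Pipe } from '../../types/pipes';

const oldPipe = toOldPipeFormat({
	name: 'summary',
	description: 'Summarizes text',
	status: 'public',
	model: 'openai:gpt-4o-mini',
	stream: true,
	json: false,
	store: true,
	moderate: true,
	top_p: 1,
	max_tokens: 1000,
	temperature: 0.7,
	presence_penalty: 0,
	frequency_penalty: 0,
	stop: [],
	tool_choice: 'auto',
	parallel_tool_calls: true,
	messages: [{ role: 'system', content: 'Summarize the user input.' }],
	variables: [],
	tools: [],
	memory: [],
} as Pipe);

// oldPipe.model => { name: 'gpt-4o-mini', provider: 'openai', params: {...} }
// oldPipe.memorysets => []
```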
diff --git a/packages/core/types/memory.ts b/packages/core/types/memory.ts
index aca7fbb8..e5d8205a 100644
--- a/packages/core/types/memory.ts
+++ b/packages/core/types/memory.ts
@@ -1,26 +1,13 @@
-export interface GitConfig {
- enabled: boolean;
- include: string[];
- gitignore?: boolean;
- deployedAt?: string;
- embeddedAt?: string;
-}
-
-export interface MemoryDocumentI {
- name: string;
- size: string;
- content: string;
- blob: Blob;
- path: string;
-}
-
-export interface Document {
- meta?: (doc: MemoryDocumentI) => Record<string, string>;
-}
-
export interface Memory {
name: string;
description?: string;
- git: GitConfig;
- documents?: Document;
+ config?: MemoryConfig;
+}
+
+interface MemoryConfig {
+ useGitRepo: boolean;
+ dirToTrack: string;
+ extToTrack: string[];
+ deployedCommitHash?: string;
+ embeddedCommitHash?: string;
}
diff --git a/packages/core/types/model.ts b/packages/core/types/model.ts
index 960cdaad..b559c9b3 100644
--- a/packages/core/types/model.ts
+++ b/packages/core/types/model.ts
@@ -29,16 +29,13 @@ export type TogetherModels =
| 'together:mistralai/Mistral-7B-Instruct-v0.2'
| 'together:mistralai/Mixtral-8x7B-Instruct-v0.1'
| 'together:mistralai/Mixtral-8x22B-Instruct-v0.1'
- | 'together:databricks/dbrx-instruct'
- | 'together:meta-llama/Llama-3.3-70B-Instruct-Turbo';
+ | 'together:databricks/dbrx-instruct';
export type AnthropicModels =
- | 'anthropic:claude-3-5-sonnet-latest'
| 'anthropic:claude-3-5-sonnet-20240620'
| 'anthropic:claude-3-opus-20240229'
| 'anthropic:claude-3-sonnet-20240229'
- | 'anthropic:claude-3-haiku-20240307'
- | 'anthropic:claude-3-5-haiku-20241022';
+ | 'anthropic:claude-3-haiku-20240307';
export type GroqModels =
| 'groq:llama-3.1-70b-versatile'
@@ -47,8 +44,7 @@ export type GroqModels =
| 'groq:llama3-8b-8192'
| 'groq:mixtral-8x7b-32768'
| 'groq:gemma2-9b-it'
- | 'groq:gemma-7b-it'
- | 'groq:llama-3.3-70b-versatile';
+ | 'groq:gemma-7b-it';
export type GoogleModels =
| 'google:gemini-1.5-pro-latest'
@@ -63,8 +59,7 @@ export type FireworksAIModels =
| 'fireworks:llama-v3p1-8b-instruct'
| 'fireworks:llama-v3p1-70b-instruct'
| 'fireworks:llama-v3-70b-instruct'
- | 'fireworks:yi-large'
- | 'fireworks:llama-v3p3-70b-instruct';
+ | 'fireworks:yi-large';
export type PerplexityModels =
| 'perplexity:llama-3.1-sonar-huge-128k-online'
@@ -78,7 +73,5 @@ export type MistralAIModels =
| 'mistral:open-mistral-nemo'
| 'mistral:codestral-latest';
-export type XAIModels = 'xai:grok-beta';
-
// Any string that starts with 'ollama:'
export type OllamaModels = `ollama:${string}`;
diff --git a/packages/core/types/pipes.ts b/packages/core/types/pipes.ts
index d4390e86..4a009035 100644
--- a/packages/core/types/pipes.ts
+++ b/packages/core/types/pipes.ts
@@ -9,7 +9,6 @@ import {
OpenAIModels,
PerplexityModels,
TogetherModels,
- XAIModels,
} from './model';
export type MessageRole = 'function' | 'assistant' | 'system' | 'user' | 'tool';
@@ -19,7 +18,7 @@ export interface Function {
arguments: string;
}
-export interface ToolCallResult {
+export interface ToolCall {
id: string;
type: 'function';
function: Function;
@@ -30,7 +29,7 @@ export interface Message {
content: string | null;
name?: string;
tool_call_id?: string;
- tool_calls?: ToolCallResult[];
+ tool_calls?: ToolCall[];
}
interface ToolFunction {
@@ -44,15 +43,6 @@ interface ToolChoiceFunction {
type ToolChoice = 'auto' | 'required' | ToolChoiceFunction;
-export interface Tools {
- type: 'function';
- function: {
- name: string;
- description?: string;
- parameters?: Record<string, any>;
- };
-}
-
export type Model =
| OpenAIModels
| TogetherModels
@@ -63,7 +53,6 @@ export type Model =
| FireworksAIModels
| PerplexityModels
| MistralAIModels
- | XAIModels
| OllamaModels;
export interface Pipe {
diff --git a/packages/testing/index.ts b/packages/testing/index.ts
new file mode 100644
index 00000000..25ec8c70
--- /dev/null
+++ b/packages/testing/index.ts
@@ -0,0 +1,20 @@
+// import {Pipe} from '@baseai/core';
+// import pipeSummary from '../baseai/pipes/summary';
+
+// const pipe = new Pipe(pipeSummary());
+
+// async function main() {
+// const userMsg = 'Who is an AI Engineer?';
+
+// const response = await pipe.run({
+// messages: [
+// {
+// role: 'user',
+// content: userMsg,
+// },
+// ],
+// });
+// console.log('response: ', response);
+// }
+
+// main();
diff --git a/packages/testing/package.json b/packages/testing/package.json
new file mode 100644
index 00000000..1ae66a2c
--- /dev/null
+++ b/packages/testing/package.json
@@ -0,0 +1,15 @@
+{
+ "private": true,
+ "name": "testing",
+ "main": "index.js",
+ "scripts": {
+ "baseai": "baseai"
+ },
+ "license": "UNLICENSED",
+ "dependencies": {
+ "@baseai/core": "workspace:*"
+ },
+ "devDependencies": {
+ "baseai": "workspace:*"
+ }
+}
diff --git a/pnpm-lock.yaml b/pnpm-lock.yaml
index 8c662e8e..d0a87531 100644
--- a/pnpm-lock.yaml
+++ b/pnpm-lock.yaml
@@ -138,9 +138,6 @@ importers:
gray-matter:
specifier: ^4.0.3
version: 4.0.3
- html2canvas:
- specifier: ^1.4.1
- version: 1.4.1
lucide-react:
specifier: ^0.378.0
version: 0.378.0(react@18.3.1)
@@ -154,14 +151,14 @@ importers:
specifier: ^2.0.0
version: 2.0.0
next:
- specifier: 14.2.35
- version: 14.2.35(@playwright/test@1.47.2)(react-dom@18.3.1(react@18.3.1))(react@18.3.1)
+ specifier: ^14.0.4
+ version: 14.2.5(@playwright/test@1.47.2)(react-dom@18.3.1(react@18.3.1))(react@18.3.1)
next-mdx-remote:
specifier: ^5.0.0
version: 5.0.0(@types/react@18.3.11)(react@18.3.1)
next-themes:
specifier: ^0.2.1
- version: 0.2.1(next@14.2.35(@playwright/test@1.47.2)(react-dom@18.3.1(react@18.3.1))(react@18.3.1))(react-dom@18.3.1(react@18.3.1))(react@18.3.1)
+ version: 0.2.1(next@14.2.5(@playwright/test@1.47.2)(react-dom@18.3.1(react@18.3.1))(react@18.3.1))(react-dom@18.3.1(react@18.3.1))(react@18.3.1)
react:
specifier: ^18.2.0
version: 18.3.1
@@ -264,10 +261,10 @@ importers:
version: 5.1.1(astro@4.15.10(@types/node@22.7.4)(rollup@4.24.0)(terser@5.34.1)(typescript@5.6.2))(tailwindcss@3.4.13(ts-node@10.9.1(@types/node@22.7.4)(typescript@5.6.2)))(ts-node@10.9.1(@types/node@22.7.4)(typescript@5.6.2))
'@astrojs/vercel':
specifier: ^7.8.1
- version: 7.8.1(astro@4.15.10(@types/node@22.7.4)(rollup@4.24.0)(terser@5.34.1)(typescript@5.6.2))(next@14.2.35(@babel/core@7.25.7)(react-dom@18.3.1(react@18.3.1))(react@18.3.1))(react@18.3.1)
+ version: 7.8.1(astro@4.15.10(@types/node@22.7.4)(rollup@4.24.0)(terser@5.34.1)(typescript@5.6.2))(next@14.2.5(@babel/core@7.25.7)(react-dom@18.3.1(react@18.3.1))(react@18.3.1))(react@18.3.1)
'@baseai/core':
- specifier: ^0.9.43
- version: 0.9.43(react@18.3.1)(zod@3.23.8)
+ specifier: ^0.9.15
+ version: 0.9.15(react@18.3.1)(zod@3.23.8)
'@radix-ui/react-slot':
specifier: ^1.1.0
version: 1.1.0(@types/react@18.3.11)(react@18.3.1)
@@ -309,14 +306,14 @@ importers:
version: 5.6.2
devDependencies:
baseai:
- specifier: ^0.9.44
- version: 0.9.44(@types/node@22.7.4)(typescript@5.6.2)
+ specifier: ^0.9.15
+ version: 0.9.15(@types/node@22.7.4)(typescript@5.6.2)
examples/nextjs:
dependencies:
'@baseai/core':
- specifier: ^0.9.43
- version: 0.9.43(react@18.3.1)(zod@3.23.8)
+ specifier: ^0.9.15
+ version: 0.9.15(react@18.3.1)(zod@3.23.8)
'@radix-ui/react-slot':
specifier: ^1.1.0
version: 1.1.0(@types/react@18.3.11)(react@18.3.1)
@@ -336,8 +333,8 @@ importers:
specifier: ^2.0.0
version: 2.0.0
next:
- specifier: 14.2.35
- version: 14.2.35(@playwright/test@1.47.2)(react-dom@18.3.1(react@18.3.1))(react@18.3.1)
+ specifier: 14.2.5
+ version: 14.2.5(@playwright/test@1.47.2)(react-dom@18.3.1(react@18.3.1))(react@18.3.1)
openai:
specifier: ^4.53.0
version: 4.67.1(zod@3.23.8)
@@ -364,8 +361,8 @@ importers:
specifier: ^18
version: 18.3.0
baseai:
- specifier: ^0.9.44
- version: 0.9.44(@types/node@20.16.10)(typescript@5.6.2)
+ specifier: ^0.9.15
+ version: 0.9.15(@types/node@20.16.10)(typescript@5.6.2)
eslint:
specifier: ^8
version: 8.57.1
@@ -388,15 +385,15 @@ importers:
examples/nodejs:
dependencies:
'@baseai/core':
- specifier: ^0.9.43
- version: 0.9.43(react@18.3.1)(zod@3.23.8)
+ specifier: ^0.9.15
+ version: 0.9.15(react@18.3.1)(zod@3.23.8)
dotenv:
specifier: ^16.4.5
version: 16.4.5
devDependencies:
baseai:
- specifier: ^0.9.44
- version: 0.9.44(@types/node@22.7.4)(typescript@5.6.2)
+ specifier: ^0.9.15
+ version: 0.9.15(@types/node@22.7.4)(typescript@5.6.2)
tsx:
specifier: ^4.19.0
version: 4.19.1
@@ -404,8 +401,8 @@ importers:
examples/remix:
dependencies:
'@baseai/core':
- specifier: ^0.9.43
- version: 0.9.43(react@18.3.1)(zod@3.23.8)
+ specifier: ^0.9.15
+ version: 0.9.15(react@18.3.1)(zod@3.23.8)
'@radix-ui/react-slot':
specifier: ^1.1.0
version: 1.1.0(@types/react@18.3.11)(react@18.3.1)
@@ -465,8 +462,8 @@ importers:
specifier: ^10.4.20
version: 10.4.20(postcss@8.4.47)
baseai:
- specifier: ^0.9.44
- version: 0.9.44(@types/node@22.7.4)(typescript@5.6.2)
+ specifier: ^0.9.15
+ version: 0.9.15(@types/node@22.7.4)(typescript@5.6.2)
eslint:
specifier: ^8.38.0
version: 8.57.1
@@ -525,8 +522,8 @@ importers:
specifier: ^8.0.0
version: 8.0.0
chalk:
- specifier: 5.6.0
- version: 5.6.0
+ specifier: ^5.3.0
+ version: 5.3.0
cli-alerts:
specifier: ^2.0.0
version: 2.0.0
@@ -548,6 +545,9 @@ importers:
compute-cosine-similarity:
specifier: ^1.1.0
version: 1.1.0
+ conf:
+ specifier: ^13.0.1
+ version: 13.0.1
cosmiconfig:
specifier: ^9.0.0
version: 9.0.0(typescript@5.6.2)
@@ -560,9 +560,6 @@ importers:
execa:
specifier: ^9.4.0
version: 9.4.0
- fast-glob:
- specifier: ^3.3.2
- version: 3.3.2
figures:
specifier: ^6.1.0
version: 6.1.0
@@ -704,6 +701,16 @@ importers:
specifier: 1.6.0
version: 1.6.0(@edge-runtime/vm@4.0.3)(@types/node@22.7.4)(jsdom@25.0.1(canvas@2.11.2))(terser@5.34.1)
+ packages/testing:
+ dependencies:
+ '@baseai/core':
+ specifier: workspace:*
+ version: link:../core
+ devDependencies:
+ baseai:
+ specifier: workspace:*
+ version: link:../baseai
+
tools/eslint-config:
devDependencies:
'@next/eslint-plugin-next':
@@ -1033,8 +1040,8 @@ packages:
resolution: {integrity: sha512-vwIVdXG+j+FOpkwqHRcBgHLYNL7XMkufrlaFvL9o6Ai9sJn9+PdyIL5qa0XzTZw084c+u9LOls53eoZWP/W5WQ==}
engines: {node: '>=6.9.0'}
- '@baseai/core@0.9.43':
- resolution: {integrity: sha512-jb0EUJjWqvvqq6Kh7xKTdCwgez4/hoJH3B8VaoWYLXYquOpCsGmVe7hgtt22bgJ2r6LPucglLShGz5uv9DbbKA==}
+ '@baseai/core@0.9.15':
+ resolution: {integrity: sha512-OUHIpZ9HAAKnn6U9UPYa4qRp8Dj1JohXQmWPwPmgXdBxwy/2t4zGk4sqb5OfgVe4+XGca5T1tyg+W267nvNcaw==}
engines: {node: '>=18'}
peerDependencies:
react: ^18 || ^19
@@ -2173,8 +2180,8 @@ packages:
'@microsoft/tsdoc@0.14.2':
resolution: {integrity: sha512-9b8mPpKrfeGRuhFH5iO1iwCLeIIsV6+H1sRfxbkoGXIyQE2BTsPd9zqSqQJ+pv5sJ/hT5M1zvOFL02MnEezFug==}
- '@next/env@14.2.35':
- resolution: {integrity: sha512-DuhvCtj4t9Gwrx80dmz2F4t/zKQ4ktN8WrMwOuVzkJfBilwAwGr6v16M5eI8yCuZ63H9TTuEU09Iu2HqkzFPVQ==}
+ '@next/env@14.2.5':
+ resolution: {integrity: sha512-/zZGkrTOsraVfYjGP8uM0p6r0BDT6xWpkjdVbcz66PJVSpwXX3yNiRycxAuDfBKGWBrZBXRuK/YVlkNgxHGwmA==}
'@next/eslint-plugin-next@14.2.14':
resolution: {integrity: sha512-kV+OsZ56xhj0rnTn6HegyTGkoa16Mxjrpk7pjWumyB2P8JVQb8S9qtkjy/ye0GnTr4JWtWG4x/2qN40lKZ3iVQ==}
@@ -2193,56 +2200,56 @@ packages:
'@mdx-js/react':
optional: true
- '@next/swc-darwin-arm64@14.2.33':
- resolution: {integrity: sha512-HqYnb6pxlsshoSTubdXKu15g3iivcbsMXg4bYpjL2iS/V6aQot+iyF4BUc2qA/J/n55YtvE4PHMKWBKGCF/+wA==}
+ '@next/swc-darwin-arm64@14.2.5':
+ resolution: {integrity: sha512-/9zVxJ+K9lrzSGli1///ujyRfon/ZneeZ+v4ptpiPoOU+GKZnm8Wj8ELWU1Pm7GHltYRBklmXMTUqM/DqQ99FQ==}
engines: {node: '>= 10'}
cpu: [arm64]
os: [darwin]
- '@next/swc-darwin-x64@14.2.33':
- resolution: {integrity: sha512-8HGBeAE5rX3jzKvF593XTTFg3gxeU4f+UWnswa6JPhzaR6+zblO5+fjltJWIZc4aUalqTclvN2QtTC37LxvZAA==}
+ '@next/swc-darwin-x64@14.2.5':
+ resolution: {integrity: sha512-vXHOPCwfDe9qLDuq7U1OYM2wUY+KQ4Ex6ozwsKxp26BlJ6XXbHleOUldenM67JRyBfVjv371oneEvYd3H2gNSA==}
engines: {node: '>= 10'}
cpu: [x64]
os: [darwin]
- '@next/swc-linux-arm64-gnu@14.2.33':
- resolution: {integrity: sha512-JXMBka6lNNmqbkvcTtaX8Gu5by9547bukHQvPoLe9VRBx1gHwzf5tdt4AaezW85HAB3pikcvyqBToRTDA4DeLw==}
+ '@next/swc-linux-arm64-gnu@14.2.5':
+ resolution: {integrity: sha512-vlhB8wI+lj8q1ExFW8lbWutA4M2ZazQNvMWuEDqZcuJJc78iUnLdPPunBPX8rC4IgT6lIx/adB+Cwrl99MzNaA==}
engines: {node: '>= 10'}
cpu: [arm64]
os: [linux]
- '@next/swc-linux-arm64-musl@14.2.33':
- resolution: {integrity: sha512-Bm+QulsAItD/x6Ih8wGIMfRJy4G73tu1HJsrccPW6AfqdZd0Sfm5Imhgkgq2+kly065rYMnCOxTBvmvFY1BKfg==}
+ '@next/swc-linux-arm64-musl@14.2.5':
+ resolution: {integrity: sha512-NpDB9NUR2t0hXzJJwQSGu1IAOYybsfeB+LxpGsXrRIb7QOrYmidJz3shzY8cM6+rO4Aojuef0N/PEaX18pi9OA==}
engines: {node: '>= 10'}
cpu: [arm64]
os: [linux]
- '@next/swc-linux-x64-gnu@14.2.33':
- resolution: {integrity: sha512-FnFn+ZBgsVMbGDsTqo8zsnRzydvsGV8vfiWwUo1LD8FTmPTdV+otGSWKc4LJec0oSexFnCYVO4hX8P8qQKaSlg==}
+ '@next/swc-linux-x64-gnu@14.2.5':
+ resolution: {integrity: sha512-8XFikMSxWleYNryWIjiCX+gU201YS+erTUidKdyOVYi5qUQo/gRxv/3N1oZFCgqpesN6FPeqGM72Zve+nReVXQ==}
engines: {node: '>= 10'}
cpu: [x64]
os: [linux]
- '@next/swc-linux-x64-musl@14.2.33':
- resolution: {integrity: sha512-345tsIWMzoXaQndUTDv1qypDRiebFxGYx9pYkhwY4hBRaOLt8UGfiWKr9FSSHs25dFIf8ZqIFaPdy5MljdoawA==}
+ '@next/swc-linux-x64-musl@14.2.5':
+ resolution: {integrity: sha512-6QLwi7RaYiQDcRDSU/os40r5o06b5ue7Jsk5JgdRBGGp8l37RZEh9JsLSM8QF0YDsgcosSeHjglgqi25+m04IQ==}
engines: {node: '>= 10'}
cpu: [x64]
os: [linux]
- '@next/swc-win32-arm64-msvc@14.2.33':
- resolution: {integrity: sha512-nscpt0G6UCTkrT2ppnJnFsYbPDQwmum4GNXYTeoTIdsmMydSKFz9Iny2jpaRupTb+Wl298+Rh82WKzt9LCcqSQ==}
+ '@next/swc-win32-arm64-msvc@14.2.5':
+ resolution: {integrity: sha512-1GpG2VhbspO+aYoMOQPQiqc/tG3LzmsdBH0LhnDS3JrtDx2QmzXe0B6mSZZiN3Bq7IOMXxv1nlsjzoS1+9mzZw==}
engines: {node: '>= 10'}
cpu: [arm64]
os: [win32]
- '@next/swc-win32-ia32-msvc@14.2.33':
- resolution: {integrity: sha512-pc9LpGNKhJ0dXQhZ5QMmYxtARwwmWLpeocFmVG5Z0DzWq5Uf0izcI8tLc+qOpqxO1PWqZ5A7J1blrUIKrIFc7Q==}
+ '@next/swc-win32-ia32-msvc@14.2.5':
+ resolution: {integrity: sha512-Igh9ZlxwvCDsu6438FXlQTHlRno4gFpJzqPjSIBZooD22tKeI4fE/YMRoHVJHmrQ2P5YL1DoZ0qaOKkbeFWeMg==}
engines: {node: '>= 10'}
cpu: [ia32]
os: [win32]
- '@next/swc-win32-x64-msvc@14.2.33':
- resolution: {integrity: sha512-nOjfZMy8B94MdisuzZo9/57xuFVLHJaDj5e/xrduJp9CV2/HrfxTRH2fbyLe+K9QT41WBLUd4iXX3R7jBp0EUg==}
+ '@next/swc-win32-x64-msvc@14.2.5':
+ resolution: {integrity: sha512-tEQ7oinq1/CjSG9uSTerca3v4AZ+dFa+4Yu6ihaG8Ud8ddqLQgFGcnwYls13H5X5CPDPZJdYxyeMui6muOLd4g==}
engines: {node: '>= 10'}
cpu: [x64]
os: [win32]
@@ -3466,6 +3473,14 @@ packages:
ajv:
optional: true
+ ajv-formats@3.0.1:
+ resolution: {integrity: sha512-8iUql50EUR+uUcdRQ3HDqa6EVyo3docL8g5WJ3FNcWmu62IbkGUue/pEyLBW8VGKKucTPgqeks4fIU1DA4yowQ==}
+ peerDependencies:
+ ajv: ^8.0.0
+ peerDependenciesMeta:
+ ajv:
+ optional: true
+
ajv-keywords@3.5.2:
resolution: {integrity: sha512-5p6WTN0DdTGVQk6VjcEju19IgaHudalcfabD7yhDGeA6bcQnmL+CpveLJq/3hvfwd1aof6L386Ougkx6RfyMIQ==}
peerDependencies:
@@ -3652,6 +3667,9 @@ packages:
asynckit@0.4.0:
resolution: {integrity: sha512-Oei9OH4tRh0YqU3GxhX79dM/mwVgvbZJaSNaRk+bshkj0S5cfHcgYakreBjrHwatXKbz+IoIdYLxrKim2MjW0Q==}
+ atomically@2.0.3:
+ resolution: {integrity: sha512-kU6FmrwZ3Lx7/7y3hPS5QnbJfaohcIul5fGqf7ok+4KklIEk9tJ0C2IQPdacSbVUWv6zVHXEBWoWd6NrVMT7Cw==}
+
autoprefixer@10.4.20:
resolution: {integrity: sha512-XY25y5xSv/wEoqzDyXXME4AFfkZI0P23z6Fs3YgymDnKJkCGOnkL0iTxCa85UTqaSgfcqyf3UA6+c7wUvx/16g==}
engines: {node: ^10 || ^12 || >=14}
@@ -3680,15 +3698,11 @@ packages:
base-64@1.0.0:
resolution: {integrity: sha512-kwDPIFCGx0NZHog36dj+tHiwP4QMzsZ3AgMViUBKI0+V5n4U0ufTCUMhnQ04diaRI8EX/QcPfql7zlhZ7j4zgg==}
- base64-arraybuffer@1.0.2:
- resolution: {integrity: sha512-I3yl4r9QB5ZRY3XuJVEPfc2XhZO6YweFPI+UovAzn+8/hb3oJ6lnysaFcjVpkCPfVWFUDvoZ8kmVDP7WyRtYtQ==}
- engines: {node: '>= 0.6.0'}
-
base64-js@1.5.1:
resolution: {integrity: sha512-AKpaYlHn8t4SVbOHCy+b5+KKgvR4vrsD8vbvrbiQJps7fKDTkjkDry6ji0rUJjC0kzbNePLwzxq8iypo41qeWA==}
- baseai@0.9.44:
- resolution: {integrity: sha512-NhRxhWEBW/pmaODcCMFAXNZgM0XVQHrUsEeUkmXuHV4UXSfiJPg/W1r6yDwGq9bCz/pqPNXdHlU9vy0+0GsOTw==}
+ baseai@0.9.15:
+ resolution: {integrity: sha512-5IeI6zIPL/UkhSoVvBATv2NA0px2ebl28i+FvKNZfj8ttOZMJpfOJdRpQ+yYdd9M3Mz96y1wMoCiHurV7bmv0g==}
hasBin: true
basic-auth@2.0.1:
@@ -3845,10 +3859,6 @@ packages:
resolution: {integrity: sha512-dLitG79d+GV1Nb/VYcCDFivJeK1hiukt9QjRNVOsUtTy1rR1YJsmpGGTZ3qJos+uw7WmWF4wUwBd9jxjocFC2w==}
engines: {node: ^12.17.0 || ^14.13 || >=16.0.0}
- chalk@5.6.0:
- resolution: {integrity: sha512-46QrSQFyVSEyYAgQ22hQ+zDa60YHA4fBstHmtSApj1Y5vKtG27fWowW03jCk5KcbXEWPZUIR894aARCA/G1kfQ==}
- engines: {node: ^12.17.0 || ^14.13 || >=16.0.0}
-
character-entities-html4@2.1.0:
resolution: {integrity: sha512-1v7fgQRj6hnSwFpq1Eu0ynr/CDEw0rXo2B61qXrLNdHZmPKgb7fqS1a2JwF0rISo9q77jDI8VMEHoApn8qDoZA==}
@@ -4057,6 +4067,10 @@ packages:
concat-map@0.0.1:
resolution: {integrity: sha512-/Srv4dswyQNBfohGpz9o6Yb3Gz3SrUDqBH5rTuhGR7ahtlbYKnVxw2bCFMRljaA7EXHaXZ8wsHdodFvbkhKmqg==}
+ conf@13.0.1:
+ resolution: {integrity: sha512-l9Uwc9eOnz39oADzGO2cSBDi7siv8lwO+31ocQ2nOJijnDiW3pxqm9VV10DPYUO28wW83DjABoUqY1nfHRR2hQ==}
+ engines: {node: '>=18'}
+
confbox@0.1.7:
resolution: {integrity: sha512-uJcB/FKZtBMCJpK8MQji6bJHgu1tixKPxRLeGkNzBoOZzpnZUJm0jm2/sBDWcuBx1dYgxV4JU+g5hmNxCyAmdA==}
@@ -4136,9 +4150,6 @@ packages:
resolution: {integrity: sha512-iRDPJKUPVEND7dHPO8rkbOnPpyDygcDFtWjpeWNCgy8WP2rXcxXL8TskReQl6OrB2G7+UJrags1q15Fudc7G6w==}
engines: {node: '>= 8'}
- css-line-break@2.1.0:
- resolution: {integrity: sha512-FHcKFCZcAha3LwfVBhCQbW2nCNbkZXn7KVUJcsT5/P8YmfsVja0FMPJr0B903j/E69HUphKiV9iQArX8SDYA4w==}
-
css-what@6.1.0:
resolution: {integrity: sha512-HTUrgRJ7r4dsZKU6GjmpfRK1O76h97Z8MfS1G0FozR+oF2kG6Vfe8JE6zwrkbxigziPHinCJ+gCPjA9EaBDtRw==}
engines: {node: '>= 6'}
@@ -4226,6 +4237,10 @@ packages:
dayjs@1.11.13:
resolution: {integrity: sha512-oaMBel6gjolK862uaPQOVTA7q3TZhuSvuMQAAglQDOWYO9A91IrAOUJEyKVlqJlHE0vq5p5UXxzdPfMH/x6xNg==}
+ debounce-fn@6.0.0:
+ resolution: {integrity: sha512-rBMW+F2TXryBwB54Q0d8drNEI+TfoS9JpNTAoVpukbWEhjXQq4rySFYLaqXMFXwdv61Zb2OHtj5bviSoimqxRQ==}
+ engines: {node: '>=18'}
+
debug@2.6.9:
resolution: {integrity: sha512-bC7ElrdJaJnPbAP+1EotYvqZsb3ecl5wi6Bfi6BJTUcNowp6cvspg0jXznRTKDjm/E7AdgFBVeAPVMNcKGsHMA==}
peerDependencies:
@@ -4407,6 +4422,10 @@ packages:
dom-accessibility-api@0.5.16:
resolution: {integrity: sha512-X7BJ2yElsnOJ30pZF4uIIDfBEVgF4XEBxL9Bxhy6dnrm5hkzqmsWHGTiHqRiITNhMyFLyAiWndIJP7Z1NTteDg==}
+ dot-prop@9.0.0:
+ resolution: {integrity: sha512-1gxPBJpI/pcjQhKgIU91II6Wkay+dLcN3M6rf2uwP8hRur3HtQXjVrdAK3sjC0piaEuxzMwjXChcETiJl47lAQ==}
+ engines: {node: '>=18'}
+
dotenv@16.0.3:
resolution: {integrity: sha512-7GO6HghkA5fYG9TYnNxi14/7K9f5occMlp3zXAuSxn7CKCxt9xbNWG7yF8hTCSUchlfWSe3uLmlPfigevRItzQ==}
engines: {node: '>=12'}
@@ -4482,6 +4501,10 @@ packages:
resolution: {integrity: sha512-+h1lkLKhZMTYjog1VEpJNG7NZJWcuc2DDk/qsqSTRRCOXiLjeQ1d1/udrUGhqMxUgAlwKNZ0cf2uqan5GLuS2A==}
engines: {node: '>=6'}
+ env-paths@3.0.0:
+ resolution: {integrity: sha512-dtJUTepzMW3Lm/NPxRf3wP4642UWhjL2sQxc+ym2YMj1m/H2zDNQOlezafzkHwn6sMstjHTwG6iQQsctDW/b1A==}
+ engines: {node: ^12.20.0 || ^14.13.1 || >=16.0.0}
+
environment@1.1.0:
resolution: {integrity: sha512-xUtoPkMggbz0MPyPiIWr1Kp4aeWJjDZ6SMvURhimjdZgsRuDplF5/s9hcgGhyXMhs+6vpnuoiZ2kFiu3FMnS8Q==}
engines: {node: '>=18'}
@@ -5594,10 +5617,6 @@ packages:
html-void-elements@3.0.0:
resolution: {integrity: sha512-bEqo66MRXsUGxWHV5IP0PUiAWwoEjba4VCzg0LjFJBpchPaTfyfCKTG6bc5F8ucKec3q5y6qOdGyYTSBEvhCrg==}
- html2canvas@1.4.1:
- resolution: {integrity: sha512-fPU6BHNpsyIhr8yyMpTLLxAbkaK8ArIBcmZIRiBLiDhjeqvXolaEmDGmELFuX9I4xDcaKKcJl+TKZLqruBbmWA==}
- engines: {node: '>=8.0.0'}
-
http-cache-semantics@4.1.1:
resolution: {integrity: sha512-er295DKPVsV82j5kw1Gjt+ADA/XYHsajl82cGNQG2eyoPkvgUhX+nDIyelzhIWbbsXP39EHcI6l5tYs2FYqYXQ==}
@@ -6056,6 +6075,9 @@ packages:
json-schema-traverse@1.0.0:
resolution: {integrity: sha512-NM8/P9n3XjXhIZn1lLhkFaACTOURQXjWhV4BA/RnOv8xvgqtqpAX9IO4mRQxSx1Rlo4tqzeqb0sOlruaOy3dug==}
+ json-schema-typed@8.0.1:
+ resolution: {integrity: sha512-XQmWYj2Sm4kn4WeTYvmpKEbyPsL7nBsb647c7pMe6l02/yx2+Jfc4dT6UZkEXnIUb5LhD55r2HPsJ1milQ4rDg==}
+
json-stable-stringify-without-jsonify@1.0.1:
resolution: {integrity: sha512-Bdboy+l7tA3OGW6FjyFHWkP5LuByj1Tk33Ljyq0axyzdk9//JSi2u3fP1QSmd1KNwq6VOKYGlAu87CisVir6Pw==}
@@ -6799,8 +6821,8 @@ packages:
react: '*'
react-dom: '*'
- next@14.2.35:
- resolution: {integrity: sha512-KhYd2Hjt/O1/1aZVX3dCwGXM1QmOV4eNM2UTacK5gipDdPN/oHHK/4oVGy7X8GMfPMsUTUEmGlsy0EY1YGAkig==}
+ next@14.2.5:
+ resolution: {integrity: sha512-0f8aRfBVL+mpzfBjYfQuLWh2WyAwtJXCRfkPF4UJ5qd2YwrHczsrSzXU4tRMV0OAxR8ZJZWPFn6uhSC56UTsLA==}
engines: {node: '>=18.17.0'}
hasBin: true
peerDependencies:
@@ -8077,7 +8099,6 @@ packages:
source-map@0.8.0-beta.0:
resolution: {integrity: sha512-2ymg6oRBpebeZi9UUNsgQ89bhx01TcTkmNTGnNO88imTmbSgy4nfujrgVEFKWpMTEGA11EDkTt7mqObTPdigIA==}
engines: {node: '>= 8'}
- deprecated: The work that was done in this beta branch won't be included in future versions
sourcemap-codec@1.4.8:
resolution: {integrity: sha512-9NykojV5Uih4lgo5So5dtw+f0JgJX30KCNI8gwhz2J9A15wD0Ml6tjHKwf6fTSa6fAdVBdZeNOs9eJ71qCk8vA==}
@@ -8252,6 +8273,9 @@ packages:
strip-literal@2.1.0:
resolution: {integrity: sha512-Op+UycaUt/8FbN/Z2TWPBLge3jWrP3xj10f3fnYxf052bKuS3EKs1ZQcVGjnEMdsNVAM+plXRdmjrZ/KgG3Skw==}
+ stubborn-fs@1.2.5:
+ resolution: {integrity: sha512-H2N9c26eXjzL/S/K+i/RHHcFanE74dptvvjM8iwzwbVcWY/zjBbgRqF3K0DY4+OD+uTTASTBvDoxPDaPN02D7g==}
+
style-to-object@0.4.4:
resolution: {integrity: sha512-HYNoHZa2GorYNyqiCaBgsxvcJIn7OHq6inEga+E6Ke3m5JkoqpQbnFssk4jwe+K7AhGa2fcha4wSOf1Kn01dMg==}
@@ -8359,9 +8383,6 @@ packages:
engines: {node: '>=10'}
hasBin: true
- text-segmentation@1.0.3:
- resolution: {integrity: sha512-iOiPUo/BGnZ6+54OsWxZidGCsdU8YbE4PSpdPinp7DeMtUJNJBoJ/ouUSTJjHkh1KntHaltHl/gDs2FC4i5+Nw==}
-
text-table@0.2.0:
resolution: {integrity: sha512-N+8UisAXDGk8PFXP4HAzVR9nbfmVJ3zYLAWiTIoqC5v5isinhr+r5uaO8+7r3BMfuNIufIsA7RdpVgacC2cSpw==}
@@ -8650,6 +8671,10 @@ packages:
uid-promise@1.0.0:
resolution: {integrity: sha512-R8375j0qwXyIu/7R0tjdF06/sElHqbmdmWC9M2qQHpEVbvE4I5+38KJI7LUUmQMp7NVq4tKHiBMkT0NFM453Ig==}
+ uint8array-extras@1.4.0:
+ resolution: {integrity: sha512-ZPtzy0hu4cZjv3z5NW9gfKnNLjoz4y6uv4HlelAjDK7sY/xOkKZv9xK/WQpcsBB3jEybChz9DPC2U/+cusjJVQ==}
+ engines: {node: '>=18'}
+
ultrahtml@1.5.3:
resolution: {integrity: sha512-GykOvZwgDWZlTQMtp5jrD4BVL+gNn2NVlVafjcFUJ7taY20tqYdwdoWBFy6GBJsNTZe1GkGPkSl5knQAjtgceg==}
@@ -8813,9 +8838,6 @@ packages:
resolution: {integrity: sha512-pMZTvIkT1d+TFGvDOqodOclx0QWkkgi6Tdoa8gC8ffGAAqz9pzPTZWAybbsHHoED/ztMtkv/VoYTYyShUn81hA==}
engines: {node: '>= 0.4.0'}
- utrie@1.0.2:
- resolution: {integrity: sha512-1MLa5ouZiOmQzUbjbu9VmjLzn1QLXBhwpUa7kdLUQK+KQ5KA9I1vk5U4YHe/X2Ch7PYnJfWuWT+VbuxbGwljhw==}
-
uuid@10.0.0:
resolution: {integrity: sha512-8XkAphELsDnEGrDxUOHB3RGvXz6TeuYSGEZBOjtTtPm2lwhGBjLgOzLHB63IUWfBpNucQjND6d3AOudO+H3RWQ==}
hasBin: true
@@ -9144,6 +9166,9 @@ packages:
whatwg-url@7.1.0:
resolution: {integrity: sha512-WUu7Rg1DroM7oQvGWfOiAK21n74Gg+T4elXEQYkOhtyLeWiJFoOGLXPKI/9gzIie9CtwVLm8wtw6YJdKyxSjeg==}
+ when-exit@2.1.3:
+ resolution: {integrity: sha512-uVieSTccFIr/SFQdFWN/fFaQYmV37OKtuaGphMAzi4DmmUlrvRBJW5WSLkHyjNQY/ePJMz3LoiX9R3yy1Su6Hw==}
+
which-boxed-primitive@1.0.2:
resolution: {integrity: sha512-bwZdv0AKLpplFY2KZRX6TvyuN7ojjr7lwkg6ml0roIy9YeuSr7JS372qlNW18UQYzgYK9ziGcerWqZOmEn9VNg==}
@@ -9589,10 +9614,10 @@ snapshots:
transitivePeerDependencies:
- supports-color
- '@astrojs/vercel@7.8.1(astro@4.15.10(@types/node@22.7.4)(rollup@4.24.0)(terser@5.34.1)(typescript@5.6.2))(next@14.2.35(@babel/core@7.25.7)(react-dom@18.3.1(react@18.3.1))(react@18.3.1))(react@18.3.1)':
+ '@astrojs/vercel@7.8.1(astro@4.15.10(@types/node@22.7.4)(rollup@4.24.0)(terser@5.34.1)(typescript@5.6.2))(next@14.2.5(@babel/core@7.25.7)(react-dom@18.3.1(react@18.3.1))(react@18.3.1))(react@18.3.1)':
dependencies:
'@astrojs/internal-helpers': 0.4.1
- '@vercel/analytics': 1.3.1(next@14.2.35(@babel/core@7.25.7)(react-dom@18.3.1(react@18.3.1))(react@18.3.1))(react@18.3.1)
+ '@vercel/analytics': 1.3.1(next@14.2.5(@babel/core@7.25.7)(react-dom@18.3.1(react@18.3.1))(react@18.3.1))(react@18.3.1)
'@vercel/edge': 1.1.2
'@vercel/nft': 0.27.4
astro: 4.15.10(@types/node@22.7.4)(rollup@4.24.0)(terser@5.34.1)(typescript@5.6.2)
@@ -9846,7 +9871,7 @@ snapshots:
'@babel/helper-validator-identifier': 7.25.7
to-fast-properties: 2.0.0
- '@baseai/core@0.9.43(react@18.3.1)(zod@3.23.8)':
+ '@baseai/core@0.9.15(react@18.3.1)(zod@3.23.8)':
dependencies:
openai: 4.67.1(zod@3.23.8)
optionalDependencies:
@@ -10797,7 +10822,7 @@ snapshots:
'@microsoft/tsdoc@0.14.2': {}
- '@next/env@14.2.35': {}
+ '@next/env@14.2.5': {}
'@next/eslint-plugin-next@14.2.14':
dependencies:
@@ -10814,31 +10839,31 @@ snapshots:
'@mdx-js/loader': 3.0.1(webpack@5.95.0(esbuild@0.17.19))
'@mdx-js/react': 3.0.1(@types/react@18.3.11)(react@18.3.1)
- '@next/swc-darwin-arm64@14.2.33':
+ '@next/swc-darwin-arm64@14.2.5':
optional: true
- '@next/swc-darwin-x64@14.2.33':
+ '@next/swc-darwin-x64@14.2.5':
optional: true
- '@next/swc-linux-arm64-gnu@14.2.33':
+ '@next/swc-linux-arm64-gnu@14.2.5':
optional: true
- '@next/swc-linux-arm64-musl@14.2.33':
+ '@next/swc-linux-arm64-musl@14.2.5':
optional: true
- '@next/swc-linux-x64-gnu@14.2.33':
+ '@next/swc-linux-x64-gnu@14.2.5':
optional: true
- '@next/swc-linux-x64-musl@14.2.33':
+ '@next/swc-linux-x64-musl@14.2.5':
optional: true
- '@next/swc-win32-arm64-msvc@14.2.33':
+ '@next/swc-win32-arm64-msvc@14.2.5':
optional: true
- '@next/swc-win32-ia32-msvc@14.2.33':
+ '@next/swc-win32-ia32-msvc@14.2.5':
optional: true
- '@next/swc-win32-x64-msvc@14.2.33':
+ '@next/swc-win32-x64-msvc@14.2.5':
optional: true
'@nicolo-ribaudo/eslint-scope-5-internals@5.1.1-v1':
@@ -11593,7 +11618,7 @@ snapshots:
'@types/node-fetch@2.6.11':
dependencies:
- '@types/node': 22.7.4
+ '@types/node': 20.16.10
form-data: 4.0.0
'@types/node-forge@1.3.11':
@@ -11956,11 +11981,11 @@ snapshots:
'@vanilla-extract/private@1.0.6': {}
- '@vercel/analytics@1.3.1(next@14.2.35(@babel/core@7.25.7)(react-dom@18.3.1(react@18.3.1))(react@18.3.1))(react@18.3.1)':
+ '@vercel/analytics@1.3.1(next@14.2.5(@babel/core@7.25.7)(react-dom@18.3.1(react@18.3.1))(react@18.3.1))(react@18.3.1)':
dependencies:
server-only: 0.0.1
optionalDependencies:
- next: 14.2.35(@babel/core@7.25.7)(react-dom@18.3.1(react@18.3.1))(react@18.3.1)
+ next: 14.2.5(@babel/core@7.25.7)(react-dom@18.3.1(react@18.3.1))(react@18.3.1)
react: 18.3.1
'@vercel/build-utils@8.4.5': {}
@@ -12424,6 +12449,10 @@ snapshots:
optionalDependencies:
ajv: 8.17.1
+ ajv-formats@3.0.1(ajv@8.17.1):
+ optionalDependencies:
+ ajv: 8.17.1
+
ajv-keywords@3.5.2(ajv@6.12.6):
dependencies:
ajv: 6.12.6
@@ -12711,6 +12740,11 @@ snapshots:
asynckit@0.4.0: {}
+ atomically@2.0.3:
+ dependencies:
+ stubborn-fs: 1.2.5
+ when-exit: 2.1.3
+
autoprefixer@10.4.20(postcss@8.4.47):
dependencies:
browserslist: 4.24.0
@@ -12735,11 +12769,9 @@ snapshots:
base-64@1.0.0: {}
- base64-arraybuffer@1.0.2: {}
-
base64-js@1.5.1: {}
- baseai@0.9.44(@types/node@20.16.10)(typescript@5.6.2):
+ baseai@0.9.15(@types/node@20.16.10)(typescript@5.6.2):
dependencies:
'@antfu/ni': 0.23.0
'@clack/core': 0.3.4
@@ -12748,7 +12780,7 @@ snapshots:
'@hono/zod-openapi': 0.16.2(hono@4.6.3)(zod@3.23.8)
'@sindresorhus/slugify': 2.2.1
camelcase: 8.0.0
- chalk: 5.6.0
+ chalk: 5.3.0
cli-alerts: 2.0.0
cli-handle-error: 4.4.0
cli-handle-unhandled: 1.1.1
@@ -12756,11 +12788,11 @@ snapshots:
cli-table3: 0.6.5
cli-welcome: 3.0.0
compute-cosine-similarity: 1.1.0
+ conf: 13.0.1
cosmiconfig: 9.0.0(typescript@5.6.2)
cosmiconfig-typescript-loader: 5.0.0(@types/node@20.16.10)(cosmiconfig@9.0.0(typescript@5.6.2))(typescript@5.6.2)
dotenv: 16.4.5
execa: 9.4.0
- fast-glob: 3.3.2
figures: 6.1.0
get-package-json-file: 2.0.0
hono: 4.6.3
@@ -12787,7 +12819,7 @@ snapshots:
- supports-color
- typescript
- baseai@0.9.44(@types/node@22.7.4)(typescript@5.6.2):
+ baseai@0.9.15(@types/node@22.7.4)(typescript@5.6.2):
dependencies:
'@antfu/ni': 0.23.0
'@clack/core': 0.3.4
@@ -12796,7 +12828,7 @@ snapshots:
'@hono/zod-openapi': 0.16.2(hono@4.6.3)(zod@3.23.8)
'@sindresorhus/slugify': 2.2.1
camelcase: 8.0.0
- chalk: 5.6.0
+ chalk: 5.3.0
cli-alerts: 2.0.0
cli-handle-error: 4.4.0
cli-handle-unhandled: 1.1.1
@@ -12804,11 +12836,11 @@ snapshots:
cli-table3: 0.6.5
cli-welcome: 3.0.0
compute-cosine-similarity: 1.1.0
+ conf: 13.0.1
cosmiconfig: 9.0.0(typescript@5.6.2)
cosmiconfig-typescript-loader: 5.0.0(@types/node@22.7.4)(cosmiconfig@9.0.0(typescript@5.6.2))(typescript@5.6.2)
dotenv: 16.4.5
execa: 9.4.0
- fast-glob: 3.3.2
figures: 6.1.0
get-package-json-file: 2.0.0
hono: 4.6.3
@@ -12880,7 +12912,7 @@ snapshots:
dependencies:
ansi-align: 3.0.1
camelcase: 8.0.0
- chalk: 5.6.0
+ chalk: 5.3.0
cli-boxes: 3.0.0
string-width: 7.2.0
type-fest: 4.26.1
@@ -13010,7 +13042,7 @@ snapshots:
chalk-template@1.1.0:
dependencies:
- chalk: 5.6.0
+ chalk: 5.3.0
chalk@2.4.2:
dependencies:
@@ -13030,8 +13062,6 @@ snapshots:
chalk@5.3.0: {}
- chalk@5.6.0: {}
-
character-entities-html4@2.1.0: {}
character-entities-legacy@3.0.0: {}
@@ -13122,7 +13152,7 @@ snapshots:
cli-meow-help@4.0.0:
dependencies:
- chalk: 5.6.0
+ chalk: 5.3.0
chalk-template: 1.1.0
cli-table3: 0.6.5
@@ -13141,7 +13171,7 @@ snapshots:
cli-welcome@3.0.0:
dependencies:
- chalk: 5.6.0
+ chalk: 5.3.0
clear-any-console: 1.16.2
client-only@0.0.1: {}
@@ -13243,6 +13273,18 @@ snapshots:
concat-map@0.0.1: {}
+ conf@13.0.1:
+ dependencies:
+ ajv: 8.17.1
+ ajv-formats: 3.0.1(ajv@8.17.1)
+ atomically: 2.0.3
+ debounce-fn: 6.0.0
+ dot-prop: 9.0.0
+ env-paths: 3.0.0
+ json-schema-typed: 8.0.1
+ semver: 7.6.3
+ uint8array-extras: 1.4.0
+
confbox@0.1.7: {}
consola@3.2.3: {}
@@ -13310,10 +13352,6 @@ snapshots:
shebang-command: 2.0.0
which: 2.0.2
- css-line-break@2.1.0:
- dependencies:
- utrie: 1.0.2
-
css-what@6.1.0: {}
cssesc@3.0.0: {}
@@ -13393,6 +13431,10 @@ snapshots:
dayjs@1.11.13: {}
+ debounce-fn@6.0.0:
+ dependencies:
+ mimic-function: 5.0.1
+
debug@2.6.9:
dependencies:
ms: 2.0.0
@@ -13538,6 +13580,10 @@ snapshots:
dom-accessibility-api@0.5.16: {}
+ dot-prop@9.0.0:
+ dependencies:
+ type-fest: 4.26.1
+
dotenv@16.0.3: {}
dotenv@16.4.5: {}
@@ -13608,6 +13654,8 @@ snapshots:
env-paths@2.2.1: {}
+ env-paths@3.0.0: {}
+
environment@1.1.0: {}
err-code@2.0.3: {}
@@ -15149,11 +15197,6 @@ snapshots:
html-void-elements@3.0.0: {}
- html2canvas@1.4.1:
- dependencies:
- css-line-break: 2.1.0
- text-segmentation: 1.0.3
-
http-cache-semantics@4.1.1: {}
http-errors@1.4.0:
@@ -15568,6 +15611,8 @@ snapshots:
json-schema-traverse@1.0.0: {}
+ json-schema-typed@8.0.1: {}
+
json-stable-stringify-without-jsonify@1.0.1: {}
json5@1.0.2:
@@ -15707,7 +15752,7 @@ snapshots:
log-symbols@6.0.0:
dependencies:
- chalk: 5.6.0
+ chalk: 5.3.0
is-unicode-supported: 1.3.0
log-symbols@7.0.0:
@@ -16773,15 +16818,15 @@ snapshots:
- '@types/react'
- supports-color
- next-themes@0.2.1(next@14.2.35(@playwright/test@1.47.2)(react-dom@18.3.1(react@18.3.1))(react@18.3.1))(react-dom@18.3.1(react@18.3.1))(react@18.3.1):
+ next-themes@0.2.1(next@14.2.5(@playwright/test@1.47.2)(react-dom@18.3.1(react@18.3.1))(react@18.3.1))(react-dom@18.3.1(react@18.3.1))(react@18.3.1):
dependencies:
- next: 14.2.35(@playwright/test@1.47.2)(react-dom@18.3.1(react@18.3.1))(react@18.3.1)
+ next: 14.2.5(@playwright/test@1.47.2)(react-dom@18.3.1(react@18.3.1))(react@18.3.1)
react: 18.3.1
react-dom: 18.3.1(react@18.3.1)
- next@14.2.35(@babel/core@7.25.7)(react-dom@18.3.1(react@18.3.1))(react@18.3.1):
+ next@14.2.5(@babel/core@7.25.7)(react-dom@18.3.1(react@18.3.1))(react@18.3.1):
dependencies:
- '@next/env': 14.2.35
+ '@next/env': 14.2.5
'@swc/helpers': 0.5.5
busboy: 1.6.0
caniuse-lite: 1.0.30001666
@@ -16791,23 +16836,23 @@ snapshots:
react-dom: 18.3.1(react@18.3.1)
styled-jsx: 5.1.1(@babel/core@7.25.7)(react@18.3.1)
optionalDependencies:
- '@next/swc-darwin-arm64': 14.2.33
- '@next/swc-darwin-x64': 14.2.33
- '@next/swc-linux-arm64-gnu': 14.2.33
- '@next/swc-linux-arm64-musl': 14.2.33
- '@next/swc-linux-x64-gnu': 14.2.33
- '@next/swc-linux-x64-musl': 14.2.33
- '@next/swc-win32-arm64-msvc': 14.2.33
- '@next/swc-win32-ia32-msvc': 14.2.33
- '@next/swc-win32-x64-msvc': 14.2.33
+ '@next/swc-darwin-arm64': 14.2.5
+ '@next/swc-darwin-x64': 14.2.5
+ '@next/swc-linux-arm64-gnu': 14.2.5
+ '@next/swc-linux-arm64-musl': 14.2.5
+ '@next/swc-linux-x64-gnu': 14.2.5
+ '@next/swc-linux-x64-musl': 14.2.5
+ '@next/swc-win32-arm64-msvc': 14.2.5
+ '@next/swc-win32-ia32-msvc': 14.2.5
+ '@next/swc-win32-x64-msvc': 14.2.5
transitivePeerDependencies:
- '@babel/core'
- babel-plugin-macros
optional: true
- next@14.2.35(@playwright/test@1.47.2)(react-dom@18.3.1(react@18.3.1))(react@18.3.1):
+ next@14.2.5(@playwright/test@1.47.2)(react-dom@18.3.1(react@18.3.1))(react@18.3.1):
dependencies:
- '@next/env': 14.2.35
+ '@next/env': 14.2.5
'@swc/helpers': 0.5.5
busboy: 1.6.0
caniuse-lite: 1.0.30001666
@@ -16817,15 +16862,15 @@ snapshots:
react-dom: 18.3.1(react@18.3.1)
styled-jsx: 5.1.1(@babel/core@7.25.7)(react@18.3.1)
optionalDependencies:
- '@next/swc-darwin-arm64': 14.2.33
- '@next/swc-darwin-x64': 14.2.33
- '@next/swc-linux-arm64-gnu': 14.2.33
- '@next/swc-linux-arm64-musl': 14.2.33
- '@next/swc-linux-x64-gnu': 14.2.33
- '@next/swc-linux-x64-musl': 14.2.33
- '@next/swc-win32-arm64-msvc': 14.2.33
- '@next/swc-win32-ia32-msvc': 14.2.33
- '@next/swc-win32-x64-msvc': 14.2.33
+ '@next/swc-darwin-arm64': 14.2.5
+ '@next/swc-darwin-x64': 14.2.5
+ '@next/swc-linux-arm64-gnu': 14.2.5
+ '@next/swc-linux-arm64-musl': 14.2.5
+ '@next/swc-linux-x64-gnu': 14.2.5
+ '@next/swc-linux-x64-musl': 14.2.5
+ '@next/swc-win32-arm64-msvc': 14.2.5
+ '@next/swc-win32-ia32-msvc': 14.2.5
+ '@next/swc-win32-x64-msvc': 14.2.5
'@playwright/test': 1.47.2
transitivePeerDependencies:
- '@babel/core'
@@ -17080,7 +17125,7 @@ snapshots:
ora@8.1.0:
dependencies:
- chalk: 5.6.0
+ chalk: 5.3.0
cli-cursor: 5.0.0
cli-spinners: 2.9.2
is-interactive: 2.0.0
@@ -18398,6 +18443,8 @@ snapshots:
dependencies:
js-tokens: 9.0.0
+ stubborn-fs@1.2.5: {}
+
style-to-object@0.4.4:
dependencies:
inline-style-parser: 0.1.1
@@ -18575,10 +18622,6 @@ snapshots:
commander: 2.20.3
source-map-support: 0.5.21
- text-segmentation@1.0.3:
- dependencies:
- utrie: 1.0.2
-
text-table@0.2.0: {}
thenify-all@1.6.0:
@@ -18884,6 +18927,8 @@ snapshots:
uid-promise@1.0.0: {}
+ uint8array-extras@1.4.0: {}
+
ultrahtml@1.5.3: {}
unbox-primitive@1.0.2:
@@ -19088,10 +19133,6 @@ snapshots:
utils-merge@1.0.1: {}
- utrie@1.0.2:
- dependencies:
- base64-arraybuffer: 1.0.2
-
uuid@10.0.0: {}
uuid@3.3.2: {}
@@ -19493,6 +19534,8 @@ snapshots:
tr46: 1.0.1
webidl-conversions: 4.0.2
+ when-exit@2.1.3: {}
+
which-boxed-primitive@1.0.2:
dependencies:
is-bigint: 1.0.4