diff --git a/concepts/prompt.mdx b/concepts/prompt.mdx
new file mode 100644
index 0000000..f432dad
--- /dev/null
+++ b/concepts/prompt.mdx
@@ -0,0 +1,188 @@
+---
+title: "Prompt"
+---
+
+You can draft and iterate on prompts in the Literal Prompt Playground. Once you are happy with a prompt, you can save it and use it in your code.
+
+Moving prompts out of your code has several benefits:
+- Non-technical people can draft and iterate on prompts.
+- You can deploy a new version of your prompt without redeploying your code.
+- You can track which prompt version was used for a specific generation.
+- You can compare prompt versions to see which one performs better.
+
+## Get a Prompt
+
+Getting a prompt is the first step to using it in your code. You can fetch a prompt by its name.
+
+
+```python Python
+import os
+import asyncio
+from literalai import LiteralClient
+
+client = LiteralClient(api_key=os.getenv("LITERAL_API_KEY"))
+
+async def main():
+    # This will fetch the champion version; you can also pass a specific version
+    prompt = await client.api.get_prompt(name="Default")
+
+asyncio.run(main())
+
+# Synchronous version
+prompt = client.api.get_prompt_sync(name="Default")
+```
+
+```typescript TypeScript
+import { LiteralClient } from '@literalai/client';
+
+const client = new LiteralClient(process.env['LITERAL_API_KEY']);
+
+const promptName = 'Default';
+// This will fetch the champion version; you can also pass a specific version
+const prompt = await client.api.getPrompt(promptName);
+```
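+
+To pin a generation to an exact prompt version instead of the champion, here is a minimal sketch. Note that the `version` argument is an assumption based on the comment above; check the API reference for the exact signature:
+
+```python Python
+# Hypothetical sketch: fetch a specific version of the "Default" prompt.
+# The `version` argument is an assumption, not a confirmed signature.
+prompt = client.api.get_prompt_sync(name="Default", version=0)
+```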
+
+
+
+## Format a Prompt
+
+Once you have your prompt, you can format it to get messages in the OpenAI format.
+
+Combining prompts with integrations (like the [OpenAI integration](/integrations/openai)) allows you to log the generations and to track which prompt versions were used to generate them.
+
+
+```python Python
+import os
+import asyncio
+from literalai import LiteralClient
+from openai import AsyncOpenAI
+
+openai_client = AsyncOpenAI(api_key=os.getenv("OPENAI_API_KEY"))
+
+literal_client = LiteralClient(api_key=os.getenv("LITERAL_API_KEY"))
+# Optionally instrument the openai client to log generations
+literal_client.instrument_openai()
+
+async def main():
+ prompt = await literal_client.api.get_prompt(name="Default")
+
+ # Optionally pass variables to the prompt
+ variables = {"foo": "bar"}
+
+ messages = prompt.format(variables)
+ stream = await openai_client.chat.completions.create(
+ messages=messages,
+ # Optionally pass the tools defined in the prompt
+ tools=prompt.tools,
+ # Pass the settings defined in the prompt
+ **prompt.settings,
+ stream=True
+ )
+
+ async for chunk in stream:
+ print(chunk)
+
+asyncio.run(main())
+```
+
+```typescript TypeScript
+import { LiteralClient } from '@literalai/client';
+import OpenAI from 'openai';
+
+const literalClient = new LiteralClient(process.env['LITERAL_API_KEY']);
+
+const openai = new OpenAI({
+ apiKey: process.env['OPENAI_API_KEY'],
+});
+
+async function main() {
+ const prompt = await literalClient.api.getPrompt('Default');
+
+ // Optionally pass variables to the prompt
+ const variables = { foo: 'bar' };
+
+ const messages = prompt.format(variables);
+
+ const result = await openai.chat.completions.create({
+ ...prompt.settings,
+ tools: prompt.tools,
+ messages: messages,
+ stream: true
+ });
+
+  await literalClient.instrumentation.openai(result);
+}
+
+main();
+
+```
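+
+For reference, `prompt.format` returns messages in the OpenAI chat format, so you can inspect or adjust them before calling the model. A minimal sketch (the exact messages depend on your prompt template; the content below is illustrative):
+
+```python Python
+messages = prompt.format({"foo": "bar"})
+# Messages follow the OpenAI chat format, for example (illustrative):
+# [{"role": "system", "content": "You are a helpful assistant. Context: bar"}]
+print(messages)
+```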
+
+
+
+## Langchain Chat Template
+
+Langchain uses its own prompt format, so you can convert a Literal prompt to a Langchain Chat Template.
+
+You can combine the prompt with the [Langchain integration](/integrations/langchain) to log the generations and to track which prompt versions were used to generate them.
+
+
+```python Python
+import os
+import asyncio
+from literalai import LiteralClient
+
+from langchain_openai import ChatOpenAI
+from langchain.schema.runnable.config import RunnableConfig
+from langchain.schema import StrOutputParser
+
+literal_client = LiteralClient(api_key=os.getenv("LITERAL_API_KEY"))
+
+async def main():
+ prompt = await literal_client.api.get_prompt(name="Default")
+ lc_prompt = prompt.to_langchain_chat_prompt_template()
+
+ model = ChatOpenAI(streaming=True)
+ runnable = lc_prompt | model | StrOutputParser()
+
+ cb = literal_client.langchain_callback()
+ variables = {"foo": "bar"}
+
+ res = runnable.invoke(
+ variables,
+ config=RunnableConfig(callbacks=[cb], run_name="Test run")
+ )
+
+asyncio.run(main())
+```
+
+```typescript TypeScript
+import { LiteralClient } from '@literalai/client';
+
+import { StringOutputParser } from '@langchain/core/output_parsers';
+import { ChatOpenAI } from '@langchain/openai';
+
+const literalClient = new LiteralClient(process.env['LITERAL_API_KEY']);
+
+async function main() {
+ const prompt = await literalClient.api.getPrompt('Default');
+
+ const chatPrompt = prompt.toLangchainChatPromptTemplate();
+
+ const model = new ChatOpenAI({});
+ const outputParser = new StringOutputParser();
+
+ const chain = chatPrompt.pipe(model).pipe(outputParser);
+
+ const variables = { foo: 'bar' };
+
+ const response = await chain.invoke(
+ variables,
+ {
+ runName: 'Test run',
+      callbacks: [literalClient.instrumentation.langchain.literalCallback()]
+ }
+ );
+}
+
+main();
+
+```
+
+
\ No newline at end of file
diff --git a/integrations/langchain.mdx b/integrations/langchain.mdx
index 8ef7eb8..05cbe8d 100644
--- a/integrations/langchain.mdx
+++ b/integrations/langchain.mdx
@@ -11,17 +11,58 @@ The Langchain integration enables to monitor your Langchain agents and chains wi
import os
from literalai import LiteralClient
+from langchain_openai import ChatOpenAI
+from langchain.schema.runnable.config import RunnableConfig
+from langchain.schema import StrOutputParser
+from langchain.prompts import ChatPromptTemplate
+
literal_client = LiteralClient(api_key=os.getenv("LITERAL_API_KEY"))
 cb = literal_client.langchain_callback()
+
+prompt = ChatPromptTemplate.from_messages(
+    [("human", "Tell me a short joke about {topic}")]
+)
+
+model = ChatOpenAI(streaming=True)
+runnable = prompt | model | StrOutputParser()
+
+res = runnable.invoke(
+ {"topic": "ice cream"},
+ config=RunnableConfig(callbacks=[cb], run_name="joke")
+)
```
```typescript TypeScript
import { LiteralClient } from '@literalai/client';
+import { StringOutputParser } from '@langchain/core/output_parsers';
+import { ChatPromptTemplate } from '@langchain/core/prompts';
+import { ChatOpenAI } from '@langchain/openai';
+
const client = new LiteralClient(process.env['LITERAL_API_KEY']); // This is the default and can be omitted
const cb = client.instrumentation.langchain.literalCallback();
+
+// Example of using the callback
+const prompt = ChatPromptTemplate.fromMessages([
+ ['human', 'Tell me a short joke about {topic}']
+]);
+
+const model = new ChatOpenAI({});
+const outputParser = new StringOutputParser();
+
+const chain = prompt.pipe(model).pipe(outputParser);
+
+const response = await chain.invoke(
+  { topic: 'ice cream' },
+  {
+    runName: 'joke',
+    callbacks: [cb]
+  }
+);
```
diff --git a/mint.json b/mint.json
index fff3506..f630ee2 100644
--- a/mint.json
+++ b/mint.json
@@ -1,6 +1,6 @@
{
"$schema": "https://mintlify.com/schema.json",
- "name": "Literal Platform",
+ "name": "Literal Doc",
"logo": {
"dark": "/logo/logo-dark.svg",
"light": "/logo/logo-light.svg"
@@ -48,12 +48,18 @@
{
"group": "Concepts",
"pages": [
- "concepts/thread",
- "concepts/step",
- "concepts/feedback",
- "concepts/user",
- "concepts/attachment",
- "concepts/dataset"
+ "concepts/prompt",
+ "concepts/dataset",
+ {
+ "group": "Observability",
+ "pages": [
+ "concepts/thread",
+ "concepts/step",
+ "concepts/feedback",
+ "concepts/user",
+ "concepts/attachment"
+ ]
+ }
]
},
{
@@ -67,7 +73,12 @@
},
{
"group": "Examples",
- "pages": ["examples/rag-example", "examples/agent-run", "examples/conversational-agent", "examples/conversational-agent-fastapi"]
+ "pages": [
+ "examples/rag-example",
+ "examples/agent-run",
+ "examples/conversational-agent",
+ "examples/conversational-agent-fastapi"
+ ]
},
{
"group": "Get Started",