Skip to content

Commit

Permalink
Personalization
Browse files Browse the repository at this point in the history
  • Loading branch information
ashpreetbedi committed May 20, 2024
1 parent 2d62ceb commit 06a25cd
Show file tree
Hide file tree
Showing 32 changed files with 125 additions and 92 deletions.
2 changes: 1 addition & 1 deletion cookbook/agents/requirements.txt
Original file line number Diff line number Diff line change
Expand Up @@ -136,7 +136,7 @@ peewee==3.17.5
# via yfinance
pgvector==0.2.5
# via -r cookbook/llm_os/requirements.in
phidata==2.4.8
phidata==2.4.10
# via -r cookbook/llm_os/requirements.in
pillow==10.3.0
# via
Expand Down
2 changes: 1 addition & 1 deletion cookbook/examples/auto_rag/requirements.txt
Original file line number Diff line number Diff line change
Expand Up @@ -107,7 +107,7 @@ pandas==2.2.2
# streamlit
pgvector==0.2.5
# via -r cookbook/llms/openai/auto_rag/requirements.in
phidata==2.4.8
phidata==2.4.10
# via -r cookbook/llms/openai/auto_rag/requirements.in
pillow==10.3.0
# via streamlit
Expand Down
2 changes: 1 addition & 1 deletion cookbook/examples/data_eng/requirements.txt
Original file line number Diff line number Diff line change
Expand Up @@ -86,7 +86,7 @@ pandas==2.2.2
# -r cookbook/examples/data_eng/requirements.in
# altair
# streamlit
phidata==2.4.8
phidata==2.4.10
# via -r cookbook/examples/data_eng/requirements.in
pillow==10.3.0
# via streamlit
Expand Down
Original file line number Diff line number Diff line change
@@ -1,9 +1,8 @@
# Autonomous RAG with Personalized Memory
# Personalized Memory & Autonomous RAG

This cookbook shows how to do Autonomous retrieval-augmented generation with GPT4.
This cookbook implements Personalized Memory & Autonomous retrieval-augmented generation.

Auto-RAG is just a fancy name for giving the LLM tools like "search_knowledge_base", "read_chat_history", "search_the_web"
and letting it decide how to retrieve the information it needs to answer the question.
i.e. the Assistant will remember details about the user across runs. Similar to how [ChatGPT implements Memory](https://openai.com/index/memory-and-new-controls-for-chatgpt/).

> Note: Fork and clone this repository if needed
Expand All @@ -23,7 +22,7 @@ export OPENAI_API_KEY=***
### 3. Install libraries

```shell
pip install -r cookbook/examples/auto_rag/requirements.txt
pip install -r cookbook/examples/personalization/requirements.txt
```

### 4. Run PgVector
Expand All @@ -50,16 +49,17 @@ docker run -d \
phidata/pgvector:16
```

### 5. Run Autonomous RAG App
### 5. Run Personalized Autonomous RAG App

```shell
streamlit run cookbook/examples/auto_rag/app.py
streamlit run cookbook/examples/personalization/app.py
```

- Open [localhost:8501](http://localhost:8501) to view your RAG app.
- Add websites or PDFs and ask questions.

- Example Website: https://techcrunch.com/2024/04/18/meta-releases-llama-3-claims-its-among-the-best-open-models-available/
- Open [localhost:8501](http://localhost:8501) to view the streamlit app.
- Add to memory: "call me 'your highness'"
- Add to memory: "always respond with a nice greeting and salutation"
- Add to memory: "i like cats so add a cat pun in the response"
- Add a website to the knowledge base: https://techcrunch.com/2024/04/18/meta-releases-llama-3-claims-its-among-the-best-open-models-available/
- Ask questions like:
- What did Meta release?
- Tell me more about the Llama 3 models?
Expand Down
File renamed without changes.
Original file line number Diff line number Diff line change
Expand Up @@ -6,23 +6,24 @@
from phi.document import Document
from phi.document.reader.pdf import PDFReader
from phi.document.reader.website import WebsiteReader
from phi.tools.streamlit.components import get_username_sidebar
from phi.utils.log import logger

from assistant import get_auto_rag_assistant # type: ignore
from assistant import get_personalized_auto_rag_assistant # type: ignore

nest_asyncio.apply()
st.set_page_config(
page_title="Autonomous RAG",
page_title="Personalized Memory & Auto RAG",
page_icon=":orange_heart:",
)
st.title("Autonomous RAG")
st.title("Personalized Memory & Auto RAG")
st.markdown("##### :orange_heart: built using [phidata](https://github.com/phidatahq/phidata)")


def restart_assistant():
logger.debug("---*--- Restarting Assistant ---*---")
st.session_state["auto_rag_assistant"] = None
st.session_state["auto_rag_assistant_run_id"] = None
st.session_state["personalized_auto_rag_assistant"] = None
st.session_state["personalized_auto_rag_assistant_run_id"] = None
if "url_scrape_key" in st.session_state:
st.session_state["url_scrape_key"] += 1
if "file_uploader_key" in st.session_state:
Expand All @@ -31,8 +32,16 @@ def restart_assistant():


def main() -> None:
# Get username
user_id = get_username_sidebar()
if user_id:
st.sidebar.info(f":technologist: User: {user_id}")
else:
st.write(":technologist: Please enter a username")
return

# Get LLM model
llm_model = st.sidebar.selectbox("Select LLM", options=["gpt-4-turbo", "gpt-3.5-turbo"])
llm_model = st.sidebar.selectbox("Select LLM", options=["gpt-4o", "gpt-4-turbo"])
# Set assistant_type in session state
if "llm_model" not in st.session_state:
st.session_state["llm_model"] = llm_model
Expand All @@ -42,23 +51,26 @@ def main() -> None:
restart_assistant()

# Get the assistant
auto_rag_assistant: Assistant
if "auto_rag_assistant" not in st.session_state or st.session_state["auto_rag_assistant"] is None:
personalized_auto_rag_assistant: Assistant
if (
"personalized_auto_rag_assistant" not in st.session_state
or st.session_state["personalized_auto_rag_assistant"] is None
):
logger.info(f"---*--- Creating {llm_model} Assistant ---*---")
auto_rag_assistant = get_auto_rag_assistant(llm_model=llm_model)
st.session_state["auto_rag_assistant"] = auto_rag_assistant
personalized_auto_rag_assistant = get_personalized_auto_rag_assistant(llm_model=llm_model, user_id=user_id)
st.session_state["personalized_auto_rag_assistant"] = personalized_auto_rag_assistant
else:
auto_rag_assistant = st.session_state["auto_rag_assistant"]
personalized_auto_rag_assistant = st.session_state["personalized_auto_rag_assistant"]

# Create assistant run (i.e. log to database) and save run_id in session state
try:
st.session_state["auto_rag_assistant_run_id"] = auto_rag_assistant.create_run()
st.session_state["personalized_auto_rag_assistant_run_id"] = personalized_auto_rag_assistant.create_run()
except Exception:
st.warning("Could not create assistant, is the database running?")
return

# Load existing messages
assistant_chat_history = auto_rag_assistant.memory.get_chat_history()
assistant_chat_history = personalized_auto_rag_assistant.memory.get_chat_history()
if len(assistant_chat_history) > 0:
logger.debug("Loading chat history")
st.session_state["messages"] = assistant_chat_history
Expand All @@ -84,13 +96,13 @@ def main() -> None:
with st.chat_message("assistant"):
resp_container = st.empty()
response = ""
for delta in auto_rag_assistant.run(question):
for delta in personalized_auto_rag_assistant.run(question):
response += delta # type: ignore
resp_container.markdown(response)
st.session_state["messages"].append({"role": "assistant", "content": response})

# Load knowledge base
if auto_rag_assistant.knowledge_base:
if personalized_auto_rag_assistant.knowledge_base:
# -*- Add websites to knowledge base
if "url_scrape_key" not in st.session_state:
st.session_state["url_scrape_key"] = 0
Expand All @@ -106,7 +118,7 @@ def main() -> None:
scraper = WebsiteReader(max_links=2, max_depth=1)
web_documents: List[Document] = scraper.read(input_url)
if web_documents:
auto_rag_assistant.knowledge_base.load_documents(web_documents, upsert=True)
personalized_auto_rag_assistant.knowledge_base.load_documents(web_documents, upsert=True)
else:
st.sidebar.error("Could not read website")
st.session_state[f"{input_url}_uploaded"] = True
Expand All @@ -126,33 +138,43 @@ def main() -> None:
reader = PDFReader()
auto_rag_documents: List[Document] = reader.read(uploaded_file)
if auto_rag_documents:
auto_rag_assistant.knowledge_base.load_documents(auto_rag_documents, upsert=True)
personalized_auto_rag_assistant.knowledge_base.load_documents(auto_rag_documents, upsert=True)
else:
st.sidebar.error("Could not read PDF")
st.session_state[f"{auto_rag_name}_uploaded"] = True
alert.empty()

if auto_rag_assistant.knowledge_base and auto_rag_assistant.knowledge_base.vector_db:
if personalized_auto_rag_assistant.knowledge_base and personalized_auto_rag_assistant.knowledge_base.vector_db:
if st.sidebar.button("Clear Knowledge Base"):
auto_rag_assistant.knowledge_base.vector_db.clear()
personalized_auto_rag_assistant.knowledge_base.vector_db.clear()
st.sidebar.success("Knowledge base cleared")

if auto_rag_assistant.storage:
auto_rag_assistant_run_ids: List[str] = auto_rag_assistant.storage.get_all_run_ids()
new_auto_rag_assistant_run_id = st.sidebar.selectbox("Run ID", options=auto_rag_assistant_run_ids)
if st.session_state["auto_rag_assistant_run_id"] != new_auto_rag_assistant_run_id:
logger.info(f"---*--- Loading {llm_model} run: {new_auto_rag_assistant_run_id} ---*---")
st.session_state["auto_rag_assistant"] = get_auto_rag_assistant(
llm_model=llm_model, run_id=new_auto_rag_assistant_run_id
if personalized_auto_rag_assistant.storage:
personalized_auto_rag_assistant_run_ids: List[str] = personalized_auto_rag_assistant.storage.get_all_run_ids(
user_id=user_id
)
new_personalized_auto_rag_assistant_run_id = st.sidebar.selectbox(
"Run ID", options=personalized_auto_rag_assistant_run_ids
)
if st.session_state["personalized_auto_rag_assistant_run_id"] != new_personalized_auto_rag_assistant_run_id:
logger.info(f"---*--- Loading {llm_model} run: {new_personalized_auto_rag_assistant_run_id} ---*---")
st.session_state["personalized_auto_rag_assistant"] = get_personalized_auto_rag_assistant(
llm_model=llm_model, user_id=user_id, run_id=new_personalized_auto_rag_assistant_run_id
)
st.rerun()

# Show Assistant memory
if personalized_auto_rag_assistant.memory.memories and len(personalized_auto_rag_assistant.memory.memories) > 0:
logger.info("Loading assistant memory")
with st.status("Assistant Memory", expanded=False, state="complete"):
with st.container():
memory_container = st.empty()
memory_container.markdown(
"\n".join([f"- {m.memory}" for m in personalized_auto_rag_assistant.memory.memories])
)

if st.sidebar.button("New Run"):
restart_assistant()

if "embeddings_model_updated" in st.session_state:
st.sidebar.info("Please add documents again as the embeddings model has changed.")
st.session_state["embeddings_model_updated"] = False


main()
Original file line number Diff line number Diff line change
@@ -1,34 +1,43 @@
from typing import Optional

from phi.assistant import Assistant
from phi.knowledge import AssistantKnowledge
from phi.assistant import Assistant, AssistantMemory, AssistantKnowledge
from phi.llm.openai import OpenAIChat
from phi.tools.duckduckgo import DuckDuckGo
from phi.embedder.openai import OpenAIEmbedder
from phi.vectordb.pgvector import PgVector2
from phi.memory.db.postgres import PgMemoryDb
from phi.storage.assistant.postgres import PgAssistantStorage

db_url = "postgresql+psycopg://ai:ai@localhost:5532/ai"


def get_auto_rag_assistant(
llm_model: str = "gpt-4-turbo",
def get_personalized_auto_rag_assistant(
llm_model: str = "gpt-4o",
user_id: Optional[str] = None,
run_id: Optional[str] = None,
debug_mode: bool = True,
) -> Assistant:
"""Get an Auto RAG Assistant."""
"""Get a Personalized Auto RAG Assistant."""

return Assistant(
name="auto_rag_assistant",
name="personalized_auto_rag_assistant",
run_id=run_id,
user_id=user_id,
llm=OpenAIChat(model=llm_model),
storage=PgAssistantStorage(table_name="auto_rag_assistant_openai", db_url=db_url),
# Add personalization to the assistant
# by storing memories in a database and adding them to the system prompt
memory=AssistantMemory(
db=PgMemoryDb(
db_url=db_url,
table_name="personalized_auto_rag_assistant_memory",
),
add_memories=True,
),
storage=PgAssistantStorage(table_name="personalized_auto_rag_assistant_openai", db_url=db_url),
knowledge_base=AssistantKnowledge(
vector_db=PgVector2(
db_url=db_url,
collection="auto_rag_documents_openai",
collection="personalized_auto_rag_documents_openai",
embedder=OpenAIEmbedder(model="text-embedding-3-small", dimensions=1536),
),
# 3 references are added to the prompt
Expand Down
File renamed without changes.
Original file line number Diff line number Diff line change
Expand Up @@ -107,7 +107,7 @@ pandas==2.2.2
# streamlit
pgvector==0.2.5
# via -r cookbook/llms/openai/auto_rag/requirements.in
phidata==2.4.8
phidata==2.4.10
# via -r cookbook/llms/openai/auto_rag/requirements.in
pillow==10.3.0
# via streamlit
Expand Down
2 changes: 1 addition & 1 deletion cookbook/examples/research/requirements.txt
Original file line number Diff line number Diff line change
Expand Up @@ -99,7 +99,7 @@ pandas==2.2.2
# via
# altair
# streamlit
phidata==2.4.8
phidata==2.4.10
# via -r cookbook/examples/research/requirements.in
pillow==10.3.0
# via streamlit
Expand Down
2 changes: 1 addition & 1 deletion cookbook/examples/sql/requirements.txt
Original file line number Diff line number Diff line change
Expand Up @@ -87,7 +87,7 @@ pandas==2.2.2
# streamlit
pgvector==0.2.5
# via -r cookbook/examples/sql/requirements.in
phidata==2.4.8
phidata==2.4.10
# via -r cookbook/examples/sql/requirements.in
pillow==10.3.0
# via streamlit
Expand Down
2 changes: 1 addition & 1 deletion cookbook/examples/worldbuilding/requirements.txt
Original file line number Diff line number Diff line change
Expand Up @@ -95,7 +95,7 @@ pandas==2.2.0
# via
# altair
# streamlit
phidata==2.4.8
phidata==2.4.10
# via -r cookbook/examples/worldbuilding/requirements.in
pillow==10.2.0
# via streamlit
Expand Down
2 changes: 1 addition & 1 deletion cookbook/integrations/singlestore/ai_apps/requirements.txt
Original file line number Diff line number Diff line change
Expand Up @@ -126,7 +126,7 @@ pandas==2.2.2
# yfinance
peewee==3.17.3
# via yfinance
phidata==2.4.8
phidata==2.4.10
# via -r cookbook/integrations/singlestore/auto_rag/requirements.in
pillow==10.3.0
# via streamlit
Expand Down
2 changes: 1 addition & 1 deletion cookbook/llm_os/requirements.txt
Original file line number Diff line number Diff line change
Expand Up @@ -136,7 +136,7 @@ peewee==3.17.5
# via yfinance
pgvector==0.2.5
# via -r cookbook/llm_os/requirements.in
phidata==2.4.8
phidata==2.4.10
# via -r cookbook/llm_os/requirements.in
pillow==10.3.0
# via
Expand Down
2 changes: 1 addition & 1 deletion cookbook/llms/groq/ai_apps/requirements.txt
Original file line number Diff line number Diff line change
Expand Up @@ -127,7 +127,7 @@ peewee==3.17.3
# via yfinance
pgvector==0.2.5
# via -r cookbook/llms/groq/ai_apps/requirements.in
phidata==2.4.8
phidata==2.4.10
# via -r cookbook/llms/groq/ai_apps/requirements.in
pillow==10.3.0
# via streamlit
Expand Down
2 changes: 1 addition & 1 deletion cookbook/llms/groq/auto_rag/requirements.txt
Original file line number Diff line number Diff line change
Expand Up @@ -113,7 +113,7 @@ pandas==2.2.2
# streamlit
pgvector==0.2.5
# via -r cookbook/llms/groq/auto_rag/requirements.in
phidata==2.4.8
phidata==2.4.10
# via -r cookbook/llms/groq/auto_rag/requirements.in
pillow==10.3.0
# via streamlit
Expand Down
2 changes: 1 addition & 1 deletion cookbook/llms/groq/investment_researcher/requirements.txt
Original file line number Diff line number Diff line change
Expand Up @@ -124,7 +124,7 @@ pandas==2.2.2
# yfinance
peewee==3.17.3
# via yfinance
phidata==2.4.8
phidata==2.4.10
# via -r cookbook/llms/groq/investment_researcher/requirements.in
pillow==10.3.0
# via streamlit
Expand Down
2 changes: 1 addition & 1 deletion cookbook/llms/groq/news_articles/requirements.txt
Original file line number Diff line number Diff line change
Expand Up @@ -119,7 +119,7 @@ pandas==2.2.2
# altair
# newspaper4k
# streamlit
phidata==2.4.8
phidata==2.4.10
# via -r cookbook/llms/groq/news_articles/requirements.in
pillow==10.3.0
# via
Expand Down
2 changes: 1 addition & 1 deletion cookbook/llms/groq/rag/requirements.txt
Original file line number Diff line number Diff line change
Expand Up @@ -111,7 +111,7 @@ pandas==2.2.2
# streamlit
pgvector==0.2.5
# via -r cookbook/llms/groq/rag/requirements.in
phidata==2.4.8
phidata==2.4.10
# via -r cookbook/llms/groq/rag/requirements.in
pillow==10.3.0
# via streamlit
Expand Down
2 changes: 1 addition & 1 deletion cookbook/llms/groq/research/requirements.txt
Original file line number Diff line number Diff line change
Expand Up @@ -85,7 +85,7 @@ pandas==2.2.2
# via
# altair
# streamlit
phidata==2.4.8
phidata==2.4.10
# via -r cookbook/llms/groq/research/requirements.in
pillow==10.3.0
# via streamlit
Expand Down

0 comments on commit 06a25cd

Please sign in to comment.