Release v0.4.7 (#5094)
Release v0.4.7 (#5094)
lc0rp committed Aug 11, 2023
2 parents ec47a31 + f26ccda commit bb3a06d
Showing 76 changed files with 1,202 additions and 864 deletions.
7 changes: 7 additions & 0 deletions .gitignore
@@ -160,3 +160,10 @@ openai/

# news
CURRENT_BULLETIN.md
+
+# AgBenchmark
+agbenchmark/reports/
+
+# Nodejs
+package-lock.json
+package.json
24 changes: 9 additions & 15 deletions BULLETIN.md
@@ -4,24 +4,18 @@
📖 *User Guide*: https://docs.agpt.co.
👩 *Contributors Wiki*: https://github.com/Significant-Gravitas/Auto-GPT/wiki/Contributing.

-# v0.4.6 RELEASE HIGHLIGHTS! 🚀
+# v0.4.7 RELEASE HIGHLIGHTS! 🚀
# -----------------------------
-This release includes under-the-hood improvements and bug fixes, including better UTF-8
-special character support, workspace write access for sandboxed Python execution,
-more robust path resolution for config files and the workspace, and a full restructure
-of the Agent class, the "brain" of Auto-GPT, to make it more extensible.
+This release introduces initial REST API support, powered by e2b's agent
+protocol SDK (https://github.com/e2b-dev/agent-protocol#sdk).

-We have also released some documentation updates, including:
+It also includes improvements to prompt generation and support
+for our new benchmarking tool, Auto-GPT-Benchmarks
+(https://github.com/Significant-Gravitas/Auto-GPT-Benchmarks).

-- *How to share your system logs*
-  Visit [docs/share-your-logs.md] to learn how to share logs with us
-  via a log analyzer graciously contributed by https://www.e2b.dev/
+We've also moved our documentation to Material Theme, at https://docs.agpt.co.

-- *Auto-GPT re-architecture documentation*
-  You can learn more about the inner workings of the Auto-GPT re-architecture
-  released last cycle, via these links:
-  * [autogpt/core/README.md]
-  * [autogpt/core/ARCHITECTURE_NOTES.md]
+As usual, we've squashed a few bugs and made some under-the-hood improvements.

-Take a look at the Release Notes on Github for the full changelog!
+Take a look at the Release Notes on Github for the full changelog:
https://github.com/Significant-Gravitas/Auto-GPT/releases.
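For readers curious about the new REST surface: below is a minimal client-side sketch, assuming an Auto-GPT server that follows the agent protocol's task/step endpoints (`POST /agent/tasks`, `POST /agent/tasks/{task_id}/steps`). The host, port, payload fields, and task text are assumptions based on the protocol spec, not taken from this commit.

```python
# Hypothetical agent-protocol client (illustrative; not part of this commit).
# Assumes a server speaking the e2b agent protocol on localhost:8000.
import requests

BASE = "http://localhost:8000/agent"  # assumed host/port

# Create a task, then ask the agent to execute one step of it.
task = requests.post(f"{BASE}/tasks", json={"input": "Write 'hello' to hello.txt"}).json()
step = requests.post(f"{BASE}/tasks/{task['task_id']}/steps", json={}).json()
print(step.get("output"))
```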
File renamed without changes.
54 changes: 54 additions & 0 deletions agbenchmark/benchmarks.py
@@ -0,0 +1,54 @@
import os
import sys
from pathlib import Path
from typing import Tuple

from autogpt.agents import Agent
from autogpt.app.main import run_interaction_loop
from autogpt.commands import COMMAND_CATEGORIES
from autogpt.config import AIConfig, Config, ConfigBuilder
from autogpt.memory.vector import get_memory
from autogpt.models.command_registry import CommandRegistry
from autogpt.prompts.prompt import DEFAULT_TRIGGERING_PROMPT
from autogpt.workspace import Workspace

PROJECT_DIR = Path().resolve()


def run_specific_agent(task, continuous_mode=False) -> Tuple[str, int]:
    agent = bootstrap_agent(task, continuous_mode)
    run_interaction_loop(agent)


def bootstrap_agent(task, continuous_mode) -> Agent:
    config = ConfigBuilder.build_config_from_env(workdir=PROJECT_DIR)
    config.debug_mode = True
    config.continuous_mode = continuous_mode
    config.temperature = 0
    config.plain_output = True
    command_registry = CommandRegistry.with_command_modules(COMMAND_CATEGORIES, config)
    config.memory_backend = "no_memory"
    config.workspace_path = Workspace.init_workspace_directory(config)
    config.file_logger_path = Workspace.build_file_logger_path(config.workspace_path)
    ai_config = AIConfig(
        ai_name="Auto-GPT",
        ai_role="a multi-purpose AI assistant.",
        ai_goals=[task],
    )
    ai_config.command_registry = command_registry
    return Agent(
        memory=get_memory(config),
        command_registry=command_registry,
        ai_config=ai_config,
        config=config,
        triggering_prompt=DEFAULT_TRIGGERING_PROMPT,
    )


if __name__ == "__main__":
    # The first argument is the script name itself, second is the task
    if len(sys.argv) != 2:
        print("Usage: python script.py <task>")
        sys.exit(1)
    task = sys.argv[1]
    run_specific_agent(task, continuous_mode=True)
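As the `__main__` guard above suggests, the harness runs a single task passed on the command line; it can also be driven programmatically. A small sketch (the task string is invented):

```python
# Illustrative use of the harness defined above; the task text is made up.
# Note that bootstrap_agent pins temperature to 0 and uses the "no_memory"
# backend, which keeps benchmark runs as deterministic as the model allows.
from agbenchmark.benchmarks import run_specific_agent

run_specific_agent("Write the word 'Washington' to a .txt file", continuous_mode=True)
```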
4 changes: 4 additions & 0 deletions agbenchmark/config.json
@@ -0,0 +1,4 @@
{
  "workspace": "auto_gpt_workspace",
  "entry_path": "agbenchmark.benchmarks"
}
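A sketch of how a runner might consume this file; the field semantics are inferred from their names and the new `agbenchmark/benchmarks.py` entry point, not documented in this commit:

```python
# Reading agbenchmark/config.json (field meanings inferred, not authoritative).
import json
from pathlib import Path

cfg = json.loads(Path("agbenchmark/config.json").read_text())
workspace = Path(cfg["workspace"])  # directory the agent reads/writes during challenges
entry_module = cfg["entry_path"]    # module the benchmark invokes, here agbenchmark.benchmarks
```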
24 changes: 24 additions & 0 deletions agbenchmark/regression_tests.json
@@ -0,0 +1,24 @@
{
  "TestBasicCodeGeneration": {
    "difficulty": "basic",
    "dependencies": [
      "TestWriteFile"
    ],
    "data_path": "agbenchmark/challenges/code/d3"
  },
  "TestBasicMemory": {
    "difficulty": "basic",
    "data_path": "agbenchmark/challenges/memory/m1"
  },
  "TestReadFile": {
    "difficulty": "basic",
    "dependencies": [
      "TestWriteFile"
    ],
    "data_path": "agbenchmark/challenges/interface/read_file"
  },
  "TestWriteFile": {
    "dependencies": [],
    "data_path": "agbenchmark/challenges/interface/write_file"
  }
}
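The `dependencies` arrays imply an execution order (e.g. `TestWriteFile` must pass before `TestReadFile` runs). One way to resolve that order, shown as an illustrative sketch rather than agbenchmark's actual scheduler:

```python
# Resolve the regression tests' "dependencies" fields into a valid run order.
import json
from graphlib import TopologicalSorter  # Python 3.9+
from pathlib import Path

tests = json.loads(Path("agbenchmark/regression_tests.json").read_text())
graph = {name: set(spec.get("dependencies", [])) for name, spec in tests.items()}
print(list(TopologicalSorter(graph).static_order()))
# e.g. ['TestWriteFile', 'TestBasicMemory', 'TestBasicCodeGeneration', 'TestReadFile']
```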
14 changes: 11 additions & 3 deletions autogpt/agents/agent.py
@@ -17,6 +17,7 @@
from autogpt.llm.utils import count_string_tokens
from autogpt.logs import logger
from autogpt.logs.log_cycle import (
+    CURRENT_CONTEXT_FILE_NAME,
    FULL_MESSAGE_HISTORY_FILE_NAME,
    NEXT_ACTION_FILE_NAME,
    USER_INPUT_FILE_NAME,
@@ -109,6 +110,13 @@ def on_before_think(self, *args, **kwargs) -> ChatSequence:
            self.history.raw(),
            FULL_MESSAGE_HISTORY_FILE_NAME,
        )
+        self.log_cycle_handler.log_cycle(
+            self.ai_config.ai_name,
+            self.created_at,
+            self.cycle_count,
+            prompt.raw(),
+            CURRENT_CONTEXT_FILE_NAME,
+        )
        return prompt

    def execute(
@@ -285,10 +293,10 @@ def execute_command(
        # Handle non-native commands (e.g. from plugins)
        for command in agent.ai_config.prompt_generator.commands:
            if (
-                command_name == command["label"].lower()
-                or command_name == command["name"].lower()
+                command_name == command.label.lower()
+                or command_name == command.name.lower()
            ):
-                return command["function"](**arguments)
+                return command.function(**arguments)

    raise RuntimeError(
        f"Cannot execute '{command_name}': unknown command."
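The switch from `command["label"]` to `command.label` above implies that `prompt_generator.commands` now holds structured objects rather than plain dicts. A minimal stand-in for such an entry (hypothetical; the real class ships elsewhere in Auto-GPT):

```python
# Hypothetical attribute-style command entry compatible with the lookup above.
from dataclasses import dataclass
from typing import Any, Callable

@dataclass
class PromptCommand:
    label: str
    name: str
    function: Callable[..., Any]
```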
112 changes: 101 additions & 11 deletions autogpt/agents/base.py
@@ -1,7 +1,8 @@
from __future__ import annotations

+import re
from abc import ABCMeta, abstractmethod
-from typing import TYPE_CHECKING, Any, Optional
+from typing import TYPE_CHECKING, Any, Literal, Optional

if TYPE_CHECKING:
    from autogpt.config import AIConfig, Config
@@ -23,6 +24,8 @@
class BaseAgent(metaclass=ABCMeta):
    """Base class for all Auto-GPT agents."""

+    ThoughtProcessID = Literal["one-shot"]
+
    def __init__(
        self,
        ai_config: AIConfig,
@@ -91,6 +94,7 @@ def __init__(
    def think(
        self,
        instruction: Optional[str] = None,
+        thought_process_id: ThoughtProcessID = "one-shot",
    ) -> tuple[CommandName | None, CommandArgs | None, AgentThoughts]:
        """Runs the agent for one cycle.

@@ -103,9 +107,8 @@ def think(

        instruction = instruction or self.default_cycle_instruction

-        prompt: ChatSequence = self.construct_prompt(instruction)
-        prompt = self.on_before_think(prompt, instruction)
-
+        prompt: ChatSequence = self.construct_prompt(instruction, thought_process_id)
+        prompt = self.on_before_think(prompt, thought_process_id, instruction)
        raw_response = create_chat_completion(
            prompt,
            self.config,
@@ -115,7 +118,7 @@
        )
        self.cycle_count += 1

-        return self.on_response(raw_response, prompt, instruction)
+        return self.on_response(raw_response, thought_process_id, prompt, instruction)

    @abstractmethod
    def execute(
@@ -138,6 +141,7 @@ def execute(

    def construct_base_prompt(
        self,
+        thought_process_id: ThoughtProcessID,
        prepend_messages: list[Message] = [],
        append_messages: list[Message] = [],
        reserve_tokens: int = 0,
@@ -179,7 +183,11 @@

        return prompt

-    def construct_prompt(self, cycle_instruction: str) -> ChatSequence:
+    def construct_prompt(
+        self,
+        cycle_instruction: str,
+        thought_process_id: ThoughtProcessID,
+    ) -> ChatSequence:
        """Constructs and returns a prompt with the following structure:
        1. System prompt
        2. Message history of the agent, truncated & prepended with running summary as needed
@@ -196,14 +204,86 @@ def construct_prompt(self, cycle_instruction: str) -> ChatSequence:
        cycle_instruction_tlength = count_message_tokens(
            cycle_instruction_msg, self.llm.name
        )
-        prompt = self.construct_base_prompt(reserve_tokens=cycle_instruction_tlength)
+
+        append_messages: list[Message] = []
+
+        response_format_instr = self.response_format_instruction(thought_process_id)
+        if response_format_instr:
+            append_messages.append(Message("system", response_format_instr))
+
+        prompt = self.construct_base_prompt(
+            thought_process_id,
+            append_messages=append_messages,
+            reserve_tokens=cycle_instruction_tlength,
+        )

        # ADD user input message ("triggering prompt")
        prompt.append(cycle_instruction_msg)

        return prompt

-    def on_before_think(self, prompt: ChatSequence, instruction: str) -> ChatSequence:
    # This can be expanded to support multiple types of (inter)actions within an agent
    def response_format_instruction(self, thought_process_id: ThoughtProcessID) -> str:
        if thought_process_id != "one-shot":
            raise NotImplementedError(f"Unknown thought process '{thought_process_id}'")

        RESPONSE_FORMAT_WITH_COMMAND = """```ts
        interface Response {
            thoughts: {
                // Thoughts
                text: string;
                reasoning: string;
                // Short markdown-style bullet list that conveys the long-term plan
                plan: string;
                // Constructive self-criticism
                criticism: string;
                // Summary of thoughts to say to the user
                speak: string;
            };
            command: {
                name: string;
                args: Record<string, any>;
            };
        }
        ```"""

        RESPONSE_FORMAT_WITHOUT_COMMAND = """```ts
        interface Response {
            thoughts: {
                // Thoughts
                text: string;
                reasoning: string;
                // Short markdown-style bullet list that conveys the long-term plan
                plan: string;
                // Constructive self-criticism
                criticism: string;
                // Summary of thoughts to say to the user
                speak: string;
            };
        }
        ```"""

        response_format = re.sub(
            r"\n\s+",
            "\n",
            RESPONSE_FORMAT_WITHOUT_COMMAND
            if self.config.openai_functions
            else RESPONSE_FORMAT_WITH_COMMAND,
        )

        use_functions = self.config.openai_functions and self.command_registry.commands
        return (
            f"Respond strictly with JSON{', and also specify a command to use through a function_call' if use_functions else ''}. "
            "The JSON should be compatible with the TypeScript type `Response` from the following:\n"
            f"{response_format}\n"
        )
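For illustration, a model reply that satisfies the `Response` interface above (command form) could look like the following, shown as a Python literal; the command name and args are plausible Auto-GPT values, not taken from this diff.

```python
# Example model reply matching the `Response` interface (illustrative values).
example_response = {
    "thoughts": {
        "text": "I need to write the requested text to a file.",
        "reasoning": "The task asks for a file artifact, so a write command is required.",
        "plan": "- write the file\n- verify its contents\n- finish",
        "criticism": "I should verify the write instead of assuming it succeeded.",
        "speak": "I will write the requested text to a file.",
    },
    "command": {
        "name": "write_to_file",
        "args": {"filename": "output.txt", "text": "Washington"},
    },
}
```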

    def on_before_think(
        self,
        prompt: ChatSequence,
        thought_process_id: ThoughtProcessID,
        instruction: str,
    ) -> ChatSequence:
        """Called after constructing the prompt but before executing it.

        Calls the `on_planning` hook of any enabled and capable plugins, adding their
@@ -238,7 +318,11 @@ def on_before_think(self, prompt: ChatSequence, instruction: str) -> ChatSequence
        return prompt

    def on_response(
-        self, llm_response: ChatModelResponse, prompt: ChatSequence, instruction: str
+        self,
+        llm_response: ChatModelResponse,
+        thought_process_id: ThoughtProcessID,
+        prompt: ChatSequence,
+        instruction: str,
    ) -> tuple[CommandName | None, CommandArgs | None, AgentThoughts]:
        """Called upon receiving a response from the chat model.
@@ -261,7 +345,9 @@
        )  # FIXME: support function calls

        try:
-            return self.parse_and_process_response(llm_response, prompt, instruction)
+            return self.parse_and_process_response(
+                llm_response, thought_process_id, prompt, instruction
+            )
        except SyntaxError as e:
            logger.error(f"Response could not be parsed: {e}")
            # TODO: tune this message
@@ -276,7 +362,11 @@

    @abstractmethod
    def parse_and_process_response(
-        self, llm_response: ChatModelResponse, prompt: ChatSequence, instruction: str
+        self,
+        llm_response: ChatModelResponse,
+        thought_process_id: ThoughtProcessID,
+        prompt: ChatSequence,
+        instruction: str,
    ) -> tuple[CommandName | None, CommandArgs | None, AgentThoughts]:
        """Validate, parse & process the LLM's response.
