revert core changes
nerdai committed May 1, 2024
1 parent 1eb452c commit a225fbf
Showing 6 changed files with 5 additions and 274 deletions.
@@ -305,7 +305,7 @@ def run_step(self, step: TaskStep, task: Task, **kwargs: Any) -> TaskStepOutput:
         )

         agent_response = AgentChatResponse(
-            response=response.message.content, sources=task.extra_state["sources"]
+            response=str(response), sources=task.extra_state["sources"]
         )

         return TaskStepOutput(
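Note: the revert swaps the direct response.message.content access back to str(response). A plausible reason is that a message's content is typically Optional and can be None, while stringifying the response always yields a str for AgentChatResponse.response. A minimal sketch with hypothetical stand-in classes (not the llama-index types) of why that is more defensive:

# Hypothetical stand-ins (not the llama-index classes) showing why str(response)
# is safer than response.message.content when content may be None.
from dataclasses import dataclass
from typing import Optional

@dataclass
class Message:
    content: Optional[str] = None

@dataclass
class Response:
    message: Message

    def __str__(self) -> str:
        # __str__ can normalize missing content to an empty string
        return self.message.content or ""

resp = Response(message=Message(content=None))
print(repr(str(resp)))             # '' -- always a str
print(repr(resp.message.content))  # None -- would break a field typed as str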
Empty file.
243 changes: 0 additions & 243 deletions llama-index-core/llama_index/core/agent/introspective/step.py

This file was deleted.

23 changes: 4 additions & 19 deletions llama-index-core/llama_index/core/agent/runner/base.py
@@ -171,9 +171,6 @@ class AgentState(BaseModel):
         default_factory=dict, description="Task dictionary."
     )

-    class Config:
-        arbitrary_types_allowed = True
-
     def get_task(self, task_id: str) -> Task:
         """Get task state."""
         return self.task_dict[task_id].task
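Note: arbitrary_types_allowed is the pydantic (v1-style) switch that lets a model declare fields of plain, non-pydantic types; dropping it here suggests AgentState's remaining fields validate natively. A minimal sketch of the behavior, assuming pydantic's v1 API:

# Sketch of what the deleted Config enabled, using pydantic's v1 API.
from pydantic.v1 import BaseModel  # plain `from pydantic import ...` on pydantic 1.x

class Engine:  # an arbitrary, non-pydantic class
    pass

class WithConfig(BaseModel):
    engine: Engine

    class Config:
        arbitrary_types_allowed = True

WithConfig(engine=Engine())  # ok: arbitrary type accepted, checked via isinstance

try:
    class WithoutConfig(BaseModel):
        engine: Engine  # no validator known for Engine
except RuntimeError as err:
    print(err)  # error message points at arbitrary_types_allowed in Config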
@@ -297,9 +294,7 @@ def reset(self) -> None:
         self.memory.reset()
         self.state.reset()

-    def create_task(
-        self, input: str, parent_task_id: Optional[str] = None, **kwargs: Any
-    ) -> Task:
+    def create_task(self, input: str, **kwargs: Any) -> Task:
         """Create task."""
         if not self.init_task_state_kwargs:
             extra_state = kwargs.pop("extra_state", {})
@@ -314,7 +309,6 @@ def create_task(
         callback_manager = kwargs.pop("callback_manager", self.callback_manager)
         task = Task(
             input=input,
-            parent_task_id=parent_task_id,
             memory=self.memory,
             extra_state=extra_state,
             callback_manager=callback_manager,
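Note: under the reverted signature, create_task takes just the input string plus keyword overrides such as extra_state, and the Task it builds carries no parent link. An illustrative-only sketch of that call shape (make_task is a hypothetical stand-in for the runner's create_task, not llama-index code):

# Illustrative-only sketch of the reverted call shape; make_task is a
# hypothetical stand-in for the runner's create_task, not llama-index code.
from typing import Any, Dict

def make_task(input: str, **kwargs: Any) -> Dict[str, Any]:
    extra_state = kwargs.pop("extra_state", {})  # mirrors the diff's kwargs.pop
    return {"input": input, "extra_state": extra_state, **kwargs}

print(make_task("Summarize the report"))                            # minimal form
print(make_task("Summarize the report", extra_state={"sources": []}))  # with overrides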
@@ -538,14 +532,13 @@ def _chat(
         chat_history: Optional[List[ChatMessage]] = None,
         tool_choice: Union[str, dict] = "auto",
         mode: ChatResponseMode = ChatResponseMode.WAIT,
-        parent_task_id: Optional[str] = None,
     ) -> AGENT_CHAT_RESPONSE_TYPE:
         """Chat with step executor."""
         dispatch_event = dispatcher.get_dispatch_event()

         if chat_history is not None:
             self.memory.set(chat_history)
-        task = self.create_task(message, parent_task_id)
+        task = self.create_task(message)

         result_output = None
         dispatch_event(AgentChatWithStepStartEvent())
@@ -576,24 +569,20 @@ async def _achat(
         chat_history: Optional[List[ChatMessage]] = None,
         tool_choice: Union[str, dict] = "auto",
         mode: ChatResponseMode = ChatResponseMode.WAIT,
-        parent_task_id: Optional[str] = None,
     ) -> AGENT_CHAT_RESPONSE_TYPE:
         """Chat with step executor."""
         dispatch_event = dispatcher.get_dispatch_event()

         if chat_history is not None:
             self.memory.set(chat_history)
-        task = self.create_task(message, parent_task_id)
+        task = self.create_task(message)

         result_output = None
         dispatch_event(AgentChatWithStepStartEvent())
         while True:
             # pass step queue in as argument, assume step executor is stateless
             cur_step_output = await self._arun_step(
-                task.task_id,
-                mode=mode,
-                tool_choice=tool_choice,
-                parent_task_id=parent_task_id,
+                task.task_id, mode=mode, tool_choice=tool_choice
             )

             if cur_step_output.is_last:
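Note: the while True loop above is untouched by the revert; the runner keeps executing steps until one reports is_last. A stripped-down, runnable sketch of that drive loop, with run_step as a dummy stand-in for _arun_step:

# Stripped-down sketch of the step loop; run_step is a dummy stand-in
# for the agent's _arun_step.
import asyncio

async def run_step(step_id: int) -> tuple:
    """Return (output, is_last); finishes after three steps."""
    return f"output {step_id}", step_id >= 2

async def chat_loop() -> str:
    step_id = 0
    while True:
        output, is_last = await run_step(step_id)
        if is_last:
            return output
        step_id += 1

print(asyncio.run(chat_loop()))  # output 2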
@@ -617,7 +606,6 @@ def chat(
         message: str,
         chat_history: Optional[List[ChatMessage]] = None,
         tool_choice: Optional[Union[str, dict]] = None,
-        parent_task_id: Optional[str] = None,
     ) -> AgentChatResponse:
         # override tool choice is provided as input.
         if tool_choice is None:
@@ -631,7 +619,6 @@ def chat(
             chat_history=chat_history,
             tool_choice=tool_choice,
             mode=ChatResponseMode.WAIT,
-            parent_task_id=parent_task_id,
         )
         assert isinstance(chat_response, AgentChatResponse)
         e.on_end(payload={EventPayload.RESPONSE: chat_response})
@@ -644,7 +631,6 @@ async def achat(
         message: str,
         chat_history: Optional[List[ChatMessage]] = None,
         tool_choice: Optional[Union[str, dict]] = None,
-        parent_task_id: Optional[str] = None,
     ) -> AgentChatResponse:
         # override tool choice is provided as input.
         if tool_choice is None:
@@ -658,7 +644,6 @@ async def achat(
             chat_history=chat_history,
             tool_choice=tool_choice,
             mode=ChatResponseMode.WAIT,
-            parent_task_id=parent_task_id,
         )
         assert isinstance(chat_response, AgentChatResponse)
         e.on_end(payload={EventPayload.RESPONSE: chat_response})
1 change: 0 additions & 1 deletion llama-index-core/llama_index/core/agent/types.py
@@ -161,7 +161,6 @@ class Config:
         default_factory=lambda: str(uuid.uuid4()), type=str, description="Task ID"
     )
     input: str = Field(..., type=str, description="User input")
-    parent_task_id: Optional[str] = Field(default=None, description="Parent Task ID")

     # NOTE: this is state that may be modified throughout the course of execution of the task
     memory: BaseMemory = Field(
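Note: with parent_task_id reverted out, a Task is identified only by its own auto-generated task_id. A minimal sketch of the reverted field shape (not the real llama-index Task, which also carries memory, extra_state, and a callback manager), assuming the pydantic v1 API that llama-index bridged to at the time:

# Minimal sketch of the reverted Task shape; not the real llama-index model.
import uuid
from pydantic.v1 import BaseModel, Field

class Task(BaseModel):
    task_id: str = Field(default_factory=lambda: str(uuid.uuid4()), description="Task ID")
    input: str = Field(..., description="User input")

t = Task(input="What is 2 + 2?")
print(t.task_id)  # fresh UUID string per instance; no parent linkage anymore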
10 changes: 0 additions & 10 deletions llama-index-core/llama_index/core/agent/utils.py
@@ -13,13 +13,3 @@ def add_user_step_to_memory(
     memory.put(user_message)
     if verbose:
         print(f"Added user message to memory: {step.input}")
-
-
-def add_assistant_step_to_memory(
-    step: TaskStep, memory: BaseMemory, verbose: bool = False
-) -> None:
-    """Add user step to memory."""
-    asst_message = ChatMessage(content=step.input, role=MessageRole.ASSISTANT)
-    memory.put(asst_message)
-    if verbose:
-        print(f"Added assistant message to memory: {step.input}")
