Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

SecGPT - LlamaIndex Integration #13127

Open
wants to merge 14 commits into
base: main
Choose a base branch
from
737 changes: 737 additions & 0 deletions llama-index-packs/llama-index-packs-secgpt/SecGPT.ipynb

Large diffs are not rendered by default.

Binary file not shown.
14 changes: 14 additions & 0 deletions llama-index-packs/llama-index-packs-secgpt/permissions.json
@@ -0,0 +1,14 @@
{
"0": {
"metro_hail": {
"exec": null,
"data": null,
"collab": null
},
"quick_ride": {
"exec": null,
"data": null,
"collab": null
}
}
}
6 changes: 6 additions & 0 deletions llama-index-packs/llama-index-packs-secgpt/requirements.txt
@@ -0,0 +1,6 @@
dirtyjson==1.0.8
jsonschema==4.21.1
langchain_core==0.1.45
llama_index==0.10.30
pyseccomp==0.1.2
tldextract==5.1.2
69 changes: 69 additions & 0 deletions llama-index-packs/llama-index-packs-secgpt/src/hub.py
@@ -0,0 +1,69 @@
"""
SecGPT Hub
"""
from typing import (
Optional,
Sequence,
Callable
)


from llama_index.core.agent.react.output_parser import ReActOutputParser
from llama_index.core.callbacks import (
CallbackManager,
)
from llama_index.core.llms.llm import LLM
from llama_index.core.memory.chat_memory_buffer import ChatMemoryBuffer
from llama_index.core.memory.types import BaseMemory
from llama_index.core.settings import Settings
from llama_index.core.tools import BaseTool, ToolOutput

from llama_index.core.tools.tool_spec.base import BaseToolSpec
from llama_index.core.llms import ChatMessage, MessageRole

from .planner import HubPlanner
from .tool_importer import ToolImporter
from .hub_operator import HubOperator

class SecGPTHub:
    """SecGPT Hub.

    Trusted orchestrator for SecGPT: plans a user query with ``HubPlanner``,
    dispatches the plan to isolated app tools via ``HubOperator``, and keeps
    per-conversation chat history in a memory buffer.
    """

    def __init__(
        self,
        tools: Sequence[BaseTool],
        tool_specs: Sequence[BaseToolSpec],
        llm: Optional[LLM] = None,
        memory: Optional[BaseMemory] = None,
        output_parser: Optional[ReActOutputParser] = None,
        verbose: bool = False,
        handle_reasoning_failure_fn: Optional[
            Callable[[CallbackManager, Exception], ToolOutput]
        ] = None,
        user_id: Optional[str] = "0",
    ) -> None:
        """Init params.

        Args:
            tools: Tools available to the hub for executing plan steps.
            tool_specs: Tool specifications registered with the importer.
            llm: LLM used for planning; falls back to ``Settings.llm``.
            memory: Chat memory; defaults to a fresh ``ChatMemoryBuffer``.
            output_parser: Optional ReAct output parser.
                NOTE(review): stored but not used anywhere in this class.
            verbose: Verbosity flag.
                NOTE(review): stored but not used anywhere in this class.
            handle_reasoning_failure_fn: Optional reasoning-failure callback.
                NOTE(review): stored but not used anywhere in this class.
            user_id: Identifier keying per-user permissions (see
                permissions.json); defaults to "0".
        """
        self.llm = llm or Settings.llm
        self.memory = memory or ChatMemoryBuffer.from_defaults(
            chat_history=[], llm=self.llm
        )
        self.output_parser = output_parser
        self.verbose = verbose
        self.handle_reasoning_failure_fn = handle_reasoning_failure_fn
        self.user_id = user_id

        # Collaborators: planner turns a query into a plan; the importer
        # exposes tool metadata; the operator executes the plan under the
        # user's permission scope.
        self.planner = HubPlanner(self.llm)
        self.tool_importer = ToolImporter(tools, tool_specs)
        self.hub_operator = HubOperator(self.tool_importer, self.user_id)

    def chat(
        self,
        query: str,
    ) -> str:
        """Answer ``query`` by planning and executing against available tools.

        The prior chat history is fed to the planner as context; both the
        user query and the final response are appended to memory.

        Args:
            query: The user's natural-language request.

        Returns:
            The response produced by the hub operator.
        """
        memory_content = self.memory.get()
        self.memory.put(ChatMessage(role=MessageRole.USER, content=query))
        tool_info = self.tool_importer.get_tool_info()
        plan = self.planner.plan_generate(query, tool_info, memory_content)
        response = self.hub_operator.run(query, plan)
        self.memory.put(ChatMessage(role=MessageRole.CHATBOT, content=response))

        return response