AutoGPT: Convert dataclasses to Pydantic models
Pwuts committed Sep 18, 2023
1 parent d8f1d34 commit 6b22abd
Showing 15 changed files with 164 additions and 155 deletions.
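Every file in this commit follows the same conversion pattern: drop the @dataclass decorator, inherit from pydantic.BaseModel, replace dataclasses.field with pydantic.Field, and pass constructor arguments by keyword. A minimal before/after sketch of that pattern (the ExampleResult class and its fields are hypothetical, not taken from the diff):

# Before: a plain dataclass; positional construction works, no validation
from dataclasses import dataclass, field

@dataclass
class ExampleResult:
    outputs: str = ""
    tags: list[str] = field(default_factory=list)

# After: a Pydantic model; same fields, but validated and keyword-only
from pydantic import BaseModel, Field

class ExampleResult(BaseModel):
    outputs: str = ""
    tags: list[str] = Field(default_factory=list)

result = ExampleResult(outputs="done")  # construction must use keywords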
6 changes: 3 additions & 3 deletions autogpts/autogpt/autogpt/agents/agent.py
@@ -147,7 +147,7 @@ def execute(
         result: ActionResult

         if command_name == "human_feedback":
-            result = ActionInterruptedByHuman(user_input)
+            result = ActionInterruptedByHuman(feedback=user_input)
             self.message_history.add(
                 "user",
                 "I interrupted the execution of the command you proposed "
@@ -185,9 +185,9 @@ def execute(
                 )
                 self.context.add(context_item)

-            result = ActionSuccessResult(return_value)
+            result = ActionSuccessResult(outputs=return_value)
         except AgentException as e:
-            result = ActionErrorResult(e.message, e)
+            result = ActionErrorResult(reason=e.message, error=e)

         result_tlength = count_string_tokens(str(result), self.llm.name)
         history_tlength = count_string_tokens(
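The call-site changes above are forced by a behavioral difference between the two: a dataclass accepts positional arguments, while pydantic.BaseModel's __init__ (in Pydantic v1, which the .dict() and update_forward_refs() calls elsewhere in this commit indicate) accepts keyword arguments only. A quick sketch with a hypothetical Result model, not from the diff:

from pydantic import BaseModel

class Result(BaseModel):
    reason: str

Result(reason="oops")  # OK
Result("oops")         # TypeError: __init__() takes exactly 1 positional argument (2 given)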
6 changes: 3 additions & 3 deletions autogpts/autogpt/autogpt/agents/base.py
@@ -16,7 +16,7 @@
 from autogpt.llm.providers.openai import OPEN_AI_CHAT_MODELS, get_openai_command_specs
 from autogpt.llm.utils import count_message_tokens, create_chat_completion
 from autogpt.memory.message_history import MessageHistory
-from autogpt.models.agent_actions import ActionHistory, ActionResult
+from autogpt.models.agent_actions import EpisodicActionHistory, ActionResult
 from autogpt.prompts.generator import PromptGenerator
 from autogpt.prompts.prompt import DEFAULT_TRIGGERING_PROMPT

@@ -90,10 +90,10 @@ def __init__(
             defaults to 75% of `llm.max_tokens`.
         """

-        self.event_history = ActionHistory()
+        self.event_history = EpisodicActionHistory()

         self.message_history = MessageHistory(
-            self.llm,
+            model=self.llm,
             max_summary_tlength=summary_max_tlength or self.send_token_limit // 6,
         )
6 changes: 3 additions & 3 deletions autogpts/autogpt/autogpt/agents/features/watchdog.py
@@ -3,7 +3,7 @@
 import logging
 from contextlib import ExitStack

-from autogpt.models.agent_actions import ActionHistory
+from autogpt.models.agent_actions import EpisodicActionHistory

 from ..base import BaseAgent

@@ -16,7 +16,7 @@ class WatchdogMixin:
     looping, the watchdog will switch from the FAST_LLM to the SMART_LLM and re-think.
     """

-    event_history: ActionHistory
+    event_history: EpisodicActionHistory

     def __init__(self, **kwargs) -> None:
         # Initialize other bases first, because we need the event_history from BaseAgent
@@ -38,7 +38,7 @@ def think(self, *args, **kwargs) -> BaseAgent.ThoughtProcessOutput:
            and self.config.fast_llm != self.config.smart_llm
        ):
            # Detect repetitive commands
-           previous_cycle = self.event_history.cycles[self.event_history.cursor - 1]
+           previous_cycle = self.event_history.episodes[self.event_history.cursor - 1]
            if (
                command_name == previous_cycle.action.name
                and command_args == previous_cycle.action.args
12 changes: 6 additions & 6 deletions autogpts/autogpt/autogpt/agents/planning_agent.py
@@ -23,7 +23,7 @@
 )
 from autogpt.models.agent_actions import (
     ActionErrorResult,
-    ActionHistory,
+    EpisodicActionHistory,
     ActionInterruptedByHuman,
     ActionResult,
     ActionSuccessResult,
@@ -69,7 +69,7 @@ def __init__(
         self.log_cycle_handler = LogCycleHandler()
         """LogCycleHandler for structured debug logging."""

-        self.action_history = ActionHistory()
+        self.action_history = EpisodicActionHistory()

         self.plan: list[str] = []
         """List of steps that the Agent plans to take"""
@@ -229,7 +229,7 @@ def on_before_think(self, *args, **kwargs) -> ChatSequence:
             self.ai_config.ai_name,
             self.created_at,
             self.cycle_count,
-            self.action_history.cycles,
+            self.action_history.episodes,
             "action_history.json",
         )
         self.log_cycle_handler.log_cycle(
@@ -250,7 +250,7 @@ def execute(
         result: ActionResult

         if command_name == "human_feedback":
-            result = ActionInterruptedByHuman(user_input)
+            result = ActionInterruptedByHuman(feedback=user_input)
             self.log_cycle_handler.log_cycle(
                 self.ai_config.ai_name,
                 self.created_at,
@@ -279,9 +279,9 @@ def execute(
                 self.context.add(return_value[1])
                 return_value = return_value[0]

-            result = ActionSuccessResult(return_value)
+            result = ActionSuccessResult(outputs=return_value)
         except AgentException as e:
-            result = ActionErrorResult(e.message, e)
+            result = ActionErrorResult(reason=e.message, error=e)

         result_tlength = count_string_tokens(str(result), self.llm.name)
         memory_tlength = count_string_tokens(
10 changes: 8 additions & 2 deletions autogpts/autogpt/autogpt/commands/file_context.py
@@ -67,7 +67,10 @@ def open_file(file_path: Path, agent: Agent) -> tuple[str, FileContextItem]:

     file_path = relative_file_path or file_path

-    file = FileContextItem(file_path, agent.workspace.root)
+    file = FileContextItem(
+        file_path_in_workspace=file_path,
+        workspace_path=agent.workspace.root,
+    )
     if file in agent_context:
         raise DuplicateOperationError(f"The file {file_path} is already open")
@@ -114,7 +117,10 @@ def open_folder(path: Path, agent: Agent) -> tuple[str, FolderContextItem]:

     path = relative_path or path

-    folder = FolderContextItem(path, agent.workspace.root)
+    folder = FolderContextItem(
+        path_in_workspace=path,
+        workspace_path=agent.workspace.root,
+    )
     if folder in agent_context:
         raise DuplicateOperationError(f"The folder {path} is already open")
22 changes: 10 additions & 12 deletions autogpts/autogpt/autogpt/config/ai_config.py
@@ -1,14 +1,13 @@
 """A module that contains the AIConfig class object that contains the configuration"""
 from __future__ import annotations

-from dataclasses import dataclass, field
 from pathlib import Path
+from pydantic import BaseModel, Field

 import yaml


-@dataclass
-class AIConfig:
+class AIConfig(BaseModel):
     """
     A class object that contains the configuration information for the AI
@@ -21,7 +20,7 @@ class AIConfig:

     ai_name: str = ""
     ai_role: str = ""
-    ai_goals: list[str] = field(default_factory=list[str])
+    ai_goals: list[str] = Field(default_factory=list[str])
     api_budget: float = 0.0

     @staticmethod
@@ -53,7 +52,12 @@ def load(ai_settings_file: str | Path) -> "AIConfig":
         ]
         api_budget = config_params.get("api_budget", 0.0)

-        return AIConfig(ai_name, ai_role, ai_goals, api_budget)
+        return AIConfig(
+            ai_name=ai_name,
+            ai_role=ai_role,
+            ai_goals=ai_goals,
+            api_budget=api_budget
+        )

     def save(self, ai_settings_file: str | Path) -> None:
         """
@@ -66,11 +70,5 @@ def save(self, ai_settings_file: str | Path) -> None:
             None
         """

-        config = {
-            "ai_name": self.ai_name,
-            "ai_role": self.ai_role,
-            "ai_goals": self.ai_goals,
-            "api_budget": self.api_budget,
-        }
         with open(ai_settings_file, "w", encoding="utf-8") as file:
-            yaml.dump(config, file, allow_unicode=True)
+            yaml.dump(self.dict(), file, allow_unicode=True)
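With the conversion, save no longer needs the hand-built mapping: Pydantic's .dict() (the v1 API) returns the same field-name-to-value dict that yaml.dump expects. A sketch of the round-trip, using an abbreviated AIConfig and hypothetical values:

import yaml
from pydantic import BaseModel

class AIConfig(BaseModel):  # abbreviated: two of the four fields above
    ai_name: str = ""
    api_budget: float = 0.0

config = AIConfig(ai_name="ResearchGPT", api_budget=1.5)
print(yaml.dump(config.dict(), allow_unicode=True))
# ai_name: ResearchGPT
# api_budget: 1.5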
5 changes: 2 additions & 3 deletions autogpts/autogpt/autogpt/config/ai_directives.py
@@ -1,18 +1,17 @@
 from __future__ import annotations

 import logging
-from dataclasses import dataclass

 import yaml
+from pydantic import BaseModel

 from autogpt.logs.helpers import request_user_double_check
 from autogpt.utils import validate_yaml_file

 logger = logging.getLogger(__name__)


-@dataclass
-class AIDirectives:
+class AIDirectives(BaseModel):
     """An object that contains the basic directives for the AI prompt.

     Attributes:
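Unlike the dataclass it replaces, the BaseModel version validates (and where possible coerces) field values on construction, a useful safety net for a class populated from user-edited YAML. A sketch with hypothetical fields, since the diff truncates the attribute list:

from pydantic import BaseModel, ValidationError

class AIDirectives(BaseModel):  # hypothetical fields for illustration
    constraints: list[str]
    resources: list[str]

try:
    AIDirectives(constraints="not a list", resources=[])
except ValidationError as e:
    print(e)  # reports that constraints is not a valid list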
74 changes: 48 additions & 26 deletions autogpts/autogpt/autogpt/llm/base.py
@@ -1,12 +1,9 @@
 from __future__ import annotations
+import json

-from copy import deepcopy
-from dataclasses import dataclass, field
 from math import ceil, floor
-from typing import TYPE_CHECKING, Literal, Optional, Type, TypedDict, TypeVar, overload
-
-if TYPE_CHECKING:
-    from autogpt.llm.providers.openai import OpenAIFunctionCall
+from pydantic import BaseModel, Field
+from typing import Any, Literal, Optional, Type, TypedDict, TypeVar, overload

 MessageRole = Literal["system", "user", "assistant", "function"]
 MessageType = Literal["ai_response", "action_result"]
@@ -31,20 +28,30 @@ class FunctionCallDict(TypedDict):
     arguments: str


-@dataclass
-class Message:
+class Message(BaseModel):
     """OpenAI Message object containing a role and the message content"""

     role: MessageRole
     content: str
-    type: MessageType | None = None
+    type: Optional[MessageType]
+
+    def __init__(
+        self,
+        role: MessageRole,
+        content: str,
+        type: Optional[MessageType] = None
+    ):
+        super().__init__(
+            role=role,
+            content=content,
+            type=type,
+        )

     def raw(self) -> MessageDict:
         return {"role": self.role, "content": self.content}


-@dataclass
-class ModelInfo:
+class ModelInfo(BaseModel):
     """Struct for model information.

     Would be lovely to eventually get this directly from APIs, but needs to be scraped from
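The explicit __init__ added to Message exists to preserve positional construction: BaseModel alone would make Message("user", "hello") a TypeError, and the codebase constructs messages positionally throughout. A minimal sketch of the pattern (simplified str annotations instead of the Literal types above):

from typing import Optional
from pydantic import BaseModel

class Message(BaseModel):
    role: str
    content: str
    type: Optional[str] = None

    # Re-introduce positional parameters on top of BaseModel's keyword-only __init__
    def __init__(self, role: str, content: str, type: Optional[str] = None):
        super().__init__(role=role, content=content, type=type)

assert Message("user", "hello").content == "hello"

The tradeoff is a little boilerplate in exchange for leaving every existing call site untouched.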
@@ -56,26 +63,22 @@ class ModelInfo:
     prompt_token_cost: float


-@dataclass
 class CompletionModelInfo(ModelInfo):
     """Struct for generic completion model information."""

     completion_token_cost: float


-@dataclass
 class ChatModelInfo(CompletionModelInfo):
     """Struct for chat model information."""

     supports_functions: bool = False


-@dataclass
 class TextModelInfo(CompletionModelInfo):
     """Struct for text completion model information."""


-@dataclass
 class EmbeddingModelInfo(ModelInfo):
     """Struct for embedding model information."""
@@ -86,12 +89,11 @@ class EmbeddingModelInfo(ModelInfo):
 TChatSequence = TypeVar("TChatSequence", bound="ChatSequence")


-@dataclass
-class ChatSequence:
+class ChatSequence(BaseModel):
     """Utility container for a chat sequence"""

     model: ChatModelInfo
-    messages: list[Message] = field(default_factory=list[Message])
+    messages: list[Message] = Field(default_factory=list[Message])

     @overload
     def __getitem__(self, key: int) -> Message:
@@ -103,7 +105,7 @@ def __getitem__(self: TChatSequence, key: slice) -> TChatSequence:

     def __getitem__(self: TChatSequence, key: int | slice) -> Message | TChatSequence:
         if isinstance(key, slice):
-            copy = deepcopy(self)
+            copy = self.copy(deep=True)
             copy.messages = self.messages[key]
             return copy
         return self.messages[key]
@@ -141,7 +143,7 @@ def for_model(
     ) -> TChatSequence:
         from autogpt.llm.providers.openai import OPEN_AI_CHAT_MODELS

-        if not model_name in OPEN_AI_CHAT_MODELS:
+        if model_name not in OPEN_AI_CHAT_MODELS:
             raise ValueError(f"Unknown chat model '{model_name}'")

         return cls(
@@ -175,23 +177,43 @@ def separator(text: str):
     """


-@dataclass
-class LLMResponse:
+class LLMResponse(BaseModel):
     """Standard response struct for a response from an LLM model."""

     model_info: ModelInfo


-@dataclass
 class EmbeddingModelResponse(LLMResponse):
     """Standard response struct for a response from an embedding model."""

-    embedding: list[float] = field(default_factory=list)
+    embedding: list[float] = Field(default_factory=list)


-@dataclass
 class ChatModelResponse(LLMResponse):
     """Standard response struct for a response from a chat LLM."""

     content: Optional[str]
-    function_call: Optional[OpenAIFunctionCall]
+    function_call: Optional[LLMFunctionCall]
+
+
+class LLMFunctionCall(BaseModel):
+    """Represents a function call as generated by an OpenAI model
+
+    Attributes:
+        name: the name of the function that the LLM wants to call
+        arguments: a stringified JSON object (unverified) containing `arg: value` pairs
+    """
+
+    name: str
+    arguments: dict[str, Any] = {}
+
+    @staticmethod
+    def parse(raw: FunctionCallDict):
+        return LLMFunctionCall(
+            name=raw["name"],
+            arguments=json.loads(raw["arguments"]),
+        )
+
+
+# Complete model initialization; necessary because of order of definition
+ChatModelResponse.update_forward_refs()
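The update_forward_refs() call at the bottom is needed because ChatModelResponse's function_call annotation names LLMFunctionCall before that class is defined; with from __future__ import annotations the annotation is stored as a string, and Pydantic v1 resolves it only once told the name exists. A condensed sketch of the same ordering problem, with hypothetical class names:

from __future__ import annotations
from typing import Optional
from pydantic import BaseModel

class Response(BaseModel):
    function_call: Optional[FunctionCall]  # forward reference; FunctionCall not defined yet

class FunctionCall(BaseModel):
    name: str

Response.update_forward_refs()  # resolve the string annotation now that FunctionCall exists
print(Response(function_call=FunctionCall(name="search")))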