Merge branch 'master' into add_website_memory

maiko authored Apr 14, 2023
2 parents a678186 + f6f4f1f · commit 483abb1
Showing 17 changed files with 506 additions and 187 deletions.
23 changes: 23 additions & 0 deletions .devcontainer/Dockerfile
@@ -0,0 +1,23 @@
# [Choice] Python version (use -bullseye variants on local arm64/Apple Silicon): 3, 3.10, 3.9, 3.8, 3.7, 3.6, 3-bullseye, 3.10-bullseye, 3.9-bullseye, 3.8-bullseye, 3.7-bullseye, 3.6-bullseye, 3-buster, 3.10-buster, 3.9-buster, 3.8-buster, 3.7-buster, 3.6-buster
ARG VARIANT=3-bullseye
FROM python:${VARIANT}

RUN apt-get update && export DEBIAN_FRONTEND=noninteractive \
# Remove imagemagick due to https://security-tracker.debian.org/tracker/CVE-2019-10131
&& apt-get purge -y imagemagick imagemagick-6-common

# Temporary: Upgrade python packages due to https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2022-40897
# They are installed by the base image (python) which does not have the patch.
RUN python3 -m pip install --upgrade setuptools

# [Optional] If your pip requirements rarely change, uncomment this section to add them to the image.
# COPY requirements.txt /tmp/pip-tmp/
# RUN pip3 --disable-pip-version-check --no-cache-dir install -r /tmp/pip-tmp/requirements.txt \
# && rm -rf /tmp/pip-tmp

# [Optional] Uncomment this section to install additional OS packages.
# RUN apt-get update && export DEBIAN_FRONTEND=noninteractive \
# && apt-get -y install --no-install-recommends <your-package-list-here>

# [Optional] Uncomment this line to install global node packages.
# RUN su vscode -c "source /usr/local/share/nvm/nvm.sh && npm install -g <your-package-here>" 2>&1
39 changes: 39 additions & 0 deletions .devcontainer/devcontainer.json
@@ -0,0 +1,39 @@
{
"build": {
"dockerfile": "./Dockerfile",
"context": "."
},
"features": {
"ghcr.io/devcontainers/features/common-utils:2": {
"installZsh": "true",
"username": "vscode",
"userUid": "1000",
"userGid": "1000",
"upgradePackages": "true"
},
"ghcr.io/devcontainers/features/python:1": "none",
"ghcr.io/devcontainers/features/node:1": "none",
"ghcr.io/devcontainers/features/git:1": {
"version": "latest",
"ppa": "false"
}
},
// Configure tool-specific properties.
"customizations": {
// Configure properties specific to VS Code.
"vscode": {
// Set *default* container specific settings.json values on container create.
"settings": {
"python.defaultInterpreterPath": "/usr/local/bin/python"
}
}
},
// Use 'forwardPorts' to make a list of ports inside the container available locally.
// "forwardPorts": [],

// Use 'postCreateCommand' to run commands after the container is created.
// "postCreateCommand": "pip3 install --user -r requirements.txt",

// Set `remoteUser` to `root` to connect as root instead. More info: https://aka.ms/vscode-remote/containers/non-root.
"remoteUser": "vscode"
}
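
To try this dev container, it should be enough to install the VS Code Dev Containers extension (linked under Requirements in the README below) and run "Dev Containers: Reopen in Container" from the command palette; VS Code then builds the image from the Dockerfile above.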
6 changes: 6 additions & 0 deletions .env.template
@@ -9,6 +9,8 @@ BROWSE_CHUNK_MAX_LENGTH=8192
BROWSE_SUMMARY_MAX_TOKEN=300
# USER_AGENT - Define the user-agent used by the requests library to browse website (string)
# USER_AGENT="Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_4) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/83.0.4103.97 Safari/537.36"
# AI_SETTINGS_FILE - Specifies which AI Settings file to use (defaults to ai_settings.yaml)
AI_SETTINGS_FILE=ai_settings.yaml

################################################################################
### LLM PROVIDER
@@ -108,6 +110,10 @@ CUSTOM_SEARCH_ENGINE_ID=your-custom-search-engine-id
# USE_MAC_OS_TTS - Use Mac OS TTS or not (Default: False)
USE_MAC_OS_TTS=False

### STREAMELEMENTS
# USE_BRIAN_TTS - Use Brian TTS or not (Default: False)
USE_BRIAN_TTS=False

### ELEVENLABS
# ELEVENLABS_API_KEY - Eleven Labs API key (Example: my-elevenlabs-api-key)
# ELEVENLABS_VOICE_1_ID - Eleven Labs voice 1 ID (Example: my-voice-id-1)
2 changes: 2 additions & 0 deletions .gitignore
@@ -16,6 +16,8 @@ last_run_ai_settings.yaml
.idea/*
auto-gpt.json
log.txt
log-ingestion.txt
logs

# Coverage reports
.coverage
28 changes: 22 additions & 6 deletions Dockerfile
@@ -1,7 +1,23 @@
# Use an official Python base image from the Docker Hub
FROM python:3.11-slim
ENV PIP_NO_CACHE_DIR=yes
WORKDIR /app
COPY requirements.txt .
RUN pip install -r requirements.txt
COPY scripts/ .
ENTRYPOINT ["python", "main.py"]

# Set environment variables
ENV PIP_NO_CACHE_DIR=yes \
PYTHONUNBUFFERED=1 \
PYTHONDONTWRITEBYTECODE=1

# Create a non-root user and set permissions
RUN useradd --create-home appuser
WORKDIR /home/appuser
RUN chown appuser:appuser /home/appuser
USER appuser

# Copy the requirements.txt file and install the requirements
COPY --chown=appuser:appuser requirements.txt .
RUN pip install --no-cache-dir --user -r requirements.txt

# Copy the application files
COPY --chown=appuser:appuser scripts/ .

# Set the entrypoint
ENTRYPOINT ["python", "main.py"]
86 changes: 74 additions & 12 deletions README.md
@@ -46,6 +46,7 @@ Your support is greatly appreciated
- [Setting up environment variables](#setting-up-environment-variables-1)
- [Setting Your Cache Type](#setting-your-cache-type)
- [View Memory Usage](#view-memory-usage)
- [🧠 Memory pre-seeding](#memory-pre-seeding)
- [💀 Continuous Mode ⚠️](#-continuous-mode-️)
- [GPT3.5 ONLY Mode](#gpt35-only-mode)
- [🖼 Image Generation](#-image-generation)
@@ -65,13 +66,16 @@ Your support is greatly appreciated

## 📋 Requirements

- [Python 3.8 or later](https://www.tutorialspoint.com/how-to-install-python-in-windows)
- Environments (choose one):
  - [vscode + devcontainer](https://marketplace.visualstudio.com/items?itemName=ms-vscode-remote.remote-containers): preconfigured in the .devcontainer folder and ready to use directly
  - [Python 3.8 or later](https://www.tutorialspoint.com/how-to-install-python-in-windows)
- [OpenAI API key](https://platform.openai.com/account/api-keys)
- [PINECONE API key](https://www.pinecone.io/)


Optional:

- [ElevenLabs Key](https://elevenlabs.io/) (If you want the AI to speak)
- [PINECONE API key](https://www.pinecone.io/) (If you want Pinecone backed memory)
- ElevenLabs Key (If you want the AI to speak)

## 💾 Installation

@@ -122,8 +126,8 @@ pip install -r requirements.txt
python scripts/main.py
```

2. After each of AUTO-GPT's actions, type "NEXT COMMAND" to authorise them to continue.
3. To exit the program, type "exit" and press Enter.
2. After each action, enter 'y' to authorise the command, 'y -N' to run N continuous commands, 'n' to exit the program, or enter additional feedback for the AI.


### Logs

@@ -134,6 +138,14 @@ To output debug logs:
```
python scripts/main.py --debug
```
### Command Line Arguments
Here are some common arguments you can use when running Auto-GPT:
> Replace anything in angled brackets (<>) with a value you want to specify
* `python scripts/main.py --help` to see a list of all available command line arguments.
* `python scripts/main.py --ai-settings <filename>` to run Auto-GPT with a different AI Settings file.
* `python scripts/main.py --use-memory <memory-backend>` to specify one of four memory backends: `local`, `redis`, `pinecone`, or `no_memory`.

> **NOTE**: There are shorthands for some of these flags, for example `-m` for `--use-memory`. Use `python scripts/main.py --help` for more information.
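
For instance, a run that combines several of these flags might look like this (a sketch — the settings filename is illustrative):

```
python scripts/main.py --ai-settings my_settings.yaml -m pinecone --debug
```
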
## 🗣️ Speech Mode

@@ -154,9 +166,10 @@ To use the `google_official_search` command, you need to set up your Google API
4. Go to the [APIs & Services Dashboard](https://console.cloud.google.com/apis/dashboard) and click "Enable APIs and Services". Search for "Custom Search API" and click on it, then click "Enable".
5. Go to the [Credentials](https://console.cloud.google.com/apis/credentials) page and click "Create Credentials". Choose "API Key".
6. Copy the API key and set it as an environment variable named `GOOGLE_API_KEY` on your machine. See setting up environment variables below.
7. Go to the [Custom Search Engine](https://cse.google.com/cse/all) page and click "Add".
8. Set up your search engine by following the prompts. You can choose to search the entire web or specific sites.
9. Once you've created your search engine, click on "Control Panel" and then "Basics". Copy the "Search engine ID" and set it as an environment variable named `CUSTOM_SEARCH_ENGINE_ID` on your machine. See setting up environment variables below.
7. [Enable](https://console.developers.google.com/apis/api/customsearch.googleapis.com) the Custom Search API on your project. (You might need to wait a few minutes for it to propagate.)
8. Go to the [Custom Search Engine](https://cse.google.com/cse/all) page and click "Add".
9. Set up your search engine by following the prompts. You can choose to search the entire web or specific sites.
10. Once you've created your search engine, click on "Control Panel" and then "Basics". Copy the "Search engine ID" and set it as an environment variable named `CUSTOM_SEARCH_ENGINE_ID` on your machine. See setting up environment variables below.

_Remember that your free daily custom search quota allows only up to 100 searches. To increase this limit, you need to assign a billing account to the project to benefit from up to 10K daily searches._

@@ -225,7 +238,10 @@ Pinecone enables the storage of vast amounts of vector-based memory, allowing fo

### Setting up environment variables

Simply set them in the `.env` file.
In the `.env` file set:
- `PINECONE_API_KEY`
- `PINECONE_ENV` (something like: us-east4-gcp)
- `MEMORY_BACKEND=pinecone`
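
Taken together, the relevant lines of your `.env` would look something like this (the key and region values below are placeholders):

```
PINECONE_API_KEY=your-pinecone-api-key
PINECONE_ENV=us-east4-gcp
MEMORY_BACKEND=pinecone
```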

Alternatively, you can set them from the command line (advanced):

@@ -234,15 +250,15 @@ For Windows Users:
```
setx PINECONE_API_KEY "YOUR_PINECONE_API_KEY"
setx PINECONE_ENV "Your pinecone region" # something like: us-east4-gcp
setx MEMORY_BACKEND "pinecone"
```

For macOS and Linux users:

```
export PINECONE_API_KEY="YOUR_PINECONE_API_KEY"
export PINECONE_ENV="Your pinecone region" # something like: us-east4-gcp
export MEMORY_BACKEND="pinecone"
```

## Setting Your Cache Type
@@ -259,6 +275,52 @@ To switch to either, change the `MEMORY_BACKEND` env variable to the value that

1. View memory usage by using the `--debug` flag :)


## 🧠 Memory pre-seeding

```
# python scripts/data_ingestion.py -h
usage: data_ingestion.py [-h] (--file FILE | --dir DIR) [--init] [--overlap OVERLAP] [--max_length MAX_LENGTH]
Ingest a file or a directory with multiple files into memory. Make sure to set your .env before running this script.
options:
-h, --help show this help message and exit
--file FILE The file to ingest.
--dir DIR The directory containing the files to ingest.
--init Init the memory and wipe its content (default: False)
--overlap OVERLAP The overlap size between chunks when ingesting files (default: 200)
--max_length MAX_LENGTH The max_length of each chunk when ingesting files (default: 4000)
# python scripts/data_ingestion.py --dir seed_data --init --overlap 200 --max_length 1000
```

This script, located at `scripts/data_ingestion.py`, allows you to ingest files into memory and pre-seed it before running Auto-GPT.

Memory pre-seeding is a technique that involves ingesting relevant documents or data into the AI's memory so that it can use this information to generate more informed and accurate responses.

To pre-seed the memory, the content of each document is split into chunks of a specified maximum length with a specified overlap between chunks, and then each chunk is added to the memory backend set in the .env file. When the AI is prompted to recall information, it can then access those pre-seeded memories to generate more informed and accurate responses.
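
As a rough illustration of that chunking scheme (a minimal sketch in Python — `split_text` and its signature are invented for this example, not the project's actual implementation):

```
def split_text(text, max_length=4000, overlap=200):
    # Yield chunks of at most max_length characters; consecutive
    # chunks share `overlap` characters of context.
    assert 0 <= overlap < max_length
    step = max_length - overlap
    for start in range(0, len(text), step):
        yield text[start:start + max_length]
```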

This technique is particularly useful when working with large amounts of data or when there is specific information that the AI needs to be able to access quickly.
By pre-seeding the memory, the AI can retrieve and use this information more efficiently, saving time and API calls and improving the accuracy of its responses.

You could, for example, download the documentation of an API or the contents of a GitHub repository and ingest it into memory before running Auto-GPT.

⚠️ If you use Redis as your memory, make sure to run Auto-GPT with `WIPE_REDIS_ON_START` set to `False` in your .env file.

⚠️ For other memory backends, we currently forcefully wipe the memory when starting Auto-GPT. To ingest data with those memory backends, you can call the data_ingestion.py script at any time during an Auto-GPT run.

Memories will be available to the AI immediately as they are ingested, even if ingested while Auto-GPT is running.

In the example above, the script initializes the memory and ingests all files within the seed_data directory, with an overlap of 200 between chunks and a maximum chunk length of 1000 (matching the flags shown).
Note that you can also use the --file argument to ingest a single file into memory (see the example below) and that the script will only ingest files within the auto_gpt_workspace directory.
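
A hypothetical single-file run could look like this (the filename is illustrative; per the note above, it must resolve inside the auto_gpt_workspace directory):

```
python scripts/data_ingestion.py --file api_docs.md --overlap 100 --max_length 2000
```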

You can adjust the max_length and overlap parameters to fine-tune the way the documents are presented to the AI when it "recalls" that memory:

- Adjusting the overlap value allows the AI to access more contextual information from each chunk when recalling information, but will result in more chunks being created and therefore increase memory backend usage and OpenAI API requests.
- Reducing the max_length value will create more chunks, which can save prompt tokens by allowing for more message history in the context, but also increases the number of chunks stored and retrieved.
- Increasing the max_length value will provide the AI with more contextual information from each chunk, reducing the number of chunks created and saving on OpenAI API requests. However, this may also use more prompt tokens and decrease the overall context available to the AI.
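
To make the tradeoff concrete with illustrative numbers: with max_length 4000 and overlap 200, chunk starts advance by 3,800 characters, so a 10,000-character document yields three chunks; halving max_length to 2,000 (overlap unchanged) gives a step of 1,800 characters and six chunks.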

## 💀 Continuous Mode ⚠️

Run the AI **without** user authorisation, 100% automated.
@@ -357,4 +419,4 @@ flake8 scripts/ tests/
# Or, if you want to run flake8 with the same configuration as the CI:
flake8 scripts/ tests/ --select E303,W293,W291,W292,E305,E231,E302
```
4 changes: 2 additions & 2 deletions scripts/ai_config.py
@@ -70,8 +70,8 @@ def save(self, config_file: str=SAVE_FILE) -> None:
"""

config = {"ai_name": self.ai_name, "ai_role": self.ai_role, "ai_goals": self.ai_goals}
with open(config_file, "w") as file:
yaml.dump(config, file)
with open(config_file, "w", encoding='utf-8') as file:
yaml.dump(config, file, allow_unicode=True)

def construct_full_prompt(self) -> str:
"""
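
The switch to `allow_unicode=True` matters when the AI name or goals contain non-ASCII text. A quick sketch of the difference (illustrative value, standard PyYAML behaviour):

```
import yaml

print(yaml.dump({"ai_name": "café"}))                      # ai_name: "caf\xE9"
print(yaml.dump({"ai_name": "café"}, allow_unicode=True))  # ai_name: café
```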
2 changes: 1 addition & 1 deletion scripts/call_ai_function.py
@@ -13,7 +13,7 @@ def call_ai_function(function, args, description, model=None):
model = cfg.smart_llm_model
# For each arg, if any are None, convert to "None":
args = [str(arg) if arg is not None else "None" for arg in args]
# parse args to comma seperated string
# parse args to comma separated string
args = ", ".join(args)
messages = [
{
5 changes: 5 additions & 0 deletions scripts/config.py
@@ -38,7 +38,9 @@ def __init__(self):
self.continuous_mode = False
self.continuous_limit = 0
self.speak_mode = False
self.skip_reprompt = False

self.ai_settings_file = os.getenv("AI_SETTINGS_FILE", "ai_settings.yaml")
self.fast_llm_model = os.getenv("FAST_LLM_MODEL", "gpt-3.5-turbo")
self.smart_llm_model = os.getenv("SMART_LLM_MODEL", "gpt-4")
self.fast_token_limit = int(os.getenv("FAST_TOKEN_LIMIT", 4000))
@@ -64,6 +66,9 @@ def __init__(self):
self.use_mac_os_tts = False
self.use_mac_os_tts = os.getenv("USE_MAC_OS_TTS")

self.use_brian_tts = False
self.use_brian_tts = os.getenv("USE_BRIAN_TTS")

self.google_api_key = os.getenv("GOOGLE_API_KEY")
self.custom_search_engine_id = os.getenv("CUSTOM_SEARCH_ENGINE_ID")

70 changes: 70 additions & 0 deletions scripts/data_ingestion.py
@@ -0,0 +1,70 @@
import argparse
import logging
from config import Config
from memory import get_memory
from file_operations import ingest_file, search_files

cfg = Config()


def configure_logging():
logging.basicConfig(filename='log-ingestion.txt',
filemode='a',
format='%(asctime)s,%(msecs)d %(name)s %(levelname)s %(message)s',
datefmt='%H:%M:%S',
level=logging.DEBUG)
return logging.getLogger('AutoGPT-Ingestion')


def ingest_directory(directory, memory, args):
"""
Ingest all files in a directory by calling the ingest_file function for each file.
:param directory: The directory containing the files to ingest
:param memory: An object with an add() method to store the chunks in memory
"""
try:
files = search_files(directory)
for file in files:
ingest_file(file, memory, args.max_length, args.overlap)
except Exception as e:
print(f"Error while ingesting directory '{directory}': {str(e)}")


def main():
logger = configure_logging()

parser = argparse.ArgumentParser(description="Ingest a file or a directory with multiple files into memory. Make sure to set your .env before running this script.")
group = parser.add_mutually_exclusive_group(required=True)
group.add_argument("--file", type=str, help="The file to ingest.")
group.add_argument("--dir", type=str, help="The directory containing the files to ingest.")
parser.add_argument("--init", action='store_true', help="Init the memory and wipe its content (default: False)", default=False)
parser.add_argument("--overlap", type=int, help="The overlap size between chunks when ingesting files (default: 200)", default=200)
parser.add_argument("--max_length", type=int, help="The max_length of each chunk when ingesting files (default: 4000)", default=4000)

args = parser.parse_args()

# Initialize memory
memory = get_memory(cfg, init=args.init)
print('Using memory of type: ' + memory.__class__.__name__)

if args.file:
try:
ingest_file(args.file, memory, args.max_length, args.overlap)
print(f"File '{args.file}' ingested successfully.")
except Exception as e:
logger.error(f"Error while ingesting file '{args.file}': {str(e)}")
print(f"Error while ingesting file '{args.file}': {str(e)}")
elif args.dir:
try:
ingest_directory(args.dir, memory, args)
print(f"Directory '{args.dir}' ingested successfully.")
except Exception as e:
logger.error(f"Error while ingesting directory '{args.dir}': {str(e)}")
print(f"Error while ingesting directory '{args.dir}': {str(e)}")
else:
print("Please provide either a file path (--file) or a directory name (--dir) inside the auto_gpt_workspace directory as input.")


if __name__ == "__main__":
main()