Skip to content

Commit

Permalink
Small refactor
Browse files Browse the repository at this point in the history
  • Loading branch information
Elehiggle committed May 25, 2024
1 parent 76a596c commit eae23e3
Show file tree
Hide file tree
Showing 2 changed files with 25 additions and 30 deletions.
40 changes: 20 additions & 20 deletions README.md
Original file line number Diff line number Diff line change
Expand Up @@ -78,26 +78,26 @@ This project is a chatbot for Mattermost that integrates with the OpenAI API to
### Extended optional configuration variables
| Parameter | Description |
|-------------------------------|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
| `AI_SYSTEM_PROMPT`             | The system prompt/instructions. Default: [click](https://github.com/Elehiggle/ChatGPTMattermostChatbot/blob/b6a8b9a1467580c1cbe8db861f1ff1e938d4575f/chatbot.py#L64) (Subject to change; the current_time and CHATBOT_USERNAME variables inside the prompt will be auto-formatted and substituted.) |
| `AI_TIMEOUT` | The timeout for the AI API call in seconds. Default: "120" |
| `MAX_TOKENS` | The maximum number of tokens to generate in the response. Default: "4096" (max) |
| `TEMPERATURE` | The temperature value for controlling the randomness of the generated responses (0.0 = analytical, 1.0 = fully random). Default: "1" |
| `IMAGE_SIZE` | The image size for image generation. Default: "1024x1024" (see [docs](https://platform.openai.com/docs/guides/images/usage?context=node) for allowed types) |
| `IMAGE_QUALITY` | The image quality for image generation. Default: "standard" (also: "hd") |
| `IMAGE_STYLE` | The image style for image generation. Default: "vivid" (also: "natural") |
| `MAX_RESPONSE_SIZE_MB` | The maximum size of the website or file content to extract (in megabytes, per URL/file). Default: "100" |
| `FLARESOLVERR_ENDPOINT`       | Endpoint URL to your [FlareSolverr](https://github.com/FlareSolverr/FlareSolverr) instance (e.g. "<http://192.168.1.55:8191/v1>"). If you use this, MAX_RESPONSE_SIZE_MB won't be honored since it can't stream content. For maximum effectiveness, use a residential IP endpoint |
| `KEEP_ALL_URL_CONTENT` | Whether to feed the AI all URL content from the whole conversation thread. The website result is cached in memory. If you only want it to know about the current message's URL content (due to context size or cost), set to "FALSE". Default: "TRUE" |
| `MATTERMOST_IGNORE_SENDER_ID` | The user ID of a user to ignore (optional, useful if you have multiple chatbots that are not real bot accounts to prevent endless loops). Supports multiple, separated by comma |
| `MATTERMOST_PORT` | The port of your Mattermost server. Default: "443" |
| `MATTERMOST_SCHEME` | The scheme of the connection. Default: "https" |
| `MATTERMOST_BASEPATH` | The basepath of your Mattermost server. Default: "/api/v4" |
| `MATTERMOST_CERT_VERIFY` | Cert verification. Default: True (also: string path to your certificate file) |
| `AI_API_BASEURL` | AI API Base URL. Default: None (which will use "<https://api.openai.com/v1/>"). Useful if you want to use a different AI with OpenAI compatible endpoint |
| `LOG_LEVEL` | The log level. Default: "INFO" |
| `LOG_LEVEL_ROOT` | The root log level (for other modules than this chatbot). Default: "INFO" |
| Parameter | Description |
|-------------------------------|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
| `AI_SYSTEM_PROMPT`             | The system prompt/instructions. Default: [click](https://github.com/Elehiggle/ChatGPTMattermostChatbot/blob/76a596cda2d26bb2f00f42c96451ec46be941f49/chatbot.py#L102) (Subject to change; the current_time and CHATBOT_USERNAME variables inside the prompt will be auto-formatted and substituted.) |
| `AI_TIMEOUT` | The timeout for the AI API call in seconds. Default: "120" |
| `MAX_TOKENS` | The maximum number of tokens to generate in the response. Default: "4096" (max) |
| `TEMPERATURE` | The temperature value for controlling the randomness of the generated responses (0.0 = analytical, 1.0 = fully random). Default: "1" |
| `IMAGE_SIZE` | The image size for image generation. Default: "1024x1024" (see [docs](https://platform.openai.com/docs/guides/images/usage?context=node) for allowed types) |
| `IMAGE_QUALITY` | The image quality for image generation. Default: "standard" (also: "hd") |
| `IMAGE_STYLE` | The image style for image generation. Default: "vivid" (also: "natural") |
| `MAX_RESPONSE_SIZE_MB` | The maximum size of the website or file content to extract (in megabytes, per URL/file). Default: "100" |
| `FLARESOLVERR_ENDPOINT`       | Endpoint URL to your [FlareSolverr](https://github.com/FlareSolverr/FlareSolverr) instance (e.g. "<http://192.168.1.55:8191/v1>"). If you use this, MAX_RESPONSE_SIZE_MB won't be honored since it can't stream content. For maximum effectiveness, use a residential IP endpoint |
| `KEEP_ALL_URL_CONTENT` | Whether to feed the AI all URL content from the whole conversation thread. The website result is cached in memory. If you only want it to know about the current message's URL content (due to context size or cost), set to "FALSE". Default: "TRUE" |
| `MATTERMOST_IGNORE_SENDER_ID` | The user ID of a user to ignore (optional, useful if you have multiple chatbots that are not real bot accounts to prevent endless loops). Supports multiple, separated by comma |
| `MATTERMOST_PORT` | The port of your Mattermost server. Default: "443" |
| `MATTERMOST_SCHEME` | The scheme of the connection. Default: "https" |
| `MATTERMOST_BASEPATH` | The basepath of your Mattermost server. Default: "/api/v4" |
| `MATTERMOST_CERT_VERIFY` | Cert verification. Default: True (also: string path to your certificate file) |
| `AI_API_BASEURL` | AI API Base URL. Default: None (which will use "<https://api.openai.com/v1/>"). Useful if you want to use a different AI with OpenAI compatible endpoint |
| `LOG_LEVEL` | The log level. Default: "INFO" |
| `LOG_LEVEL_ROOT` | The root log level (for other modules than this chatbot). Default: "INFO" |

## Usage

Expand Down
15 changes: 5 additions & 10 deletions chatbot.py
Original file line number Diff line number Diff line change
Expand Up @@ -107,8 +107,7 @@ def wrapped_f(*args, **kwargs):
If a user sends a link, use the extracted URL content provided, do not assume or make up stories based on the URL alone.
If a user sends a YouTube link, primarily focus on the transcript and do not unnecessarily repeat the title, description or uploader of the video.
In your answer DO NOT contain the link to the video/website the user just provided to you as the user already knows it, unless the task requires it.
If your response contains any URLs, make sure to properly escape them using Markdown syntax for display purposes.
If an error occurs, provide the information from the <chatbot_error> tag to the user along with your answer.""",
If your response contains any URLs, make sure to properly escape them using Markdown syntax for display purposes.""",
)

tools = [
Expand Down Expand Up @@ -594,7 +593,7 @@ def get_stock_ticker_data(arguments):
"cashflow": str(stock.cashflow),
}

return json.dumps(stock_data)
return stock_data


@timed_lru_cache(seconds=7200, maxsize=100)
Expand Down Expand Up @@ -668,7 +667,7 @@ def get_cryptocurrency_data_by_id(arguments):
if matched_crypto:
return matched_crypto

return {"error": "No data found for the specified cryptocurrency ID/symbol."}
return "No data found for the specified cryptocurrency ID/symbol"


def process_message(event_data):
Expand Down Expand Up @@ -711,15 +710,13 @@ def process_message(event_data):
# We don't want to extract information from links the assistant sent
if thread_role == "assistant":
content["message"] = thread_message_text
content = json.dumps(content)
messages.append(construct_text_message(thread_sender_name, thread_role, content))
continue

    # If keep content is disabled, we will skip the remaining content-grabbing code unless it's the last message
is_last_message = index == len(thread_messages) - 1
if not keep_all_url_content and not is_last_message:
content["message"] = thread_message_text
content = json.dumps(content)
messages.append(construct_text_message(thread_sender_name, thread_role, content))
continue

Expand Down Expand Up @@ -755,10 +752,8 @@ def process_message(event_data):

content["message"] = thread_message_text

content = json.dumps(content)

if image_messages:
image_messages.append({"type": "text", "text": content})
image_messages.append({"type": "text", "text": json.dumps(content)})
messages.append({"name": thread_sender_name, "role": "user", "content": image_messages})
else:
messages.append(construct_text_message(thread_sender_name, "user", content))
Expand Down Expand Up @@ -811,7 +806,7 @@ def construct_text_message(name, role, message):
"content": [
{
"type": "text",
"text": f"{message}",
"text": json.dumps(message),
}
],
}
Expand Down

0 comments on commit eae23e3

Please sign in to comment.