-
Notifications
You must be signed in to change notification settings - Fork 41
/
openai_api.py
111 lines (94 loc) · 4.05 KB
/
openai_api.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
from collections import defaultdict
import json
from openai import OpenAI
from .chat_messages import ChatMessages
# Module-level OpenAI connection settings; merged/overwritten via set_gpt_api_config().
openai_config = {
    'api_key': 'none',
    'base_url': '',
}
# Supported models: context-window size and ("Pricing": (input, output)) rates
# in USD per 1K tokens — count_gpt_api_cost() divides token counts by 1_000.
gpt_model_config = {
    "gpt-4-1106-preview":{
        "CONTEXT_WINDOW": 128_000,
        "Pricing": (0.01, 0.03),
    },
    "gpt-3.5-turbo-1106":{
        "CONTEXT_WINDOW": 16_385,
        "Pricing": (0.001, 0.002),
    },
}
# Shared OpenAI client; stays None until set_gpt_api_config() is called.
client = None
def set_gpt_api_config(**kwargs):
    """Merge keyword settings into the module config and rebuild the client.

    Typical keys are ``api_key`` and ``base_url``; anything passed here is
    written into ``openai_config`` before the shared ``client`` is re-created.
    """
    global client
    for config_key, config_value in kwargs.items():
        openai_config[config_key] = config_value
    client = OpenAI(**openai_config)
def count_gpt_api_cost(model, context_tokens, completion_tokens):
    """Return the estimated USD cost of one API call.

    ``gpt_model_config[model]["Pricing"]`` holds (input_rate, output_rate)
    in dollars per 1K tokens, applied to prompt and completion respectively.
    """
    input_rate, output_rate = gpt_model_config[model]["Pricing"]
    return input_rate * context_tokens / 1_000 + output_rate * completion_tokens / 1_000
def stream_function_calling_with_gpt(messages, tools, model='gpt-3.5-turbo-1106', max_tokens=4_096):
    """Stream a tool-calling chat completion, yielding cost-annotated messages.

    Yields the ``ChatMessages`` object once with the context-only cost, then
    again after every streamed chunk with the assistant text plus the
    accumulated tool calls (as pretty-printed JSON) rendered into the last
    message.  When exhausted, returns ``(messages, content, tool_calls)``
    via the generator's ``StopIteration`` value.

    Args:
        messages: chat history (role/content dicts) accepted by ChatMessages.
        tools: tool/function definitions forwarded to the API.
        model: must be a key of ``gpt_model_config``.
        max_tokens: completion-token cap forwarded to the API.

    Raises:
        Exception: if ``set_gpt_api_config`` has not been called yet.
    """
    if client is None:
        raise Exception('未配置openai_api!')
    assert model in gpt_model_config, f"model必须是{list(gpt_model_config.keys())}中的一个!"

    messages = ChatMessages(messages, model=model, currency_symbol='$')
    context_tokens = messages.get_estimated_tokens()
    context_cost = count_gpt_api_cost(model, context_tokens, 0)
    messages.cost = context_cost
    yield messages

    response = client.chat.completions.create(
        stream=True,
        model=model,
        messages=messages,
        tools=tools,
        tool_choice="auto",  # auto is default, but we'll be explicit
        max_tokens=max_tokens,  # fix: parameter was accepted but never forwarded
    )

    messages.append({'role': 'assistant', 'content': ''})
    # Partial tool calls are accumulated by stream index: the name arrives
    # once, the arguments arrive as concatenated JSON fragments.
    function_calls = defaultdict(lambda: {"name": "", "arguments": ""})
    content = ""
    for part in response:
        content += part.choices[0].delta.content or ""
        for delta_tool_call in part.choices[0].delta.tool_calls or []:
            func_call = function_calls[delta_tool_call.index]
            func_call["name"] = delta_tool_call.function.name or func_call["name"]
            func_call["arguments"] += delta_tool_call.function.arguments or ""
        messages[-1]['content'] = content + "\n" + json.dumps(list(function_calls.values()), ensure_ascii=False, indent=1)
        messages.cost = count_gpt_api_cost(model, context_tokens, messages[-1:].get_estimated_tokens())
        yield messages
    return messages, content, list(function_calls.values())
def stream_chat_with_gpt(messages, model='gpt-3.5-turbo-1106', max_tokens=4_096, response_json=False, n=1):
    """Stream a chat completion, yielding the cost-annotated message list.

    Yields the ``ChatMessages`` object once with the context-only cost, then
    again after every streamed chunk with the partial assistant reply in the
    last message (a list of ``n`` strings when ``n > 1``, a single string
    otherwise).  Returns ``messages`` via the generator's ``StopIteration``.

    Args:
        messages: chat history (role/content dicts) accepted by ChatMessages.
        model: must be a key of ``gpt_model_config``.
        max_tokens: completion-token cap forwarded to the API.
        response_json: when True, request JSON mode via ``response_format``.
        n: number of parallel completions to stream.

    Raises:
        Exception: if ``set_gpt_api_config`` has not been called yet.
    """
    if client is None:
        raise Exception('未配置openai_api!')
    assert model in gpt_model_config, f"model必须是{list(gpt_model_config.keys())}中的一个!"

    messages = ChatMessages(messages, model=model, currency_symbol='$')
    context_tokens = messages.get_estimated_tokens()
    context_cost = count_gpt_api_cost(model, context_tokens, 0)
    messages.cost = context_cost
    yield messages

    create_kwargs = {
        'stream': True,
        'model': model,
        'messages': messages,
        'max_tokens': max_tokens,
        'n': n,
    }
    if response_json:
        # Fix: only send response_format when JSON mode is requested.  An
        # explicit ``response_format=None`` is serialized by the SDK (unlike
        # the NOT_GIVEN default, which is omitted) and can be rejected by
        # the API — presumably the original intent was "omit entirely".
        create_kwargs['response_format'] = {"type": "json_object"}
    chatstream = client.chat.completions.create(**create_kwargs)

    messages.append({'role': 'assistant', 'content': ''})
    content = ['' for _ in range(n)]
    for part in chatstream:
        for choice in part.choices:
            content[choice.index] += choice.delta.content or ''
        messages[-1]['content'] = content if n > 1 else content[0]
        messages.cost = count_gpt_api_cost(model, context_tokens, messages[-1:].get_estimated_tokens())
        yield messages
    return messages
def test_gpt_api():
    """Smoke-test every configured model with a one-line prompt.

    Returns a multi-line report marking each model as (Success) with its
    reply or (ERROR) with the raised exception's message.
    """
    prompt = "Say this is a test"
    report = 'User:Say this is a test\n'
    for model in gpt_model_config:
        try:
            last = None
            for last in stream_chat_with_gpt([{'role': 'user', 'content': prompt}, ], model=model):
                pass
            response = last[-1]['content']
        except Exception as e:
            report += f"(ERROR){model}:{e}\n"
        else:
            report += f"(Success){model}:{response}\n"
    return report
if __name__ == '__main__':
    # Manual smoke test: prints a per-model success/error report.
    # NOTE(review): set_gpt_api_config() is never called here, so every model
    # is expected to report (ERROR) unless configuration happens elsewhere.
    print(test_gpt_api())