
Commit

rag beta release
binary-husky committed Sep 2, 2024
1 parent 08c3c56 commit 80b1a6f
Showing 4 changed files with 553 additions and 48 deletions.
58 changes: 47 additions & 11 deletions crazy_functions/Rag_Interface.py
@@ -1,13 +1,18 @@
from toolbox import CatchException, update_ui, get_conf, get_log_folder
from toolbox import CatchException, update_ui, get_conf, get_log_folder, update_ui_lastest_msg
from crazy_functions.crazy_utils import input_clipping
from crazy_functions.crazy_utils import request_gpt_model_in_new_thread_with_ui_alive
from crazy_functions.rag_fns.llama_index_worker import LlamaIndexRagWorker

RAG_WORKER_REGISTER = {}

MAX_HISTORY_ROUND = 5
MAX_CONTEXT_TOKEN_LIMIT = 4096
REMEMBER_PREVIEW = 1000

@CatchException
def Rag问答(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, user_request):

# first, we retrieve rag worker from global context
# 1. we retrieve rag worker from global context
user_name = chatbot.get_user()
if user_name in RAG_WORKER_REGISTER:
rag_worker = RAG_WORKER_REGISTER[user_name]
@@ -18,22 +23,53 @@ def Rag问答(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, u
checkpoint_dir=get_log_folder(user_name, plugin_name='experimental_rag'),
auto_load_checkpoint=True)

# second, we search vector store and build prompts
i_say = txt
chatbot.append([txt, '正在召回知识 ...'])
yield from update_ui(chatbot=chatbot, history=history) # refresh the UI

# 2. clip history to reduce token consumption
# 2-1. reduce chat round
txt_origin = txt

if len(history) > MAX_HISTORY_ROUND * 2:
history = history[-(MAX_HISTORY_ROUND * 2):]
txt_clip, history, flags = input_clipping(txt, history, max_token_limit=MAX_CONTEXT_TOKEN_LIMIT, return_clip_flags=True)
input_is_clipped_flag = (flags["original_input_len"] != flags["clipped_input_len"])

# 2-2. if input is clipped, add input to vector store before retrieve
if input_is_clipped_flag:
yield from update_ui_lastest_msg('检测到长输入, 正在向量化 ...', chatbot, history, delay=0) # refresh the UI
# save input to vector store
rag_worker.add_text_to_vector_store(txt_origin)
yield from update_ui_lastest_msg('向量化完成 ...', chatbot, history, delay=0) # refresh the UI
if len(txt_origin) > REMEMBER_PREVIEW:
HALF = REMEMBER_PREVIEW//2
i_say_to_remember = txt[:HALF] + f" ...\n...(省略{len(txt_origin)-REMEMBER_PREVIEW}字)...\n... " + txt[-HALF:]
if (flags["original_input_len"] - flags["clipped_input_len"]) > HALF:
txt_clip = txt_clip + f" ...\n...(省略{len(txt_origin)-len(txt_clip)-HALF}字)...\n... " + txt[-HALF:]
else:
pass
i_say = txt_clip
else:
i_say_to_remember = i_say = txt_clip
else:
i_say_to_remember = i_say = txt_clip

# 3. we search vector store and build prompts
nodes = rag_worker.retrieve_from_store_with_query(i_say)
prompt = rag_worker.build_prompt(query=i_say, nodes=nodes)

# third, it is time to query llms
gpt_say = yield from request_gpt_model_in_new_thread_with_ui_alive(
# 4. it is time to query llms
if len(chatbot) != 0: chatbot.pop(-1) # pop temp chat, because we are going to add them again inside `request_gpt_model_in_new_thread_with_ui_alive`
model_say = yield from request_gpt_model_in_new_thread_with_ui_alive(
inputs=prompt, inputs_show_user=i_say,
llm_kwargs=llm_kwargs, chatbot=chatbot, history=history,
sys_prompt=system_prompt,
retry_times_at_unknown_error=0
)

# finally, remember what has been asked / answered
rag_worker.remember_qa(i_say, gpt_say)
history.extend([i_say, gpt_say])
# 5. remember what has been asked / answered
yield from update_ui_lastest_msg(model_say + '</br></br>' + '对话记忆中, 请稍等 ...', chatbot, history, delay=0.5) # refresh the UI
rag_worker.remember_qa(i_say_to_remember, model_say)
history.extend([i_say, model_say])

# yield, see you next time
yield from update_ui(chatbot=chatbot, history=history) # refresh the UI # update the interface
yield from update_ui_lastest_msg(model_say, chatbot, history, delay=0) # refresh the UI
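Illustration (not part of the commit): the block above stores the full long input in the vector store but only remembers a head-and-tail preview, bounded by REMEMBER_PREVIEW, in chat memory. A minimal standalone sketch of that preview step, with an illustrative helper name, could look like this:

REMEMBER_PREVIEW = 1000  # same limit as in Rag_Interface.py

def build_remember_preview(txt: str) -> str:
    # Keep short inputs unchanged; otherwise keep the first and last halves of the
    # preview budget and insert a note about how many characters were omitted.
    if len(txt) <= REMEMBER_PREVIEW:
        return txt
    half = REMEMBER_PREVIEW // 2
    omitted = len(txt) - REMEMBER_PREVIEW
    return txt[:half] + f" ...\n...(省略{omitted}字)...\n... " + txt[-half:]

# A 5000-character question would be vectorized in full, while chat memory
# receives only the roughly 1000-character preview plus the omission note.
print(len(build_remember_preview("问" * 5000)))

This keeps long documents retrievable through the vector store without bloating the rolling chat history.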
25 changes: 21 additions & 4 deletions crazy_functions/crazy_utils.py
@@ -4,7 +4,7 @@
import os
import logging

def input_clipping(inputs, history, max_token_limit):
def input_clipping(inputs, history, max_token_limit, return_clip_flags=False):
"""
When the input text plus the history text exceeds the maximum token limit, discard part of the text.
Inputs:
@@ -20,17 +20,20 @@ def input_clipping(inputs, history, max_token_limit):
enc = model_info["gpt-3.5-turbo"]['tokenizer']
def get_token_num(txt): return len(enc.encode(txt, disallowed_special=()))


mode = 'input-and-history'
# when the input's token share is less than half of the total limit, only clip the history
input_token_num = get_token_num(inputs)
original_input_len = len(inputs)
if input_token_num < max_token_limit//2:
mode = 'only-history'
max_token_limit = max_token_limit - input_token_num

everything = [inputs] if mode == 'input-and-history' else ['']
everything.extend(history)
n_token = get_token_num('\n'.join(everything))
full_token_num = n_token = get_token_num('\n'.join(everything))
everything_token = [get_token_num(e) for e in everything]
everything_token_num = sum(everything_token)
delta = max(everything_token) // 16 # truncation granularity

while n_token > max_token_limit:
@@ -43,10 +46,24 @@ def get_token_num(txt): return len(enc.encode(txt, disallowed_special=()))

if mode == 'input-and-history':
inputs = everything[0]
full_token_num = everything_token_num
else:
pass
full_token_num = everything_token_num + input_token_num

history = everything[1:]
return inputs, history

flags = {
"mode": mode,
"original_input_token_num": input_token_num,
"original_full_token_num": full_token_num,
"original_input_len": original_input_len,
"clipped_input_len": len(inputs),
}

if not return_clip_flags:
return inputs, history
else:
return inputs, history, flags

def request_gpt_model_in_new_thread_with_ui_alive(
inputs, inputs_show_user, llm_kwargs,
(Diffs for the remaining two changed files are not shown here.)
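Usage sketch (not from the repository): the new return_clip_flags option reports how much of the input survived clipping, which is what Rag_Interface.py uses to decide whether to vectorize the full input first. The snippet assumes the gpt_academic repository and its bundled tokenizer are importable; the sample values are illustrative.

from crazy_functions.crazy_utils import input_clipping

long_input = "some very long user question ... " * 500
history = ["previous question", "previous answer"]

clipped_input, clipped_history, flags = input_clipping(
    long_input, history, max_token_limit=4096, return_clip_flags=True)

# flags carries the clipping mode plus token/length statistics from before and after clipping.
was_clipped = flags["original_input_len"] != flags["clipped_input_len"]
print(flags["mode"], flags["original_full_token_num"], was_clipped)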

