import gradio as gr
from openai import OpenAI


def predict(message, history, character, api_key, progress=gr.Progress()):
    """Stream a GPT-4 reply for the latest user message and chat history."""
    client = OpenAI(api_key=api_key)

    # Convert Gradio's (user, assistant) tuple history into OpenAI's message format
    history_openai_format = []
    for human, assistant in history:
        history_openai_format.append({"role": "user", "content": human})
        history_openai_format.append({"role": "assistant", "content": assistant})
    history_openai_format.append({"role": "user", "content": message})

    response = client.chat.completions.create(
        model="gpt-4",
        messages=history_openai_format,
        temperature=1.0,
        stream=True,
    )

    # Accumulate streamed tokens and yield the partial reply so the chatbot updates live
    partial_message = ""
    for chunk in progress.tqdm(response, desc="Generating"):
        if chunk.choices[0].delta.content:
            partial_message += chunk.choices[0].delta.content
            yield partial_message


def reset(character):
    # Clear the visible chatbot and the ChatInterface's stored history
    # whenever a different character is selected
    return [], []


# Gradio app
with gr.Blocks() as demo:
    gr.Markdown("<h1 style='text-align: center; margin-bottom: 1rem'>My Chatbot</h1>")
    bot = gr.Chatbot(render=False)
    dropdown = gr.Dropdown(
        ["Character 1", "Character 2", "Character 3", "Character 4", "Character 5",
         "Character 6", "Character 7", "Character 8", "Character 9", "Character 10",
         "Character 11", "Character 12", "Character 13"],
        label="Characters",
        info="Select the character that you'd like to speak to",
        value="Character 1",
    )
    chat = gr.ChatInterface(
        fn=predict,
        chatbot=bot,
        additional_inputs=[dropdown, gr.Textbox(label="API Key")],
    )
    dropdown.change(fn=reset, inputs=dropdown, outputs=[bot, chat.chatbot_state])

demo.queue()
demo.launch()