|
import chainlit as cl |
|
from gradio_client import Client |
|
from openai import OpenAI |
|
from groq import Groq |
|
import requests |
|
from chainlit.input_widget import Select, Slider |
|
import os |
|
|
|
# --- Credentials (read from the environment; None if unset) -----------------
hf_token = os.environ.get("HF_TOKEN")

openai_api_key = os.environ.get('OPENAI_API_KEY')

groq_api_key = os.environ.get('GROQ_API_KEY')


# Gradio client for the Hugging Face Space that hosts the fine-tuned chat
# model (used by the "Dorna-AI" and "zephyr-7B" profiles).
hf_text_client = Client("Artin2009/text-generation", hf_token=hf_token)


# API clients for the hosted model providers.
openai_client = OpenAI(api_key=openai_api_key)

groq_client = Groq(api_key=groq_api_key)


# Raw HF Inference API endpoint for the Persian TookaBERT fill-mask model
# (used by the "Toka-353M" profile via query() below).
API_URL = "https://api-inference.huggingface.co/models/PartAI/TookaBERT-Large"

headers = {"Authorization": f"Bearer {hf_token}"}
|
|
|
|
|
def query(payload):
    """POST *payload* to the TookaBERT HF Inference endpoint and return JSON.

    Parameters:
        payload: dict serialized as the JSON request body, e.g.
            {"inputs": "the capital of england is <mask>"}.

    Returns:
        The decoded JSON response — on success a list of candidate fills,
        otherwise the API's error object.
    """
    # A timeout keeps the chat handler from hanging forever when the
    # inference endpoint is cold or unreachable (requests has no default).
    response = requests.post(API_URL, headers=headers, json=payload, timeout=30)
    return response.json()
|
|
|
@cl.set_chat_profiles
async def chat_profile():
    """Return the selectable chat profiles, one per backing model."""
    # (profile name, markdown description) pairs, in display order.
    profiles = [
        ("None", "None"),
        ("neural-brain-AI", "The main model of neural brain"),
        ("Dorna-AI",
         "One of the open-sourced models that neural brain team fine-tuned"),
        ("GPT-4", "OpenAI's GPT-4 model"),
        ("gpt-3.5-turbo", "OpenAI's GPT-3.5 Turbo model"),
        ("GPT-3.5-turbo-0125", "OpenAI's GPT-3.5 Turbo 0125 model"),
        ("gpt-3.5-turbo-1106", "OpenAI's GPT-3.5 Turbo 1106 model"),
        ("TTS", "OpenAI's Text-to-Speech model"),
        # Fixed: these profiles are Llama-3 (the message handler calls
        # llama3-70b-8192 / llama3-8b-8192), and the small one is 8B — the
        # old text said "Llama-2" and "7B".
        ("Llama-3-70B", "Meta Open Source model Llama-3 with 70B parameters"),
        ("Llama-3-8B", "Meta Open Source model Llama-3 with 8B parameters"),
        ("gemma-7B", "Google Open Source LLM"),
        ("zephyr-7B", "Open Source model Zephyr with 7B parameters"),
        ("mistral-7B", "mistral open source LLM with 7B parameters"),
        ("Toka-353M", "PartAI Open Source model Toka with 353M parameters"),
    ]
    return [
        cl.ChatProfile(name=name, markdown_description=description)
        for name, description in profiles
    ]
|
|
|
# Greeting suffix shared by most profiles (kept byte-for-byte).
_CONFIGURED_BY = (
    " i was configured by Artin Daneshvar and Sadra Noadoust,"
    " 2 iranian students to help you, how can i assist you today ? "
)

# Per-profile introduction message, sent once when the chat starts.
_GREETINGS = {
    "neural-brain-AI": (
        "Hello, I am the main model of neural brain team, i am an instance of"
        " ChatGPT-4, This team finetuned me and i am ready to help you"
    ),
    "Dorna-AI": (
        "my name is Dorna, Your AI Assistant designed by neural nexus team."
        " i was made by Artin Daneshvar and Sadra Noadoust, 2 iranian students!"
    ),
    "GPT-4": "Im OpenAI's latest and biggest model." + _CONFIGURED_BY,
    "gpt-3.5-turbo": (
        "Im one of the OpenAI's models. one of the best models." + _CONFIGURED_BY
    ),
    "GPT-3.5-turbo-0125": (
        "Im one of the OpenAI's models. one of the best models." + _CONFIGURED_BY
    ),
    "gpt-3.5-turbo-1106": (
        "Im one of the OpenAI's models. one of the best models." + _CONFIGURED_BY
    ),
    "TTS": (
        "Im TTS. of the best models OpenAI ever created. i can convert text to"
        " speech! ." + _CONFIGURED_BY
    ),
    "Llama-3-70B": (
        "Im the big Llama!. one of the best open source models released by Meta!"
        " i am the Big version of meta's open source LLMs.," + _CONFIGURED_BY
    ),
    "Llama-3-8B": (
        "Im The small Llama!. one of the best open source models released by Meta!"
        " i am the small version of meta's open source LLMs." + _CONFIGURED_BY
    ),
    "gemma-7B": (
        "Im Gemma. the small version of google open source LLMs." + _CONFIGURED_BY
    ),
    "zephyr-7B": "Im Zephyr. One of the best open source LLMs." + _CONFIGURED_BY,
    "mistral-7B": (
        "Im Mistral. the small version of Mistral Family." + _CONFIGURED_BY
    ),
    "Toka-353M": (
        "Im Toka. An opens source persian LLM ." + _CONFIGURED_BY
        + "you should ask me your questions like : the capital of england is <mask> "
    ),
}


def _single_choice(select_id, label, value, tooltip=None):
    """Build a one-option Select widget (shows a fixed value in the panel)."""
    if tooltip is None:
        return Select(id=select_id, label=label, values=[value], initial_index=0)
    return Select(
        id=select_id, label=label, values=[value], initial_index=0, tooltip=tooltip
    )


def _temperature_slider():
    """Standard 0-1 temperature slider shared by every model settings panel."""
    return Slider(
        id="Temperature",
        label="Model Temperature",
        initial=0.7,
        min=0,
        max=1,
        step=0.1,
    )


def _model_settings(select_id, label, value):
    """Settings panel: a fixed model Select plus the shared temperature slider."""
    return [_single_choice(select_id, label, value), _temperature_slider()]


# Per-profile settings-panel factories.  Profiles absent from this table
# ("None", "TTS") show no settings panel, matching the original behavior.
_SETTINGS_FACTORIES = {
    "neural-brain-AI": lambda: [
        _single_choice("NB-Model", "NeuralBrain - Models", "Neural Brain AI")
    ],
    "Dorna-AI": lambda: [
        _single_choice("param_3", "Parameter 3", "512",
                       "Config parameter 3 (e.g., max tokens)"),
        _single_choice("param_4", "Parameter 4", "0.7",
                       "Config parameter 4 (e.g., temperature)"),
        _single_choice("param_5", "Parameter 5", "0.95",
                       "Config parameter 5 (e.g., top_p)"),
        _single_choice("api_name", "API Name", "/chat"),
    ],
    "GPT-4": lambda: _model_settings("OpenAI-Model", "OpenAI - Model", "gpt-4"),
    "gpt-3.5-turbo": lambda: _model_settings(
        "OpenAI-Model", "OpenAI - Model", "gpt-3.5-turbo"
    ),
    "GPT-3.5-turbo-0125": lambda: _model_settings(
        "OpenAI-Model", "OpenAI - Model", "gpt-3.5-turbo-0125"
    ),
    "gpt-3.5-turbo-1106": lambda: _model_settings(
        "OpenAI-Model", "OpenAI - Model", "gpt-3.5-turbo-1106"
    ),
    "Llama-3-70B": lambda: _model_settings(
        "Meta-Model", "Meta - Model", "Llama-3-70B"
    ),
    "Llama-3-8B": lambda: _model_settings(
        "Meta-Model", "Meta - Model", "Llama-3-8B"
    ),
    "gemma-7B": lambda: _model_settings(
        "Google-Model", "Google - Model", "Gemma-7B"
    ),
    "zephyr-7B": lambda: _model_settings(
        "zephyr-Model", "zephyr - Model", "zephyr-7B"
    ),
    "mistral-7B": lambda: _model_settings(
        "Mistral-Model", "Mistral - Model", "Mistral-7B"
    ),
    "Toka-353M": lambda: _model_settings(
        "PartAI-Model", "PartAI - Model", "TokaBert-353M"
    ),
}


@cl.on_chat_start
async def on_chat_start():
    """Show the profile's settings panel (if any) and its greeting message."""
    chat_profile = cl.user_session.get("chat_profile")

    factory = _SETTINGS_FACTORIES.get(chat_profile)
    if factory is not None:
        await cl.ChatSettings(factory()).send()

    greeting = _GREETINGS.get(chat_profile)
    if greeting is not None:
        await cl.Message(content=greeting).send()
|
|
|
|
|
# System prompt used for every OpenAI-backed profile.
_SYSTEM_PROMPT = (
    "You are neural nexus official chatbot, you are made by Artin Daneshvar "
    "and Sadra Noadoust"
)

# Profile name -> OpenAI model id.
_OPENAI_MODELS = {
    "neural-brain-AI": "ft:gpt-3.5-turbo-1106:nb:aria1:9UWDrLJK",
    "GPT-4": "gpt-4",
    "gpt-3.5-turbo": "gpt-3.5-turbo",
    # Bug fix: the profile name is capitalized, but OpenAI model ids are
    # lowercase — the original passed "GPT-3.5-turbo-0125", which the API
    # rejects as an unknown model.
    "GPT-3.5-turbo-0125": "gpt-3.5-turbo-0125",
    "gpt-3.5-turbo-1106": "gpt-3.5-turbo-1106",
}

# Profile name -> Groq model id.
_GROQ_MODELS = {
    "Llama-3-70B": "llama3-70b-8192",
    "Llama-3-8B": "llama3-8b-8192",
    "gemma-7B": "gemma-7b-it",
    "mistral-7B": "mixtral-8x7b-32768",
}

# Profile name -> persona name injected into the HF Space system request.
_HF_PERSONAS = {
    "Dorna-AI": "Dorna",
    "zephyr-7B": "zephyr",
}


def _openai_reply(model, user_content):
    """Send one chat turn to OpenAI and return the assistant's reply text."""
    completion = openai_client.chat.completions.create(
        model=model,
        messages=[
            {"role": "system", "content": _SYSTEM_PROMPT},
            {"role": "user", "content": user_content},
        ],
    )
    return completion.choices[0].message.content


def _groq_reply(model, user_content):
    """Stream one chat turn from Groq and return the concatenated reply."""
    completion = groq_client.chat.completions.create(
        model=model,
        messages=[{"role": "user", "content": user_content}],
        temperature=1,
        max_tokens=1024,
        top_p=1,
        stream=True,
        stop=None,
    )
    parts = []
    for chunk in completion:
        content = chunk.choices[0].delta.content
        if content is not None:
            parts.append(content)
    return "".join(parts)


def _hf_space_reply(assistant_name, user_content):
    """Call the shared HF Space chat endpoint with a persona instruction."""
    result = hf_text_client.predict(
        message=user_content,
        request=(
            f"your name is {assistant_name},An AI Assistant designed by neural"
            " nexus team. i was made by Artin Daneshvar and Sadra Noadoust,"
            " 2 iranian students!"
        ),
        param_3=512,
        param_4=0.7,
        param_5=0.95,
        api_name="/chat",
    )
    # Drop a trailing end-of-sequence token if present.  Bug fix: the
    # original used result.strip("</s>"), which strips any of the characters
    # '<', '/', 's', '>' from BOTH ends and could eat real reply text.
    if result.endswith("</s>"):
        result = result[: -len("</s>")]
    return result


@cl.on_message
async def main(message: cl.Message):
    """Route an incoming user message to the backend chosen via chat profile."""
    chat_profile = cl.user_session.get("chat_profile")
    if not chat_profile or chat_profile == "None":
        await cl.Message(content="Please select a model first.").send()
        return

    if chat_profile in _OPENAI_MODELS:
        reply = _openai_reply(_OPENAI_MODELS[chat_profile], message.content)
        await cl.Message(content=reply).send()

    elif chat_profile in _HF_PERSONAS:
        reply = _hf_space_reply(_HF_PERSONAS[chat_profile], message.content)
        await cl.Message(content=reply).send()

    elif chat_profile in _GROQ_MODELS:
        reply = _groq_reply(_GROQ_MODELS[chat_profile], message.content)
        await cl.Message(content=reply).send()

    elif chat_profile == "TTS":
        # Synthesize speech, save it locally, and attach it to the reply.
        response = openai_client.audio.speech.create(
            model="tts-1",
            voice="alloy",
            input=message.content,
        )
        response.stream_to_file("output.mp3")
        elements = [
            cl.Audio(name="output.mp3", path="./output.mp3", display="inline"),
        ]
        await cl.Message(
            content="Here it is the response!",
            elements=elements,
        ).send()

    elif chat_profile == "Toka-353M":
        # Fill-mask model: the user's text should contain a <mask> token.
        output = query({
            "inputs": message.content,
        })
        await cl.Message(content=output[0]["sequence"]).send()
|
|
|
@cl.on_settings_update
async def setup_agent(settings):
    """Log settings-panel changes; nothing is wired to them yet."""
    updated = settings
    print("on_settings_update", updated)