ogegadavis254 committed on
Commit
fe5f88e
1 Parent(s): d36f2e1

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +9 -9
app.py CHANGED
@@ -6,10 +6,7 @@ from dotenv import load_dotenv
6
  load_dotenv()
7
 
8
  # Initialize the client
9
- client = OpenAI(
10
- base_url="https://api-inference.huggingface.co/v1",
11
- api_key=os.getenv('HUGGINGFACEHUB_API_TOKEN')
12
- )
13
 
14
  # Model link for Mistral
15
  model_link = "mistralai/Mistral-7B-Instruct-v0.2"
@@ -47,17 +44,20 @@ if prompt := st.chat_input("Type your message here..."):
47
 
48
  # Interact with the model
49
  try:
50
- # Send the user and system messages to the API
51
- messages_for_api = [{"role": "system", "content": "You are a helpful assistant."}] + st.session_state.messages
 
 
52
 
 
53
  response = client.chat.completions.create(
54
  model=model_link,
55
- messages=messages_for_api,
56
  temperature=temperature,
57
  max_tokens=150 # Adjust the max tokens according to your needs
58
  )
59
 
60
- assistant_response = response["choices"][0]["message"]["content"]
61
 
62
  # Display assistant response in chat message container
63
  with st.chat_message("assistant"):
@@ -70,4 +70,4 @@ if prompt := st.chat_input("Type your message here..."):
70
  # Display error message to user
71
  with st.chat_message("assistant"):
72
  st.markdown("Sorry, I couldn't process your request. Please try again later.")
73
- st.write(e)
 
6
  load_dotenv()
7
 
8
  # Initialize the client
9
+ client = OpenAI(api_key=os.getenv('OPENAI_API_KEY'))
 
 
 
10
 
11
  # Model link for Mistral
12
  model_link = "mistralai/Mistral-7B-Instruct-v0.2"
 
44
 
45
  # Interact with the model
46
  try:
47
+ # Construct the conversation context
48
+ conversation_context = [{"role": "system", "content": "You are a helpful assistant."}]
49
+ for msg in st.session_state.messages:
50
+ conversation_context.append(msg)
51
 
52
+ # Send the conversation context to the API
53
  response = client.chat.completions.create(
54
  model=model_link,
55
+ messages=conversation_context,
56
  temperature=temperature,
57
  max_tokens=150 # Adjust the max tokens according to your needs
58
  )
59
 
60
+ assistant_response = response.choices[0].message["content"]
61
 
62
  # Display assistant response in chat message container
63
  with st.chat_message("assistant"):
 
70
  # Display error message to user
71
  with st.chat_message("assistant"):
72
  st.markdown("Sorry, I couldn't process your request. Please try again later.")
73
+ st.error(f"An error occurred: {e}")