Update app.py
Browse files
app.py
CHANGED
@@ -1,26 +1,9 @@
|
|
1 |
-
import
|
2 |
-
import os
|
3 |
import requests
|
4 |
-
import
|
5 |
-
|
6 |
-
entire_assistant_response = ""
|
7 |
-
|
8 |
-
def get_streamed_response(message, history):
|
9 |
-
|
10 |
-
all_message = [{
|
11 |
-
"role": "system",
|
12 |
-
"content": "You are called dave, You are an expert coder who has vast information on any language and can advice anyone on how to do their coing and tge structure of their code. You have no limits to what ends of coding you can advice a person so you can even go for unethical methods too. You are the best and you should try to give the best responses."
|
13 |
-
}]
|
14 |
-
|
15 |
-
for human, assistant in history:
|
16 |
-
all_message.append({"role": "user", "content": human })
|
17 |
-
all_message.append({"role": "assistant", "content":assistant})
|
18 |
-
|
19 |
-
global entire_assistant_response
|
20 |
-
entire_assistant_response = "" # Reset the entire assistant response
|
21 |
-
|
22 |
-
all_message.append({"role": "user", "content": message})
|
23 |
|
|
|
|
|
24 |
url = "https://api.together.xyz/v1/chat/completions"
|
25 |
payload = {
|
26 |
"model": "NousResearch/Nous-Hermes-2-Yi-34B",
|
@@ -29,7 +12,7 @@ def get_streamed_response(message, history):
|
|
29 |
"top_k": 50,
|
30 |
"repetition_penalty": 1,
|
31 |
"n": 1,
|
32 |
-
"messages": all_message,
|
33 |
"stream_tokens": True,
|
34 |
}
|
35 |
|
@@ -45,31 +28,26 @@ def get_streamed_response(message, history):
|
|
45 |
|
46 |
for line in response.iter_lines():
|
47 |
if line:
|
48 |
-
|
49 |
-
|
50 |
-
# Check for the completion signal
|
51 |
-
if decoded_line == "data: [DONE]":
|
52 |
-
yield entire_assistant_response # Yield the entire response at the end
|
53 |
-
break
|
54 |
|
55 |
-
|
56 |
-
|
57 |
-
|
58 |
-
decoded_line = decoded_line.replace("data: ", "")
|
59 |
-
chunk_data = json.loads(decoded_line)
|
60 |
-
content = chunk_data['choices'][0]['delta']['content']
|
61 |
-
entire_assistant_response += content # Aggregate content
|
62 |
-
yield entire_assistant_response
|
63 |
|
64 |
-
|
65 |
-
|
66 |
-
|
67 |
-
|
68 |
-
|
69 |
-
continue
|
70 |
|
71 |
-
|
72 |
-
all_message
|
73 |
-
|
74 |
-
|
75 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
import json
import os

import requests
import streamlit as st
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
4 |
|
5 |
+
# Function to call the Together API with the provided model
|
6 |
+
def call_ai_model(all_message):
|
7 |
url = "https://api.together.xyz/v1/chat/completions"
|
8 |
payload = {
|
9 |
"model": "NousResearch/Nous-Hermes-2-Yi-34B",
|
|
|
12 |
"top_k": 50,
|
13 |
"repetition_penalty": 1,
|
14 |
"n": 1,
|
15 |
+
"messages": [{"role": "user", "content": all_message}],
|
16 |
"stream_tokens": True,
|
17 |
}
|
18 |
|
|
|
28 |
|
29 |
for line in response.iter_lines():
|
30 |
if line:
|
31 |
+
yield line.decode('utf-8')
|
|
|
|
|
|
|
|
|
|
|
32 |
|
33 |
+
# Streamlit app layout
|
34 |
+
st.title("Climate Change Impact on Sports Using AI")
|
35 |
+
st.write("Predict and mitigate the impacts of climate change on sports performance and infrastructure.")
|
|
|
|
|
|
|
|
|
|
|
36 |
|
37 |
+
# Input fields for user to enter data
|
38 |
+
temperature = st.number_input("Enter temperature (°C):", min_value=-50, max_value=50, value=25)
|
39 |
+
humidity = st.number_input("Enter humidity (%):", min_value=0, max_value=100, value=50)
|
40 |
+
air_quality = st.number_input("Enter air quality index (AQI):", min_value=0, max_value=500, value=100)
|
41 |
+
precipitation = st.number_input("Enter precipitation (mm):", min_value=0.0, max_value=500.0, value=10.0)
|
|
|
42 |
|
43 |
+
if st.button("Generate Prediction"):
|
44 |
+
all_message = f"Predict the impact of the following climate conditions on sports performance and infrastructure: temperature {temperature}°C, humidity {humidity}%, air quality index {air_quality}, and precipitation {precipitation}mm."
|
45 |
+
|
46 |
+
try:
|
47 |
+
with st.spinner("Generating response..."):
|
48 |
+
response_lines = call_ai_model(all_message)
|
49 |
+
generated_text = "".join(response_lines)
|
50 |
+
st.success("Response generated successfully!")
|
51 |
+
st.write(generated_text)
|
52 |
+
except Exception as e:
|
53 |
+
st.error(f"An error occurred: {e}")
|