wavesoumen committed
Commit f933b1a
1 Parent(s): f007a46
Files changed (1)
  1. app.py +28 -0
app.py ADDED
@@ -0,0 +1,28 @@
+ import streamlit as st
+ from ctransformers import AutoModelForCausalLM
+
+ # Load the model outside the main function to avoid reloading on every run
+ llm = AutoModelForCausalLM.from_pretrained("TheBloke/Mistral-7B-v0.1-GGUF",
+                                            model_file="mistral-7b-v0.1.Q4_K_M.gguf",
+                                            model_type="mistral",
+                                            gpu_layers=50)
+
+ def generate_response(prompt):
+     return llm(prompt)
+
+ def main():
+     st.title("AI Text Generation App")
+
+     prompt = st.text_input("Enter your prompt:")
+     if prompt:
+         try:
+             response = generate_response(prompt)
+
+             # Display the generated response
+             st.subheader("Generated Response")
+             st.write(response)
+         except Exception as e:
+             st.error(f"Error generating response: {e}")
+
+ if __name__ == "__main__":
+     main()
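
A note on the module-level load: Streamlit re-executes the whole script on every widget interaction, so the from_pretrained call above runs on each rerun even though it sits outside main(). Below is a minimal sketch of how the same model could be kept in memory across reruns with Streamlit's st.cache_resource decorator; the helper name load_model and the generation kwargs max_new_tokens and temperature are illustrative additions, not part of the committed app.

    import streamlit as st
    from ctransformers import AutoModelForCausalLM

    @st.cache_resource  # keep the loaded model in memory across Streamlit reruns
    def load_model():
        # Same checkpoint and settings as in the commit above
        return AutoModelForCausalLM.from_pretrained(
            "TheBloke/Mistral-7B-v0.1-GGUF",
            model_file="mistral-7b-v0.1.Q4_K_M.gguf",
            model_type="mistral",
            gpu_layers=50,
        )

    llm = load_model()

    st.title("AI Text Generation App")
    prompt = st.text_input("Enter your prompt:")
    if prompt:
        # max_new_tokens and temperature are optional ctransformers generation kwargs (illustrative values)
        st.subheader("Generated Response")
        st.write(llm(prompt, max_new_tokens=256, temperature=0.7))

The app is started with: streamlit run app.py. The first interaction still pays the full model-load cost; subsequent reruns reuse the cached instance instead of reloading the GGUF file.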