try7 / app.py
stevessschen's picture
Update app.py
e0416a0
raw
history blame contribute delete
892 Bytes
"""Streamlit demo app: text generation with a fine-tuned Llama-2-7b model."""
import streamlit as st
from transformers import AutoTokenizer, AutoModelForCausalLM, pipeline

# Small interactive widget to confirm the Streamlit app is live and responsive.
x = st.slider('Select a value')
st.write('steve test1')
st.write(x, 'squared is', x * x)

# Load tokenizer and model weights directly from the Hub.
# NOTE: the original code called TFAutoModelForCausalLM here, which was never
# imported and would raise NameError at startup; AutoModelForCausalLM (the
# PyTorch class actually imported) is the intended loader.
tokenizer = AutoTokenizer.from_pretrained("stevessschen/llama-2-7b-miniguanaco")
model = AutoModelForCausalLM.from_pretrained("stevessschen/llama-2-7b-miniguanaco")

# High-level text-generation helper; max_length caps prompt + completion tokens.
pipe = pipeline("text-generation", model=model, tokenizer=tokenizer, max_length=200)

# Llama-2 instruction prompt format: <s>[INST] ... [/INST]
prompt = "What is a large language model?"
result = pipe(f"<s>[INST] {prompt} [/INST]")

st.write('steve test2')
st.write(result[0]['generated_text'])