# llm-ft / app.py
import gradio as gr
from transformers import AutoTokenizer, pipeline
# Load the tokenizer for the base model (Hub repo ids take the form "user/model")
tokenizer = AutoTokenizer.from_pretrained("nlux/CodeLlama-7b-hf")
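# The Llama tokenizer used by CodeLlama ships without a pad token; falling back
# to the EOS token (a common assumption, not confirmed for this checkpoint)
# avoids pad-related errors during generation.
if tokenizer.pad_token_id is None:
    tokenizer.pad_token = tokenizer.eos_token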
# Path to the merged fine-tuned model weights
model = "./nlux/CodeLlama-7b-hf_merge"
# Build a text-generation pipeline from the model path and tokenizer
pipe = pipeline("text-generation", model=model, tokenizer=tokenizer)
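# Note: the pipeline runs on CPU by default; on a GPU Space, passing device=0
# (or device_map="auto" with accelerate installed) to pipeline() is an option.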
def predict(prompt):
    # Generate text with sampling; cap the output at 256 new tokens
    outputs = pipe(prompt, max_new_tokens=256, do_sample=True, temperature=0.7,
                   top_k=50, top_p=0.95, eos_token_id=pipe.tokenizer.eos_token_id,
                   pad_token_id=pipe.tokenizer.pad_token_id)
    output = outputs[0]["generated_text"].strip()
    # Log the result to the Space console
    print(f"Generated Answer:\n{output}")
    return output
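# Quick local smoke test (hypothetical prompt; uncomment to try outside Gradio):
# print(predict("def fibonacci(n):"))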
# Create a Gradio interface
iface = gr.Interface(fn=predict, inputs="text", outputs="text")
# Launch the interface
iface.launch(share=True)