# Hugging Face Space app. (The scraped page header "Spaces" / "Runtime error"
# status banners were captured with the source; preserved here as a comment.)
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM
from transformers import pipeline
import torch
import gradio as gr

# Prompt-generator model: expands a short role/topic (e.g. "photographer")
# into a full ChatGPT-style prompt.
# chatgpt-gpt4-prompts-bart-large-cnn-samsum
tokenizer = AutoTokenizer.from_pretrained(
    "Kaludi/chatgpt-gpt4-prompts-bart-large-cnn-samsum")
# from_tf=True: this checkpoint ships TensorFlow weights; convert on load.
model = AutoModelForSeq2SeqLM.from_pretrained(
    "Kaludi/chatgpt-gpt4-prompts-bart-large-cnn-samsum", from_tf=True)

# Chat model (zephyr) used to answer the generated prompt.
# device_map="auto" spreads the 7B model across available devices.
pipe = pipeline("text-generation", model="HuggingFaceH4/zephyr-7b-alpha",
                torch_dtype=torch.bfloat16, device_map="auto")
def useZephyr(prompt):
    """Send `prompt` to the Zephyr chat pipeline and return its reply text.

    The system message pins the persona (pirate-style chatbot); sampling
    parameters (temperature/top_k/top_p) keep the output varied.
    """
    chat = [
        {
            "role": "system",
            "content": "You are a friendly chatbot who always responds in the style of a pirate.",
        },
        {"role": "user", "content": prompt},
    ]
    # https://huggingface.co/docs/transformers/main/en/chat_templating
    prompt = pipe.tokenizer.apply_chat_template(
        chat, tokenize=False, add_generation_prompt=True)
    print(prompt)
    result = pipe(
        prompt,
        max_new_tokens=256,
        do_sample=True,
        temperature=0.7,
        top_k=50,
        top_p=0.95,
    )
    return result[0]["generated_text"]
def generatePrompt(prompt, max_new_tokens):
    """Expand `prompt` into a full ChatGPT prompt via the BART model,
    then answer that prompt with Zephyr and return the reply.

    `max_new_tokens` arrives as text from the UI, hence the int() cast.
    """
    encoded = tokenizer(prompt, return_tensors="pt")
    generated = model.generate(
        encoded["input_ids"], max_new_tokens=int(max_new_tokens))
    decoded = tokenizer.batch_decode(generated, skip_special_tokens=True)
    return useZephyr(decoded[0])
def generate_test(prompt):
    """Run only the BART prompt-generator (no Zephyr) and return its text.

    Handy for checking the expanded prompt in isolation; capped at 150
    new tokens.
    """
    encoded = tokenizer(prompt, return_tensors="pt")
    generated = model.generate(encoded["input_ids"], max_new_tokens=150)
    texts = tokenizer.batch_decode(generated, skip_special_tokens=True)
    return texts[0]
def generate_prompt(prompt, max_new_tokens):
    """snake_case alias for generatePrompt (same arguments, same result)."""
    return generatePrompt(prompt, max_new_tokens)
#
# Interface
input_prompt = gr.Textbox(label="Prompt", value="photographer")
input_maxtokens = gr.Textbox(label="Max tokens", value="150")
output_component = gr.Textbox(label="Output")
# BUG FIX: the interface declares two inputs, so each example must supply
# two values; pair every example with the default token budget.
examples = [["photographer", "150"], ["developer", "150"], ["teacher", "150"],
            ["human resources staff", "150"], ["recipe for ham croquettes", "150"]]
description = ""
# BUG FIX: fn must accept both declared inputs. useZephyr takes a single
# argument and would raise a TypeError on submit; generate_prompt(prompt,
# max_new_tokens) is the function that matches (and uses) both inputs.
# BUG FIX: launch exactly once — launch() does not return the Interface,
# so the original `Interface(...).launch(...)` followed by
# `PerfectGPT.launch()` crashed on the second call.
PerfectGPT = gr.Interface(
    generate_prompt,
    inputs=[input_prompt, input_maxtokens],
    outputs=output_component,
    examples=examples,
    title="๐ฟ PerfectGPT v1 ๐ฟ",
    description=description,
)
PerfectGPT.launch(share=True)