import gradio as gr
from transformers import AutoTokenizer, pipeline

# Load the tokenizer from the base model repo (Hub repo IDs are "org/name")
tokenizer = AutoTokenizer.from_pretrained("nlux/CodeLlama-7b-hf")

# Path to the merged fine-tuned model
model_path = "./nlux/CodeLlama-7b-hf_merge"

# Build a text-generation pipeline from the merged model and tokenizer
pipe = pipeline("text-generation", model=model_path, tokenizer=tokenizer)

def predict(prompt):
    # Generate text; Llama-family tokenizers define no pad token, so reuse EOS for padding
    outputs = pipe(
        prompt,
        max_new_tokens=256,
        do_sample=True,
        temperature=0.7,
        top_k=50,
        top_p=0.95,
        eos_token_id=pipe.tokenizer.eos_token_id,
        pad_token_id=pipe.tokenizer.eos_token_id,
    )
    output = outputs[0]['generated_text'].strip()
    # Log the result (note: 'generated_text' includes the prompt)
    print(f"Generated Answer:\n{output}")
    return output

# Create a Gradio interface
iface = gr.Interface(fn=predict, inputs="text", outputs="text")

# Launch the interface
iface.launch(share=True)