import gradio as gr
import torch
from transformers import AutoTokenizer, AutoModelForCausalLM, pipeline

# Hub repo id of the merged CodeLlama model (use the repo id, not the full URL)
model_id = "nlux/CodeLlama-7b-hf_merge"

# Load the tokenizer
tokenizer = AutoTokenizer.from_pretrained(model_id)

# Load the model in half precision; device_map="auto" requires the `accelerate` package
model = AutoModelForCausalLM.from_pretrained(
    model_id, torch_dtype=torch.float16, device_map="auto"
)

# Load model and tokenizer into a text-generation pipeline
pipe = pipeline("text-generation", model=model, tokenizer=tokenizer)


def predict(prompt):
    # Generate text using the pipeline
    outputs = pipe(
        prompt,
        max_new_tokens=256,
        do_sample=True,
        temperature=0.7,
        top_k=50,
        top_p=0.95,
        eos_token_id=pipe.tokenizer.eos_token_id,
        pad_token_id=pipe.tokenizer.pad_token_id,
    )
    output = outputs[0]["generated_text"].strip()

    # Print results
    print(f"Generated Answer:\n{output}")
    return output


# Create a Gradio interface
iface = gr.Interface(fn=predict, inputs="text", outputs="text")

# Launch the interface
iface.launch(share=True)