nldemo committed on
Commit
30353c2
1 Parent(s): 0255925

Fix up the Gradio inputs, outputs, and functions

Browse files
Files changed (1) hide show
  1. app.py +2 -2
app.py CHANGED
@@ -6,7 +6,7 @@ import torch
6
  from transformers import AutoTokenizer, AutoModelForCausalLM
7
 
8
  @spaces.GPU
9
- def sentience_check(n):
10
  huggingface_hub.login(token=os.environ["HUGGINGFACE_TOKEN"])
11
  device = torch.device("cuda")
12
  tokenizer = AutoTokenizer.from_pretrained("google/gemma-2-2b-it")
@@ -21,5 +21,5 @@ def sentience_check(n):
21
 
22
  return tokenizer.decode(outputs[0], skip_special_tokens=True)
23
 
24
- demo = gr.Interface(fn=greet, inputs=gr.Number(), outputs=gr.Text())
25
  demo.launch()
 
6
  from transformers import AutoTokenizer, AutoModelForCausalLM
7
 
8
  @spaces.GPU
9
+ def sentience_check():
10
  huggingface_hub.login(token=os.environ["HUGGINGFACE_TOKEN"])
11
  device = torch.device("cuda")
12
  tokenizer = AutoTokenizer.from_pretrained("google/gemma-2-2b-it")
 
21
 
22
  return tokenizer.decode(outputs[0], skip_special_tokens=True)
23
 
24
+ demo = gr.Interface(fn=sentience_check, outputs=gr.Text())
25
  demo.launch()