diabolic6045 committed on
Commit
fde8dbf
1 Parent(s): b6b0e9d

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +2 -3
app.py CHANGED
@@ -9,7 +9,7 @@ from transformers import AutoModelForCausalLM, AutoTokenizer, TextIteratorStreamer
9
 
10
  DESCRIPTION = """\
11
  # Llama 3.2 1B Instruct Finetuned on 10% of Open Hermes Dataset
12
- This is a demo of [`diabolic6045/open-llama-Instruct`](https://huggingface.co/diabolic6045/open-llama-Instruct).
13
  """
14
 
15
  MAX_MAX_NEW_TOKENS = 1024
@@ -18,7 +18,7 @@ MAX_INPUT_TOKEN_LENGTH = int(os.getenv("MAX_INPUT_TOKEN_LENGTH", "1024"))
18
 
19
  device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
20
 
21
- model_id = "diabolic6045/open-llama-Instruct"
22
  tokenizer = AutoTokenizer.from_pretrained(model_id)
23
  model = AutoModelForCausalLM.from_pretrained(
24
  model_id,
@@ -127,7 +127,6 @@ chat_interface = gr.ChatInterface(
127
 
128
  with gr.Blocks(fill_height=True, theme=gr.themes.Ocean()) as demo:
129
  gr.Markdown(DESCRIPTION)
130
- gr.DuplicateButton(value="Duplicate Space for private use", elem_id="duplicate-button")
131
  chat_interface.render()
132
 
133
  if __name__ == "__main__":
 
9
 
10
  DESCRIPTION = """\
11
  # Llama 3.2 1B Instruct Finetuned on 10% of Open Hermes Dataset
12
+ This is a demo of [`diabolic6045/open-llama-3.2-1B-Instruct`](https://huggingface.co/diabolic6045/open-llama-3.2-1B-Instruct).
13
  """
14
 
15
  MAX_MAX_NEW_TOKENS = 1024
 
18
 
19
  device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
20
 
21
+ model_id = "diabolic6045/open-llama-3.2-1B-Instruct"
22
  tokenizer = AutoTokenizer.from_pretrained(model_id)
23
  model = AutoModelForCausalLM.from_pretrained(
24
  model_id,
 
127
 
128
  with gr.Blocks(fill_height=True, theme=gr.themes.Ocean()) as demo:
129
  gr.Markdown(DESCRIPTION)
 
130
  chat_interface.render()
131
 
132
  if __name__ == "__main__":