include sample code to run the model in readme

#14
Files changed (1) hide show
  1. README.md +30 -0
README.md CHANGED
@@ -49,6 +49,36 @@ We evaluate on an extensive set of downstream tasks including reasoning, reading
49
  | Open-LLaMA-3B-v2 | 1T | 55.7 |
50
  | Sheared-LLaMA-2.7B | 50B | 56.7 |
51
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
52
  ## Bibtex
53
  ```
54
  @article{xia2023sheared,
 
49
  | Open-LLaMA-3B-v2 | 1T | 55.7 |
50
  | Sheared-LLaMA-2.7B | 50B | 56.7 |
51
 
52
+ ## Sample Code Using the `transformers` Library
53
+
54
+ ```python
55
+ from transformers import AutoModelForCausalLM, AutoTokenizer
56
+
57
+ # Load model and tokenizer
58
+ model_path = "Sheared-LLaMA-1.3B/" # Replace with the actual path
59
+ tokenizer = AutoTokenizer.from_pretrained(model_path)
60
+ model = AutoModelForCausalLM.from_pretrained(model_path)
61
+
62
+ # Input prompt
63
+ input_text = "Once upon a time"
64
+ input_ids = tokenizer.encode(input_text, return_tensors='pt')
65
+
66
+ # Generate text
67
+ output = model.generate(
68
+ input_ids,
69
+ max_length=100,
70
+ num_return_sequences=1,
71
+ no_repeat_ngram_size=2,
72
+ temperature=0.7,
73
+ top_p=0.9,
74
+ do_sample=True
75
+ )
76
+
77
+ # Decode and print the generated text
78
+ generated_text = tokenizer.decode(output[0], skip_special_tokens=True)
79
+ print(generated_text)
80
+ ```
81
+
82
  ## Bibtex
83
  ```
84
  @article{xia2023sheared,