Islam YAHIAOUI committed
Commit d5dcf9b
1 Parent(s): c41e5ec
Files changed (1)
  1. app.py +36 -43
app.py CHANGED
@@ -5,7 +5,7 @@ from rag import run_rag
 
  # ================================================================================================================================
  TOKEN = os.getenv("HF_TOKEN")
- client = InferenceClient("HuggingFaceH4/zephyr-7b-beta" )
+ client = InferenceClient("HuggingFaceH4/zephyr-7b-beta" , token=TOKEN)
  system_message ="You are a capable and freindly assistant."
  history = []
  no_change_btn = gr.Button()
@@ -103,9 +103,9 @@ def chat(
  for val in history:
  messages.append(val)
 
- messages.append({"role": "user", "content": message})
+ messages.append({"role": "user", "content": run_rag(message)})
  response = ""
-
+ # chatbot.append((question))
  for msg in client.chat_completion(
  messages,
  max_tokens=max_tokens,
@@ -116,6 +116,8 @@ def chat(
 
  token = msg.choices[0].delta.content
  response += str(token)
+ # chatbot.append(( response))
+
  # yield "" , chatbot
  chatbot.append((question , response))
  state.save_response(response)
@@ -136,34 +138,7 @@ EXAMPLES = [
  [ "Tell me about the actual situation in Ukraine ?"],
  [ "Tell me about current situation in palestine ?"],
  ]
- # max_new_tokens = gr.Slider(
- #     minimum=1,
- #     maximum=2048,
- #     value=1024,
- #     step=1,
- #     interactive=True,
- #     label="Max new tokens",
- # )
- # temperature = gr.Slider(
- #     minimum=0.1,
- #     maximum=0.9,
- #     value=0.6,
- #     step=0.1,
- #     visible=True,
- #     interactive=True,
- #     label="Temperature",
- #     info="Higher values will produce more diverse outputs.",
- # )
- # top_p = gr.Slider(
- #     minimum=0.1,
- #     maximum=1,
- #     value=0.9,
- #     step=0.05,
- #     visible=True,
- #     interactive=True,
- #     label="Top-p (nucleus sampling)",
- #     info="Higher values is equivalent to sampling more low-probability tokens.",
- # )
+
  # ================================================================================================================================
 
 
@@ -179,23 +154,34 @@ textbox = gr.Textbox(show_label=False,
  show_copy_button=True
  )
 
- with gr.Blocks(title="RAG", theme=theme, css=block_css) as demo:
+ with gr.Blocks(title="RAG", theme=theme, css=block_css , fill_height=True) as demo:
 
- gr.Markdown("Retrieval Augmented Generation (RAG) Chatbot" )
- with gr.Row():
- with gr.Column(scale=8):
+ gr.Markdown("# **Retrieval Augmented Generation (RAG) Chatbot**" )
+ gr.Markdown("This is a demo of a chatbot that uses the RAG system to generate responses to user queries. RAG is a combination of a retriever and a generator, which allows it to generate responses based on the context of the conversation. The chatbot can be used to answer questions, provide information, and engage in conversation with users.")
+ with gr.Row(variant="panel"):
+ # with gr.Column(scale=2):
+ #     # gr.Markdown("# ****")
+ #     new_chat=gr.Button(value=" New Chat", variant="secondary", interactive=True)
+
+ # with gr.Column(scale=1):
+ #     exit_btn = gr.Button(value="🚪 Exit", interactive=True , variant="stop")
+ with gr.Column(scale=10):
  chatbot = gr.Chatbot(
  elem_id="chatbot",
  label="Retrieval Augmented Generation (RAG) Chatbot",
- height=400,
+ height=300,
  layout="bubble",
+ min_width=1200,
+ show_copy_button=True,
+ show_share_button=True,
+ placeholder="Ask a question or type a message...",
  )
  with gr.Row():
  with gr.Column(scale=8):
  textbox.render()
 
- with gr.Column(scale=1, min_width=100):
- submit_btn = gr.Button(value="Submit", variant="primary", interactive=True)
+ with gr.Column(scale=1, min_width=100):
+ submit_btn = gr.Button(value="Submit", variant="primary", interactive=True)
 
 
  with gr.Row(elem_id="buttons") as button_row:
@@ -204,20 +190,27 @@ with gr.Blocks(title="RAG", theme=theme, css=block_css) as demo:
  flag_btn = gr.Button(value="⚠️ Flag", interactive=False)
  #stop_btn = gr.Button(value="⏹️ Stop Generation", interactive=False)
  regenerate_btn = gr.Button(value="🔄 Regenerate", interactive=False)
- clear_btn = gr.Button(value="🗑️ Clear", interactive=False)
-
  with gr.Column(scale=3):
+ clear_btn = gr.Button(value="🗑️ Clear", interactive=False , variant="stop")
+ with gr.Accordion("Examples", open=True) as Examples_row:
  gr.Examples(examples=[
  [f"Tell me about the latest news in the world ?"],
  [f"Tell me about the increase in the price of Bitcoin ?"],
  [f"Tell me about the actual situation in Ukraine ?"],
+ [f"How true is the news about the increase in the price of oil ?"],
  [f"Tell me about current situation in palestinian ?"],
+ [f"Tell me about the current situation in Afghanistan ?"],
+ [f"what are the agenda of the United Nations ?"],
+ ["how trump's compain going ?"],
  ],inputs=[textbox], label="Examples")
+
+
 
  with gr.Accordion("Parameters", open=False) as parameter_row:
- temperature = gr.Slider(minimum=0.0, maximum=1.0, value=0.2, step=0.1, interactive=True, label="Temperature",)
- top_p = gr.Slider(minimum=0.0, maximum=1.0, value=0.7, step=0.1, interactive=True, label="Top P",)
- max_output_tokens = gr.Slider(minimum=0, maximum=1024, value=512, step=64, interactive=True, label="Max output tokens",)
+ temperature = gr.Slider(minimum=0.1, maximum=1.0, value=0.2, step=0.1, interactive=True, label="Temperature",)
+ top_p = gr.Slider(minimum=0.1, maximum=1.0, value=0.7, step=0.1, interactive=True, label="Top P",)
+ max_output_tokens = gr.Slider(minimum=0, maximum=4096, value=1024, step=64, interactive=True, label="Max output tokens",)
+
 
  # ================================================================================================================================
  btn_list = [upvote_btn, downvote_btn, flag_btn, regenerate_btn, clear_btn]
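
Note on the chat-path hunks above: the commit authenticates the InferenceClient with the HF_TOKEN secret and routes the user turn through run_rag() so the prompt carries the retrieved context before it reaches the streaming chat_completion call. The snippet below is a minimal, self-contained sketch of that pattern under a few assumptions: the answer() name and its signature are invented for illustration, stream=True is inferred from the loop reading .delta.content, and run_rag() is the retrieval helper defined in this Space's rag.py.

```python
import os

from huggingface_hub import InferenceClient
from rag import run_rag  # retrieval helper from this Space's rag.py (assumed to return a context-augmented prompt string)

# Same model and token wiring as the new line 8 of app.py.
client = InferenceClient("HuggingFaceH4/zephyr-7b-beta", token=os.getenv("HF_TOKEN"))


def answer(question, history, max_tokens=1024, temperature=0.2, top_p=0.7):
    """Illustrative helper (not the Space's chat() function): return one RAG-augmented reply."""
    messages = [{"role": "system", "content": "You are a capable and friendly assistant."}]
    messages.extend(history)  # prior turns as {"role": ..., "content": ...} dicts
    # As in the diff, the raw question is expanded by run_rag() before being sent.
    messages.append({"role": "user", "content": run_rag(question)})

    response = ""
    # stream=True is assumed here because the diff iterates over chunks and reads .delta.content.
    for chunk in client.chat_completion(
        messages,
        max_tokens=max_tokens,
        stream=True,
        temperature=temperature,
        top_p=top_p,
    ):
        response += str(chunk.choices[0].delta.content)
    return response
```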
 
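On the UI hunks: the layout is rebuilt around a panelled row with a shorter Chatbot (copy/share buttons, placeholder text), the Examples list moves into its own accordion with extra prompts, and the parameter sliders get wider ranges (temperature and Top P starting at 0.1, up to 4096 output tokens). The diff does not show how the Submit button is wired to chat(), so the sketch below is a hypothetical, stand-alone reconstruction of that layout with a stubbed respond() handler; the component values mirror the new app.py, while the handler and the .click wiring are illustrative only.

```python
import gradio as gr

with gr.Blocks(title="RAG", fill_height=True) as demo:
    gr.Markdown("# **Retrieval Augmented Generation (RAG) Chatbot**")
    with gr.Row(variant="panel"):
        chatbot = gr.Chatbot(label="RAG Chatbot", height=300, layout="bubble",
                             show_copy_button=True, show_share_button=True)
    with gr.Row():
        textbox = gr.Textbox(show_label=False, scale=8,
                             placeholder="Ask a question or type a message...")
        submit_btn = gr.Button("Submit", variant="primary", scale=1)
    with gr.Accordion("Examples", open=True):
        gr.Examples(examples=[["Tell me about the latest news in the world ?"]],
                    inputs=[textbox], label="Examples")
    with gr.Accordion("Parameters", open=False):
        temperature = gr.Slider(minimum=0.1, maximum=1.0, value=0.2, step=0.1, label="Temperature")
        top_p = gr.Slider(minimum=0.1, maximum=1.0, value=0.7, step=0.1, label="Top P")
        max_output_tokens = gr.Slider(minimum=0, maximum=4096, value=1024, step=64,
                                      label="Max output tokens")

    def respond(message, chat_history, temperature, top_p, max_tokens):
        # Stand-in for the Space's chat(): echo instead of calling the model.
        chat_history.append((message, f"(model reply to: {message!r})"))
        return "", chat_history

    # Hypothetical wiring; the real event handlers are defined elsewhere in app.py and are not part of this diff.
    submit_btn.click(respond,
                     [textbox, chatbot, temperature, top_p, max_output_tokens],
                     [textbox, chatbot])

if __name__ == "__main__":
    demo.launch()
```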