Wajidbinaqeel committed on
Commit f0f71ad
1 Parent(s): 80a0b8b

Update app.py

Files changed (1)
  1. app.py +3 -13
app.py CHANGED
@@ -1,4 +1,3 @@
-# import required packages
 import google.generativeai as genai
 import os
 import PIL.Image
@@ -12,17 +11,10 @@ from gradio.data_classes import FileData
 GOOGLE_API_KEY = os.environ.get('GOOGLE_API_KEY')
 genai.configure(api_key=GOOGLE_API_KEY)
 
-# These codelines are just to verify if your api key is correct or not
-# Use them when you clone the repo and build locally
-#!curl \
-#-H 'Content-Type: application/json' \
-#-d '{ "prompt": { "text": "Write a very short story about a magic backpack"} }' \
-#"https://generativelanguage.googleapis.com/v1beta3/models/text-bison-001:generateText?key=<enter-your-key-here>"
 
 # Initialize genai models
 model = genai.GenerativeModel('gemini-pro')
 
-
 def gemini(input, file, chatbot=[]):
     """
     Function to handle gemini model and gemini vision model interactions.
@@ -56,9 +48,7 @@ def gemini(input, file, chatbot=[]):
         user_msg = {"text": input, "files": []}
         bot_msg = {"text": gemini_resp, "files": []}
         chatbot.append([user_msg, bot_msg])
-
-
-
+
     except Exception as e:
         # Handling exceptions and raising error to the modal
         print(f"An error occurred: {e}")
@@ -69,7 +59,7 @@ def gemini(input, file, chatbot=[]):
 # Define the Gradio Blocks interface
 with gr.Blocks() as demo:
     # Add a centered header using HTML
-    gr.HTML("<center><h1>Gemini PRO API</h1></center>")
+    gr.HTML("<center><h1>Gemini Chat PRO API</h1></center>")
 
     # Initialize the MultimodalChatbot component
     multi = MultimodalChatbot(value=[], height=800)
@@ -78,11 +68,11 @@ with gr.Blocks() as demo:
     # Textbox for user input with increased scale for better visibility
     tb = gr.Textbox(scale=4, placeholder='Input text and press Enter')
 
+
 
     # Define the behavior on text submission
     tb.submit(gemini, [tb, multi], [multi, tb])
 
-
 
 # Launch the demo with a queue to handle multiple users
 demo.queue().launch()
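
The removed curl snippet targeted the older text-bison-001 v1beta3 endpoint. For anyone cloning the repo and building locally, a comparable key check can be done with the same SDK the app already imports; the sketch below is illustrative only and not part of this commit.

# Minimal local check of GOOGLE_API_KEY using the google-generativeai SDK
# (illustrative sketch; not part of this commit).
import os

import google.generativeai as genai

genai.configure(api_key=os.environ["GOOGLE_API_KEY"])

# list_models() fails fast with an authentication error if the key is invalid.
for m in genai.list_models():
    if "generateContent" in m.supported_generation_methods:
        print(m.name)

# Optional end-to-end check against the same model the app initializes.
resp = genai.GenerativeModel("gemini-pro").generate_content(
    "Write a very short story about a magic backpack"
)
print(resp.text)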