gopalagra committed on
Commit c9edc6a · verified · 1 Parent(s): 1b286f6

Update app.py

Files changed (1)
  1. app.py +179 -179
app.py CHANGED
@@ -65,164 +65,19 @@
  # interface.launch()
  # # demo.launch(share=True)

- # import gradio as gr
- # from transformers import (
- # BlipProcessor,
- # BlipForConditionalGeneration,
- # BlipForQuestionAnswering,
- # pipeline
- # )
- # moderation_model = pipeline(
- # "text-classification",
- # model="Vrandan/Comment-Moderation",
- # return_all_scores=True
- # )
-
- # from PIL import Image
- # import torch
- # from gtts import gTTS
- # import tempfile
-
- # # ----------------------
- # # Device setup
- # # ----------------------
- # device = "cuda" if torch.cuda.is_available() else "cpu"
-
- # # ----------------------
- # # Load Models Once
- # # ----------------------
- # print("🔄 Loading models...")
-
- # # Captioning
- # caption_processor = BlipProcessor.from_pretrained("Salesforce/blip-image-captioning-large")
- # caption_model = BlipForConditionalGeneration.from_pretrained("Salesforce/blip-image-captioning-large").to(device)
-
- # # VQA
- # vqa_processor = BlipProcessor.from_pretrained("Salesforce/blip-vqa-base")
- # vqa_model = BlipForQuestionAnswering.from_pretrained("Salesforce/blip-vqa-base").to(device)
-
- # # Translation
- # translation_models = {
- # "Hindi": pipeline("translation", model="Helsinki-NLP/opus-mt-en-hi"),
- # "French": pipeline("translation", model="Helsinki-NLP/opus-mt-en-fr"),
- # "Spanish": pipeline("translation", model="Helsinki-NLP/opus-mt-en-es"),
- # }
-
- # # Safety Moderation Pipeline
- # moderation_model = pipeline("text-classification", model="unitary/toxic-bert")
-
- # print("✅ All models loaded!")
-
- # # ----------------------
- # # Safety Filter Function
- # # ----------------------
- # def is_caption_safe(caption):
- # try:
- # votes = moderation_model(caption)
- # # If return_all_scores=True, it's [[{label, score}, ...]]
- # if isinstance(votes, list) and isinstance(votes[0], list):
- # votes = votes[0]
- # # Now safe to loop
- # for item in votes:
- # if isinstance(item, dict) and item.get("label") in ["V", "V2"] and item.get("score", 0) > 0.5:
- # return False
- # except Exception as e:
- # print("⚠️ Moderation failed:", e)
-
- # # Fallback keywords
- # unsafe_keywords = [
- # "gun", "blood", "skull", "kill", "corpse", "gore", "knife", "weapon",
- # "fire", "murder", "dead", "death", "suicide", "bomb", "explosion",
- # "terrorist", "assault", "stab", "shoot", "pistol", "rifle", "shotgun",
- # "grenade", "horror", "beheaded", "torture", "hostage", "rape",
- # "war", "massacre", "chainsaw", "poison", "strangle", "hang", "drown"
- # ]
- # if any(word in caption.lower() for word in unsafe_keywords):
- # return False
- # return True
-
-
-
-
- # # ----------------------
- # # Caption + Translate + Speak
- # # ----------------------
- # def generate_caption_translate_speak(image, target_lang):
- # # Step 1: Caption
- # inputs = caption_processor(images=image, return_tensors="pt").to(device)
- # with torch.no_grad():
- # out = caption_model.generate(**inputs, max_new_tokens=50)
- # english_caption = caption_processor.decode(out[0], skip_special_tokens=True)
-
- # # Step 1.5: Safety Check
- # if not is_caption_safe(english_caption):
- # return "⚠️ Warning: Unsafe or inappropriate content detected!", "", None
-
- # # Step 2: Translate
- # if target_lang in translation_models:
- # translated = translation_models[target_lang](english_caption)[0]['translation_text']
- # else:
- # translated = "Translation not available"
-
- # # Step 3: Generate Speech (English caption for now)
- # tts = gTTS(english_caption, lang="en")
- # tmp_file = tempfile.NamedTemporaryFile(delete=False, suffix=".mp3")
- # tts.save(tmp_file.name)
-
- # return english_caption, translated, tmp_file.name
-
- # # ----------------------
- # # VQA
- # # ----------------------
- # def vqa_answer(image, question):
- # inputs = vqa_processor(image, question, return_tensors="pt").to(device)
- # with torch.no_grad():
- # out = vqa_model.generate(**inputs, max_new_tokens=50)
- # answer = vqa_processor.decode(out[0], skip_special_tokens=True)
-
- # # Run safety filter on answers too
- # if not is_caption_safe(answer):
- # return "⚠️ Warning: Unsafe or inappropriate content detected!"
-
- # return answer
-
- # # ----------------------
- # # Gradio UI
- # # ----------------------
- # with gr.Blocks(title="BLIP Vision App") as demo:
- # gr.Markdown("## 🖼️ BLIP: Image Captioning + Translation + Speech + VQA (with Safety Filter)")
-
- # with gr.Tab("Caption + Translate + Speak"):
- # with gr.Row():
- # img_in = gr.Image(type="pil", label="Upload Image")
- # lang_in = gr.Dropdown(["Hindi", "French", "Spanish"], label="Translate To", value="Hindi")
- # eng_out = gr.Textbox(label="English Caption")
- # trans_out = gr.Textbox(label="Translated Caption")
- # audio_out = gr.Audio(label="Spoken Caption", type="filepath")
- # btn1 = gr.Button("Generate Caption, Translate & Speak")
- # btn1.click(generate_caption_translate_speak, inputs=[img_in, lang_in], outputs=[eng_out, trans_out, audio_out])
-
- # with gr.Tab("Visual Question Answering (VQA)"):
- # with gr.Row():
- # img_vqa = gr.Image(type="pil", label="Upload Image")
- # q_in = gr.Textbox(label="Ask a Question about the Image")
- # ans_out = gr.Textbox(label="Answer")
- # btn2 = gr.Button("Ask")
- # btn2.click(vqa_answer, inputs=[img_vqa, q_in], outputs=ans_out)
-
- # demo.launch()
-
-
-
-
-
  import gradio as gr
  from transformers import (
- BlipProcessor,
- BlipForConditionalGeneration,
- BlipForQuestionAnswering,
+ BlipProcessor,
+ BlipForConditionalGeneration,
+ BlipForQuestionAnswering,
  pipeline
  )
+ moderation_model = pipeline(
+ "text-classification",
+ model="Vrandan/Comment-Moderation",
+ return_all_scores=True
+ )
+
  from PIL import Image
  import torch
  from gtts import gTTS
@@ -267,25 +122,28 @@ def is_caption_safe(caption):
  # If return_all_scores=True, it's [[{label, score}, ...]]
  if isinstance(votes, list) and isinstance(votes[0], list):
  votes = votes[0]
- # Loop through scores
+ # Now safe to loop
  for item in votes:
  if isinstance(item, dict) and item.get("label") in ["V", "V2"] and item.get("score", 0) > 0.5:
  return False
  except Exception as e:
  print("⚠️ Moderation failed:", e)
-
- # Fallback keyword check
+
+ # Fallback keywords
  unsafe_keywords = [
- "gun", "blood", "skull", "kill", "corpse", "gore", "knife", "weapon", "fire",
- "murder", "dead", "death", "suicide", "bomb", "explosion", "terrorist", "assault",
- "stab", "shoot", "pistol", "rifle", "shotgun", "grenade", "horror", "beheaded",
- "torture", "hostage", "rape", "war", "massacre", "chainsaw", "poison", "strangle",
- "hang", "drown"
+ "gun", "blood", "skull", "kill", "corpse", "gore", "knife", "weapon",
+ "fire", "murder", "dead", "death", "suicide", "bomb", "explosion",
+ "terrorist", "assault", "stab", "shoot", "pistol", "rifle", "shotgun",
+ "grenade", "horror", "beheaded", "torture", "hostage", "rape",
+ "war", "massacre", "chainsaw", "poison", "strangle", "hang", "drown"
  ]
  if any(word in caption.lower() for word in unsafe_keywords):
  return False
  return True

+
+
+
  # ----------------------
  # Caption + Translate + Speak
  # ----------------------
@@ -295,22 +153,22 @@ def generate_caption_translate_speak(image, target_lang):
  with torch.no_grad():
  out = caption_model.generate(**inputs, max_new_tokens=50)
  english_caption = caption_processor.decode(out[0], skip_special_tokens=True)
-
+
  # Step 1.5: Safety Check
  if not is_caption_safe(english_caption):
  return "⚠️ Warning: Unsafe or inappropriate content detected!", "", None
-
+
  # Step 2: Translate
  if target_lang in translation_models:
  translated = translation_models[target_lang](english_caption)[0]['translation_text']
  else:
  translated = "Translation not available"
-
+
  # Step 3: Generate Speech (English caption for now)
  tts = gTTS(english_caption, lang="en")
  tmp_file = tempfile.NamedTemporaryFile(delete=False, suffix=".mp3")
  tts.save(tmp_file.name)
-
+
  return english_caption, translated, tmp_file.name

  # ----------------------
@@ -321,11 +179,11 @@ def vqa_answer(image, question):
  with torch.no_grad():
  out = vqa_model.generate(**inputs, max_new_tokens=50)
  answer = vqa_processor.decode(out[0], skip_special_tokens=True)
-
- # Safety filter
+
+ # Run safety filter on answers too
  if not is_caption_safe(answer):
  return "⚠️ Warning: Unsafe or inappropriate content detected!"
-
+
  return answer

  # ----------------------
@@ -333,24 +191,24 @@ def vqa_answer(image, question):
  # ----------------------
  with gr.Blocks(title="BLIP Vision App") as demo:
  gr.Markdown("## 🖼️ BLIP: Image Captioning + Translation + Speech + VQA (with Safety Filter)")
-
+
  with gr.Tab("Caption + Translate + Speak"):
  with gr.Row():
  img_in = gr.Image(type="pil", label="Upload Image")
  lang_in = gr.Dropdown(["Hindi", "French", "Spanish"], label="Translate To", value="Hindi")
- eng_out = gr.Textbox(label="English Caption")
- trans_out = gr.Textbox(label="Translated Caption")
- audio_out = gr.Audio(label="Spoken Caption", type="filepath")
- btn1 = gr.Button("Generate Caption, Translate & Speak")
- btn1.click(generate_caption_translate_speak, inputs=[img_in, lang_in], outputs=[eng_out, trans_out, audio_out])
-
+ eng_out = gr.Textbox(label="English Caption")
+ trans_out = gr.Textbox(label="Translated Caption")
+ audio_out = gr.Audio(label="Spoken Caption", type="filepath")
+ btn1 = gr.Button("Generate Caption, Translate & Speak")
+ btn1.click(generate_caption_translate_speak, inputs=[img_in, lang_in], outputs=[eng_out, trans_out, audio_out])
+
  with gr.Tab("Visual Question Answering (VQA)"):
  with gr.Row():
  img_vqa = gr.Image(type="pil", label="Upload Image")
  q_in = gr.Textbox(label="Ask a Question about the Image")
- ans_out = gr.Textbox(label="Answer")
- btn2 = gr.Button("Ask")
- btn2.click(vqa_answer, inputs=[img_vqa, q_in], outputs=ans_out)
+ ans_out = gr.Textbox(label="Answer")
+ btn2 = gr.Button("Ask")
+ btn2.click(vqa_answer, inputs=[img_vqa, q_in], outputs=ans_out)

  demo.launch()
@@ -358,6 +216,148 @@ demo.launch()



+ # import gradio as gr
+ # from transformers import (
+ # BlipProcessor,
+ # BlipForConditionalGeneration,
+ # BlipForQuestionAnswering,
+ # pipeline
+ # )
+ # from PIL import Image
+ # import torch
+ # from gtts import gTTS
+ # import tempfile
+
+ # # ----------------------
+ # # Device setup
+ # # ----------------------
+ # device = "cuda" if torch.cuda.is_available() else "cpu"
+
+ # # ----------------------
+ # # Load Models Once
+ # # ----------------------
+ # print("🔄 Loading models...")
+
+ # # Captioning
+ # caption_processor = BlipProcessor.from_pretrained("Salesforce/blip-image-captioning-large")
+ # caption_model = BlipForConditionalGeneration.from_pretrained("Salesforce/blip-image-captioning-large").to(device)
+
+ # # VQA
+ # vqa_processor = BlipProcessor.from_pretrained("Salesforce/blip-vqa-base")
+ # vqa_model = BlipForQuestionAnswering.from_pretrained("Salesforce/blip-vqa-base").to(device)
+
+ # # Translation
+ # translation_models = {
+ # "Hindi": pipeline("translation", model="Helsinki-NLP/opus-mt-en-hi"),
+ # "French": pipeline("translation", model="Helsinki-NLP/opus-mt-en-fr"),
+ # "Spanish": pipeline("translation", model="Helsinki-NLP/opus-mt-en-es"),
+ # }
+
+ # # Safety Moderation Pipeline
+ # moderation_model = pipeline("text-classification", model="unitary/toxic-bert")
+
+ # print("✅ All models loaded!")
+
+ # # ----------------------
+ # # Safety Filter Function
+ # # ----------------------
+ # def is_caption_safe(caption):
+ # try:
+ # votes = moderation_model(caption)
+ # # If return_all_scores=True, it's [[{label, score}, ...]]
+ # if isinstance(votes, list) and isinstance(votes[0], list):
+ # votes = votes[0]
+ # # Loop through scores
+ # for item in votes:
+ # if isinstance(item, dict) and item.get("label") in ["V", "V2"] and item.get("score", 0) > 0.5:
+ # return False
+ # except Exception as e:
+ # print("⚠️ Moderation failed:", e)
+
+ # # Fallback keyword check
+ # unsafe_keywords = [
+ # "gun", "blood", "skull", "kill", "corpse", "gore", "knife", "weapon", "fire",
+ # "murder", "dead", "death", "suicide", "bomb", "explosion", "terrorist", "assault",
+ # "stab", "shoot", "pistol", "rifle", "shotgun", "grenade", "horror", "beheaded",
+ # "torture", "hostage", "rape", "war", "massacre", "chainsaw", "poison", "strangle",
+ # "hang", "drown"
+ # ]
+ # if any(word in caption.lower() for word in unsafe_keywords):
+ # return False
+ # return True
+
+ # # ----------------------
+ # # Caption + Translate + Speak
+ # # ----------------------
+ # def generate_caption_translate_speak(image, target_lang):
+ # # Step 1: Caption
+ # inputs = caption_processor(images=image, return_tensors="pt").to(device)
+ # with torch.no_grad():
+ # out = caption_model.generate(**inputs, max_new_tokens=50)
+ # english_caption = caption_processor.decode(out[0], skip_special_tokens=True)
+
+ # # Step 1.5: Safety Check
+ # if not is_caption_safe(english_caption):
+ # return "⚠️ Warning: Unsafe or inappropriate content detected!", "", None
+
+ # # Step 2: Translate
+ # if target_lang in translation_models:
+ # translated = translation_models[target_lang](english_caption)[0]['translation_text']
+ # else:
+ # translated = "Translation not available"
+
+ # # Step 3: Generate Speech (English caption for now)
+ # tts = gTTS(english_caption, lang="en")
+ # tmp_file = tempfile.NamedTemporaryFile(delete=False, suffix=".mp3")
+ # tts.save(tmp_file.name)
+
+ # return english_caption, translated, tmp_file.name
+
+ # # ----------------------
+ # # VQA
+ # # ----------------------
+ # def vqa_answer(image, question):
+ # inputs = vqa_processor(image, question, return_tensors="pt").to(device)
+ # with torch.no_grad():
+ # out = vqa_model.generate(**inputs, max_new_tokens=50)
+ # answer = vqa_processor.decode(out[0], skip_special_tokens=True)
+
+ # # Safety filter
+ # if not is_caption_safe(answer):
+ # return "⚠️ Warning: Unsafe or inappropriate content detected!"
+
+ # return answer
+
+ # # ----------------------
+ # # Gradio UI
+ # # ----------------------
+ # with gr.Blocks(title="BLIP Vision App") as demo:
+ # gr.Markdown("## 🖼️ BLIP: Image Captioning + Translation + Speech + VQA (with Safety Filter)")
+
+ # with gr.Tab("Caption + Translate + Speak"):
+ # with gr.Row():
+ # img_in = gr.Image(type="pil", label="Upload Image")
+ # lang_in = gr.Dropdown(["Hindi", "French", "Spanish"], label="Translate To", value="Hindi")
+ # eng_out = gr.Textbox(label="English Caption")
+ # trans_out = gr.Textbox(label="Translated Caption")
+ # audio_out = gr.Audio(label="Spoken Caption", type="filepath")
+ # btn1 = gr.Button("Generate Caption, Translate & Speak")
+ # btn1.click(generate_caption_translate_speak, inputs=[img_in, lang_in], outputs=[eng_out, trans_out, audio_out])
+
+ # with gr.Tab("Visual Question Answering (VQA)"):
+ # with gr.Row():
+ # img_vqa = gr.Image(type="pil", label="Upload Image")
+ # q_in = gr.Textbox(label="Ask a Question about the Image")
+ # ans_out = gr.Textbox(label="Answer")
+ # btn2 = gr.Button("Ask")
+ # btn2.click(vqa_answer, inputs=[img_vqa, q_in], outputs=ans_out)
+
+ # demo.launch()
+
+
+
+
+

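
Note (not part of the commit): the main functional change above activates the "Vrandan/Comment-Moderation" pipeline with return_all_scores=True. A minimal sketch, assuming the standard transformers text-classification pipeline behavior, of the nested output shape that is_caption_safe flattens; the label names other than "V"/"V2" are illustrative only, and the threshold mirrors the one in the diff:

from transformers import pipeline

moderation_model = pipeline(
    "text-classification",
    model="Vrandan/Comment-Moderation",  # model id taken from the diff above
    return_all_scores=True,
)

votes = moderation_model("a man holding a knife")
# With return_all_scores=True the pipeline returns one inner list per input,
# e.g. [[{"label": "V", "score": 0.91}, {"label": "OK", "score": 0.04}, ...]]
# ("OK" is an illustrative label; the app only checks "V" and "V2").
if isinstance(votes, list) and isinstance(votes[0], list):
    votes = votes[0]  # flatten the per-input nesting, as is_caption_safe does

unsafe = any(
    isinstance(item, dict)
    and item.get("label") in ["V", "V2"]
    and item.get("score", 0) > 0.5
    for item in votes
)
print("unsafe:", unsafe)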