# --- Hugging Face file-viewer residue (not code; kept as comments so the file parses) ---
# traffic_signal_images / traffic_object_detection.py
# Sayali9141's picture
# Upload traffic_object_detection.py
# 845bf66 verified
# raw
# history blame
# 3.63 kB
# -*- coding: utf-8 -*-
"""traffic_object_detection.ipynb
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/drive/1B7DIM9ABIA6RRhA8tL_3rcxL9M1iIP7D
"""
# BUGFIX: `!pip install` is IPython/Colab shell magic and is a SyntaxError in a
# plain .py file. Install the dependency from a shell instead:
#   pip install datasets
from datasets import load_dataset

# Pull the traffic-camera image dataset from the Hugging Face Hub.
dataset = load_dataset("Sayali9141/traffic_signal_images")

# Peek at the first training record (notebook-style inspection; the value is
# displayed in Colab but simply discarded when run as a script).
next(iter(dataset['train']))

import matplotlib.pyplot as plt
from IPython.display import display
from PIL import Image
"""Trying out hugging face YOLO
"""
from transformers import AutoFeatureExtractor
feature_extractor = AutoFeatureExtractor.from_pretrained("hustvl/yolos-small")
from transformers import YolosForObjectDetection
model = YolosForObjectDetection.from_pretrained("hustvl/yolos-small")
"""This code shows how to get image from the url"""
device = 'cuda'
model = model.to(device)
from PIL import Image
import requests
import base64
from io import BytesIO
from time import time
import matplotlib.pyplot as plt
import torch
# colors for visualization: six RGB triples (floats in 0-1) cycled across
# detections when drawing bounding boxes in plot_results()
COLORS = [[0.000, 0.447, 0.741], [0.850, 0.325, 0.098], [0.929, 0.694, 0.125],
[0.494, 0.184, 0.556], [0.466, 0.674, 0.188], [0.301, 0.745, 0.933]]
def plot_results(pil_img, prob, boxes):
    """Overlay car/truck detections on *pil_img* and return how many were drawn.

    Parameters
    ----------
    pil_img : PIL.Image
        Image displayed beneath the boxes.
    prob : tensor
        Per-detection class probabilities, one row per detection.
    boxes : tensor
        Per-detection [xmin, ymin, xmax, ymax] boxes in image coordinates.

    Returns
    -------
    int
        Number of detections whose top class is 'car' or 'truck'.
    """
    plt.figure(figsize=(16, 10))
    plt.imshow(pil_img)
    axes = plt.gca()
    palette = COLORS * 100  # repeat the palette so it never runs out
    n_drawn = 0
    for scores, box, color in zip(prob, boxes.tolist(), palette):
        best = scores.argmax()
        label = model.config.id2label[best.item()]
        if label not in ('car', 'truck'):
            continue  # only vehicles of interest are drawn and counted
        xmin, ymin, xmax, ymax = box
        axes.add_patch(plt.Rectangle((xmin, ymin), xmax - xmin, ymax - ymin,
                                     fill=False, color=color, linewidth=3))
        axes.text(xmin, ymin, f'{label}: {scores[best]:0.2f}', fontsize=15,
                  bbox=dict(facecolor='yellow', alpha=0.5))
        n_drawn += 1
    plt.axis('off')
    plt.show()
    return n_drawn
all_counts = []  # per-frame car/truck counts for frames 22000-22004
for i in range(22000, 22005):
    row = dataset['train'][i]
    start = time()

    # NOTE(review): despite its name, the 'image_url' column appears to hold a
    # decoded PIL image (it is passed to the extractor and has a .size below).
    pixel_values = feature_extractor(row['image_url'], return_tensors="pt").pixel_values
    pixel_values = pixel_values.to(device)

    with torch.no_grad():
        outputs = model(pixel_values, output_attentions=True)

    # Per-query class probabilities; drop the trailing "no object" logit.
    probas = outputs.logits.softmax(-1)[0, :, :-1]
    keep = probas.max(-1).values > 0.8  # confidence threshold

    # Rescale boxes back to the original image size (PIL .size is (w, h),
    # post_process wants (h, w)).
    target_sizes = torch.tensor(row['image_url'].size[::-1]).unsqueeze(0)
    postprocessed_outputs = feature_extractor.post_process(outputs, target_sizes)
    bboxes_scaled = postprocessed_outputs[0]['boxes']

    # BUGFIX: the original discarded plot_results()'s return value and then
    # re-counted car/truck detections in a duplicate loop; reuse the count.
    count = plot_results(row['image_url'], probas[keep], bboxes_scaled[keep])
    all_counts.append(count)
    print(time() - start)
# def select_columns(example):
#     return {key: example[key] for key in ['timestamp', 'camera_id', 'latitude', 'longitude']}
# subset_dataset = dataset['train'].map(select_columns)  # pass the function itself, don't index it
# data_yolo = subset_dataset.to_pandas()
# data_yolo['box_count'][22000:22005] = all_counts  # 5 frames: 22000-22004 inclusive
#create interactive map
#create interactive map using latitude and longitude of counts column
# import folium
# from folium import plugins
# # Create a map object and center it to the avarage coordinates to m
# m = folium.Map(location=[df['latitude'].mean(), df['longitude'].mean()], zoom_start=10)
# # Add marker for each row in the data
# for i in range(0,len(df)):
# folium.Marker([df.iloc[i]['latitude'], df.iloc[i]['longitude']], popup=df.iloc[i]['counts']).add_to(m)
# # Display the map
# m.save('map.html')
# m