import streamlit as st
import pandas as pd
import numpy as np
import time
import random
from PIL import Image
# Additional imports for CoTracker3 Demo
import torch
import imageio  # used by imageio.get_writer when saving the tracked output video
import imageio.v3 as iio
import matplotlib.pyplot as plt
import colorsys
import tempfile
import cv2
import os
# Set up the page configuration
st.set_page_config(
    page_title="Streamlit Super Fun Guide 🎈",
    page_icon="🎈",
    layout="wide",
    initial_sidebar_state="expanded",
)
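# st.set_page_config stays as the first Streamlit call in the script; older Streamlit
# versions raise an error if any other st.* command runs before it.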
# Add a header with an emoji
st.title("Welcome to the Streamlit Super Fun Guide 🎉")
# Add a limerick
st.write("""
There once was a coder so bright,
Who coded with all of their might.
With Streamlit's delight,
They coded all night,
And apps popped up left and right!
""")
# Add a wise quote
st.write("> “Any fool can write code that a computer can understand. Good programmers write code that humans can understand.” – Martin Fowler")
# Sidebar navigation
st.sidebar.title("Navigation 🧭")
options = st.sidebar.radio("Go to", [
"Introduction 🚀",
"Upload Media 📁",
"Animated Charts 📊",
"Caching Demo 🗃️",
"Query Parameters 🔍",
"Character Gallery 🐾",
"CoTracker3 Demo 🕵️‍♂️"
])
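# st.sidebar.radio returns the selected label as a plain string; the if/elif chain
# below uses it as a simple page router, and the script reruns on every selection.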
# Introduction Page
if options == "Introduction 🚀":
st.header("Introduction 🚀")
st.write("Hello there 👋")
st.write("Thanks for stopping by! Let's embark on a fun journey to learn Streamlit together!")
st.write("Here's a joke to start with:")
st.write("> Why did the programmer quit his job? Because he didn't get arrays! 😂")
st.image("https://media.giphy.com/media/3oEjI6SIIHBdRxXI40/giphy.gif")
# Upload Media Page
elif options == "Upload Media 📁":
st.header("Upload Media Files 📁")
st.write("You can upload images, videos, and audio files here, and we'll display them in galleries!")
uploaded_files = st.file_uploader("Choose media files", accept_multiple_files=True, type=["png", "jpg", "jpeg", "mp4", "mp3", "wav"])
if uploaded_files:
images = []
videos = []
audios = []
for uploaded_file in uploaded_files:
if uploaded_file.type.startswith("image/"):
images.append(uploaded_file)
elif uploaded_file.type.startswith("video/"):
videos.append(uploaded_file)
elif uploaded_file.type.startswith("audio/"):
audios.append(uploaded_file)
if images:
st.subheader("Image Gallery 🖼️")
cols = st.columns(3)
for idx, img in enumerate(images):
with cols[idx % 3]:
st.image(img, use_column_width=True)
if videos:
st.subheader("Video Gallery 🎥")
for vid in videos:
st.video(vid)
if audios:
st.subheader("Audio Gallery 🎵")
for aud in audios:
st.audio(aud)
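    # st.file_uploader returns UploadedFile objects; their .type attribute holds the MIME
    # string (e.g. "image/png"), which is what the startswith checks above branch on, and
    # st.image / st.video / st.audio accept these file-like objects directly.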
# Animated Charts Page
elif options == "Animated Charts 📊":
st.header("Animated Charts 📊")
st.write("Watch the data come alive with live-updating charts!")
# Live-updating line chart
progress_bar = st.progress(0)
status_text = st.empty()
chart = st.line_chart(np.random.randn(10, 2))
for i in range(100):
new_rows = np.random.randn(1, 2)
status_text.text(f"Iteration {i+1}")
chart.add_rows(new_rows)
progress_bar.progress((i + 1) % 100)
time.sleep(0.1)
st.success("Animation Complete! 🎉")
# Caching Demo Page
elif options == "Caching Demo 🗃️":
st.header("Caching Demo 🗃️")
st.write("This demonstrates how caching can speed up your app.")
@st.cache_data
def expensive_computation(n):
time.sleep(2) # Simulate a long computation
return np.random.randn(n)
st.write("Click the button to compute some data.")
if st.button("Compute Data 🔄"):
with st.spinner("Computing..."):
data = expensive_computation(1000)
st.success("Computation Complete! 🎉")
st.line_chart(data)
else:
st.write("Awaiting your command...")
# Query Parameters Page
elif options == "Query Parameters 🔍":
st.header("Query Parameters 🔍")
st.write("You can control the app via URL query parameters!")
st.write("Try adding `?name=YourName` to the URL and see what happens.")
name = st.experimental_get_query_params().get("name", ["Stranger"])[0]
st.write(f"Hello, {name}! 👋")
st.write("Change the 'name' parameter in the URL to personalize this message.")
# Character Gallery Page
elif options == "Character Gallery 🐾":
st.header("Character Gallery 🐾")
st.write("Meet our delightful characters!")
characters = [
{"name": "Leo", "emoji": "🦁"},
{"name": "Mia", "emoji": "🐱"},
{"name": "Max", "emoji": "🐶"},
{"name": "Zoe", "emoji": "🦊"},
{"name": "Sam", "emoji": "🐵"},
{"name": "Lily", "emoji": "🐰"},
{"name": "Oscar", "emoji": "🐼"},
{"name": "Ella", "emoji": "🐨"},
{"name": "Jack", "emoji": "🐸"},
{"name": "Nina", "emoji": "🐙"},
{"name": "Charlie", "emoji": "🐵"},
{"name": "Daisy", "emoji": "🐷"},
{"name": "Felix", "emoji": "🐧"},
{"name": "Grace", "emoji": "🐮"},
{"name": "Henry", "emoji": "🐴"},
]
random.shuffle(characters)
cols = st.columns(5)
for idx, character in enumerate(characters):
with cols[idx % 5]:
st.write(f"{character['emoji']} **{character['name']}**")
# CoTracker3 Demo Page
elif options == "CoTracker3 Demo 🕵️‍♂️":
st.header("CoTracker3 Demo 🕵️‍♂️")
st.write("This demo showcases point tracking using [CoTracker3](https://cotracker3.github.io/).")
st.write("Upload a video or select one of the example videos, then click **Track Points** to see CoTracker3 in action!")
# Example videos
example_videos = {
"Apple": "https://github.com/facebookresearch/co-tracker/raw/refs/heads/main/assets/apple.mp4",
"Bear": "https://github.com/facebookresearch/co-tracker/raw/refs/heads/main/assets/bear.mp4",
"Paragliding Launch": "https://github.com/facebookresearch/co-tracker/raw/refs/heads/main/assets/paragliding-launch.mp4",
"Paragliding": "https://github.com/facebookresearch/co-tracker/raw/refs/heads/main/assets/paragliding.mp4",
"Cat": "https://github.com/facebookresearch/co-tracker/raw/refs/heads/main/assets/cat.mp4",
"Pillow": "https://github.com/facebookresearch/co-tracker/raw/refs/heads/main/assets/pillow.mp4",
"Teddy": "https://github.com/facebookresearch/co-tracker/raw/refs/heads/main/assets/teddy.mp4",
"Backpack": "https://github.com/facebookresearch/co-tracker/raw/refs/heads/main/assets/backpack.mp4",
}
    # Video uploader
    uploaded_video = st.file_uploader("Upload a video (mp4 format)", type=["mp4"])
    # Option to select example videos
    selected_example = st.selectbox("Or select an example video", ["None"] + list(example_videos.keys()))
    # Tracking button
    if st.button("Track Points"):
        with st.spinner("Processing..."):
            # Load the video
            if uploaded_video is not None:
                # Process the uploaded video
                video_bytes = uploaded_video.read()
                with tempfile.NamedTemporaryFile(delete=False, suffix=".mp4") as tmp_file:
                    tmp_file.write(video_bytes)
                    tmp_video_path = tmp_file.name
                frames = iio.imread(tmp_video_path, plugin="pyav")
            elif selected_example != "None":
                # Download and read the example video
                video_url = example_videos[selected_example]
                frames = iio.imread(video_url, plugin="pyav")
            else:
                st.warning("Please upload a video or select an example video.")
                st.stop()
            # Check if video is too long
            FRAME_LIMIT = 300
            if frames.shape[0] > FRAME_LIMIT:
                frames = frames[:FRAME_LIMIT]
                st.warning(f"Video is too long. Only the first {FRAME_LIMIT} frames will be processed.")
            # Process with CoTracker3
            device = 'cuda' if torch.cuda.is_available() else 'cpu'
            grid_size = 10
            video = torch.tensor(frames).permute(0, 3, 1, 2)[None].float().to(device)  # B T C H W
            # Run offline CoTracker (torch.hub fetches the repo and checkpoint on first use)
            cotracker = torch.hub.load("facebookresearch/co-tracker", "cotracker3_offline").to(device)
            pred_tracks, pred_visibility = cotracker(video, grid_size=grid_size)  # B T N 2, B T N
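            # Shapes, following the comments above: `video` is (batch, time, channels,
            # height, width); `pred_tracks` holds one (x, y) pixel location per tracked
            # point per frame, and `pred_visibility` flags whether each point is visible
            # in each frame. With grid_size=10, CoTracker samples the query points on a
            # regular grid over the first frame (roughly grid_size * grid_size points).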
            # Visualize the results
            def get_colors(num_colors):
                colors = []
                for i in np.arange(0.0, 360.0, 360.0 / num_colors):
                    hue = i / 360.0
                    lightness = (50 + np.random.rand() * 10) / 100.0
                    saturation = (90 + np.random.rand() * 10) / 100.0
                    color = colorsys.hls_to_rgb(hue, lightness, saturation)
                    colors.append(
                        (int(color[0] * 255), int(color[1] * 255), int(color[2] * 255))
                    )
                random.shuffle(colors)
                return colors

            def paint_point_track(frames, point_tracks, visibles, colormap=None):
                num_points, num_frames = point_tracks.shape[0:2]
                if colormap is None:
                    colormap = get_colors(num_colors=num_points)
                height, width = frames.shape[1:3]
                radius = int(round(min(height, width) * 0.015))
                video = frames.copy()
                for t in range(num_frames):
                    image = video[t]
                    for i in range(num_points):
                        x, y = point_tracks[i, t, :]
                        x = int(np.clip(x, 0, width - 1))
                        y = int(np.clip(y, 0, height - 1))
                        if visibles[i, t]:
                            cv2.circle(image, (x, y), radius, colormap[i], -1)
                    video[t] = image
                return video
            # Prepare data for visualization
            pred_tracks = pred_tracks[0].cpu().numpy().transpose(1, 0, 2)  # N x T x 2
            pred_visibility = pred_visibility[0].cpu().numpy().transpose(1, 0)  # N x T
            output_frames = paint_point_track(frames, pred_tracks, pred_visibility)
            # Save the output video
            output_video_path = os.path.join(tempfile.gettempdir(), "output_video.mp4")
            writer = imageio.get_writer(output_video_path, fps=25)
            for frame in output_frames:
                writer.append_data(frame)
            writer.close()
            # Display the output video
            st.video(output_video_path)
            st.success("Tracking Complete! 🎉")
# Footer with self-deprecating humor
st.write("If you find any bugs, remember they're just features in disguise! 🐞")