github-actions[bot]
committed · 6d2f665
Parent: b120a3a
Automated UV deployment
Files changed:
- =0.1.4 +2 -2
- Dockerfile +7 -4
- README.md +1 -1
- entrypoint.sh +2 -2
- ui/streamlit_app.py +933 -0
=0.1.4
CHANGED
@@ -23,9 +23,9 @@ Requirement already satisfied: sniffio>=1.1 in /opt/hostedtoolcache/Python/3.12.
 Requirement already satisfied: click>=8.0.0 in /opt/hostedtoolcache/Python/3.12.12/x64/lib/python3.12/site-packages (from typer-slim->huggingface-hub>=0.30) (8.2.1)
 Downloading huggingface_hub-1.1.2-py3-none-any.whl (514 kB)
 Downloading hf_xet-1.2.0-cp37-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl (3.3 MB)
-   ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ 3.3/3.3 MB
+   ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ 3.3/3.3 MB 138.8 MB/s 0:00:00
 Downloading hf_transfer-0.1.9-cp38-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl (3.6 MB)
-   ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ 3.6/3.6 MB
+   ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ 3.6/3.6 MB 231.0 MB/s 0:00:00
 Downloading typer_slim-0.20.0-py3-none-any.whl (47 kB)
 Installing collected packages: typer-slim, hf-xet, hf-transfer, huggingface-hub
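The tracked file named "=0.1.4" contains pip installer output, which typically appears when an unquoted version specifier in a shell step is parsed as an output redirection rather than as part of the package argument. A minimal illustration of the failure mode and the quoting fix; the package name below is only an assumption for illustration, not taken from this workflow:

# Unquoted: the shell treats ">=0.1.4" as a redirection, so pip's output
# is written to a file literally named "=0.1.4" in the working directory.
pip install hf_transfer>=0.1.4

# Quoted: the ">=" stays part of the requirement string.
pip install "hf_transfer>=0.1.4"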
Dockerfile
CHANGED
@@ -18,14 +18,17 @@ RUN apt-get update && apt-get install -y --no-install-recommends \
     && rm -rf /var/lib/apt/lists/*
 
 # Copy requirements first for caching
-COPY
+COPY requirements.txt ./requirements.txt
 
 # Install pip dependencies (use --no-cache-dir in production)
 RUN python -m pip install --upgrade pip setuptools wheel \
-    && pip install --no-cache-dir -r
+    && pip install --no-cache-dir -r requirements.txt
 
-# Copy the
+# Copy the entire project structure
+COPY app/ ./app/
 COPY ui/ ./ui/
+COPY config.py ./config.py
+COPY __init__.py ./
 
 # Add entrypoint script
 COPY entrypoint.sh /entrypoint.sh
@@ -38,7 +41,7 @@ EXPOSE 8501 8000
 ENV APP_MODULE="app:app" \
     APP_TYPE="streamlit" \
     PORT=8501 \
-    PYTHONPATH="/app
+    PYTHONPATH="/app"
 
 # Entrypoint handles which server to start
 ENTRYPOINT ["/entrypoint.sh"]
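For local testing, the image defined above can be built and run roughly as follows. This is a sketch, not part of the commit: the image tag is arbitrary, and the environment values simply restate the defaults already set in the Dockerfile.

# Build from the repository root (tag name chosen for illustration)
docker build -t virality-coach .

# Run the Streamlit UI; 8501 is the port exposed and defaulted above
docker run --rm -p 8501:8501 -e APP_TYPE=streamlit -e PORT=8501 virality-coach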
README.md
CHANGED
@@ -5,6 +5,6 @@ colorFrom: "blue"
 colorTo: "purple"
 sdk: "streamlit"
 sdk_version: "1.38.0"
-app_file: "ui/
+app_file: "ui/streamlit_app.py"
 pinned: false
 ---
entrypoint.sh
CHANGED
@@ -11,9 +11,9 @@ cd ${WORKDIR} || exit 1
 echo "Starting app (type=${APP_TYPE}, port=${PORT})"
 
 if [ "${APP_TYPE}" = "streamlit" ]; then
-    # Streamlit: expects a file like
+    # Streamlit: expects a file like streamlit_app.py - run streamlit with network binding
     # --server.enableCORS false is often needed when embedding or dev
-    exec streamlit run
+    exec streamlit run streamlit_app.py --server.port ${PORT} --server.address 0.0.0.0 --server.enableCORS false
 
 elif [ "${APP_TYPE}" = "fastapi" ]; then
     # FastAPI: expects ASGI app in variable APP_MODULE (e.g. app:app)
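The fastapi branch of the script is not shown in this hunk; based on the APP_MODULE comment above, it presumably hands the app off to an ASGI server. A plausible sketch of that branch, assuming uvicorn is installed (not confirmed by this diff):

elif [ "${APP_TYPE}" = "fastapi" ]; then
    # FastAPI: serve the ASGI app named by APP_MODULE (e.g. app:app)
    exec uvicorn "${APP_MODULE}" --host 0.0.0.0 --port "${PORT}"
fi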
ui/streamlit_app.py
ADDED
@@ -0,0 +1,933 @@
import re
import sys
import json
import time
import signal
import traceback
import threading
import unicodedata
import hashlib
from pathlib import Path
import plotly.express as px
import yt_dlp
import streamlit as st

# -----------------------------
# Safe signal handling for non-main thread environments (yt_dlp)
# -----------------------------
if threading.current_thread() is not threading.main_thread():
    _orig_signal = signal.signal

    def _safe(sig, handler):
        if sig in (signal.SIGTERM, signal.SIGINT):
            return
        return _orig_signal(sig, handler)

    signal.signal = _safe

# -----------------------------
# Project paths & imports
# -----------------------------
ROOT = Path(__file__).resolve().parents[1]  # project root
if str(ROOT) not in sys.path:
    sys.path.insert(0, str(ROOT))

# For debugging path issues
print("sys.path:", sys.path)
print("ROOT:", ROOT)

from config import make_path
from app.pipeline.scene_detect import SceneDetector
from app.pipeline.frame_extract import FrameExtractor

# -----------------------------
# Storage layout
# -----------------------------
DATA_DIR = ROOT / "data"
DATA_DIR.mkdir(parents=True, exist_ok=True)

for name in ["raw", "interim", "processed", "reports"]:
    (DATA_DIR / name).mkdir(parents=True, exist_ok=True)

RAW_DIR = DATA_DIR / "raw"
INTERIM_DIR = DATA_DIR / "interim"
PROCESSED_DIR = DATA_DIR / "processed"
REPORTS_DIR = DATA_DIR / "reports"

# -----------------------------
# Utilities
# -----------------------------
def sanitize_title(title: str, max_length: int = 150) -> str:
    title = unicodedata.normalize("NFKD", title)
    title = title.encode("ascii", "ignore").decode("ascii")
    title = re.sub(r"#\w+", "", title)
    title = re.sub(r"[^\w\s]", "", title)
    title = re.sub(r"\s+", " ", title).strip()
    title = title.lower()
    return title[:max_length]


def sanitize_filename(filename: str) -> str:
    filename = filename.lower().replace(" ", "_")
    filename = unicodedata.normalize("NFKD", filename)
    filename = filename.encode("ascii", "ignore").decode("ascii")
    filename = re.sub(r"[^a-z0-9._-]", "", filename)
    return filename.strip()


def create_short_path(video_path: Path) -> str:
    """Create a short identifier for frame directories to avoid Windows path limits"""
    path_str = str(video_path)
    # Create a short hash of the full path
    path_hash = hashlib.md5(path_str.encode()).hexdigest()[:12]
    return f"frames_{path_hash}"


def get_frames_directory(video_path: Path) -> Path:
    """Get the frames directory path using short naming to avoid Windows path limits"""
    short_id = create_short_path(video_path)
    return INTERIM_DIR / "frames" / short_id


def download_video(url: str) -> tuple[Path, str]:
    with yt_dlp.YoutubeDL({"quiet": True}) as ydl:
        info = ydl.extract_info(url, download=False)
    original_title = info.get("title", "video")
    ext = info.get("ext", "mp4")

    clean_title = sanitize_title(original_title)
    sanitized_name = sanitize_filename(clean_title) or "video"
    filename = f"{sanitized_name}.{ext}"
    file_path = RAW_DIR / filename

    if not file_path.exists():
        ydl_opts = {
            "outtmpl": str(file_path),
            "restrictfilenames": True,
            "quiet": True,
            "noplaylist": True,
            "no_color": True,
            "format": "bv*+ba/b",
        }
        with yt_dlp.YoutubeDL(ydl_opts) as ydl:
            ydl.download([url])

    return file_path, clean_title


def get_paths(video_path: Path):
    vp_str = str(video_path)
    audio_json = make_path("processed/audio-analysis", vp_str, "audio_analysis", "json")
    report_json = make_path("reports", vp_str, "final_report", "json")
    scene_json = make_path("processed/scene-detection", vp_str, "scene", "json")
    frame_json = make_path("processed/frame-analysis", vp_str, "frame_analysis", "json")
    hook_json = make_path("processed/hook-analysis", vp_str, "hook_analysis", "json")
    return scene_json, frame_json, audio_json, hook_json, report_json


def safe_load_json(path: Path | str):
    p = Path(path)
    if p.exists():
        try:
            with p.open(encoding="utf-8") as f:
                return json.load(f)
        except Exception:
            return {}
    return {}


def remove_artifacts(video_path: Path):
    try:
        if video_path and video_path.exists():
            video_path.unlink(missing_ok=True)
    except Exception:
        pass

# -----------------------------
# Streamlit page config & styles
# -----------------------------
st.set_page_config(page_title="Virality Coach", layout="wide")

st.markdown(
    """
    <style>
    footer{display:none}
    .block-container{padding-top:1rem;padding-bottom:2rem;max-width:1100px}
    .title-center{text-align:center;margin-bottom:0.2rem}
    .desc-center{text-align:center;margin-bottom:1.2rem;color:#dbdbdb}
    .metric-card{background:#1f2937;border-radius:12px;padding:1.25rem;text-align:center;box-shadow:0 2px 8px rgba(0,0,0,.08);height:100%}
    .metric-card h4{margin:0;font-size:0.95rem;color:#d1d5db}
    .metric-card p{margin:0;font-size:1.8rem;font-weight:700;color:#ffffff}
    video{max-height:240px;border-radius:10px;margin-bottom:0.5rem}
    .status-msg{font-size:0.9rem;margin:0}
    </style>
    """,
    unsafe_allow_html=True,
)

st.markdown('<h1 class="title-center">Video Virality Coach</h1>', unsafe_allow_html=True)
st.markdown('<p class="desc-center">An AI-powered system that analyzes and scores the virality potential of short-form videos (TikTok, Reels, Shorts) and delivers clear, actionable feedback to creators and marketers.</p>', unsafe_allow_html=True)

# -----------------------------
# Session state
# -----------------------------
DEFAULT_STATE = {
    "mode": None,
    "url": "",
    "uploaded_name": None,
    "video_path": None,
    "clean_title": None,
    "stage": None,
    "progress": 0,
    "status": [],
    "cancel": False,
    "error_msg": None,
    "_ready_to_run": False,
}

for k, v in DEFAULT_STATE.items():
    if k not in st.session_state:
        st.session_state[k] = v


def reset_state(clear_video: bool = True):
    keep = st.session_state.get("video_path") if not clear_video else None
    st.session_state.update(DEFAULT_STATE | {"video_path": keep})


def push_status(msg: str):
    st.session_state.status.append(msg)


# -----------------------------
# Pipeline step executor
# -----------------------------
STAGES = ["download video", "scene detection", "frames extraction", "frame analysis", "audio analysis", "hook analysis", "report"]

PROGRESS_MAP = {
    "download video": 10,
    "scene detection": 25,
    "frames extraction": 40,
    "frame analysis": 55,
    "audio analysis": 70,
    "hook analysis": 85,
    "report": 100,
}

def _run_current_stage():
    """
    Run the heavy work for the current stage.
    This is called only when _ready_to_run is True,
    so the UI has already rendered progress/cancel.
    """
    stage = st.session_state.stage
    if not stage or stage in ("done", "error"):
        return

    if st.session_state.cancel:
        push_status("⚠️ Process canceled by user.")
        print("[INFO] Processing canceled by user.")
        st.session_state.stage = None
        st.session_state.progress = 0
        try:
            vp = st.session_state.video_path
            if vp:
                remove_artifacts(Path(vp))
        except Exception:
            pass
        st.session_state._ready_to_run = False
        st.rerun()

    try:
        vp = Path(st.session_state.video_path) if st.session_state.video_path else None

        if stage == "download video":
            push_status("Starting download…")
            print(f"[INFO] Stage: Downloading video from {st.session_state.url}")
            path, title = download_video(st.session_state.url)
            st.session_state.video_path = str(path)
            st.session_state.clean_title = title

            # Skip full pipeline if a report already exists
            _, _, _, _, report_json = get_paths(path)
            if Path(report_json).exists():
                push_status("Report already exists. Skipping analysis.")
                print("[INFO] Report already exists, skipping pipeline.")
                st.session_state.progress = 100
                st.session_state.stage = "done"
                st.session_state._ready_to_run = False
                st.rerun()

            st.session_state.progress = PROGRESS_MAP[stage]
            push_status("✅ Download complete.")
            print("[INFO] Download complete.")
            st.session_state.stage = "scene detection"
            st.session_state._ready_to_run = False
            st.rerun()

        elif stage == "scene detection":
            push_status("Detecting scenes…")
            print("[INFO] Stage: Scene detection started.")
            try:
                scene_detector = SceneDetector(str(vp))
                scene_detector.detect_and_save()

                # Verify the scene detection file was created
                scene_json, _, _, _, _ = get_paths(vp)
                if not Path(scene_json).exists():
                    raise FileNotFoundError("Scene detection failed - no output file")

                # Verify the scene file has the expected structure
                scene_data = safe_load_json(scene_json)
                if not scene_data or 'scenes' not in scene_data:
                    raise ValueError("Scene detection produced invalid results - no 'scenes' key")

                # Check if scenes have the required 'start_time' field
                if scene_data['scenes'] and 'start_time' not in scene_data['scenes'][0]:
                    print("[WARNING] Scene data missing 'start_time' field, adding compatible structure")
                    # Convert the scene data to the expected format
                    fixed_scenes = []
                    for i, scene in enumerate(scene_data['scenes']):
                        fixed_scene = {
                            'start_time': scene.get('start', 0),   # Use 'start' if available, else 0
                            'end_time': scene.get('end', 0),       # Use 'end' if available, else 0
                            'duration': scene.get('duration', 0),  # Use 'duration' if available, else 0
                            'scene_number': i
                        }
                        fixed_scenes.append(fixed_scene)

                    scene_data['scenes'] = fixed_scenes

                    # Save the fixed scene data
                    with open(scene_json, 'w', encoding='utf-8') as f:
                        json.dump(scene_data, f, indent=2)

                st.session_state.progress = PROGRESS_MAP[stage]
                push_status("✅ Scene detection done.")
                print("[INFO] Scene detection complete.")
                st.session_state.stage = "frames extraction"
                st.session_state._ready_to_run = False
                st.rerun()

            except Exception as e:
                # If scene detection fails, create a compatible scene file
                print(f"[WARNING] Scene detection failed: {e}. Creating fallback scene data.")
                push_status("⚠️ Scene detection failed. Using fallback scene data.")

                scene_json, _, _, _, _ = get_paths(vp)

                # Create compatible scene data with required 'start_time' field
                fallback_scene_data = {
                    "scenes": [{
                        "start_time": 0,
                        "end_time": 30,  # Assume 30 second scenes
                        "duration": 30,
                        "scene_number": 0
                    }]
                }

                # Ensure directory exists
                Path(scene_json).parent.mkdir(parents=True, exist_ok=True)

                with open(scene_json, 'w', encoding='utf-8') as f:
                    json.dump(fallback_scene_data, f, indent=2)

                st.session_state.progress = PROGRESS_MAP[stage]
                push_status("✅ Using fallback scene detection.")
                st.session_state.stage = "frames extraction"
                st.session_state._ready_to_run = False
                st.rerun()

        elif stage == "frames extraction":
            push_status("Extracting frames…")
            print("[INFO] Stage: Frame extraction started.")
            # Use the original FrameExtractor without modification
            FrameExtractor(str(vp)).extract()
            st.session_state.progress = PROGRESS_MAP[stage]
            push_status("✅ Frame extraction done.")
            print("[INFO] Frame extraction complete.")
            st.session_state.stage = "frame analysis"
            st.session_state._ready_to_run = False
            st.rerun()

        elif stage == "frame analysis":
            push_status("Analyzing frames…")
            if st.session_state.openai_key and st.session_state.openai_key.strip():
                from app.pipeline.frame_analysis import FrameAnalyzer
                try:
                    FrameAnalyzer(str(vp), openai_api_key=st.session_state.openai_key.strip()).analyze()
                except Exception as api_error:
                    error_msg = str(api_error)
                    if "invalid" in error_msg.lower() or "401" in error_msg or "authentication" in error_msg.lower():
                        st.session_state.stage = "error"
                        st.session_state.error_msg = f"OPENAI API KEY FAILED: Invalid OpenAI API Key provided. Please verify your API key is correct."
                        st.session_state._ready_to_run = False
                        st.rerun()
                    else:
                        raise
            else:
                st.session_state.stage = "error"
                st.session_state.error_msg = "OPENAI API KEY FAILED: OpenAI API Key is required for frame analysis but was not provided."
                st.session_state._ready_to_run = False
                st.rerun()
            st.session_state.progress = PROGRESS_MAP[stage]
            push_status("✅ Frame analysis done.")
            st.session_state.stage = "audio analysis"
            st.session_state._ready_to_run = False
            st.rerun()

        elif stage == "audio analysis":
            push_status("Analyzing audio…")
            if st.session_state.gemini_key and st.session_state.gemini_key.strip():
                from app.pipeline.audio_analysis import AudioAnalyzer
                try:
                    AudioAnalyzer(str(vp), gemini_api_key=st.session_state.gemini_key.strip()).analyze()
                except (ValueError, Exception) as api_error:
                    error_msg = str(api_error)
                    if "invalid" in error_msg.lower() or "401" in error_msg or "403" in error_msg or "api_key" in error_msg.lower() or "authentication" in error_msg.lower():
                        st.session_state.stage = "error"
                        st.session_state.error_msg = f"GEMINI API KEY FAILED: Invalid Gemini API Key provided. Error: {error_msg}"
                        st.session_state._ready_to_run = False
                        st.rerun()
                    else:
                        raise
            else:
                st.session_state.stage = "error"
                st.session_state.error_msg = "GEMINI API KEY FAILED: Gemini API Key is required for audio analysis but was not provided."
                st.session_state._ready_to_run = False
                st.rerun()
            st.session_state.progress = PROGRESS_MAP[stage]
            push_status("✅ Audio analysis done.")
            st.session_state.stage = "hook analysis"
            st.session_state._ready_to_run = False
            st.rerun()

        elif stage == "hook analysis":
            push_status("Evaluating hook…")
            if st.session_state.gemini_key and st.session_state.gemini_key.strip():
                from app.pipeline.frame_analysis import HookAnalyzer
                try:
                    HookAnalyzer(str(vp), gemini_api_key=st.session_state.gemini_key.strip()).analyze()
                except (ValueError, Exception) as api_error:
                    error_msg = str(api_error)
                    if "invalid" in error_msg.lower() or "401" in error_msg or "403" in error_msg or "api_key" in error_msg.lower() or "authentication" in error_msg.lower():
                        st.session_state.stage = "error"
                        st.session_state.error_msg = f"GEMINI API KEY FAILED: Invalid Gemini API Key provided. Error: {error_msg}"
                        st.session_state._ready_to_run = False
                        st.rerun()
                    else:
                        raise
            else:
                st.session_state.stage = "error"
                st.session_state.error_msg = "GEMINI API KEY FAILED: Gemini API Key is required for hook analysis but was not provided."
                st.session_state._ready_to_run = False
                st.rerun()
            st.session_state.progress = PROGRESS_MAP[stage]
            push_status("✅ Hook analysis done.")
            st.session_state.stage = "report"
            st.session_state._ready_to_run = False
            st.rerun()

        elif stage == "report":
            push_status("Generating final report…")
            if st.session_state.openai_key and st.session_state.openai_key.strip():
                from app.pipeline.scoring import VideoReport
                try:
                    VideoReport(str(vp), openai_api_key=st.session_state.openai_key.strip()).generate()
                except Exception as api_error:
                    error_msg = str(api_error)
                    if "invalid" in error_msg.lower() or "401" in error_msg or "authentication" in error_msg.lower():
                        st.session_state.stage = "error"
                        st.session_state.error_msg = f"OPENAI API KEY FAILED: Invalid OpenAI API Key provided. Error: {error_msg}"
                        st.session_state._ready_to_run = False
                        st.rerun()
                    else:
                        raise
            else:
                st.session_state.stage = "error"
                st.session_state.error_msg = "OPENAI API KEY FAILED: OpenAI API Key is required for report generation but was not provided."
                st.session_state._ready_to_run = False
                st.rerun()
            st.session_state.progress = PROGRESS_MAP[stage]
            push_status("Video report ready!")
            st.session_state.stage = "done"
            st.session_state._ready_to_run = False
            st.rerun()

    except Exception as e:
        err_type = type(e).__name__
        err_msg = str(e).strip()
        tb_last = traceback.format_exc(limit=1).strip()
        st.session_state.stage = "error"
        st.session_state.error_msg = f"{err_type}: {err_msg}\n➡️ {tb_last}"
        st.session_state.progress = 0
        st.session_state._ready_to_run = False
        push_status(f"❌ {err_type}: {err_msg}")
        st.rerun()


def run_next_stage_if_needed():
    if not st.session_state.stage or st.session_state.stage in ("done", "error"):
        return
    if not st.session_state._ready_to_run:
        st.session_state._ready_to_run = True
        time.sleep(0.01)
        st.rerun()
    else:
        _run_current_stage()


# -----------------------------
# Input section
# -----------------------------

report, api_tab = st.tabs(["Upload Video", "API Configuration"])

with api_tab:
    st.markdown("### Configure Your API Keys")
    st.markdown("Enter your API keys below. Keys will be validated during analysis. If a key is invalid, you'll see an error message during the analysis stage.")

    col1, col2 = st.columns(2)

    with col1:
        st.session_state.openai_key = st.text_input(
            "OpenAI API Key",
            type="password",
            placeholder="sk-...",
            help="Required for frame analysis and report generation",
            value=st.session_state.get("openai_key", "")
        )

    with col2:
        st.session_state.gemini_key = st.text_input(
            "Gemini API Key",
            type="password",
            placeholder="AIza...",
            help="Required for audio analysis and hook analysis",
            value=st.session_state.get("gemini_key", "")
        )

    st.markdown("---")
    st.info("💡 Add your API keys and return to the Upload Video tab to start analysis. Invalid keys will show error messages during the analysis process.")

if 'openai_key' not in st.session_state:
    st.session_state.openai_key = ""
if 'gemini_key' not in st.session_state:
    st.session_state.gemini_key = ""

with report:
    method = st.radio("Choose Upload Method", ["Paste Video URL", "Upload MP4 File"], horizontal=True)

    col_in_1, col_in_2 = st.columns([1, 1])

    if method == "Paste Video URL":
        st.session_state.mode = "url"
        url = st.text_input(
            "Paste direct video URL [insta / tiktok / yt-shorts]",
            placeholder="https://example.com/@username/video/123",
            value=st.session_state.url,
        )
        st.session_state.url = url
        run_from_url = col_in_1.button("Run Analysis", key="run_url")

        if run_from_url:
            if not url:
                st.error("❌ Please enter a video URL.")
            else:
                st.session_state.cancel = False
                st.session_state.stage = "download video"
                st.session_state.status = []
                st.session_state.progress = 0
                st.session_state._ready_to_run = False
                st.rerun()

    else:
        st.session_state.mode = "file"
        uploaded = st.file_uploader("Upload MP4 File", type=["mp4"])
        run_from_file = col_in_1.button("Run Analysis", key="run_file")

        if uploaded and run_from_file:
            clean_name = sanitize_filename(Path(uploaded.name).stem) + ".mp4"
            dest = RAW_DIR / clean_name
            with dest.open("wb") as f:
                f.write(uploaded.getbuffer())
            st.session_state.video_path = str(dest)
            st.session_state.clean_title = Path(clean_name).stem

            # Skip if a report is already present
            _, _, _, _, report_json = get_paths(dest)
            if Path(report_json).exists():
                st.session_state.stage = "done"
                st.session_state.status = ["Report already exists. Skipping analysis."]
                st.session_state.progress = 100
                st.rerun()

            st.session_state.cancel = False
            st.session_state.status = ["✅ Upload complete."]
            st.session_state.progress = 0
            st.session_state.stage = "scene detection"
            st.session_state._ready_to_run = False
            st.rerun()

# -----------------------------
# Progress & Status
# -----------------------------
if st.session_state.stage and st.session_state.stage not in ("done", "error"):
    percent = st.session_state.progress
    stage = st.session_state.stage.replace("_", " ").title()

    st.markdown(f"##### {stage}: {percent}%")
    st.progress(percent)

    if st.button("Cancel Processing"):
        st.session_state.cancel = True
        st.rerun()

    run_next_stage_if_needed()

# -----------------------------
# Error state
# -----------------------------
if st.session_state.stage == "error":
    error_msg = st.session_state.error_msg or "An unknown error occurred."

    # Detect which API key failed and display prominently
    if "openai" in error_msg.lower() or "openai" in str(st.session_state.error_msg).lower():
        st.error("🚨 **API KEY ERROR: OpenAI Key Failed**")
        st.markdown("""
        <div style='background-color: #fee2e2; border-left: 4px solid #ef4444; padding: 1rem; margin: 1rem 0; border-radius: 4px;'>
            <h4 style='color: #991b1b; margin-top: 0;'>❌ OpenAI API Key Invalid or Missing</h4>
            <p style='color: #7f1d1d; margin-bottom: 0;'><strong>Error Details:</strong> {}</p>
            <p style='color: #7f1d1d; margin-top: 0.5rem;'>Please go to the <strong>API Configuration</strong> tab and update your OpenAI API key.</p>
        </div>
        """.format(error_msg), unsafe_allow_html=True)
    elif "gemini" in error_msg.lower() or "gemini" in str(st.session_state.error_msg).lower():
        st.error("🚨 **API KEY ERROR: Gemini Key Failed**")
        st.markdown("""
        <div style='background-color: #fee2e2; border-left: 4px solid #ef4444; padding: 1rem; margin: 1rem 0; border-radius: 4px;'>
            <h4 style='color: #991b1b; margin-top: 0;'>❌ Gemini API Key Invalid or Missing</h4>
            <p style='color: #7f1d1d; margin-bottom: 0;'><strong>Error Details:</strong> {}</p>
            <p style='color: #7f1d1d; margin-top: 0.5rem;'>Please go to the <strong>API Configuration</strong> tab and update your Gemini API key.</p>
        </div>
        """.format(error_msg), unsafe_allow_html=True)
    else:
        # Generic error display
        st.error("🚨 **ANALYSIS FAILED**")
        st.markdown(f"""
        <div style='background-color: #fee2e2; border-left: 4px solid #ef4444; padding: 1rem; margin: 1rem 0; border-radius: 4px;'>
            <h4 style='color: #991b1b; margin-top: 0;'>❌ Error Occurred</h4>
            <p style='color: #7f1d1d; margin-bottom: 0;'><strong>Error Details:</strong> {error_msg}</p>
        </div>
        """, unsafe_allow_html=True)

    st.warning("⚠️ **Report Not Generated**: The analysis pipeline stopped due to the error above. No report was created.")

    if st.button("Reset & Try Again", type="primary", use_container_width=True):
        reset_state(clear_video=True)
        st.rerun()

# -----------------------------
# Results section
# -----------------------------
if st.session_state.stage == "done" and st.session_state.video_path:
    vp = Path(st.session_state.video_path)
    scene_json, frame_json, audio_json, hook_json, report_json = get_paths(vp)

    st.success("Analysis complete.")

    with st.expander("Preview Video", expanded=False):
        if vp.exists():
            st.video(str(vp), format="video/mp4")

    report = safe_load_json(report_json)
    audio_data = safe_load_json(audio_json)
    hook_data = safe_load_json(hook_json)

    if not report:
        st.warning("No report found. You can rerun the analysis.")
    else:
        results_tab, json_tab = st.tabs(["Results", "JSON Reports"])

        with results_tab:
            st.markdown(
                "<h2 style='text-align: center;'>Video Virality Report</h2>",
                unsafe_allow_html=True
            )

            # --- Main Score Cards ---
            total = report.get("total_score", 0)
            st.markdown(f"""
            <div style="text-align:center; margin-bottom:1rem;">
                <div style="font-size:2rem; font-weight:bold; color:#10b981;">Total Score: {total}</div>
                <p style="color:#9ca3af;">Overall Virality Potential</p>
            </div>
            """, unsafe_allow_html=True)

            scores = report.get("scores", {})
            if scores:
                cols = st.columns(len(scores))
                for col, (cat, val) in zip(cols, scores.items()):
                    color = "#10b981" if val >= 70 else "#fbbf24" if val >= 50 else "#ef4444"
                    with col:
                        st.markdown(
                            f"""
                            <div style="background:{color}22;
                                        border-radius:12px;
                                        padding:1rem;
                                        text-align:center;
                                        box-shadow:0 2px 8px rgba(0,0,0,.08);height:100%">
                                <h4 style="margin:0; font-size:0.9rem; color:#d1d5db">{cat.title()}</h4>
                                <p style="margin:0; font-size:1.5rem; font-weight:700; color:{color}">{val}</p>
                            </div>
                            """,
                            unsafe_allow_html=True,
                        )

            # --- Matrices (tone, emotion, pace, facial_sync) ---
            st.markdown(
                """
                <div style="text-align:center; margin-bottom:1rem; margin-top:1rem;">
                    <p style="color:#9ca3af;">Video Attributes</p>
                </div>
                """,
                unsafe_allow_html=True
            )
            matrices = report.get("matrices", {})
            if matrices:
                attr_cols = st.columns(len(matrices))
                for col, (k, v) in zip(attr_cols, matrices.items()):
                    color = "#10b981" if str(v).lower() in ["high", "positive", "fast", "good", "funny", "joy"] else "#fbbf24" if str(v).lower() in ["medium", "neutral", "mixed"] else "#ef4444"

                    with col:
                        st.markdown(f"""
                        <div style="background:{color}22;
                                    border-radius:12px;
                                    padding:1rem;
                                    text-align:center;
                                    box-shadow:0 2px 6px rgba(0,0,0,0.1)">
                            <h4 style="margin:0; font-size:0.9rem; color:#d1d5db">{k.title()}</h4>
                            <p style="margin:0; font-size:1.3rem; font-weight:700; color:{color}">{v}</p>
                        </div>
                        """, unsafe_allow_html=True)

            # --- Summary ---
            if "summary" in report:
                st.markdown(
                    """
                    <h2 style='text-align: center; font-size:1.4rem; margin-top:1.3rem;'>
                        Report Summary
                    </h2>
                    """,
                    unsafe_allow_html=True
                )
                st.markdown(
                    f"""
                    <div style='background-color:#1e3a8a20;
                                border-left: 0.25rem solid #3b82f6;
                                border-radius: 8px;
                                padding: 1rem;
                                text-align: center;
                                color: #d1d5db;'>
                        {report["summary"]}
                    </div>
                    """,
                    unsafe_allow_html=True
                )

            # --- Suggestions ---
            st.markdown(
                """
                <h2 style='text-align: center; font-size:1.4rem; margin-top:1.3rem;'>
                    Suggestions
                </h2>
                """,
                unsafe_allow_html=True
            )

            suggestions = report.get("suggestions", [])
            if suggestions:
                for i, s in enumerate(suggestions, start=1):
                    st.markdown(
                        f"<p style='text-align:center; font-size:1rem;'> {s}</p>",
                        unsafe_allow_html=True
                    )
            else:
                st.markdown(
                    "<p style='text-align:center; color:gray;'>No improvement suggestions provided.</p>",
                    unsafe_allow_html=True
                )

            # --- Audio Analysis ---
            if audio_data:
                st.markdown(
                    """
                    <h2 style='text-align: center; font-size:1.4rem; margin-top:1.5rem;'>
                        Audio Analysis
                    </h2>
                    """,
                    unsafe_allow_html=True
                )

                # --- Audio Score Cards ---
                metrics = {
                    "Delivery Score": audio_data.get("delivery_score", ""),
                    "Duration (s)": round(audio_data.get("duration_seconds", 0), 2),
                    "Words/Sec": audio_data.get("words_per_second", 0),
                    "Tone": audio_data.get("tone", ""),
                    "Emotion": audio_data.get("emotion", ""),
                    "Pace": audio_data.get("pace", ""),
                }

                cols = st.columns(len(metrics))
                for col, (title, value) in zip(cols, metrics.items()):
                    color = "#10b981"

                    if title in ["Delivery Score", "Tone", "Emotion", "Pace"]:
                        if title == "Delivery Score" and isinstance(value, (int, float)):
                            color = "#10b981" if value >= 70 else "#fbbf24" if value >= 50 else "#ef4444"
                        else:
                            val = str(value).lower()
                            if val in ["high", "positive", "fast", "good", "funny", "clear", "joy"]:
                                color = "#10b981"
                            elif val in ["medium", "neutral", "mixed", "average"]:
                                color = "#fbbf24"
                            elif val in ["low", "negative", "slow", "bad", "sad"]:
                                color = "#ef4444"
                            else:
                                color = "#d1d5db"

                    with col:
                        st.markdown(
                            f"""
                            <div style="background:{color}22;
                                        border-radius:12px;
                                        padding:1rem;
                                        text-align:center;
                                        box-shadow:0 2px 6px rgba(0,0,0,0.15);
                                        margin-bottom:0.8rem;">
                                <h4 style="margin:0; font-size:0.85rem; color:#d1d5db">{title}</h4>
                                <p style="margin:0; font-size:1.3rem; font-weight:700; color:{color}">{value}</p>
                            </div>
                            """,
                            unsafe_allow_html=True,
                        )

                # Transcript box
                st.markdown(
                    f"""
                    <div style='background:#111827;
                                border-left: 4px solid #3b82f6;
                                padding:1rem;
                                margin-top:1rem;
                                border-radius:8px;
                                text-align:left;
                                color:#e5e7eb;'>
                        <b>Transcript:</b><br>
                        <i>{audio_data.get("full_transcript","")}</i>
                    </div>
                    """,
                    unsafe_allow_html=True
                )

                # Comment box
                st.markdown(
                    f"""
                    <div style='background:#1e293b;
                                border-radius:8px;
                                padding:0.8rem;
                                margin-top:0.5rem;
                                text-align:center;
                                font-size:0.95rem;
                                color:#d1d5db;'>
                        {audio_data.get("comment","")}
                    </div>
                    """,
                    unsafe_allow_html=True
                )


            # --- Hook Analysis ---
            if hook_data:
                st.markdown(
                    """
                    <h2 style='text-align: center; font-size:1.4rem; margin-top:1.5rem;'>
                        Hook Analysis
                    </h2>
                    """,
                    unsafe_allow_html=True
                )

                # --- Hook Score Card ---
                score = hook_data.get("hook_alignment_score", 0)
                color = "#10b981" if score >= 70 else "#fbbf24" if score >= 50 else "#ef4444"

                st.markdown(
                    f"""
                    <div style="background:{color}22;
                                border-radius:12px;
                                padding:1.2rem;
                                text-align:center;
                                box-shadow:0 2px 6px rgba(0,0,0,0.1);
                                margin:0 auto;
                                width:50%;">
                        <h4 style="margin:0; font-size:1rem; color:#d1d5db;">Hook Alignment Score</h4>
                        <p style="margin:0; font-size:2rem; font-weight:700; color:{color};">{score}</p>
                    </div>
                    """,
                    unsafe_allow_html=True
                )

                # --- Comment Box ---
                st.markdown(
                    f"""
                    <div style='background:#1e293b;
                                border-radius:8px;
                                padding:0.8rem;
                                margin-top:0.5rem;
                                text-align:center;
                                font-size:0.95rem;
                                color:#d1d5db;'>
                        {audio_data.get("comment","")}
                    </div>
                    """,
                    unsafe_allow_html=True
                )

            # --- Download Report ---
            st.markdown("<br>", unsafe_allow_html=True)
            st.download_button(
                "Download Final Report",
                json.dumps(report, indent=2),
                file_name="final_report.json",
            )

        with json_tab:
            with st.expander("Scene Detection", expanded=False):
                st.json(safe_load_json(scene_json))
            with st.expander("Extracted Frames", expanded=False):
                frames_dir = INTERIM_DIR / "frames" / f"{vp.stem}_"
                if frames_dir.exists():
                    imgs = sorted(frames_dir.glob("*.jpg"))
                    if imgs:
                        cols = st.columns(4)
                        for i, img in enumerate(imgs):
                            with cols[i % 4]:
                                st.image(str(img), use_container_width=True)
                    else:
                        st.info("No frames found.")
                else:
                    st.info("No frames directory found.")
            with st.expander("Frame Analysis", expanded=False):
                st.json(safe_load_json(frame_json))
            with st.expander("Audio Analysis", expanded=False):
                st.json(audio_data)
            with st.expander("Hook Analysis", expanded=False):
                st.json(hook_data)
            with st.expander("Final Report", expanded=False):
                st.json(report)

# Reset button only after analysis is done
if st.button("Reset Session"):
    reset_state(clear_video=True)
    st.rerun()
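In the container, this file is launched by entrypoint.sh; for local development the same app can be started directly, roughly as follows. This assumes the repository's requirements.txt covers streamlit, yt_dlp, and plotly; the OpenAI and Gemini keys are entered in the app's API Configuration tab rather than via the environment.

# Install dependencies and run the UI from the repository root
pip install -r requirements.txt
streamlit run ui/streamlit_app.py --server.port 8501 --server.address 0.0.0.0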