Adjusts remaining parts of the project

LeoMortari
2025-10-25 00:54:30 -03:00
parent b9e1dcd1e2
commit ba768cf093
8 changed files with 30 additions and 40 deletions

Montserrat.ttf: new binary file (contents not shown)


@@ -1,31 +1,21 @@
# GEMINI_API_KEY="AIzaSyB5TPjSPPZG1Qb6EtblhKFAjvCOdY15rcw"
# YOUTUBE_API="https://totally-real-dingo.ngrok-free.app"
# OPENROUTER_API_KEY="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"
# OPENROUTER_MODEL="openai/gpt-oss-20b:free"
services:
video-render:
restart: unless-stopped
build: .
container_name: video-render
environment:
# RabbitMQ credentials
# - RABBITMQ_PASS=${RABBITMQ_PASS}
- RABBITMQ_PASS="L@l321321321"
- RABBITMQ_PASS=L@l321321321
- RABBITMQ_HOST=154.12.229.181
- RABBITMQ_PORT=32790
# - GEMINI_API_KEY=${GEMINI_API_KEY}
- GEMINI_API_KEY="AIzaSyB5TPjSPPZG1Qb6EtblhKFAjvCOdY15rcw"
- GEMINI_API_KEY=AIzaSyB5TPjSPPZG1Qb6EtblhKFAjvCOdY15rcw
- GEMINI_MODEL=${GEMINI_MODEL:-gemini-2.5-pro}
# - OPENROUTER_API_KEY=${OPENROUTER_API_KEY}
- OPENROUTER_API_KEY="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"
- OPENROUTER_API_KEY=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
- OPENROUTER_MODEL=${OPENROUTER_MODEL:-openai/gpt-oss-20b:free}
- FASTER_WHISPER_MODEL_SIZE=${FASTER_WHISPER_MODEL_SIZE:-small}
# ports:
# - "5000:5000"
volumes:
# Mount host directories into the container so that videos can be
# provided and outputs collected. These paths can be customised when
# deploying the stack. The defaults assume /root/videos and
# /root/outputs on the host.
# - "/root/videos:/app/videos"
# - "/root/outputs:/app/outputs"
- "./videos:/app/videos"


@@ -2,12 +2,10 @@ FROM python:3.11-slim
WORKDIR /app
# Set environment variables
ENV DEBIAN_FRONTEND=noninteractive \
PYTHONUNBUFFERED=1 \
PYTHONDONTWRITEBYTECODE=1
# Install system dependencies
RUN apt-get update && \
apt-get install -y --no-install-recommends \
ffmpeg \
@@ -27,22 +25,16 @@ RUN apt-get update && \
wget \
&& rm -rf /var/lib/apt/lists/*
# Copy requirements first to leverage Docker cache
COPY requirements.txt .
# Install Python dependencies
RUN pip install --no-cache-dir --upgrade pip && \
pip install --no-cache-dir setuptools wheel && \
pip install --no-cache-dir -r requirements.txt
# Copy the rest of the application
COPY . .
# Create necessary directories
RUN mkdir -p /app/videos /app/outputs
# Set volumes
VOLUME ["/app/videos", "/app/outputs"]
# Set the command to run your application
CMD ["python", "-u", "main.py"]


@@ -16,7 +16,7 @@ class RabbitMQSettings:
host: str = os.environ.get("RABBITMQ_HOST", "rabbitmq")
port: int = int(os.environ.get("RABBITMQ_PORT", 5672))
user: str = os.environ.get("RABBITMQ_USER", "admin")
password: str = os.environ.get("RABBITMQ_PASS", "")
password: str = os.environ.get("RABBITMQ_PASS")
consume_queue: str = os.environ.get("RABBITMQ_QUEUE", "to-render")
publish_queue: str = os.environ.get("RABBITMQ_UPLOAD_QUEUE", "to-upload")
prefetch_count: int = int(os.environ.get("RABBITMQ_PREFETCH", 1))
@@ -27,7 +27,7 @@ class RabbitMQSettings:
@dataclass(frozen=True)
class GeminiSettings:
api_key: str = os.environ.get("GEMINI_API_KEY", "")
model: str = os.environ.get("GEMINI_MODEL", "gemini-1.5-pro-latest")
model: str = os.environ.get("GEMINI_MODEL", "gemini-2.5-pro")
safety_settings: str | None = os.environ.get("GEMINI_SAFETY_SETTINGS")
temperature: float = float(os.environ.get("GEMINI_TEMPERATURE", 0.2))
top_k: int | None = (
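
The settings dataclasses read everything from the environment at import time, with literal defaults and explicit int()/float() conversions. A standalone sketch of that pattern (class and variable names here are illustrative, not the project's); note that os.environ.get without a default returns None when the variable is unset:

    import os
    from dataclasses import dataclass

    @dataclass(frozen=True)
    class ExampleSettings:
        # string fields fall back to a literal default
        host: str = os.environ.get("EXAMPLE_HOST", "localhost")
        # numeric fields convert the raw string; the default keeps int() from failing
        port: int = int(os.environ.get("EXAMPLE_PORT", 5672))
        # without a default, a missing variable yields None rather than ""
        token: str | None = os.environ.get("EXAMPLE_TOKEN")

    settings = ExampleSettings()
    print(settings.host, settings.port, settings.token)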


@@ -150,8 +150,6 @@ class OpenRouterCopywriter:
headers = {
"Authorization": f"Bearer {self.settings.openrouter.api_key}",
"Content-Type": "application/json",
"HTTP-Referer": "https://localhost",
"X-Title": "video-render-pipeline",
}
response = requests.post(
@@ -159,19 +157,22 @@ class OpenRouterCopywriter:
)
response.raise_for_status()
data = response.json()
choices = data.get("choices") or []
if not choices:
raise RuntimeError("OpenRouter nao retornou escolhas")
message = choices[0].get("message", {}).get("content")
if not message:
raise RuntimeError("Resposta do OpenRouter sem conteudo")
parsed = self._extract_json(message)
titles = parsed.get("titles")
if not isinstance(titles, list):
raise ValueError("Resposta do OpenRouter invalida: campo 'titles'")
return [str(title) for title in titles]
@staticmethod
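
The snippet above builds an Authorization header and reads choices[0].message.content from the JSON response. A self-contained sketch of a comparable call against OpenRouter's OpenAI-compatible chat completions endpoint (the URL, prompt, and timeout here are assumptions, not taken from this hunk):

    import os
    import requests

    # assumed endpoint: OpenRouter's OpenAI-compatible chat completions API
    url = "https://openrouter.ai/api/v1/chat/completions"
    headers = {
        "Authorization": f"Bearer {os.environ['OPENROUTER_API_KEY']}",
        "Content-Type": "application/json",
    }
    payload = {
        "model": os.environ.get("OPENROUTER_MODEL", "openai/gpt-oss-20b:free"),
        "messages": [
            {"role": "user", "content": "Return a JSON object with a 'titles' list."}
        ],
    }
    response = requests.post(url, headers=headers, json=payload, timeout=60)
    response.raise_for_status()
    data = response.json()
    choices = data.get("choices") or []
    if not choices:
        raise RuntimeError("OpenRouter returned no choices")
    print(choices[0].get("message", {}).get("content"))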


@@ -15,6 +15,7 @@ MessageHandler = Callable[[Dict[str, Any]], Dict[str, Any]]
class RabbitMQWorker:
def __init__(self, settings: Settings) -> None:
print(settings)
self.settings = settings
self._params = pika.ConnectionParameters(
host=settings.rabbitmq.host,
@@ -27,6 +28,7 @@ class RabbitMQWorker:
)
def consume_forever(self, handler: MessageHandler) -> None:
while True:
try:
with pika.BlockingConnection(self._params) as connection:
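
consume_forever keeps reconnecting around pika.BlockingConnection. A self-contained sketch of that reconnect-and-consume pattern (host, port, user, and queue mirror the defaults above; the retry delay, ack handling, and exception choice are assumptions):

    import json
    import time
    import pika

    params = pika.ConnectionParameters(
        host="rabbitmq",
        port=5672,
        credentials=pika.PlainCredentials("admin", "change-me"),
    )

    def on_message(channel, method, properties, body):
        payload = json.loads(body)
        print("received", payload)
        channel.basic_ack(delivery_tag=method.delivery_tag)

    while True:
        try:
            with pika.BlockingConnection(params) as connection:
                channel = connection.channel()
                channel.basic_qos(prefetch_count=1)
                channel.basic_consume(queue="to-render", on_message_callback=on_message)
                channel.start_consuming()
        except pika.exceptions.AMQPConnectionError:
            # connection dropped; wait briefly and reconnect
            time.sleep(5)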


@@ -74,6 +74,7 @@ class VideoPipeline:
def _parse_job(self, message: Dict[str, Any]) -> JobMessage:
filename = message.get("filename")
if not filename:
raise ValueError("Mensagem inválida: 'filename' é obrigatório")
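
_parse_job requires a 'filename' field in the incoming message. For illustration only, the smallest payload that passes this check (any other fields the pipeline uses are not visible in this hunk):

    # hypothetical minimal job message; real messages may carry more fields
    message = {"filename": "example.mp4"}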


@@ -1,19 +1,14 @@
from __future__ import annotations
import logging
import math
import re
from dataclasses import dataclass
from typing import Iterable, List, Sequence, Tuple
import numpy as np
from moviepy.editor import (
ColorClip,
CompositeVideoClip,
ImageClip,
TextClip,
VideoFileClip,
)
from moviepy.video.VideoClip import ColorClip, ImageClip, TextClip
from moviepy.video.compositing.CompositeVideoClip import CompositeVideoClip
from moviepy.video.io.VideoFileClip import VideoFileClip
from PIL import Image, ImageColor, ImageDraw, ImageFont
from video_render.config import Settings
@@ -36,6 +31,7 @@ class CaptionBuilder:
def __init__(self, settings: Settings) -> None:
self.settings = settings
self.font_path = settings.rendering.font_path
if not self.font_path.exists():
raise FileNotFoundError(f"Fonte nao encontrada: {self.font_path}")
@@ -50,6 +46,7 @@ class CaptionBuilder:
self.max_words = settings.rendering.caption_max_words
bbox = self.font.getbbox("Ay")
self.text_height = bbox[3] - bbox[1]
self.baseline = (self.canvas_height - self.text_height) // 2 - bbox[1]
self.space_width = self.font.getbbox(" ")[2] - self.font.getbbox(" ")[0]
@@ -73,6 +70,7 @@ class CaptionBuilder:
)
highlight_clips: List[ImageClip] = []
for word, image in zip(group, highlight_images):
h_start = clamp_time(word.start, minimum=clip_start) - clip_start
h_end = clamp_time(word.end, minimum=word.start + 0.02) - clip_start
@@ -90,13 +88,14 @@ class CaptionBuilder:
def _render_group(self, group: Sequence[WordTiming]) -> Tuple[Image.Image, List[Image.Image]]:
texts = [self._clean_word(word.word) for word in group]
widths = []
for text in texts:
bbox = self.font.getbbox(text)
widths.append(bbox[2] - bbox[0])
total_width = sum(widths)
if len(widths) > 1:
total_width += self.space_width * (len(widths) - 1)
@@ -105,8 +104,8 @@ class CaptionBuilder:
base_image = Image.new("RGBA", (self.canvas_width, self.canvas_height), (0, 0, 0, 0))
base_draw = ImageDraw.Draw(base_image)
highlight_images: List[Image.Image] = []
x = start_x
for text, width in zip(texts, widths):
base_draw.text((x, self.baseline), text, font=self.font, fill=self.base_color)
@@ -130,6 +129,7 @@ class CaptionBuilder:
for word in words:
buffer.append(word)
if len(buffer) == self.max_words:
grouped.append(buffer)
buffer = []
@@ -140,7 +140,6 @@ class CaptionBuilder:
else:
grouped.append(buffer)
# Rebalance groups to respect minimum size when possible
for idx, group in enumerate(grouped[:-1]):
if len(group) < self.min_words and len(grouped[idx + 1]) > self.min_words:
deficit = self.min_words - len(group)
@@ -149,6 +148,7 @@ class CaptionBuilder:
grouped[idx + 1] = grouped[idx + 1][deficit:]
grouped = [grp for grp in grouped if grp]
return grouped
@staticmethod
@@ -175,16 +175,20 @@ class VideoRenderer:
with VideoFileClip(workspace_path) as base_clip:
video_duration = base_clip.duration or 0
for index, window in enumerate(highlight_windows, start=1):
start = clamp_time(window.start)
end = clamp_time(window.end)
start = min(start, video_duration)
end = min(end, video_duration)
if end <= start:
logger.info("Janela ignorada por intervalo invalido: %s", window)
continue
subclip = base_clip.subclipped(start, end)
try:
rendered_path = self._render_single_clip(
subclip=subclip,
@@ -236,7 +240,6 @@ class VideoRenderer:
)
resized_clip = subclip.resized(scale_factor)
video_y = top_h + (video_area_h - resized_clip.h) // 2
video_clip = resized_clip.with_position(
((frame_w - resized_clip.w) // 2, video_y)
)
@@ -277,6 +280,7 @@ class VideoRenderer:
caption_clips = []
caption_resources: List[ImageClip] = []
caption_y = frame_h - bottom_h + (bottom_h - self.captions.canvas_height) // 2
for clip_set in caption_sets:
base_positioned = clip_set.base.with_position(("center", caption_y))
caption_clips.append(base_positioned)
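
_render_group measures each word with Pillow's font.getbbox before drawing the base and highlight layers. A standalone sketch of that measurement step (font size and sample words are illustrative; the font file is assumed to be the Montserrat.ttf added in this commit):

    from PIL import ImageFont

    font = ImageFont.truetype("Montserrat.ttf", 64)
    words = ["exemplo", "de", "legenda"]

    widths = []
    for text in words:
        bbox = font.getbbox(text)          # (left, top, right, bottom) of the rendered text
        widths.append(bbox[2] - bbox[0])   # ink width of the word

    space_width = font.getlength(" ")      # advance width of a space
    total_width = sum(widths) + space_width * (len(words) - 1)
    print(widths, total_width)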