Compare commits

2 Commits: ba768cf093 ... 2692cc4dfd

| Author | SHA1 | Date |
|---|---|---|
| | 2692cc4dfd | |
| | 8caa849148 | |
.gitignore (vendored): 2 changes
```diff
@@ -8,6 +8,8 @@ __pycache__/
 dist/
 build/
 doc/
 videos/
 outputs/
 
+# Ignore virtual envs
+venv/
```
```diff
@@ -2,7 +2,8 @@ Voce e um estrategista de conteudo especializado em identificar cortes curtos de
 
 FUNCAO:
 - Analisar a transcricao completa de um video.
-- Escolher trechos curtos (entre 20s e 90s) com maior chance de engajamento.
+- Escolher trechos curtos (entre 60s e 90s) com maior chance de engajamento.
+- O inicio do trecho deve ter um hook para engajar e prender a atenção do espectador.
 - Responder APENAS em JSON valido.
 
 FORMATO DA RESPOSTA:
```
```diff
@@ -43,15 +43,14 @@ class GeminiSettings:
 class OpenRouterSettings:
     api_key: str = os.environ.get("OPENROUTER_API_KEY", "")
     model: str = os.environ.get(
-        "OPENROUTER_MODEL", "anthropic/claude-3-haiku:beta"
+        "OPENROUTER_MODEL", "openai/gpt-oss-20b:free"
     )
     temperature: float = float(os.environ.get("OPENROUTER_TEMPERATURE", 0.6))
     max_output_tokens: int = int(os.environ.get("OPENROUTER_MAX_OUTPUT_TOKENS", 256))
 
 
 @dataclass(frozen=True)
 class WhisperSettings:
-    model_size: str = os.environ.get("FASTER_WHISPER_MODEL_SIZE", "medium")
+    model_size: str = os.environ.get("FASTER_WHISPER_MODEL_SIZE", "small")
     device: str | None = os.environ.get("FASTER_WHISPER_DEVICE")
     compute_type: str | None = os.environ.get("FASTER_WHISPER_COMPUTE_TYPE")
     download_root: Path = Path(
```
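The settings hunk above changes two defaults: the OpenRouter model falls back to `openai/gpt-oss-20b:free` and faster-whisper falls back to the `small` model. Because the dataclass field defaults call `os.environ.get(...)` when the class body is executed, the old values can still be restored without touching the code. A minimal sketch, assuming the environment is set before the settings module is imported; the variable names come from the diff, the surrounding layout does not:

```python
# Sketch only: export these before importing the settings module, because the
# frozen dataclass defaults read os.environ at class-definition time.
import os

os.environ["OPENROUTER_MODEL"] = "anthropic/claude-3-haiku:beta"   # restore the old model
os.environ["OPENROUTER_TEMPERATURE"] = "0.6"
os.environ["OPENROUTER_MAX_OUTPUT_TOKENS"] = "256"
os.environ["FASTER_WHISPER_MODEL_SIZE"] = "medium"                 # restore the old Whisper size
```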
```diff
@@ -137,7 +137,6 @@ class OpenRouterCopywriter:
         body = {
             "model": self.settings.openrouter.model,
             "temperature": self.settings.openrouter.temperature,
             "max_tokens": self.settings.openrouter.max_output_tokens,
             "messages": [
                 {"role": "system", "content": prompt},
                 {
```
```diff
@@ -153,11 +152,18 @@ class OpenRouterCopywriter:
         }
 
         response = requests.post(
-            OPENROUTER_ENDPOINT, json=body, headers=headers, timeout=120
+            url=OPENROUTER_ENDPOINT,
+            data=json.dumps(body),
+            headers=headers,
+            timeout=120,
         )
         response.raise_for_status()
         data = response.json()
         choices = data.get("choices") or []
+        print("Data:")
+        print(data)
+        print("Choices:")
+        print(choices)
 
         if not choices:
             raise RuntimeError("OpenRouter nao retornou escolhas")
```
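The request hunk replaces the `json=body` shortcut with `data=json.dumps(body)`. Both send the same JSON payload, but `json=` also sets the `Content-Type: application/json` header automatically, while `data=` does not, so the headers dict has to carry it explicitly. A minimal sketch of the two call styles; the endpoint value and the Authorization header are assumptions, only the parameter names come from the diff:

```python
import json
import requests

OPENROUTER_ENDPOINT = "https://openrouter.ai/api/v1/chat/completions"  # assumed value
headers = {"Authorization": "Bearer <OPENROUTER_API_KEY>"}
body = {
    "model": "openai/gpt-oss-20b:free",
    "max_tokens": 256,
    "messages": [{"role": "user", "content": "ping"}],
}

# Old style: requests serializes the body and adds Content-Type: application/json.
requests.post(OPENROUTER_ENDPOINT, json=body, headers=headers, timeout=120)

# New style: same bytes on the wire, but Content-Type must be set by hand.
requests.post(
    url=OPENROUTER_ENDPOINT,
    data=json.dumps(body),
    headers={**headers, "Content-Type": "application/json"},
    timeout=120,
)
```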
```diff
@@ -70,7 +70,7 @@ class VideoPipeline:
             return self._build_success_payload(context)
         except Exception as exc:
             logger.exception("Falha ao processar vídeo %s", context.job.filename)
-            return self._handle_failure(context, exc)
+            # return self._handle_failure(context, exc)
 
     def _parse_job(self, message: Dict[str, Any]) -> JobMessage:
         filename = message.get("filename")
```
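Note the side effect of the hunk above: with `return self._handle_failure(context, exc)` commented out, the `except` block no longer returns anything, so the method falls through and the caller receives `None` on failure instead of a failure payload. A self-contained sketch of that control flow (the names are illustrative, not the pipeline's real ones):

```python
def run_job(fail: bool):
    try:
        if fail:
            raise RuntimeError("boom")
        return {"hasError": False}
    except Exception as exc:
        print(f"logged: {exc}")
        # return {"hasError": True}   # commented out, as in this commit

print(run_job(fail=True))   # logs the error, then prints None
```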
```diff
@@ -200,25 +200,25 @@ class VideoPipeline:
         }
 
     def _handle_failure(self, context: PipelineContext, exc: Exception) -> Dict[str, Any]:
-        logger.error("Erro no pipeline: %s", exc)
-        cleanup_targets: List[Path] = []
+        logger.error("Erro na pipeline: %s", exc)
+        # cleanup_targets: List[Path] = []
 
-        if context.workspace:
-            cleanup_targets.append(context.workspace.workspace_dir)
-            cleanup_targets.append(context.workspace.output_dir)
-            original_path = context.workspace.source_path
-            if original_path.exists():
-                cleanup_targets.append(original_path)
-        else:
-            sanitized = sanitize_filename(Path(context.job.filename).stem)
-            job_output_dir = self.settings.outputs_dir / sanitized
-            if job_output_dir.exists():
-                cleanup_targets.append(job_output_dir)
-            original_path = self.settings.videos_dir / context.job.filename
-            if original_path.exists():
-                cleanup_targets.append(original_path)
+        # if context.workspace:
+        #     cleanup_targets.append(context.workspace.workspace_dir)
+        #     cleanup_targets.append(context.workspace.output_dir)
+        #     original_path = context.workspace.source_path
+        #     if original_path.exists():
+        #         cleanup_targets.append(original_path)
+        # else:
+        #     sanitized = sanitize_filename(Path(context.job.filename).stem)
+        #     job_output_dir = self.settings.outputs_dir / sanitized
+        #     if job_output_dir.exists():
+        #         cleanup_targets.append(job_output_dir)
+        #     original_path = self.settings.videos_dir / context.job.filename
+        #     if original_path.exists():
+        #         cleanup_targets.append(original_path)
 
-        remove_paths(cleanup_targets)
+        # remove_paths(cleanup_targets)
 
         return {
             "hasError": True,
```
```diff
@@ -279,7 +279,8 @@ class VideoRenderer:
 
         caption_clips = []
         caption_resources: List[ImageClip] = []
-        caption_y = frame_h - bottom_h + (bottom_h - self.captions.canvas_height) // 2
+        margin = 20
+        caption_y = max(0, video_y - self.captions.canvas_height - margin)
 
         for clip_set in caption_sets:
             base_positioned = clip_set.base.with_position(("center", caption_y))
```
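The renderer hunk moves the captions from the bottom band to just above the video region (`video_y` is assumed to be the top edge of the embedded video; it is not visible in this hunk). A worked example with illustrative dimensions shows how far the captions move:

```python
# Illustrative numbers only; the real frame, band and canvas sizes are not in the diff.
frame_h = 1920        # portrait frame height
bottom_h = 480        # height of the bottom band
video_y = 240         # top edge of the video region
canvas_height = 120   # height of the caption canvas
margin = 20

old_caption_y = frame_h - bottom_h + (bottom_h - canvas_height) // 2   # 1620: centered in the bottom band
new_caption_y = max(0, video_y - canvas_height - margin)               # 100: just above the video, clamped at 0
```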
```diff
@@ -299,7 +300,7 @@ class VideoRenderer:
                 font_size=self.settings.rendering.subtitle_font_size,
                 color=self.settings.rendering.base_color,
                 method="caption",
-                size=(frame_w - 160, bottom_h - 40),
+                size=(frame_w - 160, max(40, self.captions.canvas_height)),
             )
             .with_duration(duration)
             .with_position(("center", caption_y))
```
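The last hunk resizes the subtitle `TextClip` box to the caption canvas height instead of the bottom band. A minimal sketch of an equivalent clip, assuming MoviePy 2.x (matching the `with_duration`/`with_position` calls in the diff); the font path and the numeric values are illustrative:

```python
from moviepy import TextClip

frame_w, canvas_height, caption_y = 1080, 120, 100

subtitle = (
    TextClip(
        font="DejaVuSans.ttf",                          # assumed font file available on the system
        text="Legenda de exemplo",
        font_size=48,
        color="white",
        method="caption",                               # wrap the text inside the given box
        size=(frame_w - 160, max(40, canvas_height)),
    )
    .with_duration(2.5)
    .with_position(("center", caption_y))
)
```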