diff --git a/main.py b/main.py index 742fedf..05778e4 100755 --- a/main.py +++ b/main.py @@ -1,31 +1,31 @@ #!/usr/bin/env python import math import sys -from os import name -from pathlib import Path +import typing +import os from subprocess import Popen -from typing import Dict, NoReturn +from typing import Dict, NoReturn, Optional from prawcore import ResponseException from reddit.subreddit import get_subreddit_threads -from utils import settings from utils.cleanup import cleanup -from utils.console import print_markdown, print_step, print_substep +from utils.console import print_markdown, print_step, print_substep, format_ordinal from utils.ffmpeg_install import ffmpeg_install from utils.id import extract_id -from utils.version import checkversion -from video_creation.background import ( +from utils.settings import get_config +from utils.version import check_python, checkversion +from video_creation import ( chop_background, download_background_audio, download_background_video, get_background_config, + get_screenshots_of_reddit_posts, + make_final_video, ) -from video_creation.final_video import make_final_video -from video_creation.screenshot_downloader import get_screenshots_of_reddit_posts from video_creation.voices import save_text_to_mp3 -__VERSION__ = "3.4.0" +__VERSION__ = "4.0.0" print( """ @@ -38,15 +38,17 @@ print( """ ) print_markdown( - "### Thanks for using this tool! Feel free to contribute to this project on GitHub! If you have any questions, feel free to join my Discord server or submit a GitHub issue. You can find solutions to many common problems in the documentation: https://reddit-video-maker-bot.netlify.app/" + "### Thanks for using this tool! Feel free to contribute to this project on GitHub! If you have any questions," + " feel free to join my Discord server or submit a GitHub issue." + " You can find solutions to many common problems in the documentation: https://reddit-video-maker-bot.netlify.app/" ) checkversion(__VERSION__) -reddit_id: str +reddit_id: Optional[str] = None reddit_object: Dict[str, str | list] -def main(POST_ID=None) -> None: +def make_video(POST_ID: typing.Optional[str] = None) -> None: global reddit_id, reddit_object reddit_object = get_subreddit_threads(POST_ID) reddit_id = extract_id(reddit_object) @@ -66,15 +68,13 @@ def main(POST_ID=None) -> None: def run_many(times) -> None: for x in range(1, times + 1): - print_step( - f'on the {x}{("th", "st", "nd", "rd", "th", "th", "th", "th", "th", "th")[x % 10]} iteration of {times}' - ) - main() - Popen("cls" if name == "nt" else "clear", shell=True).wait() + print_step(f"on the {format_ordinal(x)} iteration of {times}") + make_video() + Popen("cls" if os.name == "nt" else "clear", shell=True).wait() def shutdown() -> NoReturn: - if "reddit_id" in globals(): + if reddit_id is not None: print_markdown("## Clearing temp files") cleanup(reddit_id) @@ -82,41 +82,23 @@ def shutdown() -> NoReturn: sys.exit() -if __name__ == "__main__": - if sys.version_info.major != 3 or sys.version_info.minor not in [10, 11, 12]: - print( - "Hey! Congratulations, you've made it so far (which is pretty rare with no Python 3.10). Unfortunately, this program only works on Python 3.10. Please install Python 3.10 and try again." 
- ) - sys.exit() +def main(): + check_python() ffmpeg_install() - directory = Path().absolute() - config = settings.check_toml( - f"{directory}/utils/.config.template.toml", f"{directory}/config.toml" - ) - config is False and sys.exit() - - if ( - not settings.config["settings"]["tts"]["tiktok_sessionid"] - or settings.config["settings"]["tts"]["tiktok_sessionid"] == "" - ) and config["settings"]["tts"]["voice_choice"] == "tiktok": - print_substep( - "TikTok voice requires a sessionid! Check our documentation on how to obtain one.", - "bold red", - ) - sys.exit() + config = get_config() + try: - if config["reddit"]["thread"]["post_id"]: - for index, post_id in enumerate(config["reddit"]["thread"]["post_id"].split("+")): + post_ids = config["reddit"]["thread"]["post_id"].split("+") + if post_ids and post_ids[0]: + for index, post_id in enumerate(post_ids): index += 1 - print_step( - f'on the {index}{("st" if index % 10 == 1 else ("nd" if index % 10 == 2 else ("rd" if index % 10 == 3 else "th")))} post of {len(config["reddit"]["thread"]["post_id"].split("+"))}' - ) - main(post_id) - Popen("cls" if name == "nt" else "clear", shell=True).wait() + print_step(f"on the {format_ordinal(index)} post of {len(post_ids)}") + make_video(post_id) + Popen("cls" if os.name == "nt" else "clear", shell=True).wait() elif config["settings"]["times_to_run"]: run_many(config["settings"]["times_to_run"]) else: - main() + make_video() except KeyboardInterrupt: shutdown() except ResponseException: @@ -128,9 +110,14 @@ if __name__ == "__main__": config["settings"]["tts"]["elevenlabs_api_key"] = "REDACTED" config["settings"]["tts"]["openai_api_key"] = "REDACTED" print_step( - f"Sorry, something went wrong with this version! Try again, and feel free to report this issue at GitHub or the Discord community.\n" + f"Sorry, something went wrong with this version! Try again," + " and feel free to report this issue at GitHub or the Discord community.\n" f"Version: {__VERSION__} \n" f"Error: {err} \n" f'Config: {config["settings"]}' ) raise err + + +if __name__ == "__main__": + main() diff --git a/reddit/subreddit.py b/reddit/subreddit.py index 5f2ac5f..919fcf5 100644 --- a/reddit/subreddit.py +++ b/reddit/subreddit.py @@ -1,4 +1,5 @@ import re +import typing import praw from praw.models import MoreComments @@ -13,7 +14,7 @@ from utils.videos import check_done from utils.voice import sanitize_text -def get_subreddit_threads(POST_ID: str): +def get_subreddit_threads(POST_ID: typing.Optional[str]): """ Returns a list of threads from the AskReddit subreddit.
""" diff --git a/test.py b/test.py new file mode 100644 index 0000000..4c3b517 --- /dev/null +++ b/test.py @@ -0,0 +1,508 @@ +# from typing import Annotated, Literal, Optional +# from pydantic import BaseModel, Field, StringConstraints + + +# class RedditCreds(BaseModel): +# client_id: Annotated[ +# str, +# StringConstraints( +# min_length=12, max_length=30, pattern=r"^[-a-zA-Z0-9._~+/]+=*$" +# ), +# ] = Field(..., description="The ID of your Reddit app of SCRIPT type") + +# client_secret: Annotated[ +# str, +# StringConstraints( +# min_length=20, max_length=40, pattern=r"^[-a-zA-Z0-9._~+/]+=*$" +# ), +# ] = Field(..., description="The SECRET of your Reddit app of SCRIPT type") + +# username: Annotated[ +# str, StringConstraints(min_length=3, max_length=20, pattern=r"^[-_0-9a-zA-Z]+$") +# ] = Field(..., description="The username of your Reddit account") + +# password: Annotated[str, StringConstraints(min_length=8)] = Field( +# ..., description="The password of your Reddit account" +# ) + +# twofa: Optional[bool] = Field(False, description="Whether Reddit 2FA is enabled") + + +# class RedditThread(BaseModel): +# random: Optional[bool] = Field( +# False, description="If true, picks a random thread instead of asking for URL" +# ) + +# subreddit: Annotated[ +# str, StringConstraints(min_length=3, max_length=20, pattern=r"[_0-9a-zA-Z\+]+$") +# ] = Field(..., description="Name(s) of subreddit(s), '+' separated") + +# post_id: Annotated[Optional[str], StringConstraints(pattern=r"^[+a-zA-Z0-9]*$")] = ( +# Field("", description="Specify a Reddit post ID if desired") +# ) + +# max_comment_length: Annotated[int, Field(ge=10, le=10000)] = Field( +# 500, description="Max number of characters per comment" +# ) + +# min_comment_length: Annotated[int, Field(ge=0, le=10000)] = Field( +# 1, description="Min number of characters per comment" +# ) + +# post_lang: Optional[str] = Field( +# "", description="Target language code for translation (e.g., 'es-cr')" +# ) + +# min_comments: Annotated[int, Field(ge=10)] = Field( +# 20, description="Minimum number of comments required" +# ) + + +# class RedditThreadExtras(BaseModel): +# min_comments: Annotated[ +# int, +# Field( +# default=20, +# ge=10, +# le=999999, +# description="The minimum number of comments a post should have to be included. 
Default is 20.", +# examples=[29], +# ), +# ] + + +# class AIConfig(BaseModel): +# ai_similarity_enabled: Annotated[ +# bool, +# Field( +# default=False, +# description="Threads read from Reddit are sorted based on their similarity to the keywords given below.", +# ), +# ] +# ai_similarity_keywords: Annotated[ +# str, +# Field( +# default="", +# description="Every keyword or sentence, separated by commas, is used to sort Reddit threads based on similarity.", +# examples=["Elon Musk, Twitter, Stocks"], +# ), +# ] + + +# class SettingsTTS(BaseModel): +# voice_choice: Annotated[ +# Literal[ +# "elevenlabs", +# "streamlabspolly", +# "tiktok", +# "googletranslate", +# "awspolly", +# "pyttsx", +# ], +# Field( +# default="tiktok", +# description="The voice platform used for TTS generation.", +# examples=["tiktok"], +# ), +# ] +# random_voice: Annotated[ +# bool, +# Field( +# default=True, +# description="Randomizes the voice used for each comment.", +# examples=[True], +# ), +# ] +# elevenlabs_voice_name: Annotated[ +# Literal[ +# "Adam", "Antoni", "Arnold", "Bella", "Domi", "Elli", "Josh", "Rachel", "Sam" +# ], +# Field( +# default="Bella", +# description="The voice used for ElevenLabs.", +# examples=["Bella"], +# ), +# ] +# elevenlabs_api_key: Annotated[ +# str, +# Field( +# default="", +# description="ElevenLabs API key.", +# examples=["21f13f91f54d741e2ae27d2ab1b99d59"], +# ), +# ] +# aws_polly_voice: Annotated[ +# str, +# Field( +# default="Matthew", +# description="The voice used for AWS Polly.", +# examples=["Matthew"], +# ), +# ] +# streamlabs_polly_voice: Annotated[ +# str, +# Field( +# default="Matthew", +# description="The voice used for Streamlabs Polly.", +# examples=["Matthew"], +# ), +# ] +# tiktok_voice: Annotated[ +# str, +# Field( +# default="en_us_001", +# description="The voice used for TikTok TTS.", +# examples=["en_us_006"], +# ), +# ] +# tiktok_sessionid: Annotated[ +# str, +# Field( +# default="", +# description="TikTok sessionid needed for TikTok TTS.", +# examples=["c76bcc3a7625abcc27b508c7db457ff1"], +# ), +# ] +# python_voice: Annotated[ +# str, +# Field( +# default="1", +# description="The index of the system TTS voices (starts from 0).", +# examples=["1"], +# ), +# ] +# py_voice_num: Annotated[ +# str, +# Field( +# default="2", +# description="The number of system voices available.", +# examples=["2"], +# ), +# ] +# silence_duration: Annotated[ +# float, +# Field( +# default=0.3, +# description="Time in seconds between TTS comments.", +# examples=["0.1"], +# ), +# ] +# no_emojis: Annotated[ +# bool, +# Field( +# default=False, +# description="Whether to remove emojis from the comments.", +# examples=[False], +# ), +# ] +# openai_api_url: Annotated[ +# str, +# Field( +# default="https://api.openai.com/v1/", +# description="The API endpoint URL for OpenAI TTS generation.", +# examples=["https://api.openai.com/v1/"], +# ), +# ] +# openai_api_key: Annotated[ +# str, +# Field( +# default="", +# description="Your OpenAI API key for TTS generation.", +# examples=["sk-abc123def456..."], +# ), +# ] +# openai_voice_name: Annotated[ +# Literal[ +# "alloy", "ash", "coral", "echo", "fable", "onyx", "nova", "sage", "shimmer" +# ], +# Field( +# default="alloy", +# description="The voice used for OpenAI TTS generation.", +# examples=["alloy"], +# ), +# ] +# openai_model: Annotated[ +# Literal["tts-1", "tts-1-hd"], +# Field( +# default="tts-1", +# description="The model variant used for OpenAI TTS generation.", +# examples=["tts-1"], +# ), +# ] + + +# class 
SettingsBackground(BaseModel): +# background_video: Annotated[ +# str, +# Field( +# default="minecraft", +# description="Sets the background for the video based on game name", +# examples=["rocket-league"], +# ), +# StringConstraints(strip_whitespace=True), +# ] = "minecraft" + +# background_audio: Annotated[ +# str, +# Field( +# default="lofi", +# description="Sets the background audio for the video", +# examples=["chill-summer"], +# ), +# StringConstraints(strip_whitespace=True), +# ] = "lofi" + +# background_audio_volume: Annotated[ +# float, +# Field( +# default=0.15, +# ge=0, +# le=1, +# description="Sets the volume of the background audio. If you don't want background audio, set it to 0.", +# examples=[0.05], +# ), +# ] = 0.15 + +# enable_extra_audio: Annotated[ +# bool, +# Field( +# default=False, +# description="Used if you want to render another video without background audio in a separate folder", +# ), +# ] = False + +# background_thumbnail: Annotated[ +# bool, +# Field( +# default=False, +# description="Generate a thumbnail for the video (put a thumbnail.png file in the assets/backgrounds directory.)", +# ), +# ] = False + +# background_thumbnail_font_family: Annotated[ +# str, +# Field( +# default="arial", +# description="Font family for the thumbnail text", +# examples=["arial"], +# ), +# ] = "arial" + +# background_thumbnail_font_size: Annotated[ +# int, +# Field( +# default=96, +# description="Font size in pixels for the thumbnail text", +# examples=[96], +# ), +# ] = 96 + +# background_thumbnail_font_color: Annotated[ +# str, +# Field( +# default="255,255,255", +# description="Font color in RGB format for the thumbnail text", +# examples=["255,255,255"], +# ), +# ] = "255,255,255" + + +# class Settings(BaseModel): +# allow_nsfw: Annotated[ +# bool, +# Field( +# default=False, +# description="Whether to allow NSFW content. True or False.", +# examples=[False], +# ), +# ] +# theme: Annotated[ +# Literal["dark", "light", "transparent"], +# Field( +# default="dark", +# description="Sets the Reddit theme. For story mode, 'transparent' is also allowed.", +# examples=["light"], +# ), +# ] +# times_to_run: Annotated[ +# int, +# Field( +# default=1, +# ge=1, +# description="Used if you want to run multiple times. Must be an int >= 1.", +# examples=[2], +# ), +# ] +# opacity: Annotated[ +# float, +# Field( +# default=0.9, +# ge=0.0, +# le=1.0, +# description="Sets the opacity of comments when overlaid over the background.", +# examples=[0.8], +# ), +# ] +# storymode: Annotated[ +# bool, +# Field( +# default=False, +# description="Only read out title and post content. Great for story-based subreddits.", +# examples=[False], +# ), +# ] +# storymodemethod: Annotated[ +# Literal[0, 1], +# Field( +# default=1, +# description="Style used for story mode: 0 = static image, 1 = fancy video.", +# examples=[1], +# ), +# ] +# storymode_max_length: Annotated[ +# int, +# Field( +# default=1000, +# ge=1, +# description="Max length (in characters) of the story mode video.", +# examples=[1000], +# ), +# ] +# resolution_w: Annotated[ +# int, +# Field( +# default=1080, +# description="Sets the width in pixels of the final video.", +# examples=[1440], +# ), +# ] +# resolution_h: Annotated[ +# int, +# Field( +# default=1920, +# description="Sets the height in pixels of the final video.", +# examples=[2560], +# ), +# ] +# zoom: Annotated[ +# float, +# Field( +# default=1.0, +# ge=0.1, +# le=2.0, +# description="Sets the browser zoom level. 
Useful for making text larger.", +# examples=[1.1], +# ), +# ] +# channel_name: Annotated[ +# str, +# Field( +# default="Reddit Tales", +# description="Sets the channel name for the video.", +# examples=["Reddit Stories"], +# ), +# ] +# tts: SettingsTTS +# background: SettingsBackground + + +# class Reddit(BaseModel): +# creds: RedditCreds +# thread: RedditThread + + +# class Config(BaseModel): +# reddit: Reddit +# ai: AIConfig +# settings: Settings + + +# # from pydantic import ValidationError +# # import toml + +# # try: +# # with open("config.toml") as f: +# # t = toml.load(f) + +# # c = Config(**t) +# # print(c.model_dump()) + +# # except ValidationError as e: +# # print(e.json(indent=2)) + +# from typing import get_args, get_origin +# from pydantic import BaseModel, Field, ValidationError +# from pydantic.fields import FieldInfo +# import sys +# from pydantic_core import PydanticUndefined + +# def prompt_recursive(model_class: type[BaseModel], prefix="") -> BaseModel: +# obj = model_class.model_construct() +# for field_name, field in model_class.model_fields.items(): +# value = getattr(obj, field_name, PydanticUndefined) +# if isinstance(field.annotation, type) and issubclass(field.annotation, BaseModel): +# # Recurse into nested model +# # print(f"\n[{prefix + field_name}]") +# nested = prompt_recursive(field.annotation, prefix=prefix + field_name + ".") +# setattr(obj, field_name, nested) +# continue + +# if value is not PydanticUndefined and value is not None: +# continue # Already has a value + +# # Print the description +# description = field.description or "" +# default_str = f" (✨ default: {field.default})" if field.default is not PydanticUndefined else "" +# prompt = f"🧩 {field_name}: \n{description}{default_str}\n> " + +# while True: +# raw_input = input(prompt).strip() +# if raw_input == "" and field.default is not PydanticUndefined: +# value_to_assign = field.default +# else: +# try: +# # Try parsing based on field type +# value_to_assign = parse_value(raw_input, field.annotation) +# except Exception as e: +# print(f"⚠️ Invalid input: {e}") +# continue + +# try: +# # Validate single field using Pydantic's validator +# model_class.__pydantic_validator__.validate_assignment(obj, field_name, value_to_assign) +# setattr(obj, field_name, value_to_assign) +# break +# except ValidationError as ve: +# for err in ve.errors(): +# print(f"❌ {err['loc'][0]}: {err['msg']}") +# return obj + +# def parse_value(raw: str, expected_type: type): +# origin = get_origin(expected_type) +# args = get_args(expected_type) + +# if expected_type == bool: +# if raw.lower() in ["true", "yes", "1"]: +# return True +# elif raw.lower() in ["false", "no", "0"]: +# return False +# else: +# raise ValueError("Expected true/false") +# elif expected_type == int: +# return int(raw) +# elif expected_type == float: +# return float(raw) +# elif expected_type == str: +# return raw +# elif origin is list and args: +# return [parse_value(v.strip(), args[0]) for v in raw.split(",")] +# else: +# raise ValueError(f"Unsupported type: {expected_type}") + +# background_config = prompt_recursive(Config) +# print(background_config) + +from utils.settings import get_config + + +print(get_config()) \ No newline at end of file diff --git a/utils/.config.template.toml b/utils/.config.template.toml deleted file mode 100644 index 4732782..0000000 --- a/utils/.config.template.toml +++ /dev/null @@ -1,61 +0,0 @@ -[reddit.creds] -client_id = { optional = false, nmin = 12, nmax = 30, explanation = "The ID of your Reddit app of SCRIPT 
type", example = "fFAGRNJru1FTz70BzhT3Zg", regex = "^[-a-zA-Z0-9._~+/]+=*$", input_error = "The client ID can only contain printable characters.", oob_error = "The ID should be over 12 and under 30 characters, double check your input." } -client_secret = { optional = false, nmin = 20, nmax = 40, explanation = "The SECRET of your Reddit app of SCRIPT type", example = "fFAGRNJru1FTz70BzhT3Zg", regex = "^[-a-zA-Z0-9._~+/]+=*$", input_error = "The client ID can only contain printable characters.", oob_error = "The secret should be over 20 and under 40 characters, double check your input." } -username = { optional = false, nmin = 3, nmax = 20, explanation = "The username of your reddit account", example = "JasonLovesDoggo", regex = "^[-_0-9a-zA-Z]+$", oob_error = "A username HAS to be between 3 and 20 characters" } -password = { optional = false, nmin = 8, explanation = "The password of your reddit account", example = "fFAGRNJru1FTz70BzhT3Zg", oob_error = "Password too short" } -2fa = { optional = true, type = "bool", options = [true, false, ], default = false, explanation = "Whether you have Reddit 2FA enabled, Valid options are True and False", example = true } - - -[reddit.thread] -random = { optional = true, options = [true, false, ], default = false, type = "bool", explanation = "If set to no, it will ask you a thread link to extract the thread, if yes it will randomize it. Default: 'False'", example = "True" } -subreddit = { optional = false, regex = "[_0-9a-zA-Z\\+]+$", nmin = 3, explanation = "What subreddit to pull posts from, the name of the sub, not the URL. You can have multiple subreddits, add an + with no spaces.", example = "AskReddit+Redditdev", oob_error = "A subreddit name HAS to be between 3 and 20 characters" } -post_id = { optional = true, default = "", regex = "^((?!://|://)[+a-zA-Z0-9])*$", explanation = "Used if you want to use a specific post.", example = "urdtfx" } -max_comment_length = { default = 500, optional = false, nmin = 10, nmax = 10000, type = "int", explanation = "max number of characters a comment can have. default is 500", example = 500, oob_error = "the max comment length should be between 10 and 10000" } -min_comment_length = { default = 1, optional = true, nmin = 0, nmax = 10000, type = "int", explanation = "min_comment_length number of characters a comment can have. 
default is 0", example = 50, oob_error = "the max comment length should be between 1 and 100" } -post_lang = { default = "", optional = true, explanation = "The language you would like to translate to.", example = "es-cr", options = ['','af', 'ak', 'am', 'ar', 'as', 'ay', 'az', 'be', 'bg', 'bho', 'bm', 'bn', 'bs', 'ca', 'ceb', 'ckb', 'co', 'cs', 'cy', 'da', 'de', 'doi', 'dv', 'ee', 'el', 'en', 'en-US', 'eo', 'es', 'et', 'eu', 'fa', 'fi', 'fr', 'fy', 'ga', 'gd', 'gl', 'gn', 'gom', 'gu', 'ha', 'haw', 'hi', 'hmn', 'hr', 'ht', 'hu', 'hy', 'id', 'ig', 'ilo', 'is', 'it', 'iw', 'ja', 'jw', 'ka', 'kk', 'km', 'kn', 'ko', 'kri', 'ku', 'ky', 'la', 'lb', 'lg', 'ln', 'lo', 'lt', 'lus', 'lv', 'mai', 'mg', 'mi', 'mk', 'ml', 'mn', 'mni-Mtei', 'mr', 'ms', 'mt', 'my', 'ne', 'nl', 'no', 'nso', 'ny', 'om', 'or', 'pa', 'pl', 'ps', 'pt', 'qu', 'ro', 'ru', 'rw', 'sa', 'sd', 'si', 'sk', 'sl', 'sm', 'sn', 'so', 'sq', 'sr', 'st', 'su', 'sv', 'sw', 'ta', 'te', 'tg', 'th', 'ti', 'tk', 'tl', 'tr', 'ts', 'tt', 'ug', 'uk', 'ur', 'uz', 'vi', 'xh', 'yi', 'yo', 'zh-CN', 'zh-TW', 'zu'] } -min_comments = { default = 20, optional = false, nmin = 10, type = "int", explanation = "The minimum number of comments a post should have to be included. default is 20", example = 29, oob_error = "the minimum number of comments should be between 15 and 999999" } - -[ai] -ai_similarity_enabled = {optional = true, option = [true, false], default = false, type = "bool", explanation = "Threads read from Reddit are sorted based on their similarity to the keywords given below"} -ai_similarity_keywords = {optional = true, type="str", example= 'Elon Musk, Twitter, Stocks', explanation = "Every keyword or even sentence, seperated with comma, is used to sort the reddit threads based on similarity"} - -[settings] -allow_nsfw = { optional = false, type = "bool", default = false, example = false, options = [true, false, ], explanation = "Whether to allow NSFW content, True or False" } -theme = { optional = false, default = "dark", example = "light", options = ["dark", "light", "transparent", ], explanation = "Sets the Reddit theme, either LIGHT or DARK. For story mode you can also use a transparent background." } -times_to_run = { optional = false, default = 1, example = 2, explanation = "Used if you want to run multiple times. Set to an int e.g. 4 or 29 or 1", type = "int", nmin = 1, oob_error = "It's very hard to run something less than once." } -opacity = { optional = false, default = 0.9, example = 0.8, explanation = "Sets the opacity of the comments when overlayed over the background", type = "float", nmin = 0, nmax = 1, oob_error = "The opacity HAS to be between 0 and 1", input_error = "The opacity HAS to be a decimal number between 0 and 1" } -#transition = { optional = true, default = 0.2, example = 0.2, explanation = "Sets the transition time (in seconds) between the comments. Set to 0 if you want to disable it.", type = "float", nmin = 0, nmax = 2, oob_error = "The transition HAS to be between 0 and 2", input_error = "The opacity HAS to be a decimal number between 0 and 2" } -storymode = { optional = true, type = "bool", default = false, example = false, options = [true, false,], explanation = "Only read out title and post content, great for subreddits with stories" } -storymodemethod= { optional = true, default = 1, example = 1, explanation = "Style that's used for the storymode. 
Set to 0 for single picture display in whole video, set to 1 for fancy looking video ", type = "int", nmin = 0, oob_error = "It's very hard to run something less than once.", options = [0, 1] } -storymode_max_length = { optional = true, default = 1000, example = 1000, explanation = "Max length of the storymode video in characters. 200 characters are approximately 50 seconds.", type = "int", nmin = 1, oob_error = "It's very hard to make a video under a second." } -resolution_w = { optional = false, default = 1080, example = 1440, explantation = "Sets the width in pixels of the final video" } -resolution_h = { optional = false, default = 1920, example = 2560, explantation = "Sets the height in pixels of the final video" } -zoom = { optional = true, default = 1, example = 1.1, explanation = "Sets the browser zoom level. Useful if you want the text larger.", type = "float", nmin = 0.1, nmax = 2, oob_error = "The text is really difficult to read at a zoom level higher than 2" } -channel_name = { optional = true, default = "Reddit Tales", example = "Reddit Stories", explanation = "Sets the channel name for the video" } - -[settings.background] -background_video = { optional = true, default = "minecraft", example = "rocket-league", options = ["minecraft", "gta", "rocket-league", "motor-gta", "csgo-surf", "cluster-truck", "minecraft-2","multiversus","fall-guys","steep", ""], explanation = "Sets the background for the video based on game name" } -background_audio = { optional = true, default = "lofi", example = "chill-summer", options = ["lofi","lofi-2","chill-summer",""], explanation = "Sets the background audio for the video" } -background_audio_volume = { optional = true, type = "float", nmin = 0, nmax = 1, default = 0.15, example = 0.05, explanation="Sets the volume of the background audio. 
If you don't want background audio, set it to 0.", oob_error = "The volume HAS to be between 0 and 1", input_error = "The volume HAS to be a float number between 0 and 1"} -enable_extra_audio = { optional = true, type = "bool", default = false, example = false, explanation="Used if you want to render another video without background audio in a separate folder", input_error = "The value HAS to be true or false"} -background_thumbnail = { optional = true, type = "bool", default = false, example = false, options = [true, false,], explanation = "Generate a thumbnail for the video (put a thumbnail.png file in the assets/backgrounds directory.)" } -background_thumbnail_font_family = { optional = true, default = "arial", example = "arial", explanation = "Font family for the thumbnail text" } -background_thumbnail_font_size = { optional = true, type = "int", default = 96, example = 96, explanation = "Font size in pixels for the thumbnail text" } -background_thumbnail_font_color = { optional = true, default = "255,255,255", example = "255,255,255", explanation = "Font color in RGB format for the thumbnail text" } - -[settings.tts] -random_voice = { optional = false, type = "bool", default = true, example = true, options = [true, false,], explanation = "Randomizes the voice used for each comment" } -elevenlabs_voice_name = { optional = false, default = "Bella", example = "Bella", explanation = "The voice used for elevenlabs", options = ["Adam", "Antoni", "Arnold", "Bella", "Domi", "Elli", "Josh", "Rachel", "Sam", ] } -elevenlabs_api_key = { optional = true, example = "21f13f91f54d741e2ae27d2ab1b99d59", explanation = "Elevenlabs API key" } -aws_polly_voice = { optional = false, default = "Matthew", example = "Matthew", explanation = "The voice used for AWS Polly" } -streamlabs_polly_voice = { optional = false, default = "Matthew", example = "Matthew", explanation = "The voice used for Streamlabs Polly" } -tiktok_voice = { optional = true, default = "en_us_001", example = "en_us_006", explanation = "The voice used for TikTok TTS" } -tiktok_sessionid = { optional = true, example = "c76bcc3a7625abcc27b508c7db457ff1", explanation = "TikTok sessionid needed if you're using the TikTok TTS. Check documentation if you don't know how to obtain it." 
} -python_voice = { optional = false, default = "1", example = "1", explanation = "The index of the system tts voices (can be downloaded externally, run ptt.py to find value, start from zero)" } -py_voice_num = { optional = false, default = "2", example = "2", explanation = "The number of system voices (2 are pre-installed in Windows)" } -silence_duration = { optional = true, example = "0.1", explanation = "Time in seconds between TTS comments", default = 0.3, type = "float" } -no_emojis = { optional = false, type = "bool", default = false, example = false, options = [true, false,], explanation = "Whether to remove emojis from the comments" } -openai_api_url = { optional = true, default = "https://api.openai.com/v1/", example = "https://api.openai.com/v1/", explanation = "The API endpoint URL for OpenAI TTS generation" } -openai_api_key = { optional = true, example = "sk-abc123def456...", explanation = "Your OpenAI API key for TTS generation" } -openai_voice_name = { optional = false, default = "alloy", example = "alloy", explanation = "The voice used for OpenAI TTS generation", options = ["alloy", "ash", "coral", "echo", "fable", "onyx", "nova", "sage", "shimmer", "af_heart"] } -openai_model = { optional = false, default = "tts-1", example = "tts-1", explanation = "The model variant used for OpenAI TTS generation", options = ["tts-1", "tts-1-hd", "gpt-4o-mini-tts"] } \ No newline at end of file diff --git a/utils/cleanup.py b/utils/cleanup.py index 8c73b15..3765d69 100644 --- a/utils/cleanup.py +++ b/utils/cleanup.py @@ -1,13 +1,14 @@ import os import shutil from os.path import exists +from typing import Literal def _listdir(d): # listdir with full path return [os.path.join(d, f) for f in os.listdir(d)] -def cleanup(reddit_id) -> int: +def cleanup(reddit_id) -> None | Literal[1]: """Deletes all temporary assets in assets/temp Returns: diff --git a/utils/config_model.py b/utils/config_model.py new file mode 100644 index 0000000..7a57776 --- /dev/null +++ b/utils/config_model.py @@ -0,0 +1,417 @@ +from typing import Annotated, Literal, Optional +from pydantic import BaseModel, Field, StringConstraints + + +class RedditCreds(BaseModel): + client_id: Annotated[ + str, + StringConstraints( + min_length=12, max_length=30, pattern=r"^[-a-zA-Z0-9._~+/]+=*$" + ), + ] = Field(..., description="The ID of your Reddit app of SCRIPT type") + + client_secret: Annotated[ + str, + StringConstraints( + min_length=20, max_length=40, pattern=r"^[-a-zA-Z0-9._~+/]+=*$" + ), + ] = Field(..., description="The SECRET of your Reddit app of SCRIPT type") + + username: Annotated[ + str, StringConstraints(min_length=3, max_length=20, pattern=r"^[-_0-9a-zA-Z]+$") + ] = Field(..., description="The username of your Reddit account") + + password: Annotated[str, StringConstraints(min_length=8)] = Field( + ..., description="The password of your Reddit account" + ) + + twofa: Optional[bool] = Field(False, description="Whether Reddit 2FA is enabled") + + +class RedditThread(BaseModel): + random: Optional[bool] = Field( + False, description="If true, picks a random thread instead of asking for URL" + ) + + subreddit: Annotated[ + str, StringConstraints(min_length=3, max_length=20, pattern=r"[_0-9a-zA-Z\+]+$") + ] = Field(..., description="Name(s) of subreddit(s), '+' separated") + + post_id: Annotated[Optional[str], StringConstraints(pattern=r"^[+a-zA-Z0-9]*$")] = ( + Field("", description="Specify a Reddit post ID if desired") + ) + + max_comment_length: Annotated[int, Field(ge=10, le=10000)] = Field( + 500, description="Max 
number of characters per comment" + ) + + min_comment_length: Annotated[int, Field(ge=0, le=10000)] = Field( + 1, description="Min number of characters per comment" + ) + + post_lang: Optional[str] = Field( + "", description="Target language code for translation (e.g., 'es-cr')" + ) + + min_comments: Annotated[int, Field(ge=10)] = Field( + 20, description="Minimum number of comments required" + ) + + +class RedditThreadExtras(BaseModel): + min_comments: Annotated[ + int, + Field( + default=20, + ge=10, + le=999999, + description="The minimum number of comments a post should have to be included. Default is 20.", + examples=[29], + ), + ] + + +class AIConfig(BaseModel): + ai_similarity_enabled: Annotated[ + bool, + Field( + default=False, + description="Threads read from Reddit are sorted based on their similarity to the keywords given below.", + ), + ] + ai_similarity_keywords: Annotated[ + str, + Field( + default="", + description="Every keyword or sentence, separated by commas, is used to sort Reddit threads based on similarity.", + examples=["Elon Musk, Twitter, Stocks"], + ), + ] + + +class SettingsTTS(BaseModel): + voice_choice: Annotated[ + Literal[ + "elevenlabs", + "streamlabspolly", + "tiktok", + "googletranslate", + "awspolly", + "pyttsx", + ], + Field( + default="tiktok", + description="The voice platform used for TTS generation.", + examples=["tiktok"], + ), + ] + random_voice: Annotated[ + bool, + Field( + default=True, + description="Randomizes the voice used for each comment.", + examples=[True], + ), + ] + elevenlabs_voice_name: Annotated[ + Literal[ + "Adam", "Antoni", "Arnold", "Bella", "Domi", "Elli", "Josh", "Rachel", "Sam" + ], + Field( + default="Bella", + description="The voice used for ElevenLabs.", + examples=["Bella"], + ), + ] + elevenlabs_api_key: Annotated[ + str, + Field( + default="", + description="ElevenLabs API key.", + examples=["21f13f91f54d741e2ae27d2ab1b99d59"], + ), + ] + aws_polly_voice: Annotated[ + str, + Field( + default="Matthew", + description="The voice used for AWS Polly.", + examples=["Matthew"], + ), + ] + streamlabs_polly_voice: Annotated[ + str, + Field( + default="Matthew", + description="The voice used for Streamlabs Polly.", + examples=["Matthew"], + ), + ] + tiktok_voice: Annotated[ + str, + Field( + default="en_us_001", + description="The voice used for TikTok TTS.", + examples=["en_us_006"], + ), + ] + tiktok_sessionid: Annotated[ + str, + Field( + default="", + description="TikTok sessionid needed for TikTok TTS.", + examples=["c76bcc3a7625abcc27b508c7db457ff1"], + ), + ] + python_voice: Annotated[ + str, + Field( + default="1", + description="The index of the system TTS voices (starts from 0).", + examples=["1"], + ), + ] + py_voice_num: Annotated[ + str, + Field( + default="2", + description="The number of system voices available.", + examples=["2"], + ), + ] + silence_duration: Annotated[ + float, + Field( + default=0.3, + description="Time in seconds between TTS comments.", + examples=["0.1"], + ), + ] + no_emojis: Annotated[ + bool, + Field( + default=False, + description="Whether to remove emojis from the comments.", + examples=[False], + ), + ] + openai_api_url: Annotated[ + str, + Field( + default="https://api.openai.com/v1/", + description="The API endpoint URL for OpenAI TTS generation.", + examples=["https://api.openai.com/v1/"], + ), + ] + openai_api_key: Annotated[ + str, + Field( + default="", + description="Your OpenAI API key for TTS generation.", + examples=["sk-abc123def456..."], + ), + ] + openai_voice_name: 
Annotated[ + Literal[ + "alloy", "ash", "coral", "echo", "fable", "onyx", "nova", "sage", "shimmer" + ], + Field( + default="alloy", + description="The voice used for OpenAI TTS generation.", + examples=["alloy"], + ), + ] + openai_model: Annotated[ + Literal["tts-1", "tts-1-hd"], + Field( + default="tts-1", + description="The model variant used for OpenAI TTS generation.", + examples=["tts-1"], + ), + ] + + +class SettingsBackground(BaseModel): + background_video: Annotated[ + str, + Field( + default="minecraft", + description="Sets the background for the video based on game name", + examples=["rocket-league"], + ), + StringConstraints(strip_whitespace=True), + ] = "minecraft" + + background_audio: Annotated[ + str, + Field( + default="lofi", + description="Sets the background audio for the video", + examples=["chill-summer"], + ), + StringConstraints(strip_whitespace=True), + ] = "lofi" + + background_audio_volume: Annotated[ + float, + Field( + default=0.15, + ge=0, + le=1, + description="Sets the volume of the background audio. If you don't want background audio, set it to 0.", + examples=[0.05], + ), + ] = 0.15 + + enable_extra_audio: Annotated[ + bool, + Field( + default=False, + description="Used if you want to render another video without background audio in a separate folder", + ), + ] = False + + background_thumbnail: Annotated[ + bool, + Field( + default=False, + description="Generate a thumbnail for the video (put a thumbnail.png file in the assets/backgrounds directory.)", + ), + ] = False + + background_thumbnail_font_family: Annotated[ + str, + Field( + default="arial", + description="Font family for the thumbnail text", + examples=["arial"], + ), + ] = "arial" + + background_thumbnail_font_size: Annotated[ + int, + Field( + default=96, + description="Font size in pixels for the thumbnail text", + examples=[96], + ), + ] = 96 + + background_thumbnail_font_color: Annotated[ + str, + Field( + default="255,255,255", + description="Font color in RGB format for the thumbnail text", + examples=["255,255,255"], + ), + ] = "255,255,255" + + +class Settings(BaseModel): + allow_nsfw: Annotated[ + bool, + Field( + default=False, + description="Whether to allow NSFW content. True or False.", + examples=[False], + ), + ] + theme: Annotated[ + Literal["dark", "light", "transparent"], + Field( + default="dark", + description="Sets the Reddit theme. For story mode, 'transparent' is also allowed.", + examples=["light"], + ), + ] + times_to_run: Annotated[ + int, + Field( + default=1, + ge=1, + description="Used if you want to run multiple times. Must be an int >= 1.", + examples=[2], + ), + ] + opacity: Annotated[ + float, + Field( + default=0.9, + ge=0.0, + le=1.0, + description="Sets the opacity of comments when overlaid over the background.", + examples=[0.8], + ), + ] + storymode: Annotated[ + bool, + Field( + default=False, + description="Only read out title and post content. 
Great for story-based subreddits.", + examples=[False], + ), + ] + storymodemethod: Annotated[ + Literal[0, 1], + Field( + default=1, + description="Style used for story mode: 0 = static image, 1 = fancy video.", + examples=[1], + ), + ] + storymode_max_length: Annotated[ + int, + Field( + default=1000, + ge=1, + description="Max length (in characters) of the story mode video.", + examples=[1000], + ), + ] + resolution_w: Annotated[ + int, + Field( + default=1080, + description="Sets the width in pixels of the final video.", + examples=[1440], + ), + ] + resolution_h: Annotated[ + int, + Field( + default=1920, + description="Sets the height in pixels of the final video.", + examples=[2560], + ), + ] + zoom: Annotated[ + float, + Field( + default=1.0, + ge=0.1, + le=2.0, + description="Sets the browser zoom level. Useful for making text larger.", + examples=[1.1], + ), + ] + channel_name: Annotated[ + str, + Field( + default="Reddit Tales", + description="Sets the channel name for the video.", + examples=["Reddit Stories"], + ), + ] + tts: SettingsTTS + background: SettingsBackground + + +class Reddit(BaseModel): + creds: RedditCreds + thread: RedditThread + + +class Config(BaseModel): + reddit: Reddit + ai: AIConfig + settings: Settings diff --git a/utils/console.py b/utils/console.py index a9abf4b..ccd6ba8 100644 --- a/utils/console.py +++ b/utils/console.py @@ -118,3 +118,16 @@ def handle_input( console.print( "[red bold]" + err_message + "\nValid options are: " + ", ".join(map(str, options)) + "." ) + + +def format_ordinal(x): + if 10 <= x % 100 <= 20: + suffix = 'th' + else: + suffix = {1: 'st', 2: 'nd', 3: 'rd'}.get(x % 10, 'th') + return f"{x}{suffix}" + + +if __name__ == "__main__": + for i in range(20): + print(format_ordinal(i)) \ No newline at end of file diff --git a/utils/ffmpeg_install.py b/utils/ffmpeg_install.py index b41bad6..7b4d733 100644 --- a/utils/ffmpeg_install.py +++ b/utils/ffmpeg_install.py @@ -7,9 +7,7 @@ import requests def ffmpeg_install_windows(): try: - ffmpeg_url = ( - "https://github.com/GyanD/codexffmpeg/releases/download/6.0/ffmpeg-6.0-full_build.zip" - ) + ffmpeg_url = "https://github.com/GyanD/codexffmpeg/releases/download/6.0/ffmpeg-6.0-full_build.zip" ffmpeg_zip_filename = "ffmpeg.zip" ffmpeg_extracted_folder = "ffmpeg" @@ -129,14 +127,19 @@ def ffmpeg_install(): elif os.name == "mac": ffmpeg_install_mac() else: - print("Your OS is not supported. Please install FFmpeg manually and try again.") + print( + "Your OS is not supported. Please install FFmpeg manually and try again." + ) exit() else: print("Please install FFmpeg manually and try again.") exit() except Exception as e: print( - "Welcome fellow traveler! You're one of the few who have made it this far. We have no idea how you got at this error, but we're glad you're here. Please report this error to the developer, and we'll try to fix it as soon as possible. Thank you for your patience!" + "Welcome fellow traveler! 
You're one of the few who have made it this far.", + "We have no idea how you got at this error, but we're glad you're here.", + "Please report this error to the developer, and we'll try to fix it as soon as possible.", + "Thank you for your patience!", ) print(e) return None diff --git a/utils/imagenarator.py b/utils/imagenarator.py index ad75331..61bcad4 100644 --- a/utils/imagenarator.py +++ b/utils/imagenarator.py @@ -20,7 +20,9 @@ def draw_multiple_line_text( font_height = getheight(font, text) image_width, image_height = image.size lines = textwrap.wrap(text, width=wrap) - y = (image_height / 2) - (((font_height + (len(lines) * padding) / len(lines)) * len(lines)) / 2) + y = (image_height / 2) - ( + ((font_height + (len(lines) * padding) / len(lines)) * len(lines)) / 2 + ) for line in lines: line_width, line_height = getsize(font, line) if transparent: @@ -54,7 +56,14 @@ def draw_multiple_line_text( y += line_height + padding -def imagemaker(theme, reddit_obj: dict, txtclr, padding=5, transparent=False) -> None: +def imagemaker( + theme, + reddit_obj: dict, + txtclr, + size: tuple[int, int], + padding=5, + transparent=False, +) -> None: """ Render Images for video """ @@ -65,10 +74,10 @@ def imagemaker(theme, reddit_obj: dict, txtclr, padding=5, transparent=False) -> else: font = ImageFont.truetype(os.path.join("fonts", "Roboto-Regular.ttf"), 100) - size = (1920, 1080) - for idx, text in track(enumerate(texts), "Rendering Image"): image = Image.new("RGBA", size, theme) text = process_text(text, False) - draw_multiple_line_text(image, text, font, txtclr, padding, wrap=30, transparent=transparent) + draw_multiple_line_text( + image, text, font, txtclr, padding, wrap=30, transparent=transparent + ) image.save(f"assets/temp/{reddit_id}/png/img{idx}.png") diff --git a/utils/settings.py b/utils/settings.py index 6b8242b..d489e5d 100755 --- a/utils/settings.py +++ b/utils/settings.py @@ -1,170 +1,163 @@ -import re +import sys from pathlib import Path -from typing import Dict, Tuple +from typing import Any, Dict import toml from rich.console import Console -from utils.console import handle_input +from utils.config_model import Config +from utils.console import print_substep console = Console() -config = dict # autocomplete - - -def crawl(obj: dict, func=lambda x, y: print(x, y, end="\n"), path=None): - if path is None: # path Default argument value is mutable - path = [] - for key in obj.keys(): - if type(obj[key]) is dict: - crawl(obj[key], func, path + [key]) +config: dict # autocomplete +from typing import Any + +from pydantic import ValidationError, BaseModel +from pydantic_core import PydanticUndefined + + +def prompt_recursive(obj: BaseModel): + """ + Recursively prompt for missing or invalid fields in a Pydantic model instance 'obj'. 
+ """ + for field_name, field in obj.model_fields.items(): + value = getattr(obj, field_name, None) + # If field is a nested BaseModel, recurse into it + if hasattr(field.annotation, "model_fields"): + nested_obj = value or field.annotation.model_construct() + fixed_nested = prompt_recursive(nested_obj) + setattr(obj, field_name, fixed_nested) continue - func(path + [key], obj[key]) - -def check(value, checks, name): - def get_check_value(key, default_result): - return checks[key] if key in checks else default_result - - incorrect = False - if value == {}: - incorrect = True - if not incorrect and "type" in checks: - try: - value = eval(checks["type"])(value) # fixme remove eval - except: - incorrect = True + # If the value is valid and not None, skip prompt + if value not in [None, "", [], {}]: + continue - if ( - not incorrect and "options" in checks and value not in checks["options"] - ): # FAILSTATE Value is not one of the options - incorrect = True - if ( - not incorrect - and "regex" in checks - and ( - (isinstance(value, str) and re.match(checks["regex"], value) is None) - or not isinstance(value, str) + description = field.description or "" + default_str = ( + f" (default: {field.default})" + if (field.default is not None) or field.default == PydanticUndefined + else "" ) - ): # FAILSTATE Value doesn't match regex, or has regex but is not a string. - incorrect = True - - if ( - not incorrect - and not hasattr(value, "__iter__") - and ( - ("nmin" in checks and checks["nmin"] is not None and value < checks["nmin"]) - or ("nmax" in checks and checks["nmax"] is not None and value > checks["nmax"]) - ) - ): - incorrect = True - if ( - not incorrect - and hasattr(value, "__iter__") - and ( - ("nmin" in checks and checks["nmin"] is not None and len(value) < checks["nmin"]) - or ("nmax" in checks and checks["nmax"] is not None and len(value) > checks["nmax"]) - ) - ): - incorrect = True - - if incorrect: - value = handle_input( - message=( - (("[blue]Example: " + str(checks["example"]) + "\n") if "example" in checks else "") - + "[red]" - + ("Non-optional ", "Optional ")["optional" in checks and checks["optional"] is True] - ) - + "[#C0CAF5 bold]" - + str(name) - + "[#F7768E bold]=", - extra_info=get_check_value("explanation", ""), - check_type=eval(get_check_value("type", "False")), # fixme remove eval - default=get_check_value("default", NotImplemented), - match=get_check_value("regex", ""), - err_message=get_check_value("input_error", "Incorrect input"), - nmin=get_check_value("nmin", None), - nmax=get_check_value("nmax", None), - oob_error=get_check_value( - "oob_error", "Input out of bounds(Value too high/low/long/short)" - ), - options=get_check_value("options", None), - optional=get_check_value("optional", False), - ) - return value - + prompt_msg = f"🧩 {field_name}\n 📘 {description}{default_str}\n ⚠️ Required: {field.is_required()}\n ❓ Enter value: " + + while True: + user_input = input(prompt_msg).strip() + if not user_input: + if field.default is not None: + value_to_set = field.default + elif not field.required: + value_to_set = None + else: + print("⚠️ This field is required.") + continue + else: + # Convert input based on type, you can expand this logic + try: + value_to_set = parse_value(user_input, field.annotation) + except Exception as e: + print(f"⚠️ Invalid input: {e}") + continue + + # Validate the assignment + try: + obj.__pydantic_validator__.validate_assignment( + obj, field_name, value_to_set + ) + setattr(obj, field_name, value_to_set) + break + except 
ValidationError as ve: + for err in ve.errors(): + print(f"❌ {err['loc'][0]}: {err['msg']}") -def crawl_and_check(obj: dict, path: list, checks: dict = {}, name=""): - if len(path) == 0: - return check(obj, checks, name) - if path[0] not in obj.keys(): - obj[path[0]] = {} - obj[path[0]] = crawl_and_check(obj[path[0]], path[1:], checks, path[0]) return obj -def check_vars(path, checks): - global config - crawl_and_check(config, path, checks) +def parse_value(raw: str, expected_type: type): + from typing import get_args, get_origin + origin = get_origin(expected_type) + args = get_args(expected_type) -def check_toml(template_file, config_file) -> Tuple[bool, Dict]: - global config - config = None - try: - template = toml.load(template_file) - except Exception as error: - console.print(f"[red bold]Encountered error when trying to to load {template_file}: {error}") - return False - try: - config = toml.load(config_file) - except toml.TomlDecodeError: - console.print( - f"""[blue]Couldn't read {config_file}. -Overwrite it?(y/n)""" - ) - if not input().startswith("y"): - print("Unable to read config, and not allowed to overwrite it. Giving up.") + if expected_type == bool: + if raw.lower() in ("true", "yes", "1"): + return True + elif raw.lower() in ("false", "no", "0"): return False else: - try: - with open(config_file, "w") as f: - f.write("") - except: - console.print( - f"[red bold]Failed to overwrite {config_file}. Giving up.\nSuggestion: check {config_file} permissions for the user." - ) - return False - except FileNotFoundError: - console.print( - f"""[blue]Couldn't find {config_file} -Creating it now.""" - ) - try: - with open(config_file, "x") as f: - f.write("") - config = {} - except: - console.print( - f"[red bold]Failed to write to {config_file}. Giving up.\nSuggestion: check the folder's permissions for the user." - ) - return False + raise ValueError("Expected boolean value (true/false)") + elif expected_type == int: + return int(raw) + elif expected_type == float: + return float(raw) + elif expected_type == str: + return raw + elif origin == list and args: + return [parse_value(x.strip(), args[0]) for x in raw.split(",")] + else: + raise ValueError(f"Unsupported field type: {expected_type}") + + +def check_toml(template_file: str, config_file: str): + """ + Load the template and config TOML files. + Validate config with Pydantic. + If invalid, prompt for missing or invalid fields. + Save fixed config back. + Return the valid Config model. 
+ """ + try: + config_dict = toml.load(config_file) + except Exception as e: + print(f"Failed to load config {config_file}: {e}") + config_dict = {} - console.print( - """\ -[blue bold]############################### -# # -# Checking TOML configuration # -# # -############################### -If you see any prompts, that means that you have unset/incorrectly set variables, please input the correct values.\ -""" - ) - crawl(template, check_vars) - with open(config_file, "w") as f: - toml.dump(config, f) + try: + config_instance = Config.model_validate(config_dict) + except ValidationError as e: + print("Config validation failed, will prompt for missing/invalid fields:") + print(e) + # Start from a clean model + config_instance = Config.model_construct() + # Update model with any valid partial data loaded from config + for k, v in config_dict.items(): + if hasattr(config_instance, k): + setattr(config_instance, k, v) + + # Prompt for missing or invalid fields recursively + config_instance = prompt_recursive(config_instance) + + # Validate again to be sure + config_instance = Config.model_validate(config_instance.model_dump()) + + # Save fixed config back to file + with open(config_file, "w", encoding="utf-8") as f: + toml.dump(config_instance.model_dump(), f) + print(f"Updated config saved to {config_file}") + config = config_instance.model_dump() return config if __name__ == "__main__": directory = Path().absolute() check_toml(f"{directory}/utils/.config.template.toml", "config.toml") + + +def get_config() -> Dict[str, Any]: + directory = Path().absolute() + config = check_toml( + f"{directory}/utils/.config.template.toml", f"{directory}/config.toml" + ) + if not config: + sys.exit() + + if ( + not config["settings"]["tts"]["tiktok_sessionid"] + or config["settings"]["tts"]["tiktok_sessionid"] == "" + ) and config["settings"]["tts"]["voice_choice"] == "tiktok": + print_substep( + "TikTok voice requires a sessionid! Check our documentation on how to obtain one.", + "bold red", + ) + sys.exit() + return config diff --git a/utils/version.py b/utils/version.py index 0818c87..e8b095f 100644 --- a/utils/version.py +++ b/utils/version.py @@ -1,3 +1,4 @@ +import sys import requests from utils.console import print_step @@ -19,3 +20,12 @@ def checkversion(__VERSION__: str): print_step( f"Welcome to the test version ({__VERSION__}) of the bot. Thanks for testing and feel free to report any bugs you find." ) + + +def check_python() -> None: + minor_versions = [10, 11, 12, 13] + if sys.version_info.major != 3 or sys.version_info.minor not in minor_versions: + print( + f"Hey! Congratulations, you've made it so far (which is pretty rare with no Python 3.{minor_versions}). Unfortunately, this program only works on Python 3.{minor_versions}. Please install Python 3.{minor_versions} and try again." 
+ ) + sys.exit() diff --git a/video_creation/__init__.py b/video_creation/__init__.py index e69de29..11a33cd 100644 --- a/video_creation/__init__.py +++ b/video_creation/__init__.py @@ -0,0 +1,13 @@ +from .create_fancy_thumbnail import create_fancy_thumbnail +from .background import ( + download_background_audio, + download_background_video, + load_background_options, + get_start_and_end_times, + get_background_config, + chop_background, + ffmpeg_extract_subclip, +) + +from .final_video import create_thumbnail, make_final_video +from .screenshot_downloader import get_screenshots_of_reddit_posts diff --git a/video_creation/create_fancy_thumbnail.py b/video_creation/create_fancy_thumbnail.py new file mode 100644 index 0000000..fbe27c1 --- /dev/null +++ b/video_creation/create_fancy_thumbnail.py @@ -0,0 +1,68 @@ +from utils import settings +from utils.console import print_step +from video_creation.final_video import get_text_height + + +from PIL import Image, ImageDraw, ImageFont + + +import os +import textwrap + + +def create_fancy_thumbnail(image, text, text_color, padding, wrap=35): + """ + Take the 1px slice from the middle of the template and stretch it vertically to accommodate the extra height needed for the title. + """ + print_step(f"Creating fancy thumbnail for: {text}") + font_title_size = 47 + font = ImageFont.truetype(os.path.join("fonts", "Roboto-Bold.ttf"), font_title_size) + image_width, image_height = image.size + + # Calculate text height to determine new image height + draw = ImageDraw.Draw(image) + text_height = get_text_height(draw, text, font, wrap) + lines = textwrap.wrap(text, width=wrap) + # The -50 reduces the empty space at the bottom of the image; + # change it as per your requirements if needed, otherwise leave it.
+ new_image_height = image_height + text_height + padding * (len(lines) - 1) - 50 + + # Separate the image into top, middle (1px), and bottom parts + top_part_height = image_height // 2 + middle_part_height = 1 # 1px height middle section + bottom_part_height = image_height - top_part_height - middle_part_height + + top_part = image.crop((0, 0, image_width, top_part_height)) + middle_part = image.crop((0, top_part_height, image_width, top_part_height + middle_part_height)) + bottom_part = image.crop((0, top_part_height + middle_part_height, image_width, image_height)) + + # Stretch the middle part + new_middle_height = new_image_height - top_part_height - bottom_part_height + middle_part = middle_part.resize((image_width, new_middle_height)) + + # Create new image with the calculated height + new_image = Image.new("RGBA", (image_width, new_image_height)) + + # Paste the top, stretched middle, and bottom parts into the new image + new_image.paste(top_part, (0, 0)) + new_image.paste(middle_part, (0, top_part_height)) + new_image.paste(bottom_part, (0, top_part_height + new_middle_height)) + + # Draw the title text on the new image + draw = ImageDraw.Draw(new_image) + y = top_part_height + padding + for line in lines: + draw.text((120, y), line, font=font, fill=text_color, align="left") + y += get_text_height(draw, line, font, wrap) + padding + + # Draw the username "PlotPulse" at the specific position + username_font = ImageFont.truetype(os.path.join("fonts", "Roboto-Bold.ttf"), 30) + draw.text( + (205, 825), + settings.config["settings"]["channel_name"], + font=username_font, + fill=text_color, + align="left", + ) + + return new_image \ No newline at end of file diff --git a/video_creation/final_video.py b/video_creation/final_video.py index c8be6f5..e685741 100644 --- a/video_creation/final_video.py +++ b/video_creation/final_video.py @@ -11,17 +11,18 @@ from typing import Dict, Final, Tuple import ffmpeg import translators -from PIL import Image, ImageDraw, ImageFont +from PIL import Image from rich.console import Console from rich.progress import track +from tqdm import tqdm from utils import settings from utils.cleanup import cleanup from utils.console import print_step, print_substep -from utils.fonts import getheight from utils.id import extract_id from utils.thumbnail import create_thumbnail from utils.videos import save_data +from video_creation.create_fancy_thumbnail import create_fancy_thumbnail console = Console() @@ -80,8 +81,8 @@ def name_normalize(name: str) -> str: print_substep("Translating filename...") translated_name = translators.translate_text(name, translator="google", to_language=lang) return translated_name - else: - return name + + return name def prepare_background(reddit_id: str, W: int, H: int) -> str: @@ -94,7 +95,7 @@ def prepare_background(reddit_id: str, W: int, H: int) -> str: an=None, **{ "c:v": "h264_nvenc", - "b:v": "20M", + "b:v": "5M", "b:a": "192k", "threads": multiprocessing.cpu_count(), }, @@ -118,65 +119,7 @@ def get_text_height(draw, text, font, max_width): return total_height -def create_fancy_thumbnail(image, text, text_color, padding, wrap=35): - """ - It will take the 1px from the middle of the template and will be resized (stretched) vertically to accommodate the extra height needed for the title. 
- """ - print_step(f"Creating fancy thumbnail for: {text}") - font_title_size = 47 - font = ImageFont.truetype(os.path.join("fonts", "Roboto-Bold.ttf"), font_title_size) - image_width, image_height = image.size - - # Calculate text height to determine new image height - draw = ImageDraw.Draw(image) - text_height = get_text_height(draw, text, font, wrap) - lines = textwrap.wrap(text, width=wrap) - # This are -50 to reduce the empty space at the bottom of the image, - # change it as per your requirement if needed otherwise leave it. - new_image_height = image_height + text_height + padding * (len(lines) - 1) - 50 - - # Separate the image into top, middle (1px), and bottom parts - top_part_height = image_height // 2 - middle_part_height = 1 # 1px height middle section - bottom_part_height = image_height - top_part_height - middle_part_height - - top_part = image.crop((0, 0, image_width, top_part_height)) - middle_part = image.crop((0, top_part_height, image_width, top_part_height + middle_part_height)) - bottom_part = image.crop((0, top_part_height + middle_part_height, image_width, image_height)) - - # Stretch the middle part - new_middle_height = new_image_height - top_part_height - bottom_part_height - middle_part = middle_part.resize((image_width, new_middle_height)) - - # Create new image with the calculated height - new_image = Image.new("RGBA", (image_width, new_image_height)) - - # Paste the top, stretched middle, and bottom parts into the new image - new_image.paste(top_part, (0, 0)) - new_image.paste(middle_part, (0, top_part_height)) - new_image.paste(bottom_part, (0, top_part_height + new_middle_height)) - - # Draw the title text on the new image - draw = ImageDraw.Draw(new_image) - y = top_part_height + padding - for line in lines: - draw.text((120, y), line, font=font, fill=text_color, align="left") - y += get_text_height(draw, line, font, wrap) + padding - - # Draw the username "PlotPulse" at the specific position - username_font = ImageFont.truetype(os.path.join("fonts", "Roboto-Bold.ttf"), 30) - draw.text( - (205, 825), - settings.config["settings"]["channel_name"], - font=username_font, - fill=text_color, - align="left", - ) - - return new_image - - -def merge_background_audio(audio: ffmpeg, reddit_id: str): +def merge_background_audio(audio, reddit_id: str): """Gather an audio and merge with assets/backgrounds/background.mp3 Args: audio (ffmpeg): The TTS final audio but without background. @@ -195,6 +138,40 @@ def merge_background_audio(audio: ffmpeg, reddit_id: str): merged_audio = ffmpeg.filter([audio, bg_audio], "amix", duration="longest") return merged_audio # Return merged audio +def make_tts_only_video(length, background_clip, audio, filename, on_update_example, defaultPath): + pbar = pbar = tqdm(total=100, desc="Progress: ", bar_format="{l_bar}{bar}", unit=" %") + path = defaultPath + f"/OnlyTTS/{filename}" + path = ( + path[:251] + ".mp4" + ) # Prevent a error by limiting the path length, do not change this. 
+ print_step("Rendering the Only TTS Video 🎥") + with ProgressFfmpeg(length, on_update_example) as progress: + try: + ffmpeg.output( + background_clip, + audio, + path, + f="mp4", + **{ + "c:v": "h264_nvenc", + "b:v": "20M", + "b:a": "192k", + "threads": multiprocessing.cpu_count(), + }, + ).overwrite_output().global_args("-progress", progress.output_file.name).run( + quiet=True, + overwrite_output=True, + capture_stdout=False, + capture_stderr=False, + ) + except ffmpeg.Error as e: + print(e.stderr.decode("utf8")) + exit(1) + + old_percentage = pbar.n + pbar.update(100 - old_percentage) + pbar.close() + def make_final_video( number_of_clips: int, @@ -277,20 +254,16 @@ def make_final_video( # get the title_template image and draw a text in the middle part of it with the title of the thread title_template = Image.open("assets/title_template.png") - title = reddit_obj["thread_title"] - - title = name_normalize(title) - - font_color = "#000000" - padding = 5 + title_path = f"assets/temp/{reddit_id}/png/title.png" # create_fancy_thumbnail(image, text, text_color, padding - title_img = create_fancy_thumbnail(title_template, title, font_color, padding) + title = name_normalize(reddit_obj["thread_title"]) + title_img = create_fancy_thumbnail(title_template, title, "#000000", 5) + title_img.save(title_path) - title_img.save(f"assets/temp/{reddit_id}/png/title.png") image_clips.insert( 0, - ffmpeg.input(f"assets/temp/{reddit_id}/png/title.png")["v"].filter( + ffmpeg.input(title_path)["v"].filter( "scale", screenshot_width, -1 ), ) @@ -416,17 +389,16 @@ def make_final_video( ) background_clip = background_clip.filter("scale", W, H) print_step("Rendering the video 🎥") - from tqdm import tqdm pbar = tqdm(total=100, desc="Progress: ", bar_format="{l_bar}{bar}", unit=" %") - def on_update_example(progress) -> None: + def on_update(progress) -> None: status = round(progress * 100, 2) old_percentage = pbar.n pbar.update(status - old_percentage) defaultPath = f"results/{subreddit}" - with ProgressFfmpeg(length, on_update_example) as progress: + with ProgressFfmpeg(length, on_update) as progress: path = defaultPath + f"/{filename}" path = ( path[:251] + ".mp4" @@ -438,8 +410,8 @@ def make_final_video( path, f="mp4", **{ - "c:v": "h264_nvenc", - "b:v": "20M", + "c:v": "h264", + "b:v": "5M", "b:a": "192k", "threads": multiprocessing.cpu_count(), }, @@ -454,40 +426,14 @@ def make_final_video( exit(1) old_percentage = pbar.n pbar.update(100 - old_percentage) + pbar.close() + if allowOnlyTTSFolder: - path = defaultPath + f"/OnlyTTS/{filename}" - path = ( - path[:251] + ".mp4" - ) # Prevent a error by limiting the path length, do not change this. 
- print_step("Rendering the Only TTS Video 🎥") - with ProgressFfmpeg(length, on_update_example) as progress: - try: - ffmpeg.output( - background_clip, - audio, - path, - f="mp4", - **{ - "c:v": "h264_nvenc", - "b:v": "20M", - "b:a": "192k", - "threads": multiprocessing.cpu_count(), - }, - ).overwrite_output().global_args("-progress", progress.output_file.name).run( - quiet=True, - overwrite_output=True, - capture_stdout=False, - capture_stderr=False, - ) - except ffmpeg.Error as e: - print(e.stderr.decode("utf8")) - exit(1) + make_tts_only_video(length, background_clip, audio, filename, on_update, defaultPath) - old_percentage = pbar.n - pbar.update(100 - old_percentage) - pbar.close() save_data(subreddit, filename + ".mp4", title, idx, background_config["video"][2]) print_step("Removing temporary files 🗑") cleanups = cleanup(reddit_id) print_substep(f"Removed {cleanups} temporary files 🗑") print_step("Done! 🎉 The video is in the results folder 📁") + diff --git a/video_creation/screenshot_downloader.py b/video_creation/screenshot_downloader.py index 8dafaf6..8ab178d 100644 --- a/video_creation/screenshot_downloader.py +++ b/video_creation/screenshot_downloader.py @@ -13,8 +13,6 @@ from utils.imagenarator import imagemaker from utils.playwright import clear_cookie_by_name from utils.videos import save_data -__all__ = ["get_screenshots_of_reddit_posts"] - def get_screenshots_of_reddit_posts(reddit_object: dict, screenshot_num: int): """Downloads screenshots of reddit posts as seen on the web. Downloads to assets/temp/png @@ -64,11 +62,11 @@ def get_screenshots_of_reddit_posts(reddit_object: dict, screenshot_num: int): return imagemaker( theme=bgcolor, reddit_obj=reddit_object, + size=(W,H), txtclr=txtcolor, transparent=transparent, ) - screenshot_num: int with sync_playwright() as p: print_substep("Launching Headless Browser...")