syncing with words fixed on storymode 1

pull/2041/head
cyteon 1 year ago
parent 7c31473d96
commit 39544805d0

@ -33,6 +33,7 @@ resolution_w = { optional = false, default = 1080, example = 1440, explanation
resolution_h = { optional = false, default = 1920, example = 2560, explanation = "Sets the height in pixels of the final video" }
zoom = { optional = true, default = 1, example = 1.1, explanation = "Sets the browser zoom level. Useful if you want the text larger.", type = "float", nmin = 0.1, nmax = 2, oob_error = "The text is really difficult to read at a zoom level higher than 2" }
channel_name = { optional = true, default = "Reddit Tales", example = "Reddit Stories", explanation = "Sets the username that appears in the fancy title screen" }
words_on_screen = { optional = true, default = 10, example = 1, explanation = "Sets how many words should be shown on screen" }
[settings.background]
background_video = { optional = true, default = "minecraft", example = "rocket-league", options = ["minecraft", "gta", "rocket-league", "motor-gta", "csgo-surf", "cluster-truck", "minecraft-2","multiversus","fall-guys","steep", ""], explanation = "Sets the background for the video based on game name" }

@ -2,12 +2,11 @@ import os
import re
import textwrap
from utils import settings
from PIL import Image, ImageDraw, ImageFont
from rich.progress import track
from TTS.engine_wrapper import process_text
from utils.process_post import split_text
def draw_multiple_line_text(
@ -60,10 +59,12 @@ def imagemaker(theme, reddit_obj: dict, txtclr, padding=5, transparent=False) ->
"""
Render Images for video
"""
title = process_text(reddit_obj["thread_title"], False)
texts = reddit_obj["thread_post"]
id = re.sub(r"[^\w\s-]", "", reddit_obj["thread_id"])
texts = split_text(" ".join(texts))
if transparent:
font = ImageFont.truetype(os.path.join("fonts", "Roboto-Bold.ttf"), 100)
else:

@ -0,0 +1,16 @@
import re
from typing import List
from utils import settings
# Splits story-mode post text into fixed-size word groups (one group per rendered image).
def split_text(obj: str, words_on_screen=None) -> List[str]:
    """Split post text into groups of at most ``words_on_screen`` words.

    Each group becomes one on-screen caption image in story mode.

    Args:
        obj: The full post text (may contain newlines).
        words_on_screen: Maximum words per group. When ``None`` (the
            default, preserving the original behavior), the value is read
            from ``settings.config["settings"]["words_on_screen"]``.

    Returns:
        A list of space-joined word groups; empty list for empty/blank input.
    """
    if words_on_screen is None:
        words_on_screen = settings.config["settings"]["words_on_screen"]
    # Flatten newlines so splitting on whitespace yields clean words;
    # str.split() with no args also collapses runs of whitespace.
    words = obj.replace("\n", " ").split()
    return [
        " ".join(words[i : i + words_on_screen])
        for i in range(0, len(words), words_on_screen)
    ]

@ -20,6 +20,7 @@ from utils.cleanup import cleanup
from utils.console import print_step, print_substep
from utils.thumbnail import create_thumbnail
from utils.videos import save_data
from utils.process_post import split_text
from pathlib import Path
@ -112,7 +113,7 @@ def prepare_background(reddit_id: str, W: int, H: int) -> str:
# The following function is based on code under: Copyright 2024 beingbored (aka. Tim), MIT License, permission granted to use, copy, modify, and distribute.
def create_fancy_thumbnail(image, text, text_color, padding, wrap=35):
print_step(f"Creating fancy thumbnail for {text}")
print_step(f"Creating fancy thumbnail for: {text}")
font_title_size = 47
font = ImageFont.truetype(os.path.join("fonts", "Roboto-Bold.ttf"), font_title_size)
image_width, image_height = image.size
@ -299,19 +300,33 @@ def make_final_video(
)
current_time += audio_clips_durations[0]
elif settings.config["settings"]["storymodemethod"] == 1:
for i in track(range(0, number_of_clips + 1), "Collecting the image files..."):
total_audio_duration = float(
ffmpeg.probe(f"assets/temp/{reddit_id}/audio.mp3")["format"]["duration"]
) - float(
ffmpeg.probe(f"assets/temp/{reddit_id}/mp3/title.mp3")["format"]["duration"]
)
text = " ".join(reddit_obj["thread_post"])
number_of_clips_splitted = len(split_text(text))
# TODO: Fix that it sometimes goes out of sync
for i in track(range(0, number_of_clips_splitted + 1), "Collecting the image files..."):
time_for_clip = total_audio_duration/(number_of_clips_splitted+1)
if i == 0:
time_for_clip = float(ffmpeg.probe(f"assets/temp/{reddit_id}/mp3/title.mp3")["format"]["duration"])
image_clips.append(
ffmpeg.input(f"assets/temp/{reddit_id}/png/img{i}.png")["v"].filter(
"scale", screenshot_width, -1
)
)
background_clip = background_clip.overlay(
image_clips[i],
enable=f"between(t,{current_time},{current_time + audio_clips_durations[i]})",
enable=f"between(t,{current_time},{current_time + time_for_clip})",
x="(main_w-overlay_w)/2",
y="(main_h-overlay_h)/2",
)
current_time += audio_clips_durations[i]
current_time += time_for_clip
else:
for i in range(0, number_of_clips + 1):
image_clips.append(

Loading…
Cancel
Save