diff --git a/TTS/engine_wrapper.py b/TTS/engine_wrapper.py index 8241c01..209ce3d 100644 --- a/TTS/engine_wrapper.py +++ b/TTS/engine_wrapper.py @@ -93,8 +93,12 @@ class TTSEngine: self.call_tts(f"postaudio-{idx}", process_text(text)) else: + comments = [] os.makedirs("assets/temp/" + self.redditid + "/png", exist_ok=True) for idx, comment in track(enumerate(self.reddit_object["comments"]), "Saving..."): + # TODO: Maybe move this somewhere better? + comments.append(comment["comment_body"]) + # ! Stop creating mp3 files if the length is greater than max length. if self.length > self.max_length and idx > 1: self.length -= self.last_clip_length @@ -103,12 +107,11 @@ class TTSEngine: if ( len(comment["comment_body"]) > self.tts_module.max_chars ): # Split the comment if it is too long - self.split_post(comment["comment_body"], idx) # Split the comment + self.split_post(comment["comment_body"], idx) # Split the comment else: # If the comment is not too long, just call the tts engine self.call_tts(f"{idx}", process_text(comment["comment_body"])) - # TODO: Maybe move this somewhere better? 
- comment_image_maker((0, 0, 0, 0), self.reddit_object, comment["comment_body"], idx, (255, 255, 255), transparent=True) + comment_image_maker((0, 0, 0, 0), self.reddit_object, comments, (255, 255, 255), transparent=True) print_substep("Saved Text to MP3 files successfully.", style="bold green") return self.length, idx @@ -158,7 +161,7 @@ class TTSEngine: self.tts_module.run( text, filepath=f"{self.path}/(unknown).mp3", - random_voice=settings.config["settings"]["tts"]["random_voice"], + random_voice=settings.config["settings"]["tts"]["random_voice"], ) # try: # self.length += MP3(f"{self.path}/(unknown).mp3").info.length diff --git a/utils/imagenarator.py b/utils/imagenarator.py index da53328..0f42876 100644 --- a/utils/imagenarator.py +++ b/utils/imagenarator.py @@ -6,6 +6,7 @@ from PIL import Image, ImageDraw, ImageFont from rich.progress import track from TTS.engine_wrapper import process_text +from utils.console import print_step, print_substep from utils.process_post import split_text @@ -77,20 +78,28 @@ def imagemaker(theme, reddit_obj: dict, txtclr, padding=5, transparent=False) -> draw_multiple_line_text(image, text, font, txtclr, padding, wrap=30, transparent=transparent) image.save(f"assets/temp/{id}/png/img{idx}.png") -def comment_image_maker(theme, reddit_obj: dict, text, idx, txtclr, padding=5, transparent=False) -> None: +def comment_image_maker(theme, reddit_obj: dict, comments, txtclr, padding=5, transparent=False) -> None: """ Render Images for video """ id = re.sub(r"[^\w\s-]", "", reddit_obj["thread_id"]) if transparent: - font = ImageFont.truetype(os.path.join("fonts", "Roboto-Bold.ttf"), 100) + font = ImageFont.truetype(os.path.join("fonts", "Roboto-Bold.ttf"), 100) else: - font = ImageFont.truetype(os.path.join("fonts", "Roboto-Regular.ttf"), 100) + font = ImageFont.truetype(os.path.join("fonts", "Roboto-Regular.ttf"), 100) size = (1920, 1080) - #for idx, text in track(enumerate(texts), "Rendering Image"): - image = Image.new("RGBA", size, 
theme) - text = process_text(text, False) - draw_multiple_line_text(image, text, font, txtclr, padding, wrap=30, transparent=transparent) - image.save(f"assets/temp/{id}/png/comment_{idx}.png") \ No newline at end of file + print_step("Rendering Images") + + i = 0 + for comment in comments: + texts = split_text(comment) + for text in texts: + image = Image.new("RGBA", size, theme) + text = process_text(text, False) + draw_multiple_line_text(image, text, font, txtclr, padding, wrap=30, transparent=transparent) + print_substep("Made image: " + str(i)) + image.save(f"assets/temp/{id}/png/comment_{i}.png") + + i+=1 \ No newline at end of file diff --git a/utils/subreddit.py b/utils/subreddit.py index c5818d2..eb13d67 100644 --- a/utils/subreddit.py +++ b/utils/subreddit.py @@ -90,7 +90,7 @@ def get_subreddit_undone(submissions: list, subreddit, times_checked=0, similari ), subreddit, times_checked=index, - ) # all the videos in hot have already been done + ) # all the videos in top have already been done def already_done(done_videos: list, submission) -> bool: diff --git a/video_creation/final_video.py b/video_creation/final_video.py index 8277844..7029df6 100644 --- a/video_creation/final_video.py +++ b/video_creation/final_video.py @@ -328,20 +328,37 @@ def make_final_video( ) current_time += time_for_clip else: - for i in range(0, number_of_clips + 1): + total_audio_duration = float( + ffmpeg.probe(f"assets/temp/{reddit_id}/audio.mp3")["format"]["duration"] + ) - float( + ffmpeg.probe(f"assets/temp/{reddit_id}/mp3/title.mp3")["format"]["duration"] + ) + + dir_path = f"assets/temp/{reddit_id}/png" + num_files = len( + [ + f for f in os.listdir(dir_path) if os.path.isfile(os.path.join(dir_path, f)) + ] + ) + + for i in range(0, num_files): + time_for_clip = total_audio_duration/(num_files) image_clips.append( ffmpeg.input(f"assets/temp/{reddit_id}/png/comment_{i}.png")["v"].filter( "scale", screenshot_width, -1 ) ) + if i == 0: + time_for_clip = 
float(ffmpeg.probe(f"assets/temp/{reddit_id}/mp3/title.mp3")["format"]["duration"]) + image_overlay = image_clips[i].filter("colorchannelmixer", aa=opacity) background_clip = background_clip.overlay( image_overlay, - enable=f"between(t,{current_time},{current_time + audio_clips_durations[i]})", + enable=f"between(t,{current_time},{current_time + time_for_clip})", x="(main_w-overlay_w)/2", y="(main_h-overlay_h)/2", ) - current_time += audio_clips_durations[i] + current_time += time_for_clip title = re.sub(r"[^\w\s-]", "", reddit_obj["thread_title"]) idx = re.sub(r"[^\w\s-]", "", reddit_obj["thread_id"])