moved vars in config, fixes in clip indexes & cal_tts in audio length

pull/963/head
Drugsosos 2 years ago
parent a9775f348a
commit 0b99c69110
No known key found for this signature in database
GPG Key ID: 8E35176FE617E28D

@@ -25,10 +25,17 @@ theme = { optional = false, default = "dark", example = "light", options = ["dar
     "light",
 ], explanation = "sets the Reddit theme, either LIGHT or DARK" }
 times_to_run = { optional = false, default = 1, example = 2, explanation = "used if you want to run multiple times. set to an int e.g. 4 or 29 or 1", type = "int", nmin = 1, oob_error = "It's very hard to run something less than once." }
-opacity = { optional = false, default = 0.9, example = 0.8, explanation = "Sets the opacity of the comments when overlayed over the background", type = "float", nmin = 0, nmax = 1, oob_error = "The opacity HAS to be between 0 and 1", input_error = "The opacity HAS to be a decimal number between 0 and 1" }
+opacity = { optional = false, default = 90, example = 80, explanation = "Sets the opacity (in percent) of the comments when overlaid over the background", type = "int", nmin = 10, nmax = 100, oob_error = "The opacity HAS to be between 10 and 100 percent", input_error = "The opacity HAS to be a number between 10 and 100" }
 storymode = { optional = true, type = "bool", default = false, example = false, options = [true,
     false,
 ], explanation = "not yet implemented" }
+video_length = { optional = false, default = 50, example = 60, explanation = "Approximate final video length", type = "int", nmin = 15, oob_error = "15 seconds is short enough" }
+time_before_first_picture = { optional = false, default = 0.5, example = 1.0, explanation = "Delay before the first screenshot appears", type = "float", nmin = 0, oob_error = "Choose at least 0 seconds" }
+time_before_tts = { optional = false, default = 0.5, example = 1.0, explanation = "Delay between the screenshot and the TTS", type = "float", nmin = 0, oob_error = "Choose at least 0 seconds" }
+time_between_pictures = { optional = false, default = 0.5, example = 1.0, explanation = "Time between every screenshot", type = "float", nmin = 0, oob_error = "Choose at least 0 seconds" }
+delay_before_end = { optional = false, default = 0.5, example = 1.0, explanation = "Delay before the video ends", type = "float", nmin = 0, oob_error = "Choose at least 0 seconds" }
+video_width = { optional = true, default = 1080, example = 1080, explanation = "Final video width", type = "int", nmin = 600, oob_error = "Choose at least 600 pixels wide" }
+video_height = { optional = true, default = 1920, example = 1920, explanation = "Final video height", type = "int", nmin = 800, oob_error = "Choose at least 800 pixels tall" }
 [settings.background]
 background_choice = { optional = true, default = "minecraft", example = "minecraft", options = ["minecraft", "gta", "rocket-league", "motor-gta", ""], explanation = "Sets the background for the video" }

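Note on the new keys: they are consumed further down in this diff (engine_wrapper.py and final_video.py). A minimal sketch of how the values are read, assuming the template above is loaded into settings.config by utils/settings.py as in the rest of the repo:

    # Sketch only; key names follow the template above, loading/validation lives in utils/settings.py.
    from utils import settings

    cfg = settings.config['settings']

    max_length = int(cfg['video_length'])   # overall length budget in seconds
    pad = cfg['time_before_tts']            # silence padding around each TTS clip
    gap = cfg['time_between_pictures']      # gap between consecutive screenshots
    opacity = cfg['opacity'] / 100          # stored as a percentage, used as 0..1
    W, H = int(cfg['video_width']), int(cfg['video_height'])
    if not W or not H:                      # same fallback as in final_video.py below
        W, H = 1080, 1920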
@@ -1,22 +1,21 @@
 #!/usr/bin/env python3
 from pathlib import Path
-from typing import Tuple
+from typing import Union
-import re
-# import sox
-# from mutagen import MutagenError
-# from mutagen.mp3 import MP3, HeaderNotFoundError
 import translators as ts
 from rich.progress import track
 from attr import attrs, attrib
-from moviepy.editor import AudioFileClip, CompositeAudioClip, concatenate_audioclips
 from utils.console import print_step, print_substep
 from utils.voice import sanitize_text
 from utils import settings
 from TTS.common import audio_length
+from TTS.GTTS import GTTS
+from TTS.streamlabs_polly import StreamlabsPolly
+from TTS.TikTok import TikTok
+from TTS.aws_polly import AWSPolly
 @attrs(auto_attribs=True)
 class TTSEngine:
@@ -31,15 +30,24 @@ class TTSEngine:
     Notes:
         tts_module must take the arguments text and filepath.
     """
-    tts_module: object
+    tts_module: Union[GTTS, StreamlabsPolly, TikTok, AWSPolly]
     reddit_object: dict
     path: str = 'assets/temp/mp3'
-    max_length: int = 50  # TODO move to config
     __total_length: int = attrib(
         default=0,
         kw_only=True
     )
+
+    def __attrs_post_init__(self):
+        self.tts_module = self.tts_module()
+        self.max_length: int = settings.config['settings']['video_length']
+        self.time_before_tts: float = settings.config['settings']['time_before_tts']
+        self.time_between_pictures: float = settings.config['settings']['time_between_pictures']
+        self.__total_length = (
+            settings.config['settings']['time_before_first_picture'] +
+            settings.config['settings']['delay_before_end']
+        )
 
     def run(
             self
     ) -> list:
@@ -82,15 +90,16 @@
         if not text:
             return False
 
-        self.tts_module().run(
+        self.tts_module.run(
             text=self.process_text(text),
             filepath=f'{self.path}/{filename}.mp3'
         )
         clip_length = audio_length(f'{self.path}/{filename}.mp3')
+        clip_offset = self.time_between_pictures + self.time_before_tts * 2
 
-        if clip_length and self.__total_length + clip_length <= self.max_length:
-            self.__total_length += clip_length
+        if clip_length and self.__total_length + clip_length + clip_offset <= self.max_length:
+            self.__total_length += clip_length + clip_offset
             return True
         return False

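Note on the budget check above: each accepted TTS clip is now charged for its padding and the gap to the next picture, not just its own audio length. A standalone sketch of the same arithmetic (names are illustrative, not repo code):

    # Illustrative sketch of the length budget used by TTSEngine above.
    def clip_fits(total_so_far: float, clip_length: float,
                  time_between_pictures: float, time_before_tts: float,
                  max_length: float) -> bool:
        """A clip is accepted only if its audio, the padding before/after the TTS,
        and the gap to the next picture still fit within the configured video_length."""
        clip_offset = time_between_pictures + time_before_tts * 2
        return bool(clip_length) and total_so_far + clip_length + clip_offset <= max_length

    # e.g. with the 0.5 s defaults above, a 4.0 s clip costs 4.0 + 0.5 + 2 * 0.5 = 5.5 s of budget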
@@ -9,8 +9,6 @@ from utils import settings
 # from utils.checker import envUpdate
 from video_creation.background import (
-    download_background,
-    chop_background_video,
     get_background_config,
 )
 from video_creation.final_video import make_final_video

@@ -9,7 +9,7 @@ from utils.console import handle_input
 console = Console()
-config = dict  # autocomplete
+config = dict()  # instantiate a dict to calm the linter down
 def crawl(obj: dict, func=lambda x, y: print(x, y, end="\n"), path=None):

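For context on the config = dict() change: binding the module-level name to an instance instead of the dict type means later config['settings'][...] subscripting no longer looks like indexing a type object to linters. A hypothetical two-line illustration:

    config = dict    # name bound to the type itself; config['settings'] is flagged by some linters
    config = dict()  # empty dict instance; subscripting is fine, contents are filled in when the TOML loads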
@@ -10,7 +10,7 @@ from moviepy.editor import (
     AudioFileClip,
     ImageClip,
     concatenate_videoclips,
-    concatenate_audioclips,
+    CompositeAudioClip,
     CompositeVideoClip,
 )
 from moviepy.video.io.ffmpeg_tools import ffmpeg_merge_video_audio, ffmpeg_extract_subclip
@@ -25,14 +25,6 @@ from video_creation.background import download_background, chop_background_video
 console = Console()
 
-W, H = 1080, 1920  # TODO move to config
-max_length: int = 50  # TODO move to config
-time_before_first_picture: float = 1  # TODO move to config
-time_before_tts: float = 0.5  # TODO move to config
-time_between_pictures: float = 1  # TODO move to config
-delay_before_end: int = 1  # TODO move to config
 
 def name_normalize(
         name: str
@@ -69,10 +61,22 @@ def make_final_video(
         reddit_obj (dict): The reddit object that contains the posts to read.
         background_config (Tuple[str, str, str, Any]): The background config to use.
     """
+    W: int = int(settings.config['settings']['video_width'])
+    H: int = int(settings.config['settings']['video_height'])
+
+    if not W or not H:
+        W, H = 1080, 1920
+
+    max_length: int = int(settings.config['settings']['video_length'])
+    time_before_first_picture: float = settings.config['settings']['time_before_first_picture']
+    time_before_tts: float = settings.config['settings']['time_before_tts']
+    time_between_pictures: float = settings.config['settings']['time_between_pictures']
+    delay_before_end: float = settings.config['settings']['delay_before_end']
+
     print_step('Creating the final video 🎥')
     VideoFileClip.reW = lambda clip: clip.resize(width=W)
     VideoFileClip.reH = lambda clip: clip.resize(width=H)
-    opacity = settings.config['settings']['opacity']
+    opacity = settings.config['settings']['opacity'] / 100
 
     def create_audio_clip(
             clip_title: str | int,
@@ -97,39 +101,38 @@ def make_final_video(
     audio_clips.append(audio_title)
     indexes_for_videos = list()
 
-    for idx, audio in track(
-            enumerate(indexes_of_clips, start=1),
+    for audio_title in track(
+            indexes_of_clips,
             description='Gathering audio clips...',
     ):
         temp_audio_clip = create_audio_clip(
-            audio,
+            audio_title,
             correct_audio_offset + video_duration,
         )
         if video_duration + temp_audio_clip.duration + correct_audio_offset + delay_before_end <= max_length:
             video_duration += temp_audio_clip.duration + correct_audio_offset
             audio_clips.append(temp_audio_clip)
-            indexes_for_videos.append(idx)
+            indexes_for_videos.append(audio_title)
 
-    video_duration += delay_before_end
+    video_duration += delay_before_end + time_before_tts
 
-    audio_composite = concatenate_audioclips(audio_clips)
+    # Can't use concatenate_audioclips here, it resets clips' start point
+    audio_composite = CompositeAudioClip(audio_clips)
 
     console.log('[bold green] Video Will Be: %.2f Seconds Long' % video_duration)
 
     # Gather all images
-    new_opacity = 1 if opacity is None or float(opacity) >= 1 else float(opacity)  # TODO move to pydentic and percents
+    new_opacity = 1 if opacity is None or opacity >= 1 else opacity
 
     def create_image_clip(
             image_title: str | int,
             audio_start: float,
-            audio_end: float,
             audio_duration: float,
     ) -> 'ImageClip':
         return (
             ImageClip(f'assets/temp/png/{image_title}.png')
             .set_start(audio_start - time_before_tts)
-            .set_end(audio_end + time_before_tts)
-            .set_duration(time_before_tts * 2 + audio_duration, change_end=False)
+            .set_duration(time_before_tts * 2 + audio_duration)
             .set_opacity(new_opacity)
             .resize(width=W - 100)
         )
@ -137,22 +140,26 @@ def make_final_video(
# add title to video # add title to video
image_clips = list() image_clips = list()
# Accounting for title and other stuff if audio_clips
index_offset = 1
image_clips.append( image_clips.append(
create_image_clip( create_image_clip(
'title', 'title',
audio_clips[0].start, audio_clips[0].start,
audio_clips[0].end,
audio_clips[0].duration audio_clips[0].duration
) )
) )
for photo_idx in indexes_for_videos: for idx, photo_idx in enumerate(
indexes_for_videos,
start=index_offset,
):
image_clips.append( image_clips.append(
create_image_clip( create_image_clip(
f'comment_{indexes_of_clips[photo_idx]}', f'comment_{indexes_of_clips[photo_idx]}',
audio_clips[photo_idx].start, audio_clips[idx].start,
audio_clips[photo_idx].end, audio_clips[idx].duration
audio_clips[photo_idx].duration
) )
) )
@@ -166,16 +173,14 @@ def make_final_video(
     #     .set_opacity(float(opacity)),
     # )
     # else: story mode stuff
-    img_clip_pos = background_config[3]
-    image_concat = concatenate_videoclips(image_clips).set_position(img_clip_pos)
-    image_concat.audio = audio_composite
+    image_concat = concatenate_videoclips(image_clips).set_position(background_config[3])
 
     download_background(background_config)
     chop_background_video(background_config, video_duration)
     background_clip = (
         VideoFileClip('assets/temp/background.mp4')
         .set_start(0)
-        .set_end(video_duration + delay_before_end)
+        .set_end(video_duration)
         .without_audio()
         .resize(height=H)
     )
@@ -203,7 +208,11 @@ def make_final_video(
         y2=back_video_height
     )
 
+    [print(image.start, audio.start, '|', audio.end, image.end, end=f'\n{"-" * 10}\n') for
+     audio, image in zip(audio_clips, image_clips)]
+
     final = CompositeVideoClip([background_clip, image_concat])
+    final.audio = audio_composite
 
     title = re.sub(r'[^\w\s-]', '', reddit_obj['thread_title'])
     idx = re.sub(r'[^\w\s-]', '', reddit_obj['thread_id'])

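Note on the switch to CompositeAudioClip and the padded image clips above: concatenate_audioclips lays the clips back to back from t=0 and discards the start offsets computed in the loop, while CompositeAudioClip keeps them. An illustrative, standalone sketch (clip file names are placeholders and the 0.5 s pad stands in for time_before_tts):

    # Illustrative sketch, not project code: shows why CompositeAudioClip is used above.
    from moviepy.editor import AudioFileClip, CompositeAudioClip, ImageClip

    pad = 0.5  # stands in for time_before_tts

    # Start points are set explicitly; CompositeAudioClip preserves them,
    # concatenate_audioclips would reset both clips to start at t=0.
    title_audio = AudioFileClip('assets/temp/mp3/title.mp3').set_start(pad)
    first_comment = AudioFileClip('assets/temp/mp3/0.mp3').set_start(title_audio.end + 2 * pad)
    soundtrack = CompositeAudioClip([title_audio, first_comment])

    # Matching screenshot: appears `pad` seconds before its audio and stays up for
    # audio + 2 * pad, mirroring create_image_clip in the hunk above.
    title_image = (
        ImageClip('assets/temp/png/title.png')
        .set_start(title_audio.start - pad)
        .set_duration(title_audio.duration + 2 * pad)
    )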
@@ -375,9 +375,10 @@ class RedditScreenshot(Browser, Wait):
                 [chunk for chunk in chunks(async_tasks_primary, 10)],
                 start=1,
         ):
+            chunk_list = async_tasks_primary.__len__() // 10 + (1 if async_tasks_primary.__len__() % 10 != 0 else 0)
             for task in track(
                     as_completed(chunked_tasks),
-                    description=f'Downloading comments: Chunk {idx}/{chunked_tasks.__len__()}',
+                    description=f'Downloading comments: Chunk {idx}/{chunk_list}',
             ):
                 await task

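The chunk_list expression above is a ceiling division (the number of 10-task chunks); since its value does not change between iterations, it could also be computed once before the outer loop. An equivalent, shorter form for reference:

    import math

    def chunk_count(n_tasks: int, chunk_size: int = 10) -> int:
        # Same result as: n_tasks // chunk_size + (1 if n_tasks % chunk_size != 0 else 0)
        return math.ceil(n_tasks / chunk_size)

    assert chunk_count(25) == 3   # 25 comment tasks -> chunks of 10, 10 and 5
    assert chunk_count(30) == 3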