From 31b300331027f4902b96438c82727c911bdba6fc Mon Sep 17 00:00:00 2001
From: Drugsosos <44712637+Drugsosos@users.noreply.github.com>
Date: Mon, 11 Jul 2022 04:37:21 +0300
Subject: [PATCH] fixes in async code

---
 TTS/TikTok.py                           | 19 +++++----
 TTS/engine_wrapper.py                   | 23 ++++++-----
 TTS/streamlabs_polly.py                 | 10 ++---
 video_creation/final_video.py           | 55 +++++++++++--------------
 video_creation/screenshot_downloader.py | 31 +++++++-------
 video_creation/voices.py                | 15 +++++--
 6 files changed, 82 insertions(+), 71 deletions(-)

diff --git a/TTS/TikTok.py b/TTS/TikTok.py
index 198866b..9f5a3b4 100644
--- a/TTS/TikTok.py
+++ b/TTS/TikTok.py
@@ -71,7 +71,7 @@ class TikTok(BaseApiTTS): # TikTok Text-to-Speech Wrapper
     )
     random_voice: bool = False
     uri_base: str = attrib(
-        default='https://api16-normal-useast5.us.tiktokv.com/media/api/text/speech/invoke',
+        default='https://api16-normal-useast5.us.tiktokv.com/media/api/text/speech/invoke/',
         kw_only=True,
     )
     max_chars = 300
@@ -91,11 +91,12 @@ class TikTok(BaseApiTTS): # TikTok Text-to-Speech Wrapper
         self,
         text: str,
     ):
-        return await self.client.post(
-            f'{self.uri_base}',
-            params={
-                'text_speaker': self.voice,
-                'req_text': text,
-                'speaker_map_type': 0,
-            }
-        )
+        async with self.client.post(
+            f'{self.uri_base}',
+            params={
+                'text_speaker': self.voice,
+                'req_text': text,
+                'speaker_map_type': 0,
+            }) as results:
+            results = await results.json()
+            return results['data']['v_str']
diff --git a/TTS/engine_wrapper.py b/TTS/engine_wrapper.py
index 0b97809..9b6600f 100644
--- a/TTS/engine_wrapper.py
+++ b/TTS/engine_wrapper.py
@@ -49,29 +49,31 @@ class TTSEngine:
 
         print_step('Saving Text to MP3 files...')
 
-        await self.call_tts('title', self.reddit_object['thread_title'])
-        async_tasks_offset = 1
+        async_tasks_primary = list()
+
+        async_tasks_primary.append(self.call_tts('title', self.reddit_object['thread_title']))
 
         if self.reddit_object['thread_post'] and settings.config['settings']['storymode']:
-            await self.call_tts('posttext', self.reddit_object['thread_post'])
-            async_tasks_offset += 1
+            async_tasks_primary.append(self.call_tts('posttext', self.reddit_object['thread_post']))
 
-        async_tasks_primary = [
+        async_tasks_primary.extend([
             self.call_tts(str(idx), comment['comment_body'])
             for idx, comment in enumerate(self.reddit_object['comments'])
-        ]
+        ])
 
+        async_tasks_primary_results = list()
         for task in track(
                 as_completed(async_tasks_primary),
                 description='Saving...',
                 total=async_tasks_primary.__len__()
        ):
-            await task
+            async_tasks_primary_results.append(await task)
+        async_tasks_primary.clear()
 
         print_substep('Saved Text to MP3 files successfully.', style='bold green')
 
         return [
             comments
             for comments, condition in
-            zip(self.reddit_object['comments'], async_tasks_primary[async_tasks_offset:])
+            zip(range(self.reddit_object['comments'].__len__()), async_tasks_primary_results)
             if condition
         ]
@@ -85,10 +87,11 @@ class TTSEngine:
             filepath=f'{self.path}/{filename}.mp3'
         )
 
-        clip_length = audio_length(f'assets/audio/{filename}.mp3')
+        clip_length = audio_length(f'{self.path}/{filename}.mp3')
 
         if self.__total_length + clip_length <= self.max_length:
-            self.max_length += clip_length
+            self.__total_length += clip_length
+            print(clip_length, '/', self.__total_length)
             return True
         return False
 
diff --git a/TTS/streamlabs_polly.py b/TTS/streamlabs_polly.py
index 9ec01cb..befa2a3 100644
--- a/TTS/streamlabs_polly.py
+++ b/TTS/streamlabs_polly.py
@@ -56,16 +56,16 @@ class StreamlabsPolly(BaseApiTTS):
             else get_random_voice(voices)
         )
 
-        response = await self.client.post(
+        async with self.client.post(
             self.url,
             data={
                 'voice': voice,
                 'text': text,
                 'service': 'polly',
             }
-        )
-        speak_url = await(
-            await response.json()
-        )['speak_url']
+        ) as response:
+            speak_url = (
+                await response.json()
+            )['speak_url']
 
         return await self.client.get(speak_url)
diff --git a/video_creation/final_video.py b/video_creation/final_video.py
index 2bc94f3..4aecc33 100755
--- a/video_creation/final_video.py
+++ b/video_creation/final_video.py
@@ -79,7 +79,7 @@ def make_final_video(
         clip_start: float,
     ) -> 'AudioFileClip':
         return (
-            AudioFileClip(f'assets/audio/{clip_title}.mp3')
+            AudioFileClip(f'assets/temp/mp3/{clip_title}.mp3')
             .set_start(clip_start)
         )
@@ -97,34 +97,29 @@ def make_final_video(
     audio_clips.append(audio_title)
 
     indexes_for_videos = list()
-    for audio in track(
-        indexes_of_clips,
+    for idx, audio in track(
+        enumerate(indexes_of_clips),
         description='Gathering audio clips...',
     ):
         temp_audio_clip = create_audio_clip(
             audio,
             correct_audio_offset + video_duration,
         )
-        if video_duration + temp_audio_clip.duration + correct_audio_offset + delay_before_end > max_length:
-            continue
-        video_duration += temp_audio_clip.duration + correct_audio_offset
-        audio_clips.append(temp_audio_clip)
-        indexes_for_videos.append(audio)
+        if video_duration + temp_audio_clip.duration + correct_audio_offset + delay_before_end <= max_length:
+            video_duration += temp_audio_clip.duration + correct_audio_offset
+            audio_clips.append(temp_audio_clip)
+            indexes_for_videos.append(idx)
 
     video_duration += delay_before_end
 
-    for idx in indexes_of_clips:
-        audio_clips.append(AudioFileClip(f'assets/temp/mp3/{idx}.mp3'))
     audio_composite = concatenate_audioclips(audio_clips)
 
-    console.log(f'[bold green] Video Will Be: {audio_composite.length} Seconds Long')
+    console.log(f'[bold green] Video Will Be: {audio_composite.end} Seconds Long')
 
     # add title to video
-    image_clips = list()
     # Gather all images
     new_opacity = 1 if opacity is None or float(opacity) >= 1 else float(opacity)  # TODO move to pydentic and percents
 
     def create_image_clip(
-        self,
         image_title: str | int,
         audio_start: float,
         audio_end: float,
@@ -132,30 +127,30 @@ def make_final_video(
     ) -> 'ImageClip':
         return (
             ImageClip(f'assets/temp/png/{image_title}.png')
-            .set_start(audio_start - self.time_before_tts)
-            .set_end(audio_end + self.time_before_tts)
-            .set_duration(self.time_before_tts * 2 + audio_duration, change_end=False)
+            .set_start(audio_start - time_before_tts)
+            .set_end(audio_end + time_before_tts)
+            .set_duration(time_before_tts * 2 + audio_duration, change_end=False)
             .set_opacity(new_opacity)
             .resize(width=W - 100)
         )
 
+    image_clips = list()
 
-    index_offset = 1
-
-    image_clips.insert(
-        0,
-        ImageClip('assets/temp/png/title.png')
-        .set_duration(audio_clips[0].duration)
-        .resize(width=W - 100)
-        .set_opacity(new_opacity),
+    image_clips.append(
+        create_image_clip(
+            'title',
+            audio_clips[0].start,
+            audio_clips[0].end,
+            audio_clips[0].duration
+        )
     )
 
-    for photo_idx in indexes_of_clips:
+    for photo_idx in indexes_for_videos:
         image_clips.append(
             create_image_clip(
                 f'comment_{photo_idx}',
-                audio_clips[photo_idx + index_offset].start,
-                audio_clips[photo_idx + index_offset].end,
-                audio_clips[photo_idx + index_offset].duration
+                audio_clips[photo_idx].start,
+                audio_clips[photo_idx].end,
+                audio_clips[photo_idx].duration
            )
        )
 
@@ -175,7 +170,7 @@ def make_final_video(
     download_background(background_config)
 
     background_clip = (
-        VideoFileClip('assets/temp/background.mp4')
+        VideoFileClip('assets/backgrounds/background.mp4')
         .set_start(0)
         .set_end(video_duration + delay_before_end)
         .without_audio()
@@ -248,7 +243,7 @@ def make_final_video(
             'assets/temp/temp_audio.mp4',
             0,
             video_duration,
-            targetname=f"results/{subreddit}/{filename}",
+            targetname=f'results/{subreddit}/{filename}',
         )
     else:
         print('debug duck')
diff --git a/video_creation/screenshot_downloader.py b/video_creation/screenshot_downloader.py
index 95109a6..81c7850 100644
--- a/video_creation/screenshot_downloader.py
+++ b/video_creation/screenshot_downloader.py
@@ -18,7 +18,6 @@ from attr import attrs, attrib
 from attr.validators import instance_of, optional
 from typing import TypeVar, Optional, Callable, Union
 
-
 _function = TypeVar('_function', bound=Callable[..., object])
 _exceptions = TypeVar('_exceptions', bound=Optional[Union[type, tuple, list]])
 
@@ -212,12 +211,7 @@ class RedditScreenshot(Browser, Wait):
         screenshot_idx (int): List with indexes of voiced comments
     """
     reddit_object: dict
-    screenshot_idx: list = attrib()
-
-    @screenshot_idx.validator
-    def validate_screenshot_idx(self, attribute, value):
-        if value <= 0:
-            raise ValueError('Check screenshot_num in config')
+    screenshot_idx: list
 
     async def __dark_theme(
         self,
@@ -352,12 +346,21 @@ class RedditScreenshot(Browser, Wait):
             self.screenshot_idx
         ]
 
-        for task in track(
-                as_completed(async_tasks_primary),
-                description='Downloading screenshots...',
-                total=async_tasks_primary.__len__(),
-        ):
-            await task
+        def chunks(lst, n):
+            """Yield successive n-sized chunks from lst."""
+            for i in range(0, len(lst), n):
+                yield lst[i:i + n]
 
-        print_substep('Screenshots downloaded Successfully.', style='bold green')
+        for idx, tasks in enumerate(
+                [chunk for chunk in chunks(async_tasks_primary, 15)],
+                start=1,
+        ):
+            for task in track(
+                    as_completed(tasks),
+                    description=f'Downloading comments: Chunk {idx}',
+                    total=tasks.__len__()
+            ):
+                await task
+
+        print_substep('Comments downloaded Successfully.', style='bold green')
 
         await self.close_browser()
diff --git a/video_creation/voices.py b/video_creation/voices.py
index b4eaf1f..e792ec3 100644
--- a/video_creation/voices.py
+++ b/video_creation/voices.py
@@ -37,13 +37,22 @@ async def save_text_to_mp3(
         if voice.casefold() in map(lambda _: _.casefold(), TTSProviders):
             break
         print('Unknown Choice')
-    engine_instance = TTSEngine(get_case_insensitive_key_value(TTSProviders, voice), reddit_obj)
-    return await engine_instance.run()
+    TTS_instance = get_case_insensitive_key_value(TTSProviders, voice)
+    if TTS_instance == StreamlabsPolly or TTS_instance == TikTok:
+        from aiohttp import ClientSession
+
+        async with ClientSession() as client:
+            engine_instance = TTSEngine(TTS_instance(client), reddit_obj)
+            results = await engine_instance.run()
+    else:
+        engine_instance = TTSEngine(TTS_instance, reddit_obj)
+        results = await engine_instance.run()
+    return results
 
 
 def get_case_insensitive_key_value(
     input_dict,
-    key
+    key,
 ) -> object:
     return next(
         (value for dict_key, value in input_dict.items() if dict_key.lower() == key.lower()),
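
For reference, the chunked download introduced in screenshot_downloader.py can be exercised on its own with plain asyncio. A minimal sketch of the same pattern; fetch_one and the 40-item workload are placeholders for illustration, not part of this patch:

    import asyncio


    def chunks(lst, n):
        """Yield successive n-sized chunks from lst."""
        for i in range(0, len(lst), n):
            yield lst[i:i + n]


    async def fetch_one(idx: int) -> int:
        # Stand-in for a single screenshot download.
        await asyncio.sleep(0.01)
        return idx


    async def main() -> None:
        # Coroutines are built up front but only scheduled when their chunk is
        # awaited, so at most 15 downloads are in flight per batch.
        pending = [fetch_one(i) for i in range(40)]
        for batch_no, batch in enumerate(chunks(pending, 15), start=1):
            for task in asyncio.as_completed(batch):
                result = await task
                print(f'chunk {batch_no}: finished {result}')


    asyncio.run(main())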