chore: reformatted

feat: adds tiny delay

closes #40
pull/418/head
Jason 2 years ago
parent bd165a2473
commit 5beef765d7

@ -43,7 +43,9 @@ def get_subreddit_threads():
Ask user for subreddit input
"""
print_step("Getting subreddit threads...")
if not getenv("SUBREDDIT"): # note to self. you can have multiple subreddits via reddit.subreddit("redditdev+learnpython")
if not getenv(
"SUBREDDIT"
): # note to self. you can have multiple subreddits via reddit.subreddit("redditdev+learnpython")
subreddit = reddit.subreddit(
input("What subreddit would you like to pull from? ")
) # if the env isnt set, ask user
@ -60,7 +62,7 @@ def get_subreddit_threads():
else:
threads = subreddit.hot(limit=25)
submission = get_subreddit_undone(threads, subreddit)
submission = check_done(submission) # double checking
submission = check_done(submission) # double checking
if submission is None:
return get_subreddit_threads() # submission already done. rerun
upvotes = submission.score
@ -80,7 +82,7 @@ def get_subreddit_threads():
try:
content["thread_url"] = submission.url
content["thread_title"] = submission.title
#ontent["thread_content"] = submission.content
# ontent["thread_content"] = submission.content
content["comments"] = []
for top_level_comment in submission.comments:

@ -1,6 +1,7 @@
from typing import List
import json
def get_subreddit_undone(submissions: List, subreddit):
"""
recursively checks if the top submission in the list was already done.
@ -11,7 +12,10 @@ def get_subreddit_undone(submissions: List, subreddit):
if already_done(done_videos, submission):
continue
return submission
return get_subreddit_undone(subreddit.top(time_filter="hour"), subreddit) # all of the videos in hot have already been done
return get_subreddit_undone(
subreddit.top(time_filter="hour"), subreddit
) # all of the videos in hot have already been done
def already_done(done_videos: list, submission):

@ -116,11 +116,9 @@ class TTTTSWrapper: # TikTok Text-to-Speech Wrapper
chunkId = chunkId + 1
if(len(audio_clips) > 1):
if len(audio_clips) > 1:
cbn.convert(samplerate=44100, n_channels=2)
cbn.build(
audio_clips, filename, 'concatenate'
)
cbn.build(audio_clips, filename, "concatenate")
else:
os.rename(audio_clips[0], filename)

@ -2,7 +2,7 @@ import json
from os import getenv
from pathlib import Path
#from playwright.async_api import async_playwright
# from playwright.async_api import async_playwright
from playwright.sync_api import sync_playwright, ViewportSize
from rich.progress import track
@ -55,10 +55,12 @@ def download_screenshots_of_reddit_posts(reddit_object, screenshot_num):
path="assets/temp/png/title.png"
)
if storymode:
page.locator('[data-click-id="text"]').screenshot(path="assets/temp/png/story_content.png")
page.locator('[data-click-id="text"]').screenshot(
path="assets/temp/png/story_content.png"
)
else:
for idx, comment in track(
enumerate(reddit_object["comments"]), "Downloading screenshots..."
enumerate(reddit_object["comments"]), "Downloading screenshots..."
):
# Stop if we have reached the screenshot_num

@ -36,7 +36,11 @@ def save_text_to_mp3(reddit_obj):
except HeaderNotFoundError: # note to self AudioFileClip
length += sox.file_info.duration(f"assets/temp/mp3/title.mp3")
if getenv("STORYMODE").casefold() == "true":
ttttsw.tts(sanitize_text(reddit_obj["thread_content"]), filename=f"assets/temp/mp3/story_content.mp3", random_speaker=False)
ttttsw.tts(
sanitize_text(reddit_obj["thread_content"]),
filename=f"assets/temp/mp3/story_content.mp3",
random_speaker=False,
)
#'story_content'
com = 0
for comment in track((reddit_obj["comments"]), "Saving..."):

Loading…
Cancel
Save