Merge pull request #58 from PatatjeMC/master

Unlimited comment length
Jason committed 3 years ago via GitHub · commit 0a028358b2

@@ -6,3 +6,4 @@ SUBREDDIT="AskReddit"
 ALLOW_NSFW="False"
 POST_ID=""
 THEME="LIGHT"
+MAX_COMMENT_LENGTH="500"

@@ -16,7 +16,7 @@ def textify(text):
 def get_subreddit_threads():
     """
-    Returns a list of threads from the AskReddit subreddit.
+    Returns a list of threads from the selected subreddit.
     """
     print_step("Getting subreddit threads...")
@@ -24,7 +24,7 @@ def get_subreddit_threads():
     content = {}
     load_dotenv()
     reddit = praw.Reddit(client_id=getenv("REDDIT_CLIENT_ID"), client_secret=getenv("REDDIT_CLIENT_SECRET"),
-                         user_agent="Accessing AskReddit threads", username=getenv("REDDIT_USERNAME"),
+                         user_agent="Accessing subreddit threads", username=getenv("REDDIT_USERNAME"),
                          password=getenv("REDDIT_PASSWORD"), )
     """
     Ask user for subreddit input
@@ -58,7 +58,7 @@ def get_subreddit_threads():
     content["comments"] = []
     for top_level_comment in submission.comments:
-        if len(top_level_comment.body) <= 250:
+        if len(top_level_comment.body) <= int(environ["MAX_COMMENT_LENGTH"]):
             content["comments"].append(
                 {"comment_body": top_level_comment.body, "comment_url": top_level_comment.permalink,
                  "comment_id": top_level_comment.id, })

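Note on the comment filter above: the hard-coded 250-character cap is replaced by the MAX_COMMENT_LENGTH value added to the .env template, read via os.environ. Below is a minimal sketch of the same pattern in isolation, assuming python-dotenv is installed and MAX_COMMENT_LENGTH is set as in the template; the fallback default of "500" and the sample comments are illustrative assumptions, not part of this PR:

    # Sketch: env-driven comment-length filter (fallback default is an assumption).
    from os import getenv

    from dotenv import load_dotenv

    load_dotenv()  # loads MAX_COMMENT_LENGTH from .env into the process environment
    max_comment_length = int(getenv("MAX_COMMENT_LENGTH", "500"))

    comments = ["A short top-level comment.", "x" * 1000]
    kept = [c for c in comments if len(c) <= max_comment_length]
    print(kept)  # only comments within the configured length survive the filter
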
@@ -1,4 +1,6 @@
 import requests, base64, random, os
+import re
+from moviepy.editor import AudioFileClip, concatenate_audioclips, CompositeAudioClip
 
 # https://twitter.com/scanlime/status/1512598559769702406
 voices = [ # DISNEY VOICES
@@ -55,15 +57,29 @@ class TTTTSWrapper: # TikTok Text-to-Speech Wrapper
     def tts(self, req_text: str = "TikTok Text To Speech", filename: str = 'title.mp3', random_speaker: bool = False):
         req_text = req_text.replace("+", "plus").replace(" ", "+").replace("&", "and")
         voice = self.randomvoice() if random_speaker else 'en_us_002'
-        r = requests.post(f"{self.URI_BASE}{voice}&req_text={req_text}&speaker_map_type=0")
-        vstr = [r.json()["data"]["v_str"]][0]
-        b64d = base64.b64decode(vstr)
-        with open(filename, "wb") as out:
-            out.write(b64d)
+        chunks = [m.group().strip() for m in re.finditer(r' *((.{0,200})(\.|.$))', req_text)]
+        audio_clips = []
+        chunkId = 0
+        for chunk in chunks:
+            r = requests.post(f"{self.URI_BASE}{voice}&req_text={chunk}&speaker_map_type=0")
+            vstr = [r.json()["data"]["v_str"]][0]
+            b64d = base64.b64decode(vstr)
+            with open(f"{filename}-{chunkId}", "wb") as out:
+                out.write(b64d)
+            audio_clips.append(AudioFileClip(f"{filename}-{chunkId}"))
+            chunkId = chunkId + 1
+        audio_concat = concatenate_audioclips(audio_clips)
+        audio_composite = CompositeAudioClip([audio_concat])
+        audio_composite.write_audiofile(filename, 44100, 2, 2000, None)
 
     @staticmethod
     def randomvoice():

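Note on the TTS change above: instead of sending the entire comment in a single request, the text is split into chunks of at most roughly 200 characters (preferring to break after a period), each chunk is synthesized separately, and the resulting clips are concatenated with moviepy into one audio file, which is what lifts the effective comment-length limit. A small sketch showing how the chunking regex behaves on its own; the sample text is made up for illustration:

    # Sketch: splitting long text the same way the tts() loop does.
    import re

    req_text = ("This is one sentence of a fairly long Reddit comment. " * 6).strip()

    # Greedily take up to 200 characters, backtracking to the nearest period,
    # or accepting the end of the string for the final piece.
    chunks = [m.group().strip() for m in re.finditer(r' *((.{0,200})(\.|.$))', req_text)]

    for i, chunk in enumerate(chunks):
        print(i, len(chunk))  # every chunk stays within the per-request size
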
@@ -4,5 +4,11 @@
     "value": "eyJwcmVmcyI6eyJ0b3BDb250ZW50RGlzbWlzc2FsVGltZSI6MCwiZ2xvYmFsVGhlbWUiOiJSRURESVQiLCJuaWdodG1vZGUiOnRydWUsImNvbGxhcHNlZFRyYXlTZWN0aW9ucyI6eyJmYXZvcml0ZXMiOmZhbHNlLCJtdWx0aXMiOmZhbHNlLCJtb2RlcmF0aW5nIjpmYWxzZSwic3Vic2NyaXB0aW9ucyI6ZmFsc2UsInByb2ZpbGVzIjpmYWxzZX0sInRvcENvbnRlbnRUaW1lc0Rpc21pc3NlZCI6MH19",
     "domain": ".reddit.com",
     "path": "/"
+  },
+  {
+    "name": "eu_cookie",
+    "value": "{%22opted%22:true%2C%22nonessential%22:false}",
+    "domain": ".reddit.com",
+    "path": "/"
   }
 ]

@ -0,0 +1,8 @@
[
{
"name": "eu_cookie",
"value": "{%22opted%22:true%2C%22nonessential%22:false}",
"domain": ".reddit.com",
"path": "/"
}
]

@@ -26,9 +26,11 @@ def download_screenshots_of_reddit_posts(reddit_object, screenshot_num):
     context = browser.new_context()
     if getenv("THEME").upper() == "DARK":
-        cookie_file = open('./video_creation/data/cookie.json')
-        cookies = json.load(cookie_file)
-        context.add_cookies(cookies)
+        cookie_file = open('./video_creation/data/cookie-dark-mode.json')
+    else:
+        cookie_file = open('./video_creation/data/cookie-light-mode.json')
+    cookies = json.load(cookie_file)
+    context.add_cookies(cookies)
 
     # Get the thread screenshot
     page = context.new_page()
     page.goto(reddit_object["thread_url"])

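Note on the screenshot change above: the cookie file is now chosen per theme, and the load/add calls are hoisted out of the if-branch so both themes get cookies applied. Below is a sketch of the same selection wrapped in a small helper; the helper name, the "LIGHT" default, and the with-block (which closes the file handle, something the PR's bare open() call does not do) are editorial suggestions, while the paths match the diff:

    # Sketch: theme-driven cookie selection for a Playwright browser context.
    import json
    from os import getenv

    def load_theme_cookies(context):  # hypothetical helper, not in the PR
        if getenv("THEME", "LIGHT").upper() == "DARK":  # "LIGHT" default is an assumption
            path = "./video_creation/data/cookie-dark-mode.json"
        else:
            path = "./video_creation/data/cookie-light-mode.json"
        with open(path) as cookie_file:  # context manager closes the file after loading
            cookies = json.load(cookie_file)
        context.add_cookies(cookies)  # Playwright BrowserContext.add_cookies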