Remove trailing whitespace

chore/remove-ads
Yuan Chen 4 years ago
parent ffef0c2cd9
commit 5c29e25221

@@ -16,8 +16,8 @@ def main():
         sys.exit(1)
     word = sys.argv[1]
     pronunciations = get_pronunciations(word)
-    britsh_en = '[🔊](' + pronunciations[0][0] +')' + ' ' + pronunciations[0][1]
-    american_en = '[🔊](' + pronunciations[1][0] +')' + ' ' + pronunciations[1][1]
+    britsh_en = '[🔊](' + pronunciations[0][0] +')' + ' ' + '/' + pronunciations[0][1] + '/'
+    american_en = '[🔊](' + pronunciations[1][0] +')' + ' ' + '/' + pronunciations[1][1] + '/'
     line = '| ' + word + ' | ' + britsh_en + ' | ' + american_en + ' | ' + ' ' + '|'
     print(line)
@@ -26,19 +26,20 @@ def get_pronunciations(word):
     from youdao.com if available"""
     word = word.strip()
     word_url = "http://dict.youdao.com/w/en/"+word
-    britsh_en = [" ", "/ /"]
-    american_en = [" ","/ /"]
+    pron_url = "http://dict.youdao.com/dictvoice?audio="+word+"&"
+    britsh_en = [" ", " "]
+    american_en = [" "," "]
     try:
         response = urllib.request.urlopen(word_url).read()
         soup = BeautifulSoup(response, "html.parser")
         spans = soup.find_all('span', {'class' : 'pronounce'})
         lines = [span.get_text() for span in spans]
         match = re.findall(r'\[.+\]', lines[0])
-        britsh_en[0] = "http://dict.youdao.com/dictvoice?audio="+word+"&type=1"
-        britsh_en[1] = match[0].replace('[', '/').replace(']', '/')
+        britsh_en[0] = pron_url + "type=1"
+        britsh_en[1] = match[0].replace('[', '').replace(']', '')
         match = re.findall(r'\[.+\]', lines[1])
-        american_en[0] = "http://dict.youdao.com/dictvoice?audio="+word+"&type=2"
-        american_en[1] = match[0].replace('[', '/').replace(']', '/')
+        american_en[0] = pron_url + "type=2"
+        american_en[1] = match[0].replace('[', '').replace(']', '')
     except:
         return britsh_en, american_en
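
For reference, a minimal sketch of the net effect of this change on the printed table row: get_pronunciations() now returns the phonetic spelling without brackets or slashes, and main() adds the surrounding slashes itself. The word "hello" and its phonetic spellings below are illustrative only, not taken from the diff.

    # Hypothetical return value of get_pronunciations("hello") after this change.
    pronunciations = [
        ("http://dict.youdao.com/dictvoice?audio=hello&type=1", "hə'ləʊ"),  # British
        ("http://dict.youdao.com/dictvoice?audio=hello&type=2", "hə'loʊ"),  # American
    ]
    word = "hello"
    # main() now wraps the phonetics in slashes when building the markdown row.
    britsh_en = '[🔊](' + pronunciations[0][0] + ')' + ' ' + '/' + pronunciations[0][1] + '/'
    american_en = '[🔊](' + pronunciations[1][0] + ')' + ' ' + '/' + pronunciations[1][1] + '/'
    print('| ' + word + ' | ' + britsh_en + ' | ' + american_en + ' | ' + ' ' + '|')
    # | hello | [🔊](http://dict.youdao.com/dictvoice?audio=hello&type=1) /hə'ləʊ/ | [🔊](http://dict.youdao.com/dictvoice?audio=hello&type=2) /hə'loʊ/ |  |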
