mirror of https://github.com/Black-Gold/Learn
master
parent
6aefff0af3
commit
246e7cd5e9
@ -0,0 +1,117 @@
|
||||
import re
|
||||
import csv
|
||||
import urllib.request
|
||||
from urllib.request import urlopen, Request
|
||||
from bs4 import BeautifulSoup
|
||||
import xlrd
|
||||
import time
|
||||
|
||||
# USDOT numbers read from the spreadsheet; consumed by the crawl loop below.
dots = []


def read_excel_file():
    """Load USDOT numbers from column 0 of dots.xls into the module-level `dots`.

    Row 0 is treated as a header and skipped. xlrd returns numeric cells as
    floats (e.g. 12345.0), so a trailing ".0" is stripped to recover the
    integer string.
    """
    loc = "dots.xls"
    wb = xlrd.open_workbook(loc)
    sheet = wb.sheet_by_index(0)
    # Bug fix: iterate every data row instead of the hard-coded range(1, 5),
    # which silently dropped all rows past the fourth.
    for i in range(1, sheet.nrows):
        dot = str(sheet.cell_value(i, 0))
        # Bug fix: only strip a trailing ".0" — str.replace(".0", "") would
        # also corrupt values containing ".0" in the middle.
        if dot.endswith(".0"):
            dot = dot[:-2]
        dots.append(dot)
|
||||
|
||||
|
||||
def _extract(pattern, text):
    """Return group(1) of `pattern` in `text`, stripped, or "" when absent.

    The previous inline `re.search(...).group(1)` chain raised AttributeError
    and aborted the whole crawl whenever one marker was missing from a page.
    """
    found = re.search(pattern, text)
    return found.group(1).strip() if found else ""


def crawl_data(url):
    """Scrape one FMCSA carrier-snapshot page and append it to a CSV file.

    Fetches `url`, pulls the "as of" date out of the page's bold text,
    extracts the carrier fields from the central summary block, and hands
    everything to write_csv(). Missing fields are written as "".
    """
    req = Request(url, headers={"User-Agent": "Mozilla/5.0"})
    html = urlopen(req).read()
    bs = BeautifulSoup(html, "html.parser")

    # Bug fix: `date` was unbound (NameError at the write_csv call) whenever
    # no <b> element matched; default to "" instead.
    date = ""
    bold_texts = bs.find_all("b")
    for b in bold_texts:
        try:
            date = (
                re.search(
                    "The information below reflects the content of the FMCSA management information systems as of(.*).",
                    b.get_text(strip=True, separator=" "),
                )
                .group(1)
                .strip()
            )
            # Trim trailing sentence text when the captured span runs long.
            if len(date) > 11:
                date = date.split(".", 1)[0]
            print(date)
        except AttributeError:
            # This <b> element did not carry the date — keep scanning.
            pass

    information = bs.find("center").get_text(strip=True, separator=" ")

    operating = _extract("Operating Status:(.*)Out", information)
    legal_name = _extract("Legal Name:(.*)DBA", information)
    physical_address = _extract("Physical Address:(.*)Phone", information)
    mailing_address = _extract("Mailing Address:(.*)USDOT", information)
    usdot_address = _extract("USDOT Number:(.*)State Carrier ID Number", information)
    power_units = _extract("Power Units:(.*)Drivers", information)
    drivers = _extract("Drivers:(.*)MCS-150 Form Date", information)

    write_csv(
        date,
        operating,
        legal_name,
        physical_address,
        mailing_address,
        usdot_address,
        power_units,
        drivers,
    )
|
||||
|
||||
|
||||
def write_csv(
    date,
    operating,
    legal_name,
    physical_address,
    mailing_address,
    usdot_address,
    power_units,
    drivers,
):
    """Write a single-row CSV file named `<usdot_address>.csv`.

    The USDOT number itself is used only for the filename; the remaining
    fields become the one data row under a fixed header.
    """
    columns = [
        "Date",
        "Operating Status",
        "Legal_Name",
        "Physical Address",
        "Mailing Address",
        "Power Units",
        "Drivers",
    ]
    values = [
        date,
        operating,
        legal_name,
        physical_address,
        mailing_address,
        power_units,
        drivers,
    ]
    target = usdot_address + ".csv"
    with open(target, mode="w", newline="", encoding="utf-8") as csv_file:
        writer = csv.DictWriter(csv_file, fieldnames=columns)
        writer.writeheader()
        writer.writerow(dict(zip(columns, values)))
|
||||
|
||||
|
||||
# Load the USDOT numbers from the spreadsheet, then crawl each carrier's
# snapshot page, pausing between requests to stay polite to the server.
read_excel_file()
print(dots)

BASE_URL = (
    "https://safer.fmcsa.dot.gov/query.asp?searchtype=ANY"
    "&query_type=queryCarrierSnapshot&query_param=USDOT&query_string="
)
for usdot in dots:
    crawl_data(BASE_URL + usdot)
    time.sleep(5)
|
@ -0,0 +1,55 @@
|
||||
# Python多线程例子来示例加线程锁
|
||||
# 1。使用线程定义一个子类。线程类
|
||||
# 2。实例化子类并触发线程
|
||||
# 3。在线程的运行方法中实现锁
|
||||
|
||||
import threading
|
||||
import datetime
|
||||
|
||||
exitFlag = 0
|
||||
|
||||
|
||||
class myThread(threading.Thread):
    """Worker thread that prints today's date under a shared lock.

    The module-level `threadLock` serializes the calls to print_date() so
    the two workers' output lines cannot interleave.
    """

    def __init__(self, name, counter):
        threading.Thread.__init__(self)
        # `counter` doubles as the thread's numeric ID.
        self.threadID = counter
        self.name = name
        self.counter = counter

    def run(self):
        print("\n开始 " + self.name)
        # Bug fix: the original acquire()/release() pair never released the
        # lock if print_date raised, deadlocking every later thread. The
        # `with` statement guarantees release on any exit path.
        with threadLock:
            print_date(self.name, self.counter)
        print("退出 " + self.name)
|
||||
|
||||
|
||||
def print_date(threadName, counter):
    """Print today's date tagged with the calling thread's name and counter."""
    today = datetime.date.today()
    line = "{}[{}]: {}".format(threadName, counter, today)
    print(line)
|
||||
|
||||
|
||||
# Shared lock serializing print_date() across workers, plus bookkeeping.
threadLock = threading.Lock()
threads = []

# Create, start, and register the two worker threads.
for counter in (1, 2):
    worker = myThread("线程", counter)
    worker.start()
    threads.append(worker)

# Wait for every worker to finish before announcing shutdown.
for worker in threads:
    worker.join()

print("\n退出程序!!!")
|
@ -0,0 +1,127 @@
|
||||
import time
|
||||
import datetime
|
||||
import re
|
||||
import xlwt
|
||||
from xlwt import Workbook
|
||||
from selenium import webdriver
|
||||
from selenium.webdriver.common.keys import Keys
|
||||
from selenium.webdriver.common.by import By
|
||||
from selenium.webdriver.support import expected_conditions as EC
|
||||
from selenium.webdriver.support.ui import WebDriverWait
|
||||
|
||||
|
||||
class Bolagsverket:
    """Crawl newly registered companies from poit.bolagsverket.se into emails.xls."""

    def __init__(self):
        # NOTE(review): hard-coded local geckodriver path — adjust per machine.
        self.bot = webdriver.Firefox(
            executable_path="E:/geckodriver"
        )

    def navigate_and_crawl(self):
        """Drive the public announcement search form and export the results.

        Fills in the date range and announcement categories, runs the search,
        then walks every result page, opening each row to scrape the address,
        registration date, company name, and e-mail into one worksheet per
        page of 'emails.xls'. Rows missing a field are recorded as "null".
        """
        bot = self.bot
        bot.get("https://poit.bolagsverket.se/poit/PublikPoitIn.do")
        time.sleep(5)
        bot.find_element_by_id("nav1-2").click()
        time.sleep(5)
        bot.find_element_by_tag_name("form").find_element_by_tag_name("a").click()
        time.sleep(5)

        # Select a custom time period and fill in the from/to dates.
        search_form = bot.find_element_by_tag_name("form")
        search_form.find_element_by_xpath(
            "//select[@id='tidsperiod']/option[text()='Annan period']"
        ).click()
        wait = WebDriverWait(bot, 10)
        input_from = wait.until(
            EC.element_to_be_clickable((By.XPATH, "//input[@id='from']"))
        )
        input_from.send_keys("2019-09-23")
        # input_from.send_keys(str(datetime.date.today()-datetime.timedelta(1)))
        input_to = wait.until(
            EC.element_to_be_clickable((By.XPATH, "//input[@id='tom']"))
        )
        input_to.send_keys("2019-09-24")
        # input_to.send_keys(str(datetime.date.today()))
        time.sleep(5)

        # Narrow the search to new company registrations.
        amnesomrade = wait.until(
            EC.element_to_be_clickable((By.XPATH, "//select[@id='amnesomrade']"))
        )
        amnesomrade.find_element_by_xpath(
            "//select[@id='amnesomrade']/option[text()='Bolagsverkets registreringar']"
        ).click()
        time.sleep(5)
        kungorelserubrik = wait.until(
            EC.element_to_be_clickable((By.XPATH, "//select[@id='kungorelserubrik']"))
        )
        kungorelserubrik.find_element_by_xpath(
            "//select[@id='kungorelserubrik']/option[text()='Aktiebolagsregistret']"
        ).click()
        time.sleep(5)
        underrubrik = wait.until(
            EC.element_to_be_clickable((By.XPATH, "//select[@id='underrubrik']"))
        )
        underrubrik.find_element_by_xpath(
            "//select[@id='underrubrik']/option[text()='Nyregistreringar']"
        ).click()

        # Search Button
        button_sok = wait.until(
            EC.element_to_be_clickable((By.XPATH, "//input[@id='SokKungorelse']"))
        )
        button_sok.click()
        time.sleep(5)

        number_of_pages = bot.find_element_by_xpath(
            "//div[@class='gotopagediv']/em[@class='gotopagebuttons']"
        ).text.split("av", 1)[1]
        # Bug fix: str.strip()/str.replace() return new strings; the cleaned
        # value was previously discarded, so int() below could fail on the
        # raw padded text.
        number_of_pages = number_of_pages.strip().replace(" ", "")

        number_of_results = bot.find_elements_by_xpath("//table/tbody/tr")

        total_pages = int(number_of_pages)
        wb = Workbook()
        for page in range(total_pages):
            sheet = wb.add_sheet("Sheet" + str(page))
            style = xlwt.easyxf("font: bold 1")
            sheet.write(0, 0, "Post Address", style)
            sheet.write(0, 1, "Bildat", style)
            sheet.write(0, 2, "Foretagsnamn", style)
            sheet.write(0, 3, "Email", style)

            for i in range(len(number_of_results)):
                # Re-query the table each iteration: bot.back() below reloads
                # the page and stales the previous element references.
                result = bot.find_elements_by_xpath("//table/tbody/tr")[i]
                link = result.find_element_by_tag_name("a")
                bot.execute_script("arguments[0].click();", link)
                time.sleep(5)

                information = [bot.find_element_by_class_name("kungtext").text]
                try:
                    postaddress = re.search("Postadress:(.*),", information[0])
                    sheet.write(i + 1, 0, str(postaddress.group(1)))
                    bildat = re.search("Bildat:(.*)\n", information[0])
                    sheet.write(i + 1, 1, str(bildat.group(1)))
                    foretagsnamn = re.search("Företagsnamn:(.*)\n", information[0])
                    sheet.write(i + 1, 2, str(foretagsnamn.group(1)))
                    email = re.search("E-post:(.*)\n", information[0])
                    sheet.write(i + 1, 3, str(email.group(1)))
                    print(
                        postaddress.group(1),
                        bildat.group(1),
                        foretagsnamn.group(1),
                        email.group(1),
                    )
                except AttributeError:
                    # One of the fields is missing on this detail page.
                    print("Email is null")
                    sheet.write(i + 1, 3, "null")
                bot.back()
                time.sleep(5)

            # Save after every page so a mid-crawl failure keeps prior work.
            wb.save("emails.xls")

            # Bug fix: the original XPath "//input/[@id='movenextTop']" is
            # syntactically invalid (slash before the predicate), and the
            # next-page button does not exist on the final page — skip it.
            if page < total_pages - 1:
                print("Going to next page ...")
                button_next = wait.until(
                    EC.element_to_be_clickable(
                        (By.XPATH, "//input[@id='movenextTop']")
                    )
                )
                button_next.click()
                time.sleep(5)
|
||||
|
||||
|
||||
# Launch the browser session and run the full crawl end to end.
crawler = Bolagsverket()
crawler.navigate_and_crawl()
|
Loading…
Reference in new issue