python - 下载大约 15000 个 URL Python 的元内容 - 线程
问题描述
我的 csv 中大约有 30000 个网址。我需要检查每个 url 是否存在元内容。我使用 requests_cache 将响应缓存到 sqlite 数据库，但即使启用缓存后仍需要大约 24 小时，因此我转向并发。我想问题出在 out = executor.map(download_site, sites, headers) 这一行，但不知道如何解决它。
AttributeError:“str”对象没有属性“items”
import concurrent.futures
import requests
import threading
import time
import pandas as pd
import requests_cache
from PIL import Image
from io import BytesIO
# Per-thread storage so each worker thread gets its own requests.Session.
thread_local = threading.local()

df = pd.read_csv("test.csv")
# Pandas Series -> plain list in one call instead of a manual append loop.
sites = df['URLS'].tolist()

# Fixed desktop UA so servers return the same markup for every request.
user_agent = 'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US; rv:1.9.0.7) Gecko/2009021910 Firefox/3.0.7'
headers = {'User-Agent': user_agent}

# Transparently cache every GET in SQLite for 30 days (2592000 s) so
# repeat runs skip the network entirely.
requests_cache.install_cache('network_call', backend='sqlite', expire_after=2592000)
def getSess():
    """Return this thread's requests.Session, creating it on first use."""
    session = getattr(thread_local, "session", None)
    if session is None:
        session = requests.Session()
        thread_local.session = session
    return session
def networkCall(url, headers):
    """Fetch *url* with this thread's session and return the raw body bytes."""
    print("In Download site")
    resp = getSess().get(url, headers=headers)
    try:
        # Reading .content fully consumes the response before we close it.
        print(f"Read {len(resp.content)} from {url}")
        return resp.content
    finally:
        # Equivalent to the original `with session.get(...)` context manager.
        resp.close()
def getMeta(meta_res):
    """Return a 'Present'/'Absent' flag for each description/keywords meta tag.

    Parameters:
        meta_res: iterable of parsed documents exposing ``find_all('meta')``
            (e.g. BeautifulSoup objects) whose tags expose an ``attrs`` dict.

    Returns:
        list[str]: one entry per matching meta tag — 'Present' when its
        ``content`` attribute is non-empty, 'Absent' otherwise.

    Fixes over the original:
      * The labels were inverted: non-empty content appended "Absent".
      * ``out`` was a module-level list, so results accumulated across
        calls; it is now local, so each call returns a fresh list.
      * ``tag.attrs['content']`` raised KeyError when the attribute was
        missing; ``.get`` treats a missing attribute as empty.
    """
    out = []
    for each in meta_res:
        for tag in each.find_all('meta'):
            name = tag.attrs.get('name', '')
            if name.strip().lower() in ['description', 'keywords']:
                if tag.attrs.get('content', '') != '':
                    out.append("Present")
                else:
                    out.append("Absent")
    return out
def allSites(sites):
    """Download every URL concurrently and return the list of response bodies.

    Bug fix: the original called ``executor.map(networkCall, sites, headers)``.
    ``map`` zips its iterables, so each URL was paired with one *key* of the
    headers dict (the string 'User-Agent'), and requests then failed with
    ``AttributeError: 'str' object has no attribute 'items'`` when it tried
    to treat that string as a headers mapping. The module-level ``headers``
    dict is now bound into every call instead.
    """
    with concurrent.futures.ThreadPoolExecutor(max_workers=50) as executor:
        results = executor.map(lambda url: networkCall(url, headers), sites)
        return list(results)
if __name__ == "__main__":
    # Hard-coded test set: two URLs repeated 15000 times (30000 total).
    sites = [
        "https://www.jython.org",
        "http://olympus.realpython.org/dice",
    ] * 15000

    start_time = time.time()
    list_meta = allSites(sites)
    print("META ", list_meta)
    duration = time.time() - start_time
    print(f"Downloaded {len(sites)} in {duration} seconds")

    # NOTE(review): list_meta holds raw response bytes from networkCall,
    # but getMeta expects parsed documents with .find_all — confirm the
    # responses are run through an HTML parser before this call.
    output = getMeta(list_meta)
    df["is it there"] = pd.Series(output)
    df.to_csv('new.csv', index=False, header=True)
解决方案
我试着复现了你的功能。以下代码大约在 4 分钟内执行完毕：
import concurrent.futures
import queue
import threading
import time

import requests
from bs4 import BeautifulSoup as BS
# Test workload: two URLs duplicated to 30000 entries total.
URLs = [
    "https://www.jython.org",
    "http://olympus.realpython.org/dice",
] * 15_000

# Desktop Firefox UA sent with every request.
user_agent = 'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US; rv:1.9.0.7) Gecko/2009021910 Firefox/3.0.7'
headers = {'User-Agent': user_agent}
class SessionCache():
    """A bounded, thread-safe pool of requests.Session objects.

    Sessions are handed out via an internal queue; at most ``cachesize``
    sessions are ever created. When the pool is exhausted, getSession()
    blocks until another worker returns one with putSession().

    Fix over the original: the "may I create another session?" check and
    the ``self.sessions += 1`` increment were unsynchronized, so two
    threads racing through getSession() could both pass the check and
    overshoot the cachesize bound. A lock now makes the check-and-create
    step atomic.
    """

    def __init__(self, cachesize=20):
        self.cachesize = cachesize  # max sessions ever created
        self.sessions = 0           # sessions created so far
        self.q = queue.Queue()      # idle sessions ready for reuse
        self._lock = threading.Lock()  # guards the create-new-session path

    def getSession(self):
        """Return a pooled session, lazily creating up to cachesize of them."""
        # Fast path: an idle session is already queued.
        try:
            return self.q.get(block=False)
        except queue.Empty:
            pass
        # Slow path: atomically decide whether we may still grow the pool.
        with self._lock:
            if self.sessions < self.cachesize:
                self.q.put(requests.Session())
                self.sessions += 1
        # Blocks here until a session is available (pool at capacity).
        return self.q.get()

    def putSession(self, session):
        """Return *session* to the pool for another worker to reuse."""
        self.q.put(session)
# Shared session pool for all worker threads.
CACHE = SessionCache()

def doGet(url):
    """Fetch *url* and report whether it carries usable meta content.

    Returns:
        tuple[str, str]: (url, 'Present') when a description/keywords meta
        tag has non-empty content, (url, 'Absent') when none does, or
        (url, error message) when the request fails.
    """
    # Acquire the session BEFORE the try block: in the original, a failure
    # inside getSession() left `session` unbound and the finally clause
    # then raised NameError, masking the real error.
    session = CACHE.getSession()
    try:
        response = session.get(url, headers=headers)
        response.raise_for_status()
        # NOTE(review): 'lxml' requires the lxml package; 'html.parser'
        # is the stdlib fallback if it is not installed.
        soup = BS(response.text, 'lxml')
        for meta in soup.find_all('meta'):
            if (name := meta.attrs.get('name', None)):
                if name.strip().lower() in ['description', 'keywords']:
                    if meta.attrs.get('content', '') != '':
                        return url, 'Present'
        return url, 'Absent'
    except Exception as e:
        # Best-effort: report the failure instead of killing the pool.
        return url, str(e)
    finally:
        # Always hand the session back so other workers can proceed.
        CACHE.putSession(session)
def main():
    """Fan out doGet over every URL and print each result with timing."""
    start = time.perf_counter()
    with concurrent.futures.ThreadPoolExecutor() as pool:
        for result in pool.map(doGet, URLs):
            print(f'{result[0]} -> {result[1]}')
    elapsed = time.perf_counter() - start
    print(f'Duration={elapsed:.4f}s')

if __name__ == '__main__':
    main()
推荐阅读
- java - Kafka Spark Streaming LocationStrategies java class def not found 异常
- sql - 如何仅在表 b 中选择记录
- python - AWS SAM 未与 Python 3.7 一起安装
- python-3.x - 如何使用 boto3 过滤器获取布尔值
- javascript - AngularJS ng-repeat 使用新的 API 调用刷新
- python - 仅当它们存在于 python 中时才将命令行选项添加到子进程
- html - Bootstrap Navbar Dropdowns 每次打开第一个下拉菜单
- c# - 在另一个线程上继续时异步
- c# - 读取和写入具有命名空间的 XML 文件,而无需遍历每个元素
- regex - url中的正则表达式。django 2.0中的py