问题描述
我尝试使用 asyncio
库以及来自 Internet 的一些随机代理向网站执行并行 http 请求,但我注意到请求仍在按顺序执行,并且每个请求请求花费的时间太长。
from itertools import cycle
import pandas as pd
import asyncio
from requests import get,Session
from requests.exceptions import ProxyError
from bs4 import BeautifulSoup
from lxml.html import fromstring
from traceback import print_exc
def get_proxies():
url = 'https://free-proxy-list.net/'
response = get(url)
parser = fromstring(response.text)
proxies = set()
for i in parser.xpath('//tbody/tr')[:10]:
if i.xpath('.//td[7][contains(text(),"yes")]'):
# Grabbing IP and corresponding PORT
proxy = ":".join([i.xpath('.//td[1]/text()')[0],i.xpath('.//td[2]/text()')[0]])
proxies.add(proxy)
return proxies
proxies = get_proxies()
proxy_pool = cycle(proxies)
df = pd.DataFrame(columns=["col1"])
def addRow(row):
df.loc[df["col1"].count()] = row
df.to_excel("result.xlsx",index=False)
async def onPartNum(opn):
while True:
try:
proxy = next(proxy_pool)
response = get("some-url-{}".format(opn),proxies={"https": proxy})
soup = BeautifulSoup(response.text,"html.parser")
if soup.find("p",{"id":"searchFoundZeroItem"}):
print("== No results found for",opn)
break
pnames = soup.find_all("span",{"class":"item-name"})
for i in range(len(pnames)):
pname = pnames[i].text
addRow([pname])
print("== Done with",opn)
break
except ProxyError:
pass
except:
print("Failed to load data for {}".format(opn))
print_exc()
break
async def main():
xl = pd.ExcelFile('input.xlsx')
sheet = xl.parse('1st round of scraping')
opns = [str(number) for number in sheet["Number"].values.tolist()]
asyncio.gather(*[onPartNum(opn) for opn in opns])
asyncio.run(main())
我做错了吗?
另外,有人可以推荐一个相对较快的免费随机代理服务吗(因为这些代理需要超过 20 秒并且只在几次重试后才能工作)
解决方法
暂无找到可以解决该程序问题的有效方法,小编努力寻找整理中!
如果你已经找到好的解决方法,欢迎将解决方案带上本链接一起发送给小编。
小编邮箱:dio#foxmail.com (将#修改为@)