问题描述
我是新手,通过 BeautifulSoup 抓取数据并打印数据。现在我想把下面程序抓取到的内容导出到 Excel/CSV。我是 Python 新手,需要帮助:我已经抓取了多个页面,现在需要将结果导出到 CSV/Excel。
import requests
from urllib3.exceptions import InsecureRequestWarning
requests.packages.urllib3.disable_warnings(InsecureRequestWarning)
from bs4 import BeautifulSoup as bs
def scrap_bid_data():
    """Scrape bid listings from bidplus.gem.gov.in page by page and print
    selected fields of every bid.

    Stops when a page no longer contains the 'pagi_content' div (or the
    div is empty), which marks the end of the listing.
    """
    page_no = 1  # first listing page
    while True:
        print('Hold on creating URL to fetch data...')
        URL = 'https://bidplus.gem.gov.in/bidlists?bidlists&page_no=' + str(page_no)  # dynamic per-page URL
        print('URL created: ' + URL)
        scraped_data = requests.get(URL, verify=False)  # TLS verification disabled on purpose for this site
        soup_data = bs(scraped_data.text, 'lxml')  # parse the page with lxml
        extracted_data = soup_data.find('div', {'id': 'pagi_content'})  # div holding the bid entries
        # find() returns None when the div is missing; len(None) would raise
        # TypeError, so treat "missing" and "empty" the same and stop paginating.
        if extracted_data is None or len(extracted_data) == 0:
            break
        for idx, child in enumerate(extracted_data.contents):
            # The required data sits at the odd child indexes only.
            if idx % 2 == 1:
                bid_data = child.text.strip().split('\n')
                print('-' * 100)
                print(bid_data[0])   # BID number
                print(bid_data[5])   # Items
                print(bid_data[6])   # Quantity required
                print(bid_data[10] + bid_data[12].strip())  # Department name and address
                print(bid_data[16])  # Start date
                print(bid_data[17])  # End date
                print('-' * 100)
        page_no += 1  # move on to the next listing page

scrap_bid_data()
解决方法
您可以使用 pandas 库:
pip install pandas
每个 obj 可以这样构造:
# Collect one dict per scraped bid so the rows can later be fed to pandas.
# `row_fields` is the list produced by splitting one bid's text on '\n'
# (the same shape as `bid_data` in the scraper above) — scraped_rows is the
# sequence of those lists supplied by the caller; TODO confirm.
bid_data = []
for row_fields in scraped_rows:
    # Original had three bugs here: it iterated the builtin `list`, indexed
    # the empty accumulator `bid_data` itself, and referenced the undefined
    # name `bid_data_17` instead of `bid_data[17]`.
    obj = {
        "bid_data_0": row_fields[0],
        "bid_data_5": row_fields[5],
        "bid_data_6": row_fields[6],
        "bid_data_10": row_fields[10],
        "bid_data_12": row_fields[12].strip(),
        "bid_data_17": row_fields[17],
    }
    bid_data.append(obj)
您可以把每条 bid_data 格式化为一个 dict 对象 obj,只在该对象里放入需要导出的字段。
# pandas turns the list of per-bid dicts into a table, and to_csv writes it
# out; index=True keeps the row index as the first CSV column.
import pandas as pd

bid_data = pd.DataFrame(data=bid_data)
bid_data.to_csv(path_or_buf="file_name.csv", index=True, encoding="utf-8")
这是我用过的把数据导出到 CSV 最简单的方法。如果遇到任何问题,请告诉我。
import requests
from urllib3.exceptions import InsecureRequestWarning
import csv

requests.packages.urllib3.disable_warnings(InsecureRequestWarning)
from bs4 import BeautifulSoup as bs

# Open the output file once for the whole run.  newline='' is required by the
# csv module — without it every row is followed by a blank line on Windows.
# NOTE(review): the file handle is never closed, so rows may stay buffered
# until interpreter exit; consider a `with` block around the whole run.
f = csv.writer(open('gembid.csv', 'w', newline=''))
f.writerow(['Bidnumber', 'Items', 'Quantitiy', 'Department', 'Enddate'])  # 'Quantitiy' [sic] kept for downstream compatibility
def scrap_bid_data():
    """Scrape up to 910 listing pages from bidplus.gem.gov.in and write one
    CSV row per bid via the module-level csv writer ``f``.

    Stops early when a page no longer contains the 'pagi_content' div (or
    the div is empty).
    """
    page_no = 1
    while page_no < 911:  # hard upper bound on pages, as in the original post
        print('Hold on creating URL to fetch data...')
        url = 'https://bidplus.gem.gov.in/bidlists?bidlists&page_no=' + str(page_no)
        print('URL created: ' + url)
        scraped_data = requests.get(url, verify=False)  # TLS verification disabled on purpose for this site
        soup_data = bs(scraped_data.text, 'lxml')
        extracted_data = soup_data.find('div', {'id': 'pagi_content'})
        # find() returns None when the div is missing; len(None) would raise
        # TypeError, so treat "missing" and "empty" the same and stop paginating.
        if extracted_data is None or len(extracted_data) == 0:
            break
        for idx, child in enumerate(extracted_data.contents):
            if idx % 2 == 1:  # required data sits at the odd child indexes only
                bid_data = child.text.strip().split('\n')
                bidno = bid_data[0].split(":")[-1]                              # BID number
                items = bid_data[5].split(":")[-1]                              # Items
                qnty = int(bid_data[6].split(':')[1].strip())                   # Quantity required
                dept = (bid_data[10] + bid_data[12].strip()).split(":")[-1]     # Department name and address
                edate = bid_data[17].split("End Date:")[-1]                     # End date
                f.writerow([bidno, items, qnty, dept, edate])
        page_no = page_no + 1

scrap_bid_data()