Scraping some data with Python and saving it locally
The script below crawls a range of listing pages, pulls each img tag's title and src attributes, and writes one person record per image to people.xml:

import codecs
from xml.dom.minidom import Document

import requests
from bs4 import BeautifulSoup

doc = Document()


def getAllUrl(pageCount):
    # Build the listing URL for a given page number.
    url = 'https://www.xxx.co/xxxx/{page}'
    return url.format(page=pageCount)


def getHtml(pageCount):
    # Fetch the raw HTML of one listing page.
    return requests.get(getAllUrl(pageCount))


def writeXml(people, gName, gImg, wUrl):
    # Append one <person> record (name, image URL, page URL) under the root element.
    person = doc.createElement("person")
    people.appendChild(person)

    name = doc.createElement("name")
    name.appendChild(doc.createTextNode(gName))
    person.appendChild(name)

    img = doc.createElement("imgUrl")
    img.appendChild(doc.createTextNode(gImg))
    person.appendChild(img)

    weburl = doc.createElement("webUrl")
    weburl.appendChild(doc.createTextNode(wUrl))
    person.appendChild(weburl)


if __name__ == '__main__':
    # f = codecs.open('Conker.txt', 'w', 'utf-8')
    filename = "people.xml"
    f = codecs.open(filename, "w", 'utf-8')
    people = doc.createElement("Actresses")
    doc.appendChild(people)
    for count in range(1, 1250):
        html = getHtml(count).text
        soup = BeautifulSoup(html, "lxml")
        for tag in soup.findAll("img"):
            try:
                girlName = tag.attrs["title"]
                girlImage = tag.attrs["src"]
                webUrl = "https://www.xxx.co/xx/" + tag.attrs["src"].split('/')[-1][:-6]
                writeXml(people, girlName, girlImage, webUrl)
            except KeyError:
                # Skip <img> tags that lack a title or src attribute.
                continue
        print("Page " + str(count) + " scraped!")
    f.write(doc.toprettyxml(indent=" "))
    f.close()
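As a quick sanity check, the generated people.xml can be read back and iterated. This is a minimal sketch (not part of the original script) using the standard library's xml.etree.ElementTree, assuming the Actresses/person layout produced above:

import xml.etree.ElementTree as ET

# Parse the file written by the scraper; the root element is <Actresses>.
tree = ET.parse("people.xml")
root = tree.getroot()

# Print each stored record: name, image URL, and page URL.
for person in root.findall("person"):
    name = person.findtext("name")
    img = person.findtext("imgUrl")
    url = person.findtext("webUrl")
    print(name, img, url)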