Home > Code Library > Small crawler: download the images from every post in a Baidu Tieba forum

A small crawler that downloads the images from every post in a Baidu Tieba forum.

#!/usr/bin/env python
# -*- coding: utf8 -*-
"""Small crawler: download the images from every post on a Baidu Tieba forum page.

Python 2 script. Fetches the forum front page, extracts post links of the
form /p/<10 digits>, then downloads every .jpg embedded in each post.
NOTE(review): reconstructed from a corrupted web scrape — the original had
smart quotes, an injected "http://www.mamicode.com/" string inside the
href line and the image regex, and a truncated `re.compile` statement.
"""
import sys
reload(sys)
sys.setdefaultencoding('gbk')

import urllib
import urllib2
import re
from bs4 import BeautifulSoup


class GetHtml():
    def __init__(self):
        """Fetch the forum front page and collect post URLs into self.urls."""
        # Masquerade as a browser so the server does not reject the request.
        headers = {
            'User-Agent': 'Mozilla/5.0 (Windows; U; Windows NT 6.1; en-US; rv:1.9.1.6) Gecko/20091201 Firefox/3.5.6'
        }
        # Front page of the photography (摄影) forum.
        html = "http://tieba.baidu.com/f?kw=摄影&ie=utf-8&ie=utf-8&fr=wwwt"
        self.req = urllib2.Request(html)
        # BUG FIX: add_header() takes the header *value*; the original passed
        # the whole headers dict, sending a bogus User-Agent.
        self.req.add_header('User-Agent', headers['User-Agent'])
        content = urllib2.urlopen(self.req).read()
        soup = BeautifulSoup(content)
        aLinks = soup.find_all('a')  # every anchor tag on the page
        self.urls = []
        # Post links look like /p/1234567890 (ten digits).
        link = re.compile(r"/p/\d{10}")
        for aLink in aLinks:
            href = str(aLink.get('href'))  # href attribute (may be None -> "None")
            matches = link.findall(href)
            if matches:
                self.urls += matches  # merge all matches into one flat list

    def getImg(self):
        """Visit each collected post and download every .jpg image it embeds."""
        imgre = re.compile(r'src="(.+?\.jpg)" pic_ext')  # hoisted: same pattern for every post
        # BUG FIX: number images across all posts; the original reset x to 0
        # for each post, so later posts overwrote earlier downloads.
        x = 0
        for u in self.urls:
            page = urllib2.urlopen('http://tieba.baidu.com' + u)
            html = page.read()
            imglist = re.findall(imgre, html)
            for img in imglist:
                urllib.urlretrieve(img, '%s.jpg' % x)
                x += 1


if __name__ == "__main__":
    gh = GetHtml()
    gh.getImg()

  

A small crawler that downloads the images from every post in a Baidu Tieba forum.