首页 > 代码库 > python爬虫入门(2)re模块-正则表达式

python爬虫入门(2)re模块-正则表达式

正则表达式


search //匹配第一次遇到符合规则的
匹配IP地址
  1. import re
  2. re.search(r'(([01]{0,1}\d{0,1}\d|2[0-4]\d|25[0-5])\.){3}([01]{0,1}\d{0,1}\d|2[0-4]\d|25[0-5])','192.168.1.1')

findall:匹配所有,把所有匹配到的字符放到一个列表中并返回
  1. >>> p = re.compile('\d+')
  2. >>> p.findall('3只小甲鱼,15条腿,多出的3条在哪里?')
  3. ['3', '15', '3']


编译正则表达式   re.compile()
  1. >>> import re
  2. >>> p = re.compile('[a-z]+')
  3. >>> p
  4. re.compile('[a-z]+')
  5. >>> p.match("")
  6. >>> print(p.match(""))
  7. None
  8. >>> m = p.match('fishc')
  9. >>> m
  10. <_sre.SRE_Match object; span=(0, 5), match='fishc'>
方法:
group()   返回匹配的字符串
start()    返回匹配的开始位置
end()      返回匹配的结束位置
span()    返回一个元组表示匹配位置(开始,结束)
  1. >>> m.group()
  2. 'fishc'
  3. >>> m.start()
  4. 0
  5. >>> m.end()
  6. 5
  7. >>> m.span()
  8. (0, 5)

编译标志,详情请查看http://bbs.fishc.com/thread-57207-1-1.html
  1. 设置了编译标志符
  2. charref = re.compile(r"""
  3. &[#] # 开始数字引用
  4. (
  5. 0[0-7]+ # 八进制格式
  6. | [0-9]+ # 十进制格式
  7. | x[0-9a-fA-F]+ # 十六进制格式
  8. )
  9. ; # 结尾分号
  10. """, re.VERBOSE)
  1. 未设置编译标志符
  2. charref = re.compile("&#(0[0-7]+|[0-9]+|x[0-9a-fA-F]+);")



示例1:从代理网站上爬IP地址
  1. import urllib.request
  2. import re
  3. def open_url(url):
  4. req = urllib.request.Request(url)
  5. req.add_header(‘User-Agent‘,‘Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/48.0.2564.97 Safari/537.36‘)
  6. page = urllib.request.urlopen(req)#获取页面内容
  7. html = page.read().decode(‘utf-8‘)#解码页面内容
  8. return html
  9. def get_img(html):
  10. p = r‘(?:(?:[0,1]?\d?\d|2[0-4]\d|25[0-5])\.){3}(?:[0,1]?\d?\d|2[0-4]\d|25[0-5])‘ #匹配IP地址
  11. iplist = re.findall(p,html)
  12. for each in iplist:
  13. print(each)
  14. if __name__ == ‘__main__‘:
  15. url = "http://www.xicidaili.com/"
  16. get_img(open_url(url))


示例2:爬取妹纸图
  1. import urllib.request
  2. import os
  3. import re
  4. def save_imgs(folder,img_addrs):
  5. for each in img_addrs:
  6. filename=each.split(‘/‘)[-1]
  7. with open(filename,‘wb‘) as f:
  8. img =url_open(each)
  9. f.write(img)
  10. print(1)
  11. print(2)
  12. def url_open(url):
  13. reg= urllib.request.Request(url)
  14. reg.add_header(‘User-Agent‘,‘Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/58.0.3029.110 Safari/537.36‘)
  15. response = urllib.request.urlopen(url)
  16. html= response.read()
  17. return html
  18. def get_page(url):
  19. html = url_open(url).decode(‘utf-8‘)
  20. #a = html.find(‘current-comment-page‘)+23
  21. a = re.search (r‘\[\d{1,4}\]‘,html)
  22. a = a.group()
  23. b=len(a)
  24. a=a[1:b-1]
  25. return a
  26. def find_imgs(url):
  27. html =url_open(url).decode(‘utf-8‘)
  28. img_addrs= []
  29. a = html.find(‘img src=http://www.mamicode.com/‘)
  30. while a != -1:
  31. b = html.find(‘.jpg‘,a,a+255)
  32. if b!= -1 :
  33. if ‘lanya‘ in html[a+9:b+4]:
  34. pass
  35. else:
  36. img_addrs.append(‘http:‘+html[a+9:b+4])
  37. else:
  38. b=a+9
  39. a = html.find(‘img src=http://www.mamicode.com/‘,b)
  40. print(img_addrs)
  41. return img_addrs
  42. def download_mm(folder=‘ooxx‘,pages=4,star=0):
  43. os.mkdir(folder)
  44. os.chdir(folder)
  45. url = ‘http://jandan.net/ooxx/‘ #妹子图地址
  46. #url = ‘http://jandan.net/pic/‘ #无聊图地址
  47. if star!= 0 :
  48. page_num = star
  49. else:
  50. page_num=int(get_page(url))
  51. for i in range(pages):
  52. page_num-=1
  53. page_url = url + ‘page-‘+str(page_num)+‘#comments‘
  54. img_addrs=find_imgs(page_url)
  55. save_imgs(folder,img_addrs)
  56. print(page_url)
  57. if __name__==‘__main__‘:
  58. download_mm()



null


python爬虫入门(2)re模块-正则表达式