## A Simple Example
The following example is wrapped up as a class that collects double color ball (双色球) lottery data from 中彩网 (this version only prints the page links; it does not scrape their contents).
Key points:
* [ ] Parsing URLs with urllib.parse (see the short sketch right after this list)
* [ ] Recursive calls (used for the download retries; a stripped-down sketch follows the full example)
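As a quick illustration of the first point, here is a minimal sketch of `urllib.parse.urlparse`; the class below uses the same call to pick out the URL's scheme when configuring a proxy. The example URL is just one of the list pages scraped below:

~~~
from urllib.parse import urlparse

# Split a URL into its components: scheme, network location, path, etc.
parts = urlparse('http://kaijiang.zhcw.com/zhcw/html/ssq/list_2.html')
print(parts.scheme)  # 'http'
print(parts.netloc)  # 'kaijiang.zhcw.com'
print(parts.path)    # '/zhcw/html/ssq/list_2.html'
~~~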
~~~
import re
import urllib.parse
import urllib.request
from urllib.error import URLError
from bs4 import BeautifulSoup


class GetDoubleColorBallNumber(object):
    def __init__(self):
        urls = self.getUrls()
        for url in urls:
            print(url)

    # Collect the URL of every page that needs to be scraped
    def getUrls(self):
        urls = []
        url = 'http://kaijiang.zhcw.com/zhcw/html/ssq/list.html'
        html = self.getResponseContent(url)
        soup = BeautifulSoup(html, 'lxml')
        # The last tag whose name contains 'p' holds the pagination block;
        # its <strong> child carries the total page count
        tag = soup.find_all(re.compile('p'))[-1]
        pages = tag.strong.get_text()
        print('Total pages: ' + pages)
        for i in range(1, int(pages) + 1):
            url = 'http://kaijiang.zhcw.com/zhcw/html/ssq/list_' + str(i) + '.html'
            urls.append(url)
        return urls

    def getResponseContent(self, url):
        user_agent = 'Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 6.1) QQBrowser/6.0'
        html = self.download(url, num_retries=4, user_agent=user_agent)
        return html

    # Download the page content, recursing to retry on 5xx server errors
    def download(self, url, user_agent='wswp', proxy=None, num_retries=3):
        print("Downloading: %s" % url)
        headers = {'User-agent': user_agent}
        request = urllib.request.Request(url, headers=headers)
        opener = urllib.request.build_opener()
        if proxy:
            # Route requests for this URL's scheme through the proxy
            proxy_params = {urllib.parse.urlparse(url).scheme: proxy}
            opener.add_handler(urllib.request.ProxyHandler(proxy_params))
        html = None
        try:
            html = opener.open(request).read()
            html = html.decode('utf-8')  # Python 3: decode bytes to str
        except URLError as e:
            if num_retries > 0:
                print("Download error (reason: %s)" % e.reason)
                html = None
                if hasattr(e, 'code') and 500 <= e.code < 600:
                    print("Retrying ...")
                    return self.download(url, user_agent, proxy, num_retries - 1)
        return html


if __name__ == '__main__':
    GetDoubleColorBallNumber()
~~~
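The recursion in `download` is worth isolating: on a 5xx server error the method calls itself with `num_retries - 1`, so the retry budget shrinks on each attempt until it hits zero. Below is a stripped-down sketch of the same control flow; the `fetch` name and the simulated failure are hypothetical, stand-ins for a real network call:

~~~
def fetch(url, num_retries=3):
    try:
        # A real network call would go here; we simulate a 5xx failure
        raise OSError("HTTP 503: service unavailable")
    except OSError as e:
        if num_retries > 0:
            print("Error: %s -- retrying (%d attempts left)" % (e, num_retries))
            return fetch(url, num_retries - 1)  # recurse with a smaller budget
        return None  # budget exhausted, give up

fetch('http://example.com')  # prints three retry messages, then returns None
~~~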