## 1. What LinkExtractor does

> 1. It extracts URLs, using regular expressions, from responses (the data brought back by the downloader) of rules with follow=True, so the spider can crawl them.
> 2. Every LinkExtractor has a single public method, extract_links, which receives a Response object and returns a list of scrapy.link.Link objects.
> 3. LinkExtractors are instantiated only once; their extract_links method is then called many times, with different responses, to extract links.
> 4. LinkExtractor is usually used in the CrawlSpider and RedisCrawlSpider classes, but it can also be used in a plain Spider, simply to extract links (see the sketch after the usage example below).

## 2. LinkExtractor source code

1. How to use it

~~~
from scrapy.linkextractors import LinkExtractor
from scrapy.spiders import Rule  # Rule lives in scrapy.spiders; rules belongs inside a CrawlSpider subclass

rules = (
    # Only links matching the rule are extracted; the pages are not parsed (no callback).
    # follow=True means the matched pages are requested and crawled one level deeper;
    # follow=False means links on those pages are neither extracted nor requested.
    Rule(LinkExtractor(allow=r'categories/\d*?-j\d*?.html'), follow=True),
    Rule(LinkExtractor(allow=r'list/.*?.html'), follow=True),
    Rule(LinkExtractor(allow=r'product/\d+?.html'), follow=True),
    Rule(LinkExtractor(allow=r'store/\d+?/pc_index.shtml'), follow=False, callback='parse_item'),
)
~~~
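Points 2 and 4 above say that extract_links takes a Response and returns Link objects, and that a LinkExtractor can also be used in a plain Spider. Below is a minimal sketch of that direct usage; the spider name, start URL, and item fields are placeholders, only the extract_links call itself comes from the text above.

~~~
import scrapy
from scrapy.linkextractors import LinkExtractor


class StoreSpider(scrapy.Spider):
    name = 'store_example'                # placeholder name
    start_urls = ['http://example.com/']  # placeholder start URL

    # instantiated once and reused for every response (point 3 above)
    link_extractor = LinkExtractor(allow=r'store/\d+?/pc_index.shtml')

    def parse(self, response):
        # extract_links(response) returns a list of scrapy.link.Link objects
        for link in self.link_extractor.extract_links(response):
            yield scrapy.Request(link.url, callback=self.parse_item)

    def parse_item(self, response):
        yield {'url': response.url}
~~~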
### 2.1 The scrapy.linkextractors package

![](https://box.kancloud.cn/77293bc71247ea7190f2e1e020a2ea19_1515x835.png)

You can see that the LinkExtractor actually being used is LxmlLinkExtractor:

![](https://box.kancloud.cn/762acfa139b5b0b70404e9894ee40915_1681x430.png)

~~~
"""
scrapy.linkextractors

This package contains a collection of Link Extractors.

For more info see docs/topics/link-extractors.rst
"""
import re

from six.moves.urllib.parse import urlparse
from parsel.csstranslator import HTMLTranslator
from w3lib.url import canonicalize_url

from scrapy.utils.misc import arg_to_iter
from scrapy.utils.url import (
    url_is_from_any_domain, url_has_any_extension,
)

# common file extensions that are not followed if they occur in links
IGNORED_EXTENSIONS = [
    # images
    'mng', 'pct', 'bmp', 'gif', 'jpg', 'jpeg', 'png', 'pst', 'psp', 'tif',
    'tiff', 'ai', 'drw', 'dxf', 'eps', 'ps', 'svg',

    # audio
    'mp3', 'wma', 'ogg', 'wav', 'ra', 'aac', 'mid', 'au', 'aiff',

    # video
    '3gp', 'asf', 'asx', 'avi', 'mov', 'mp4', 'mpg', 'qt', 'rm', 'swf',
    'wmv', 'm4a', 'm4v',

    # office suites
    'xls', 'xlsx', 'ppt', 'pptx', 'pps', 'doc', 'docx', 'odt', 'ods', 'odg',
    'odp',

    # other
    'css', 'pdf', 'exe', 'bin', 'rss', 'zip', 'rar',
]

_re_type = type(re.compile("", 0))

# lambda (anonymous function) used to check whether a URL matches any of the rule regexes
_matches = lambda url, regexs: any(r.search(url) for r in regexs)
# checks the URL scheme: only these three protocols are considered valid
_is_valid_url = lambda url: url.split('://', 1)[0] in {'http', 'https', 'file'}


class FilteringLinkExtractor(object):

    _csstranslator = HTMLTranslator()

    def __init__(self, link_extractor, allow, deny, allow_domains, deny_domains,
                 restrict_xpaths, canonicalize, deny_extensions, restrict_css):

        self.link_extractor = link_extractor

        self.allow_res = [x if isinstance(x, _re_type) else re.compile(x)
                          for x in arg_to_iter(allow)]
        self.deny_res = [x if isinstance(x, _re_type) else re.compile(x)
                         for x in arg_to_iter(deny)]

        self.allow_domains = set(arg_to_iter(allow_domains))
        self.deny_domains = set(arg_to_iter(deny_domains))

        self.restrict_xpaths = tuple(arg_to_iter(restrict_xpaths))
        self.restrict_xpaths += tuple(map(self._csstranslator.css_to_xpath,
                                          arg_to_iter(restrict_css)))

        self.canonicalize = canonicalize
        if deny_extensions is None:
            deny_extensions = IGNORED_EXTENSIONS
        self.deny_extensions = {'.' + e for e in arg_to_iter(deny_extensions)}

    def _link_allowed(self, link):
        if not _is_valid_url(link.url):
            return False
        if self.allow_res and not _matches(link.url, self.allow_res):
            return False
        if self.deny_res and _matches(link.url, self.deny_res):
            return False
        parsed_url = urlparse(link.url)
        if self.allow_domains and not url_is_from_any_domain(parsed_url, self.allow_domains):
            return False
        if self.deny_domains and url_is_from_any_domain(parsed_url, self.deny_domains):
            return False
        if self.deny_extensions and url_has_any_extension(parsed_url, self.deny_extensions):
            return False
        return True

    def matches(self, url):
        if self.allow_domains and not url_is_from_any_domain(url, self.allow_domains):
            return False
        if self.deny_domains and url_is_from_any_domain(url, self.deny_domains):
            return False

        allowed = (regex.search(url) for regex in self.allow_res) if self.allow_res else [True]
        denied = (regex.search(url) for regex in self.deny_res) if self.deny_res else []
        return any(allowed) and not any(denied)

    def _process_links(self, links):
        links = [x for x in links if self._link_allowed(x)]
        if self.canonicalize:
            for link in links:
                link.url = canonicalize_url(link.url)
        links = self.link_extractor._process_links(links)
        return links

    def _extract_links(self, *args, **kwargs):
        return self.link_extractor._extract_links(*args, **kwargs)


# Top-level imports
from .lxmlhtml import LxmlLinkExtractor as LinkExtractor
~~~

The selector contains Element objects:

![](https://box.kancloud.cn/20c3ce8c90f377673b06f202cf458e0b_1004x606.png)
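Tying the source back to the usage example: the allow/deny patterns passed to LinkExtractor are compiled into allow_res/deny_res in FilteringLinkExtractor.__init__, matches() checks a URL against them (plus the domain sets), and _link_allowed() additionally filters by scheme and IGNORED_EXTENSIONS during extraction. A small sketch of matches() behaviour; the deny pattern and the URLs are made up for illustration:

~~~
from scrapy.linkextractors import LinkExtractor  # i.e. LxmlLinkExtractor

# allow pattern reused from the Rule example above; deny pattern is illustrative only
extractor = LinkExtractor(allow=r'product/\d+?.html', deny=r'/draft/')

print(extractor.matches('http://example.com/product/123.html'))        # True: an allow regex matches
print(extractor.matches('http://example.com/draft/product/123.html'))  # False: a deny regex matches
print(extractor.matches('http://example.com/category/index.html'))     # False: no allow regex matches
~~~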