Python crawler example in detail
This post works through a Python crawler example, focusing on the crawler architecture and the key modules a crawler is built from: the URL manager, the HTML downloader, and the HTML parser, plus an HTML outputer that writes the results to a page.
Simple crawler architecture
Entry function (the crawler scheduler)
# coding: utf-8
import datetime
from maya_Spider import url_manager, html_downloader, html_parser, html_outputer

class Spider_Main(object):
    def __init__(self):
        # URL manager
        self.urls = url_manager.UrlManager()
        # HTML downloader
        self.downloader = html_downloader.HtmlDownloader()
        # HTML parser
        self.parser = html_parser.HtmlParser()
        # HTML outputer
        self.outputer = html_outputer.HtmlOutputer()

    # crawler scheduling loop
    def craw(self, root_url):
        count = 1
        self.urls.add_new_url(root_url)
        while self.urls.has_new_url():
            try:
                new_url = self.urls.get_new_url()
                print('craw %d : %s' % (count, new_url))
                html_content = self.downloader.download(new_url)
                new_urls, new_data = self.parser.parse(new_url, html_content)
                self.urls.add_new_urls(new_urls)
                self.outputer.collect_data(new_data)
                # stop after 10 pages
                if count == 10:
                    break
                count += 1
            except Exception as e:
                print('craw failed: %s' % e)
        self.outputer.output_html()

if __name__ == '__main__':
    # crawl entry point
    root_url = 'http://baike.baidu.com/view/21087.htm'
    print('Timing started..............')
    start_time = datetime.datetime.now()
    obj_spider = Spider_Main()
    obj_spider.craw(root_url)
    end_time = datetime.datetime.now()
    print('Total time: %ds' % (end_time - start_time).seconds)
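The imports at the top assume the four modules live in a package named maya_Spider next to the entry script. The module file names below are taken from the import line; the package layout and the entry script's file name are assumptions:

maya_Spider/
    __init__.py
    url_manager.py
    html_downloader.py
    html_parser.py
    html_outputer.py
spider_main.py  # the scheduler shown above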
URL manager
class UrlManager(object):
    def __init__(self):
        self.new_urls = set()  # URLs waiting to be crawled
        self.old_urls = set()  # URLs already crawled

    def add_new_url(self, url):
        if url is None:
            return
        # deduplicate against both the pending and the crawled sets
        if url not in self.new_urls and url not in self.old_urls:
            self.new_urls.add(url)

    def add_new_urls(self, urls):
        if urls is None or len(urls) == 0:
            return
        for url in urls:
            self.add_new_url(url)

    def has_new_url(self):
        return len(self.new_urls) != 0

    def get_new_url(self):
        # pop an arbitrary pending URL and mark it as crawled
        new_url = self.new_urls.pop()
        self.old_urls.add(new_url)
        return new_url
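A quick way to see the deduplication behavior is to drive the manager by hand; the URL below is the tutorial's entry page, reused here purely for illustration:

manager = UrlManager()
manager.add_new_url('http://baike.baidu.com/view/21087.htm')
manager.add_new_url('http://baike.baidu.com/view/21087.htm')  # duplicate, ignored
print(manager.has_new_url())  # True
url = manager.get_new_url()   # pops the URL and marks it as crawled
manager.add_new_url(url)      # already crawled, ignored
print(manager.has_new_url())  # False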
Page downloader
import urllib.request

class HtmlDownloader(object):
    def download(self, url):
        if url is None:
            return None
        # pretend to be a browser; some sites reject requests without a User-Agent
        user_agent = 'Mozilla/4.0 (compatible; MSIE 5.5; Windows NT)'
        headers = {'User-Agent': user_agent}
        # build the request
        req = urllib.request.Request(url, headers=headers)
        # fetch the page
        response = urllib.request.urlopen(req)
        # in Python 3, read() returns bytes, not str; convert with bytes.decode
        return response.read().decode('utf-8')
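If the third-party requests library is available, the same downloader logic can be written more compactly. This is an optional variant, not part of the original code; the class name RequestsDownloader is made up, and requests must be installed separately (pip install requests):

import requests

class RequestsDownloader(object):
    def download(self, url):
        if url is None:
            return None
        headers = {'User-Agent': 'Mozilla/4.0 (compatible; MSIE 5.5; Windows NT)'}
        resp = requests.get(url, headers=headers, timeout=10)
        resp.raise_for_status()                  # raise on 4xx/5xx instead of parsing an error page
        resp.encoding = resp.apparent_encoding   # let requests guess the charset
        return resp.text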
Page parser
import re
from urllib.parse import urljoin
from bs4 import BeautifulSoup

class HtmlParser(object):
    def _get_new_urls(self, page_url, soup):
        new_urls = set()
        # article links look like /item/xxx (the old /view/xxx URLs redirect there)
        links = soup.find_all('a', href=re.compile(r'/item/'))
        for link in links:
            new_url = link['href']
            # turn the relative href into an absolute URL
            new_full_url = urljoin(page_url, new_url)
            new_urls.add(new_full_url)
        return new_urls

    # extract the title and the summary
    def _get_new_data(self, page_url, soup):
        res_data = {}
        # url
        res_data['url'] = page_url
        # title tag, e.g. <dd class="lemmaWgt-lemmaTitle-title"><h1>Python</h1>
        title_node = soup.find('dd', class_="lemmaWgt-lemmaTitle-title").find('h1')
        print(title_node.get_text())
        res_data['title'] = title_node.get_text()
        # summary tag: <div class="lemma-summary">
        summary_node = soup.find('div', class_="lemma-summary")
        res_data['summary'] = summary_node.get_text()
        return res_data

    def parse(self, page_url, html_content):
        if page_url is None or html_content is None:
            return None, None
        soup = BeautifulSoup(html_content, 'html.parser')
        new_urls = self._get_new_urls(page_url, soup)
        new_data = self._get_new_data(page_url, soup)
        return new_urls, new_data
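To check the parser without hitting the network, you can feed parse a tiny hand-written page shaped like a Baike article; the HTML fixture below is made up for illustration:

sample = '''
<html><body>
<dd class="lemmaWgt-lemmaTitle-title"><h1>Python</h1></dd>
<div class="lemma-summary">Python is a programming language.</div>
<a href="/item/Guido">Guido</a>
</body></html>
'''
parser = HtmlParser()
new_urls, new_data = parser.parse('http://baike.baidu.com/item/Python', sample)
print(new_urls)             # {'http://baike.baidu.com/item/Guido'}
print(new_data['title'])    # Python
print(new_data['summary'])  # Python is a programming language.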
Page outputer
class HtmlOutputer(object):
    def __init__(self):
        self.datas = []

    def collect_data(self, data):
        if data is None:
            return
        self.datas.append(data)

    def output_html(self):
        fout = open('maya.html', 'w', encoding='utf-8')
        fout.write('<html>')
        fout.write('<head><meta charset="utf-8"></head>')
        fout.write('<body>')
        # table header: one column each for the URL, the title, and the summary
        fout.write('''<table border="1">
<tr>
\t<td>Url</td>
\t<td>Theme</td>
\t<td>Content</td>
</tr>
''')
        for data in self.datas:
            fout.write('<tr>\n')
            fout.write('\t<td>%s</td>' % data['url'])
            fout.write('\t<td><a href="%s">%s</a></td>' % (data['url'], data['title']))
            fout.write('\t<td>%s</td>\n' % data['summary'])
            fout.write('</tr>\n')
        fout.write('</table>')
        fout.write('</body>')
        fout.write('</html>')
        fout.close()
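The outputer can likewise be exercised on its own; the record below is made-up sample data:

outputer = HtmlOutputer()
outputer.collect_data({'url': 'http://baike.baidu.com/item/Python',
                       'title': 'Python',
                       'summary': 'A general-purpose programming language.'})
outputer.output_html()  # writes maya.html to the current directory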
Run result
After the crawl finishes, maya.html in the working directory contains a table listing the URL, title, and summary of each crawled page. The complete code for every module is reproduced in the sections above.
That's all for this article. I hope it helps with your studies, and please continue to support 中文源码网.
