'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/535.1 (KHTML, like Gecko) Chrome/14.0.835.163 \
'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.1; WOW64; Trident/4.0; SLCC2; .NET CLR 2.0.50727; \
.NET CLR 3.5.30729; .NET CLR 3.0.30729; Media Center PC 6.0; .NET4.0C; InfoPath.3)',
'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/535.1 (KHTML, like Gecko) Chrome/14.0.835.163 Safari/535.1']
.*?title="(.*?)".*?class="star">(.*?).*?releasetime">(.*?)', re.S)
r_list = pattren.findall(html)
# rlist: [('霸王别姬', '\n 主演:张国荣,张丰毅,巩俐\n ', '上映时间:1993-01-01'),(...),(...)]
self.write_page(r_list) # 写入csv文件
# # 保存,打印输出
# def write_page(self,r_list):
# one_film_dict = {}
# for rt in r_list:
# one_film_dict['name'] = rt[0].strip()
# one_film_dict['star'] = rt[1].strip()
# one_film_dict['time'] = rt[2].strip()[5:15]
#
# print(one_film_dict)
# 保存到csv文件(writerows) -- 推荐使用此方法
def write_page(self, r_list):
    """Append one page of parsed films to maoyan.csv (writerows).

    r_list: list of 3-tuples (name, star, releasetime) from the page
    regex; star/releasetime carry surrounding whitespace and the time
    field starts with the '上映时间:' label.
    """
    # Collect the cleaned rows first so a single writerows() call
    # performs all the I/O: [(), (), ()]
    film_list = []
    for rt in r_list:
        # [5:15] drops the 5-char '上映时间:' label, keeping the
        # 10-char date, e.g. '1993-01-01'
        t = (rt[0], rt[1].strip(), rt[2].strip()[5:15])
        film_list.append(t)
    # newline='' is required by the csv module; utf-8 keeps the Chinese
    # titles portable (the locale default encoding may not handle them).
    with open('maoyan.csv', 'a', newline='', encoding='utf-8') as f:
        writer = csv.writer(f)
        writer.writerows(film_list)
def main(self):
    """Fetch each board page (offset 0, 10, 20, 30) in turn."""
    for page_url in (self.url.format(n) for n in range(0, 31, 10)):
        self.get_page(page_url)
        # polite random pause so requests are not fired back-to-back
        time.sleep(random.randint(1, 3))
        print('第%d页爬取完成' % self.page)
        self.page += 1
if __name__ == '__main__':
    # Time the whole crawl and report elapsed seconds.
    t_begin = time.time()
    MaoyanSpider().main()
    t_end = time.time()
    print('执行时间: %.2f' % (t_end - t_begin))
数据持久化存储(MySQL数据库)
让我们来回顾一下pymysql模块的基本使用
import pymysql

# Connect with keyword arguments: positional connect() arguments were
# removed in PyMySQL 1.0, so this form works on both old and new versions.
db = pymysql.connect(host='localhost', user='root', password='123456',
                     database='maoyandb', charset='utf8')
cursor = db.cursor()  # cursor object used to run SQL
# The second execute() argument is a parameter list filling the %s slots
# (parameterized query -- never build the SQL string by concatenation).
cursor.execute('insert into film values(%s,%s,%s)', ['霸王别姬', '张国荣', '1993'])
db.commit()  # commit the transaction so the insert is persisted
cursor.close()  # release the cursor
db.close()  # close the connection
让我们来回顾一下pymysql中executemany()的用法
import pymysql

# Connection object -- keyword arguments (the positional form was
# removed in PyMySQL 1.0); no database selected here on purpose.
db = pymysql.connect(host='localhost', user='root', password='123456',
                     charset='utf8')
cursor = db.cursor()  # cursor object
ins_list = []  # collects every row to insert
for i in range(2):
    name = input('请输入第%d个学生姓名:' % (i + 1))
    age = input('请输入第%d个学生年龄:' % (i + 1))
    ins_list.append([name, age])
ins = 'insert into t3 values(%s,%s)'  # parameterized insert statement
# One executemany() call inserts every row in a single DB round trip,
# which is far cheaper than one execute() per row.
cursor.executemany(ins, ins_list)
db.commit()  # commit so the inserts are persisted
cursor.close()  # close the cursor
db.close()  # close the connection
ins = 'insert into maoyanfilm values(%s,%s,%s)'
# execute()/executemany() take the SQL statement as the FIRST argument;
# the original snippet passed only the data, which raises a TypeError.
cursor.execute(ins, ['霸王', '国荣', '1991'])
# executemany() inserts all rows in one DB round trip.
cursor.executemany(ins, [
    ['月光宝盒', '周星驰', '1993'],
    ['大圣娶亲', '周星驰', '1993']])
练习:把猫眼电影案例中电影信息存入MySQL数据库中(尽量使用executemany方法)
from urllib import request
import time
import re
import pymysql
import random
class MaoyanSpider(object):
    """Maoyan top-100 board spider that stores films into MySQL.

    One page of results is written per write_page() call using a single
    executemany() round trip.
    """

    def __init__(self):
        self.page = 1  # page counter, used only for progress output
        self.url = 'http://maoyan.com/board/4?offset={}'
        # Implicit string concatenation keeps each UA on one logical line;
        # the original backslash continuations embedded runs of spaces
        # inside the User-Agent values.
        self.ua_list = [
            'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/535.1 '
            '(KHTML, like Gecko) Chrome/14.0.835.163 Safari/535.1',
            'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:6.0) Gecko/20100101 Firefox/6.0',
            'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.1; WOW64; Trident/4.0; '
            'SLCC2; .NET CLR 2.0.50727; .NET CLR 3.5.30729; .NET CLR 3.0.30729; '
            'Media Center PC 6.0; .NET4.0C; InfoPath.3)']
        # Connection and cursor are created once and reused for every page.
        # Keyword arguments are required since PyMySQL 1.0 removed the
        # positional connect() signature.
        self.db = pymysql.connect(host='localhost', user='root',
                                  password='123456', database='maoyandb',
                                  charset='utf8')
        self.cursor = self.db.cursor()

    def get_page(self, url):
        """Download *url* and hand the decoded HTML to parse_page()."""
        # pick a random User-Agent for each request
        headers = {'User-Agent': random.choice(self.ua_list)}
        req = request.Request(url=url, headers=headers)
        res = request.urlopen(req)
        html = res.read().decode('utf-8')
        self.parse_page(html)

    def parse_page(self, html):
        """Extract (name, star, releasetime) tuples and store them."""
        # NOTE(review): the original pattern string was garbled (HTML tags
        # lost during extraction); reconstructed from the standard Maoyan
        # board markup -- confirm against a live page.
        pattern = re.compile(
            r'<dd>.*?title="(.*?)".*?class="star">(.*?)</p>'
            r'.*?releasetime">(.*?)</p>', re.S)
        # r_list: [('霸王别姬','张国荣','1993'),(),()]
        r_list = pattern.findall(html)
        print(r_list)
        self.write_page(r_list)

    def write_page(self, r_list):
        """Insert one page of films with a single executemany() call."""
        film_list = []
        ins = 'insert into filmtab values(%s,%s,%s)'  # parameterized insert
        for rt in r_list:
            # [5:15] drops the '上映时间:' label, keeping the 10-char date
            one_film = [rt[0], rt[1].strip(), rt[2].strip()[5:15]]
            film_list.append(one_film)
        # one DB I/O operation stores the whole page
        self.cursor.executemany(ins, film_list)
        self.db.commit()

    def main(self):
        """Crawl offsets 0,10,20,30, then release the DB resources."""
        for offset in range(0, 31, 10):
            url = self.url.format(offset)
            self.get_page(url)
            time.sleep(random.randint(1, 3))
            print('第%d页爬取完成' % self.page)
            self.page += 1
        # close cursor and connection only after all pages are done
        self.cursor.close()
        self.db.close()
if __name__ == '__main__':
    # Time the whole crawl and report elapsed seconds.
    t_begin = time.time()
    MaoyanSpider().main()
    t_end = time.time()
    print('执行时间: %.2f' % (t_end - t_begin))
让我们来做个SQL命令查询
1、查询20年以前的电影的名字和上映时间
select name,time from filmtab where time<(now()-interval 20 year);
2、查询1990-2000年的电影名字和上映时间
select name,time from filmtab where time>='1990-01-01' and time<='2000-12-31';
让我们来复习一下MongoDB数据库
import pymongo

# 1. client (connection) object
client = pymongo.MongoClient(host='127.0.0.1', port=27017)
# 2. database object, then 3. collection object
collection = client['maoyandb']['filmtab']
# 4. insert one document into the collection
collection.insert_one({'name': '赵敏'})
练习:把猫眼电影案例中电影信息存入MongoDB数据库中
from urllib import request
import re
import time
import random
import pymongo
class MaoyanSpider(object):
    """Maoyan top-100 board spider that stores films into MongoDB."""

    def __init__(self):
        self.url = 'http://maoyan.com/board/4?offset={}'
        # film counter (kept for interface compatibility; not updated here)
        self.num = 0
        # MongoDB objects: client -> database -> collection
        self.conn = pymongo.MongoClient('localhost', 27017)
        self.db = self.conn['maoyandb']
        self.myset = self.db['filmset']
        # Implicit string concatenation keeps each UA intact; the original
        # backslash continuation embedded runs of spaces inside the value.
        self.ua_list = [
            'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/535.1 '
            '(KHTML, like Gecko) Chrome/14.0.835.163 Safari/535.1',
            'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:6.0) Gecko/20100101 Firefox/6.0',
            'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.1; WOW64; Trident/4.0; '
            'SLCC2; .NET CLR 2.0.50727; .NET CLR 3.5.30729; .NET CLR 3.0.30729; '
            'Media Center PC 6.0; .NET4.0C; InfoPath.3)']

    def get_html(self, url):
        """Download *url* with a random User-Agent and parse the HTML."""
        headers = {'User-Agent': random.choice(self.ua_list)}
        req = request.Request(url=url, headers=headers)
        res = request.urlopen(req)
        html = res.read().decode('utf-8')
        # parse directly -- no intermediate storage of raw HTML
        self.parse_html(html)

    def parse_html(self, html):
        """Extract (name, star, releasetime) tuples from board HTML."""
        # NOTE(review): the original pattern string was garbled (HTML tags
        # lost during extraction); reconstructed from the standard Maoyan
        # board markup -- confirm against a live page.
        re_bds = (r'<dd>.*?title="(.*?)".*?class="star">(.*?)</p>'
                  r'.*?releasetime">(.*?)</p>')
        pattern = re.compile(re_bds, re.S)
        # film_list: [('霸王别姬','张国荣','1993'),()]
        film_list = pattern.findall(html)
        self.write_html(film_list)

    def write_html(self, film_list):
        """Insert each film as one MongoDB document."""
        for film in film_list:
            film_dict = {
                'name': film[0].strip(),
                'star': film[1].strip(),
                # [5:15] drops the '上映时间:' label, keeping the date
                'time': film[2].strip()[5:15]
            }
            self.myset.insert_one(film_dict)

    def main(self):
        """Crawl offsets 0,10,20,30 with a short random delay."""
        for offset in range(0, 31, 10):
            url = self.url.format(offset)
            self.get_html(url)
            time.sleep(random.randint(1, 2))
if __name__ == '__main__':
    # Time the whole crawl and report elapsed seconds.
    t_begin = time.time()
    MaoyanSpider().main()
    t_end = time.time()
    print('执行时间:%.2f' % (t_end - t_begin))
电影天堂案例(二级页面抓取)
1、查看是否为静态页面,是否为动态加载
右键 - 查看网页源代码
2、确定URL地址
百度搜索 :电影天堂 - 2019年新片 - 更多
3、目标
*********一级页面***********
1、电影名称
2、电影链接
*********二级页面***********
1、下载链接
4、步骤
找URL规律
第1页 :http://www.dytt8.net/html/gndy/dyzz/list_23_1.html
第2页 :http://www.dytt8.net/html/gndy/dyzz/list_23_2.html
第n页 :http://www.dytt8.net/html/gndy/dyzz/list_23_n.html
写正则表达式
1、一级页面正则表达式(电影名称、电影详情链接)
.*?(.*?)
2、二级页面正则表达式
.*?(.*?)', re.S) # film_list: [('详情链接','名称'),()] film_list = pattern.findall(html) # [('/html/gndy/dyzz/20190806/58956.html', '019年惊悚动作《报仇雪恨/血债血偿》BD中英双字幕'),(),()] ins = 'insert into filmsky values(%s,%s)' for film in film_list: film_name = film[1] film_link = 'http://www.dytt8.net' + film[0] # 2. 拿到详情链接后,再去获取详情链接html,提取下载链接 download_link = self.parse_two_html(film_link) self.cursor.execute(ins, [film_name, film_link]) self.db.commit() # 打印测试 d = {'电影名称': film_name, '下载链接': download_link} print(d) # {'电影名称': '019年惊悚动作《报仇雪恨/血债血偿》BD中英双字幕', '下载链接': 'ftp://ygdy8:ygdy8@yg90.dydytt.net:8590/阳光电影www.ygdy8.com.报仇雪恨.BD.720p.中英双字幕.mkv'} # 解析二级页面,获取下载链接 def parse_two_html(self, film_link): two_html = self.get_page(film_link) pattern = re.compile(' |
|