Implementing a Newspaper Viewer in Python by Parsing Web Pages
The example described in this article implements a Python-based viewer for the picture edition of the newspaper Cankao Xiaoxi (参考消息): it locates the link to the current day's issue on the site's home page, then automatically downloads each page of the issue as an image to a local folder for offline reading. The full implementation is as follows:
# coding=gbk
# Download the current day's picture edition of Cankao Xiaoxi (参考消息)
# page by page and save the images to a local folder. (Python 2 / urllib2)
import urllib2
import socket
import re
import time
import os
import sys

# Global socket timeout in seconds, so a stalled request does not hang the script.
timeout = 10
socket.setdefaulttimeout(timeout)

home_url = "http://www.hqck.net"
home_page = ""

# Fetch the site's home page, which carries the link to today's issue.
try:
    home_page_context = urllib2.urlopen(home_url)
    home_page = home_page_context.read()
    print "Read home page finished."
    print "-------------------------------------------------"
except urllib2.HTTPError, e:
    print e.code
    exit()
except urllib2.URLError, e:
    print e.reason
    exit()

# Pick out the link to today's issue. (The pattern in the original article was
# swallowed by the page's HTML rendering, leaving only '.+'; the pattern below is
# a reconstruction that matches <a> tags whose href points at an /arc/xxx.html
# article page, which is what the slicing code further down expects.)
reg_str = r'<a href="[^"]*/arc/[^"]*\.html"[^>]*>.*?</a>'
news_url_reg = re.compile(reg_str)
today_cankao_news = news_url_reg.findall(home_page)
if len(today_cankao_news) == 0:
    print "Cannot find today's news!"
    exit()

my_news = today_cankao_news[0]
print "Latest news link = " + my_news
print

# Cut the "/arc/xxxx.html" part out of the matched tag and build the article URL.
url_s = my_news.find("/arc/")
url_e = my_news.find(".html") + 5
print "Link index = [" + str(url_s) + "," + str(url_e) + "]"
my_news = my_news[url_s:url_e]
print "part url = " + my_news
full_news_url = home_url + my_news
print "full url = " + full_news_url
print

# Create a dated folder such as E:\new_folder\2015-01-01\ for today's images.
image_folder = "E:\\new_folder\\"
if not os.path.exists(image_folder):
    os.makedirs(image_folder)
today_num = time.strftime('%Y-%m-%d', time.localtime(time.time()))
image_folder = image_folder + today_num + "\\"
if not os.path.exists(image_folder):
    os.makedirs(image_folder)
print "News image folder = " + image_folder
print

# The issue is split across pages named xxx.html, xxx_2.html, xxx_3.html, ...
context_uri = full_news_url[0:-5]
first_page_url = context_uri + ".html"
try:
    first_page_context = urllib2.urlopen(first_page_url)
    first_page = first_page_context.read()
except urllib2.HTTPError, e:
    print e.code
    exit()

# The first page carries a "共N页" ("N pages in total") marker; read N from it.
# The page is GBK encoded, so "共" occupies two bytes and the digits start at offset 2.
tot_page_index = first_page.find("共")
if tot_page_index == -1:
    print "Cannot find the page count!"
    exit()
tmp_str = first_page[tot_page_index:tot_page_index + 10]
end_s = tmp_str.find("页")
page_num = tmp_str[2:end_s]
print page_num
page_count = int(page_num)
print "Total " + page_num + " pages:"
print

page_index = 1
download_suc = True
while page_index <= page_count:
    # Build the URL of the current page of the issue.
    page_url = context_uri
    if page_index > 1:
        page_url = page_url + "_" + str(page_index)
    page_url = page_url + ".html"
    print "News page link = " + page_url
    try:
        news_img_page_context = urllib2.urlopen(page_url)
    except urllib2.URLError, e:
        print e.reason
        download_suc = False
        break
    news_img_page = news_img_page_context.read()
    # (Optional) dump the raw page for debugging:
    #f = open("e:\\page.html", "w")
    #f.write(news_img_page)
    #f.close()

    # Each page embeds one newspaper image; grab its URL.
    reg_str = r'http://image\S+jpg'
    image_reg = re.compile(reg_str)
    image_results = image_reg.findall(news_img_page)
    if len(image_results) == 0:
        print "Cannot find the image on news page " + str(page_index) + "!"
        download_suc = False
        break
    image_url = image_results[0]
    print "News image url = " + image_url

    # Stream the image to disk in 10 KB chunks.
    news_image_context = urllib2.urlopen(image_url)
    image_name = image_folder + "page_" + str(page_index) + ".jpg"
    imgf = open(image_name, 'wb')
    print "Getting image..."
    try:
        while True:
            data = news_image_context.read(1024 * 10)
            if not data:
                break
            imgf.write(data)
        imgf.close()
    except:
        download_suc = False
        print "Save image " + str(page_index) + " failed!"
        print "Unexpected error: " + str(sys.exc_info()[0]) + " " + str(sys.exc_info()[1])
    else:
        print "Save image " + str(page_index) + " succeeded!"
    print
    page_index = page_index + 1

if download_suc:
    print "News download succeeded! Path = \"" + image_folder + "\""
    print "Enjoy it! ^^"
else:
    print "News download failed!"
