Scraping Juhuasuan product pages with Python: extracting product information and saving it locally as XML


Posted in Python on February 23, 2018

This article shares a complete, working example of scraping Juhuasuan product pages with Python, extracting the product information, and saving it locally, for your reference. The details are as follows:
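A note before the listing: the script is written for Python 2 (it relies on urllib2, httplib, StringIO, and print statements) and uses the third-party packages BeautifulSoup 3 and chardet. On a Python 2 environment these can usually be installed with something like:

pip install BeautifulSoup chardet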

#!/usr/bin/python 
# -*- coding: gbk -*- 
#Spider.py 
 
import urllib2 
import httplib 
import StringIO 
import gzip 
import re 
import chardet 
import sys 
import os 
import datetime 
from xml.dom.minidom import Document 
from BeautifulSoup import BeautifulSoup 
 
## Work around UnicodeEncodeError when printing non-ASCII text to the console 
reload(sys) 
sys.setdefaultencoding("utf8") 
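## (site.py removes sys.setdefaultencoding from the sys module at startup, 
## which is why the reload(sys) hack above is needed; this is Python 2-only) 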
##################################################### 
 
## Debug switch; when enabled, HTTP request header info and debug logs are printed 
DEBUG = 1 
NO_DEBUG = 0 
httplib.HTTPConnection.debuglevel = DEBUG 
## Switch controlling whether the fetched page source is printed 
showSrcCode = False 
## compression scheme we ask the server to use 
ZIP_TYPE = "gzip" 
 
fileName = "auctions" 
location = "d:/spiderData/" 
 
## request headers: a custom User-Agent plus gzip Accept-Encoding 
headerConfig = {"User-Agent":"taobao-yanyuan.qzs", "Accept-encoding":ZIP_TYPE} 
##################################################### 
 
 
#############class SpiderConfig ##################### 
class SpiderConfig: 
 """ 
  configuration for spider name and url 
 """ 
 def __init__(self, name, url): 
  self.name = name 
  self.url = url 
##################################################### 
 
##############class SpiderAuctionDomain############## 
class SpiderAuctionDomain: 
 """ 
  Store information with auctions spidered by python 
 """ 
 title = "" 
 link = "" 
 img = "" 
 price = "" 
 
 def __init__(self): 
  pass 
 
##################################################### 
 
########class SpiderDefaultErrorHandler############## 
class SpiderDefaultErrorHandler(urllib2.HTTPDefaultErrorHandler): 
 def http_error_default(self, req, fp, code, msg, hdrs): 
  """ 
   default error process handler for spider 
  """ 
  result = urllib2.HTTPError(req.get_full_url(), code, msg, hdrs, fp) 
  result.status = code 
  result.url = req.get_full_url() 
 
  print "<", result.url, "Exception code :", result.status, ">" 
 
  return result 
##################################################### 
 
#############class SpiderHandler##################### 
class SpiderHandler: 
 """ 
  spider handler 
 """ 
 
 def spider(self, spiderConfig): 
  try: 
   request = urllib2.Request(spiderConfig.url) 
 
   ## configure request headers 
   for key,val in headerConfig.items(): 
    request.add_header(key, val) 
 
   ## build opener 
   opener = urllib2.build_opener(SpiderDefaultErrorHandler()) 
 
   ## open request 
   openRequest = opener.open(request) 
 
   ## read data 
   spiderData = openRequest.read() 
 
   ## close 
   opener.close() 
 
   if 0 == len(spiderData): 
    return 
 
   if ZIP_TYPE == openRequest.headers.get("Content-Encoding"): 
    spiderData = SpiderHandler.gzipData(self, spiderData) 
 
   if httplib.HTTPConnection.debuglevel == DEBUG and showSrcCode: 
    print spiderData 
 
   # parse html 
   SpiderHandler.parse(self, spiderData) 
 
  except Exception,x: 
   print "spider process Exception:", x 
 
 
 
 def parse(self, spiderData): 
  """ 
   parse html content 
  """ 
 
  if httplib.HTTPConnection.debuglevel == DEBUG: 
   charsetAnalyze = chardet.detect(spiderData) 
   print "analyze spider data encode :",charsetAnalyze["encoding"] 
 
  print "执行解析", fileName 
 
  soup = BeautifulSoup(spiderData) 
  encode = soup.originalEncoding 
 
  encoding = lambda x : x.encode(encode) 
 
  if httplib.HTTPConnection.debuglevel == DEBUG: 
   print "识别到编码:", encode 
   title = soup.head.title.string 
   print encoding(title) 
 
  spiderContents = soup.findAll(name="div", attrs={"class":"main-box avil"}) 
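  # each matched div is one product card on the deal page 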
  auctions = ["%s" % s for s in spiderContents] 
 
  if not auctions: 
   return 
 
  auctionList = [] 
 
  for auc in auctions: 
   auctionDomain = SpiderAuctionDomain() 
   # parse auction link 
   links = re.search(re.compile(r'<a href=[\"|\']http://ju.taobao.com/tg/life_home.htm\?item_id=([^>]*)[\"|\']', re.IGNORECASE), auc) 
   if links is not None : 
    auctionDomain.link = encoding("http://ju.taobao.com/tg/life_home.htm?item_id=" + "".join(["%s" % s for s in links.groups() if len(s) > 0])) 
 
   #parse auction title 
   titles = re.search(re.compile(r"([^>]*)</a></h2>", re.IGNORECASE), auc) 
   if titles is not None: 
    auctionDomain.title = encoding("".join(["%s" % t for t in titles.groups() if len(t) > 0])) 
 
   #parse auction price 
   price = re.search(re.compile(r"<strong class=\"J_juPrices\".*</b>([^<]*)</strong>", re.IGNORECASE), auc) 
   if price is not None: 
    auctionDomain.price = "".join(["%s" % p for p in price.groups() if len(p) > 0]) 
 
   #parse image url 
   imgs = re.search(re.compile(r"<img src=[\'\"]([^>]*)[\'\"]", re.IGNORECASE), auc) 
   if imgs is not None: 
    auctionDomain.img = "".join(["%s" % i for i in imgs.groups() if len(i) > 0]) 
 
   auctionList.append(auctionDomain) 
 
  print "成功解析商品信息:" 
  for a in auctionList: 
   print "--->",a.title 
 
  # sort auction list 
  auctionList = SpiderHandler.sortAuctionList(self, auctionList) 
 
  # save in file 
  SpiderHandler.save(self, auctionList) 
 
  print "解析完成" 
 
  pass 
 
 def sortAuctionList(self, auctionList): 
  """ 
   bubble sort, ordering auctions by price ascending 
  """ 
  length = len(auctionList) 
  if length < 2: 
   return auctionList 
  else: 
   for i in range(length-1): 
    for j in range(length - i -1): 
     if float(auctionList[j].price) > float(auctionList[j+1].price): 
      auctionList[j], auctionList[j+1] = auctionList[j+1], auctionList[j] 
  return auctionList 
 
 def save(self, auctionList): 
  if auctionList is not None: 
   doc = Document() 
 
   auctions = doc.createElement("auctions") 
   doc.appendChild(auctions) 
 
   for auc in auctionList: 
    auction = doc.createElement("auction") 
    auctions.appendChild(auction) 
 
    SpiderHandler.generateXML(self, doc, auction, "title", auc.title) 
    SpiderHandler.generateXML(self, doc, auction, "price", auc.price) 
    SpiderHandler.generateXML(self, doc, auction, "img", auc.img) 
    SpiderHandler.generateXML(self, doc, auction, "link", auc.link) 
 
   if not os.path.exists(location): 
    os.mkdir(location) 
 
   f = open(location + fileName + ".xml", 'w') 
   f.write(doc.toprettyxml()) 
   f.close() 
 
   if httplib.HTTPConnection.debuglevel == DEBUG: 
    print doc.toprettyxml() 
 
 def generateXML(self, doc, f, name, txt): 
  c = doc.createElement(name) 
  f.appendChild(c) 
  c.appendChild(doc.createTextNode(txt)) 
 
 def gzipData(self, spiderData): 
  """ 
   decompress gzip-encoded response data 
  """ 
  if 0 == len(spiderData): 
   return spiderData 
  spiderDataStream = StringIO.StringIO(spiderData) 
  spiderData = gzip.GzipFile(fileobj=spiderDataStream).read() 
  return spiderData 
##################################################### 
 
if __name__ == "__main__": 
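 # timestamp helper used when logging start and finish times 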
 nowtime = lambda: datetime.datetime.strftime(datetime.datetime.now(), "%Y-%m-%d %H:%M:%S") 
 
 needSpiderUrl = {"suzhou":"http://ju.taobao.com/suzhou", 
      "hangzhou":"http://ju.taobao.com/hangzhou", 
      "shanghai":"http://ju.taobao.com/shanghai", 
      "beijing":"http://ju.taobao.com/beijing", 
      "chengdu":"http://ju.taobao.com/chengdu"} 
 
 configList = [] 
 for k,v in needSpiderUrl.items(): 
  spiderConfig = SpiderConfig(k, v) 
  configList.append(spiderConfig) 
 
 spiderHandler = SpiderHandler() 
 
 print "爬虫执行开始时间:",nowtime() 
 for spiderConfig in configList: 
  fileName = spiderConfig.name 
  spiderHandler.spider(spiderConfig) 
 
 print "爬虫执行完毕时间:",nowtime()

For more on this topic, see the series 《python爬取功能汇总》 (a roundup of Python scraping techniques).

That is all for this article. I hope it helps with your studies, and thank you for supporting 三水点靠木.
