Python爬取国外天气预报网站的方法


Posted in Python on July 10, 2015

本文实例讲述了Python爬取国外天气预报网站的方法。分享给大家供大家参考。具体如下:

crawl_weather.py如下:

#encoding=utf-8
import httplib
import urllib2
import time
from threading import Thread
import threading
from Queue import Queue
from time import sleep
import re
import copy
# Target language code used to build the AccuWeather entry URL (e.g. "fr", "zh").
lang = "fr"
# Global counter of leaf "weather-forecast" URLs discovered so far (progress only).
count = 0
class Location:
  """Plain data holder describing a place to crawl.

  Examples:
    Location(False, "中国", "北京", "zh")  -- a city inside a country
    Location(True, "", "亚洲", "zh")       -- a region above country level
  """
  def __init__(self, is_beyond_country, country_name, loc_name, lang):
    # Store every field verbatim; no validation or derived state.
    self.is_beyond_country = is_beyond_country
    self.country_name = country_name
    self.loc_name = loc_name
    self.lang = lang
# Guards the progress print so multi-threaded output lines do not interleave.
prn_lock = threading.RLock()
def GetLocationURLs(url, recursive):
  """Return a list of AccuWeather "weather-forecast" leaf URLs reachable from url.

  If url itself is a leaf (contains "weather-forecast"), return [url].
  Otherwise fetch the page and scan it for <h6><a ...><em>...</em></a></h6>
  location links; when recursive is True, descend into every such link and
  collect all leaves, else return only the immediate child URLs.

  NOTE(review): mutates the module-global `count`, performs blocking network
  I/O via urllib2, and keeps no visited set -- cyclic links would recurse
  forever.
  """
  global count
  if url.find("weather-forecast") != -1:
    count = count + 1
    # Print a progress line every 500 leaves; lock keeps the output atomic.
    if count % 500 == 0:
      prn_lock.acquire()
      print "count:%d" % (count)
      prn_lock.release()
    return [url]
  page = urllib2.urlopen(url).read()
  time.sleep(0.01)  # brief pause between requests to be polite to the server
  # Example of a line the pattern must match:
  #"<h6><a href=\"http://www.accuweather.com/zh/browse-locations/afr\"><em>Africa</em></a></h6>"
  pattern = "<h6><a href=\"(.*)\"><em>(.*)</em></a></h6>"
  locs = re.findall(pattern, page)
  # Keep only links leading to further browse pages or to forecast leaves.
  locs = [(url, name) for url, name in locs if url.find("browse-locations") != -1 or url.find("weather-forecast") != -1]
  if not recursive:
    urls = [url for url, name in locs]
    return urls
  urls = []
  for _url, _name in locs:
    lst = GetLocationURLs(_url, True)
    urls.extend(lst)
  return urls
#entry_url = "http://www.accuweather.com/zh/browse-locations"
# Crawl entry point: the France browse page in the selected language.
entry_url = "http://www.accuweather.com/%s/browse-locations/eur/fr" % (lang)
#regions = ["afr", "ant", "arc", "asi", "cac", "eur", "mea", "nam", "ocn", "sam"]
#regions = ["eur"]
#region_urls = [ "%s/%s" % (entry_url, reg) for reg in regions]
#region_urls = ["http://www.accuweather.com/zh/browse-locations/eur/fr"]
# First level only: direct children of the entry page, crawled sequentially.
sub_urls = GetLocationURLs(entry_url, False)
print len(sub_urls)
print sub_urls
q = Queue()
location_urls = []  # all leaf forecast URLs, filled by the worker threads
ThreadNum = 5
lock = threading.RLock()  # protects location_urls across workers
for url in sub_urls:
  q.put(url)
def working():
  # Worker loop: take one sub-URL, crawl it fully, merge results under the lock.
  while True:
    url = q.get()
    lst = GetLocationURLs(url, True)
    print "%s %d urls " % (url, len(lst))
    lock.acquire()
    location_urls.extend(lst)
    lock.release()
    q.task_done()
for i in range(ThreadNum):
  t = Thread(target=working)
  t.setDaemon(True)  # daemon workers die with the main thread after q.join()
  t.start()
q.join()  
# Persist one leaf URL per line; FetchLocation.py consumes this file.
fp = open('locations.txt', "w")
fp.write("\n".join(location_urls))
fp.close()
#for url in location_urls:
#  print url
#location_urls = GetLocationURLs(entry_url)
# Dead code below: a leftover Coursera downloader kept only as a reference.
'''
def Fetch(url):
  try:
    print url
    web_path = url[0]
    local_name = url[1]   
    print "web_path:", web_path
    print "local_name:", local_name
    sContent = urllib2.urlopen(web_path).read()
    savePath = "D:\\Course\\NLP_Manning\\%s" % (local_name)
    print savePath
    file = open(savePath,'wb')
    file.write(sContent)
    file.close()
    print savePath + " saved";
  except:
    pass;
def working():
  while True:
    url = q.get()
    Fetch(url)
    sleep(10)
    q.task_done()
#root_url = "https://class.coursera.org/nlp/lecture/index?lecture_player=flash"
root_url = "https://class.coursera.org/nlp/lecture/index?lecture_player=flash"
page = urllib2.urlopen(root_url).read()
for i in range(NUM):
  t = Thread(target=working)
  t.setDaemon(True)
  t.start()
urls = copy.deepcopy(ppt_urls)
urls.extend(srt_urls)
urls.extend(video_urls)
print len(ppt_urls)
print len(srt_urls)
print len(video_urls)
print len(urls)
for url in urls:
  q.put(url)
q.join()
'''
# More dead code: a one-off smoke test of a single forecast page.
'''
root_url = "http://www.accuweather.com/zh/cn/andingmen/57494/weather-forecast/57494"
page = urllib2.urlopen(root_url).read()
print page
'''

FetchLocation.py如下:

#encoding=utf-8
import sys
import httplib
import urllib2
import time
from threading import Thread
import threading
from Queue import Queue
from time import sleep
import re
import copy
from xml.dom import minidom
import HTMLParser
import datetime
q = Queue()  # holds lists (chunks) of URLs; one chunk is one worker task
locks = [threading.RLock() for i in range(2)]  # locks[1] guards the shared result maps
ThreadNumber = 20
locations = {}  # set-like map: location name -> 1
conds = {}  # set-like map: weather-condition string -> 1
def FindCountryBreadCrumbs(page):
  """Extract the <ul id="country-breadcrumbs"> ... </ul> fragment from page.

  Scans the page line by line for the opening breadcrumb <ul> tag and the
  </ul> that follows it, and returns that span (both lines inclusive) joined
  with newlines. If several breadcrumb lists appear, the last one wins, as in
  the original scan.

  Fix: when the markers are absent the original raised UnboundLocalError
  (`end` never assigned) or returned a bogus slice (`start` stuck at -1);
  now returns "" in that case.
  """
  lines = page.splitlines()
  start = -1
  end = -1
  opened = False
  for count, line in enumerate(lines):
    if line.find("<ul id=\"country-breadcrumbs\">") != -1:
      start = count
      opened = True
    if opened and line.find("</ul>") != -1:
      end = count
      opened = False
  if start == -1 or end == -1:
    # Breadcrumb list not present on this page.
    return ""
  return "\n".join(lines[start: (end + 1)])
def GetText(nodelist):
  """Concatenate the entity-unescaped text of every TEXT_NODE in nodelist."""
  fragments = [HTMLParser.HTMLParser().unescape(node.data)
               for node in nodelist
               if node.nodeType == node.TEXT_NODE]
  return ''.join(fragments)
def FindCondition(page):
  """Return every weather-condition string in page, entity-unescaped and
  UTF-8 encoded.

  Conditions appear in the markup as <span class="cond">...</span>.
  """
  matches = re.findall("<span class=\"cond\">(.*?)</span>", page)
  result = []
  for cd in matches:
    result.append(HTMLParser.HTMLParser().unescape(cd).encode("utf-8"))
  return result
def ExtractInfo(url):
  """Fetch url and return a pair (locations, conditions).

  locations: UTF-8 encoded breadcrumb names (continent/country/.../city).
  conditions: UTF-8 encoded weather-condition strings from the page.

  Fix: on a fetch failure the original returned a bare [], which crashed the
  caller's two-value unpacking (`locs, cds = ExtractInfo(url)`); it now
  returns ([], []). Also `except Exception, e` bound an unused name.
  """
  try:
    page = urllib2.urlopen(url).read()
  except Exception:
    # Network/HTTP failure: report empty results instead of propagating.
    return [], []
  # Isolate the breadcrumb <ul>, unescape entities, then parse it as XML.
  text = FindCountryBreadCrumbs(page)
  text = HTMLParser.HTMLParser().unescape(text)
  dom = minidom.parseString(text.encode("utf-8"))
  locs = []
  lis = dom.getElementsByTagName("li")
  for li in lis:
    # Intermediate levels are links; the current page's own name is <strong>.
    adr_list = li.getElementsByTagName("a")
    if adr_list:
      locs.append(GetText(adr_list[0].childNodes).encode("utf-8"))
    strs = li.getElementsByTagName("strong")
    if strs:
      locs.append(GetText(strs[0].childNodes).encode("utf-8"))
  cds = FindCondition(page)
  return locs, cds
def AddMap(lst, m):
  """Insert every element of lst into map m as a key with value 1.

  m is used as a set (only the keys matter); keys already present are left
  untouched. Uses the `x not in m` membership idiom instead of the original
  `m.get(x) == None`, which is both non-idiomatic and wrong if a key ever
  maps to None.
  """
  for x in lst:
    if x not in m:
      m[x] = 1
def working():
  """Worker-thread loop: consume URL chunks from q forever.

  Each queue item is a list of URLs. Results are accumulated in thread-local
  maps first, then merged into the shared `locations`/`conds` maps under
  locks[1], keeping the critical section short.
  """
  while True:
    urls = q.get()
    #print len(urls)
    m = {}   # per-chunk location names (used as a set)
    m2 = {}  # per-chunk condition strings (used as a set)
    count = 0
    for url in urls:
      count = count + 1
      #print "%d/%d" % (count, len(urls))
      locs, cds = ExtractInfo(url)
      AddMap(locs, m)
      AddMap(cds, m2)
    # Merge this chunk's results into the shared maps atomically.
    locks[1].acquire()
    AddMap(m.keys(), locations)
    AddMap(m2.keys(), conds)
    locks[1].release()
    q.task_done()
def main():
  """Read leaf URLs from argv[1], crawl them with worker threads, save results.

  Splits the URL list into roughly ThreadNumber equal chunks, queues each
  chunk as one task, waits for all of them, then writes the collected
  location names and condition strings (one per line) to output files.
  """
  if len(sys.argv) < 2:
    exit()
  loc_path = sys.argv[1]
  fp = open(loc_path, "r")
  urls = [line.strip() for line in fp]
  fp.close()
  #urls = urls[0:1000]
  # Python 2 integer division: chunk size yielding about ThreadNumber chunks.
  blocks = len(urls) / ThreadNumber + 1
  for start in range(0, len(urls), blocks):
    end = start + blocks
    if end > len(urls):
      end = len(urls)
    q.put(urls[start:end])
  for i in range(ThreadNumber):
    t = Thread(target=working)
    t.setDaemon(True)  # daemon workers exit when the main thread finishes
    t.start()
  q.join()
  # French location names, one per line.
  fp = open("location_name.fr", "w")
  fp.write("\n".join(locations.keys()))
  fp.close()
  # Weather-condition vocabulary, one per line.
  fp = open("conditions.fr", "w")
  fp.write("\n".join(conds.keys()))
  fp.close()
if __name__ == '__main__':
  main()

希望本文所述对大家的python程序设计有所帮助。

Python 相关文章推荐
理解Python中的绝对路径和相对路径
Aug 30 Python
Numpy掩码式数组详解
Apr 17 Python
Ubuntu下Python2与Python3的共存问题
Oct 31 Python
神经网络相关之基础概念的讲解
Dec 29 Python
python将控制台输出保存至文件的方法
Jan 07 Python
Python OOP类中的几种函数或方法总结
Feb 22 Python
python中如何使用分步式进程计算详解
Mar 22 Python
Python中函数的返回值示例浅析
Aug 28 Python
Windows10下Tensorflow2.0 安装及环境配置教程(图文)
Nov 21 Python
python Opencv计算图像相似度过程解析
Dec 03 Python
使用pyecharts1.7进行简单的可视化大全
May 17 Python
实例讲解Python中sys.argv[]的用法
Jun 03 Python
Python实现比较两个文件夹中代码变化的方法
Jul 10 #Python
python简单文本处理的方法
Jul 10 #Python
Python实现把json格式转换成文本或sql文件
Jul 10 #Python
Python中的一些陷阱与技巧小结
Jul 10 #Python
Python中的fileinput模块的简单实用示例
Jul 09 #Python
Python中的anydbm模版和shelve模版使用指南
Jul 09 #Python
python冒泡排序简单实现方法
Jul 09 #Python
You might like
Get或Post提交值的非法数据处理
2006/10/09 PHP
教你如何把一篇文章按要求分段
2006/10/09 PHP
php-accelerator网站加速PHP缓冲的方法
2008/07/30 PHP
php cookie中点号(句号)自动转为下划线问题
2014/10/21 PHP
Laravel 5框架学习之表单验证
2015/04/08 PHP
PHP中使用foreach()遍历二维数组的简单实例
2016/06/13 PHP
Laravel timestamps 设置为unix时间戳的方法
2019/10/11 PHP
php设计模式之观察者模式实例详解【星际争霸游戏案例】
2020/03/30 PHP
jQuery.buildFragment使用方法及思路分析
2013/01/07 Javascript
Table冻结表头示例代码
2013/08/20 Javascript
jquery与prototype框架的详细对比
2013/11/21 Javascript
NodeJS中Buffer模块详解
2015/01/07 NodeJs
深入探究JavaScript中for循环的效率问题及相关优化
2016/03/13 Javascript
javascript基础语法——全面理解变量和标识符
2016/06/02 Javascript
javascript简单实现跟随滚动条漂浮的返回顶部按钮效果
2016/08/19 Javascript
JavaScript实现二分查找实例代码
2017/02/22 Javascript
vue axios请求频繁时取消上一次请求的方法
2018/11/10 Javascript
关于RxJS Subject的学习笔记
2018/12/05 Javascript
npm 常用命令详解(小结)
2019/01/17 Javascript
mpvue微信小程序开发之实现一个弹幕评论
2019/11/24 Javascript
JavaScript this指向相关原理及实例解析
2020/07/10 Javascript
vue 点击其他区域关闭自定义div操作
2020/07/17 Javascript
[55:35]VGJ.S vs Mski Supermajor小组赛C组 BO3 第二场 6.3
2018/06/04 DOTA
用Python实现一个简单的能够上传下载的HTTP服务器
2015/05/05 Python
对python_discover方法遍历所有执行的用例详解
2019/02/13 Python
Python实现Selenium自动化Page模式
2019/07/14 Python
django Admin文档生成器使用详解
2019/07/22 Python
Python Django框架url反向解析实现动态生成对应的url链接示例
2019/10/18 Python
Python 存取npy格式数据实例
2020/07/01 Python
日本钓鱼渔具和户外用品网上商店:naturum
2016/08/07 全球购物
世界领先的以旅馆为主的在线预订平台:Hostelworld
2016/10/09 全球购物
MAC Cosmetics巴西官方网站:M·A·C彩妆
2019/04/18 全球购物
分公司经理任命书
2014/06/05 职场文书
森林防火宣传标语
2014/06/27 职场文书
委托书格式要求
2015/01/28 职场文书
离婚纠纷代理词
2015/05/23 职场文书