Testing the Python3WebSpider Crawler Proxy Pool Source Code: A Walkthrough


Posted in Python on December 20, 2019

This article walks through testing the source code of the Python3WebSpider crawler proxy pool. The sample code is explained in detail and should be a useful reference for study or work; readers who need it can follow along.

Using metaclass attributes

Code

This is mainly about how the metaclass is used.

The metaclass collects certain attributes of the crawler class it creates: the crawl functions, i.e. the methods whose names start with the same prefix. Their names are gathered into a list attribute so that they can be called one after another. The goal is that supporting another site only requires adding another crawl function; the rest of the class needs no changes.

Partial code:

class ProxyMetaclass(type):
  def __new__(cls, name, bases, attrs):
    count = 0
    attrs['__CrawlFunc__'] = []
    # register every attribute whose name contains 'crawl_' (i.e. the crawl functions)
    for k, v in attrs.items():
      if 'crawl_' in k:
        attrs['__CrawlFunc__'].append(k)
        count += 1
    attrs['__CrawlFuncCount__'] = count
    return type.__new__(cls, name, bases, attrs)


class Crawler(object, metaclass=ProxyMetaclass):
  def get_proxies(self, callback):
    proxies = []
    # look up the crawl function by name and call it
    for proxy in eval("self.{}()".format(callback)):
      print('Successfully fetched proxy', proxy)
      proxies.append(proxy)
    return proxies
    
  def crawl_daili66(self, page_count=4):
    """
    获取代理66
    :param page_count: 页码
    :return: 代理
    """
    start_url = 'http://www.66ip.cn/{}.html'
    urls = [start_url.format(page) for page in range(1, page_count + 1)]
    for url in urls:
      print('Crawling', url)
      html = get_page(url)  # get_page (an HTTP download helper) is defined elsewhere in the project, not in this partial code
      if html:
        doc = pq(html)
        trs = doc('.containerbox table tr:gt(0)').items()
        for tr in trs:
          ip = tr.find('td:nth-child(1)').text()
          port = tr.find('td:nth-child(2)').text()
          yield ':'.join([ip, port])
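
To see what the metaclass actually injects, here is a minimal usage sketch (an editorial illustration assuming the partial code above, not part of the original source). It also shows getattr as a safer alternative to the eval call inside get_proxies:

crawler = Crawler()
print(crawler.__CrawlFuncCount__)  # 1: only crawl_daili66 is registered in the partial code
print(crawler.__CrawlFunc__)       # ['crawl_daili66']
# each registered name can be resolved and called generically
for name in crawler.__CrawlFunc__:
  crawl = getattr(crawler, name)
  # proxies = list(crawl())  # would perform the actual HTTP requests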

Test method

#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time  : 12/19/19 4:10 PM
# @Author : yon
# @Email  : @qq.com
# @File  : test


import json
import re
from pyquery import PyQuery as pq
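
# NOTE: get_page is used by the crawl_* methods below but is never imported in this
# test file; the test still runs because the actual fetching in Getter.run() is
# commented out. The stand-in below is an editorial addition (an assumption, not the
# project's real helper) so the crawl functions could also be called directly:
import requests


def get_page(url, options=None):
  # download a page with optional extra headers; return the HTML text, or None on failure
  try:
    response = requests.get(url, headers=options, timeout=10)
    if response.status_code == 200:
      return response.text
  except requests.RequestException:
    return None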


class ProxyMetaclass(type):
  def __new__(cls, name, bases, attrs):
    count = 0
    attrs['__CrawlFunc__'] = []
    for k, v in attrs.items():
      print("打印k")
      print(k)
      print("打印v")
      print(v)
      if 'crawl_' in k:
        attrs['__CrawlFunc__'].append(k)
        count += 1
    attrs['__CrawlFuncCount__'] = count
    return type.__new__(cls, name, bases, attrs)


class Crawler(object, metaclass=ProxyMetaclass):
  def get_proxies(self, callback):
    proxies = []
    for proxy in eval("self.{}()".format(callback)):
      print('Successfully fetched proxy', proxy)
      proxies.append(proxy)
    return proxies

  def crawl_daili66(self, page_count=4):
    """
    获取代理66
    :param page_count: 页码
    :return: 代理
    """
    start_url = 'http://www.66ip.cn/{}.html'
    urls = [start_url.format(page) for page in range(1, page_count + 1)]
    for url in urls:
      print('Crawling', url)
      html = get_page(url)
      if html:
        doc = pq(html)
        trs = doc('.containerbox table tr:gt(0)').items()
        for tr in trs:
          ip = tr.find('td:nth-child(1)').text()
          port = tr.find('td:nth-child(2)').text()
          yield ':'.join([ip, port])

  def crawl_ip3366(self):
    for page in range(1, 4):
      start_url = 'http://www.ip3366.net/free/?stype=1&page={}'.format(page)
      html = get_page(start_url)
      ip_address = re.compile('<tr>\s*<td>(.*?)</td>\s*<td>(.*?)</td>')
      # \s* matches whitespace, so the pattern can match across line breaks
      re_ip_address = ip_address.findall(html)
      for address, port in re_ip_address:
        result = address + ':' + port
        yield result.replace(' ', '')

  def crawl_kuaidaili(self):
    for i in range(1, 4):
      start_url = 'http://www.kuaidaili.com/free/inha/{}/'.format(i)
      html = get_page(start_url)
      if html:
        ip_address = re.compile('<td data-title="IP">(.*?)</td>')
        re_ip_address = ip_address.findall(html)
        port = re.compile('<td data-title="PORT">(.*?)</td>')
        re_port = port.findall(html)
        for address, port in zip(re_ip_address, re_port):
          address_port = address + ':' + port
          yield address_port.replace(' ', '')

  def crawl_xicidaili(self):
    for i in range(1, 3):
      start_url = 'http://www.xicidaili.com/nn/{}'.format(i)
      headers = {
        'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8',
        'Cookie': '_free_proxy_session=BAh7B0kiD3Nlc3Npb25faWQGOgZFVEkiJWRjYzc5MmM1MTBiMDMzYTUzNTZjNzA4NjBhNWRjZjliBjsAVEkiEF9jc3JmX3Rva2VuBjsARkkiMUp6S2tXT3g5a0FCT01ndzlmWWZqRVJNek1WanRuUDBCbTJUN21GMTBKd3M9BjsARg%3D%3D--2a69429cb2115c6a0cc9a86e0ebe2800c0d471b3',
        'Host': 'www.xicidaili.com',
        'Referer': 'http://www.xicidaili.com/nn/3',
        'Upgrade-Insecure-Requests': '1',
      }
      html = get_page(start_url, options=headers)
      if html:
        find_trs = re.compile('<tr class.*?>(.*?)</tr>', re.S)
        trs = find_trs.findall(html)
        for tr in trs:
          find_ip = re.compile('<td>(\d+\.\d+\.\d+\.\d+)</td>')
          re_ip_address = find_ip.findall(tr)
          find_port = re.compile('<td>(\d+)</td>')
          re_port = find_port.findall(tr)
          for address, port in zip(re_ip_address, re_port):
            address_port = address + ':' + port
            yield address_port.replace(' ', '')

  # note: this redefines the crawl_ip3366 above; the second definition overwrites the
  # first in the class dict, which is why __CrawlFunc__ lists crawl_ip3366 only once
  def crawl_ip3366(self):
    for i in range(1, 4):
      start_url = 'http://www.ip3366.net/?stype=1&page={}'.format(i)
      html = get_page(start_url)
      if html:
        find_tr = re.compile('<tr>(.*?)</tr>', re.S)
        trs = find_tr.findall(html)
        for s in range(1, len(trs)):
          find_ip = re.compile('<td>(\d+\.\d+\.\d+\.\d+)</td>')
          re_ip_address = find_ip.findall(trs[s])
          find_port = re.compile('<td>(\d+)</td>')
          re_port = find_port.findall(trs[s])
          for address, port in zip(re_ip_address, re_port):
            address_port = address + ':' + port
            yield address_port.replace(' ', '')

  def crawl_iphai(self):
    start_url = 'http://www.iphai.com/'
    html = get_page(start_url)
    if html:
      find_tr = re.compile('<tr>(.*?)</tr>', re.S)
      trs = find_tr.findall(html)
      for s in range(1, len(trs)):
        find_ip = re.compile('<td>\s+(\d+\.\d+\.\d+\.\d+)\s+</td>', re.S)
        re_ip_address = find_ip.findall(trs[s])
        find_port = re.compile('<td>\s+(\d+)\s+</td>', re.S)
        re_port = find_port.findall(trs[s])
        for address, port in zip(re_ip_address, re_port):
          address_port = address + ':' + port
          yield address_port.replace(' ', '')

  def crawl_data5u(self):
    start_url = 'http://www.data5u.com/free/gngn/index.shtml'
    headers = {
      'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8',
      'Accept-Encoding': 'gzip, deflate',
      'Accept-Language': 'en-US,en;q=0.9,zh-CN;q=0.8,zh;q=0.7',
      'Cache-Control': 'max-age=0',
      'Connection': 'keep-alive',
      'Cookie': 'JSESSIONID=47AA0C887112A2D83EE040405F837A86',
      'Host': 'www.data5u.com',
      'Referer': 'http://www.data5u.com/free/index.shtml',
      'Upgrade-Insecure-Requests': '1',
      'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/63.0.3239.108 Safari/537.36',
    }
    html = get_page(start_url, options=headers)
    if html:
      ip_address = re.compile('<span><li>(\d+\.\d+\.\d+\.\d+)</li>.*?<li class=\"port.*?>(\d+)</li>', re.S)
      re_ip_address = ip_address.findall(html)
      for address, port in re_ip_address:
        result = address + ':' + port
        yield result.replace(' ', '')


class Getter():
  def __init__(self):
    self.crawler = Crawler()

  def run(self):
    print('Getter starts running')
    for callback_label in range(self.crawler.__CrawlFuncCount__):
      print(callback_label)
      callback = self.crawler.__CrawlFunc__[callback_label]
      print(callback)
      # # fetch proxies (left disabled in this test; sys is not imported and self.redis is not set up)
      # proxies = self.crawler.get_proxies(callback)
      # sys.stdout.flush()
      # for proxy in proxies:
      #   self.redis.add(proxy)
if __name__ == '__main__':
  get = Getter()
  get.run()
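
To make the test actually download proxies instead of only printing the crawl function names, get_proxies can be called directly. A minimal check (my own example, not from the original article; it needs network access and a working get_page) could look like this:

crawler = Crawler()
proxies = crawler.get_proxies('crawl_daili66')
print(len(proxies), 'proxies collected')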

Test results

/home/baixiaoxu/PycharmProjects/pytthon-tt/venv/bin/python /home/baixiaoxu/PycharmProjects/pytthon-tt/proxypool/test.py
printing k
__module__
printing v
__main__
printing k
__qualname__
printing v
Crawler
printing k
get_proxies
printing v
<function Crawler.get_proxies at 0x7f905ca5a598>
printing k
crawl_daili66
printing v
<function Crawler.crawl_daili66 at 0x7f905ca5a620>
printing k
crawl_ip3366
printing v
<function Crawler.crawl_ip3366 at 0x7f905ca5a840>
printing k
crawl_kuaidaili
printing v
<function Crawler.crawl_kuaidaili at 0x7f905ca5a730>
printing k
crawl_xicidaili
printing v
<function Crawler.crawl_xicidaili at 0x7f905ca5a7b8>
printing k
crawl_iphai
printing v
<function Crawler.crawl_iphai at 0x7f905ca5a6a8>
printing k
crawl_data5u
printing v
<function Crawler.crawl_data5u at 0x7f905ca5a8c8>
printing k
__CrawlFunc__
printing v
['crawl_daili66', 'crawl_ip3366', 'crawl_kuaidaili', 'crawl_xicidaili', 'crawl_iphai', 'crawl_data5u']
Getter starts running
0
crawl_daili66
1
crawl_ip3366
2
crawl_kuaidaili
3
crawl_xicidaili
4
crawl_iphai
5
crawl_data5u

Process finished with exit code 0

That's all for this article. I hope it is helpful for your study or work, and I hope you will continue to support 三水点靠木.
