Trying to Scrape City Rental Listings with Python

Posted in Python on April 12, 2022

Approach: start with a single-threaded crawler, verify it scrapes successfully, then optimize it with multiple threads, and finally store the results in a database.

We'll use rental listings in Zhengzhou as the example.

Note: this hands-on project is for learning purposes only. To avoid putting too much load on the website, please change num in the code to a small number and reduce the thread-pool sizes.

1. Single-threaded crawler

# Use a requests Session instead of bare requests calls (keeps cookies across requests)
# Parse HTML with BeautifulSoup (bs4)
import requests
# from lxml import etree    # alternative: XPath parsing
from bs4 import BeautifulSoup
from urllib import parse
import re
import time
 
headers = {
    'referer': 'https://zz.zu.fang.com/',
    'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/86.0.4240.198 Safari/537.36',
    'cookie': 'global_cookie=ffzvt3kztwck05jm6twso2wjw18kl67hqft; city=zz; integratecover=1; __utma=147393320.427795962.1613371106.1613371106.1613371106.1; __utmc=147393320; __utmz=147393320.1613371106.1.1.utmcsr=zz.fang.com|utmccn=(referral)|utmcmd=referral|utmcct=/; __utmt_t0=1; __utmt_t1=1; __utmt_t2=1; ASP.NET_SessionId=aamzdnhzct4i5mx3ak4cyoyp; Rent_StatLog=23d82b94-13d6-4601-9019-ce0225c092f6; Captcha=61584F355169576F3355317957376E4F6F7552365351342B7574693561766E63785A70522F56557370586E3376585853346651565256574F37694B7074576B2B34536C5747715856516A4D3D; g_sourcepage=zf_fy%5Elb_pc; unique_cookie=U_ffzvt3kztwck05jm6twso2wjw18kl67hqft*6; __utmb=147393320.12.10.1613371106'
}
data={
    'agentbid':''
}
 
session = requests.session()
session.headers = headers
 
# Fetch a page and return its decoded HTML
def getHtml(url):
    try:
        res = session.get(url)   # don't shadow the re module with the response
        res.encoding = res.apparent_encoding
        return res.text
    except Exception as e:
        print(e)
 
# Get the total number of result pages
def getNum(text):
    soup = BeautifulSoup(text, 'lxml')
    txt = soup.select('.fanye .txt')[0].text
    # Extract the digits from the "共**页" (total pages) label
    num = re.search(r'\d+', txt).group(0)
    return int(num)   # return an int so range() works on it
 
# Collect detail-page links from a listing page's HTML
def getLink(text):
    soup = BeautifulSoup(text, 'lxml')
    links = soup.select('.title a')
    for link in links:
        href = parse.urljoin('https://zz.zu.fang.com/', link['href'])
        hrefs.append(href)
 
# Parse a detail page and extract the listing fields
def parsePage(url):
    res = session.get(url)
    if res.status_code == 200:
        res.encoding = res.apparent_encoding
        soup = BeautifulSoup(res.text, 'lxml')
        try:
            title = soup.select('div .title')[0].text.strip().replace(' ', '')
            price = soup.select('div .trl-item')[0].text.strip()
            block = soup.select('.rcont #agantzfxq_C02_08')[0].text.strip()
            building = soup.select('.rcont #agantzfxq_C02_07')[0].text.strip()
            try:
                address = soup.select('.trl-item2 .rcont')[2].text.strip()
            except IndexError:
                address = soup.select('.trl-item2 .rcont')[1].text.strip()
            detail1 = soup.select('.clearfix')[4].text.strip().replace('\n\n\n', ',').replace('\n', '')
            detail2 = soup.select('.clearfix')[5].text.strip().replace('\n\n\n', ',').replace('\n', '')
            detail = detail1 + detail2
            name = soup.select('.zf_jjname')[0].text.strip()
            buserid = re.search(r"buserid: '(\d+)'", res.text).group(1)
            phone = getPhone(buserid)
            print(title, price, block, building, address, detail, name, phone)
            house = (title, price, block, building, address, detail, name, phone)
            info.append(house)
        except Exception:
            pass
    else:
        print(res.status_code, res.text)
 
# Fetch the agent's virtual phone number via the site's AJAX endpoint
def getPhone(buserid):
    url = 'https://zz.zu.fang.com/RentDetails/Ajax/GetAgentVirtualMobile.aspx'
    data['agentbid'] = buserid
    res = session.post(url, data=data)
    if res.status_code == 200:
        return res.text
    else:
        print(res.status_code)
        return
 
if __name__ == '__main__':
    start_time = time.time()
    hrefs = []
    info = []
    init_url = 'https://zz.zu.fang.com/house/'
    num = getNum(getHtml(init_url))
    for i in range(num):
        url = f'https://zz.zu.fang.com/house/i3{i + 1}/'
        text = getHtml(url)
        getLink(text)
    print(hrefs)
    for href in hrefs:
        parsePage(href)

    print("Fetched %d records" % len(info))
    print("Elapsed: {}".format(time.time() - start_time))
    session.close()
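
As the note above says, keep the load light while testing. One minimal way to do that (a suggested tweak, not part of the original code) is to cap the page count and pause between listing pages:

# Hypothetical throttled version of the main loop above
num = min(getNum(getHtml(init_url)), 3)   # scrape at most 3 listing pages
for i in range(num):
    url = f'https://zz.zu.fang.com/house/i3{i + 1}/'
    getLink(getHtml(url))
    time.sleep(1)   # wait 1 second between listing pages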

2. Optimizing into a multithreaded crawler

# Use a requests Session instead of bare requests calls (keeps cookies across requests)
# Parse HTML with BeautifulSoup (bs4)
# Run the crawl concurrently with concurrent.futures
import requests
# from lxml import etree    # alternative: XPath parsing
from bs4 import BeautifulSoup
from concurrent.futures import ThreadPoolExecutor
from urllib import parse
import re
import time
 
headers = {
    'referer': 'https://zz.zu.fang.com/',
    'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/86.0.4240.198 Safari/537.36',
    'cookie': 'global_cookie=ffzvt3kztwck05jm6twso2wjw18kl67hqft; integratecover=1; city=zz; keyWord_recenthousezz=%5b%7b%22name%22%3a%22%e6%96%b0%e5%af%86%22%2c%22detailName%22%3a%22%22%2c%22url%22%3a%22%2fhouse-a014868%2f%22%2c%22sort%22%3a1%7d%2c%7b%22name%22%3a%22%e4%ba%8c%e4%b8%83%22%2c%22detailName%22%3a%22%22%2c%22url%22%3a%22%2fhouse-a014864%2f%22%2c%22sort%22%3a1%7d%2c%7b%22name%22%3a%22%e9%83%91%e4%b8%9c%e6%96%b0%e5%8c%ba%22%2c%22detailName%22%3a%22%22%2c%22url%22%3a%22%2fhouse-a0842%2f%22%2c%22sort%22%3a1%7d%5d; __utma=147393320.427795962.1613371106.1613558547.1613575774.5; __utmc=147393320; __utmz=147393320.1613575774.5.4.utmcsr=zz.fang.com|utmccn=(referral)|utmcmd=referral|utmcct=/; ASP.NET_SessionId=vhrhxr1tdatcc1xyoxwybuwv; g_sourcepage=zf_fy%5Elb_pc; Captcha=4937566532507336644D6557347143746B5A6A6B4A7A48445A422F2F6A51746C67516F31357446573052634562725162316152533247514250736F72775566574A2B33514357304B6976343D; __utmt_t0=1; __utmt_t1=1; __utmt_t2=1; __utmb=147393320.9.10.1613575774; unique_cookie=U_0l0d1ilf1t0ci2rozai9qi24k1pkl9lcmrs*4'
}
data={
    'agentbid':''
}
 
session = requests.session()
session.headers = headers
 
# Fetch a page and return its decoded HTML
def getHtml(url):
    res = session.get(url)
    if res.status_code == 200:
        res.encoding = res.apparent_encoding
        return res.text
    else:
        print(res.status_code)

# Get the total number of result pages
def getNum(text):
    soup = BeautifulSoup(text, 'lxml')
    txt = soup.select('.fanye .txt')[0].text
    # Extract the digits from the "共**页" (total pages) label
    num = re.search(r'\d+', txt).group(0)
    return int(num)   # return an int so range() works on it
 
# Fetch a listing page and collect its detail-page links
def getLink(url):
    text = getHtml(url)
    soup = BeautifulSoup(text, 'lxml')
    links = soup.select('.title a')
    for link in links:
        href = parse.urljoin('https://zz.zu.fang.com/', link['href'])
        hrefs.append(href)
 
# Parse a detail page and extract the listing fields
def parsePage(url):
    res = session.get(url)
    if res.status_code == 200:
        res.encoding = res.apparent_encoding
        soup = BeautifulSoup(res.text, 'lxml')
        try:
            title = soup.select('div .title')[0].text.strip().replace(' ', '')
            price = soup.select('div .trl-item')[0].text.strip()
            block = soup.select('.rcont #agantzfxq_C02_08')[0].text.strip()
            building = soup.select('.rcont #agantzfxq_C02_07')[0].text.strip()
            try:
                address = soup.select('.trl-item2 .rcont')[2].text.strip()
            except IndexError:
                address = soup.select('.trl-item2 .rcont')[1].text.strip()
            detail1 = soup.select('.clearfix')[4].text.strip().replace('\n\n\n', ',').replace('\n', '')
            detail2 = soup.select('.clearfix')[5].text.strip().replace('\n\n\n', ',').replace('\n', '')
            detail = detail1 + detail2
            name = soup.select('.zf_jjname')[0].text.strip()
            buserid = re.search(r"buserid: '(\d+)'", res.text).group(1)
            phone = getPhone(buserid)
            print(title, price, block, building, address, detail, name, phone)
            house = (title, price, block, building, address, detail, name, phone)
            info.append(house)
        except Exception:
            pass
    else:
        print(res.status_code, res.text)
 
# Fetch the agent's virtual phone number via the site's AJAX endpoint
def getPhone(buserid):
    url = 'https://zz.zu.fang.com/RentDetails/Ajax/GetAgentVirtualMobile.aspx'
    data['agentbid'] = buserid
    res = session.post(url, data=data)
    if res.status_code == 200:
        return res.text
    else:
        print(res.status_code)
        return
 
if __name__ == '__main__':
    start_time = time.time()
    hrefs = []
    info = []
    init_url = 'https://zz.zu.fang.com/house/'
    num = getNum(getHtml(init_url))
    with ThreadPoolExecutor(max_workers=5) as t:
        for i in range(num):
            url = f'https://zz.zu.fang.com/house/i3{i + 1}/'
            t.submit(getLink, url)
    print("Collected %d links" % len(hrefs))
    print(hrefs)
    with ThreadPoolExecutor(max_workers=30) as t:
        for href in hrefs:
            t.submit(parsePage, href)
    print("Fetched %d records" % len(info))
    print("Elapsed: {}".format(time.time() - start_time))
    session.close()
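
Both pools append to shared lists (hrefs and info). In CPython, list.append is atomic under the GIL, so this happens to be safe, but any read-modify-write on shared state from worker threads needs a lock. A minimal sketch, assuming a hypothetical progress counter added to the crawler:

import threading

lock = threading.Lock()
pages_done = 0   # hypothetical shared counter, not in the original code

def getLink_counted(url):
    global pages_done
    getLink(url)   # the function defined above
    with lock:     # guard the read-modify-write
        pages_done += 1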

3. Further optimization with asyncio

# Use a requests Session instead of bare requests calls (keeps cookies across requests)
# Parse HTML with BeautifulSoup (bs4)
# Drive the concurrent.futures thread pools from an asyncio event loop
import requests
# from lxml import etree    # alternative: XPath parsing
from bs4 import BeautifulSoup
from concurrent.futures import ThreadPoolExecutor
from urllib import parse
import re
import time
import asyncio
 
headers = {
    'referer': 'https://zz.zu.fang.com/',
    'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/86.0.4240.198 Safari/537.36',
    'cookie': 'global_cookie=ffzvt3kztwck05jm6twso2wjw18kl67hqft; integratecover=1; city=zz; keyWord_recenthousezz=%5b%7b%22name%22%3a%22%e6%96%b0%e5%af%86%22%2c%22detailName%22%3a%22%22%2c%22url%22%3a%22%2fhouse-a014868%2f%22%2c%22sort%22%3a1%7d%2c%7b%22name%22%3a%22%e4%ba%8c%e4%b8%83%22%2c%22detailName%22%3a%22%22%2c%22url%22%3a%22%2fhouse-a014864%2f%22%2c%22sort%22%3a1%7d%2c%7b%22name%22%3a%22%e9%83%91%e4%b8%9c%e6%96%b0%e5%8c%ba%22%2c%22detailName%22%3a%22%22%2c%22url%22%3a%22%2fhouse-a0842%2f%22%2c%22sort%22%3a1%7d%5d; __utma=147393320.427795962.1613371106.1613558547.1613575774.5; __utmc=147393320; __utmz=147393320.1613575774.5.4.utmcsr=zz.fang.com|utmccn=(referral)|utmcmd=referral|utmcct=/; ASP.NET_SessionId=vhrhxr1tdatcc1xyoxwybuwv; g_sourcepage=zf_fy%5Elb_pc; Captcha=4937566532507336644D6557347143746B5A6A6B4A7A48445A422F2F6A51746C67516F31357446573052634562725162316152533247514250736F72775566574A2B33514357304B6976343D; __utmt_t0=1; __utmt_t1=1; __utmt_t2=1; __utmb=147393320.9.10.1613575774; unique_cookie=U_0l0d1ilf1t0ci2rozai9qi24k1pkl9lcmrs*4'
}
data={
    'agentbid':''
}
 
session = requests.session()
session.headers = headers
 
# Fetch a page and return its decoded HTML
def getHtml(url):
    res = session.get(url)
    if res.status_code == 200:
        res.encoding = res.apparent_encoding
        return res.text
    else:
        print(res.status_code)

# Get the total number of result pages
def getNum(text):
    soup = BeautifulSoup(text, 'lxml')
    txt = soup.select('.fanye .txt')[0].text
    # Extract the digits from the "共**页" (total pages) label
    num = re.search(r'\d+', txt).group(0)
    return int(num)   # return an int so range() works on it
 
# Fetch a listing page and collect its detail-page links
def getLink(url):
    text = getHtml(url)
    soup = BeautifulSoup(text, 'lxml')
    links = soup.select('.title a')
    for link in links:
        href = parse.urljoin('https://zz.zu.fang.com/', link['href'])
        hrefs.append(href)
 
# Parse a detail page and extract the listing fields
def parsePage(url):
    res = session.get(url)
    if res.status_code == 200:
        res.encoding = res.apparent_encoding
        soup = BeautifulSoup(res.text, 'lxml')
        try:
            title = soup.select('div .title')[0].text.strip().replace(' ', '')
            price = soup.select('div .trl-item')[0].text.strip()
            block = soup.select('.rcont #agantzfxq_C02_08')[0].text.strip()
            building = soup.select('.rcont #agantzfxq_C02_07')[0].text.strip()
            try:
                address = soup.select('.trl-item2 .rcont')[2].text.strip()
            except IndexError:
                address = soup.select('.trl-item2 .rcont')[1].text.strip()
            detail1 = soup.select('.clearfix')[4].text.strip().replace('\n\n\n', ',').replace('\n', '')
            detail2 = soup.select('.clearfix')[5].text.strip().replace('\n\n\n', ',').replace('\n', '')
            detail = detail1 + detail2
            name = soup.select('.zf_jjname')[0].text.strip()
            buserid = re.search(r"buserid: '(\d+)'", res.text).group(1)
            phone = getPhone(buserid)
            print(title, price, block, building, address, detail, name, phone)
            house = (title, price, block, building, address, detail, name, phone)
            info.append(house)
        except Exception:
            pass
    else:
        print(res.status_code, res.text)
 
# Fetch the agent's virtual phone number via the site's AJAX endpoint
def getPhone(buserid):
    url = 'https://zz.zu.fang.com/RentDetails/Ajax/GetAgentVirtualMobile.aspx'
    data['agentbid'] = buserid
    res = session.post(url, data=data)
    if res.status_code == 200:
        return res.text
    else:
        print(res.status_code)
        return
 
# Thread pool for collecting detail-page links
async def Pool1(num):
    loop = asyncio.get_event_loop()
    task = []
    with ThreadPoolExecutor(max_workers=5) as t:
        for i in range(num):
            url = f'https://zz.zu.fang.com/house/i3{i + 1}/'
            task.append(loop.run_in_executor(t, getLink, url))
    await asyncio.wait(task)   # await the futures instead of dropping them

# Thread pool for parsing detail pages
async def Pool2(hrefs):
    loop = asyncio.get_event_loop()
    task = []
    with ThreadPoolExecutor(max_workers=30) as t:
        for href in hrefs:
            task.append(loop.run_in_executor(t, parsePage, href))
    await asyncio.wait(task)
 
if __name__ == '__main__':
    start_time = time.time()
    hrefs = []
    info = []
    init_url = 'https://zz.zu.fang.com/house/'
    num = getNum(getHtml(init_url))
    loop = asyncio.get_event_loop()
    loop.run_until_complete(Pool1(num))
    print("Collected %d links" % len(hrefs))
    print(hrefs)
    loop.run_until_complete(Pool2(hrefs))
    loop.close()
    print("Fetched %d records" % len(info))
    print("Elapsed: {}".format(time.time() - start_time))
    session.close()
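
The worker functions are still blocking requests calls, so asyncio here only schedules the same thread pools; the real work happens in the executors. An equivalent, slightly more idiomatic shape (a sketch, not the original code) collects the futures with asyncio.gather, which also re-raises the first worker exception instead of silently dropping it:

# Hypothetical variant of Pool2 built on asyncio.gather
async def Pool2_gather(hrefs):
    loop = asyncio.get_event_loop()
    with ThreadPoolExecutor(max_workers=30) as t:
        tasks = [loop.run_in_executor(t, parsePage, href) for href in hrefs]
        await asyncio.gather(*tasks)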

4. Storing the data in MySQL

(1) Creating the table

from sqlalchemy import create_engine
from sqlalchemy import String, Integer, Column, Text
from sqlalchemy.orm import sessionmaker
from sqlalchemy.orm import scoped_session  # avoids thread-safety issues in the multithreaded crawler
from sqlalchemy.ext.declarative import declarative_base

BASE = declarative_base()  # declarative base class for the model
engine = create_engine(
    "mysql+pymysql://root:root@127.0.0.1:3306/pytest?charset=utf8",
    max_overflow=300,  # extra connections allowed beyond pool_size
    pool_size=100,  # connection pool size
    echo=False,  # no SQL debug output
)


class House(BASE):
    __tablename__ = 'house'
    id = Column(Integer, primary_key=True, autoincrement=True)
    title = Column(String(200))
    price = Column(String(200))
    block = Column(String(200))
    building = Column(String(200))
    address = Column(String(200))
    detail = Column(Text())
    name = Column(String(20))
    phone = Column(String(20))


BASE.metadata.create_all(engine)
Session = sessionmaker(engine)
sess = scoped_session(Session)
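
Save this module as mysqldb.py; the crawler in the next part imports sess and House from it under that name. A quick smoke test (an illustrative snippet, not part of the original) confirms the table and scoped session work:

# Hypothetical smoke test for the mysqldb module
from mysqldb import sess, House

row = House(title='test', price='1000元/月', name='tester', phone='000')
sess.add(row)
sess.commit()
print(sess.query(House).count())  # should print at least 1
sess.remove()                     # release the scoped session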

(2) Writing the data into the database

# Use a requests Session instead of bare requests calls (keeps cookies across requests)
# Parse HTML with BeautifulSoup (bs4)
# Drive the concurrent.futures thread pools from an asyncio event loop
import requests
from bs4 import BeautifulSoup
from concurrent.futures import ThreadPoolExecutor
from urllib import parse
from mysqldb import sess, House
import re
import time
import asyncio
 
headers = {
    'referer': 'https://zz.zu.fang.com/',
    'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/86.0.4240.198 Safari/537.36',
    'cookie': 'global_cookie=ffzvt3kztwck05jm6twso2wjw18kl67hqft; integratecover=1; city=zz; __utmc=147393320; ASP.NET_SessionId=vhrhxr1tdatcc1xyoxwybuwv; __utma=147393320.427795962.1613371106.1613575774.1613580597.6; __utmz=147393320.1613580597.6.5.utmcsr=zz.fang.com|utmccn=(referral)|utmcmd=referral|utmcct=/; __utmt_t0=1; __utmt_t1=1; __utmt_t2=1; Rent_StatLog=c158b2a7-4622-45a9-9e69-dcf6f42cf577; keyWord_recenthousezz=%5b%7b%22name%22%3a%22%e4%ba%8c%e4%b8%83%22%2c%22detailName%22%3a%22%22%2c%22url%22%3a%22%2fhouse-a014864%2f%22%2c%22sort%22%3a1%7d%2c%7b%22name%22%3a%22%e9%83%91%e4%b8%9c%e6%96%b0%e5%8c%ba%22%2c%22detailName%22%3a%22%22%2c%22url%22%3a%22%2fhouse-a0842%2f%22%2c%22sort%22%3a1%7d%2c%7b%22name%22%3a%22%e7%bb%8f%e5%bc%80%22%2c%22detailName%22%3a%22%22%2c%22url%22%3a%22%2fhouse-a014871%2f%22%2c%22sort%22%3a1%7d%5d; g_sourcepage=zf_fy%5Elb_pc; Captcha=6B65716A41454739794D666864397178613772676C75447A4E746C657144775A347A6D42554F446532357649643062344F6976756E563450554E59594B7833712B413579506C4B684958343D; unique_cookie=U_0l0d1ilf1t0ci2rozai9qi24k1pkl9lcmrs*14; __utmb=147393320.21.10.1613580597'
}
data={
    'agentbid':''
}
 
session = requests.session()
session.headers = headers
 
# Fetch a page and return its decoded HTML
def getHtml(url):
    res = session.get(url)
    if res.status_code == 200:
        res.encoding = res.apparent_encoding
        return res.text
    else:
        print(res.status_code)

# Get the total number of result pages
def getNum(text):
    soup = BeautifulSoup(text, 'lxml')
    txt = soup.select('.fanye .txt')[0].text
    # Extract the digits from the "共**页" (total pages) label
    num = re.search(r'\d+', txt).group(0)
    return int(num)   # return an int so range() works on it
 
# Fetch a listing page and collect its detail-page links
def getLink(url):
    text = getHtml(url)
    soup = BeautifulSoup(text, 'lxml')
    links = soup.select('.title a')
    for link in links:
        href = parse.urljoin('https://zz.zu.fang.com/', link['href'])
        hrefs.append(href)
 
# Parse a detail page, extract the listing fields, and store them
def parsePage(url):
    res = session.get(url)
    if res.status_code == 200:
        res.encoding = res.apparent_encoding
        soup = BeautifulSoup(res.text, 'lxml')
        try:
            title = soup.select('div .title')[0].text.strip().replace(' ', '')
            price = soup.select('div .trl-item')[0].text.strip()
            block = soup.select('.rcont #agantzfxq_C02_08')[0].text.strip()
            building = soup.select('.rcont #agantzfxq_C02_07')[0].text.strip()
            try:
                address = soup.select('.trl-item2 .rcont')[2].text.strip()
            except IndexError:
                address = soup.select('.trl-item2 .rcont')[1].text.strip()
            detail1 = soup.select('.clearfix')[4].text.strip().replace('\n\n\n', ',').replace('\n', '')
            detail2 = soup.select('.clearfix')[5].text.strip().replace('\n\n\n', ',').replace('\n', '')
            detail = detail1 + detail2
            name = soup.select('.zf_jjname')[0].text.strip()
            buserid = re.search(r"buserid: '(\d+)'", res.text).group(1)
            phone = getPhone(buserid)
            print(title, price, block, building, address, detail, name, phone)
            house = (title, price, block, building, address, detail, name, phone)
            info.append(house)
            try:
                house_data = House(
                    title=title,
                    price=price,
                    block=block,
                    building=building,
                    address=address,
                    detail=detail,
                    name=name,
                    phone=phone
                )
                sess.add(house_data)
                sess.commit()
            except Exception as e:
                print(e)         # log the error
                sess.rollback()  # roll back the failed transaction
        except Exception:
            pass
    else:
        print(res.status_code, res.text)
 
# Fetch the agent's virtual phone number via the site's AJAX endpoint
def getPhone(buserid):
    url = 'https://zz.zu.fang.com/RentDetails/Ajax/GetAgentVirtualMobile.aspx'
    data['agentbid'] = buserid
    res = session.post(url, data=data)
    if res.status_code == 200:
        return res.text
    else:
        print(res.status_code)
        return
 
# Thread pool for collecting detail-page links
async def Pool1(num):
    loop = asyncio.get_event_loop()
    task = []
    with ThreadPoolExecutor(max_workers=5) as t:
        for i in range(num):
            url = f'https://zz.zu.fang.com/house/i3{i + 1}/'
            task.append(loop.run_in_executor(t, getLink, url))
    await asyncio.wait(task)   # await the futures instead of dropping them

# Thread pool for parsing detail pages
async def Pool2(hrefs):
    loop = asyncio.get_event_loop()
    task = []
    with ThreadPoolExecutor(max_workers=30) as t:
        for href in hrefs:
            task.append(loop.run_in_executor(t, parsePage, href))
    await asyncio.wait(task)
 
if __name__ == '__main__':
    start_time = time.time()
    hrefs = []
    info = []
    init_url = 'https://zz.zu.fang.com/house/'
    num = getNum(getHtml(init_url))
    loop = asyncio.get_event_loop()
    loop.run_until_complete(Pool1(num))
    print("Collected %d links" % len(hrefs))
    print(hrefs)
    loop.run_until_complete(Pool2(hrefs))
    loop.close()
    print("Fetched %d records" % len(info))
    print("Elapsed: {}".format(time.time() - start_time))
    session.close()
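
After a run finishes, you can read the stored listings back through the same session (an illustrative query, not part of the original code):

# Hypothetical check of what landed in the house table
from mysqldb import sess, House

print("rows stored:", sess.query(House).count())
for h in sess.query(House).limit(5):
    print(h.title, h.price, h.phone)
sess.remove()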

5. Final results (phone numbers redacted)


That concludes this hands-on walkthrough of scraping city rental listings with Python!
