Day 8 Homework
Published: 2019-06-12


1. Continue the Wandoujia crawl and scrape the remaining fields: the screenshot image URLs from the app intro and the user comments.
2. Insert the scraped Wandoujia data into MongoDB:
   - create a database named wandoujia
   - store the home-page data in a collection named index
   - store the detail-page data in a collection named detail

The code is as follows:
import requests
import re
from bs4 import BeautifulSoup
from pymongo import MongoClient

# Connect to MongoDB and get the two collections of the wandoujia database
client = MongoClient('localhost', 27017)
index_col = client['wandoujia']['index']
detail_col = client['wandoujia']['detail']


# 1. Send a request
def get_page(url):
    response = requests.get(url)
    return response


# 2. Start parsing
# Parse the detail page
def parse_detail(text):
    soup = BeautifulSoup(text, 'lxml')
    try:
        name = soup.find(name="span", attrs={"class": "title"}).text
    except Exception:
        name = None
    try:
        love = soup.find(name='span', attrs={"class": "love"}).text
    except Exception:
        love = None
    try:
        commit_num = soup.find(name='a', attrs={"class": "comment-open"}).text
    except Exception:
        commit_num = None
    try:
        commit_content = soup.find(name='div', attrs={"class": "con"}).text
    except Exception:
        commit_content = None
    try:
        download_url = soup.find(name='a', attrs={"class": "normal-dl-btn"}).attrs['href']
    except Exception:
        download_url = None

    # Use placeholder values for missing fields so detail_data is always defined
    if not love:
        love = "没人点赞"
    if not download_url:
        download_url = "没有安装包"
    detail_data = {
        'name': name,
        'love': love,
        'commit_num': commit_num,
        'commit_content': commit_content,
        'download_url': download_url
    }
    detail_col.insert(detail_data)
    print(f'{name}app数据插入成功!')


# Parse the home page
def parse_index(data):
    soup = BeautifulSoup(data, 'lxml')
    # Get the li tag of every app
    app_list = soup.find_all(name='li', attrs={"class": "card"})
    for app in app_list:
        # Icon address: the data-original attribute of the first img tag
        img = app.find(name='img').attrs['data-original']
        # Download count: text of the span tag whose class is install-count
        down_num = app.find(name='span', attrs={"class": "install-count"}).text
        # Size: text of the span tag whose text matches digits followed by MB
        size = soup.find(name='span', text=re.compile(r"\d+MB")).text
        # Detail-page address: the href attribute of the first a tag
        detail_url = app.find(name='a').attrs['href']
        # Assemble the home-page data
        index_data = {
            'img': img,
            'down_num': down_num,
            'size': size,
            'detail_url': detail_url
        }
        # Insert the data
        index_col.insert(index_data)
        print('主页数据插入成功!')
        # 3. Send a request to the app detail page
        response = get_page(detail_url)
        # 4. Parse the app detail page
        parse_detail(response.text)


def main():
    for line in range(1, 33):
        url = f"https://www.wandoujia.com/wdjweb/api/category/more?catId=6001&subCatId=0&page={line}&ctoken=FRsWKgWBqMBZLdxLaK4iem9B"
        # 1. Send a request to the app API
        response = get_page(url)
        print('*' * 1000)
        # Deserialize the response into a dict
        data = response.json()
        # Get the HTML fragment that holds the app list
        app_li = data['data']['content']
        # 2. Parse the app data
        parse_index(app_li)
    # Close the MongoDB client once all pages have been processed
    client.close()


if __name__ == '__main__':
    main()

Class Notes

1. The bs4 parsing library

'''
pip3 install beautifulsoup4  # install bs4
pip3 install lxml            # install the lxml parser
'''
# Sample HTML (the "Dormouse's story" page used throughout these notes)
html_doc = """
<html><head><title>The Dormouse's story</title></head>
<body>
<p class="sister"><b>$37</b></p>
<p class="story" id="p">Once upon a time there were three little sisters; and their names were
<a href="http://example.com/elsie" class="sister" id="link1">Elsie</a>,
<a href="http://example.com/lacie" class="sister" id="link2">Lacie</a> and
<a href="http://example.com/tillie" class="sister" id="link3">Tillie</a>;
and they lived at the bottom of a well.</p>
<p class="story">...</p>
"""
from bs4 import BeautifulSoup  # import the BeautifulSoup class from bs4
# Argument 1: the text to parse
# Argument 2: the parser (html.parser, lxml, ...)
soup = BeautifulSoup(html_doc, 'lxml')
print(soup)
print('*' * 100)
print(type(soup))
print('*' * 100)
# Pretty-print the document
html = soup.prettify()
print(html)

2. Traversing the document tree with bs4

html_doc = """The Dormouse's story

$37

Once upon a time there were three little sisters; and their names wereElsie,Lacie andTillie;and they lived at the bottom of a well.

...

"""from bs4 import BeautifulSoupsoup = BeautifulSoup(html_doc,'lxml')'''1、用法2、获取标签的名称3、获取标签的属性4、获取标签的内容5、嵌套选择6、子节点、子孙节点7、父节点、祖先节点8、兄弟节点'''#1.直接使用print(soup.p)#查找第一个p标签print(soup.a)#查找第一个a标签#2.获取标签的名称print(soup.head.name)#获取head标签的名称#3.获取标签的属性print(soup.a.attrs)#获取a标签中的所有属性print(soup.a.attrs['href'])#获取a标签中的href属性#4.获取标签的内容print(soup.p.text)#$37#5.嵌套选择print(soup.html.head)#6.子节点、子孙节点print(soup.body.children)#body所有子节点,返回的是迭代器对象print(list(soup.body.children))#强转成列表类型print(soup.body.descendants)#子孙节点print(list(soup.body.descendants))#子孙节点#7.父节点、祖先节点print(soup.p.parent)#获取p标签的父亲节点#返回的是生成器对象print(soup.p.parents)#获取p标签所有的祖先节点print(list(soup.p.parents))#8.兄弟节点#找下一个兄弟print(soup.p.next_siblings)print(list(soup.p.next_siblings))#找上一个兄弟print(soup.a.previous_sibling)#找到第一个a标签的上一个兄弟节点#找到a标签上面的所有兄弟节点print(soup.a.previous_sibling)#返回的是生成器print(list(soup.a.previous_sibling))

3. Searching the document tree with bs4

html_doc = """The Dormouse's story

$37

Once upon a time there were three little sisters; and their names weretankElsie,Lacie andTillie;and they lived at the bottom of a well.


...

"""from bs4 import BeautifulSoupsoup = BeautifulSoup(html_doc,'lxml')#字符串过滤器#namep_tag = soup.find(name='p')print(p_tag) # 根据文本p查找某个标签# 找到所有标签名为p的节点tag_s1 = soup.find_all(name='p')print(tag_s1)#attrs#查找第一个class为sister的节点p = soup.find(attrs={
"class":"sister"})print(p)#查找所有class为sister的节点tag_s2 = soup.find_all(attrs={
"class":"sister"})print(tag_s2)#texttext = soup.find(text="$37")print(text)#配合使用:#找到一个id为link2、文本为Lacie的a标签a_tag = soup.find(name="a",attrs={
"id":"link2"},text = "Lacie")print(a_tag)#正则过滤器import re#namep_tag = soup.find(name=re.compile('p'))print(p_tag)#列表过滤器import re#nametags = soup.find_all(name=['p','a',re.compile('html')])print(tags)#-bool过滤器#True匹配#找到有id 的p标签p = soup.find(name='p',attrs={
"id":True})print(p)#方法过滤器#匹配标签名为a、属性有id没有class的标签def have_id_class(tag): if tag.name == 'a' and tag.has_attr('id')and tag.has_attr('class'): return tagtag = soup.find(name = have_id_class)print(tag)

4. Scraping Wandoujia app data

import requests
import re
from bs4 import BeautifulSoup


# 1. Send a request
def get_page(url):
    response = requests.get(url)
    return response


# 2. Start parsing
def parse_index(data):
    soup = BeautifulSoup(data, 'lxml')
    # Get the li tag of every app
    app_list = soup.find_all(name='li', attrs={"class": "card"})
    for app in app_list:
        # Icon address
        img = app.find(name='img').attrs['data-original']
        print(img)
        # Download count
        down_num = app.find(name='span', attrs={"class": "install-count"}).text
        print(down_num)
        # Size
        size = soup.find(name='span', text=re.compile(r"\d+MB")).text
        print(size)
        # Detail-page address: the href attribute of the first a tag
        detail_url = app.find(name='a').attrs['href']
        print(detail_url)
        # 3. Send a request to the detail page
        response = get_page(detail_url)
        # 4. Parse the app detail page
        parse_detail(response.text)


def parse_detail(text):
    soup = BeautifulSoup(text, 'lxml')
    # App name
    name = soup.find(name="span", attrs={"class": "title"}).text
    print(name)
    # Like rate
    love = soup.find(name='span', attrs={"class": "love"}).text
    print(love)
    # Number of comments
    commit_num = soup.find(name='a', attrs={"class": "comment-open"}).text
    print(commit_num)
    # Editor's review
    commit_content = soup.find(name='div', attrs={"class": "con"}).text
    print(commit_content)
    # App download link
    download_url = soup.find(name='a', attrs={"class": "normal-dl-btn"}).attrs['href']
    print(
        f'''
        =========begin============
        app名称:{name}
        好评率:{love}
        评论数:{commit_num}
        小编点评:{commit_content}
        app下载链接:{download_url}
        ==========end===============
        '''
    )


def main():
    for line in range(1, 33):
        url = f"https://www.wandoujia.com/wdjweb/api/category/more?catId=6001&subCatId=0&page={line}&ctoken=1XgmoJKndXkl17m9HGiCMmJx"
        # 1. Send a request to the app API
        response = get_page(url)
        print('*' * 1000)
        # Deserialize the response into a dict
        data = response.json()
        # Get the HTML fragment that holds the app list
        app_li = data['data']['content']
        # 2. Parse the app data
        parse_index(app_li)


if __name__ == '__main__':
    main()

5. Basic usage of pymongo

from pymongo import MongoClient

# 1. Connect to the MongoDB client
# Argument 1: MongoDB host address
# Argument 2: MongoDB port (default: 27017)
client = MongoClient('localhost', 27017)
print(client)
# 2. Access the tank_db database (created automatically on first write)
print(client['tank_db'])
# 3. Access a collection
print(client['tank_db']['people'])
# 4. Insert data into the tank_db database
# Insert one document
data1 = {
    'name': 'tank',
    'age': 18,
    'sex': 'male'
}
client['tank_db']['people'].insert(data1)
# Insert several documents
data1 = {
    'name': '*',
    'age': 18,
    'sex': 'male'
}
data2 = {
    'name': '**',
    'age': 21,
    'sex': 'female'
}
data3 = {
    'name': '***',
    'age': 73,
    'sex': 'female'
}
client['tank_db']['people'].insert([data1, data2, data3])
# 5. Query data
# Get all documents (returns a cursor)
data_s = client['tank_db']['people'].find()
print(data_s)
# Loop over the cursor to print every document
for data in data_s:
    print(data)
# Get a single document
data = client['tank_db']['people'].find_one()
print(data)
# The officially recommended insert methods:
# insert one document: insert_one
client['tank_db']['people'].insert_one(data1)
# insert several documents: insert_many
client['tank_db']['people'].insert_many([data2, data3])
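Beyond inserts, the same collection object also supports filtered queries, updates and deletes. A minimal sketch against the tank_db.people collection from above:

people = client['tank_db']['people']

# Filtered query: every document whose age is greater than 20
for doc in people.find({'age': {'$gt': 20}}):
    print(doc)

# Update the first matching document: set tank's age to 19
people.update_one({'name': 'tank'}, {'$set': {'age': 19}})

# Delete the first matching document
people.delete_one({'name': 'tank'})

# Count documents that match a filter
print(people.count_documents({'sex': 'female'}))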

The difficulty went up again today. Tomorrow is the last day, so I still need to make the most of the time and learn as much as I can.

Reposted from: https://www.cnblogs.com/-zcj/p/11063662.html
