
# [Basics] [Python Web Scraping] [6. Data Persistence] Saving Data to Excel, JSON, and CSV (with plenty of example code; worth bookmarking)


Python Web Scraping Basics

- Data Persistence (Saving Data)
  - 1. Excel
    - Creating a Worksheet
    - Writing Data in Bulk
    - Reading Sheet Data
    - Example: Saving Douban Data to Excel
    - Example: Saving NetEase News to Excel
  - 2. JSON
    - Serialization and Deserialization
    - Keeping Chinese Characters Readable
    - Example: Saving Douban Data as JSON
    - Example: Saving a JSON Response (Ku6)
  - 3. CSV
    - Writing List Data to CSV
    - Example: Saving Douban Data to CSV (List Writer)
    - Writing Dict Data to CSV
    - Example: Saving Douban Data to CSV (Dict Writer)
    - Reading CSV Data
    - Example: Saving NetEase News to CSV

## Data Persistence (Saving Data)

### 1. Excel

#### Creating a Worksheet

```python
import openpyxl  # third-party module: pip install openpyxl

# 1. Create a workbook object
work_book = openpyxl.Workbook()

# 2. Create a sheet object
sheet1 = work_book.create_sheet('表1')
# To work with the default sheet instead, use the workbook's active attribute
sheet1 = work_book.active

# 3. Write data into individual cells
sheet1['A1'] = 'A1'
sheet1['B7'] = 'B7'

# cell() returns a cell object; row is the row index, column the column index
sheet1.cell(row=1, column=1).value = '111111'
sheet1.cell(row=2, column=2).value = '222222'

data1 = (1, 2, 3, 4, 5)
# data2 = '45678'
# sheet1.append(sequence) appends a whole row at once; pass it sequence data
# (list/tuple) - after the first and second extraction passes of a crawl,
# you end up with exactly one such record at a time
sheet1.append(data1)
# sheet1.append(data2)

# 4. Save
work_book.save('实例.xlsx')
```

#### Writing Data in Bulk

```python
import openpyxl

work = openpyxl.Workbook()
sheet1 = work.active

# Write the 9 x 9 multiplication table, one product per cell
for i in range(1, 10):
    for j in range(1, i + 1):
        print(f'{j} x {i} = {j * i}', end='\t')
        sheet1.cell(row=i, column=j).value = f'{j} x {i} = {j * i}'
    print()

work.save('实例.xlsx')
```

#### Reading Sheet Data

```python
import openpyxl

workbook = openpyxl.load_workbook('实例.xlsx')
print(workbook.sheetnames)

sheet = workbook['Sheet']  # pick a sheet by name
print(sheet.max_row)     # highest row in use
print(sheet.max_column)  # highest column in use

# Read the first row
for i in range(1, sheet.max_column + 1):
    print(sheet.cell(row=1, column=i).value)  # empty cells return None

# Read the first column
for j in range(1, sheet.max_row + 1):
    print(sheet.cell(row=j, column=1).value)  # empty cells return None

# Read the whole sheet, row by row
# (note: the row index iterates rows and the column index iterates columns)
for i in range(1, sheet.max_row + 1):
    for j in range(1, sheet.max_column + 1):
        print(sheet.cell(row=i, column=j).value)
```

#### Example: Saving Douban Data to Excel

```python
import parsel
import requests
import openpyxl

# 1. Create a workbook object
work = openpyxl.Workbook()
# 2. Get the sheet object
sheet1 = work.active
# Write the header here - once, before the crawl loop
sheet1.append(['标题', '简介', '评分', '评价人数'])

for page in range(0, 226, 25):
    url = f'https://movie.douban.com/top250?start={page}&filter='
    headers = {
        'Cookie': 'll="118267"; bid=VrC8tT1GWz8; __yadk_uid=iHqVKZD4ZHIVREbOrlu9k4uWFSsAdZtO; _pk_id.100001.4cf6=b39d476add4f5658.1683638062.; __utmz=30149280.1687782730.8.7.utmcsr=baidu|utmccn=(organic)|utmcmd=organic; __utmz=223695111.1687782730.4.4.utmcsr=baidu|utmccn=(organic)|utmcmd=organic; _pk_ref.100001.4cf6=%5B%22%22%2C%22%22%2C1687952054%2C%22https%3A%2F%2Fwww.baidu.com%2Flink%3Furl%3DqdlD_RZvrHI0sXUZ08wSSKbkKLAWA_R84aALUkbWwp__yA2hUL-2C_Ej15saTpe7%26wd%3D%26eqid%3Dfdfaeaeb0001b3f60000000664998548%22%5D; _pk_ses.100001.4cf6=1; ap_v=0,6.0; __utma=30149280.1169382564.1682168622.1687782730.1687952054.9; __utmb=30149280.0.10.1687952054; __utmc=30149280; __utma=223695111.1640817040.1683638062.1687782730.1687952054.5; __utmb=223695111.0.10.1687952054; __utmc=223695111; __gads=ID=744f53c3cb2ebb52-22841ef3a4e00021:T=1683638065:RT=1687952056:S=ALNI_MZhRKuML1OBDnNRafe3qd6-ndhaiQ; __gpi=UID=00000c03bafcda5c:T=1683638065:RT=1687952056:S=ALNI_MbkLLsUm467wiS6ZZ6Mn2ohKIWBZw',
        'Host': 'movie.douban.com',
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/114.0.0.0 Safari/537.36',
    }
    response = requests.get(url=url, headers=headers)
    html_data = response.text
    # print(html_data)

    # Parse the data: build a selector object
    selector = parsel.Selector(html_data)
    # First extraction pass: one <li> per movie
    lis = selector.css('.grid_view>li')
    # Second extraction pass: the fields of each movie
    for li in lis:
        title = li.css('.hd>a>span:nth-child(1)::text').get()
        info = li.css('.bd>p:nth-child(1)::text').getall()
        info = '//'.join([i.strip() for i in info])
        score = li.css('.rating_num::text').get()
        follow = li.css('.star>span:nth-child(4)::text').get()
        print(title, info, score, follow)
        # Call append() once per record - this is not the place for the header
        sheet1.append([title, info, score, follow])
    print('=' * 100 + '\n')

# Save once, after all pages have been crawled
work.save('douban.xlsx')

# Encoding note: Excel in MS Office reads text files as gbk,
# while WPS uses utf-8.
```
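Before moving on, note that besides addressing cells one index at a time, openpyxl can also iterate over whole rows. A minimal reading sketch, assuming the same 实例.xlsx produced above and openpyxl 2.6+ (where the `values_only` flag is available):

```python
import openpyxl

workbook = openpyxl.load_workbook('实例.xlsx')
sheet = workbook.active

# iter_rows(values_only=True) yields each row as a tuple of plain
# cell values instead of Cell objects; empty cells come back as None
for row in sheet.iter_rows(values_only=True):
    print(row)
```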
#### Example: Saving NetEase News to Excel

```python
"""
Target site: https://news.163.com/
Scroll down to the "要闻" (top stories) category;
use Ctrl + F to search the page if you cannot find it.

Task: scrape the first page of the "要闻" category and save it
as an Excel sheet with the following fields:
    title  channelname  docurl  imgurl  source  tlink
"""
import json
import re

import requests
import openpyxl

url = 'https://news.163.com/special/cm_yaowen20200213/?callback=data_callback'
response = requests.get(url=url)
json_data = response.text
# print(json_data)

# The response is JSONP - data_callback([...]) - so strip the wrapper first
result = re.findall(r'data_callback\((.*?)\)', json_data, re.S)
# print(result)
item_json = json.loads(result[0])
# print(item_json)
# print(type(item_json))

work = openpyxl.Workbook()
sheet1 = work.active
sheet1.append(['title', 'channelname', 'docurl', 'imgurl', 'source', 'tlink'])

for item in item_json:
    title = item['title']
    channelname = item['channelname']
    docurl = item['docurl']
    imgurl = item['imgurl']
    source = item['source']
    tlink = item['tlink']
    print(title, channelname, docurl, imgurl, source, tlink, sep=' | ')
    sheet1.append([title, channelname, docurl, imgurl, source, tlink])

work.save('网易新闻.xlsx')
```

### 2. JSON

#### Serialization and Deserialization

```python
import json  # built-in module

# JSON arrays map to [] and JSON objects to {}
data = {
    'name': 'ACME',
    'shares': 100,
    'price': 542.23
}

# JSON serialization: turn an object into a JSON string
# dumps() serializes to a JSON string
json_str = json.dumps(data)
print(json_str)
print(type(json_str))

# JSON deserialization: turn a JSON string back into an object
# loads() deserializes a JSON string
json_obj = json.loads(json_str)
print(json_obj)
print(type(json_obj))
```

#### Keeping Chinese Characters Readable

```python
import json

data = {
    'name': '青灯',
    'shares': 100,
    'price': 542.23
}

# By default json escapes non-ASCII characters (unicode escapes),
# so Chinese is unreadable in the output;
# ensure_ascii=False turns that default off
json_str = json.dumps(data, ensure_ascii=False)

with open('data.json', mode='w', encoding='utf-8') as f:
    f.write(json_str)
```

#### Example: Saving Douban Data as JSON

```python
import json

import parsel
import requests

data = []  # an empty list that collects every record

for page in range(0, 226, 25):
    url = f'https://movie.douban.com/top250?start={page}&filter='
    headers = {
        'Cookie': 'll="118267"; bid=VrC8tT1GWz8; __yadk_uid=iHqVKZD4ZHIVREbOrlu9k4uWFSsAdZtO; _pk_id.100001.4cf6=b39d476add4f5658.1683638062.; __utmz=30149280.1687782730.8.7.utmcsr=baidu|utmccn=(organic)|utmcmd=organic; __utmz=223695111.1687782730.4.4.utmcsr=baidu|utmccn=(organic)|utmcmd=organic; _pk_ref.100001.4cf6=%5B%22%22%2C%22%22%2C1687952054%2C%22https%3A%2F%2Fwww.baidu.com%2Flink%3Furl%3DqdlD_RZvrHI0sXUZ08wSSKbkKLAWA_R84aALUkbWwp__yA2hUL-2C_Ej15saTpe7%26wd%3D%26eqid%3Dfdfaeaeb0001b3f60000000664998548%22%5D; _pk_ses.100001.4cf6=1; ap_v=0,6.0; __utma=30149280.1169382564.1682168622.1687782730.1687952054.9; __utmb=30149280.0.10.1687952054; __utmc=30149280; __utma=223695111.1640817040.1683638062.1687782730.1687952054.5; __utmb=223695111.0.10.1687952054; __utmc=223695111; __gads=ID=744f53c3cb2ebb52-22841ef3a4e00021:T=1683638065:RT=1687952056:S=ALNI_MZhRKuML1OBDnNRafe3qd6-ndhaiQ; __gpi=UID=00000c03bafcda5c:T=1683638065:RT=1687952056:S=ALNI_MbkLLsUm467wiS6ZZ6Mn2ohKIWBZw',
        'Host': 'movie.douban.com',
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/114.0.0.0 Safari/537.36',
    }
    response = requests.get(url=url, headers=headers)
    html_data = response.text
    # print(html_data)

    # Parse the data
    selector = parsel.Selector(html_data)
    # First extraction pass
    lis = selector.css('.grid_view>li')
    # Second extraction pass
    for li in lis:
        title = li.css('.hd>a>span:nth-child(1)::text').get()
        info = li.css('.bd>p:nth-child(1)::text').getall()
        info = '//'.join([i.strip() for i in info])
        score = li.css('.rating_num::text').get()
        follow = li.css('.star>span:nth-child(4)::text').get()
        # print(title, info, score, follow)
        d = {'title': title, 'info': info, 'score': score, 'follow': follow}
        data.append(d)
    # print('=' * 100 + '\n')

print(data)

# Serialize the collected records: [{}, {}, {} ...]
json_str = json.dumps(data, ensure_ascii=False)
with open('douban.json', mode='w', encoding='utf-8') as f:
    f.write(json_str)
```
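The examples above serialize to a string with dumps() and write it out manually. The json module can also talk to file objects directly; here is a minimal sketch of that variant using the standard json.dump() / json.load() calls (the data and file name are only illustrative):

```python
import json

data = [{'title': '青灯', 'score': '9.7'}]

# json.dump() serializes straight into an open file object;
# indent=4 pretty-prints, ensure_ascii=False keeps Chinese readable
with open('douban.json', mode='w', encoding='utf-8') as f:
    json.dump(data, f, ensure_ascii=False, indent=4)

# json.load() deserializes straight from an open file object
with open('douban.json', mode='r', encoding='utf-8') as f:
    loaded = json.load(f)

print(loaded == data)  # True: the round trip preserves the data
```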
#### Example: Saving a JSON Response (Ku6)

```python
"""
Target URL: https://www.ku6.com/video/feed?pageNo=0&pageSize=40&subjectId=76
Request method: GET

Task:
1. Request the URL above.
2. Save the returned data into a json file named data.json;
   the file should contain the JSON string.
"""
import requests

url = 'https://www.ku6.com/video/feed?pageNo=0&pageSize=40&subjectId=76'
response = requests.get(url=url)
json_data = response.text
print(json_data)

# The response body is already a JSON string, so no extra serialization is needed
with open('data.json', mode='w', encoding='utf-8') as f:
    f.write(json_data)
```

### 3. CSV

#### Writing List Data to CSV

```python
"""
CSV format:
    each line is one record;
    the fields of a record are separated by a delimiter, a comma by default
"""
import csv  # built-in module

data = [
    [1, 2, 3, 4],
    [1, 2, 3, 4],
    [5, 6, 7, 8],
    [5, 6, 7, 8]
]

# newline='' makes the file object treat a new line as an empty string;
# without it the saved file contains blank lines between records
with open('data.csv', mode='a', encoding='utf-8', newline='') as f:
    # csv.writer(f) builds a csv writer around the file object
    csv_write = csv.writer(f)
    for i in data:
        # writerow() writes records one at a time; pass a list/tuple
        csv_write.writerow(i)
```

#### Example: Saving Douban Data to CSV (List Writer)

```python
import csv

import parsel
import requests

# A context manager keeps the file open for the whole crawl
with open('douban-list.csv', mode='a', encoding='utf-8', newline='') as f:
    csv_write = csv.writer(f)
    # csv_write.writerow(['标题', '简介', '评分', '评论人数'])
    f.write('标题,简介,评分,评论人数\n')

    for page in range(0, 226, 25):
        url = f'https://movie.douban.com/top250?start={page}&filter='
        headers = {
            'Cookie': 'll="118267"; bid=VrC8tT1GWz8; __yadk_uid=iHqVKZD4ZHIVREbOrlu9k4uWFSsAdZtO; _pk_id.100001.4cf6=b39d476add4f5658.1683638062.; __utmz=30149280.1687782730.8.7.utmcsr=baidu|utmccn=(organic)|utmcmd=organic; __utmz=223695111.1687782730.4.4.utmcsr=baidu|utmccn=(organic)|utmcmd=organic; _pk_ref.100001.4cf6=%5B%22%22%2C%22%22%2C1687952054%2C%22https%3A%2F%2Fwww.baidu.com%2Flink%3Furl%3DqdlD_RZvrHI0sXUZ08wSSKbkKLAWA_R84aALUkbWwp__yA2hUL-2C_Ej15saTpe7%26wd%3D%26eqid%3Dfdfaeaeb0001b3f60000000664998548%22%5D; _pk_ses.100001.4cf6=1; ap_v=0,6.0; __utma=30149280.1169382564.1682168622.1687782730.1687952054.9; __utmb=30149280.0.10.1687952054; __utmc=30149280; __utma=223695111.1640817040.1683638062.1687782730.1687952054.5; __utmb=223695111.0.10.1687952054; __utmc=223695111; __gads=ID=744f53c3cb2ebb52-22841ef3a4e00021:T=1683638065:RT=1687952056:S=ALNI_MZhRKuML1OBDnNRafe3qd6-ndhaiQ; __gpi=UID=00000c03bafcda5c:T=1683638065:RT=1687952056:S=ALNI_MbkLLsUm467wiS6ZZ6Mn2ohKIWBZw',
            'Host': 'movie.douban.com',
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/114.0.0.0 Safari/537.36',
        }
        response = requests.get(url=url, headers=headers)
        html_data = response.text
        # print(html_data)

        # Parse the data
        selector = parsel.Selector(html_data)
        # First extraction pass
        lis = selector.css('.grid_view>li')
        # Second extraction pass
        for li in lis:
            title = li.css('.hd>a>span:nth-child(1)::text').get()
            info = li.css('.bd>p:nth-child(1)::text').getall()
            info = '//'.join([i.strip() for i in info])
            score = li.css('.rating_num::text').get()
            follow = li.css('.star>span:nth-child(4)::text').get()
            print(title, info, score, follow)
            # Write one record per iteration
            csv_write.writerow([title, info, score, follow])
        print('=' * 100 + '\n')
```

#### Writing Dict Data to CSV

```python
"""
CSV format:
    each line is one record;
    the fields of a record are separated by a delimiter, a comma by default
"""
import csv  # built-in module

list_dict = [{'first_name': 'Baked', 'last_name': 'Beans'},
             {'first_name': 'Lovely'},
             {'first_name': 'Wonderful', 'last_name': 'Spam'}]

with open('data.csv', mode='a', encoding='utf-8', newline='') as f:
    # DictWriter takes the file object and the dict keys.
    # fieldnames must cover every key that can appear in the dicts:
    # an unknown key raises ValueError, while a key missing from a
    # record is written as an empty field by default.
    csv_write = csv.DictWriter(f, fieldnames=['first_name', 'last_name'])
    # Dict writers have a dedicated method for the header row
    csv_write.writeheader()
    for i in list_dict:
        csv_write.writerow(i)
```
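A practical note that ties back to the encoding remark in the Excel section: a CSV saved as plain utf-8 often shows garbled Chinese when opened in Excel on Windows, because Excel assumes a local encoding such as gbk. A common workaround, sketched here on the assumption that Excel compatibility is what you want (the data and file name are only illustrative), is the 'utf-8-sig' encoding, which prepends a BOM; writerows() additionally writes all records in one call:

```python
import csv

rows = [
    ['标题', '评分'],          # header row
    ['肖申克的救赎', '9.7'],
    ['霸王别姬', '9.6'],
]

# 'utf-8-sig' writes a BOM so Excel detects the encoding correctly;
# writerows() takes an iterable of rows and writes them all at once
with open('douban-excel.csv', mode='w', encoding='utf-8-sig', newline='') as f:
    csv.writer(f).writerows(rows)
```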
#### Example: Saving Douban Data to CSV (Dict Writer)

```python
import csv

import parsel
import requests

with open('douban-dict.csv', mode='a', encoding='utf-8', newline='') as f:
    csv_write = csv.DictWriter(f, fieldnames=['title', 'info', 'score', 'follow'])
    # writeheader() writes the header row; only the dict writer has this
    # method - the plain list writer has no way to write a header
    csv_write.writeheader()

    for page in range(0, 226, 25):
        url = f'https://movie.douban.com/top250?start={page}&filter='
        headers = {
            'Cookie': 'll="118267"; bid=VrC8tT1GWz8; __yadk_uid=iHqVKZD4ZHIVREbOrlu9k4uWFSsAdZtO; _pk_id.100001.4cf6=b39d476add4f5658.1683638062.; __utmz=30149280.1687782730.8.7.utmcsr=baidu|utmccn=(organic)|utmcmd=organic; __utmz=223695111.1687782730.4.4.utmcsr=baidu|utmccn=(organic)|utmcmd=organic; _pk_ref.100001.4cf6=%5B%22%22%2C%22%22%2C1687952054%2C%22https%3A%2F%2Fwww.baidu.com%2Flink%3Furl%3DqdlD_RZvrHI0sXUZ08wSSKbkKLAWA_R84aALUkbWwp__yA2hUL-2C_Ej15saTpe7%26wd%3D%26eqid%3Dfdfaeaeb0001b3f60000000664998548%22%5D; _pk_ses.100001.4cf6=1; ap_v=0,6.0; __utma=30149280.1169382564.1682168622.1687782730.1687952054.9; __utmb=30149280.0.10.1687952054; __utmc=30149280; __utma=223695111.1640817040.1683638062.1687782730.1687952054.5; __utmb=223695111.0.10.1687952054; __utmc=223695111; __gads=ID=744f53c3cb2ebb52-22841ef3a4e00021:T=1683638065:RT=1687952056:S=ALNI_MZhRKuML1OBDnNRafe3qd6-ndhaiQ; __gpi=UID=00000c03bafcda5c:T=1683638065:RT=1687952056:S=ALNI_MbkLLsUm467wiS6ZZ6Mn2ohKIWBZw',
            'Host': 'movie.douban.com',
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/114.0.0.0 Safari/537.36',
        }
        response = requests.get(url=url, headers=headers)
        html_data = response.text
        # print(html_data)

        # Parse the data
        selector = parsel.Selector(html_data)
        # First extraction pass
        lis = selector.css('.grid_view>li')
        # Second extraction pass
        for li in lis:
            title = li.css('.hd>a>span:nth-child(1)::text').get()
            info = li.css('.bd>p:nth-child(1)::text').getall()
            info = '//'.join([i.strip() for i in info])
            score = li.css('.rating_num::text').get()
            follow = li.css('.star>span:nth-child(4)::text').get()
            print(title, info, score, follow)
            d = {'title': title, 'info': info, 'score': score, 'follow': follow}
            csv_write.writerow(d)
        print('=' * 100 + '\n')
```

#### Reading CSV Data

```python
import csv

"""Read the raw text straight from the file object"""
# with open('data.csv', mode='r', encoding='utf-8') as f:
#     print(f.read())

"""Read records as lists"""
# with open('douban-list.csv', mode='r', encoding='utf-8') as f:
#     csv_read = csv.reader(f)
#     print(csv_read)
#     for i in csv_read:
#         print(i)

"""Read records as dict objects"""
with open('douban-list.csv', mode='r', encoding='utf-8') as f:
    csv_read = csv.DictReader(f)
    print(csv_read)
    for i in csv_read:
        print(i)
```

#### Example: Saving NetEase News to CSV

```python
"""
Target site: https://news.163.com/
Scroll down to the "要闻" (top stories) category.

Task: scrape the first page of the "要闻" category and save it as CSV
with the following fields:
    title  channelname  docurl  imgurl  source  tlink
"""
import csv
import json
import re

import requests

url = 'https://news.163.com/special/cm_yaowen20200213/?callback=data_callback'
response = requests.get(url=url)
json_data = response.text

# Strip the JSONP wrapper: data_callback([...])
result = re.findall(r'data_callback\((.*?)\)', json_data, re.S)
item_json = json.loads(result[0])

with open('网易新闻.csv', mode='a', encoding='utf-8', newline='') as f:
    write = csv.writer(f)
    write.writerow(['title', 'channelname', 'docurl', 'imgurl', 'source', 'tlink'])
    for item in item_json:
        title = item['title']
        channelname = item['channelname']
        docurl = item['docurl']
        imgurl = item['imgurl']
        source = item['source']
        tlink = item['tlink']
        print(title, channelname, docurl, imgurl, source, tlink, sep=' | ')
        write.writerow([title, channelname, docurl, imgurl, source, tlink])
```
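Since DictReader yields one dict per record, the CSV and JSON formats from this chapter round-trip naturally. As a closing sketch, assuming the douban-dict.csv produced above (the output file name is only illustrative), here is how to read the CSV back and re-serialize it as JSON:

```python
import csv
import json

# Read the CSV back into a list of dicts, one dict per record
with open('douban-dict.csv', mode='r', encoding='utf-8') as f:
    records = list(csv.DictReader(f))

# Re-serialize the same records as JSON
with open('douban-dict.json', mode='w', encoding='utf-8') as f:
    json.dump(records, f, ensure_ascii=False, indent=4)
```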
