import requests
import re
# Fetch the danmaku (bullet-comment) XML for one bilibili video (oid=392402545)
# and append each comment's text to a local file, one comment per line.
url = 'https://api.bilibili.com/x/v1/dm/list.so?oid=392402545'
headers = {
    'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/92.0.4515.159 Safari/537.36'
}
# timeout keeps the script from hanging forever on a stalled connection
response = requests.get(url=url, headers=headers, timeout=10)
response.raise_for_status()  # fail fast on an HTTP error instead of regex-parsing an error page
response.encoding = response.apparent_encoding
# Raw string for the regex; each <d p="...">text</d> element holds one comment.
html_data = re.findall(r'<d p=".*?">(.*?)</d>', response.text)
print(html_data)
# Open the output file once, instead of re-opening it on every iteration.
with open('弹幕1.txt', mode='a', encoding='utf-8') as f:
    for index in html_data:
        f.write(index)
        f.write('\n')
        print(index)
import jieba # 分词模块 pip install jieba
import wordcloud # 词云模块 pip install wordcloud
import imageio # 自定义词云样式 pip install imageio
# Build a word cloud from the saved danmaku file: the more often a word
# appears, the larger it is rendered in the output image.
py = imageio.imread('python.png')  # mask image that shapes the cloud
# Read the collected comments; `with` guarantees the handle is closed
# (the original left the file open for the rest of the process).
with open('弹幕1.txt', encoding='utf-8') as f:
    txt = f.read()
# jieba segments the Chinese text into words; WordCloud expects
# space-separated tokens, so join the segments with spaces.
txt_list = jieba.lcut(txt)
string = ' '.join(txt_list)
print(string)
wc = wordcloud.WordCloud(
    width=500,                 # image width in pixels
    height=500,                # image height in pixels
    background_color='white',  # background colour
    font_path='msyh.ttc',      # font file (required for CJK glyphs)
    mask=py,                   # constrain the cloud to the mask's shape
    stopwords={'了', '这个', '啊', '我', '的'},  # filler words to exclude
    # contour_width=5,
    # contour_color='red'
)
wc.generate(string)
wc.to_file('output3.png')