# How to Scrape Meituan Tourist Attraction Review Data with Python

In tourism big-data analysis, user review data holds considerable commercial value. This article walks through how to scrape review data for Meituan tourist attractions with Python, covering technology selection, anti-scraping countermeasures, and a complete code implementation.

## 1. Technology Selection

### 1.1 Core Tools

- **Requests**: sends HTTP requests
- **BeautifulSoup** / lxml: parses HTML
- **Selenium**: renders JavaScript-heavy pages
- **PyMySQL**: writes the results to MySQL

### 1.2 Installing the Dependencies

```bash
pip install requests beautifulsoup4 selenium pymysql
```
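A quick way to confirm the installation succeeded is to import each package and print its version (a minimal sanity-check sketch):

```python
# Sanity check: import each dependency and print its version.
import requests, bs4, selenium, pymysql

for module in (requests, bs4, selenium, pymysql):
    print(module.__name__, getattr(module, "__version__", "unknown"))
```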
## 2. Page Structure Analysis

A Meituan attraction review page typically has a URL of the form:

```
https://www.meituan.com/meishi/123456/review/all/
```

where `123456` is the attraction's POI ID.
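From this pattern, a small helper can assemble the review-page URL for any attraction. This is a minimal sketch: the `pageno` query parameter is taken from the request code later in this article, and the path may change as Meituan updates its site.

```python
def build_review_url(poi_id: str, page: int = 1) -> str:
    """Assemble the review-page URL for a POI ID (path and params are assumptions)."""
    return f"https://www.meituan.com/meishi/{poi_id}/review/all/?pageno={page}"

print(build_review_url("123456", 2))
# https://www.meituan.com/meishi/123456/review/all/?pageno=2
```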
## 3. Fetching Review Pages

Use Requests to fetch a single page of reviews for a given POI ID:

```python
import requests

def get_page(poi_id, page):
    """Fetch one page of reviews for the given attraction (POI) ID."""
    url = f"https://www.meituan.com/meishi/{poi_id}/review/all/"
    params = {
        "pageno": page,
        "sortType": "default"
    }
    headers = {
        "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64)",
        "Referer": "https://www.meituan.com/"
    }
    try:
        response = requests.get(url, params=params, headers=headers)
        response.raise_for_status()
        return response.text
    except requests.RequestException as e:
        print(f"Request failed: {e}")
        return None
```
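A quick smoke test of the fetcher might look like the following (`"123456"` is a placeholder POI ID, not a real attraction):

```python
# Fetch the first review page and report how much HTML came back.
html = get_page("123456", 1)
if html:
    print(f"Fetched {len(html)} characters of HTML")
else:
    print("No HTML returned; the request may have been blocked")
```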
## 4. Parsing Review Data

Parse the returned HTML with BeautifulSoup and extract the fields of interest:

```python
from bs4 import BeautifulSoup

def parse_comments(html):
    """Extract review records (username, score, content, visit time, likes) from a page."""
    soup = BeautifulSoup(html, 'lxml')
    comments = []
    for item in soup.select('.reviews-list .review-item'):
        try:
            comment = {
                'username': item.select_one('.name').text.strip(),
                'score': int(item.select_one('.score').text),
                'content': item.select_one('.desc').text.strip(),
                'visit_time': item.select_one('.time').text.split(':')[-1],
                'like_count': int(item.select_one('.like').text or 0)
            }
            comments.append(comment)
        except Exception as e:
            print(f"Parse error: {e}")
    return comments
```
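To see what the parser expects, here is a tiny inline HTML fragment matching the selectors used above. The class names (`reviews-list`, `review-item`, `name`, `score`, `desc`, `time`, `like`) are assumptions carried over from the parsing code; the real Meituan markup will differ and should be verified in the browser's developer tools.

```python
# Illustrative markup only -- not the real Meituan page structure.
sample_html = """
<div class="reviews-list">
  <div class="review-item">
    <span class="name"> Alice </span>
    <span class="score">5</span>
    <p class="desc"> Great view, worth the trip. </p>
    <span class="time">2023-10-01</span>
    <span class="like">12</span>
  </div>
</div>
"""

print(parse_comments(sample_html))
# [{'username': 'Alice', 'score': 5, 'content': 'Great view, worth the trip.',
#   'visit_time': '2023-10-01', 'like_count': 12}]
```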
## 5. Handling Dynamically Rendered Pages

When a plain request cannot retrieve the data (for example, the reviews are rendered by JavaScript), fall back to Selenium:
```python
from selenium import webdriver
from selenium.webdriver.chrome.options import Options

def get_dynamic_page(poi_id, page):
    chrome_options = Options()
    chrome_options.add_argument('--headless')
    driver = webdriver.Chrome(options=chrome_options)
    url = f"https://www.meituan.com/meishi/{poi_id}/review/all/?pageno={page}"
    driver.get(url)
    driver.implicitly_wait(5)
    html = driver.page_source
    driver.quit()
    return html
```
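Note that `implicitly_wait` only affects element lookups, so it does not guarantee the reviews have rendered before `page_source` is read. A more reliable variant (a sketch; the `.reviews-list` selector is the same assumption used by the parser above) waits explicitly for the review list to appear:

```python
from selenium import webdriver
from selenium.webdriver.chrome.options import Options
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC

def get_dynamic_page_waited(poi_id, page, timeout=10):
    """Render the review page and wait until the (assumed) review container is present."""
    chrome_options = Options()
    chrome_options.add_argument('--headless')
    driver = webdriver.Chrome(options=chrome_options)
    try:
        driver.get(f"https://www.meituan.com/meishi/{poi_id}/review/all/?pageno={page}")
        # Block until the review list shows up, or give up after `timeout` seconds.
        WebDriverWait(driver, timeout).until(
            EC.presence_of_element_located((By.CSS_SELECTOR, ".reviews-list"))
        )
        return driver.page_source
    except Exception as e:
        print(f"Dynamic rendering failed: {e}")
        return None
    finally:
        driver.quit()
```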
## 6. Anti-Scraping: Throttling Requests

Pausing for a random interval between requests lowers the chance of being rate-limited or blocked:

```python
import time
import random
import requests

def safe_request(url):
    # Sleep 1-3 seconds before each request to avoid hammering the server.
    time.sleep(random.uniform(1, 3))
    return requests.get(url)
```
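Beyond random delays, reusing a `requests.Session` with automatic retries spreads the load and recovers from transient errors. A sketch using urllib3's `Retry` (the header values are illustrative placeholders):

```python
import time
import random
import requests
from requests.adapters import HTTPAdapter
from urllib3.util.retry import Retry

def build_session():
    """A shared session with keep-alive, default headers, and retry/backoff."""
    session = requests.Session()
    session.headers.update({
        "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64)",
        "Referer": "https://www.meituan.com/",
    })
    retry = Retry(total=3, backoff_factor=1,
                  status_forcelist=[429, 500, 502, 503, 504])
    session.mount("https://", HTTPAdapter(max_retries=retry))
    return session

def polite_get(session, url, **kwargs):
    """Pause 1-3 seconds, then issue the request through the shared session."""
    time.sleep(random.uniform(1, 3))
    return session.get(url, timeout=10, **kwargs)
```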
## 7. Data Storage

Create a MySQL table to hold the reviews:

```sql
CREATE TABLE `meituan_comments` (
  `id` int(11) NOT NULL AUTO_INCREMENT,
  `poi_id` varchar(20) NOT NULL,
  `username` varchar(50) DEFAULT NULL,
  `score` tinyint(1) DEFAULT NULL,
  `content` text,
  `visit_time` varchar(50) DEFAULT NULL,
  `like_count` int(11) DEFAULT 0,
  `create_time` timestamp NULL DEFAULT CURRENT_TIMESTAMP,
  PRIMARY KEY (`id`),
  KEY `idx_poi` (`poi_id`)
) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4;
```
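If you prefer to set up the schema from Python rather than the MySQL client, a sketch with PyMySQL (credentials and database name are placeholders matching the storage code below; the DDL mirrors the statement above with `IF NOT EXISTS` added):

```python
import pymysql

DDL = """
CREATE TABLE IF NOT EXISTS `meituan_comments` (
  `id` int(11) NOT NULL AUTO_INCREMENT,
  `poi_id` varchar(20) NOT NULL,
  `username` varchar(50) DEFAULT NULL,
  `score` tinyint(1) DEFAULT NULL,
  `content` text,
  `visit_time` varchar(50) DEFAULT NULL,
  `like_count` int(11) DEFAULT 0,
  `create_time` timestamp NULL DEFAULT CURRENT_TIMESTAMP,
  PRIMARY KEY (`id`),
  KEY `idx_poi` (`poi_id`)
) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4
"""

def init_schema():
    """Create the database (if missing) and the reviews table."""
    conn = pymysql.connect(host='localhost', user='root',
                           password='yourpassword', charset='utf8mb4')
    try:
        with conn.cursor() as cursor:
            cursor.execute("CREATE DATABASE IF NOT EXISTS spider_data "
                           "DEFAULT CHARACTER SET utf8mb4")
            cursor.execute("USE spider_data")
            cursor.execute(DDL)
        conn.commit()
    finally:
        conn.close()
```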
Then write the scraped reviews into the table with PyMySQL:

```python
import pymysql

def save_to_db(comments, poi_id):
    """Insert one page of parsed reviews into MySQL."""
    conn = pymysql.connect(
        host='localhost',
        user='root',
        password='yourpassword',
        db='spider_data',
        charset='utf8mb4'
    )
    try:
        with conn.cursor() as cursor:
            sql = """INSERT INTO meituan_comments
                     (poi_id, username, score, content, visit_time, like_count)
                     VALUES (%s, %s, %s, %s, %s, %s)"""
            for comment in comments:
                cursor.execute(sql, (
                    poi_id,
                    comment['username'],
                    comment['score'],
                    comment['content'],
                    comment['visit_time'],
                    comment['like_count']
                ))
        conn.commit()
    finally:
        conn.close()
```
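For larger pages, `executemany` inserts all rows in a single round trip. A batch variant of the function above (same placeholder credentials):

```python
import pymysql

def save_to_db_batch(comments, poi_id):
    """Batch-insert a page of reviews with one executemany call."""
    conn = pymysql.connect(host='localhost', user='root', password='yourpassword',
                           db='spider_data', charset='utf8mb4')
    sql = """INSERT INTO meituan_comments
             (poi_id, username, score, content, visit_time, like_count)
             VALUES (%s, %s, %s, %s, %s, %s)"""
    rows = [(poi_id, c['username'], c['score'], c['content'],
             c['visit_time'], c['like_count']) for c in comments]
    try:
        with conn.cursor() as cursor:
            cursor.executemany(sql, rows)
        conn.commit()
    finally:
        conn.close()
```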
## 8. Putting It All Together

The main routine ties the pieces together, falling back to Selenium whenever the plain request is blocked:

```python
def main():
    poi_id = "123456"   # replace with a real attraction ID
    max_page = 10       # maximum number of pages to crawl

    for page in range(1, max_page + 1):
        print(f"Crawling page {page}...")

        # Try a plain HTTP request first
        html = get_page(poi_id, page)
        if not html or "验证码" in html:
            # Fall back to Selenium if nothing came back or a CAPTCHA page was served
            # ("验证码" is the Chinese word for CAPTCHA shown on the blocked page).
            html = get_dynamic_page(poi_id, page)

        if html:
            comments = parse_comments(html)
            save_to_db(comments, poi_id)
            print(f"Saved {len(comments)} reviews")
        else:
            print(f"Failed to crawl page {page}")

        time.sleep(random.uniform(2, 5))

if __name__ == "__main__":
    main()
```
With the approach above you can collect Meituan attraction review data effectively. Keep the daily crawl volume under roughly 1,000 records to avoid putting undue load on the target site. The collected data can then feed business-intelligence scenarios such as attraction service-quality analysis and user-preference research.
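As a starting point for the analysis scenarios mentioned above, the stored reviews can be pulled into pandas to look at the score distribution (a sketch; assumes pandas is installed and uses the same placeholder credentials and POI ID as earlier):

```python
import pymysql
import pandas as pd

conn = pymysql.connect(host='localhost', user='root', password='yourpassword',
                       db='spider_data', charset='utf8mb4')
# Load all reviews for one attraction and summarize the scores.
df = pd.read_sql("SELECT score, content, visit_time, like_count "
                 "FROM meituan_comments WHERE poi_id = %s",
                 conn, params=("123456",))
conn.close()

print(df['score'].value_counts().sort_index())  # score distribution
print(df['score'].mean())                       # average rating
```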
Note: in practice you will need to adjust the CSS selectors as Meituan's page structure changes, and you must comply with applicable laws and regulations. Check the site's terms of service before crawling.