Using Python to scrape Python-related data from Baidu search
Scraping Baidu Search Results for "Python" with Python
1. Preparation
Before you start scraping, keep the following in mind:
- Baidu has anti-scraping measures, so set sensible request headers
- The structure of Baidu's result pages can change at any time, which may break your selectors
- Check and respect robots.txt; Baidu's rules restrict automated crawling of search pages, so keep request volume low and verify what is currently permitted (a minimal check is sketched after this list)
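As a quick sanity check, Python's standard library can fetch and parse robots.txt for you. A minimal sketch using urllib.robotparser; the crawler name "MyCrawler" is a placeholder of my own:

from urllib.robotparser import RobotFileParser

# Fetch and parse Baidu's robots.txt, then ask whether a given URL
# may be fetched; "MyCrawler" is a placeholder user-agent name.
rp = RobotFileParser("https://www.baidu.com/robots.txt")
rp.read()
print(rp.can_fetch("MyCrawler", "https://www.baidu.com/s?wd=Python"))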
2. Basic approach (requests + BeautifulSoup)
import requests
from bs4 import BeautifulSoup
import re

def baidu_search(query, num_results=10):
    """Scrape Baidu search results.

    :param query: search keyword
    :param num_results: number of results to return
    :return: list of dicts with title, link, and summary
    """
    headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) '
                      'AppleWebKit/537.36 (KHTML, like Gecko) '
                      'Chrome/91.0.4472.124 Safari/537.36'
    }
    base_url = "https://www.baidu.com/s"
    params = {
        'wd': query,
        'pn': 0,  # pagination offset; Baidu serves 10 results per page
        'oq': query
    }
    results = []
    for start in range(0, num_results, 10):
        params['pn'] = start
        try:
            response = requests.get(base_url, headers=headers,
                                    params=params, timeout=10)
            response.raise_for_status()
            soup = BeautifulSoup(response.text, 'html.parser')
            # Each organic result is a div with class "result"
            for result in soup.find_all('div', class_='result'):
                try:
                    title = result.find('h3').get_text(strip=True)
                    link = result.find('h3').find('a')['href']
                    abstract = result.find('div', class_='c-abstract')
                    summary = abstract.get_text(strip=True) if abstract else ''
                    # Collapse extra whitespace and line breaks in the summary
                    summary = re.sub(r'\s+', ' ', summary).strip()
                    results.append({
                        'title': title,
                        'link': link,
                        'summary': summary
                    })
                    if len(results) >= num_results:
                        return results
                except Exception as e:
                    print(f"Error parsing a result: {e}")
                    continue
        except requests.exceptions.RequestException as e:
            print(f"Request failed: {e}")
            break
    return results

# Usage example
if __name__ == "__main__":
    python_results = baidu_search("Python", 20)
    for i, result in enumerate(python_results, 1):
        print(f"{i}. {result['title']}")
        print(f"   Link: {result['link']}")
        print(f"   Summary: {result['summary']}\n")
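One practical wrinkle: the href values scraped above are usually Baidu redirect links (www.baidu.com/link?url=...) rather than the destination URLs. A minimal sketch for resolving them by following the redirect; the helper name is my own, and some servers may require a GET instead of HEAD:

import requests

def resolve_baidu_link(link, timeout=10):
    # Follow Baidu's redirect and return the final URL; fall back to
    # the original link if the request fails.
    try:
        resp = requests.head(link, allow_redirects=True, timeout=timeout)
        return resp.url
    except requests.exceptions.RequestException:
        return link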
3. Advanced approach (Selenium browser automation)
When anti-scraping measures are stricter, you can drive a real browser with Selenium:
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.chrome.options import Options
import time
from urllib.parse import quote

def baidu_search_selenium(query, num_results=10):
    """Scrape Baidu search results with Selenium.

    :param query: search keyword
    :param num_results: number of results to return
    :return: list of dicts with title, link, and summary
    """
    options = Options()
    options.add_argument('--headless')      # run without a visible window
    options.add_argument('--disable-gpu')
    options.add_argument('--no-sandbox')
    driver = webdriver.Chrome(options=options)

    results = []
    try:
        # quote() URL-encodes the keyword so non-ASCII queries work too
        driver.get(f"https://www.baidu.com/s?wd={quote(query)}")
        # Wait for the page to load
        time.sleep(2)
        # Scroll down so all results on the page are rendered
        for _ in range(num_results // 10 + 1):
            driver.execute_script("window.scrollTo(0, document.body.scrollHeight);")
            time.sleep(1)
        # Organic results live under #content_left
        search_results = driver.find_elements(By.CSS_SELECTOR, '#content_left .result')
        for result in search_results[:num_results]:
            try:
                title = result.find_element(By.CSS_SELECTOR, 'h3').text
                link = result.find_element(By.CSS_SELECTOR, 'h3 a').get_attribute('href')
                # find_elements (plural) returns an empty list instead of
                # raising when the abstract is missing
                abstracts = result.find_elements(By.CSS_SELECTOR, '.c-abstract')
                summary = abstracts[0].text if abstracts else ''
                results.append({
                    'title': title,
                    'link': link,
                    'summary': summary
                })
            except Exception as e:
                print(f"Error parsing a result: {e}")
                continue
            if len(results) >= num_results:
                break
    except Exception as e:
        print(f"Scraping failed: {e}")
    finally:
        driver.quit()
    return results

# Usage example
if __name__ == "__main__":
    python_results = baidu_search_selenium("Python", 20)
    for i, result in enumerate(python_results, 1):
        print(f"{i}. {result['title']}")
        print(f"   Link: {result['link']}")
        print(f"   Summary: {result['summary']}\n")
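The fixed time.sleep(2) above is fragile: too short on slow connections, wasteful on fast ones. Selenium's explicit waits are more robust; a minimal sketch that could replace the sleep (the 10-second timeout is an arbitrary choice):

from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC

# Block until at least one result block is present, up to 10 seconds
WebDriverWait(driver, 10).until(
    EC.presence_of_element_located((By.CSS_SELECTOR, '#content_left .result'))
)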
4. Caveats
- Anti-scraping measures:
  - Baidu detects high-frequency requests, so add random delays between them
  - Consider a rotating proxy IP pool
  - Set a realistic User-Agent
- Legal compliance:
  - Respect Baidu's robots.txt
  - Do not send requests too frequently
  - Use the data only for lawful purposes
- Data cleaning:
  - Strip HTML tags
  - Handle special characters
  - Normalize the character encoding
- Performance:
  - Use asynchronous requests (e.g., aiohttp); see the sketch after this list
  - Scale out with distributed crawling (e.g., Scrapy + Redis)
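To make the asynchronous option concrete, here is a minimal aiohttp sketch that fetches several result pages concurrently and returns the raw HTML for parsing with the BeautifulSoup code above; the helper names are my own:

import asyncio
import aiohttp

USER_AGENT = ('Mozilla/5.0 (Windows NT 10.0; Win64; x64) '
              'AppleWebKit/537.36 (KHTML, like Gecko) '
              'Chrome/91.0.4472.124 Safari/537.36')

async def fetch_page(session, query, pn):
    # Fetch one result page; pn is Baidu's pagination offset (10 per page)
    params = {'wd': query, 'pn': pn}
    async with session.get('https://www.baidu.com/s', params=params) as resp:
        resp.raise_for_status()
        return await resp.text()

async def fetch_all(query, pages=3):
    async with aiohttp.ClientSession(headers={'User-Agent': USER_AGENT}) as session:
        tasks = [fetch_page(session, query, pn * 10) for pn in range(pages)]
        return await asyncio.gather(*tasks)

# html_pages = asyncio.run(fetch_all("Python"))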
5. Improved version (with proxies and delays)
import requests
from bs4 import BeautifulSoup
import re
import random
import time
from fake_useragent import UserAgent

def baidu_search_improved(query, num_results=10, use_proxy=False):
    """Improved Baidu search scraper with random UA, delays, and proxies.

    :param query: search keyword
    :param num_results: number of results to return
    :param use_proxy: whether to route requests through a proxy
    :return: list of dicts with title, link, and summary
    """
    ua = UserAgent()
    headers = {'User-Agent': ua.random}  # pick a random User-Agent

    proxies = None
    if use_proxy:
        # Replace these placeholders with addresses from your proxy pool
        proxies = {
            'http': 'http://your_proxy_ip:port',
            'https': 'https://your_proxy_ip:port'
        }

    base_url = "https://www.baidu.com/s"
    params = {'wd': query, 'pn': 0, 'oq': query}
    results = []
    for start in range(0, num_results, 10):
        params['pn'] = start
        try:
            # Random 1-3 second delay between page requests
            time.sleep(random.uniform(1, 3))
            response = requests.get(base_url, headers=headers,
                                    params=params, proxies=proxies,
                                    timeout=10)
            response.raise_for_status()
            soup = BeautifulSoup(response.text, 'html.parser')
            for result in soup.find_all('div', class_='result'):
                try:
                    title = result.find('h3').get_text(strip=True)
                    link = result.find('h3').find('a')['href']
                    abstract = result.find('div', class_='c-abstract')
                    summary = abstract.get_text(strip=True) if abstract else ''
                    summary = re.sub(r'\s+', ' ', summary).strip()
                    results.append({
                        'title': title,
                        'link': link,
                        'summary': summary
                    })
                    if len(results) >= num_results:
                        return results
                except Exception as e:
                    print(f"Error parsing a result: {e}")
                    continue
        except requests.exceptions.RequestException as e:
            print(f"Request failed: {e}")
            break
    return results

# Usage example
if __name__ == "__main__":
    python_results = baidu_search_improved("Python", 20, use_proxy=True)
    for i, result in enumerate(python_results, 1):
        print(f"{i}. {result['title']}")
        print(f"   Link: {result['link']}")
        print(f"   Summary: {result['summary']}\n")
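If you enable use_proxy, the hard-coded placeholder addresses need to come from somewhere. A minimal sketch of rotating through a small pool; the addresses are placeholders you must replace with proxies you are authorized to use:

import itertools

# Placeholder addresses; substitute your own proxy pool here.
PROXY_POOL = itertools.cycle([
    'http://your_proxy_ip_1:port',
    'http://your_proxy_ip_2:port',
])

def next_proxies():
    # Return the next proxy in round-robin order, in the dict format
    # that requests expects for its proxies argument.
    proxy = next(PROXY_POOL)
    return {'http': proxy, 'https': proxy}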
6. Extensions
- Data storage:
import pandas as pd

def save_to_csv(data, filename="baidu_search_results.csv"):
    df = pd.DataFrame(data)
    # utf-8-sig adds a BOM so Excel displays Chinese text correctly
    df.to_csv(filename, index=False, encoding='utf-8-sig')
    print(f"Saved results to {filename}")
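A quick usage example tying the pieces together (assumes baidu_search_improved from section 5 is in scope):

results = baidu_search_improved("Python", 20)
save_to_csv(results)  # writes baidu_search_results.csv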
- Keyword expansion:
def get_related_keywords(keyword):
    # Placeholder logic; a real implementation could query Baidu's
    # search-suggestion API (see the sketch below). Returns e.g.
    # "Python教程" (tutorial), "Python入门" (intro), "Python开发" (development)
    return [f"{keyword}教程", f"{keyword}入门", f"{keyword}开发"]
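For a real implementation, Baidu serves search suggestions from a public JSONP endpoint. The URL, parameters, and response format below are assumptions based on how that endpoint is commonly documented, and they may change without notice:

import json
import re
import requests

def get_baidu_suggestions(keyword):
    # Assumed endpoint; the response looks like
    # window.baidu.sug({q:"...",p:false,s:["...", ...]});
    resp = requests.get('https://suggestion.baidu.com/su',
                        params={'wd': keyword}, timeout=10)
    resp.encoding = 'gbk'  # the endpoint has historically returned GBK
    match = re.search(r's:(\[.*?\])', resp.text)
    return json.loads(match.group(1)) if match else []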
- Scheduled scraping:
import schedule
import time

def job():
    results = baidu_search_improved("Python", 10)
    save_to_csv(results)

# Run the scrape every day at 10:00
schedule.every().day.at("10:00").do(job)

while True:
    schedule.run_pending()
    time.sleep(1)
7. Summary
- For simple scraping, requests + BeautifulSoup is enough
- When anti-scraping measures are strict, switch to Selenium
- Set sensible request intervals and a realistic User-Agent
- Consider a rotating proxy IP pool
- Comply with applicable laws and the site's terms of service
Note: in real use, make sure you follow Baidu's terms of service and applicable laws, and avoid sending requests at an excessive rate.