在Python中,使用JSON爬虫进行数据同步通常涉及以下步骤:
首先，导入 `requests` 库来发送HTTP请求，以及 `json` 库来处理JSON数据：

import requests
import json
然后，使用 `requests.get()` 方法从目标URL获取JSON数据：

url = "https://api.example.com/data"  # 替换为您要抓取的API URL
response = requests.get(url)
# Only parse the body when the request actually succeeded (HTTP 200);
# otherwise report the failing status code.
status = response.status_code
if status != 200:
    print("请求失败,状态码:", status)
else:
    data = response.json()
# Parse the raw response text into Python objects.
# NOTE(review): this duplicates the earlier response.json() call —
# one of the two parses could be dropped.
parsed_data = json.loads(response.text)
# Persist the parsed data as pretty-printed JSON.
# encoding="utf-8" is required: ensure_ascii=False writes non-ASCII
# characters verbatim, which fails under a non-UTF-8 platform default
# encoding (e.g. cp936/gbk on Chinese Windows).
with open("output.json", "w", encoding="utf-8") as file:
    json.dump(parsed_data, file, ensure_ascii=False, indent=4)
import sqlite3

# Synchronize the scraped records into a local SQLite database.
conn = sqlite3.connect("data.db")
try:
    c = conn.cursor()
    # Create the target table on first run.
    c.execute("""CREATE TABLE IF NOT EXISTS data (id INTEGER PRIMARY KEY, key TEXT, value TEXT)""")
    # Batch-insert every record with a parameterized statement.
    # assumes parsed_data is an iterable of dicts with "key"/"value" entries — TODO confirm
    c.executemany(
        "INSERT INTO data (key, value) VALUES (?, ?)",
        ((item["key"], item["value"]) for item in parsed_data),
    )
    # Commit the changes.
    conn.commit()
finally:
    # Close the connection even if an insert raises, so it is never leaked.
    conn.close()
import json

import requests


def send_data(data):
    """Serialize *data* to JSON and POST it to the remote API.

    Prints a success message on HTTP 200, otherwise prints the failing
    status code. Returns None.
    """
    url = "https://api.example.com/send_data"  # replace with the API URL to send data to
    headers = {"Content-Type": "application/json"}
    # json.dumps requires the json import above — the original snippet
    # omitted it and would raise NameError when run standalone.
    response = requests.post(url, headers=headers, data=json.dumps(data))
    if response.status_code == 200:
        print("数据发送成功")
    else:
        print("数据发送失败,状态码:", response.status_code)


send_data(parsed_data)
请注意,这些示例可能需要根据您的具体需求进行调整。在实际应用中,您可能还需要处理异常、设置请求头、限制请求速率等。