# reddit_data_collection.py
import praw
import pandas as pd
from datetime import datetime, timezone
from textblob import TextBlob
import csv
import time
# Replace these with your own values
client_id = ''
client_secret = ''
user_agent = ''
# Create a Reddit instance
reddit = praw.Reddit(
    client_id=client_id,
    client_secret=client_secret,
    user_agent=user_agent,
)
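# Optional sanity check (an added sketch, not part of the original script): with only
# client_id/client_secret/user_agent, PRAW runs in read-only mode, which is all this
# script needs for fetching public submissions and comments.
print(f"Read-only Reddit instance: {reddit.read_only}")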
# Create a list of subreddits to collect data from
subreddits = ["climate", "energy","renewableenergy","climatechange"]
# Create an empty list to store the data dictionaries
data = []
# Collect posts and comments, retrying with an exponential backoff strategy when rate limited
def process_request():
    retry_count = 0
    max_retries = 5  # Adjust this based on your needs

    while retry_count < max_retries:
        try:
            # Iterate over each subreddit in the subreddits list
            for subreddit_name in subreddits:
                # Choose the subreddit you want to interact with
                subreddit = reddit.subreddit(subreddit_name)

                # Request the top posts in the subreddit (Reddit caps listings at roughly
                # 1000 items, so limit=10000 means "as many as the API will return")
                top_posts = subreddit.top(limit=10000)
                count = 0

                # Iterate over each post in the listing
                for post in top_posts:
                    count += 1
                    print(count)

                    # Get the title of the post
                    post_title = post.title

                    # Resolve "load more comments" stubs so every item below is a real comment
                    post.comments.replace_more(limit=0)

                    # Iterate over the first 20 top-level comments of the post
                    for comment in post.comments[:20]:
                        # Get the body of the comment
                        comment_body = comment.body
                        # Get the number of upvotes for the comment
                        upvotes = comment.score

                        data_dict = {
                            "ID": comment.id,
                            "Author": comment.author.name if comment.author else "N/A",
                            "Subreddit": subreddit_name,
                            "Post Title": post_title,
                            "Comment Body": comment_body,
                            "Timestamp": datetime.fromtimestamp(comment.created_utc, tz=timezone.utc),
                            "Upvotes": upvotes,
                            "Number of Replies": len(list(comment.replies)),
                        }
                        data.append(data_dict)
                        print(data_dict)
        except praw.exceptions.RedditAPIException as e:
            if "ratelimit" in str(e).lower():
                # Rate limit hit: wait with exponential backoff, then retry.
                # Note that a retry restarts the whole crawl, so duplicates may end up in data.
                retry_count += 1
                wait_time = 2 ** retry_count
                print(f"Rate limit exceeded. Waiting {wait_time} seconds and retrying...")
                time.sleep(wait_time)
            else:
                # Any other API error: report it and stop retrying
                print(f"Error: {e}")
                break
        else:
            # All requests succeeded, so leave the retry loop
            break
    else:
        # The while loop exhausted max_retries without a successful pass
        print("Max retries reached. Consider adjusting your backoff strategy or rate limits.")
process_request()
# Save the collected data as a CSV file
output_path = ''  # Replace with the path of the output CSV file
with open(output_path, mode='w', newline='', encoding='utf-8') as csv_file:
    fieldnames = ["ID", "Author", "Subreddit", "Post Title", "Comment Body",
                  "Timestamp", "Upvotes", "Number of Replies"]
    writer = csv.DictWriter(csv_file, fieldnames=fieldnames)
    writer.writeheader()
    for d in data:
        writer.writerow(d)
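# The pandas and TextBlob imports above are not used by the collection loop itself.
# A minimal post-processing sketch (an assumption about the intended use, not part of the
# original script): build a DataFrame from the in-memory records and attach a TextBlob
# sentiment polarity score (-1.0 = negative, 1.0 = positive) to each comment.
df = pd.DataFrame(data)
if not df.empty:
    df["Sentiment"] = df["Comment Body"].apply(lambda text: TextBlob(str(text)).sentiment.polarity)
    print(df[["ID", "Upvotes", "Sentiment"]].head())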