import math
import os
import pickle as pkl

import pandas as pd
import torch
from torch_geometric.data import Data

# Resolve paths relative to this script's location.
script_dir = os.path.dirname(os.path.abspath(__file__))
base_dir = os.path.dirname(script_dir)
raw_dir = os.path.join(base_dir, 'processed/original')

reddit_path = os.path.join(raw_dir, 'reddit_1m.csv')

# Load the raw Reddit comment dump and report its size.
df = pd.read_csv(reddit_path)
print(df.shape)

# Keep only the columns needed for graph construction; copy so the rename
# below modifies an independent frame rather than a view of df.
df_graph = df[['subreddit_id', 'subreddit', 'name', 'body', 'score',
               'author', 'author_flair_text', 'distinguished']].copy()
df_graph.rename(columns={'name': 'post_id',
                         'body': 'post',
                         'author': 'user',
                         'author_flair_text': 'user_flair'},
                inplace=True, errors='raise')

# Basic cleaning: drop exact duplicates, deleted posts, and rows without a post id.
df_graph = df_graph.drop_duplicates()
df_graph = df_graph[df_graph['post'] != '[deleted]']
df_graph = df_graph.dropna(subset=['post_id'])
print(df_graph.shape)
print(df_graph['post_id'].nunique())

# Binarize 'distinguished' (1 if set, 0 if missing) and blank out missing flair.
df_graph['distinguished'] = df_graph['distinguished'].apply(lambda x: 0 if pd.isna(x) else 1)
df_graph['user_flair'] = df_graph['user_flair'].apply(lambda x: "" if pd.isna(x) else x)

# Node texts are shared across node types: subreddit names first, then user flair.
text_nodes = []

# Build subreddit nodes; each subreddit's node text is its display name.
sub_id2idx = {}
sub_nodes = []
for _, row in df_graph.iterrows():
    sub_id = row['subreddit_id']
    if sub_id not in sub_id2idx:  # dict lookup instead of scanning the list
        sub_id2idx[sub_id] = len(sub_nodes)
        sub_nodes.append(sub_id)
        text_nodes.append(row['subreddit'])
# Subreddit nodes carry no 'distinguished' label, so mark them with -1.
node_labels = [-1] * len(sub_nodes)
print("Length of sub nodes:", len(sub_nodes))
|
|
print("Sample sub node labels:", node_labels[:5])
|
|
print("Sample sub node texts:", text_nodes[:5])
|
|
|
|
|
|

# Build user nodes; a user's global index continues after the subreddit
# block, i.e. len(sub_nodes) + its position in user_nodes.
user_n2idx = {}
user_nodes = []
for _, row in df_graph.iterrows():
    user_n = row['user']
    if user_n in user_n2idx:  # dict lookup instead of scanning the list
        # Repeat user: accumulate unseen flair text and keep the strongest label.
        if row['user_flair'] not in text_nodes[user_n2idx[user_n]]:
            text_nodes[user_n2idx[user_n]] += "\n" + row['user_flair']
        node_labels[user_n2idx[user_n]] = max(row['distinguished'], node_labels[user_n2idx[user_n]])
    else:
        user_n2idx[user_n] = len(user_nodes) + len(sub_nodes)
        user_nodes.append(user_n)
        text_nodes.append(row['user_flair'])
        node_labels.append(row['distinguished'])
print("Length of user nodes:", len(user_nodes))
|
|
print("Sample user node labels:", node_labels[-10:])
|
|
print("Sample user node texts:", text_nodes[-10:])

# Build user -> subreddit edges; only the first post per (user, subreddit)
# pair is kept, along with its text, score, and 'distinguished' flag.
edge_index = [[], []]
text_edges = []
edge_scr_labels = []
edge_spe_labels = []
all_edges = set()

for _, row in df_graph.iterrows():
    user_idx = user_n2idx[row['user']]
    sub_idx = sub_id2idx[row['subreddit_id']]

    if (user_idx, sub_idx) not in all_edges:
        edge_index[0].append(user_idx)
        edge_index[1].append(sub_idx)

        text_edges.append(row['post'])
        edge_scr_labels.append(row['score'])
        edge_spe_labels.append(row['distinguished'])

        all_edges.add((user_idx, sub_idx))
print("Length of edges:", len(edge_index[0]))
|
|
print("Sample edge score labels:", edge_scr_labels[-10:])
|
|
print("Sample edge special labels:", edge_spe_labels[-10:])
|
|
print("Sample edge texts:", text_edges[-10:])

# Replace any remaining NaN scores/labels with 0 before tensor conversion.
edge_scr_labels = [0 if math.isnan(x) else x for x in edge_scr_labels]
edge_spe_labels = [0 if math.isnan(x) else x for x in edge_spe_labels]

# Assemble the PyG Data object; the raw node/edge texts ride along as plain lists.
graph = Data(
    text_nodes=text_nodes,
    text_edges=text_edges,
    node_labels=torch.tensor(node_labels, dtype=torch.long),
    edge_index=torch.tensor(edge_index, dtype=torch.long),
    edge_score_labels=torch.tensor(edge_scr_labels, dtype=torch.long),
    edge_special_labels=torch.tensor(edge_spe_labels, dtype=torch.long),
)

# Serialize the graph; create the output directory if it does not exist yet.
output_file = os.path.join(base_dir, 'output/reddit_graph.pkl')
os.makedirs(os.path.dirname(output_file), exist_ok=True)
with open(output_file, 'wb') as file:
    pkl.dump(graph, file)

print(f"Data processing complete. Processed data saved to: {output_file}")