Applying Graph Attention Networks (GAT) to Wireless Communication Network Topology Inference
If a GCN program is already in place, adapting it to GAT takes little effort: torch_geometric.nn provides both the plain graph convolution layer GCNConv and the graph attention layer GATConv, so the swap is mostly a matter of changing the layer constructors.
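A minimal sketch of the swap (the layer widths, head count, and random tensors below are illustrative only and are not taken from the experiment that follows): the main bookkeeping point is that a GATConv layer outputs heads * out_channels features when concat=True, but only out_channels when concat=False, so the next layer's input width has to match.

import torch
from torch_geometric.nn import GCNConv, GATConv

in_dim, hid, out, heads = 16, 200, 100, 4

# GCN: output width equals out_channels
gcn1, gcn2 = GCNConv(in_dim, hid), GCNConv(hid, out)

# GAT with concat=True: output width is heads * out_channels,
# so the second layer must expect heads * hid input features
gat1 = GATConv(in_dim, hid, heads=heads, concat=True)
gat2 = GATConv(heads * hid, out, heads=heads, concat=False)

x = torch.randn(30, in_dim)                 # 30 nodes, in_dim features each
edge_index = torch.randint(0, 30, (2, 60))  # random edges, for shape checking only
print(gat1(x, edge_index).shape)            # torch.Size([30, 800])
print(gat2(gat1(x, edge_index), edge_index).shape)  # torch.Size([30, 100])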
Code:
# Author: zhouzhichao
# Created: June 10, 2025
# Purpose: compare GAT and GCN for wireless communication network topology inference

import warnings
warnings.simplefilter(action='ignore', category=FutureWarning)
import sys
import torch
torch.set_printoptions(linewidth=200)
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from torch_geometric.nn import GCNConv,GATConv
sys.path.append('D:\无线通信网络认知\论文1\大修意见\Reviewer1-1 阈值相似性图对比实验')
from gcn_dataset import graph_data
print(torch.__version__)
print(torch.cuda.is_available())
from sklearn.metrics import roc_auc_score, precision_score, recall_score, accuracy_score

mode = "gat nonconcat"
gat_head = 1

class Net(torch.nn.Module):
    def __init__(self):
        super().__init__()
        if mode == "gcn":
            self.conv1 = GCNConv(Input_L, 200)
            self.conv2 = GCNConv(200, 100)
        if mode == "gat concat":
            self.conv1 = GATConv(Input_L, 200, heads=gat_head, concat=True)
            self.conv2 = GATConv(gat_head*200, 100, heads=gat_head, concat=True)
        if mode == "gat nonconcat":
            self.conv1 = GATConv(Input_L, 200, heads=gat_head, concat=False)
            self.conv2 = GATConv(200, 100, heads=gat_head, concat=False)

    def encode(self, x, edge_index):
        c1_in = x.T
        # c1_in = x
        c1_out = self.conv1(c1_in, edge_index)
        c1_relu = c1_out.relu()
        c2_out = self.conv2(c1_relu, edge_index)
        c2_relu = c2_out.relu()
        return c2_relu

    def decode(self, z, edge_label_index):
        # score a candidate edge by the squared distance between its endpoint embeddings
        distance_squared = torch.sum((z[edge_label_index[0]] - z[edge_label_index[1]]) ** 2, dim=-1)
        return distance_squared

    def decode_all(self, z):
        prob_adj = z @ z.t()  # score matrix over all node pairs
        return (prob_adj > 0).nonzero(as_tuple=False).t()  # edges with score > 0, in edge_index form

    @torch.no_grad()
    def get_val(self, gcn_data):
        # collect the edges that did not take part in training
        edge_index = gcn_data.edge_index              # [2, 30]
        edge_label_index = gcn_data.edge_label_index  # [2, 60]
        edge_label = gcn_data.edge_label
        # transpose to (num_edges, 2) for easier handling
        edge_index_t = edge_index.t()                 # [30, 2]
        edge_label_index_t = edge_label_index.t()     # [60, 2]
        # turn the training edges into a set of tuples for fast lookup
        edge_index_set = set([tuple(e.tolist()) for e in edge_index_t])
        # check whether each edge in edge_label_index appears in edge_index_set
        is_in_edge_index = [tuple(e.tolist()) in edge_index_set for e in edge_label_index_t]
        is_in_edge_index = torch.tensor(is_in_edge_index)
        # keep only the columns (edges) not seen during training
        val_col = edge_label_index[:, ~is_in_edge_index]
        val_label = edge_label[~is_in_edge_index]
        val_col = val_col[:, :20]
        val_label = val_label[:20]
        divide_index = 10
        val_col_1 = val_col[:, :divide_index]
        val_label_1 = val_label[:divide_index]
        val_col_0 = val_col[:, divide_index:]
        val_label_0 = val_label[divide_index:]
        return val_col_1, val_label_1, val_col_0, val_label_0

    @torch.no_grad()
    def test_val(self, gcn_data, threshhold):
        model.eval()
        val_col_1, val_label_1, val_col_0, val_label_0 = self.get_val(gcn_data)
        # positive (label 1) validation edges
        z = model.encode(gcn_data.x, gcn_data.edge_index)
        out = model.decode(z, val_col_1).view(-1)
        out = 1 - out
        out_np = out.cpu().numpy()
        labels_1 = val_label_1.cpu().numpy()
        # roc_auc_s = roc_auc_score(labels_np, out_np)
        pred_1 = (out_np > threshhold).astype(int)
        accuracy_1 = accuracy_score(labels_1, pred_1)
        precision_1 = precision_score(labels_1, pred_1, zero_division=1)
        recall_1 = recall_score(labels_1, pred_1, zero_division=1)
        # negative (label 0) validation edges
        z = model.encode(gcn_data.x, gcn_data.edge_index)
        out = model.decode(z, val_col_0).view(-1)
        out = 1 - out
        out_np = out.cpu().numpy()
        labels_0 = val_label_0.cpu().numpy()
        # roc_auc_d = roc_auc_score(labels_np, out_np)
        pred_0 = (out_np > threshhold).astype(int)
        accuracy_0 = accuracy_score(labels_0, pred_0)
        precision_0 = precision_score(labels_0, pred_0, zero_division=1)
        recall_0 = recall_score(labels_0, pred_0, zero_division=1)
        accuracy = (accuracy_1 + accuracy_0) / 2
        precision = (precision_1 + precision_0) / 2
        recall = (recall_1 + recall_0) / 2
        return accuracy, precision, recall

    @torch.no_grad()
    def calculate_threshhold(self, gcn_data):
        model.eval()
        z = model.encode(gcn_data.x, gcn_data.edge_index)
        out = model.decode(z, gcn_data.edge_label_index).view(-1)
        out = 1 - out
        out_np = out.cpu().numpy()
        labels_np = gcn_data.edge_label.cpu().numpy()
        threshhold = 0
        accuracy_max = 0
        # pick the decision threshold that maximizes accuracy on the training edge labels
        for th in np.arange(-2, 1.1, 0.1):
            pred_labels = (out_np > th).astype(int)
            accuracy = accuracy_score(labels_np, pred_labels)
            if accuracy > accuracy_max:
                accuracy_max = accuracy
                threshhold = th
        return threshhold

N = 30
train_n = 20
M = 3000
snr = 40

def graph_normalize(gcn_data):
    # scale each feature column by its maximum absolute value
    for i in range(gcn_data.x.shape[1]):
        gcn_data.x[:, i] = gcn_data.x[:, i] / torch.max(torch.abs(gcn_data.x[:, i]))

erase_list = [0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9]
accuracy_list = []
print("snr: ", snr)
for erase in erase_list:
    accuracy = []
    for i in range(10):
        root = "D:\无线通信网络认知\论文1\大修意见\Reviewer1-2 缺失数据实验\\erase_data_same_start_idx\gcn_data-" + str(i) + "_erase-" + str(erase) + "_N_" + str(N) + "_snr_" + str(snr) + "_train_n_" + str(train_n) + "_M_" + str(M)
        # root = "erase_data_same_start_idx\gcn_data-" + str(i) + "_erase-" + str(erase) + "_N_" + str(N) + "_snr_" + str(snr) + "_train_n_" + str(train_n) + "_M_" + str(M)
        gcn_data = graph_data(root)
        graph_normalize(gcn_data)
        Input_L = gcn_data.x.shape[0]
        model = Net()
        # model = Net().to(device)
        optimizer = torch.optim.Adam(params=model.parameters(), lr=0.01)
        criterion = torch.nn.BCEWithLogitsLoss()
        model.train()

        def train():
            optimizer.zero_grad()
            z = model.encode(gcn_data.x, gcn_data.edge_index)
            out = model.decode(z, gcn_data.edge_label_index).view(-1)
            out = 1 - out
            loss = criterion(out, gcn_data.edge_label)
            loss.backward()
            optimizer.step()
            return loss

        min_loss = 99999
        count = 0
        # early stopping: quit after 100 epochs without improvement
        for epoch in range(100000):
            loss = train()
            if loss < min_loss:
                min_loss = loss
                count = 0
                print("erase: ", str(erase), " i: ", str(i), " epoch: ", epoch, " loss: ", round(loss.item(), 4), " min_loss: ", round(min_loss.item(), 4))
            count = count + 1
            if count > 100:
                threshhold = model.calculate_threshhold(gcn_data)
                break
        accuracy_value, precision_value, recall_value = model.test_val(gcn_data, threshhold)
        accuracy.append(accuracy_value)
    accuracy_list.append(np.mean(accuracy))

data = {'erase_list': erase_list,
        'accuracy_list': accuracy_list}
# build the results DataFrame
df = pd.DataFrame(data)
# save the results to an Excel file
file_path = 'D:\无线通信网络认知\论文1\大修意见\Reviewer1-5 GAT\\'+mode+' head-'+str(gat_head)+' val erase.xlsx'
df.to_excel(file_path, index=False)
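The script is run once per mode ("gcn", "gat concat", "gat nonconcat"), each run writing one Excel file of mean validation accuracy per erase ratio. A small sketch for overlaying those curves afterwards, assuming the file names follow the naming pattern used when saving above (with gat_head = 1):

import pandas as pd
import matplotlib.pyplot as plt

# Assumed file names, matching the save pattern "<mode> head-<gat_head> val erase.xlsx"
files = {
    "GCN": "gcn head-1 val erase.xlsx",
    "GAT (concat)": "gat concat head-1 val erase.xlsx",
    "GAT (non-concat)": "gat nonconcat head-1 val erase.xlsx",
}

for label, path in files.items():
    df = pd.read_excel(path)
    plt.plot(df["erase_list"], df["accuracy_list"], marker="o", label=label)

plt.xlabel("erase ratio")
plt.ylabel("mean validation accuracy")
plt.legend()
plt.grid(True)
plt.show()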
GCN vs. GAT comparison results: