# -*- coding: utf-8 -*-
"""Generative Adversarial Attributed Network Anomaly Detection (GAAN)"""
# Author: Ruitong Zhang <rtzhang@buaa.edu.cn>, Kay Liu <zliu234@uic.edu>
# License: BSD 2 clause
import torch
import warnings
import torch.nn.functional as F
from torch_geometric.nn import MLP
from torch_geometric.utils import to_dense_adj
from ..nn import GAANBase
from . import DeepDetector
class GAAN(DeepDetector):
"""
Generative Adversarial Attributed Network Anomaly Detection
GAAN is a generative adversarial attribute network anomaly
detection framework, including a generator module, an encoder
module, a discriminator module, and uses anomaly evaluation
measures that consider sample reconstruction error and real sample
recognition confidence to make predictions. This model is
transductive only.
See :cite:`chen2020generative` for details.
Parameters
----------
noise_dim : int, optional
Input dimension of the Gaussian random noise. Default: ``16``.
hid_dim : int, optional
Hidden dimension of model. Default: ``64``.
num_layers : int, optional
Total number of layers in the model. Half (floor) of the layers
are used for the generator and the other half (ceil) for the
encoder. Default: ``4``.
dropout : float, optional
Dropout rate. Default: ``0.``.
weight_decay : float, optional
Weight decay (L2 penalty). Default: ``0.``.
act : callable activation function or None, optional
Activation function if not None.
Default: ``torch.nn.functional.relu``.
backbone : torch.nn.Module, optional
The backbone of GAAN is fixed to MLP. Changing this parameter
will not affect the model. Default: ``None``.
contamination : float, optional
The amount of contamination of the dataset in (0., 0.5], i.e.,
the proportion of outliers in the dataset. Used when fitting to
define the threshold on the decision function. Default: ``0.1``.
lr : float, optional
Learning rate. Default: ``0.004``.
epoch : int, optional
Maximum number of training epochs. Default: ``100``.
gpu : int
GPU index, -1 for using CPU. Default: ``-1``.
batch_size : int, optional
Minibatch size, 0 for full batch training. Default: ``0``.
num_neigh : int, optional
Number of neighbors in sampling, -1 for all neighbors.
Default: ``-1``.
weight : float, optional
Weight between reconstruction of node feature and structure.
Default: ``0.5``.
verbose : int, optional
Verbosity mode. Range in [0, 3]. Larger value for printing out
more log information. Default: ``0``.
save_emb : bool, optional
Whether to save the embedding. Default: ``False``.
compile_model : bool, optional
Whether to compile the model with ``torch_geometric.compile``.
Default: ``False``.
**kwargs
Other parameters for the backbone.

Attributes
----------
decision_score_ : torch.Tensor
The outlier scores of the training data. Outliers tend to have
higher scores. This value is available once the detector is
fitted.
threshold_ : float
The threshold is based on ``contamination``. It is the score of
the :math:`N \\times` ``contamination``-th most abnormal sample in
``decision_score_``. The threshold is calculated for generating
binary outlier labels.
label_ : torch.Tensor
The binary labels of the training data. 0 stands for inliers
and 1 for outliers. It is generated by applying
``threshold_`` on ``decision_score_``.
emb : torch.Tensor or tuple of torch.Tensor or None
The learned node hidden embeddings of shape
:math:`N \\times` ``hid_dim``. Only available when ``save_emb``
is ``True``. When the detector has not been fitted, ``emb`` is
``None``. When the detector has multiple embeddings,
``emb`` is a tuple of torch.Tensor.
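
Examples
--------
A minimal usage sketch; ``data`` is any ``torch_geometric.data.Data``
graph with node features ``x`` and an ``edge_index`` (the toy ring
graph below is illustrative only).

>>> import torch
>>> from torch_geometric.data import Data
>>> from pygod.detector import GAAN
>>> edge_index = torch.tensor([[0, 1, 2, 3, 4, 5, 6, 7],
...                            [1, 2, 3, 4, 5, 6, 7, 0]])
>>> data = Data(x=torch.randn(8, 16), edge_index=edge_index)
>>> detector = GAAN(hid_dim=16, epoch=5)
>>> detector.fit(data)
>>> labels = detector.predict(data)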
"""
def __init__(self,
noise_dim=16,
hid_dim=64,
num_layers=4,
dropout=0.,
weight_decay=0.,
act=F.relu,
backbone=None,
contamination=0.1,
lr=4e-3,
epoch=100,
gpu=-1,
batch_size=0,
num_neigh=-1,
weight=0.5,
verbose=0,
save_emb=False,
compile_model=False,
**kwargs):
self.noise_dim = noise_dim
self.weight = weight
# self.num_layers is set to 1 so that only one-hop neighbors are sampled;
# in GAAN, self.model_layers holds the actual number of model layers
self.model_layers = num_layers
if backbone is not None:
warnings.warn('GAAN can only use MLP as the backbone.')
super(GAAN, self).__init__(
hid_dim=hid_dim,
num_layers=1,
dropout=dropout,
weight_decay=weight_decay,
act=act,
contamination=contamination,
lr=lr,
epoch=epoch,
gpu=gpu,
batch_size=batch_size,
num_neigh=num_neigh,
verbose=verbose,
gan=True,
save_emb=save_emb,
compile_model=compile_model,
**kwargs)
def process_graph(self, data):
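# GAANBase.process_graph is expected to attach the dense adjacency
# matrix to ``data.s`` (hence the ``to_dense_adj`` import above);
# ``data.s`` is used below as the structure reconstruction target.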
GAANBase.process_graph(data)
def init_model(self, **kwargs):
if self.save_emb:
self.emb = torch.zeros(self.num_nodes,
self.hid_dim)
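# build the underlying GAAN network; ``noise_dim`` is the generator
# input size and ``model_layers`` is split between the generator and
# the encoder (see the class docstring)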
return GAANBase(in_dim=self.in_dim,
noise_dim=self.noise_dim,
hid_dim=self.hid_dim,
num_layers=self.model_layers,
dropout=self.dropout,
act=self.act,
**kwargs).to(self.device)
def forward_model(self, data):
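# the first ``batch_size`` nodes of the mini-batch are the seed nodes
# produced by the neighbor loader; ``n_id`` maps batch positions back
# to the original node indices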
batch_size = data.batch_size
node_idx = data.n_id
x = data.x.to(self.device)
s = data.s.to(self.device)
edge_index = data.edge_index.to(self.device)
noise = torch.randn(x.shape[0], self.noise_dim).to(self.device)
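# x_ : node features generated from the Gaussian noise
# a  : edge probabilities reconstructed from real-feature embeddings
# a_ : edge probabilities reconstructed from generated-feature embeddings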
x_, a, a_ = self.model(x, noise)
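# generator step: push the scores on generated (fake) edges towards
# "real"; the generator is updated separately through ``self.opt_in``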
loss_g = self.model.loss_func_g(a_[edge_index])
self.opt_in.zero_grad()
loss_g.backward()
self.opt_in.step()
self.epoch_loss_in += loss_g.item() * batch_size
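# encoder/discriminator step: contrast real-edge scores ``a`` with the
# detached fake-edge scores ``a_`` (detaching blocks gradients from
# flowing back into the generator); this loss is returned and optimized
# by the base detector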
loss = self.model.loss_func_ed(a[edge_index],
a_[edge_index].detach())
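# anomaly score: weighted combination of feature reconstruction error
# and structure (edge) reconstruction confidence, controlled by
# ``self.weight``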
score = self.model.score_func(x=x[:batch_size],
x_=x_[:batch_size],
s=s[:batch_size, node_idx],
s_=a[:batch_size],
weight=self.weight,
pos_weight_s=1,
bce_s=True)
return loss, score.detach().cpu()