# Multivariate Time Series Anomaly Detection

## Problem and Approach

Anomaly detection is commonly used to find rare samples within a large volume of similarly distributed data. It is widely applied in industry, for example fraud detection in social networks, abnormal behavior detection in video, and anomalous traffic detection on the Internet. As generative models have matured, anomaly detection methods built on them have achieved strong results across a range of applications. These methods all rest on one assumption: compared with anomalous samples, normal samples are easy for a generative model to reconstruct. We can therefore build an autoencoder (AutoEncoder, AE) and score each sample by its reconstruction error to decide whether it is anomalous.
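The scoring rule itself is compact. The sketch below is a minimal illustration of the idea, not the project's code: `model` stands for any trained autoencoder, and the 10% contamination rate mirrors the cutoff used later in this post.

```python
import torch

def anomaly_scores(model, x):
    """Score each sample by its mean squared reconstruction error.
    `model` is assumed to be a trained autoencoder mapping x -> x_hat."""
    model.eval()
    with torch.no_grad():
        x_hat = model(x)
        # Average the squared error over every dimension except the batch dim
        return ((x_hat - x) ** 2).flatten(start_dim=1).mean(dim=1)

def flag_top_fraction(scores, fraction=0.1):
    """Mark the `fraction` of samples with the largest error as anomalous."""
    k = int(len(scores) * fraction)
    flags = torch.zeros(len(scores), dtype=torch.bool)
    flags[scores.topk(k).indices] = True
    return flags
```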

## Method and Experiments

Our code is based on Outlier Detection for Multidimensional Time Series using Deep Neural Networks (https://github.com/matanle51/LSTM_AutoEncoder), with the following changes:

  1. First, add test code. As described above, it computes each sample's reconstruction error, sorts the samples, and marks the 10% with the largest error as anomalous:
```python
def test_model(criterion, model, model_type, test_iter):
    """
    Run the trained model over the test set and flag anomalies.
    :param criterion: loss function with reduction='none' (per-element errors)
    :param model: pytorch model object
    :param model_type: model type (only ae / ae+clf), used to know if accuracy is needed
    :param test_iter: test dataloader
    :return: tensor of (timestamp, label) rows; label 1 marks an anomaly
    """
    model.eval()
    loss = None
    timestamp = None
    test_num = len(test_iter.dataset)
    with torch.no_grad():
        for data in test_iter:
            data = data.to(device)

            model_out = model(data)
            if loss is None:
                loss = criterion(model_out, data).mean(1)
                timestamp = data[:, 0, :]
            else:
                loss = torch.cat([loss, criterion(model_out, data).mean(1)])
                timestamp = torch.cat([timestamp, data[:, 0, :]])

    # Mark the 10% of samples with the largest reconstruction error
    loss_top, index = loss.topk(int(test_num * 0.1), dim=0)
    # Allocate the label tensor on the same device as `loss` so GPU runs don't crash
    result = torch.zeros((test_num, 1), device=loss.device)
    result[index, 0] = 1

    return torch.cat([timestamp, result], dim=1)
```
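One design choice worth noting: the 10% contamination rate is hard-coded, so exactly 10% of samples are flagged regardless of how anomalous they actually are. If a clean validation set is available, a data-driven threshold is a common alternative; a minimal sketch (the three-sigma rule here is an assumption, not part of the original project):

```python
def sigma_threshold(val_errors, n_sigma=3.0):
    """Flag errors beyond mean + n_sigma * std of validation-set errors."""
    return val_errors.mean() + n_sigma * val_errors.std()

# is_anomaly = test_errors > sigma_threshold(val_errors)
```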
  2. Add a helper that reads the data from a CSV file:
```python
def read_csv(filename):
    data = []
    with open(filename, "r") as f:
        lines = f.readlines()
        for line in lines[1:]:  # skip the header row
            data.append([float(i) for i in line.split(",")])
    return data
```
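The repo's `create_dataloaders` ultimately feeds the model tensors of shape (batch, seq_len, features). How the CSV rows are windowed is not shown in this post; one plausible sketch, where the window length and stride are assumptions:

```python
import torch

def make_windows(rows, seq_len=50, stride=1):
    """Slice per-timestep rows into overlapping (seq_len, features) windows."""
    data = torch.tensor(rows, dtype=torch.float32)  # (num_steps, features)
    windows = [data[i:i + seq_len]
               for i in range(0, len(data) - seq_len + 1, stride)]
    return torch.stack(windows)  # (num_windows, seq_len, features)
```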
  3. Modify the main function to add the prediction step and save the results:
```python
def main():
    # Create data loaders
    train_iter, val_iter, test_iter = create_dataloaders(args.batch_size)

    # Create model
    model = LSTMAE(input_size=args.input_size,
                   hidden_size=args.hidden_size,
                   dropout_ratio=args.dropout,
                   seq_len=args.seq_len)
    model.to(device)

    # Create optimizer & loss functions
    optimizer = getattr(torch.optim, args.optim)(params=model.parameters(), lr=args.lr, weight_decay=args.wd)
    criterion = torch.nn.MSELoss(reduction='sum')
    criterion_test = torch.nn.MSELoss(reduction='none')

    # Grid search run if run-grid-search flag is active
    if args.run_grid_search:
        hyper_params_grid_search(train_iter, val_iter, criterion)
        return

    # Train & Val
    for epoch in range(args.epochs):
        train_model(criterion, epoch, model, args.model_type, optimizer, train_iter, args.batch_size,
                    args.grad_clipping, args.log_interval)
        eval_model(criterion, model, args.model_type, val_iter)

    # Test: flag the 10% of samples with the largest reconstruction error
    result = test_model(criterion_test, model, args.model_type, test_iter)

    # Save model
    torch.save(model.state_dict(), os.path.join(args.model_dir, f'model_hs={args.hidden_size}_bs={args.batch_size}'
                                                                f'_epochs={args.epochs}_clip={args.grad_clipping}.pt'))

    # Save results
    df = pd.DataFrame(result.tolist())
    df.columns = ["timestamp_(min)", "label"]
    df.to_csv("result.csv", index=False)
```
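For completeness: the script relies on module-level `args` and `device` objects from the repo's training script. A minimal setup covering just the attributes referenced above might look like the following; the defaults are placeholders, not the repo's actual values.

```python
import argparse
import torch

parser = argparse.ArgumentParser(description='LSTM-AE anomaly detection')
parser.add_argument('--batch-size', type=int, default=128)
parser.add_argument('--input-size', type=int, default=1)
parser.add_argument('--hidden-size', type=int, default=256)
parser.add_argument('--dropout', type=float, default=0.0)
parser.add_argument('--seq-len', type=int, default=50)
parser.add_argument('--optim', default='Adam')
parser.add_argument('--lr', type=float, default=1e-3)
parser.add_argument('--wd', type=float, default=0.0)
parser.add_argument('--epochs', type=int, default=100)
parser.add_argument('--grad-clipping', type=float, default=None)
parser.add_argument('--log-interval', type=int, default=10)
parser.add_argument('--model-type', default='ae')
parser.add_argument('--model-dir', default='trained_models')
parser.add_argument('--run-grid-search', action='store_true')
args = parser.parse_args()

device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
```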
  4. The core of the project is the LSTMAE reconstruction model: the Encoder is a single LSTM layer, and the Decoder is a single LSTM layer followed by a fully connected (FC) layer, so the structure is deliberately simple:
```python
# Encoder Class
class Encoder(nn.Module):
    def __init__(self, input_size, hidden_size, dropout, seq_len):
        super(Encoder, self).__init__()
        self.input_size = input_size
        self.hidden_size = hidden_size
        self.dropout = dropout
        self.seq_len = seq_len

        # Note: with a single layer, PyTorch applies no LSTM dropout (it only acts between layers)
        self.lstm_enc = nn.LSTM(input_size=input_size, hidden_size=hidden_size, dropout=dropout, batch_first=True)

    def forward(self, x):
        out, (last_h_state, last_c_state) = self.lstm_enc(x)
        # Use the last hidden state as the sequence embedding...
        x_enc = last_h_state.squeeze(dim=0)
        # ...and repeat it across the time axis to feed the decoder
        x_enc = x_enc.unsqueeze(1).repeat(1, x.shape[1], 1)
        return x_enc, out


# Decoder Class
class Decoder(nn.Module):
    def __init__(self, input_size, hidden_size, dropout, seq_len, use_act):
        super(Decoder, self).__init__()
        self.input_size = input_size
        self.hidden_size = hidden_size
        self.dropout = dropout
        self.seq_len = seq_len
        self.use_act = use_act  # Controls the final sigmoid activation - depends on the normalization used
        self.act = nn.Sigmoid()

        self.lstm_dec = nn.LSTM(input_size=hidden_size, hidden_size=hidden_size, dropout=dropout, batch_first=True)
        self.fc = nn.Linear(hidden_size, input_size)

    def forward(self, z):
        dec_out, (hidden_state, cell_state) = self.lstm_dec(z)
        dec_out = self.fc(dec_out)  # Map hidden states back to the input dimension
        if self.use_act:
            dec_out = self.act(dec_out)

        return dec_out, hidden_state


# LSTM Auto-Encoder Class
class LSTMAE(nn.Module):
    def __init__(self, input_size, hidden_size, dropout_ratio, seq_len, use_act=True):
        super(LSTMAE, self).__init__()
        self.input_size = input_size
        self.hidden_size = hidden_size
        self.dropout_ratio = dropout_ratio
        self.seq_len = seq_len

        self.encoder = Encoder(input_size=input_size, hidden_size=hidden_size, dropout=dropout_ratio, seq_len=seq_len)
        self.decoder = Decoder(input_size=input_size, hidden_size=hidden_size, dropout=dropout_ratio, seq_len=seq_len, use_act=use_act)

    def forward(self, x, return_last_h=False, return_enc_out=False):
        x_enc, enc_out = self.encoder(x)
        x_dec, last_h = self.decoder(x_enc)

        if return_last_h:
            return x_dec, last_h
        elif return_enc_out:
            return x_dec, enc_out
        return x_dec
```
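A quick smoke test of the shapes, using a random batch (the sizes here are arbitrary):

```python
import torch

model = LSTMAE(input_size=1, hidden_size=64, dropout_ratio=0.0, seq_len=50, use_act=True)
x = torch.randn(8, 50, 1)  # (batch, seq_len, features)
x_rec = model(x)           # reconstruction, same shape as x
per_sample_err = ((x_rec - x) ** 2).mean(dim=(1, 2))
print(x_rec.shape, per_sample_err.shape)  # torch.Size([8, 50, 1]) torch.Size([8])
```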
## Future Improvements

  1. The current network is structurally simple; the deep model's architecture could be improved, for example by introducing an attention mechanism.
  2. No preprocessing is applied to the data.
  3. The hyperparameters are not tuned, e.g., the dropout rate, learning rate, and hidden_size.
  4. An early-stopping mechanism could be introduced to prevent overfitting (see the sketch after this list), among other possibilities.
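As an illustration of item 4, here is a minimal early-stopping helper that could wrap the existing train/eval loop; the patience value and the wiring into main() are assumptions, not part of the original project.

```python
class EarlyStopper:
    """Stop training when validation loss has not improved for `patience` epochs."""
    def __init__(self, patience=5, min_delta=0.0):
        self.patience = patience
        self.min_delta = min_delta
        self.best = float('inf')
        self.bad_epochs = 0

    def step(self, val_loss):
        if val_loss < self.best - self.min_delta:
            self.best = val_loss
            self.bad_epochs = 0
        else:
            self.bad_epochs += 1
        return self.bad_epochs >= self.patience  # True -> stop training

# Usage inside the epoch loop (assumes eval_model returns the validation loss):
# stopper = EarlyStopper(patience=5)
# val_loss = eval_model(criterion, model, args.model_type, val_iter)
# if stopper.step(val_loss):
#     break
```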
