2.1. MI-FACE
In this tutorial, we introduce the basic model inversion algorithm MI-FACE, which reconstructs a representative image of a class in the training dataset from the trained model.
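Conceptually, MI-FACE starts from a blank input and runs gradient descent so that the target model assigns as much confidence as possible to the chosen class. The snippet below is a minimal sketch of that idea, not AIJack's exact implementation; the function name and the zero initialization are simplifying assumptions, and lam plays the role of the gradient-descent step size λ from the original MI-FACE algorithm.

import torch

def mi_face_sketch(model, input_shape, target_label, num_itr=100, lam=0.03):
    # start from an all-zero image and optimize it directly
    x = torch.zeros(input_shape, requires_grad=True)
    for _ in range(num_itr):
        # cost c(x) = 1 - f_c(x): small when the model is confident about class c
        cost = 1 - model(x)[0, target_label]
        cost.backward()
        with torch.no_grad():
            x -= lam * x.grad  # gradient-descent step of size lam
            x.grad.zero_()
    return x.detach()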
# Download the AT&T face images (40 subjects, 10 grayscale 112 x 92 images each)
# bundled with this repository and move them into data/.
!git clone https://github.com/harveyslash/Facial-Similarity-with-Siamese-Networks-in-Pytorch.git
!mkdir data
!mv Facial-Similarity-with-Siamese-Networks-in-Pytorch/data/faces/testing/* data/
!mv Facial-Similarity-with-Siamese-Networks-in-Pytorch/data/faces/training/* data/
import cv2
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import torchvision.transforms as transforms
from torch.utils.data import TensorDataset
from matplotlib import pyplot as plt
import glob
from sklearn.metrics import accuracy_score
from aijack.attack import MI_FACE
from aijack.defense import GeneralMomentAccountant, DPSGDManager
from aijack.utils import NumpyDataset
BASE = "data/"
# Target model: a linear layer over the flattened 112 x 92 image followed by softmax,
# with one output per subject (40 classes).
class Net(nn.Module):
    def __init__(self):
        super(Net, self).__init__()
        self.fla = nn.Flatten()
        self.fc = nn.Linear(112 * 92, 40)

    def forward(self, x):
        x = self.fla(x)
        x = self.fc(x)
        x = F.softmax(x, dim=1)
        return x
2.1.1. Setup
# Load the 40 subjects (s1 ... s40), 10 images each; label = subject index - 1.
imgs = []
labels = []
for i in range(1, 41):
    for j in range(1, 11):
        img = cv2.imread(BASE + f"s{i}/{j}.pgm", 0)  # 0 = read as grayscale
        imgs.append(img)
        labels.append(i - 1)
X = np.stack(imgs)
y = np.array(labels)
# ToTensor: converts the image to a tensor (scales pixel values from 0-255 to the 0-1 range);
# Normalize: standardizes (z-scores) the values with mean and standard deviation both fixed at 0.5.
transform = transforms.Compose(
    [transforms.ToTensor(), transforms.Normalize((0.5,), (0.5,))]
)
trainset = NumpyDataset(X, y, transform=transform)
trainloader = torch.utils.data.DataLoader(
    trainset, batch_size=4, shuffle=True, num_workers=2
)
net = Net()
criterion = nn.CrossEntropyLoss()
optimizer = optim.SGD(net.parameters(), lr=0.005, momentum=0.9)
2.1.2. Training the target model
for epoch in range(10):  # loop over the dataset multiple times
    running_loss = 0
    data_size = 0
    for i, data in enumerate(trainloader, 0):
        # get the inputs; data is a list of [inputs, labels]
        inputs, labels = data

        # zero the parameter gradients
        optimizer.zero_grad()

        # forward + backward + optimize
        outputs = net(inputs)
        loss = criterion(outputs, labels.to(torch.int64))
        loss.backward()
        optimizer.step()

        running_loss += loss.item()
        data_size += inputs.shape[0]

    print(f"epoch {epoch}: loss is {running_loss/data_size}")
in_preds = []
in_label = []
with torch.no_grad():
    for data in trainloader:
        inputs, labels = data
        outputs = net(inputs)
        in_preds.append(outputs)
        in_label.append(labels)
in_preds = torch.cat(in_preds)
in_label = torch.cat(in_label)
print(
    "Accuracy on the training data is: ",
    accuracy_score(np.array(in_label), np.array(torch.argmax(in_preds, axis=1))),
)
epoch 0: loss is 0.9158830660581588
epoch 1: loss is 0.8644785904884338
epoch 2: loss is 0.8073549878597259
epoch 3: loss is 0.7737618207931518
epoch 4: loss is 0.7515438687801361
epoch 5: loss is 0.7337622374296189
epoch 6: loss is 0.7225443947315217
epoch 7: loss is 0.7174954098463059
epoch 8: loss is 0.7157024312019348
epoch 9: loss is 0.7153458887338638
Accuracy on the training data is: 0.8725
2.1.3. Attack
input_shape = (1, 1, 112, 92)  # a batch of one single-channel 112 x 92 image
target_label = 11  # labels are 0-indexed, so label 11 corresponds to subject s12
lam = 0.03  # step size of the attack's gradient descent
num_itr = 100  # number of attack iterations
mi = MI_FACE(
    net,
    input_shape,
    target_label=target_label,
    num_itr=num_itr,
    lam=lam,
    log_interval=0,
)
x_result_1, log = mi.attack()
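Since the attack needs only the trained model and a class index, the same call can be repeated for any of the 40 subjects. For example, the following sketch (the variable names are ours) targets label 0, which corresponds to subject s1 under the i - 1 labeling above:

mi_s1 = MI_FACE(
    net,
    input_shape,
    target_label=0,  # subject s1
    num_itr=num_itr,
    lam=lam,
    log_interval=0,
)
x_result_s1, log_s1 = mi_s1.attack()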
fig, axes = plt.subplots(nrows=1, ncols=2, figsize=(4, 3))
# left: the average of the genuine s12 images; right: the image recovered by the attack
axes[0].imshow(
    np.mean([cv2.imread(p, 0) for p in glob.glob(BASE + "s12/*.pgm")], axis=0),
    cmap="gray",
)
axes[0].axis("off")
axes[0].set_title("original image")
axes[1].imshow(x_result_1[0][0].detach().numpy(), cmap="gray")
axes[1].axis("off")
axes[1].set_title("extracted image")
fig.tight_layout()
fig.show()