Commit f4ff04f

Added Dynamic_Backdoor_GAN
Signed-off-by: Prachi Panwar <prachipanwar0606@gmail.com>
1 parent 09828ab commit f4ff04f

File tree

1 file changed: 73 additions & 0 deletions
@@ -0,0 +1,73 @@
# -*- coding: utf-8 -*-
"""dynamic_backdoor_gan

Automatically generated by Colab.

Original file is located at
    https://colab.research.google.com/drive/19W9gZ2gUxkgu6rr5qAT1Arf7iauCj2QT
"""

import numpy as np
import torch
import torch.nn as nn

from art.attacks.poisoning import PoisoningAttackBackdoor
from art.utils import to_categorical


# Trigger Generator: a small CNN that learns to generate input-specific triggers
class TriggerGenerator(nn.Module):
    def __init__(self, input_channels=3):
        super().__init__()
        self.net = nn.Sequential(
            nn.Conv2d(input_channels, 32, kernel_size=3, padding=1),
            nn.ReLU(),
            nn.Conv2d(32, 32, kernel_size=3, padding=1),
            nn.ReLU(),
            nn.Conv2d(32, input_channels, kernel_size=3, padding=1),
            nn.Tanh()
        )

    def forward(self, x):
        return self.net(x)


# Custom poisoning attack: DynamicBackdoorGAN defines how to poison data using the GAN trigger generator
class DynamicBackdoorGAN(PoisoningAttackBackdoor):
    def __init__(self, generator, target_label, backdoor_rate, classifier, epsilon=0.5):
        super().__init__(perturbation=lambda x: x)
        self.classifier = classifier
        self.generator = generator.to(classifier.device)
        self.target_label = target_label
        self.backdoor_rate = backdoor_rate
        self.epsilon = epsilon

    # Add the trigger to a given image batch
    def apply_trigger(self, images):
        self.generator.eval()
        with torch.no_grad():
            images = nn.functional.interpolate(images, size=(32, 32), mode='bilinear')  # Resize images to ensure uniform dimensions
            triggers = self.generator(images.to(self.classifier.device))  # Generate dynamic, input-specific triggers with the trained TriggerGenerator
            poisoned = (images.to(self.classifier.device) + self.epsilon * triggers).clamp(0, 1)  # Clamp pixel values so they stay in the valid [0, 1] range
        return poisoned

    # Poison the training data by injecting dynamic triggers and changing labels
    def poison(self, x, y):
        # Convert raw image data (x) to float tensors and one-hot labels (y) to class indices, as required by ART
        x_tensor = torch.tensor(x).float()
        y_tensor = torch.tensor(np.argmax(y, axis=1))
        # Compute the total number of samples and how many should be poisoned (poison ratio = backdoor_rate)
        batch_size = x_tensor.shape[0]
        n_poison = int(self.backdoor_rate * batch_size)
        # Apply the learned trigger to the first 'n_poison' samples
        poisoned = self.apply_trigger(x_tensor[:n_poison])
        # The remaining samples stay clean
        clean = x_tensor[n_poison:].to(self.classifier.device)
        # Combine poisoned and clean samples into a single batch
        poisoned_images = torch.cat([poisoned, clean], dim=0).cpu().numpy()
        # Relabel the poisoned samples with the attacker's target class
        new_labels = y_tensor.clone()
        new_labels[:n_poison] = self.target_label  # Set the poisoned labels to the desired misclassification
        # Convert all labels back to one-hot encoding (required by ART classifiers)
        new_labels = to_categorical(new_labels.numpy(), nb_classes=self.classifier.nb_classes)
        return poisoned_images.astype(np.float32), new_labels.astype(np.float32)

    # Evaluate the attack's success on test data
    def evaluate(self, x_clean, y_clean):
        x_tensor = torch.tensor(x_clean).float()
        poisoned_test = self.apply_trigger(x_tensor).cpu().numpy().astype(np.float32)  # Apply the trigger to every test image to create a poisoned test set

        preds = self.classifier.predict(poisoned_test)
        true_target = np.full((len(preds),), self.target_label)  # Every poisoned image should be predicted as the target class
        pred_labels = np.argmax(preds, axis=1)

        success = np.sum(pred_labels == true_target)
        asr = 100.0 * success / len(pred_labels)  # Attack success rate in percent
        return asr
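
For context, here is a minimal usage sketch (not part of this commit) showing how the two classes above might be wired to an ART PyTorchClassifier to poison a training set and measure the attack success rate. The victim model, the random stand-in data, and the hyperparameter choices (target_label=0, backdoor_rate=0.1, batch size, epochs) are illustrative assumptions, not values taken from this file.

    # Hypothetical usage sketch; assumes the classes defined above are in scope.
    import numpy as np
    import torch.nn as nn
    import torch.optim as optim
    from art.estimators.classification import PyTorchClassifier

    # Random CIFAR-10-shaped data as a stand-in for a real dataset (labels are one-hot).
    x_train = np.random.rand(512, 3, 32, 32).astype(np.float32)
    y_train = np.eye(10)[np.random.randint(0, 10, 512)].astype(np.float32)
    x_test = np.random.rand(128, 3, 32, 32).astype(np.float32)
    y_test = np.eye(10)[np.random.randint(0, 10, 128)].astype(np.float32)

    # Placeholder victim model; any image classifier wrapped by ART would do.
    victim = nn.Sequential(
        nn.Conv2d(3, 16, kernel_size=3, padding=1), nn.ReLU(), nn.MaxPool2d(2),
        nn.Flatten(), nn.Linear(16 * 16 * 16, 10),
    )
    classifier = PyTorchClassifier(
        model=victim,
        loss=nn.CrossEntropyLoss(),
        optimizer=optim.Adam(victim.parameters(), lr=1e-3),
        input_shape=(3, 32, 32),
        nb_classes=10,
    )

    generator = TriggerGenerator(input_channels=3)  # assumed to be trained separately
    attack = DynamicBackdoorGAN(generator, target_label=0, backdoor_rate=0.1,
                                classifier=classifier, epsilon=0.5)

    # Poison part of the training data, train on the mixed batch, then measure ASR.
    x_poisoned, y_poisoned = attack.poison(x_train, y_train)
    classifier.fit(x_poisoned, y_poisoned, batch_size=128, nb_epochs=5)
    asr = attack.evaluate(x_test, y_test)
    print(f"Attack success rate: {asr:.2f}%")

Since apply_trigger resizes every batch to 32x32, the sketch assumes 32x32 RGB inputs (CIFAR-10-sized images), and the TriggerGenerator is assumed to have been trained beforehand so that its triggers are actually effective.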
