From f36d8c11810904de7026e5671e3cb88485e65c87 Mon Sep 17 00:00:00 2001 From: Cory Cornelius Date: Mon, 13 Mar 2023 14:20:17 -0700 Subject: [PATCH 001/163] Remove NoAdversary --- mart/attack/adversary.py | 13 +------------ 1 file changed, 1 insertion(+), 12 deletions(-) diff --git a/mart/attack/adversary.py b/mart/attack/adversary.py index 6f3a9f1e..6eace083 100644 --- a/mart/attack/adversary.py +++ b/mart/attack/adversary.py @@ -17,7 +17,7 @@ from .perturber import BatchPerturber, Perturber from .threat_model import ThreatModel -__all__ = ["Adversary", "NoAdversary"] +__all__ = ["Adversary"] class AdversaryCallbackHookMixin(Callback): @@ -320,14 +320,3 @@ def forward( output = self.threat_model(input, target, perturbation) return output - - -class NoAdversary(torch.nn.Module): - def forward( - self, - input: torch.Tensor | tuple, - target: torch.Tensor | dict[str, Any] | tuple, - model: torch.nn.Module | None = None, - **kwargs, - ): - return input From dcf7114f3872f99f23b9398a8ac041dea2aeabc9 Mon Sep 17 00:00:00 2001 From: Cory Cornelius Date: Mon, 13 Mar 2023 14:32:52 -0700 Subject: [PATCH 002/163] Remove NoAdversary from CIFAR10 adversarial training --- mart/configs/experiment/CIFAR10_CNN_Adv.yaml | 24 ++++++++- mart/configs/model/classifier.yaml | 53 ++++++++------------ 2 files changed, 43 insertions(+), 34 deletions(-) diff --git a/mart/configs/experiment/CIFAR10_CNN_Adv.yaml b/mart/configs/experiment/CIFAR10_CNN_Adv.yaml index 6dc013a9..45fa6197 100644 --- a/mart/configs/experiment/CIFAR10_CNN_Adv.yaml +++ b/mart/configs/experiment/CIFAR10_CNN_Adv.yaml @@ -3,9 +3,29 @@ defaults: - CIFAR10_CNN - /attack@model.modules.input_adv_training: classification_eps1.75_fgsm - # Skip costly adversarial validation. 
- # - /attack@model.modules.input_adv_validation: classification_eps2_pgd10_step1 - /attack@model.modules.input_adv_test: classification_eps2_pgd10_step1 task_name: "CIFAR10_CNN_Adv" tags: ["adv", "fat"] + +model: + training_sequence: + seq005: + input_adv_training: + _call_with_args_: ["input", "target"] + model: model + step: step + + seq010: + preprocessor: + _call_with_args_: ["input_adv_training"] + + test_sequence: + seq005: + input_adv_test: + _call_with_args_: ["input", "target"] + model: model + step: step + + seq010: + preprocessor: ["input_adv_test"] diff --git a/mart/configs/model/classifier.yaml b/mart/configs/model/classifier.yaml index 087c709c..ad664989 100644 --- a/mart/configs/model/classifier.yaml +++ b/mart/configs/model/classifier.yaml @@ -5,19 +5,20 @@ defaults: # The verbose version. training_sequence: - - input_adv_training: - _call_with_args_: ["input", "target"] - model: model - step: step - - preprocessor: - _call_with_args_: ["input_adv_training"] - - logits: + seq010: + preprocessor: + _call_with_args_: ["input"] + seq020: + logits: _call_with_args_: ["preprocessor"] - - loss: + seq030: + loss: _call_with_args_: ["logits", "target"] - - preds: + seq040: + preds: _call_with_args_: ["logits"] - - output: + seq050: + output: { "preds": "preds", "target": "target", @@ -28,13 +29,10 @@ training_sequence: # The kwargs-centric version. # We may use *args as **kwargs to avoid the lengthy _call_with_args_. # The drawback is that we would need to lookup the *args names from the code. +# We use a list-style sequence since we don't care about replacing any elements. validation_sequence: - - input_adv_validation: - _call_with_args_: ["input", "target"] - model: model - step: step - preprocessor: - tensor: input_adv_validation + tensor: input - logits: ["preprocessor"] - preds: input: logits @@ -46,25 +44,16 @@ validation_sequence: # The simplified version. # We treat a list as the `_call_with_args_` parameter. 
test_sequence: - - input_adv_test: - _call_with_args_: ["input", "target"] - model: model - step: step - - preprocessor: ["input_adv_test"] - - logits: ["preprocessor"] - - preds: ["logits"] - - output: { preds: preds, target: target, logits: logits } + seq010: + preprocessor: ["input"] + seq020: + logits: ["preprocessor"] + seq030: + preds: ["logits"] + seq040: + output: { preds: preds, target: target, logits: logits } modules: - input_adv_training: - _target_: mart.attack.NoAdversary - - input_adv_validation: - _target_: mart.attack.NoAdversary - - input_adv_test: - _target_: mart.attack.NoAdversary - preprocessor: ??? logits: ??? From 75886b675b66765638733e498112ae3b89ce05b2 Mon Sep 17 00:00:00 2001 From: Cory Cornelius Date: Mon, 13 Mar 2023 14:43:58 -0700 Subject: [PATCH 003/163] Remove NoAdversary from RetinaNet model --- mart/configs/model/torchvision_retinanet.yaml | 27 +++---------------- 1 file changed, 3 insertions(+), 24 deletions(-) diff --git a/mart/configs/model/torchvision_retinanet.yaml b/mart/configs/model/torchvision_retinanet.yaml index 4bdc4c8d..807fe615 100644 --- a/mart/configs/model/torchvision_retinanet.yaml +++ b/mart/configs/model/torchvision_retinanet.yaml @@ -6,11 +6,7 @@ defaults: training_step_log: ["loss_classifier", "loss_box_reg"] training_sequence: - - input_adv_training: - _call_with_args_: ["input", "target"] - model: model - step: step - - preprocessor: ["input_adv_training"] + - preprocessor: ["input"] - losses_and_detections: ["preprocessor", "target"] - loss: # Sum up the losses. 
@@ -29,11 +25,7 @@ training_sequence: } validation_sequence: - - input_adv_validation: - _call_with_args_: ["input", "target"] - model: model - step: step - - preprocessor: ["input_adv_validation"] + - preprocessor: ["input"] - losses_and_detections: ["preprocessor", "target"] - output: { @@ -44,11 +36,7 @@ validation_sequence: } test_sequence: - - input_adv_test: - _call_with_args_: ["input", "target"] - model: model - step: step - - preprocessor: ["input_adv_test"] + - preprocessor: ["input"] - losses_and_detections: ["preprocessor", "target"] - output: { @@ -59,15 +47,6 @@ test_sequence: } modules: - input_adv_training: - _target_: mart.attack.NoAdversary - - input_adv_validation: - _target_: mart.attack.NoAdversary - - input_adv_test: - _target_: mart.attack.NoAdversary - losses_and_detections: # _target_: mart.models.DualMode model: From 52aa94d8f5b99a6cd6940c565193669f0b9aa08d Mon Sep 17 00:00:00 2001 From: Cory Cornelius Date: Mon, 13 Mar 2023 14:47:19 -0700 Subject: [PATCH 004/163] Fix COCO_TorchvisionFasterRCNN_Adv experiment --- .../experiment/COCO_TorchvisionFasterRCNN_Adv.yaml | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/mart/configs/experiment/COCO_TorchvisionFasterRCNN_Adv.yaml b/mart/configs/experiment/COCO_TorchvisionFasterRCNN_Adv.yaml index aea9535b..d9a895d0 100644 --- a/mart/configs/experiment/COCO_TorchvisionFasterRCNN_Adv.yaml +++ b/mart/configs/experiment/COCO_TorchvisionFasterRCNN_Adv.yaml @@ -7,3 +7,11 @@ defaults: task_name: "COCO_TorchvisionFasterRCNN_Adv" tags: ["adv"] + +model: + test_sequence: + seq005: + input_adv_test: + _call_with_args_: ["input", "target"] + model: model + step: step From bbed2d6351523b4991c8a4604f802f9a99024e60 Mon Sep 17 00:00:00 2001 From: Cory Cornelius Date: Mon, 13 Mar 2023 14:48:38 -0700 Subject: [PATCH 005/163] Remove NoAdversary tests --- tests/test_adversary.py | 22 +--------------------- 1 file changed, 1 insertion(+), 21 deletions(-) diff --git a/tests/test_adversary.py 
b/tests/test_adversary.py index 1413fb8e..1008a8e0 100644 --- a/tests/test_adversary.py +++ b/tests/test_adversary.py @@ -13,30 +13,10 @@ from torch.optim import SGD import mart -from mart.attack import Adversary, NoAdversary +from mart.attack import Adversary from mart.attack.perturber import Perturber -def test_no_adversary(input_data, target_data): - adversary = NoAdversary() - - # Not having a model should not change the output. - output_data = adversary(input_data, target_data) - - torch.testing.assert_close(output_data, input_data) - - -def test_no_adversary_with_model(input_data, target_data): - adversary = NoAdversary() - model = Mock() - - # Having a model should not change the output. - output_data = adversary(input_data, target_data, model=model) - - model.assert_not_called() - torch.testing.assert_close(output_data, input_data) - - def test_adversary(input_data, target_data, perturbation): threat_model = mart.attack.threat_model.Additive() perturber = Mock(return_value=perturbation) From f3f7b1b88916d4c36843ed7ad9caa20c0590e9b0 Mon Sep 17 00:00:00 2001 From: Cory Cornelius Date: Mon, 13 Mar 2023 16:53:31 -0700 Subject: [PATCH 006/163] First stab at treating adversary as LightningModule --- mart/attack/adversary.py | 277 ++++-------------- .../attack/classification_eps1.75_fgsm.yaml | 3 +- .../classification_eps2_pgd10_step1.yaml | 3 +- mart/configs/experiment/CIFAR10_CNN_Adv.yaml | 12 +- mart/configs/model/classifier.yaml | 10 +- mart/nn/nn.py | 16 +- 6 files changed, 85 insertions(+), 236 deletions(-) diff --git a/mart/attack/adversary.py b/mart/attack/adversary.py index 6eace083..026e31b1 100644 --- a/mart/attack/adversary.py +++ b/mart/attack/adversary.py @@ -10,6 +10,7 @@ from typing import Any import torch +from pytorch_lightning import Trainer, LightningModule from .callbacks import Callback from .gain import Gain @@ -53,270 +54,106 @@ def on_run_end(self, **kwargs) -> None: callback.on_run_end(**kwargs) -class 
IterativeGenerator(AdversaryCallbackHookMixin, torch.nn.Module): - """The attack optimization loop. - - This class implements the following loop structure: - - .. code-block:: python - - on_run_start() - - while true: - on_examine_start() - examine() - on_examine_end() - - if not done: - on_advance_start() - advance() - on_advance_end() - else: - break - - on_run_end() +class LitPerturbation(LightningModule): + """Peturbation optimization module. """ - def __init__( self, *, - perturber: BatchPerturber | Perturber, - optimizer: torch.optim.Optimizer, - max_iters: int, - gain: Gain, - objective: Objective | None = None, - callbacks: dict[str, Callback] | None = None, + batch, + optimizer, + gain, + **kwargs ): """_summary_ Args: - perturber (BatchPerturber | Perturber): A module that stores perturbations. - optimizer (torch.optim.Optimizer): A PyTorch optimizer. - max_iters (int): The max number of attack iterations. - gain (Gain): An adversarial gain function, which is a differentiable estimate of adversarial objective. - objective (Objective | None): A function for computing adversarial objective, which returns True or False. Optional. - callbacks (dict[str, Callback] | None): A dictionary of callback objects. Optional. """ super().__init__() - self.perturber = perturber + self.batch = batch self.optimizer_fn = optimizer + self.gain = gain - self.max_iters = max_iters - self.callbacks = OrderedDict() - - # Register perturber as callback if it implements Callback interface - if isinstance(self.perturber, Callback): - # FIXME: Use self.perturber.__class__.__name__ as key? - self.callbacks["_perturber"] = self.perturber - - if callbacks is not None: - self.callbacks.update(callbacks) - - self.objective_fn = objective - # self.gain is a tensor. 
- self.gain_fn = gain - - @property - def done(self) -> bool: - # Reach the max iteration; - if self.cur_iter >= self.max_iters: - return True + # Perturbation will be same size as batch input + self.perturbation = torch.nn.Parameter(torch.zeros_like(batch["input"], dtype=torch.float)) - # All adv. examples are found; - if hasattr(self, "found") and bool(self.found.all()) is True: - return True - # Compatible with models which return None gain when objective is reached. - # TODO: Remove gain==None stopping criteria in all models, - # because the BestPerturbation callback relies on gain to determine which pert is the best. - if self.gain is None: - return True + def train_dataloader(self): + from itertools import cycle + return cycle([self.batch]) - return False + def configure_optimizers(self): + return self.optimizer_fn(self.parameters()) - def on_run_start( - self, - *, - adversary: torch.nn.Module, - input: torch.Tensor | tuple, - target: torch.Tensor | dict[str, Any] | tuple, - model: torch.nn.Module, - **kwargs, - ): - super().on_run_start( - adversary=adversary, input=input, target=target, model=model, **kwargs - ) + def training_step(self, batch, batch_idx): + outputs = self(**batch) + return outputs[self.gain] - # FIXME: We should probably just register IterativeAdversary as a callback. - # Set up the optimizer. - self.cur_iter = 0 + def forward(self, *, input, target, model, step=None, **kwargs): + # Calling model with model=None will trigger perturbation application + return model(input=input, target=target, model=None, step=step) - # param_groups with learning rate and other optim params. 
- param_groups = self.perturber.parameter_groups() - self.opt = self.optimizer_fn(param_groups) - - def on_run_end( - self, - *, - adversary: torch.nn.Module, - input: torch.Tensor | tuple, - target: torch.Tensor | dict[str, Any] | tuple, - model: torch.nn.Module, - **kwargs, - ): - super().on_run_end(adversary=adversary, input=input, target=target, model=model, **kwargs) - - # Release optimization resources - del self.opt - - # Disable mixed-precision optimization for attacks, - # since we haven't implemented it yet. - @torch.autocast("cuda", enabled=False) - @torch.autocast("cpu", enabled=False) - def forward( - self, - *, - input: torch.Tensor | tuple, - target: torch.Tensor | dict[str, Any] | tuple, - model: torch.nn.Module, - **kwargs, - ): +class Adversary(torch.nn.Module): + """An adversary module which generates and applies perturbation to input.""" - self.on_run_start(adversary=self, input=input, target=target, model=model, **kwargs) - - while True: - try: - self.on_examine_start( - adversary=self, input=input, target=target, model=model, **kwargs - ) - self.examine(input=input, target=target, model=model, **kwargs) - self.on_examine_end( - adversary=self, input=input, target=target, model=model, **kwargs - ) - - # Check the done condition here, so that every update of perturbation is examined. - if not self.done: - self.on_advance_start( - adversary=self, - input=input, - target=target, - model=model, - **kwargs, - ) - self.advance( - input=input, - target=target, - model=model, - **kwargs, - ) - self.on_advance_end( - adversary=self, - input=input, - target=target, - model=model, - **kwargs, - ) - # Update cur_iter at the end so that all hooks get the correct cur_iter. - self.cur_iter += 1 - else: - break - except StopIteration: - break - - self.on_run_end(adversary=self, input=input, target=target, model=model, **kwargs) - - # Make sure we can do autograd. 
- # Earlier Pytorch Lightning uses no_grad(), but later PL uses inference_mode(): - # https://github.com/Lightning-AI/lightning/pull/12715 - @torch.enable_grad() - @torch.inference_mode(False) - def examine( - self, - *, - input: torch.Tensor | tuple, - target: torch.Tensor | dict[str, Any] | tuple, - model: torch.nn.Module, - **kwargs, - ): - """Examine current perturbation, update self.gain and self.found.""" - - # Clone tensors for autograd, in case it was created in the inference mode. - # FIXME: object detection uses non-pure-tensor data, but it may have cloned somewhere else implicitly? - if isinstance(input, torch.Tensor): - input = input.clone() - if isinstance(target, torch.Tensor): - target = target.clone() - - # Set model as None, because no need to update perturbation. - # Save everything to self.outputs so that callbacks have access to them. - self.outputs = model(input=input, target=target, model=None, **kwargs) - - # Use CallWith to dispatch **outputs. - self.gain = self.gain_fn(**self.outputs) - - # objective_fn is optional, because adversaries may never reach their objective. - if self.objective_fn is not None: - self.found = self.objective_fn(**self.outputs) - if self.gain.shape == torch.Size([]): - # A reduced gain value, not an input-wise gain vector. - self.total_gain = self.gain - else: - # No need to calculate new gradients if adversarial examples are already found. - self.total_gain = self.gain[~self.found].sum() - else: - self.total_gain = self.gain.sum() - - # Make sure we can do autograd. 
- @torch.enable_grad() - @torch.inference_mode(False) - def advance( + def __init__( self, *, - input: torch.Tensor | tuple, - target: torch.Tensor | dict[str, Any] | tuple, - model: torch.nn.Module, - **kwargs, + threat_model: ThreatModel, + perturber: BatchPerturber | Perturber, + optimizer: torch.optim.Optimizer, + max_iters: int, + gain: Gain, + objective: Objective | None = None, + callbacks: dict[str, Callback] | None = None, + **kwargs ): - """Run one attack iteration.""" - - self.opt.zero_grad() - - # Do not flip the gain value, because we set maximize=True in optimizer. - self.total_gain.backward() - - self.opt.step() - - -class Adversary(IterativeGenerator): - """An adversary module which generates and applies perturbation to input.""" - - def __init__(self, *, threat_model: ThreatModel, **kwargs): """_summary_ Args: threat_model (ThreatModel): A layer which injects perturbation to input, serving as the preprocessing layer to the target model. + perturber (BatchPerturber | Perturber): A module that stores perturbations. + optimizer (torch.optim.Optimizer): A PyTorch optimizer. + max_iters (int): The max number of attack iterations. + gain (Gain): An adversarial gain function, which is a differentiable estimate of adversarial objective. + objective (Objective | None): A function for computing adversarial objective, which returns True or False. Optional. + callbacks (dict[str, Callback] | None): A dictionary of callback objects. Optional. """ super().__init__(**kwargs) self.threat_model = threat_model + self.perturber = perturber + self.optimizer = optimizer + self.max_iters = max_iters + self.gain = gain + self.objective = objective + self.callbacks = callbacks def forward( self, + *, input: torch.Tensor | tuple, target: torch.Tensor | dict[str, Any] | tuple, model: torch.nn.Module | None = None, - **kwargs, + **kwargs ): # Generate a perturbation only if we have a model. This will update - # the parameters of self.perturber. 
+ # the parameters of self.perturbation. if model is not None: - super().forward(input=input, target=target, model=model, **kwargs) + batch = {"input": input, "target": target, "model": model, **kwargs} + self.perturbation = LitPerturbation(batch=batch, optimizer=self.optimizer, gain=self.gain, **kwargs) + + # FIXME: how do we get a proper device? + attacker = Trainer(accelerator="auto", max_steps=self.max_iters) + attacker.fit(model=self.perturbation) + # Get perturbation and apply threat model # The mask projector in perturber may require information from target. - perturbation = self.perturber(input, target) + # FIXME: Generalize this so we can just pass perturbation.parameters() to threat_model + perturbation = list(self.perturbation.parameters())[0].to(input.device) output = self.threat_model(input, target, perturbation) return output diff --git a/mart/configs/attack/classification_eps1.75_fgsm.yaml b/mart/configs/attack/classification_eps1.75_fgsm.yaml index 6d191159..0d9768fa 100644 --- a/mart/configs/attack/classification_eps1.75_fgsm.yaml +++ b/mart/configs/attack/classification_eps1.75_fgsm.yaml @@ -5,7 +5,6 @@ defaults: - perturber/gradient_modifier: sign - perturber/projector: linf_additive_range - objective: misclassification - - gain: cross_entropy - threat_model: additive optimizer: @@ -13,6 +12,8 @@ optimizer: max_iters: 1 +gain: "loss" + perturber: initializer: constant: 0 diff --git a/mart/configs/attack/classification_eps2_pgd10_step1.yaml b/mart/configs/attack/classification_eps2_pgd10_step1.yaml index 79f55a4c..46a37895 100644 --- a/mart/configs/attack/classification_eps2_pgd10_step1.yaml +++ b/mart/configs/attack/classification_eps2_pgd10_step1.yaml @@ -5,7 +5,6 @@ defaults: - perturber/gradient_modifier: sign - perturber/projector: linf_additive_range - objective: misclassification - - gain: cross_entropy - threat_model: additive optimizer: @@ -13,6 +12,8 @@ optimizer: max_iters: 10 +gain: "loss" + perturber: initializer: eps: 2 diff --git 
a/mart/configs/experiment/CIFAR10_CNN_Adv.yaml b/mart/configs/experiment/CIFAR10_CNN_Adv.yaml index 45fa6197..c254669b 100644 --- a/mart/configs/experiment/CIFAR10_CNN_Adv.yaml +++ b/mart/configs/experiment/CIFAR10_CNN_Adv.yaml @@ -10,22 +10,14 @@ tags: ["adv", "fat"] model: training_sequence: - seq005: - input_adv_training: - _call_with_args_: ["input", "target"] - model: model - step: step + seq005: input_adv_training seq010: preprocessor: _call_with_args_: ["input_adv_training"] test_sequence: - seq005: - input_adv_test: - _call_with_args_: ["input", "target"] - model: model - step: step + seq005: input_adv_test seq010: preprocessor: ["input_adv_test"] diff --git a/mart/configs/model/classifier.yaml b/mart/configs/model/classifier.yaml index ad664989..5630bd5e 100644 --- a/mart/configs/model/classifier.yaml +++ b/mart/configs/model/classifier.yaml @@ -34,12 +34,16 @@ validation_sequence: - preprocessor: tensor: input - logits: ["preprocessor"] + - loss: + input: logits + target: target - preds: input: logits - output: preds: preds target: target logits: logits + loss: loss # The simplified version. # We treat a list as the `_call_with_args_` parameter. @@ -49,9 +53,11 @@ test_sequence: seq020: logits: ["preprocessor"] seq030: - preds: ["logits"] + loss: ["logits", "target"] seq040: - output: { preds: preds, target: target, logits: logits } + preds: ["logits"] + seq050: + output: { preds: preds, target: target, logits: logits, loss: loss} modules: preprocessor: ??? 
diff --git a/mart/nn/nn.py b/mart/nn/nn.py index e631b079..8d32f6b0 100644 --- a/mart/nn/nn.py +++ b/mart/nn/nn.py @@ -66,6 +66,10 @@ def parse_sequence(self, sequence): module_dict = OrderedDict() for module_info in sequence: + # Treat strings as modules that don't require CallWith + if isinstance(module_info, str): + module_info = {module_info: {}} + if not isinstance(module_info, dict) or len(module_info) != 1: raise ValueError( f"Each module config in the sequence list should be a length-one dict: {module_info}" @@ -87,7 +91,11 @@ def parse_sequence(self, sequence): kwarg_keys = module_cfg module = self[module_name] - module = CallWith(module, arg_keys, kwarg_keys, return_keys) + + # Add CallWith to module if we have enough parameters + if arg_keys is not None or len(kwarg_keys) > 0 or return_keys is not None: + module = CallWith(module, arg_keys, kwarg_keys, return_keys) + module_dict[return_name] = module return module_dict @@ -153,7 +161,11 @@ def forward(self, *args, **kwargs): selected_args = [kwargs[key] for key in arg_keys[len(args) :]] selected_kwargs = {key: kwargs[val] for key, val in kwarg_keys.items()} - ret = self.module(*args, *selected_args, **selected_kwargs) + try: + ret = self.module(*args, *selected_args, **selected_kwargs) + except TypeError: + # FIXME: Add better error message + raise if self.return_keys: if not isinstance(ret, tuple): From 0e16460562d2ddd4021e5f6fc512fb9f83a1cc56 Mon Sep 17 00:00:00 2001 From: Cory Cornelius Date: Mon, 13 Mar 2023 16:56:20 -0700 Subject: [PATCH 007/163] style --- mart/attack/adversary.py | 26 ++++++++++---------------- mart/configs/model/classifier.yaml | 2 +- 2 files changed, 11 insertions(+), 17 deletions(-) diff --git a/mart/attack/adversary.py b/mart/attack/adversary.py index 026e31b1..fe88a89c 100644 --- a/mart/attack/adversary.py +++ b/mart/attack/adversary.py @@ -10,7 +10,7 @@ from typing import Any import torch -from pytorch_lightning import Trainer, LightningModule +from pytorch_lightning import 
LightningModule, Trainer from .callbacks import Callback from .gain import Gain @@ -55,16 +55,9 @@ def on_run_end(self, **kwargs) -> None: class LitPerturbation(LightningModule): - """Peturbation optimization module. - """ - def __init__( - self, - *, - batch, - optimizer, - gain, - **kwargs - ): + """Peturbation optimization module.""" + + def __init__(self, *, batch, optimizer, gain, **kwargs): """_summary_ Args: @@ -78,9 +71,9 @@ def __init__( # Perturbation will be same size as batch input self.perturbation = torch.nn.Parameter(torch.zeros_like(batch["input"], dtype=torch.float)) - def train_dataloader(self): from itertools import cycle + return cycle([self.batch]) def configure_optimizers(self): @@ -108,7 +101,7 @@ def __init__( gain: Gain, objective: Objective | None = None, callbacks: dict[str, Callback] | None = None, - **kwargs + **kwargs, ): """_summary_ @@ -137,19 +130,20 @@ def forward( input: torch.Tensor | tuple, target: torch.Tensor | dict[str, Any] | tuple, model: torch.nn.Module | None = None, - **kwargs + **kwargs, ): # Generate a perturbation only if we have a model. This will update # the parameters of self.perturbation. if model is not None: batch = {"input": input, "target": target, "model": model, **kwargs} - self.perturbation = LitPerturbation(batch=batch, optimizer=self.optimizer, gain=self.gain, **kwargs) + self.perturbation = LitPerturbation( + batch=batch, optimizer=self.optimizer, gain=self.gain, **kwargs + ) # FIXME: how do we get a proper device? attacker = Trainer(accelerator="auto", max_steps=self.max_iters) attacker.fit(model=self.perturbation) - # Get perturbation and apply threat model # The mask projector in perturber may require information from target. 
# FIXME: Generalize this so we can just pass perturbation.parameters() to threat_model diff --git a/mart/configs/model/classifier.yaml b/mart/configs/model/classifier.yaml index 5630bd5e..50a3fff0 100644 --- a/mart/configs/model/classifier.yaml +++ b/mart/configs/model/classifier.yaml @@ -57,7 +57,7 @@ test_sequence: seq040: preds: ["logits"] seq050: - output: { preds: preds, target: target, logits: logits, loss: loss} + output: { preds: preds, target: target, logits: logits, loss: loss } modules: preprocessor: ??? From e25af6fe76c312d1a5f9c826896ff8c3a5e57e81 Mon Sep 17 00:00:00 2001 From: Cory Cornelius Date: Tue, 14 Mar 2023 07:35:22 -0700 Subject: [PATCH 008/163] bugfix --- mart/attack/adversary.py | 15 ++++++++++----- 1 file changed, 10 insertions(+), 5 deletions(-) diff --git a/mart/attack/adversary.py b/mart/attack/adversary.py index fe88a89c..cdeac4e3 100644 --- a/mart/attack/adversary.py +++ b/mart/attack/adversary.py @@ -54,7 +54,7 @@ def on_run_end(self, **kwargs) -> None: callback.on_run_end(**kwargs) -class LitPerturbation(LightningModule): +class LitAdversary(LightningModule): """Peturbation optimization module.""" def __init__(self, *, batch, optimizer, gain, **kwargs): @@ -136,18 +136,23 @@ def forward( # the parameters of self.perturbation. if model is not None: batch = {"input": input, "target": target, "model": model, **kwargs} - self.perturbation = LitPerturbation( + perturbation = LitAdversary( batch=batch, optimizer=self.optimizer, gain=self.gain, **kwargs ) + # Set initial perturbation + self.perturbation = [p.to(input.device) for p in perturbation.parameters()] + # FIXME: how do we get a proper device? attacker = Trainer(accelerator="auto", max_steps=self.max_iters) - attacker.fit(model=self.perturbation) + attacker.fit(model=perturbation) + + # FIXME: Can get get rid of one of these? 
+ self.perturbation = [p.to(input.device) for p in perturbation.parameters()] # Get perturbation and apply threat model # The mask projector in perturber may require information from target. # FIXME: Generalize this so we can just pass perturbation.parameters() to threat_model - perturbation = list(self.perturbation.parameters())[0].to(input.device) - output = self.threat_model(input, target, perturbation) + output = self.threat_model(input, target, self.perturbation[0]) return output From 36295b5b920a9689a60cbff11fcba53e83892915 Mon Sep 17 00:00:00 2001 From: Cory Cornelius Date: Tue, 14 Mar 2023 11:09:34 -0700 Subject: [PATCH 009/163] Integrate Perturber into LitPerturber --- mart/attack/adversary.py | 112 +++++++++++------- .../attack/classification_eps1.75_fgsm.yaml | 16 ++- .../classification_eps2_pgd10_step1.yaml | 16 ++- .../classification_eps8_pgd10_step1.yaml | 16 ++- .../gradient_modifier/lp_normalizer.yaml | 0 .../gradient_modifier/sign.yaml | 0 .../{perturber => }/initializer/constant.yaml | 0 .../{perturber => }/initializer/uniform.yaml | 0 .../initializer/uniform_lp.yaml | 0 mart/configs/attack/iterative.yaml | 4 +- .../object_detection_mask_adversary.yaml | 12 +- ...bject_detection_mask_adversary_missed.yaml | 5 +- .../projector/linf_additive_range.yaml | 0 .../projector/lp_additive_range.yaml | 0 .../{perturber => }/projector/mask_range.yaml | 0 .../{perturber => }/projector/range.yaml | 0 16 files changed, 98 insertions(+), 83 deletions(-) rename mart/configs/attack/{perturber => }/gradient_modifier/lp_normalizer.yaml (100%) rename mart/configs/attack/{perturber => }/gradient_modifier/sign.yaml (100%) rename mart/configs/attack/{perturber => }/initializer/constant.yaml (100%) rename mart/configs/attack/{perturber => }/initializer/uniform.yaml (100%) rename mart/configs/attack/{perturber => }/initializer/uniform_lp.yaml (100%) rename mart/configs/attack/{perturber => }/projector/linf_additive_range.yaml (100%) rename mart/configs/attack/{perturber 
=> }/projector/lp_additive_range.yaml (100%) rename mart/configs/attack/{perturber => }/projector/mask_range.yaml (100%) rename mart/configs/attack/{perturber => }/projector/range.yaml (100%) diff --git a/mart/attack/adversary.py b/mart/attack/adversary.py index cdeac4e3..e4fb6858 100644 --- a/mart/attack/adversary.py +++ b/mart/attack/adversary.py @@ -8,8 +8,11 @@ from collections import OrderedDict from typing import Any +from itertools import cycle +from functools import partial import torch +from torch.nn.modules.lazy import LazyModuleMixin from pytorch_lightning import LightningModule, Trainer from .callbacks import Callback @@ -54,39 +57,77 @@ def on_run_end(self, **kwargs) -> None: callback.on_run_end(**kwargs) -class LitAdversary(LightningModule): +class LitPerturber(LazyModuleMixin, LightningModule): """Peturbation optimization module.""" - def __init__(self, *, batch, optimizer, gain, **kwargs): + def __init__( + self, + *, + initializer: Initializer, + optimizer: Callable, + gradient_modifier: GradientModifier | None = None, + projector: Projector | None = None, + gain: str = "loss", + **kwargs + ): """_summary_ Args: """ super().__init__() - self.batch = batch + self.initializer = initializer + self.gradient_modifier = gradient_modifier + self.projector = projector self.optimizer_fn = optimizer self.gain = gain - # Perturbation will be same size as batch input - self.perturbation = torch.nn.Parameter(torch.zeros_like(batch["input"], dtype=torch.float)) + self.perturbation = torch.nn.UninitializedParameter() + + def projector_wrapper(module, args): + if isinstance(module.perturbation, torch.nn.UninitializedBuffer): + raise ValueError("Perturbation must be initialized") - def train_dataloader(self): - from itertools import cycle + input, target = args - return cycle([self.batch]) + # FIXME: How do we get rid of .to(input.device)? + return projector(module.perturbation.data.to(input.device), input, target) + + # Will be called before forward() is called. 
+ if projector is not None: + self.register_forward_pre_hook(projector_wrapper) def configure_optimizers(self): - return self.optimizer_fn(self.parameters()) + return self.optimizer_fn([self.perturbation]) def training_step(self, batch, batch_idx): - outputs = self(**batch) + input = batch.pop("input") + target = batch.pop("target") + model = batch.pop("model") + + if self.has_uninitialized_params(): + # Use this syntax because LazyModuleMixin assume non-keyword arguments + self(input, target) + + outputs = model(input=input, target=target, **batch) + return outputs[self.gain] - def forward(self, *, input, target, model, step=None, **kwargs): - # Calling model with model=None will trigger perturbation application - return model(input=input, target=target, model=None, step=step) + def initialize_parameters(self, input, target): + assert isinstance(self.perturbation, torch.nn.UninitializedParameter) + + self.perturbation.materialize(input.shape, device=input.device) + # A backward hook that will be called when a gradient w.r.t the Tensor is computed. + if self.gradient_modifier is not None: + self.perturbation.register_hook(self.gradient_modifier) + + self.initializer(self.perturbation) + + + def forward(self, input, target, **kwargs): + # FIXME: Can we get rid of .to(input.device)? + return self.perturbation.to(input.device) class Adversary(torch.nn.Module): """An adversary module which generates and applies perturbation to input.""" @@ -95,34 +136,25 @@ def __init__( self, *, threat_model: ThreatModel, - perturber: BatchPerturber | Perturber, - optimizer: torch.optim.Optimizer, - max_iters: int, - gain: Gain, - objective: Objective | None = None, + max_iters: int = 10, callbacks: dict[str, Callback] | None = None, - **kwargs, + **perturber_kwargs, ): """_summary_ Args: threat_model (ThreatModel): A layer which injects perturbation to input, serving as the preprocessing layer to the target model. 
- perturber (BatchPerturber | Perturber): A module that stores perturbations. - optimizer (torch.optim.Optimizer): A PyTorch optimizer. max_iters (int): The max number of attack iterations. - gain (Gain): An adversarial gain function, which is a differentiable estimate of adversarial objective. - objective (Objective | None): A function for computing adversarial objective, which returns True or False. Optional. callbacks (dict[str, Callback] | None): A dictionary of callback objects. Optional. """ - super().__init__(**kwargs) + super().__init__() self.threat_model = threat_model - self.perturber = perturber - self.optimizer = optimizer - self.max_iters = max_iters - self.gain = gain - self.objective = objective - self.callbacks = callbacks + self.callbacks = callbacks # FIXME: Register these with trainer? + self.perturber_factory = partial(LitPerturber, **perturber_kwargs) + + # FIXME: how do we get a proper device? + self.attacker = Trainer(accelerator="auto", max_steps=max_iters, enable_model_summary=False) def forward( self, @@ -135,24 +167,14 @@ def forward( # Generate a perturbation only if we have a model. This will update # the parameters of self.perturbation. if model is not None: - batch = {"input": input, "target": target, "model": model, **kwargs} - perturbation = LitAdversary( - batch=batch, optimizer=self.optimizer, gain=self.gain, **kwargs - ) - - # Set initial perturbation - self.perturbation = [p.to(input.device) for p in perturbation.parameters()] - - # FIXME: how do we get a proper device? - attacker = Trainer(accelerator="auto", max_steps=self.max_iters) - attacker.fit(model=perturbation) + self.perturber = [self.perturber_factory()] - # FIXME: Can get get rid of one of these? 
- self.perturbation = [p.to(input.device) for p in perturbation.parameters()] + benign_dataloader = cycle([{"input": input, "target": target, "model": model, **kwargs}]) + self.attacker.fit(model=self.perturber[0], train_dataloaders=benign_dataloader) # Get perturbation and apply threat model # The mask projector in perturber may require information from target. - # FIXME: Generalize this so we can just pass perturbation.parameters() to threat_model - output = self.threat_model(input, target, self.perturbation[0]) + perturbation = self.perturber[0](input, target) + output = self.threat_model(input, target, perturbation) return output diff --git a/mart/configs/attack/classification_eps1.75_fgsm.yaml b/mart/configs/attack/classification_eps1.75_fgsm.yaml index 0d9768fa..d6a7900f 100644 --- a/mart/configs/attack/classification_eps1.75_fgsm.yaml +++ b/mart/configs/attack/classification_eps1.75_fgsm.yaml @@ -1,9 +1,8 @@ defaults: - iterative_sgd - - perturber: default - - perturber/initializer: constant - - perturber/gradient_modifier: sign - - perturber/projector: linf_additive_range + - initializer: constant + - gradient_modifier: sign + - projector: linf_additive_range - objective: misclassification - threat_model: additive @@ -14,9 +13,8 @@ max_iters: 1 gain: "loss" -perturber: - initializer: - constant: 0 +initializer: + constant: 0 - projector: - eps: 1.75 +projector: + eps: 1.75 diff --git a/mart/configs/attack/classification_eps2_pgd10_step1.yaml b/mart/configs/attack/classification_eps2_pgd10_step1.yaml index 46a37895..2f09ea0f 100644 --- a/mart/configs/attack/classification_eps2_pgd10_step1.yaml +++ b/mart/configs/attack/classification_eps2_pgd10_step1.yaml @@ -1,9 +1,8 @@ defaults: - iterative_sgd - - perturber: default - - perturber/initializer: uniform_lp - - perturber/gradient_modifier: sign - - perturber/projector: linf_additive_range + - initializer: uniform_lp + - gradient_modifier: sign + - projector: linf_additive_range - objective: misclassification 
- threat_model: additive @@ -14,9 +13,8 @@ max_iters: 10 gain: "loss" -perturber: - initializer: - eps: 2 +initializer: + eps: 2 - projector: - eps: 2 +projector: + eps: 2 diff --git a/mart/configs/attack/classification_eps8_pgd10_step1.yaml b/mart/configs/attack/classification_eps8_pgd10_step1.yaml index 1d546a18..f6eb6c7f 100644 --- a/mart/configs/attack/classification_eps8_pgd10_step1.yaml +++ b/mart/configs/attack/classification_eps8_pgd10_step1.yaml @@ -1,9 +1,8 @@ defaults: - iterative_sgd - - perturber: default - - perturber/initializer: uniform_lp - - perturber/gradient_modifier: sign - - perturber/projector: linf_additive_range + - initializer: uniform_lp + - gradient_modifier: sign + - projector: linf_additive_range - objective: misclassification - gain: cross_entropy - threat_model: additive @@ -13,9 +12,8 @@ optimizer: max_iters: 10 -perturber: - initializer: - eps: 8 +initializer: + eps: 8 - projector: - eps: 8 +projector: + eps: 8 diff --git a/mart/configs/attack/perturber/gradient_modifier/lp_normalizer.yaml b/mart/configs/attack/gradient_modifier/lp_normalizer.yaml similarity index 100% rename from mart/configs/attack/perturber/gradient_modifier/lp_normalizer.yaml rename to mart/configs/attack/gradient_modifier/lp_normalizer.yaml diff --git a/mart/configs/attack/perturber/gradient_modifier/sign.yaml b/mart/configs/attack/gradient_modifier/sign.yaml similarity index 100% rename from mart/configs/attack/perturber/gradient_modifier/sign.yaml rename to mart/configs/attack/gradient_modifier/sign.yaml diff --git a/mart/configs/attack/perturber/initializer/constant.yaml b/mart/configs/attack/initializer/constant.yaml similarity index 100% rename from mart/configs/attack/perturber/initializer/constant.yaml rename to mart/configs/attack/initializer/constant.yaml diff --git a/mart/configs/attack/perturber/initializer/uniform.yaml b/mart/configs/attack/initializer/uniform.yaml similarity index 100% rename from 
mart/configs/attack/perturber/initializer/uniform.yaml rename to mart/configs/attack/initializer/uniform.yaml diff --git a/mart/configs/attack/perturber/initializer/uniform_lp.yaml b/mart/configs/attack/initializer/uniform_lp.yaml similarity index 100% rename from mart/configs/attack/perturber/initializer/uniform_lp.yaml rename to mart/configs/attack/initializer/uniform_lp.yaml diff --git a/mart/configs/attack/iterative.yaml b/mart/configs/attack/iterative.yaml index 466322b4..eeb8db6c 100644 --- a/mart/configs/attack/iterative.yaml +++ b/mart/configs/attack/iterative.yaml @@ -1,5 +1,7 @@ _target_: mart.attack.Adversary -perturber: ??? +initializer: ??? +gradient_modifier: ??? +projector: ??? optimizer: ??? max_iters: ??? callbacks: ??? diff --git a/mart/configs/attack/object_detection_mask_adversary.yaml b/mart/configs/attack/object_detection_mask_adversary.yaml index 2cf0baf0..87e5addc 100644 --- a/mart/configs/attack/object_detection_mask_adversary.yaml +++ b/mart/configs/attack/object_detection_mask_adversary.yaml @@ -1,9 +1,8 @@ defaults: - iterative_sgd - - perturber: batch - - perturber/initializer: constant - - perturber/gradient_modifier: sign - - perturber/projector: mask_range + - initializer: constant + - gradient_modifier: sign + - projector: mask_range - callbacks: [progress_bar, image_visualizer] - objective: zero_ap - gain: rcnn_training_loss @@ -15,6 +14,5 @@ optimizer: max_iters: 5 -perturber: - initializer: - constant: 127 +initializer: + constant: 127 diff --git a/mart/configs/attack/object_detection_mask_adversary_missed.yaml b/mart/configs/attack/object_detection_mask_adversary_missed.yaml index e44b5342..9cf8657f 100644 --- a/mart/configs/attack/object_detection_mask_adversary_missed.yaml +++ b/mart/configs/attack/object_detection_mask_adversary_missed.yaml @@ -8,6 +8,5 @@ optimizer: max_iters: 100 -perturber: - initializer: - constant: 127 +initializer: + constant: 127 diff --git 
a/mart/configs/attack/perturber/projector/linf_additive_range.yaml b/mart/configs/attack/projector/linf_additive_range.yaml similarity index 100% rename from mart/configs/attack/perturber/projector/linf_additive_range.yaml rename to mart/configs/attack/projector/linf_additive_range.yaml diff --git a/mart/configs/attack/perturber/projector/lp_additive_range.yaml b/mart/configs/attack/projector/lp_additive_range.yaml similarity index 100% rename from mart/configs/attack/perturber/projector/lp_additive_range.yaml rename to mart/configs/attack/projector/lp_additive_range.yaml diff --git a/mart/configs/attack/perturber/projector/mask_range.yaml b/mart/configs/attack/projector/mask_range.yaml similarity index 100% rename from mart/configs/attack/perturber/projector/mask_range.yaml rename to mart/configs/attack/projector/mask_range.yaml diff --git a/mart/configs/attack/perturber/projector/range.yaml b/mart/configs/attack/projector/range.yaml similarity index 100% rename from mart/configs/attack/perturber/projector/range.yaml rename to mart/configs/attack/projector/range.yaml From c68d9a83b30f4ede39be1e4a6199a577859e8c4a Mon Sep 17 00:00:00 2001 From: Cory Cornelius Date: Tue, 14 Mar 2023 12:03:57 -0700 Subject: [PATCH 010/163] Integrate objective to LitPerturber --- mart/attack/adversary.py | 25 ++++++++++++++++++++----- 1 file changed, 20 insertions(+), 5 deletions(-) diff --git a/mart/attack/adversary.py b/mart/attack/adversary.py index e4fb6858..5d4d344c 100644 --- a/mart/attack/adversary.py +++ b/mart/attack/adversary.py @@ -68,7 +68,8 @@ def __init__( gradient_modifier: GradientModifier | None = None, projector: Projector | None = None, gain: str = "loss", - **kwargs + objective: Objective | None = None, + **kwargs, ): """_summary_ @@ -80,7 +81,8 @@ def __init__( self.gradient_modifier = gradient_modifier self.projector = projector self.optimizer_fn = optimizer - self.gain = gain + self.gain_output = gain + self.objective_fn = objective self.perturbation = 
torch.nn.UninitializedParameter() @@ -109,9 +111,22 @@ def training_step(self, batch, batch_idx): # Use this syntax because LazyModuleMixin assume non-keyword arguments self(input, target) - outputs = model(input=input, target=target, **batch) + self.outputs = model(input=input, target=target, **batch) + self.gain = self.outputs[self.gain_output] + + # objective_fn is optional, because adversaries may never reach their objective. + if self.objective_fn is not None: + self.found = self.objective_fn(**self.outputs) + if self.gain.shape == torch.Size([]): + # A reduced gain value, not an input-wise gain vector. + self.total_gain = self.gain + else: + # No need to calculate new gradients if adversarial examples are already found. + self.total_gain = self.gain[~self.found].sum() + else: + self.total_gain = self.gain.sum() - return outputs[self.gain] + return self.total_gain def initialize_parameters(self, input, target): assert isinstance(self.perturbation, torch.nn.UninitializedParameter) @@ -124,11 +139,11 @@ def initialize_parameters(self, input, target): self.initializer(self.perturbation) - def forward(self, input, target, **kwargs): # FIXME: Can we get rid of .to(input.device)? 
return self.perturbation.to(input.device) + class Adversary(torch.nn.Module): """An adversary module which generates and applies perturbation to input.""" From 689da74d0e1b2d1955d77d0508c59de5bb9634c9 Mon Sep 17 00:00:00 2001 From: Cory Cornelius Date: Tue, 14 Mar 2023 12:22:13 -0700 Subject: [PATCH 011/163] Cleanup use of objective function to compute gain --- mart/attack/adversary.py | 29 ++++++++++++++++------------- 1 file changed, 16 insertions(+), 13 deletions(-) diff --git a/mart/attack/adversary.py b/mart/attack/adversary.py index 5d4d344c..667f3789 100644 --- a/mart/attack/adversary.py +++ b/mart/attack/adversary.py @@ -111,22 +111,25 @@ def training_step(self, batch, batch_idx): # Use this syntax because LazyModuleMixin assume non-keyword arguments self(input, target) - self.outputs = model(input=input, target=target, **batch) - self.gain = self.outputs[self.gain_output] + outputs = model(input=input, target=target, **batch) + gain = outputs[self.gain_output] # objective_fn is optional, because adversaries may never reach their objective. + # FIXME: Make objective a part of the model... if self.objective_fn is not None: - self.found = self.objective_fn(**self.outputs) - if self.gain.shape == torch.Size([]): - # A reduced gain value, not an input-wise gain vector. - self.total_gain = self.gain - else: - # No need to calculate new gradients if adversarial examples are already found. - self.total_gain = self.gain[~self.found].sum() - else: - self.total_gain = self.gain.sum() - - return self.total_gain + found = self.objective_fn(**outputs) + self.log("found", found.sum(), prog_bar=True) + + # No need to calculate new gradients if adversarial examples are already found. 
+ if len(gain.shape) > 0: + gain = gain[~found] + + if len(gain.shape) > 0: + gain = gain.sum() + + self.log("gain", gain, prog_bar=True) + + return gain def initialize_parameters(self, input, target): assert isinstance(self.perturbation, torch.nn.UninitializedParameter) From 3d70dde24c7e6f82f768bacc7e3ecedd47197b7a Mon Sep 17 00:00:00 2001 From: Cory Cornelius Date: Tue, 14 Mar 2023 12:42:35 -0700 Subject: [PATCH 012/163] bugfix --- mart/attack/adversary.py | 19 ++++++++++++++----- 1 file changed, 14 insertions(+), 5 deletions(-) diff --git a/mart/attack/adversary.py b/mart/attack/adversary.py index 667f3789..c0b457fb 100644 --- a/mart/attack/adversary.py +++ b/mart/attack/adversary.py @@ -118,7 +118,7 @@ def training_step(self, batch, batch_idx): # FIXME: Make objective a part of the model... if self.objective_fn is not None: found = self.objective_fn(**outputs) - self.log("found", found.sum(), prog_bar=True) + self.log("found", found.sum().float(), prog_bar=True) # No need to calculate new gradients if adversarial examples are already found. if len(gain.shape) > 0: @@ -172,7 +172,14 @@ def __init__( self.perturber_factory = partial(LitPerturber, **perturber_kwargs) # FIXME: how do we get a proper device? - self.attacker = Trainer(accelerator="auto", max_steps=max_iters, enable_model_summary=False) + self.attacker = partial(Trainer, + accelerator="auto", + log_every_n_steps=1, + max_epochs=1, + max_steps=max_iters, + enable_model_summary=False, + enable_checkpointing=False, + ) def forward( self, @@ -185,10 +192,12 @@ def forward( # Generate a perturbation only if we have a model. This will update # the parameters of self.perturbation. 
if model is not None: - self.perturber = [self.perturber_factory()] + benign_dataloader = cycle( + [{"input": input, "target": target, "model": model, **kwargs}] + ) - benign_dataloader = cycle([{"input": input, "target": target, "model": model, **kwargs}]) - self.attacker.fit(model=self.perturber[0], train_dataloaders=benign_dataloader) + self.perturber = [self.perturber_factory()] + self.attacker().fit(model=self.perturber[0], train_dataloaders=benign_dataloader) # Get perturbation and apply threat model # The mask projector in perturber may require information from target. From 2990cab25bf414e09632f27ffb10bb3103eaf4e2 Mon Sep 17 00:00:00 2001 From: Cory Cornelius Date: Tue, 14 Mar 2023 12:59:36 -0700 Subject: [PATCH 013/163] Make adversarial trainer silent --- mart/attack/adversary.py | 18 ++++++++++++++++-- 1 file changed, 16 insertions(+), 2 deletions(-) diff --git a/mart/attack/adversary.py b/mart/attack/adversary.py index c0b457fb..bf6a50b3 100644 --- a/mart/attack/adversary.py +++ b/mart/attack/adversary.py @@ -56,6 +56,19 @@ def on_run_end(self, **kwargs) -> None: for _name, callback in self.callbacks.items(): callback.on_run_end(**kwargs) +class SilentTrainer(Trainer): + """Suppress logging""" + def fit(self, *args, **kwargs): + logger = logging.getLogger("pytorch_lightning.accelerators.gpu") + logger.propagate = False + + super().fit(*args, **kwargs) + + logger.propagate = True + + def _log_device_info(self): + pass + class LitPerturber(LazyModuleMixin, LightningModule): """Peturbation optimization module.""" @@ -172,8 +185,9 @@ def __init__( self.perturber_factory = partial(LitPerturber, **perturber_kwargs) # FIXME: how do we get a proper device? 
- self.attacker = partial(Trainer, + self.attacker_factory = partial(SilentTrainer, accelerator="auto", + num_sanity_val_steps=0, log_every_n_steps=1, max_epochs=1, max_steps=max_iters, @@ -197,7 +211,7 @@ def forward( ) self.perturber = [self.perturber_factory()] - self.attacker().fit(model=self.perturber[0], train_dataloaders=benign_dataloader) + self.attacker_factory().fit(model=self.perturber[0], train_dataloaders=benign_dataloader) # Get perturbation and apply threat model # The mask projector in perturber may require information from target. From d5e17f7ae8e1395a84cd870baeee62b524e5fadd Mon Sep 17 00:00:00 2001 From: Cory Cornelius Date: Tue, 14 Mar 2023 13:01:04 -0700 Subject: [PATCH 014/163] style --- mart/attack/adversary.py | 29 ++++++++++++++++++----------- 1 file changed, 18 insertions(+), 11 deletions(-) diff --git a/mart/attack/adversary.py b/mart/attack/adversary.py index bf6a50b3..47e58e65 100644 --- a/mart/attack/adversary.py +++ b/mart/attack/adversary.py @@ -6,20 +6,23 @@ from __future__ import annotations +import logging from collections import OrderedDict -from typing import Any -from itertools import cycle from functools import partial +from itertools import cycle +from typing import TYPE_CHECKING, Any, Callable import torch -from torch.nn.modules.lazy import LazyModuleMixin from pytorch_lightning import LightningModule, Trainer +from pytorch_lightning.callbacks import Callback +from torch.nn.modules.lazy import LazyModuleMixin -from .callbacks import Callback -from .gain import Gain -from .objective import Objective -from .perturber import BatchPerturber, Perturber -from .threat_model import ThreatModel +if TYPE_CHECKING: + from .gradient_modifier import GradientModifier + from .initializer import Initializer + from .objective import Objective + from .projector import Projector + from .threat_model import ThreatModel __all__ = ["Adversary"] @@ -57,7 +60,8 @@ def on_run_end(self, **kwargs) -> None: callback.on_run_end(**kwargs) class 
SilentTrainer(Trainer): - """Suppress logging""" + """Suppress logging.""" + def fit(self, *args, **kwargs): logger = logging.getLogger("pytorch_lightning.accelerators.gpu") logger.propagate = False @@ -185,7 +189,8 @@ def __init__( self.perturber_factory = partial(LitPerturber, **perturber_kwargs) # FIXME: how do we get a proper device? - self.attacker_factory = partial(SilentTrainer, + self.attacker_factory = partial( + SilentTrainer, accelerator="auto", num_sanity_val_steps=0, log_every_n_steps=1, @@ -211,7 +216,9 @@ def forward( ) self.perturber = [self.perturber_factory()] - self.attacker_factory().fit(model=self.perturber[0], train_dataloaders=benign_dataloader) + self.attacker_factory().fit( + model=self.perturber[0], train_dataloaders=benign_dataloader + ) # Get perturbation and apply threat model # The mask projector in perturber may require information from target. From b8f87614ccfde0764819a768b4d639f3563ab173 Mon Sep 17 00:00:00 2001 From: Cory Cornelius Date: Tue, 14 Mar 2023 13:39:34 -0700 Subject: [PATCH 015/163] Move threat model into LitPerturber --- mart/attack/adversary.py | 31 ++++++++++++++++++++----------- 1 file changed, 20 insertions(+), 11 deletions(-) diff --git a/mart/attack/adversary.py b/mart/attack/adversary.py index 47e58e65..3c2da1ef 100644 --- a/mart/attack/adversary.py +++ b/mart/attack/adversary.py @@ -82,6 +82,7 @@ def __init__( *, initializer: Initializer, optimizer: Callable, + threat_model: ThreatModel, gradient_modifier: GradientModifier | None = None, projector: Projector | None = None, gain: str = "loss", @@ -91,13 +92,21 @@ def __init__( """_summary_ Args: + initializer (Initializer): To initialize the perturbation. + optimizer (torch.optim.Optimizer): A PyTorch optimizer. + threat_model (ThreatModel): A layer which injects perturbation to input, serving as the preprocessing layer to the target model. + gradient_modifier (GradientModifier): To modify the gradient of perturbation. 
+ projector (Projector): To project the perturbation into some space. + gain (str): Which output to use as an adversarial gain function, which is a differentiable estimate of adversarial objective. (default: loss) + objective (Objective): A function for computing adversarial objective, which returns True or False. Optional. """ super().__init__() self.initializer = initializer + self.optimizer_fn = optimizer + self.threat_model = threat_model self.gradient_modifier = gradient_modifier self.projector = projector - self.optimizer_fn = optimizer self.gain_output = gain self.objective_fn = objective @@ -120,6 +129,8 @@ def configure_optimizers(self): return self.optimizer_fn([self.perturbation]) def training_step(self, batch, batch_idx): + # copy batch since we will modify it and it it passed around + batch = batch.copy() input = batch.pop("input") target = batch.pop("target") model = batch.pop("model") @@ -129,6 +140,7 @@ def training_step(self, batch, batch_idx): self(input, target) outputs = model(input=input, target=target, **batch) + # FIXME: This should really be just `return outputs`. Everything below here should live in the model! gain = outputs[self.gain_output] # objective_fn is optional, because adversaries may never reach their objective. @@ -161,7 +173,11 @@ def initialize_parameters(self, input, target): def forward(self, input, target, **kwargs): # FIXME: Can we get rid of .to(input.device)? - return self.perturbation.to(input.device) + perturbation = self.perturbation.to(input.device) + + # Get perturbation and apply threat model + # The mask projector in perturber may require information from target. 
+ return self.threat_model(input, target, perturbation) class Adversary(torch.nn.Module): @@ -170,7 +186,6 @@ class Adversary(torch.nn.Module): def __init__( self, *, - threat_model: ThreatModel, max_iters: int = 10, callbacks: dict[str, Callback] | None = None, **perturber_kwargs, @@ -178,13 +193,11 @@ def __init__( """_summary_ Args: - threat_model (ThreatModel): A layer which injects perturbation to input, serving as the preprocessing layer to the target model. max_iters (int): The max number of attack iterations. callbacks (dict[str, Callback] | None): A dictionary of callback objects. Optional. """ super().__init__() - self.threat_model = threat_model self.callbacks = callbacks # FIXME: Register these with trainer? self.perturber_factory = partial(LitPerturber, **perturber_kwargs) @@ -220,9 +233,5 @@ def forward( model=self.perturber[0], train_dataloaders=benign_dataloader ) - # Get perturbation and apply threat model - # The mask projector in perturber may require information from target. - perturbation = self.perturber[0](input, target) - output = self.threat_model(input, target, perturbation) - - return output + # Get preturbed input (some threat models, projectors, etc. 
may require information from target like a mask) + return self.perturber[0](input, target) From d39c5c1134fc74a731e76177bc4de5acb92bca24 Mon Sep 17 00:00:00 2001 From: Cory Cornelius Date: Tue, 14 Mar 2023 13:40:44 -0700 Subject: [PATCH 016/163] Make attack callbacks plain PL callbacks --- mart/attack/__init__.py | 4 +-- mart/attack/adversary.py | 34 +------------------ mart/attack/adversary_wrapper.py | 2 -- mart/attack/callbacks/__init__.py | 1 - mart/attack/callbacks/eval_mode.py | 6 ++-- mart/attack/callbacks/no_grad_mode.py | 6 ++-- mart/attack/callbacks/progress_bar.py | 27 +++++---------- mart/attack/callbacks/visualizer.py | 15 +++++--- .../attack/callbacks/progress_bar.yaml | 1 + 9 files changed, 27 insertions(+), 69 deletions(-) diff --git a/mart/attack/__init__.py b/mart/attack/__init__.py index f1cc4637..6e4d5611 100644 --- a/mart/attack/__init__.py +++ b/mart/attack/__init__.py @@ -1,11 +1,9 @@ from .adversary import * from .adversary_in_art import * from .adversary_wrapper import * -from .callbacks import Callback from .gain import * from .gradient_modifier import * from .initializer import * -from .objective import Objective -from .perturber import * +from .objective import * from .projector import * from .threat_model import * diff --git a/mart/attack/adversary.py b/mart/attack/adversary.py index 3c2da1ef..7100bbe1 100644 --- a/mart/attack/adversary.py +++ b/mart/attack/adversary.py @@ -27,38 +27,6 @@ __all__ = ["Adversary"] -class AdversaryCallbackHookMixin(Callback): - """Define event hooks in the Adversary Loop for callbacks.""" - - callbacks = {} - - def on_run_start(self, **kwargs) -> None: - """Prepare the attack loop state.""" - for _name, callback in self.callbacks.items(): - # FIXME: Skip incomplete callback instance. - # Give access of self to callbacks by `adversary=self`. 
- callback.on_run_start(**kwargs) - - def on_examine_start(self, **kwargs) -> None: - for _name, callback in self.callbacks.items(): - callback.on_examine_start(**kwargs) - - def on_examine_end(self, **kwargs) -> None: - for _name, callback in self.callbacks.items(): - callback.on_examine_end(**kwargs) - - def on_advance_start(self, **kwargs) -> None: - for _name, callback in self.callbacks.items(): - callback.on_advance_start(**kwargs) - - def on_advance_end(self, **kwargs) -> None: - for _name, callback in self.callbacks.items(): - callback.on_advance_end(**kwargs) - - def on_run_end(self, **kwargs) -> None: - for _name, callback in self.callbacks.items(): - callback.on_run_end(**kwargs) - class SilentTrainer(Trainer): """Suppress logging.""" @@ -198,7 +166,6 @@ def __init__( """ super().__init__() - self.callbacks = callbacks # FIXME: Register these with trainer? self.perturber_factory = partial(LitPerturber, **perturber_kwargs) # FIXME: how do we get a proper device? @@ -210,6 +177,7 @@ def __init__( max_epochs=1, max_steps=max_iters, enable_model_summary=False, + callbacks=list(callbacks.values()), # ignore keys enable_checkpointing=False, ) diff --git a/mart/attack/adversary_wrapper.py b/mart/attack/adversary_wrapper.py index 4fee3695..ac2a3ba6 100644 --- a/mart/attack/adversary_wrapper.py +++ b/mart/attack/adversary_wrapper.py @@ -8,8 +8,6 @@ import torch -from .callbacks import Callback - __all__ = ["NormalizedAdversaryAdapter"] diff --git a/mart/attack/callbacks/__init__.py b/mart/attack/callbacks/__init__.py index 736f7dd1..7ce8b2cf 100644 --- a/mart/attack/callbacks/__init__.py +++ b/mart/attack/callbacks/__init__.py @@ -1,4 +1,3 @@ -from .base import * from .eval_mode import * from .no_grad_mode import * from .progress_bar import * diff --git a/mart/attack/callbacks/eval_mode.py b/mart/attack/callbacks/eval_mode.py index de5eef75..be3b6397 100644 --- a/mart/attack/callbacks/eval_mode.py +++ b/mart/attack/callbacks/eval_mode.py @@ -4,7 +4,7 @@ # 
SPDX-License-Identifier: BSD-3-Clause # -from .base import Callback +from pytorch_lightning.callbacks import Callback __all__ = ["AttackInEvalMode"] @@ -15,11 +15,11 @@ class AttackInEvalMode(Callback): def __init__(self): self.training_mode_status = None - def on_run_start(self, *, model, **kwargs): + def on_train_start(self, trainer, model): self.training_mode_status = model.training model.train(False) - def on_run_end(self, *, model, **kwargs): + def on_train_end(self, trainer, model): assert self.training_mode_status is not None # Resume the previous training status of the model. diff --git a/mart/attack/callbacks/no_grad_mode.py b/mart/attack/callbacks/no_grad_mode.py index bca4d971..cfb90ead 100644 --- a/mart/attack/callbacks/no_grad_mode.py +++ b/mart/attack/callbacks/no_grad_mode.py @@ -4,7 +4,7 @@ # SPDX-License-Identifier: BSD-3-Clause # -from .base import Callback +from pytorch_lightning.callbacks import Callback __all__ = ["ModelParamsNoGrad"] @@ -15,10 +15,10 @@ class ModelParamsNoGrad(Callback): This callback should not change the result. Don't use unless an attack runs faster. 
""" - def on_run_start(self, *, model, **kwargs): + def on_train_start(self, trainer, model): for param in model.parameters(): param.requires_grad_(False) - def on_run_end(self, *, model, **kwargs): + def on_train_end(self, trainer, model): for param in model.parameters(): param.requires_grad_(True) diff --git a/mart/attack/callbacks/progress_bar.py b/mart/attack/callbacks/progress_bar.py index d175aa5d..564f311c 100644 --- a/mart/attack/callbacks/progress_bar.py +++ b/mart/attack/callbacks/progress_bar.py @@ -5,29 +5,18 @@ # import tqdm - -from .base import Callback +from pytorch_lightning.callbacks import TQDMProgressBar __all__ = ["ProgressBar"] -class ProgressBar(Callback): +class ProgressBar(TQDMProgressBar): """Display progress bar of attack iterations with the gain value.""" - def on_run_start(self, *, adversary, **kwargs): - self.pbar = tqdm.tqdm(total=adversary.max_iters, leave=False, desc="Attack", unit="iter") - - def on_examine_end(self, *, input, adversary, **kwargs): - msg = "" - if hasattr(adversary, "found"): - # there is no adversary.found if adversary.objective_fn() is not defined. 
- msg += f"found={int(sum(adversary.found))}/{len(input)}, " - - msg += f"avg_gain={float(adversary.gain.mean()):.2f}, " - - self.pbar.set_description(msg) - self.pbar.update(1) + def init_train_tqdm(self): + bar = super().init_train_tqdm() + bar.leave = False + bar.set_description("Attack") + bar.unit = "iter" - def on_run_end(self, **kwargs): - self.pbar.close() - del self.pbar + return bar diff --git a/mart/attack/callbacks/visualizer.py b/mart/attack/callbacks/visualizer.py index d0eb0c58..d5c7910c 100644 --- a/mart/attack/callbacks/visualizer.py +++ b/mart/attack/callbacks/visualizer.py @@ -6,10 +6,9 @@ import os +from pytorch_lightning.callbacks import Callback from torchvision.transforms import ToPILImage -from .base import Callback - __all__ = ["PerturbedImageVisualizer"] @@ -25,10 +24,16 @@ def __init__(self, folder): if not os.path.isdir(self.folder): os.makedirs(self.folder) - def on_run_end(self, *, adversary, input, target, model, **kwargs): - adv_input = adversary(input=input, target=target, model=None, **kwargs) + def on_train_batch_end(self, trainer, model, outputs, batch, batch_idx): + # Save input and target for on_train_end + self.input = batch["input"] + self.target = batch["target"] + + def on_train_end(self, trainer, model): + # FIXME: We should really just save this to outputs instead of recomputing adv_input + adv_input = model(self.input, self.target) - for img, tgt in zip(adv_input, target): + for img, tgt in zip(adv_input, self.target): fname = tgt["file_name"] fpath = os.path.join(self.folder, fname) im = self.convert(img / 255) diff --git a/mart/configs/attack/callbacks/progress_bar.yaml b/mart/configs/attack/callbacks/progress_bar.yaml index 21d4c477..e528c714 100644 --- a/mart/configs/attack/callbacks/progress_bar.yaml +++ b/mart/configs/attack/callbacks/progress_bar.yaml @@ -1,2 +1,3 @@ progress_bar: _target_: mart.attack.callbacks.ProgressBar + process_position: 1 From 5b7ee68033af3893bc8479d9a4d7fb8fc15b1e9e Mon Sep 17 00:00:00 
2001 From: Cory Cornelius Date: Tue, 14 Mar 2023 13:44:26 -0700 Subject: [PATCH 017/163] comment --- mart/attack/adversary.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/mart/attack/adversary.py b/mart/attack/adversary.py index 7100bbe1..5fd33e7e 100644 --- a/mart/attack/adversary.py +++ b/mart/attack/adversary.py @@ -190,7 +190,7 @@ def forward( **kwargs, ): # Generate a perturbation only if we have a model. This will update - # the parameters of self.perturbation. + # the parameters of self.perturber if model is not None: benign_dataloader = cycle( [{"input": input, "target": target, "model": model, **kwargs}] From f98031563f130a803dc232cb927db1623038ed21 Mon Sep 17 00:00:00 2001 From: Cory Cornelius Date: Tue, 14 Mar 2023 13:57:13 -0700 Subject: [PATCH 018/163] Remove Perturber --- mart/attack/perturber/__init__.py | 2 - mart/attack/perturber/batch.py | 82 ----------------- mart/attack/perturber/perturber.py | 101 --------------------- mart/configs/attack/perturber/batch.yaml | 7 -- mart/configs/attack/perturber/default.yaml | 4 - 5 files changed, 196 deletions(-) delete mode 100644 mart/attack/perturber/__init__.py delete mode 100644 mart/attack/perturber/batch.py delete mode 100644 mart/attack/perturber/perturber.py delete mode 100644 mart/configs/attack/perturber/batch.yaml delete mode 100644 mart/configs/attack/perturber/default.yaml diff --git a/mart/attack/perturber/__init__.py b/mart/attack/perturber/__init__.py deleted file mode 100644 index 60b2b5f6..00000000 --- a/mart/attack/perturber/__init__.py +++ /dev/null @@ -1,2 +0,0 @@ -from .batch import * -from .perturber import * diff --git a/mart/attack/perturber/batch.py b/mart/attack/perturber/batch.py deleted file mode 100644 index 83b306c2..00000000 --- a/mart/attack/perturber/batch.py +++ /dev/null @@ -1,82 +0,0 @@ -# -# Copyright (C) 2022 Intel Corporation -# -# SPDX-License-Identifier: BSD-3-Clause -# - -from typing import Any, Callable, Dict, Union - -import torch -from 
hydra.utils import instantiate - -from mart.attack.callbacks import Callback - -from ..gradient_modifier import GradientModifier -from ..initializer import Initializer -from ..projector import Projector -from .perturber import Perturber - -__all__ = ["BatchPerturber"] - - -class BatchPerturber(Callback, torch.nn.Module): - """The batch input could be a list or a NCHW tensor. - - We split input into individual examples and run different perturbers accordingly. - """ - - def __init__( - self, - perturber_factory: Callable[[Initializer, GradientModifier, Projector], Perturber], - *perturber_args, - **perturber_kwargs, - ): - super().__init__() - - self.perturber_factory = perturber_factory - self.perturber_args = perturber_args - self.perturber_kwargs = perturber_kwargs - - # Try to create a perturber using factory and kwargs - assert self.perturber_factory(*self.perturber_args, **self.perturber_kwargs) is not None - - self.perturbers = torch.nn.ModuleDict() - - def parameter_groups(self): - """Return parameters along with optim parameters.""" - params = [] - for perturber in self.perturbers.values(): - params += perturber.parameter_groups() - return params - - def on_run_start(self, adversary, input, target, model, **kwargs): - # Remove old perturbers - # FIXME: Can we do this in on_run_end instead? 
- self.perturbers.clear() - - # Create new perturber for each item in the batch - for i in range(len(input)): - perturber = self.perturber_factory(*self.perturber_args, **self.perturber_kwargs) - self.perturbers[f"input_{i}_perturber"] = perturber - - # Trigger callback - for i, (input_i, target_i) in enumerate(zip(input, target)): - perturber = self.perturbers[f"input_{i}_perturber"] - if isinstance(perturber, Callback): - perturber.on_run_start( - adversary=adversary, input=input_i, target=target_i, model=model, **kwargs - ) - - def forward(self, input: torch.Tensor, target: Union[torch.Tensor, Dict[str, Any]]) -> None: - output = [] - for i, (input_i, target_i) in enumerate(zip(input, target)): - perturber = self.perturbers[f"input_{i}_perturber"] - ret_i = perturber(input_i, target_i) - output.append(ret_i) - - if isinstance(input, torch.Tensor): - output = torch.stack(output) - else: - output = tuple(output) - - return output diff --git a/mart/attack/perturber/perturber.py b/mart/attack/perturber/perturber.py deleted file mode 100644 index 097abf3e..00000000 --- a/mart/attack/perturber/perturber.py +++ /dev/null @@ -1,101 +0,0 @@ -# -# Copyright (C) 2022 Intel Corporation -# -# SPDX-License-Identifier: BSD-3-Clause -# - -from typing import Any, Dict, Optional, Union - -import torch - -from mart.attack.callbacks import Callback - -from ..gradient_modifier import GradientModifier -from ..initializer import Initializer -from ..projector import Projector - -__all__ = ["Perturber"] - - -class Perturber(Callback, torch.nn.Module): - """The base class of perturbers. - - A perturber wraps a nn.Parameter and returns this parameter when called. It also enables one to - specify an initialization for this parameter, how to modify gradients computed on this - parameter, and how to project the values of the parameter. 
- """ - - def __init__( - self, - initializer: Initializer, - gradient_modifier: Optional[GradientModifier] = None, - projector: Optional[Projector] = None, - **optim_params, - ): - """_summary_ - - Args: - initializer (object): To initialize the perturbation. - gradient_modifier (object): To modify the gradient of perturbation. - projector (object): To project the perturbation into some space. - optim_params Optional[dict]: Optimization parameters such learning rate and momentum for perturbation. - """ - super().__init__() - - self.initializer = initializer - self.gradient_modifier = gradient_modifier - self.projector = projector - self.optim_params = optim_params - - # Pre-occupy the name of the buffer, so that extra_repr() always gets perturbation. - self.register_buffer("perturbation", torch.nn.UninitializedBuffer(), persistent=False) - - def projector_wrapper(perturber_module, args): - if isinstance(perturber_module.perturbation, torch.nn.UninitializedBuffer): - raise ValueError("Perturbation must be initialized") - - input, target = args - return projector(perturber_module.perturbation.data, input, target) - - # Will be called before forward() is called. - if projector is not None: - self.register_forward_pre_hook(projector_wrapper) - - def on_run_start(self, *, adversary, input, target, model, **kwargs): - # Initialize perturbation. - perturbation = torch.zeros_like(input, requires_grad=True) - - # Register perturbation as a non-persistent buffer even though we will optimize it. This is because it is not - # a parameter of the underlying model but a parameter of the adversary. - self.register_buffer("perturbation", perturbation, persistent=False) - - # A backward hook that will be called when a gradient w.r.t the Tensor is computed. 
- if self.gradient_modifier is not None: - self.perturbation.register_hook(self.gradient_modifier) - - self.initializer(self.perturbation) - - def parameter_groups(self): - """Return parameters along with the pre-defined optimization parameters. - - Example: `[{"params": perturbation, "lr":0.1, "momentum": 0.9}]` - """ - if "params" in self.optim_params: - raise ValueError( - 'Optimization parameters should not include "params" which will override the actual parameters to be optimized. ' - ) - - return [{"params": self.perturbation} | self.optim_params] - - def forward( - self, input: torch.Tensor, target: Union[torch.Tensor, Dict[str, Any]] - ) -> torch.Tensor: - return self.perturbation - - def extra_repr(self): - perturbation = self.perturbation - - return ( - f"{repr(perturbation)}, initializer={self.initializer}," - f"gradient_modifier={self.gradient_modifier}, projector={self.projector}" - ) diff --git a/mart/configs/attack/perturber/batch.yaml b/mart/configs/attack/perturber/batch.yaml deleted file mode 100644 index b3ed0634..00000000 --- a/mart/configs/attack/perturber/batch.yaml +++ /dev/null @@ -1,7 +0,0 @@ -_target_: mart.attack.BatchPerturber -perturber_factory: - _target_: mart.attack.Perturber - _partial_: true -initializer: ??? -gradient_modifier: ??? -projector: ??? diff --git a/mart/configs/attack/perturber/default.yaml b/mart/configs/attack/perturber/default.yaml deleted file mode 100644 index 8025bfd5..00000000 --- a/mart/configs/attack/perturber/default.yaml +++ /dev/null @@ -1,4 +0,0 @@ -_target_: mart.attack.Perturber -initializer: ??? -gradient_modifier: ??? -projector: ??? 
From 90fb9ab19ff0635f30354a63f813cd89465850f6 Mon Sep 17 00:00:00 2001 From: Cory Cornelius Date: Tue, 14 Mar 2023 13:58:28 -0700 Subject: [PATCH 019/163] comment --- mart/attack/callbacks/visualizer.py | 1 + 1 file changed, 1 insertion(+) diff --git a/mart/attack/callbacks/visualizer.py b/mart/attack/callbacks/visualizer.py index d5c7910c..14bf9e12 100644 --- a/mart/attack/callbacks/visualizer.py +++ b/mart/attack/callbacks/visualizer.py @@ -18,6 +18,7 @@ class PerturbedImageVisualizer(Callback): def __init__(self, folder): super().__init__() + # FIXME: This should use the Trainer's logging directory. self.folder = folder self.convert = ToPILImage() From 00aefeebe6a3490a9c7bc63fbde3db3d9febb72d Mon Sep 17 00:00:00 2001 From: Cory Cornelius Date: Tue, 14 Mar 2023 21:01:43 -0700 Subject: [PATCH 020/163] Better silence --- mart/attack/adversary.py | 45 ++++++++++++++++++++++------------------ 1 file changed, 25 insertions(+), 20 deletions(-) diff --git a/mart/attack/adversary.py b/mart/attack/adversary.py index 5fd33e7e..ccaf18d9 100644 --- a/mart/attack/adversary.py +++ b/mart/attack/adversary.py @@ -27,21 +27,6 @@ __all__ = ["Adversary"] -class SilentTrainer(Trainer): - """Suppress logging.""" - - def fit(self, *args, **kwargs): - logger = logging.getLogger("pytorch_lightning.accelerators.gpu") - logger.propagate = False - - super().fit(*args, **kwargs) - - logger.propagate = True - - def _log_device_info(self): - pass - - class LitPerturber(LazyModuleMixin, LightningModule): """Peturbation optimization module.""" @@ -170,7 +155,7 @@ def __init__( # FIXME: how do we get a proper device? 
self.attacker_factory = partial( - SilentTrainer, + Trainer, accelerator="auto", num_sanity_val_steps=0, log_every_n_steps=1, @@ -196,10 +181,30 @@ def forward( [{"input": input, "target": target, "model": model, **kwargs}] ) - self.perturber = [self.perturber_factory()] - self.attacker_factory().fit( - model=self.perturber[0], train_dataloaders=benign_dataloader - ) + with Silence(): + self.perturber = [self.perturber_factory()] + self.attacker_factory().fit( + model=self.perturber[0], train_dataloaders=benign_dataloader + ) # Get preturbed input (some threat models, projectors, etc. may require information from target like a mask) return self.perturber[0](input, target) + +class Silence: + """Suppress logging.""" + + DEFAULT_NAMES = ["pytorch_lightning.utilities.rank_zero", "pytorch_lightning.accelerators.gpu"] + + def __init__(self, names=None): + if names is None: + names = Silence.DEFAULT_NAMES + + self.loggers = [logging.getLogger(name) for name in names] + + def __enter__(self): + for logger in self.loggers: + logger.propagate = False + + def __exit__(self, exc_type, exc_value, traceback): + for logger in self.loggers: + logger.propagate = False From 86bbc138e90292bc2d34880a8b3b0e1e43b6c920 Mon Sep 17 00:00:00 2001 From: Cory Cornelius Date: Tue, 14 Mar 2023 22:17:33 -0700 Subject: [PATCH 021/163] Integrate LitPerturber into Adversary --- mart/attack/adversary.py | 145 ++++++++++++++++----------------------- 1 file changed, 58 insertions(+), 87 deletions(-) diff --git a/mart/attack/adversary.py b/mart/attack/adversary.py index ccaf18d9..7f7cbd07 100644 --- a/mart/attack/adversary.py +++ b/mart/attack/adversary.py @@ -8,8 +8,7 @@ import logging from collections import OrderedDict -from functools import partial -from itertools import cycle +from itertools import repeat from typing import TYPE_CHECKING, Any, Callable import torch @@ -27,7 +26,7 @@ __all__ = ["Adversary"] -class LitPerturber(LazyModuleMixin, LightningModule): +class Adversary(LightningModule): 
"""Peturbation optimization module.""" def __init__( @@ -40,7 +39,8 @@ def __init__( projector: Projector | None = None, gain: str = "loss", objective: Objective | None = None, - **kwargs, + max_iters: int = 10, + callbacks: dict[str, Callback] | None = None, ): """_summary_ @@ -52,6 +52,8 @@ def __init__( projector (Projector): To project the perturbation into some space. gain (str): Which output to use as an adversarial gain function, which is a differentiable estimate of adversarial objective. (default: loss) objective (Objective): A function for computing adversarial objective, which returns True or False. Optional. + max_iters (int): The max number of attack iterations. + callbacks (dict[str, Callback] | None): A dictionary of callback objects. Optional. """ super().__init__() @@ -62,36 +64,34 @@ def __init__( self.projector = projector self.gain_output = gain self.objective_fn = objective + self.projector = projector - self.perturbation = torch.nn.UninitializedParameter() - - def projector_wrapper(module, args): - if isinstance(module.perturbation, torch.nn.UninitializedBuffer): - raise ValueError("Perturbation must be initialized") - - input, target = args + self.max_iters = max_iters + self.callbacks = callbacks - # FIXME: How do we get rid of .to(input.device)? - return projector(module.perturbation.data.to(input.device), input, target) + self.perturbation = None - # Will be called before forward() is called. 
- if projector is not None: - self.register_forward_pre_hook(projector_wrapper) + # FIXME: Setup logging directory correctly + self.attacker = Trainer( + accelerator="auto", + num_sanity_val_steps=0, + log_every_n_steps=1, + max_epochs=1, + enable_model_summary=False, + callbacks=list(self.callbacks.values()), # ignore keys + enable_checkpointing=False, + ) def configure_optimizers(self): return self.optimizer_fn([self.perturbation]) def training_step(self, batch, batch_idx): - # copy batch since we will modify it and it it passed around + # copy batch since we modify it and it is used internally batch = batch.copy() input = batch.pop("input") target = batch.pop("target") model = batch.pop("model") - if self.has_uninitialized_params(): - # Use this syntax because LazyModuleMixin assume non-keyword arguments - self(input, target) - outputs = model(input=input, target=target, **batch) # FIXME: This should really be just `return outputs`. Everything below here should live in the model! gain = outputs[self.gain_output] @@ -113,82 +113,53 @@ def training_step(self, batch, batch_idx): return gain - def initialize_parameters(self, input, target): - assert isinstance(self.perturbation, torch.nn.UninitializedParameter) - - self.perturbation.materialize(input.shape, device=input.device) - - # A backward hook that will be called when a gradient w.r.t the Tensor is computed. - if self.gradient_modifier is not None: - self.perturbation.register_hook(self.gradient_modifier) - - self.initializer(self.perturbation) - - def forward(self, input, target, **kwargs): - # FIXME: Can we get rid of .to(input.device)? - perturbation = self.perturbation.to(input.device) - - # Get perturbation and apply threat model - # The mask projector in perturber may require information from target. 
- return self.threat_model(input, target, perturbation) - - -class Adversary(torch.nn.Module): - """An adversary module which generates and applies perturbation to input.""" - - def __init__( + def forward( self, - *, - max_iters: int = 10, - callbacks: dict[str, Callback] | None = None, - **perturber_kwargs, + input: torch.Tensor | tuple, + target: torch.Tensor | dict[str, Any] | tuple, + model: torch.nn.Module | None = None, + **kwargs, ): - """_summary_ - - Args: - max_iters (int): The max number of attack iterations. - callbacks (dict[str, Callback] | None): A dictionary of callback objects. Optional. - """ - super().__init__() - - self.perturber_factory = partial(LitPerturber, **perturber_kwargs) + # Generate a perturbation only if we have a model. This will populate self.perturbation + if model is not None: + self.perturbation = self.attack(input, target, model, **kwargs) - # FIXME: how do we get a proper device? - self.attacker_factory = partial( - Trainer, - accelerator="auto", - num_sanity_val_steps=0, - log_every_n_steps=1, - max_epochs=1, - max_steps=max_iters, - enable_model_summary=False, - callbacks=list(callbacks.values()), # ignore keys - enable_checkpointing=False, - ) + # Get projected perturbation and apply threat model + # The mask projector in perturber may require information from target. + self.projector(self.perturbation.data, input, target) + return self.threat_model(input, target, self.perturbation) - def forward( + def attack( self, - *, input: torch.Tensor | tuple, target: torch.Tensor | dict[str, Any] | tuple, model: torch.nn.Module | None = None, **kwargs, ): - # Generate a perturbation only if we have a model. 
This will update - # the parameters of self.perturber - if model is not None: - benign_dataloader = cycle( - [{"input": input, "target": target, "model": model, **kwargs}] - ) - - with Silence(): - self.perturber = [self.perturber_factory()] - self.attacker_factory().fit( - model=self.perturber[0], train_dataloaders=benign_dataloader - ) - - # Get preturbed input (some threat models, projectors, etc. may require information from target like a mask) - return self.perturber[0](input, target) + # Create new perturbation if necessary + if self.perturbation is None or self.perturbation.shape != input.shape: + self.perturbation = torch.zeros_like(input, requires_grad=True) + + # FIXME: initialize should really take input and return a perturbation... + # once this is done I think this function can just take kwargs? + self.initializer(self.perturbation) + + if self.gradient_modifier is not None: + self.perturbation.register_hook(self.gradient_modifier) + + # Repeat batch max_iters times + attack_dataloader = repeat( + {"input": input, "target": target, "model": model, **kwargs}, self.max_iters + ) + + with Silence(): + # Attack for another epoch + self.attacker.fit_loop.max_epochs += 1 + self.attacker.fit(model=self, train_dataloaders=attack_dataloader) + + # Keep perturbation on input device since fit moves it to CPU + return self.perturbation.to(input.device) + class Silence: """Suppress logging.""" From 7a12bf8a6e26c15f784b380ac2e37898db0ef084 Mon Sep 17 00:00:00 2001 From: Cory Cornelius Date: Tue, 14 Mar 2023 22:32:33 -0700 Subject: [PATCH 022/163] Uncombine Adversary into Adversary and LitPerturber --- mart/attack/adversary.py | 87 +++++++++++++++++++++++----------------- 1 file changed, 51 insertions(+), 36 deletions(-) diff --git a/mart/attack/adversary.py b/mart/attack/adversary.py index 7f7cbd07..8ecabcdb 100644 --- a/mart/attack/adversary.py +++ b/mart/attack/adversary.py @@ -26,7 +26,54 @@ __all__ = ["Adversary"] -class Adversary(LightningModule): +class 
Adversary(torch.nn.Module): + def __init__( + self, + *, + max_iters: int = 10, + callbacks: dict[str, Callback] | None = None, + **kwargs, + ): + """_summary_ + + Args: + max_iters (int): The max number of attack iterations. + callbacks (dict[str, Callback] | None): A dictionary of callback objects. Optional. + """ + super().__init__() + + self.max_iters = max_iters + self.callbacks = callbacks + + self.perturber = LitPerturber(**kwargs) + + # FIXME: Setup logging directory correctly + self.attacker = Trainer( + accelerator="auto", + num_sanity_val_steps=0, + log_every_n_steps=1, + max_epochs=1, + enable_model_summary=False, + callbacks=list(self.callbacks.values()), # ignore keys + enable_checkpointing=False, + ) + + def forward(self, **batch): + if "model" in batch: + self.perturber.initialize_parameters(**batch) + + # Repeat batch max_iters times + attack_dataloader = repeat(batch, self.max_iters) + + with Silence(): + # Attack for another epoch + self.attacker.fit_loop.max_epochs += 1 + self.attacker.fit(model=self.perturber, train_dataloaders=attack_dataloader) + + return self.perturber(**batch) + + +class LitPerturber(LightningModule): """Peturbation optimization module.""" def __init__( @@ -39,8 +86,6 @@ def __init__( projector: Projector | None = None, gain: str = "loss", objective: Objective | None = None, - max_iters: int = 10, - callbacks: dict[str, Callback] | None = None, ): """_summary_ @@ -52,8 +97,6 @@ def __init__( projector (Projector): To project the perturbation into some space. gain (str): Which output to use as an adversarial gain function, which is a differentiable estimate of adversarial objective. (default: loss) objective (Objective): A function for computing adversarial objective, which returns True or False. Optional. - max_iters (int): The max number of attack iterations. - callbacks (dict[str, Callback] | None): A dictionary of callback objects. Optional. 
""" super().__init__() @@ -66,22 +109,8 @@ def __init__( self.objective_fn = objective self.projector = projector - self.max_iters = max_iters - self.callbacks = callbacks - self.perturbation = None - # FIXME: Setup logging directory correctly - self.attacker = Trainer( - accelerator="auto", - num_sanity_val_steps=0, - log_every_n_steps=1, - max_epochs=1, - enable_model_summary=False, - callbacks=list(self.callbacks.values()), # ignore keys - enable_checkpointing=False, - ) - def configure_optimizers(self): return self.optimizer_fn([self.perturbation]) @@ -115,25 +144,21 @@ def training_step(self, batch, batch_idx): def forward( self, + *, input: torch.Tensor | tuple, target: torch.Tensor | dict[str, Any] | tuple, - model: torch.nn.Module | None = None, **kwargs, ): - # Generate a perturbation only if we have a model. This will populate self.perturbation - if model is not None: - self.perturbation = self.attack(input, target, model, **kwargs) - # Get projected perturbation and apply threat model # The mask projector in perturber may require information from target. 
self.projector(self.perturbation.data, input, target) return self.threat_model(input, target, self.perturbation) - def attack( + def initialize_parameters( self, + *, input: torch.Tensor | tuple, target: torch.Tensor | dict[str, Any] | tuple, - model: torch.nn.Module | None = None, **kwargs, ): # Create new perturbation if necessary @@ -147,16 +172,6 @@ def attack( if self.gradient_modifier is not None: self.perturbation.register_hook(self.gradient_modifier) - # Repeat batch max_iters times - attack_dataloader = repeat( - {"input": input, "target": target, "model": model, **kwargs}, self.max_iters - ) - - with Silence(): - # Attack for another epoch - self.attacker.fit_loop.max_epochs += 1 - self.attacker.fit(model=self, train_dataloaders=attack_dataloader) - # Keep perturbation on input device since fit moves it to CPU return self.perturbation.to(input.device) From ad1872a4417ca888f7d120c94a25ecc142ef5ef4 Mon Sep 17 00:00:00 2001 From: Cory Cornelius Date: Tue, 14 Mar 2023 22:42:45 -0700 Subject: [PATCH 023/163] cleanup --- mart/attack/adversary.py | 24 +++++++++++------------- 1 file changed, 11 insertions(+), 13 deletions(-) diff --git a/mart/attack/adversary.py b/mart/attack/adversary.py index 8ecabcdb..7e63d722 100644 --- a/mart/attack/adversary.py +++ b/mart/attack/adversary.py @@ -7,16 +7,15 @@ from __future__ import annotations import logging -from collections import OrderedDict from itertools import repeat from typing import TYPE_CHECKING, Any, Callable +import pytorch_lightning as pl import torch -from pytorch_lightning import LightningModule, Trainer -from pytorch_lightning.callbacks import Callback -from torch.nn.modules.lazy import LazyModuleMixin if TYPE_CHECKING: + from pytorch_lightning.callbacks import Callback + from .gradient_modifier import GradientModifier from .initializer import Initializer from .objective import Objective @@ -43,18 +42,18 @@ def __init__( super().__init__() self.max_iters = max_iters - self.callbacks = callbacks + # FIXME: 
Should we allow injection of this? self.perturber = LitPerturber(**kwargs) # FIXME: Setup logging directory correctly - self.attacker = Trainer( + self.attacker = pl.Trainer( accelerator="auto", num_sanity_val_steps=0, log_every_n_steps=1, max_epochs=1, enable_model_summary=False, - callbacks=list(self.callbacks.values()), # ignore keys + callbacks=list(callbacks.values()), # ignore keys enable_checkpointing=False, ) @@ -73,7 +72,7 @@ def forward(self, **batch): return self.perturber(**batch) -class LitPerturber(LightningModule): +class LitPerturber(pl.LightningModule): """Peturbation optimization module.""" def __init__( @@ -112,6 +111,8 @@ def __init__( self.perturbation = None def configure_optimizers(self): + assert self.perturbation is not None + return self.optimizer_fn([self.perturbation]) def training_step(self, batch, batch_idx): @@ -125,8 +126,8 @@ def training_step(self, batch, batch_idx): # FIXME: This should really be just `return outputs`. Everything below here should live in the model! gain = outputs[self.gain_output] - # objective_fn is optional, because adversaries may never reach their objective. # FIXME: Make objective a part of the model... + # objective_fn is optional, because adversaries may never reach their objective. if self.objective_fn is not None: found = self.objective_fn(**outputs) self.log("found", found.sum().float(), prog_bar=True) @@ -165,16 +166,13 @@ def initialize_parameters( if self.perturbation is None or self.perturbation.shape != input.shape: self.perturbation = torch.zeros_like(input, requires_grad=True) - # FIXME: initialize should really take input and return a perturbation... + # FIXME: initializer should really take input and return a perturbation. # once this is done I think this function can just take kwargs? 
self.initializer(self.perturbation) if self.gradient_modifier is not None: self.perturbation.register_hook(self.gradient_modifier) - # Keep perturbation on input device since fit moves it to CPU - return self.perturbation.to(input.device) - class Silence: """Suppress logging.""" From 1aa4fc0534b09b626ad3e2387a0fdfaf3cee2fd1 Mon Sep 17 00:00:00 2001 From: Cory Cornelius Date: Tue, 14 Mar 2023 23:00:01 -0700 Subject: [PATCH 024/163] Move silence into utils --- mart/attack/adversary.py | 31 ++++++------------------------- mart/utils/__init__.py | 1 + mart/utils/silent.py | 30 ++++++++++++++++++++++++++++++ 3 files changed, 37 insertions(+), 25 deletions(-) create mode 100644 mart/utils/silent.py diff --git a/mart/attack/adversary.py b/mart/attack/adversary.py index 7e63d722..a23cf2d9 100644 --- a/mart/attack/adversary.py +++ b/mart/attack/adversary.py @@ -6,13 +6,14 @@ from __future__ import annotations -import logging from itertools import repeat from typing import TYPE_CHECKING, Any, Callable import pytorch_lightning as pl import torch +from mart.utils import silent + if TYPE_CHECKING: from pytorch_lightning.callbacks import Callback @@ -57,6 +58,7 @@ def __init__( enable_checkpointing=False, ) + @silent() def forward(self, **batch): if "model" in batch: self.perturber.initialize_parameters(**batch) @@ -64,10 +66,9 @@ def forward(self, **batch): # Repeat batch max_iters times attack_dataloader = repeat(batch, self.max_iters) - with Silence(): - # Attack for another epoch - self.attacker.fit_loop.max_epochs += 1 - self.attacker.fit(model=self.perturber, train_dataloaders=attack_dataloader) + # Attack for another epoch + self.attacker.fit_loop.max_epochs += 1 + self.attacker.fit(model=self.perturber, train_dataloaders=attack_dataloader) return self.perturber(**batch) @@ -172,23 +173,3 @@ def initialize_parameters( if self.gradient_modifier is not None: self.perturbation.register_hook(self.gradient_modifier) - - -class Silence: - """Suppress logging.""" - - 
DEFAULT_NAMES = ["pytorch_lightning.utilities.rank_zero", "pytorch_lightning.accelerators.gpu"] - - def __init__(self, names=None): - if names is None: - names = Silence.DEFAULT_NAMES - - self.loggers = [logging.getLogger(name) for name in names] - - def __enter__(self): - for logger in self.loggers: - logger.propagate = False - - def __exit__(self, exc_type, exc_value, traceback): - for logger in self.loggers: - logger.propagate = False diff --git a/mart/utils/__init__.py b/mart/utils/__init__.py index 91c84339..50e71b3d 100644 --- a/mart/utils/__init__.py +++ b/mart/utils/__init__.py @@ -3,4 +3,5 @@ from .monkey_patch import * from .pylogger import * from .rich_utils import * +from .silent import * from .utils import * diff --git a/mart/utils/silent.py b/mart/utils/silent.py new file mode 100644 index 00000000..b9cbd1c3 --- /dev/null +++ b/mart/utils/silent.py @@ -0,0 +1,30 @@ +# +# Copyright (C) 2022 Intel Corporation +# +# SPDX-License-Identifier: BSD-3-Clause +# + +import logging +from contextlib import ContextDecorator + +__all__ = ["silent"] + + +class silent(ContextDecorator): + """Suppress logging.""" + + DEFAULT_NAMES = ["pytorch_lightning.utilities.rank_zero", "pytorch_lightning.accelerators.gpu"] + + def __init__(self, names=None): + if names is None: + names = silent.DEFAULT_NAMES + + self.loggers = [logging.getLogger(name) for name in names] + + def __enter__(self): + for logger in self.loggers: + logger.propagate = False + + def __exit__(self, exc_type, exc_value, traceback): + for logger in self.loggers: + logger.propagate = False From d09c5b88e316a7f9213bedc81c360ca2610ebff5 Mon Sep 17 00:00:00 2001 From: Cory Cornelius Date: Tue, 14 Mar 2023 23:03:40 -0700 Subject: [PATCH 025/163] bugfix --- mart/attack/adversary.py | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/mart/attack/adversary.py b/mart/attack/adversary.py index a23cf2d9..27db177b 100644 --- a/mart/attack/adversary.py +++ b/mart/attack/adversary.py @@ -66,10 +66,12 
@@ def forward(self, **batch): # Repeat batch max_iters times attack_dataloader = repeat(batch, self.max_iters) - # Attack for another epoch - self.attacker.fit_loop.max_epochs += 1 + # Attack for an epoch self.attacker.fit(model=self.perturber, train_dataloaders=attack_dataloader) + # Enable future attacks to fit by increasing max_epochs + self.attacker.fit_loop.max_epochs += 1 + return self.perturber(**batch) From 78701a4309b7b537919babd85209aa90b39f98b5 Mon Sep 17 00:00:00 2001 From: Cory Cornelius Date: Wed, 15 Mar 2023 07:47:45 -0700 Subject: [PATCH 026/163] bugfix --- mart/attack/adversary.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/mart/attack/adversary.py b/mart/attack/adversary.py index 27db177b..faa514dd 100644 --- a/mart/attack/adversary.py +++ b/mart/attack/adversary.py @@ -47,6 +47,9 @@ def __init__( # FIXME: Should we allow injection of this? self.perturber = LitPerturber(**kwargs) + if callbacks is not None: + callbacks = list(callbacks.values()) # ignore keys + # FIXME: Setup logging directory correctly self.attacker = pl.Trainer( accelerator="auto", @@ -54,7 +57,7 @@ def __init__( log_every_n_steps=1, max_epochs=1, enable_model_summary=False, - callbacks=list(callbacks.values()), # ignore keys + callbacks=callbacks, enable_checkpointing=False, ) From 8143b5db8e56663f41718620554057262c7b4462 Mon Sep 17 00:00:00 2001 From: Cory Cornelius Date: Wed, 15 Mar 2023 09:13:37 -0700 Subject: [PATCH 027/163] cleanup --- mart/attack/adversary.py | 19 +++++++++++++------ 1 file changed, 13 insertions(+), 6 deletions(-) diff --git a/mart/attack/adversary.py b/mart/attack/adversary.py index faa514dd..9bd4935d 100644 --- a/mart/attack/adversary.py +++ b/mart/attack/adversary.py @@ -64,6 +64,8 @@ def __init__( @silent() def forward(self, **batch): if "model" in batch: + # Initialize perturbation because fit will call configure_optimizers and + # we want a fresh perturbation. 
self.perturber.initialize_parameters(**batch) # Repeat batch max_iters times @@ -112,7 +114,6 @@ def __init__( self.projector = projector self.gain_output = gain self.objective_fn = objective - self.projector = projector self.perturbation = None @@ -156,9 +157,14 @@ def forward( target: torch.Tensor | dict[str, Any] | tuple, **kwargs, ): - # Get projected perturbation and apply threat model - # The mask projector in perturber may require information from target. - self.projector(self.perturbation.data, input, target) + if self.perturbation is None: + self.initialize_parameters(input=input, target=target, **kwargs) + + # Projected perturbation... + if self.projector is not None: + self.projector(self.perturbation.data, input, target) + + # ...and apply threat model. return self.threat_model(input, target, self.perturbation) def initialize_parameters( @@ -168,13 +174,14 @@ def initialize_parameters( target: torch.Tensor | dict[str, Any] | tuple, **kwargs, ): - # Create new perturbation if necessary + # Create new perturbation, if necessary if self.perturbation is None or self.perturbation.shape != input.shape: - self.perturbation = torch.zeros_like(input, requires_grad=True) + self.perturbation = torch.empty_like(input, requires_grad=True) # FIXME: initializer should really take input and return a perturbation. # once this is done I think this function can just take kwargs? 
self.initializer(self.perturbation) + # FIXME: I think it's better to use a PL hook here if self.gradient_modifier is not None: self.perturbation.register_hook(self.gradient_modifier) From 7bfc98ffa8be380d3ceb1598bbea08ed7499397a Mon Sep 17 00:00:00 2001 From: Cory Cornelius Date: Wed, 15 Mar 2023 09:14:52 -0700 Subject: [PATCH 028/163] Enable dependency injection on Adversary --- mart/attack/adversary.py | 32 +++++++++++++------------- mart/configs/attack/iterative.yaml | 1 - mart/configs/attack/iterative_sgd.yaml | 1 - 3 files changed, 16 insertions(+), 18 deletions(-) diff --git a/mart/attack/adversary.py b/mart/attack/adversary.py index 9bd4935d..9bb5142e 100644 --- a/mart/attack/adversary.py +++ b/mart/attack/adversary.py @@ -23,7 +23,7 @@ from .projector import Projector from .threat_model import ThreatModel -__all__ = ["Adversary"] +__all__ = ["Adversary", "LitPerturber"] class Adversary(torch.nn.Module): @@ -31,7 +31,8 @@ def __init__( self, *, max_iters: int = 10, - callbacks: dict[str, Callback] | None = None, + trainer: Trainer | None = None, + perturber: LitPerturber | None = None, **kwargs, ): """_summary_ @@ -44,22 +45,21 @@ def __init__( self.max_iters = max_iters - # FIXME: Should we allow injection of this? 
- self.perturber = LitPerturber(**kwargs) - - if callbacks is not None: - callbacks = list(callbacks.values()) # ignore keys + self.perturber = perturber + if self.perturber is None: + self.perturber = LitPerturber(**kwargs) # FIXME: Setup logging directory correctly - self.attacker = pl.Trainer( - accelerator="auto", - num_sanity_val_steps=0, - log_every_n_steps=1, - max_epochs=1, - enable_model_summary=False, - callbacks=callbacks, - enable_checkpointing=False, - ) + self.attacker = trainer + if self.attacker is None: + self.attacker = pl.Trainer( + accelerator="auto", + num_sanity_val_steps=0, + log_every_n_steps=1, + max_epochs=1, + enable_model_summary=False, + enable_checkpointing=False, + ) @silent() def forward(self, **batch): diff --git a/mart/configs/attack/iterative.yaml b/mart/configs/attack/iterative.yaml index eeb8db6c..deb21ea3 100644 --- a/mart/configs/attack/iterative.yaml +++ b/mart/configs/attack/iterative.yaml @@ -4,7 +4,6 @@ gradient_modifier: ??? projector: ??? optimizer: ??? max_iters: ??? -callbacks: ??? objective: ??? gain: ??? threat_model: ??? 
diff --git a/mart/configs/attack/iterative_sgd.yaml b/mart/configs/attack/iterative_sgd.yaml index 5ec86235..2fbc326b 100644 --- a/mart/configs/attack/iterative_sgd.yaml +++ b/mart/configs/attack/iterative_sgd.yaml @@ -1,4 +1,3 @@ defaults: - iterative - optimizer: sgd - - callbacks: [progress_bar] From eaf16070e150abe2ff465f64a9767e7ef39441b7 Mon Sep 17 00:00:00 2001 From: Cory Cornelius Date: Wed, 15 Mar 2023 09:32:31 -0700 Subject: [PATCH 029/163] Make dependency injection backwards compatible --- mart/attack/adversary.py | 14 +++++++------- mart/configs/attack/iterative.yaml | 1 + mart/configs/attack/iterative_sgd.yaml | 1 + 3 files changed, 9 insertions(+), 7 deletions(-) diff --git a/mart/attack/adversary.py b/mart/attack/adversary.py index 9bb5142e..1d64289e 100644 --- a/mart/attack/adversary.py +++ b/mart/attack/adversary.py @@ -15,8 +15,6 @@ from mart.utils import silent if TYPE_CHECKING: - from pytorch_lightning.callbacks import Callback - from .gradient_modifier import GradientModifier from .initializer import Initializer from .objective import Objective @@ -39,16 +37,13 @@ def __init__( Args: max_iters (int): The max number of attack iterations. - callbacks (dict[str, Callback] | None): A dictionary of callback objects. Optional. + trainer (Trainer): A PyTorch-Lightning Trainer object used to fit the perturber. + perturber (LitPerturber): A LitPerturber that manages perturbations. 
""" super().__init__() self.max_iters = max_iters - self.perturber = perturber - if self.perturber is None: - self.perturber = LitPerturber(**kwargs) - # FIXME: Setup logging directory correctly self.attacker = trainer if self.attacker is None: @@ -57,10 +52,15 @@ def __init__( num_sanity_val_steps=0, log_every_n_steps=1, max_epochs=1, + callbacks=list(kwargs.pop("callbacks", {}).values()), enable_model_summary=False, enable_checkpointing=False, ) + self.perturber = perturber + if self.perturber is None: + self.perturber = LitPerturber(**kwargs) + @silent() def forward(self, **batch): if "model" in batch: diff --git a/mart/configs/attack/iterative.yaml b/mart/configs/attack/iterative.yaml index deb21ea3..eeb8db6c 100644 --- a/mart/configs/attack/iterative.yaml +++ b/mart/configs/attack/iterative.yaml @@ -4,6 +4,7 @@ gradient_modifier: ??? projector: ??? optimizer: ??? max_iters: ??? +callbacks: ??? objective: ??? gain: ??? threat_model: ??? diff --git a/mart/configs/attack/iterative_sgd.yaml b/mart/configs/attack/iterative_sgd.yaml index 2fbc326b..5ec86235 100644 --- a/mart/configs/attack/iterative_sgd.yaml +++ b/mart/configs/attack/iterative_sgd.yaml @@ -1,3 +1,4 @@ defaults: - iterative - optimizer: sgd + - callbacks: [progress_bar] From bf5df501dba6934ae8f5580f0bb5a66f9ae54ff2 Mon Sep 17 00:00:00 2001 From: Cory Cornelius Date: Wed, 15 Mar 2023 09:33:15 -0700 Subject: [PATCH 030/163] Replace max_iters with trainer.limit_train_batches --- mart/attack/adversary.py | 19 +++++++++++-------- 1 file changed, 11 insertions(+), 8 deletions(-) diff --git a/mart/attack/adversary.py b/mart/attack/adversary.py index 1d64289e..47aeb176 100644 --- a/mart/attack/adversary.py +++ b/mart/attack/adversary.py @@ -6,7 +6,7 @@ from __future__ import annotations -from itertools import repeat +from itertools import cycle from typing import TYPE_CHECKING, Any, Callable import pytorch_lightning as pl @@ -28,7 +28,6 @@ class Adversary(torch.nn.Module): def __init__( self, *, - max_iters: 
int = 10, trainer: Trainer | None = None, perturber: LitPerturber | None = None, **kwargs, @@ -36,14 +35,11 @@ def __init__( """_summary_ Args: - max_iters (int): The max number of attack iterations. trainer (Trainer): A PyTorch-Lightning Trainer object used to fit the perturber. perturber (LitPerturber): A LitPerturber that manages perturbations. """ super().__init__() - self.max_iters = max_iters - # FIXME: Setup logging directory correctly self.attacker = trainer if self.attacker is None: @@ -52,11 +48,18 @@ def __init__( num_sanity_val_steps=0, log_every_n_steps=1, max_epochs=1, + limit_train_batches=kwargs.pop("max_iters", 10), callbacks=list(kwargs.pop("callbacks", {}).values()), enable_model_summary=False, enable_checkpointing=False, ) + # We feed the same batch to the attack every time so we treat each step as an + # attack iteration. As such, attackers must only run for 1 epoch and must limit + # the number of attack steps via limit_train_batches. + assert self.attacker.max_epochs == 1 + assert self.attacker.limit_train_batches > 0 + self.perturber = perturber if self.perturber is None: self.perturber = LitPerturber(**kwargs) @@ -68,13 +71,13 @@ def forward(self, **batch): # we want a fresh perturbation. 
self.perturber.initialize_parameters(**batch) - # Repeat batch max_iters times - attack_dataloader = repeat(batch, self.max_iters) + # Cycle batch forever since the attacker will know when to stop + attack_dataloader = cycle([batch]) # Attack for an epoch self.attacker.fit(model=self.perturber, train_dataloaders=attack_dataloader) - # Enable future attacks to fit by increasing max_epochs + # Enable future attacks to fit by increasing max_epochs by 1 self.attacker.fit_loop.max_epochs += 1 return self.perturber(**batch) From 75fa07301bf78f3b0954ffed01e6aa68253925dd Mon Sep 17 00:00:00 2001 From: Cory Cornelius Date: Wed, 15 Mar 2023 09:33:30 -0700 Subject: [PATCH 031/163] comments --- mart/attack/adversary.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/mart/attack/adversary.py b/mart/attack/adversary.py index 47aeb176..62d1dfb9 100644 --- a/mart/attack/adversary.py +++ b/mart/attack/adversary.py @@ -44,7 +44,7 @@ def __init__( self.attacker = trainer if self.attacker is None: self.attacker = pl.Trainer( - accelerator="auto", + accelerator="auto", # FIXME: we need to get this on the same device as input... num_sanity_val_steps=0, log_every_n_steps=1, max_epochs=1, From 4f74e53fa4f6b8c876e7b943ffc4ec29429be85a Mon Sep 17 00:00:00 2001 From: Cory Cornelius Date: Wed, 15 Mar 2023 11:13:06 -0700 Subject: [PATCH 032/163] Move perturbation creation into initializer --- mart/attack/adversary.py | 11 +++-------- mart/attack/initializer.py | 16 ++++++++++++---- 2 files changed, 15 insertions(+), 12 deletions(-) diff --git a/mart/attack/adversary.py b/mart/attack/adversary.py index 62d1dfb9..b6b6674f 100644 --- a/mart/attack/adversary.py +++ b/mart/attack/adversary.py @@ -160,6 +160,7 @@ def forward( target: torch.Tensor | dict[str, Any] | tuple, **kwargs, ): + # Act like a lazy module and initialize parameters. 
if self.perturbation is None: self.initialize_parameters(input=input, target=target, **kwargs) @@ -174,16 +175,10 @@ def initialize_parameters( self, *, input: torch.Tensor | tuple, - target: torch.Tensor | dict[str, Any] | tuple, **kwargs, ): - # Create new perturbation, if necessary - if self.perturbation is None or self.perturbation.shape != input.shape: - self.perturbation = torch.empty_like(input, requires_grad=True) - - # FIXME: initializer should really take input and return a perturbation. - # once this is done I think this function can just take kwargs? - self.initializer(self.perturbation) + # Initialize perturbation + self.perturbation = self.initializer(input, self.perturbation) # FIXME: I think it's better to use a PL hook here if self.gradient_modifier is not None: diff --git a/mart/attack/initializer.py b/mart/attack/initializer.py index dcc3303c..28afc1f2 100644 --- a/mart/attack/initializer.py +++ b/mart/attack/initializer.py @@ -15,8 +15,16 @@ class Initializer(abc.ABC): """Initializer base class.""" + def __call__(self, input, perturbation=None) -> torch.Tensor: + if perturbation is None or self.perturbation.shape != input.shape: + perturbation = torch.empty_like(input, requires_grad=True) + + self.initialize(perturbation) + + return perturbation + @abc.abstractmethod - def __call__(self, perturbation: torch.Tensor) -> None: + def initialize(self, perturbation: torch.Tensor) -> None: pass @@ -24,7 +32,7 @@ class Constant(Initializer): def __init__(self, constant: Optional[Union[int, float]] = 0): self.constant = constant - def __call__(self, perturbation: torch.Tensor) -> None: + def initialize(self, perturbation: torch.Tensor) -> None: torch.nn.init.constant_(perturbation, self.constant) @@ -33,7 +41,7 @@ def __init__(self, min: Union[int, float], max: Union[int, float]): self.min = min self.max = max - def __call__(self, perturbation: torch.Tensor) -> None: + def initialize(self, perturbation: torch.Tensor) -> None: 
torch.nn.init.uniform_(perturbation, self.min, self.max) @@ -42,7 +50,7 @@ def __init__(self, eps: Union[int, float], p: Optional[Union[int, float]] = torc self.eps = eps self.p = p - def __call__(self, perturbation: torch.Tensor) -> None: + def initialize(self, perturbation: torch.Tensor) -> None: torch.nn.init.uniform_(perturbation, -self.eps, self.eps) # TODO: make sure the first dim is the batch dim. if self.p is not torch.inf: From cde433cc51ffa93f685b5d59b9ca3a1da27e0eca Mon Sep 17 00:00:00 2001 From: Cory Cornelius Date: Wed, 15 Mar 2023 11:16:31 -0700 Subject: [PATCH 033/163] Add Default projector --- mart/attack/adversary.py | 8 ++++---- mart/attack/projector.py | 1 - 2 files changed, 4 insertions(+), 5 deletions(-) diff --git a/mart/attack/adversary.py b/mart/attack/adversary.py index b6b6674f..3d680b33 100644 --- a/mart/attack/adversary.py +++ b/mart/attack/adversary.py @@ -114,10 +114,11 @@ def __init__( self.optimizer_fn = optimizer self.threat_model = threat_model self.gradient_modifier = gradient_modifier - self.projector = projector + self.projector = projector or Projector() self.gain_output = gain self.objective_fn = objective + # Perturbation is lazily initialized self.perturbation = None def configure_optimizers(self): @@ -164,9 +165,8 @@ def forward( if self.perturbation is None: self.initialize_parameters(input=input, target=target, **kwargs) - # Projected perturbation... - if self.projector is not None: - self.projector(self.perturbation.data, input, target) + # Project perturbation... + self.projector(self.perturbation.data, input, target) # ...and apply threat model. 
return self.threat_model(input, target, self.perturbation) diff --git a/mart/attack/projector.py b/mart/attack/projector.py index 913bce85..244384ac 100644 --- a/mart/attack/projector.py +++ b/mart/attack/projector.py @@ -15,7 +15,6 @@ class Projector(abc.ABC): """A projector modifies nn.Parameter's data.""" - @abc.abstractmethod def __call__( self, tensor: torch.Tensor, From fdcfb5cea385ddc9e5c5925cda59b8e27ba11a07 Mon Sep 17 00:00:00 2001 From: Cory Cornelius Date: Wed, 15 Mar 2023 11:46:08 -0700 Subject: [PATCH 034/163] bugfix --- mart/attack/initializer.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/mart/attack/initializer.py b/mart/attack/initializer.py index 28afc1f2..6b16d194 100644 --- a/mart/attack/initializer.py +++ b/mart/attack/initializer.py @@ -16,7 +16,7 @@ class Initializer(abc.ABC): """Initializer base class.""" def __call__(self, input, perturbation=None) -> torch.Tensor: - if perturbation is None or self.perturbation.shape != input.shape: + if perturbation is None or perturbation.shape != input.shape: perturbation = torch.empty_like(input, requires_grad=True) self.initialize(perturbation) From e01093f83df98728d2da927da8fbb7eb0a91328a Mon Sep 17 00:00:00 2001 From: Cory Cornelius Date: Wed, 15 Mar 2023 11:54:02 -0700 Subject: [PATCH 035/163] comment --- mart/attack/adversary.py | 1 + 1 file changed, 1 insertion(+) diff --git a/mart/attack/adversary.py b/mart/attack/adversary.py index 3d680b33..a3f3c2b6 100644 --- a/mart/attack/adversary.py +++ b/mart/attack/adversary.py @@ -181,5 +181,6 @@ def initialize_parameters( self.perturbation = self.initializer(input, self.perturbation) # FIXME: I think it's better to use a PL hook here + # FIXME: I also think Trainers already implement this functionality so this can probably go away... 
if self.gradient_modifier is not None: self.perturbation.register_hook(self.gradient_modifier) From 63d3e40e3a11c76d99b346c6b1f5d1e5cdb411b2 Mon Sep 17 00:00:00 2001 From: Cory Cornelius Date: Wed, 15 Mar 2023 12:12:32 -0700 Subject: [PATCH 036/163] Move gradient modifier into PL hook --- mart/attack/adversary.py | 11 +++++------ mart/attack/gradient_modifier.py | 1 - 2 files changed, 5 insertions(+), 7 deletions(-) diff --git a/mart/attack/adversary.py b/mart/attack/adversary.py index a3f3c2b6..348473f6 100644 --- a/mart/attack/adversary.py +++ b/mart/attack/adversary.py @@ -113,7 +113,7 @@ def __init__( self.initializer = initializer self.optimizer_fn = optimizer self.threat_model = threat_model - self.gradient_modifier = gradient_modifier + self.gradient_modifier = gradient_modifier or GradientModifier() self.projector = projector or Projector() self.gain_output = gain self.objective_fn = objective @@ -126,6 +126,10 @@ def configure_optimizers(self): return self.optimizer_fn([self.perturbation]) + def on_before_optimizer_step(self, optimizer, optimizer_idx): + # FIXME: pl.Trainer might implement some of this functionality so GradientModifier can probably go away? + self.gradient_modifier(self.perturbation.grad) + def training_step(self, batch, batch_idx): # copy batch since we modify it and it is used internally batch = batch.copy() @@ -179,8 +183,3 @@ def initialize_parameters( ): # Initialize perturbation self.perturbation = self.initializer(input, self.perturbation) - - # FIXME: I think it's better to use a PL hook here - # FIXME: I also think Trainers already implement this functionality so this can probably go away... 
- if self.gradient_modifier is not None: - self.perturbation.register_hook(self.gradient_modifier) diff --git a/mart/attack/gradient_modifier.py b/mart/attack/gradient_modifier.py index fcb9b0db..29383352 100644 --- a/mart/attack/gradient_modifier.py +++ b/mart/attack/gradient_modifier.py @@ -15,7 +15,6 @@ class GradientModifier(abc.ABC): """Gradient modifier base class.""" - @abc.abstractmethod def __call__(self, grad: torch.Tensor) -> torch.Tensor: pass From e71266b3aa0fdc9645af8d7275ffc2f5851cd5c6 Mon Sep 17 00:00:00 2001 From: Cory Cornelius Date: Wed, 15 Mar 2023 12:47:12 -0700 Subject: [PATCH 037/163] Use on_train_epoch_start in favor of initialize_parameters --- mart/attack/adversary.py | 23 +++++++++-------------- 1 file changed, 9 insertions(+), 14 deletions(-) diff --git a/mart/attack/adversary.py b/mart/attack/adversary.py index 348473f6..595c2a1c 100644 --- a/mart/attack/adversary.py +++ b/mart/attack/adversary.py @@ -69,7 +69,7 @@ def forward(self, **batch): if "model" in batch: # Initialize perturbation because fit will call configure_optimizers and # we want a fresh perturbation. - self.perturber.initialize_parameters(**batch) + self.perturber(**batch) # Cycle batch forever since the attacker will know when to stop attack_dataloader = cycle([batch]) @@ -126,9 +126,9 @@ def configure_optimizers(self): return self.optimizer_fn([self.perturbation]) - def on_before_optimizer_step(self, optimizer, optimizer_idx): - # FIXME: pl.Trainer might implement some of this functionality so GradientModifier can probably go away? 
- self.gradient_modifier(self.perturbation.grad) + def on_train_epoch_start(self): + # Force re-initialization of perturbation + self.perturbation = None def training_step(self, batch, batch_idx): # copy batch since we modify it and it is used internally @@ -158,6 +158,10 @@ def training_step(self, batch, batch_idx): return gain + def on_before_optimizer_step(self, optimizer, optimizer_idx): + # FIXME: pl.Trainer might implement some of this functionality so GradientModifier can probably go away? + self.gradient_modifier(self.perturbation.grad) + def forward( self, *, @@ -167,19 +171,10 @@ def forward( ): # Act like a lazy module and initialize parameters. if self.perturbation is None: - self.initialize_parameters(input=input, target=target, **kwargs) + self.perturbation = self.initializer(input) # Project perturbation... self.projector(self.perturbation.data, input, target) # ...and apply threat model. return self.threat_model(input, target, self.perturbation) - - def initialize_parameters( - self, - *, - input: torch.Tensor | tuple, - **kwargs, - ): - # Initialize perturbation - self.perturbation = self.initializer(input, self.perturbation) From 7264b316305288fb9481c077813fa5995c64d511 Mon Sep 17 00:00:00 2001 From: Cory Cornelius Date: Wed, 15 Mar 2023 14:06:28 -0700 Subject: [PATCH 038/163] Make perturbation lazy --- mart/attack/adversary.py | 25 ++++++------------------- 1 file changed, 6 insertions(+), 19 deletions(-) diff --git a/mart/attack/adversary.py b/mart/attack/adversary.py index 595c2a1c..a3d20bce 100644 --- a/mart/attack/adversary.py +++ b/mart/attack/adversary.py @@ -67,15 +67,8 @@ def __init__( @silent() def forward(self, **batch): if "model" in batch: - # Initialize perturbation because fit will call configure_optimizers and - # we want a fresh perturbation. 
- self.perturber(**batch) - - # Cycle batch forever since the attacker will know when to stop - attack_dataloader = cycle([batch]) - - # Attack for an epoch - self.attacker.fit(model=self.perturber, train_dataloaders=attack_dataloader) + # Attack for one epoch + self.attacker.fit(model=self.perturber, train_dataloaders=cycle([batch])) # Enable future attacks to fit by increasing max_epochs by 1 self.attacker.fit_loop.max_epochs += 1 @@ -118,18 +111,12 @@ def __init__( self.gain_output = gain self.objective_fn = objective - # Perturbation is lazily initialized - self.perturbation = None - def configure_optimizers(self): - assert self.perturbation is not None + # Perturbation is lazily initialized but we need a reference to it for the optimizer + self.perturbation = torch.nn.UninitializedBuffer(requires_grad=True) return self.optimizer_fn([self.perturbation]) - def on_train_epoch_start(self): - # Force re-initialization of perturbation - self.perturbation = None - def training_step(self, batch, batch_idx): # copy batch since we modify it and it is used internally batch = batch.copy() @@ -170,8 +157,8 @@ def forward( **kwargs, ): # Act like a lazy module and initialize parameters. - if self.perturbation is None: - self.perturbation = self.initializer(input) + if torch.nn.parameter.is_lazy(self.perturbation): + self.perturbation.materialize(input.shape, device=input.device, dtype=torch.float32) # Project perturbation... 
self.projector(self.perturbation.data, input, target) From 4982851cfb6d547a70a1ed8a7571b9a8a73819f8 Mon Sep 17 00:00:00 2001 From: Cory Cornelius Date: Wed, 15 Mar 2023 14:24:37 -0700 Subject: [PATCH 039/163] Disable logger in attack --- mart/attack/adversary.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/mart/attack/adversary.py b/mart/attack/adversary.py index a3d20bce..e7ca8ea6 100644 --- a/mart/attack/adversary.py +++ b/mart/attack/adversary.py @@ -46,7 +46,7 @@ def __init__( self.attacker = pl.Trainer( accelerator="auto", # FIXME: we need to get this on the same device as input... num_sanity_val_steps=0, - log_every_n_steps=1, + logger=False, max_epochs=1, limit_train_batches=kwargs.pop("max_iters", 10), callbacks=list(kwargs.pop("callbacks", {}).values()), From c5e5ddf0c76dd0045d1abf25990f448fe4136b6f Mon Sep 17 00:00:00 2001 From: Cory Cornelius Date: Wed, 15 Mar 2023 14:25:09 -0700 Subject: [PATCH 040/163] Revert initializer to d33658fac734274bbf87bce88a8b470afa1b3c71 --- mart/attack/adversary.py | 1 + mart/attack/initializer.py | 16 ++++------------ 2 files changed, 5 insertions(+), 12 deletions(-) diff --git a/mart/attack/adversary.py b/mart/attack/adversary.py index e7ca8ea6..a298a45c 100644 --- a/mart/attack/adversary.py +++ b/mart/attack/adversary.py @@ -159,6 +159,7 @@ def forward( # Act like a lazy module and initialize parameters. if torch.nn.parameter.is_lazy(self.perturbation): self.perturbation.materialize(input.shape, device=input.device, dtype=torch.float32) + self.initializer(self.perturbation) # Project perturbation... 
self.projector(self.perturbation.data, input, target) diff --git a/mart/attack/initializer.py b/mart/attack/initializer.py index 6b16d194..dcc3303c 100644 --- a/mart/attack/initializer.py +++ b/mart/attack/initializer.py @@ -15,16 +15,8 @@ class Initializer(abc.ABC): """Initializer base class.""" - def __call__(self, input, perturbation=None) -> torch.Tensor: - if perturbation is None or perturbation.shape != input.shape: - perturbation = torch.empty_like(input, requires_grad=True) - - self.initialize(perturbation) - - return perturbation - @abc.abstractmethod - def initialize(self, perturbation: torch.Tensor) -> None: + def __call__(self, perturbation: torch.Tensor) -> None: pass @@ -32,7 +24,7 @@ class Constant(Initializer): def __init__(self, constant: Optional[Union[int, float]] = 0): self.constant = constant - def initialize(self, perturbation: torch.Tensor) -> None: + def __call__(self, perturbation: torch.Tensor) -> None: torch.nn.init.constant_(perturbation, self.constant) @@ -41,7 +33,7 @@ def __init__(self, min: Union[int, float], max: Union[int, float]): self.min = min self.max = max - def initialize(self, perturbation: torch.Tensor) -> None: + def __call__(self, perturbation: torch.Tensor) -> None: torch.nn.init.uniform_(perturbation, self.min, self.max) @@ -50,7 +42,7 @@ def __init__(self, eps: Union[int, float], p: Optional[Union[int, float]] = torc self.eps = eps self.p = p - def initialize(self, perturbation: torch.Tensor) -> None: + def __call__(self, perturbation: torch.Tensor) -> None: torch.nn.init.uniform_(perturbation, -self.eps, self.eps) # TODO: make sure the first dim is the batch dim. 
if self.p is not torch.inf: From 41357cf67d5c34763575871273032acf3ad45931 Mon Sep 17 00:00:00 2001 From: Cory Cornelius Date: Wed, 15 Mar 2023 14:31:47 -0700 Subject: [PATCH 041/163] cleanup --- mart/attack/adversary.py | 32 ++++++++++++++------------------ 1 file changed, 14 insertions(+), 18 deletions(-) diff --git a/mart/attack/adversary.py b/mart/attack/adversary.py index a298a45c..205f068d 100644 --- a/mart/attack/adversary.py +++ b/mart/attack/adversary.py @@ -28,7 +28,7 @@ class Adversary(torch.nn.Module): def __init__( self, *, - trainer: Trainer | None = None, + trainer: pl.Trainer | None = None, perturber: LitPerturber | None = None, **kwargs, ): @@ -40,19 +40,17 @@ def __init__( """ super().__init__() - # FIXME: Setup logging directory correctly - self.attacker = trainer - if self.attacker is None: - self.attacker = pl.Trainer( - accelerator="auto", # FIXME: we need to get this on the same device as input... - num_sanity_val_steps=0, - logger=False, - max_epochs=1, - limit_train_batches=kwargs.pop("max_iters", 10), - callbacks=list(kwargs.pop("callbacks", {}).values()), - enable_model_summary=False, - enable_checkpointing=False, - ) + self.attacker = trainer or pl.Trainer( + accelerator="auto", # FIXME: we need to get this on the same device as input... + num_sanity_val_steps=0, + logger=False, + max_epochs=1, + limit_train_batches=kwargs.pop("max_iters", 10), + callbacks=list(kwargs.pop("callbacks", {}).values()), + enable_model_summary=False, + enable_checkpointing=False, + enable_progress_bar=False, + ) # We feed the same batch to the attack every time so we treat each step as an # attack iteration. 
As such, attackers must only run for 1 epoch and must limit @@ -60,9 +58,7 @@ def __init__( assert self.attacker.max_epochs == 1 assert self.attacker.limit_train_batches > 0 - self.perturber = perturber - if self.perturber is None: - self.perturber = LitPerturber(**kwargs) + self.perturber = perturber or LitPerturber(**kwargs) @silent() def forward(self, **batch): @@ -156,7 +152,7 @@ def forward( target: torch.Tensor | dict[str, Any] | tuple, **kwargs, ): - # Act like a lazy module and initialize parameters. + # Materialize perturbation and initialize it if torch.nn.parameter.is_lazy(self.perturbation): self.perturbation.materialize(input.shape, device=input.device, dtype=torch.float32) self.initializer(self.perturbation) From b5d116b52564af30461fe9ef63fe190255e472d2 Mon Sep 17 00:00:00 2001 From: Cory Cornelius Date: Wed, 15 Mar 2023 15:18:36 -0700 Subject: [PATCH 042/163] on_before_optimizer_step -> configure_gradient_clipping --- mart/attack/adversary.py | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/mart/attack/adversary.py b/mart/attack/adversary.py index 205f068d..4183e825 100644 --- a/mart/attack/adversary.py +++ b/mart/attack/adversary.py @@ -141,9 +141,12 @@ def training_step(self, batch, batch_idx): return gain - def on_before_optimizer_step(self, optimizer, optimizer_idx): + def configure_gradient_clipping(self, optimizer, optimizer_idx, gradient_clip_val=None, gradient_clip_algorithm=None): + super().configure_gradient_clipping(optimizer, gradient_clip_val, gradient_clip_algorithm) + # FIXME: pl.Trainer might implement some of this functionality so GradientModifier can probably go away? - self.gradient_modifier(self.perturbation.grad) + # More so, why not loop through optimizer.param_groups? 
+ self.perturbation.grad = self.gradient_modifier(self.perturbation.grad) def forward( self, From 79dabd43ee3c84f4125b81ccd18cc38a2e7915e0 Mon Sep 17 00:00:00 2001 From: Cory Cornelius Date: Wed, 15 Mar 2023 15:21:41 -0700 Subject: [PATCH 043/163] comments --- mart/attack/gradient_modifier.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/mart/attack/gradient_modifier.py b/mart/attack/gradient_modifier.py index 29383352..30ad7565 100644 --- a/mart/attack/gradient_modifier.py +++ b/mart/attack/gradient_modifier.py @@ -19,11 +19,13 @@ def __call__(self, grad: torch.Tensor) -> torch.Tensor: pass +# FIXME: We should really take inspiration from torch.nn.utils.clip_grad_norm_ class Sign(GradientModifier): def __call__(self, grad: torch.Tensor) -> torch.Tensor: return grad.sign() +# FIXME: We should really take inspiration from torch.nn.utils.clip_grad_norm_ class LpNormalizer(GradientModifier): """Scale gradients by a certain L-p norm.""" From a6f1d84ee09ddce37904af2c03527c29f632fee1 Mon Sep 17 00:00:00 2001 From: Cory Cornelius Date: Wed, 15 Mar 2023 15:31:00 -0700 Subject: [PATCH 044/163] Disable attack progress bar --- mart/attack/adversary.py | 3 --- mart/configs/attack/iterative.yaml | 1 - mart/configs/attack/iterative_sgd.yaml | 1 - 3 files changed, 5 deletions(-) diff --git a/mart/attack/adversary.py b/mart/attack/adversary.py index 4183e825..83962193 100644 --- a/mart/attack/adversary.py +++ b/mart/attack/adversary.py @@ -128,7 +128,6 @@ def training_step(self, batch, batch_idx): # objective_fn is optional, because adversaries may never reach their objective. if self.objective_fn is not None: found = self.objective_fn(**outputs) - self.log("found", found.sum().float(), prog_bar=True) # No need to calculate new gradients if adversarial examples are already found. 
if len(gain.shape) > 0: @@ -137,8 +136,6 @@ def training_step(self, batch, batch_idx): if len(gain.shape) > 0: gain = gain.sum() - self.log("gain", gain, prog_bar=True) - return gain def configure_gradient_clipping(self, optimizer, optimizer_idx, gradient_clip_val=None, gradient_clip_algorithm=None): diff --git a/mart/configs/attack/iterative.yaml b/mart/configs/attack/iterative.yaml index eeb8db6c..deb21ea3 100644 --- a/mart/configs/attack/iterative.yaml +++ b/mart/configs/attack/iterative.yaml @@ -4,7 +4,6 @@ gradient_modifier: ??? projector: ??? optimizer: ??? max_iters: ??? -callbacks: ??? objective: ??? gain: ??? threat_model: ??? diff --git a/mart/configs/attack/iterative_sgd.yaml b/mart/configs/attack/iterative_sgd.yaml index 5ec86235..2fbc326b 100644 --- a/mart/configs/attack/iterative_sgd.yaml +++ b/mart/configs/attack/iterative_sgd.yaml @@ -1,4 +1,3 @@ defaults: - iterative - optimizer: sgd - - callbacks: [progress_bar] From 2b9f40309275f8835a5c7f1a9ce430b123e6a219 Mon Sep 17 00:00:00 2001 From: Cory Cornelius Date: Wed, 15 Mar 2023 15:39:31 -0700 Subject: [PATCH 045/163] comments --- mart/attack/adversary.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/mart/attack/adversary.py b/mart/attack/adversary.py index 83962193..e4e89179 100644 --- a/mart/attack/adversary.py +++ b/mart/attack/adversary.py @@ -139,10 +139,11 @@ def training_step(self, batch, batch_idx): return gain def configure_gradient_clipping(self, optimizer, optimizer_idx, gradient_clip_val=None, gradient_clip_algorithm=None): + # Configuring gradient clipping in the training is still useful, so use it. super().configure_gradient_clipping(optimizer, gradient_clip_val, gradient_clip_algorithm) - # FIXME: pl.Trainer might implement some of this functionality so GradientModifier can probably go away? - # More so, why not loop through optimizer.param_groups? + # FIXME: Why not loop through optimizer.param_groups? + # FIXME: Make gradient modifier an in-place operation. 
Will make it easier to fix the above. self.perturbation.grad = self.gradient_modifier(self.perturbation.grad) def forward( From 0bd7c7c5ac7abdf348c7c7acff070db6f1fc99e0 Mon Sep 17 00:00:00 2001 From: Cory Cornelius Date: Wed, 15 Mar 2023 15:41:11 -0700 Subject: [PATCH 046/163] comments --- mart/attack/adversary.py | 1 + 1 file changed, 1 insertion(+) diff --git a/mart/attack/adversary.py b/mart/attack/adversary.py index e4e89179..15c65b78 100644 --- a/mart/attack/adversary.py +++ b/mart/attack/adversary.py @@ -159,6 +159,7 @@ def forward( self.initializer(self.perturbation) # Project perturbation... + # FIXME: Projector should probably be an in-place operation instead of passing .data? self.projector(self.perturbation.data, input, target) # ...and apply threat model. From bac2bd49d5b1b70a199ccf679ab61e9c4a2baa42 Mon Sep 17 00:00:00 2001 From: Cory Cornelius Date: Wed, 15 Mar 2023 15:41:45 -0700 Subject: [PATCH 047/163] comments --- mart/attack/adversary.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/mart/attack/adversary.py b/mart/attack/adversary.py index 15c65b78..dcbb1739 100644 --- a/mart/attack/adversary.py +++ b/mart/attack/adversary.py @@ -139,7 +139,7 @@ def training_step(self, batch, batch_idx): return gain def configure_gradient_clipping(self, optimizer, optimizer_idx, gradient_clip_val=None, gradient_clip_algorithm=None): - # Configuring gradient clipping in the training is still useful, so use it. + # Configuring gradient clipping in pl.Trainer is still useful, so use it. super().configure_gradient_clipping(optimizer, gradient_clip_val, gradient_clip_algorithm) # FIXME: Why not loop through optimizer.param_groups? 
From 079c15c63bf346f6f8b82b1d0060ff91d08bde96 Mon Sep 17 00:00:00 2001 From: Cory Cornelius Date: Wed, 15 Mar 2023 16:07:59 -0700 Subject: [PATCH 048/163] cleanup --- mart/attack/adversary.py | 11 +++++------ 1 file changed, 5 insertions(+), 6 deletions(-) diff --git a/mart/attack/adversary.py b/mart/attack/adversary.py index dcbb1739..51765aa6 100644 --- a/mart/attack/adversary.py +++ b/mart/attack/adversary.py @@ -44,7 +44,7 @@ def __init__( accelerator="auto", # FIXME: we need to get this on the same device as input... num_sanity_val_steps=0, logger=False, - max_epochs=1, + max_epochs=0, limit_train_batches=kwargs.pop("max_iters", 10), callbacks=list(kwargs.pop("callbacks", {}).values()), enable_model_summary=False, @@ -55,7 +55,7 @@ def __init__( # We feed the same batch to the attack every time so we treat each step as an # attack iteration. As such, attackers must only run for 1 epoch and must limit # the number of attack steps via limit_train_batches. - assert self.attacker.max_epochs == 1 + assert self.attacker.max_epochs == 0 assert self.attacker.limit_train_batches > 0 self.perturber = perturber or LitPerturber(**kwargs) @@ -63,11 +63,10 @@ def __init__( @silent() def forward(self, **batch): if "model" in batch: - # Attack for one epoch - self.attacker.fit(model=self.perturber, train_dataloaders=cycle([batch])) - - # Enable future attacks to fit by increasing max_epochs by 1 + # Attack, aka fit a perturbation, for one epoch by cycling over the same batch of inputs. + # We use Trainer.limit_train_batches to control the number of attack iterations. 
self.attacker.fit_loop.max_epochs += 1 + self.attacker.fit(model=self.perturber, train_dataloaders=cycle([batch])) return self.perturber(**batch) From 1a57cb65a953afaa3b48ee1369419a5a8983d588 Mon Sep 17 00:00:00 2001 From: Cory Cornelius Date: Wed, 15 Mar 2023 16:16:34 -0700 Subject: [PATCH 049/163] comments --- mart/attack/adversary.py | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/mart/attack/adversary.py b/mart/attack/adversary.py index 51765aa6..48b9f233 100644 --- a/mart/attack/adversary.py +++ b/mart/attack/adversary.py @@ -63,10 +63,10 @@ def __init__( @silent() def forward(self, **batch): if "model" in batch: - # Attack, aka fit a perturbation, for one epoch by cycling over the same batch of inputs. + # Attack, aka fit a perturbation, for one epoch by cycling over the same input batch. # We use Trainer.limit_train_batches to control the number of attack iterations. self.attacker.fit_loop.max_epochs += 1 - self.attacker.fit(model=self.perturber, train_dataloaders=cycle([batch])) + self.attacker.fit(self.perturber, train_dataloaders=cycle([batch])) return self.perturber(**batch) @@ -119,11 +119,12 @@ def training_step(self, batch, batch_idx): target = batch.pop("target") model = batch.pop("model") + # We need to evaluate the whole model, so call it normally to get a gain outputs = model(input=input, target=target, **batch) - # FIXME: This should really be just `return outputs`. Everything below here should live in the model! + # FIXME: This should really be just `return outputs`. But this might require a new sequence? + # FIXME: Everything below here should live in the model as modules. gain = outputs[self.gain_output] - # FIXME: Make objective a part of the model... # objective_fn is optional, because adversaries may never reach their objective. 
if self.objective_fn is not None: found = self.objective_fn(**outputs) From 7b965903743d654b59817efd129b2f9dfbd018de Mon Sep 17 00:00:00 2001 From: Cory Cornelius Date: Wed, 15 Mar 2023 16:18:37 -0700 Subject: [PATCH 050/163] cleanup --- mart/attack/adversary.py | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/mart/attack/adversary.py b/mart/attack/adversary.py index 48b9f233..0405b812 100644 --- a/mart/attack/adversary.py +++ b/mart/attack/adversary.py @@ -115,12 +115,11 @@ def configure_optimizers(self): def training_step(self, batch, batch_idx): # copy batch since we modify it and it is used internally batch = batch.copy() - input = batch.pop("input") - target = batch.pop("target") + + # We need to evaluate the perturbation against the whole model, so call it normally to get a gain. model = batch.pop("model") + outputs = model(**batch) - # We need to evaluate the whole model, so call it normally to get a gain - outputs = model(input=input, target=target, **batch) # FIXME: This should really be just `return outputs`. But this might require a new sequence? # FIXME: Everything below here should live in the model as modules. gain = outputs[self.gain_output] From 1357297679ac7f0bb8409dffc3e9331b8efb4faf Mon Sep 17 00:00:00 2001 From: Cory Cornelius Date: Wed, 15 Mar 2023 16:23:46 -0700 Subject: [PATCH 051/163] comments --- mart/attack/adversary.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/mart/attack/adversary.py b/mart/attack/adversary.py index 0405b812..fb7644dc 100644 --- a/mart/attack/adversary.py +++ b/mart/attack/adversary.py @@ -62,12 +62,15 @@ def __init__( @silent() def forward(self, **batch): + # Adversary lives within a sequence of nn.Modules. To signal the adversary should attack, one + # must pass a model to attack when calling the adversary. if "model" in batch: # Attack, aka fit a perturbation, for one epoch by cycling over the same input batch. 
# We use Trainer.limit_train_batches to control the number of attack iterations. self.attacker.fit_loop.max_epochs += 1 self.attacker.fit(self.perturber, train_dataloaders=cycle([batch])) + # Always use perturb the current input. return self.perturber(**batch) From 3e0d27cd58338471132f04b20ea27e14f820f36b Mon Sep 17 00:00:00 2001 From: Cory Cornelius Date: Thu, 16 Mar 2023 11:55:21 -0700 Subject: [PATCH 052/163] comment --- mart/attack/adversary.py | 1 + 1 file changed, 1 insertion(+) diff --git a/mart/attack/adversary.py b/mart/attack/adversary.py index fb7644dc..d5975828 100644 --- a/mart/attack/adversary.py +++ b/mart/attack/adversary.py @@ -111,6 +111,7 @@ def __init__( def configure_optimizers(self): # Perturbation is lazily initialized but we need a reference to it for the optimizer + # FIXME: It would be nice if we didn't have to create this buffer every time someone call's fit. self.perturbation = torch.nn.UninitializedBuffer(requires_grad=True) return self.optimizer_fn([self.perturbation]) From e59767e8684ceb79831fba337b288f2b4628ea71 Mon Sep 17 00:00:00 2001 From: Cory Cornelius Date: Thu, 16 Mar 2023 13:12:42 -0700 Subject: [PATCH 053/163] Move LitPerturber into perturber.py --- mart/attack/__init__.py | 1 + mart/attack/adversary.py | 107 +---------------------------------- mart/attack/perturber.py | 118 +++++++++++++++++++++++++++++++++++++++ 3 files changed, 121 insertions(+), 105 deletions(-) create mode 100644 mart/attack/perturber.py diff --git a/mart/attack/__init__.py b/mart/attack/__init__.py index 6e4d5611..1247df12 100644 --- a/mart/attack/__init__.py +++ b/mart/attack/__init__.py @@ -5,5 +5,6 @@ from .gradient_modifier import * from .initializer import * from .objective import * +from .perturber import * from .projector import * from .threat_model import * diff --git a/mart/attack/adversary.py b/mart/attack/adversary.py index d5975828..8e5c3acf 100644 --- a/mart/attack/adversary.py +++ b/mart/attack/adversary.py @@ -7,21 +7,13 @@ from 
__future__ import annotations from itertools import cycle -from typing import TYPE_CHECKING, Any, Callable import pytorch_lightning as pl -import torch +from mart.attack import LitPerturber from mart.utils import silent -if TYPE_CHECKING: - from .gradient_modifier import GradientModifier - from .initializer import Initializer - from .objective import Objective - from .projector import Projector - from .threat_model import ThreatModel - -__all__ = ["Adversary", "LitPerturber"] +__all__ = ["Adversary"] class Adversary(torch.nn.Module): @@ -72,98 +64,3 @@ def forward(self, **batch): # Always use perturb the current input. return self.perturber(**batch) - - -class LitPerturber(pl.LightningModule): - """Peturbation optimization module.""" - - def __init__( - self, - *, - initializer: Initializer, - optimizer: Callable, - threat_model: ThreatModel, - gradient_modifier: GradientModifier | None = None, - projector: Projector | None = None, - gain: str = "loss", - objective: Objective | None = None, - ): - """_summary_ - - Args: - initializer (Initializer): To initialize the perturbation. - optimizer (torch.optim.Optimizer): A PyTorch optimizer. - threat_model (ThreatModel): A layer which injects perturbation to input, serving as the preprocessing layer to the target model. - gradient_modifier (GradientModifier): To modify the gradient of perturbation. - projector (Projector): To project the perturbation into some space. - gain (str): Which output to use as an adversarial gain function, which is a differentiable estimate of adversarial objective. (default: loss) - objective (Objective): A function for computing adversarial objective, which returns True or False. Optional. 
- """ - super().__init__() - - self.initializer = initializer - self.optimizer_fn = optimizer - self.threat_model = threat_model - self.gradient_modifier = gradient_modifier or GradientModifier() - self.projector = projector or Projector() - self.gain_output = gain - self.objective_fn = objective - - def configure_optimizers(self): - # Perturbation is lazily initialized but we need a reference to it for the optimizer - # FIXME: It would be nice if we didn't have to create this buffer every time someone call's fit. - self.perturbation = torch.nn.UninitializedBuffer(requires_grad=True) - - return self.optimizer_fn([self.perturbation]) - - def training_step(self, batch, batch_idx): - # copy batch since we modify it and it is used internally - batch = batch.copy() - - # We need to evaluate the perturbation against the whole model, so call it normally to get a gain. - model = batch.pop("model") - outputs = model(**batch) - - # FIXME: This should really be just `return outputs`. But this might require a new sequence? - # FIXME: Everything below here should live in the model as modules. - gain = outputs[self.gain_output] - - # objective_fn is optional, because adversaries may never reach their objective. - if self.objective_fn is not None: - found = self.objective_fn(**outputs) - - # No need to calculate new gradients if adversarial examples are already found. - if len(gain.shape) > 0: - gain = gain[~found] - - if len(gain.shape) > 0: - gain = gain.sum() - - return gain - - def configure_gradient_clipping(self, optimizer, optimizer_idx, gradient_clip_val=None, gradient_clip_algorithm=None): - # Configuring gradient clipping in pl.Trainer is still useful, so use it. - super().configure_gradient_clipping(optimizer, gradient_clip_val, gradient_clip_algorithm) - - # FIXME: Why not loop through optimizer.param_groups? - # FIXME: Make gradient modifier an in-place operation. Will make it easier to fix the above. 
- self.perturbation.grad = self.gradient_modifier(self.perturbation.grad) - - def forward( - self, - *, - input: torch.Tensor | tuple, - target: torch.Tensor | dict[str, Any] | tuple, - **kwargs, - ): - # Materialize perturbation and initialize it - if torch.nn.parameter.is_lazy(self.perturbation): - self.perturbation.materialize(input.shape, device=input.device, dtype=torch.float32) - self.initializer(self.perturbation) - - # Project perturbation... - # FIXME: Projector should probably be an in-place operation instead of passing .data? - self.projector(self.perturbation.data, input, target) - - # ...and apply threat model. - return self.threat_model(input, target, self.perturbation) diff --git a/mart/attack/perturber.py b/mart/attack/perturber.py new file mode 100644 index 00000000..6c0af36d --- /dev/null +++ b/mart/attack/perturber.py @@ -0,0 +1,118 @@ +# +# Copyright (C) 2022 Intel Corporation +# +# SPDX-License-Identifier: BSD-3-Clause +# + +from __future__ import annotations + +from typing import TYPE_CHECKING, Any, Callable + +import pytorch_lightning as pl +import torch + +if TYPE_CHECKING: + from .gradient_modifier import GradientModifier + from .initializer import Initializer + from .objective import Objective + from .projector import Projector + from .threat_model import ThreatModel + +__all__ = ["LitPerturber"] + + +class LitPerturber(pl.LightningModule): + """Peturbation optimization module.""" + + def __init__( + self, + *, + initializer: Initializer, + optimizer: Callable, + threat_model: ThreatModel, + gradient_modifier: GradientModifier | None = None, + projector: Projector | None = None, + gain: str = "loss", + objective: Objective | None = None, + ): + """_summary_ + + Args: + initializer (Initializer): To initialize the perturbation. + optimizer (torch.optim.Optimizer): A PyTorch optimizer. + threat_model (ThreatModel): A layer which injects perturbation to input, serving as the preprocessing layer to the target model. 
+ gradient_modifier (GradientModifier): To modify the gradient of perturbation. + projector (Projector): To project the perturbation into some space. + gain (str): Which output to use as an adversarial gain function, which is a differentiable estimate of adversarial objective. (default: loss) + objective (Objective): A function for computing adversarial objective, which returns True or False. Optional. + """ + super().__init__() + + self.initializer = initializer + self.optimizer_fn = optimizer + self.threat_model = threat_model + self.gradient_modifier = gradient_modifier or GradientModifier() + self.projector = projector or Projector() + self.gain_output = gain + self.objective_fn = objective + + def configure_optimizers(self): + # Perturbation is lazily initialized but we need a reference to it for the optimizer + # FIXME: It would be nice if we didn't have to create this buffer every time someone call's fit. + self.perturbation = torch.nn.UninitializedBuffer(requires_grad=True) + + return self.optimizer_fn([self.perturbation]) + + def training_step(self, batch, batch_idx): + # copy batch since we modify it and it is used internally + batch = batch.copy() + + # We need to evaluate the perturbation against the whole model, so call it normally to get a gain. + model = batch.pop("model") + outputs = model(**batch) + + # FIXME: This should really be just `return outputs`. But this might require a new sequence? + # FIXME: Everything below here should live in the model as modules. + gain = outputs[self.gain_output] + + # objective_fn is optional, because adversaries may never reach their objective. + if self.objective_fn is not None: + found = self.objective_fn(**outputs) + + # No need to calculate new gradients if adversarial examples are already found. 
+ if len(gain.shape) > 0: + gain = gain[~found] + + if len(gain.shape) > 0: + gain = gain.sum() + + return gain + + def configure_gradient_clipping( + self, optimizer, optimizer_idx, gradient_clip_val=None, gradient_clip_algorithm=None + ): + # Configuring gradient clipping in pl.Trainer is still useful, so use it. + super().configure_gradient_clipping(optimizer, gradient_clip_val, gradient_clip_algorithm) + + # FIXME: Why not loop through optimizer.param_groups? + # FIXME: Make gradient modifier an in-place operation. Will make it easier to fix the above. + self.perturbation.grad = self.gradient_modifier(self.perturbation.grad) + + def forward( + self, + *, + input: torch.Tensor | tuple, + target: torch.Tensor | dict[str, Any] | tuple, + **kwargs, + ): + # Materialize perturbation and initialize it + if torch.nn.parameter.is_lazy(self.perturbation): + self.perturbation.materialize(input.shape, device=input.device, dtype=torch.float32) + self.initializer(self.perturbation) + + # Project perturbation... + # FIXME: Projector should probably be an in-place operation instead of passing .data? + self.projector(self.perturbation.data, input, target) + + # ...and apply threat model. 
+ return self.threat_model(input, target, self.perturbation) From 0ef0a6c14e67bc6ef4446348defb7be03b36702e Mon Sep 17 00:00:00 2001 From: Cory Cornelius Date: Thu, 16 Mar 2023 13:17:50 -0700 Subject: [PATCH 054/163] bugfix --- mart/attack/adversary.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/mart/attack/adversary.py b/mart/attack/adversary.py index 8e5c3acf..6fd5a566 100644 --- a/mart/attack/adversary.py +++ b/mart/attack/adversary.py @@ -9,10 +9,12 @@ from itertools import cycle import pytorch_lightning as pl +import torch -from mart.attack import LitPerturber from mart.utils import silent +from .perturber import LitPerturber + __all__ = ["Adversary"] From ab623e9f10d9b69d7fe794e5297e72ab0f1e7927 Mon Sep 17 00:00:00 2001 From: Cory Cornelius Date: Thu, 16 Mar 2023 13:57:11 -0700 Subject: [PATCH 055/163] Make gradient modifiers in-place operations --- mart/attack/gradient_modifier.py | 35 ++++++++++++++++++++------------ mart/attack/perturber.py | 7 +++---- 2 files changed, 25 insertions(+), 17 deletions(-) diff --git a/mart/attack/gradient_modifier.py b/mart/attack/gradient_modifier.py index 30ad7565..dd680a95 100644 --- a/mart/attack/gradient_modifier.py +++ b/mart/attack/gradient_modifier.py @@ -4,8 +4,10 @@ # SPDX-License-Identifier: BSD-3-Clause # +from __future__ import annotations + import abc -from typing import Union +from typing import Iterable import torch @@ -15,26 +17,33 @@ class GradientModifier(abc.ABC): """Gradient modifier base class.""" - def __call__(self, grad: torch.Tensor) -> torch.Tensor: + def __call__(self, parameters: torch.Tensor | Iterable[torch.Tensor]) -> None: pass -# FIXME: We should really take inspiration from torch.nn.utils.clip_grad_norm_ class Sign(GradientModifier): - def __call__(self, grad: torch.Tensor) -> torch.Tensor: - return grad.sign() + def __call__(self, parameters: torch.Tensor | Iterable[torch.Tensor]) -> None: + if isinstance(parameters, torch.Tensor): + parameters = [parameters] + + 
parameters = [p for p in parameters if p.grad is not None] + + for p in parameters: + p.grad.detach().sign_() -# FIXME: We should really take inspiration from torch.nn.utils.clip_grad_norm_ class LpNormalizer(GradientModifier): """Scale gradients by a certain L-p norm.""" - def __init__(self, p: Union[int, float]): - super().__init__ - + def __init__(self, p: int | float): self.p = p - def __call__(self, grad: torch.Tensor) -> torch.Tensor: - grad_norm = grad.norm(p=self.p) - grad_normalized = grad / grad_norm - return grad_normalized + def __call__(self, parameters: torch.Tensor | Iterable[torch.Tensor]) -> None: + if isinstance(parameters, torch.Tensor): + parameters = [parameters] + + parameters = [p for p in parameters if p.grad is not None] + + for p in parameters: + p_norm = torch.norm(p.grad.detach(), p=self.p) + p.grad.detach().div_(p_norm) diff --git a/mart/attack/perturber.py b/mart/attack/perturber.py index 6c0af36d..8f256d87 100644 --- a/mart/attack/perturber.py +++ b/mart/attack/perturber.py @@ -58,7 +58,7 @@ def __init__( def configure_optimizers(self): # Perturbation is lazily initialized but we need a reference to it for the optimizer - # FIXME: It would be nice if we didn't have to create this buffer every time someone call's fit. + # FIXME: It would be nice if we didn't have to create this buffer every time someone calls fit. self.perturbation = torch.nn.UninitializedBuffer(requires_grad=True) return self.optimizer_fn([self.perturbation]) @@ -94,9 +94,8 @@ def configure_gradient_clipping( # Configuring gradient clipping in pl.Trainer is still useful, so use it. super().configure_gradient_clipping(optimizer, gradient_clip_val, gradient_clip_algorithm) - # FIXME: Why not loop through optimizer.param_groups? - # FIXME: Make gradient modifier an in-place operation. Will make it easier to fix the above. 
- self.perturbation.grad = self.gradient_modifier(self.perturbation.grad) + for group in optimizer.param_groups: + self.gradient_modifier(group["params"]) def forward( self, From b313209107afb3acb4dbc4731f2b17694cf47482 Mon Sep 17 00:00:00 2001 From: Cory Cornelius Date: Fri, 17 Mar 2023 09:25:33 -0700 Subject: [PATCH 056/163] cleanup --- mart/attack/perturber.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/mart/attack/perturber.py b/mart/attack/perturber.py index 8f256d87..f789ee9b 100644 --- a/mart/attack/perturber.py +++ b/mart/attack/perturber.py @@ -11,11 +11,12 @@ import pytorch_lightning as pl import torch +from .gradient_modifier import GradientModifier +from .projector import Projector + if TYPE_CHECKING: - from .gradient_modifier import GradientModifier from .initializer import Initializer from .objective import Objective - from .projector import Projector from .threat_model import ThreatModel __all__ = ["LitPerturber"] From 3ef08760bd5c7403548a3fb9e10c9dabf4bc1243 Mon Sep 17 00:00:00 2001 From: Cory Cornelius Date: Fri, 17 Mar 2023 09:32:04 -0700 Subject: [PATCH 057/163] Mark initializers __call__ as no_grad instead of using .data --- mart/attack/initializer.py | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/mart/attack/initializer.py b/mart/attack/initializer.py index dcc3303c..cd05c6c6 100644 --- a/mart/attack/initializer.py +++ b/mart/attack/initializer.py @@ -15,6 +15,7 @@ class Initializer(abc.ABC): """Initializer base class.""" + @torch.no_grad() @abc.abstractmethod def __call__(self, perturbation: torch.Tensor) -> None: pass @@ -24,6 +25,7 @@ class Constant(Initializer): def __init__(self, constant: Optional[Union[int, float]] = 0): self.constant = constant + @torch.no_grad() def __call__(self, perturbation: torch.Tensor) -> None: torch.nn.init.constant_(perturbation, self.constant) @@ -33,6 +35,7 @@ def __init__(self, min: Union[int, float], max: Union[int, float]): self.min = min self.max = max 
+ @torch.no_grad() def __call__(self, perturbation: torch.Tensor) -> None: torch.nn.init.uniform_(perturbation, self.min, self.max) @@ -42,10 +45,11 @@ def __init__(self, eps: Union[int, float], p: Optional[Union[int, float]] = torc self.eps = eps self.p = p + @torch.no_grad() def __call__(self, perturbation: torch.Tensor) -> None: torch.nn.init.uniform_(perturbation, -self.eps, self.eps) # TODO: make sure the first dim is the batch dim. if self.p is not torch.inf: # We don't do tensor.renorm_() because the first dim is not the batch dim. - pert_norm = perturbation.data.norm(p=self.p) - perturbation.data.mul_(self.eps / pert_norm) + pert_norm = perturbation.norm(p=self.p) + perturbation.mul_(self.eps / pert_norm) From 0c6304263a3d4ba2c03f0ae0908b1e8e738de04c Mon Sep 17 00:00:00 2001 From: Cory Cornelius Date: Fri, 17 Mar 2023 09:32:47 -0700 Subject: [PATCH 058/163] Mark projectors __call__ as no_grad instead of using .data --- mart/attack/perturber.py | 3 +-- mart/attack/projector.py | 8 ++++++++ 2 files changed, 9 insertions(+), 2 deletions(-) diff --git a/mart/attack/perturber.py b/mart/attack/perturber.py index f789ee9b..8eb095ea 100644 --- a/mart/attack/perturber.py +++ b/mart/attack/perturber.py @@ -111,8 +111,7 @@ def forward( self.initializer(self.perturbation) # Project perturbation... - # FIXME: Projector should probably be an in-place operation instead of passing .data? - self.projector(self.perturbation.data, input, target) + self.projector(self.perturbation, input, target) # ...and apply threat model. 
return self.threat_model(input, target, self.perturbation) diff --git a/mart/attack/projector.py b/mart/attack/projector.py index 244384ac..34fdaab0 100644 --- a/mart/attack/projector.py +++ b/mart/attack/projector.py @@ -15,6 +15,8 @@ class Projector(abc.ABC): """A projector modifies nn.Parameter's data.""" + @torch.no_grad() + @abc.abstractmethod def __call__( self, tensor: torch.Tensor, @@ -30,6 +32,7 @@ class Compose(Projector): def __init__(self, projectors: List[Projector]): self.projectors = projectors + @torch.no_grad() def __call__( self, tensor: torch.Tensor, @@ -57,6 +60,7 @@ def __init__( self.min = min self.max = max + @torch.no_grad() def __call__( self, tensor: torch.Tensor, @@ -89,6 +93,7 @@ def __init__( self.min = min self.max = max + @torch.no_grad() def __call__( self, tensor: torch.Tensor, @@ -119,6 +124,7 @@ def __init__(self, eps: float, p: Optional[Union[int, float]] = torch.inf): self.p = p self.eps = eps + @torch.no_grad() def __call__( self, tensor: torch.Tensor, @@ -145,6 +151,7 @@ def __init__( self.min = min self.max = max + @torch.no_grad() def __call__( self, tensor: torch.Tensor, @@ -158,6 +165,7 @@ def __call__( class Mask(Projector): + @torch.no_grad() def __call__( self, tensor: torch.Tensor, From 6c2bbdc237a95ebf93eba2723558c0474a108b92 Mon Sep 17 00:00:00 2001 From: Cory Cornelius Date: Mon, 20 Mar 2023 09:00:54 -0700 Subject: [PATCH 059/163] Cleanup attack configs --- mart/configs/attack/{iterative.yaml => adversary.yaml} | 0 mart/configs/attack/classification_eps1.75_fgsm.yaml | 3 ++- mart/configs/attack/classification_eps2_pgd10_step1.yaml | 3 ++- mart/configs/attack/classification_eps8_pgd10_step1.yaml | 3 ++- mart/configs/attack/iterative_sgd.yaml | 3 --- mart/configs/attack/object_detection_mask_adversary.yaml | 3 ++- 6 files changed, 8 insertions(+), 7 deletions(-) rename mart/configs/attack/{iterative.yaml => adversary.yaml} (100%) delete mode 100644 mart/configs/attack/iterative_sgd.yaml diff --git 
a/mart/configs/attack/iterative.yaml b/mart/configs/attack/adversary.yaml similarity index 100% rename from mart/configs/attack/iterative.yaml rename to mart/configs/attack/adversary.yaml diff --git a/mart/configs/attack/classification_eps1.75_fgsm.yaml b/mart/configs/attack/classification_eps1.75_fgsm.yaml index d6a7900f..f702d48d 100644 --- a/mart/configs/attack/classification_eps1.75_fgsm.yaml +++ b/mart/configs/attack/classification_eps1.75_fgsm.yaml @@ -1,5 +1,6 @@ defaults: - - iterative_sgd + - adversary + - optimizer: sgd - initializer: constant - gradient_modifier: sign - projector: linf_additive_range diff --git a/mart/configs/attack/classification_eps2_pgd10_step1.yaml b/mart/configs/attack/classification_eps2_pgd10_step1.yaml index 2f09ea0f..e25ab4f1 100644 --- a/mart/configs/attack/classification_eps2_pgd10_step1.yaml +++ b/mart/configs/attack/classification_eps2_pgd10_step1.yaml @@ -1,5 +1,6 @@ defaults: - - iterative_sgd + - adversary + - optimizer: sgd - initializer: uniform_lp - gradient_modifier: sign - projector: linf_additive_range diff --git a/mart/configs/attack/classification_eps8_pgd10_step1.yaml b/mart/configs/attack/classification_eps8_pgd10_step1.yaml index f6eb6c7f..c48a7ead 100644 --- a/mart/configs/attack/classification_eps8_pgd10_step1.yaml +++ b/mart/configs/attack/classification_eps8_pgd10_step1.yaml @@ -1,5 +1,6 @@ defaults: - - iterative_sgd + - adversary + - optimizer: sgd - initializer: uniform_lp - gradient_modifier: sign - projector: linf_additive_range diff --git a/mart/configs/attack/iterative_sgd.yaml b/mart/configs/attack/iterative_sgd.yaml deleted file mode 100644 index 2fbc326b..00000000 --- a/mart/configs/attack/iterative_sgd.yaml +++ /dev/null @@ -1,3 +0,0 @@ -defaults: - - iterative - - optimizer: sgd diff --git a/mart/configs/attack/object_detection_mask_adversary.yaml b/mart/configs/attack/object_detection_mask_adversary.yaml index 87e5addc..5bcc572f 100644 --- 
a/mart/configs/attack/object_detection_mask_adversary.yaml +++ b/mart/configs/attack/object_detection_mask_adversary.yaml @@ -1,5 +1,6 @@ defaults: - - iterative_sgd + - adversary + - optimizer: sgd - initializer: constant - gradient_modifier: sign - projector: mask_range From 8890ffdd86f3e6bdf80b299f65f7ae2b5e5bd9ea Mon Sep 17 00:00:00 2001 From: Cory Cornelius Date: Mon, 20 Mar 2023 12:12:46 -0700 Subject: [PATCH 060/163] Fix merge error --- mart/nn/nn.py | 7 ++----- 1 file changed, 2 insertions(+), 5 deletions(-) diff --git a/mart/nn/nn.py b/mart/nn/nn.py index 8d32f6b0..4e336006 100644 --- a/mart/nn/nn.py +++ b/mart/nn/nn.py @@ -161,11 +161,8 @@ def forward(self, *args, **kwargs): selected_args = [kwargs[key] for key in arg_keys[len(args) :]] selected_kwargs = {key: kwargs[val] for key, val in kwarg_keys.items()} - try: - ret = self.module(*args, *selected_args, **selected_kwargs) - except TypeError: - # FIXME: Add better error message - raise + # FIXME: Add better error message + ret = self.module(*args, *selected_args, **selected_kwargs) if self.return_keys: if not isinstance(ret, tuple): From 19cf58d9c5324cc0f0f85cfd286f3824a161f738 Mon Sep 17 00:00:00 2001 From: Cory Cornelius Date: Mon, 20 Mar 2023 12:15:47 -0700 Subject: [PATCH 061/163] Fix merge error --- mart/attack/projector.py | 1 - 1 file changed, 1 deletion(-) diff --git a/mart/attack/projector.py b/mart/attack/projector.py index 34fdaab0..4c360688 100644 --- a/mart/attack/projector.py +++ b/mart/attack/projector.py @@ -16,7 +16,6 @@ class Projector(abc.ABC): """A projector modifies nn.Parameter's data.""" @torch.no_grad() - @abc.abstractmethod def __call__( self, tensor: torch.Tensor, From 0b438dfbcb9ba3a4666f8c409693c63a9bb30ded Mon Sep 17 00:00:00 2001 From: Cory Cornelius Date: Mon, 20 Mar 2023 15:34:20 -0700 Subject: [PATCH 062/163] comment --- mart/attack/adversary.py | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/mart/attack/adversary.py b/mart/attack/adversary.py 
index 6fd5a566..260e9a9b 100644 --- a/mart/attack/adversary.py +++ b/mart/attack/adversary.py @@ -56,9 +56,11 @@ def __init__( @silent() def forward(self, **batch): - # Adversary lives within a sequence of nn.Modules. To signal the adversary should attack, one - # must pass a model to attack when calling the adversary. - if "model" in batch: + # Adversary lives within a sequence of model. To signal the adversary should attack, one + # must pass a model to attack when calling the adversary. Since we do not know where the + # Adversary lives inside the model, we also need the remaining sequence to be able to + # get a loss. + if "model" in batch and "sequence" in batch: # Attack, aka fit a perturbation, for one epoch by cycling over the same input batch. # We use Trainer.limit_train_batches to control the number of attack iterations. self.attacker.fit_loop.max_epochs += 1 From a01332a96db249b0eb124481d7af50008d6f8ddd Mon Sep 17 00:00:00 2001 From: Weilin Xu Date: Wed, 22 Mar 2023 16:59:12 -0700 Subject: [PATCH 063/163] Make Enforcer accept **kwargs. --- mart/attack/enforcer.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/mart/attack/enforcer.py b/mart/attack/enforcer.py index b1506c06..2a1a9c15 100644 --- a/mart/attack/enforcer.py +++ b/mart/attack/enforcer.py @@ -84,7 +84,7 @@ def _check_constraints(self, input_adv, *, input, target): constraint(input_adv, input=input, target=target) @torch.no_grad() - def __call__(self, input_adv, *, input, target): + def __call__(self, input_adv, *, input, target, **kwargs): self._check_constraints(input_adv, input=input, target=target) From 333bf6126940100c29a35104fb75f6d25c383487 Mon Sep 17 00:00:00 2001 From: Weilin Xu Date: Wed, 22 Mar 2023 17:23:01 -0700 Subject: [PATCH 064/163] Update test_gradient. 
--- tests/test_gradient.py | 26 +++++++++++++++++--------- 1 file changed, 17 insertions(+), 9 deletions(-) diff --git a/tests/test_gradient.py b/tests/test_gradient.py index e71023d4..a4ad49ee 100644 --- a/tests/test_gradient.py +++ b/tests/test_gradient.py @@ -11,15 +11,23 @@ def test_gradient_sign(input_data): - gradient = Sign() - output = gradient(input_data) - expected_output = input_data.sign() - torch.testing.assert_close(output, expected_output) + # Don't share input_data with other tests, because the gradient would be changed. + input_data = torch.tensor([1.0, 2.0, 3.0]) + input_data.grad = torch.tensor([-1.0, 3.0, 0.0]) + grad_modifier = Sign() + grad_modifier(input_data) + expected_grad = torch.tensor([-1.0, 1.0, 0.0]) + torch.testing.assert_close(input_data.grad, expected_grad) + + +def test_gradient_lp_normalizer(): + # Don't share input_data with other tests, because the gradient would be changed. + input_data = torch.tensor([1.0, 2.0, 3.0]) + input_data.grad = torch.tensor([-1.0, 3.0, 0.0]) -def test_gradient_lp_normalizer(input_data): p = 1 - gradient = LpNormalizer(p) - output = gradient(input_data) - expected_output = input_data / input_data.norm(p=p) - torch.testing.assert_close(output, expected_output) + grad_modifier = LpNormalizer(p) + grad_modifier(input_data) + expected_grad = torch.tensor([-0.25, 0.75, 0.0]) + torch.testing.assert_close(input_data.grad, expected_grad) From 8c84b433607004e176319ec3ff9af990554846db Mon Sep 17 00:00:00 2001 From: Cory Cornelius Date: Thu, 23 Mar 2023 08:25:42 -0700 Subject: [PATCH 065/163] LitPerturber -> Perturber --- mart/attack/adversary.py | 8 ++++---- mart/attack/perturber.py | 4 ++-- tests/test_adversary.py | 3 +-- tests/test_batch.py | 2 +- 4 files changed, 8 insertions(+), 9 deletions(-) diff --git a/mart/attack/adversary.py b/mart/attack/adversary.py index 6fcd205a..d82405c5 100644 --- a/mart/attack/adversary.py +++ b/mart/attack/adversary.py @@ -14,7 +14,7 @@ from mart.utils import silent from 
.enforcer import Enforcer -from .perturber import LitPerturber +from .perturber import Perturber __all__ = ["Adversary"] @@ -24,7 +24,7 @@ def __init__( self, *, trainer: pl.Trainer | None = None, - perturber: LitPerturber | None = None, + perturber: Perturber | None = None, enforcer: Enforcer, **kwargs, ): @@ -32,7 +32,7 @@ def __init__( Args: trainer (Trainer): A PyTorch-Lightning Trainer object used to fit the perturber. - perturber (LitPerturber): A LitPerturber that manages perturbations. + perturber (Perturber): A Perturber that manages perturbations. enforcer (Enforcer): A Callable that enforce constraints on the adversarial input. """ super().__init__() @@ -55,7 +55,7 @@ def __init__( assert self.attacker.max_epochs == 0 assert self.attacker.limit_train_batches > 0 - self.perturber = perturber or LitPerturber(**kwargs) + self.perturber = perturber or Perturber(**kwargs) self.enforcer = enforcer @silent() diff --git a/mart/attack/perturber.py b/mart/attack/perturber.py index 0083ded7..38ad77ea 100644 --- a/mart/attack/perturber.py +++ b/mart/attack/perturber.py @@ -19,10 +19,10 @@ from .initializer import Initializer from .objective import Objective -__all__ = ["LitPerturber"] +__all__ = ["Perturber"] -class LitPerturber(pl.LightningModule): +class Perturber(pl.LightningModule): """Peturbation optimization module.""" def __init__( diff --git a/tests/test_adversary.py b/tests/test_adversary.py index c7438458..e074f647 100644 --- a/tests/test_adversary.py +++ b/tests/test_adversary.py @@ -11,8 +11,7 @@ from torch.optim import SGD import mart -from mart.attack import Adversary -from mart.attack.perturber import Perturber +from mart.attack import Adversary, Perturber def test_adversary(input_data, target_data, perturbation): diff --git a/tests/test_batch.py b/tests/test_batch.py index d259fb82..0cbb67a1 100644 --- a/tests/test_batch.py +++ b/tests/test_batch.py @@ -9,7 +9,7 @@ import pytest import torch -from mart.attack.perturber import BatchPerturber, 
Perturber +from mart.attack import Perturber @pytest.fixture(scope="function") From 970f53b58f94b7adc702c65ccc12583c2353dc41 Mon Sep 17 00:00:00 2001 From: Cory Cornelius Date: Thu, 23 Mar 2023 09:47:59 -0700 Subject: [PATCH 066/163] cleanup --- mart/attack/adversary.py | 12 +++++++----- 1 file changed, 7 insertions(+), 5 deletions(-) diff --git a/mart/attack/adversary.py b/mart/attack/adversary.py index d82405c5..f88beccc 100644 --- a/mart/attack/adversary.py +++ b/mart/attack/adversary.py @@ -20,24 +20,26 @@ class Adversary(torch.nn.Module): + """An adversary module which generates and applies perturbation to input.""" + def __init__( self, *, - trainer: pl.Trainer | None = None, - perturber: Perturber | None = None, enforcer: Enforcer, + perturber: Perturber | None = None, + attacker: pl.Trainer | None = None, **kwargs, ): """_summary_ Args: - trainer (Trainer): A PyTorch-Lightning Trainer object used to fit the perturber. - perturber (Perturber): A Perturber that manages perturbations. enforcer (Enforcer): A Callable that enforce constraints on the adversarial input. + perturber (Perturber): A Perturber that manages perturbations. + attacker (Trainer): A PyTorch-Lightning Trainer object used to fit the perturber. """ super().__init__() - self.attacker = trainer or pl.Trainer( + self.attacker = attacker or pl.Trainer( accelerator="auto", # FIXME: we need to get this on the same device as input... 
num_sanity_val_steps=0, logger=False, From addb2ab5fc96f491df013a791021c32edd2d4cdb Mon Sep 17 00:00:00 2001 From: Cory Cornelius Date: Thu, 23 Mar 2023 10:51:38 -0700 Subject: [PATCH 067/163] Add _reset functionality --- mart/attack/perturber.py | 11 ++++++++--- 1 file changed, 8 insertions(+), 3 deletions(-) diff --git a/mart/attack/perturber.py b/mart/attack/perturber.py index 38ad77ea..722d0e32 100644 --- a/mart/attack/perturber.py +++ b/mart/attack/perturber.py @@ -57,11 +57,16 @@ def __init__( self.gain_output = gain self.objective_fn = objective - def configure_optimizers(self): - # Perturbation is lazily initialized but we need a reference to it for the optimizer - # FIXME: It would be nice if we didn't have to create this buffer every time someone calls fit. + self._reset() + + def _reset(self): self.perturbation = torch.nn.UninitializedBuffer(requires_grad=True) + def configure_optimizers(self): + # Reset perturbation each time fit is called + # FIXME: It would be nice if we didn't have to do this every fit. + self._reset() + return self.optimizer_fn([self.perturbation]) def training_step(self, batch, batch_idx): From 38c8caf9692934597237c4c125add521d53e1e7b Mon Sep 17 00:00:00 2001 From: Cory Cornelius Date: Thu, 23 Mar 2023 10:55:17 -0700 Subject: [PATCH 068/163] Update tests and fix a bug --- mart/attack/perturber.py | 2 +- tests/test_adversary.py | 110 +++++++++++----------------------- tests/test_perturber.py | 125 ++++++++++++++++++++++++++++++--------- 3 files changed, 133 insertions(+), 104 deletions(-) diff --git a/mart/attack/perturber.py b/mart/attack/perturber.py index 722d0e32..2498b80b 100644 --- a/mart/attack/perturber.py +++ b/mart/attack/perturber.py @@ -98,7 +98,7 @@ def configure_gradient_clipping( self, optimizer, optimizer_idx, gradient_clip_val=None, gradient_clip_algorithm=None ): # Configuring gradient clipping in pl.Trainer is still useful, so use it. 
- super().configure_gradient_clipping(optimizer, gradient_clip_val, gradient_clip_algorithm) + super().configure_gradient_clipping(optimizer, optimizer_idx, gradient_clip_val, gradient_clip_algorithm) for group in optimizer.param_groups: self.gradient_modifier(group["params"]) diff --git a/tests/test_adversary.py b/tests/test_adversary.py index e074f647..bcb45031 100644 --- a/tests/test_adversary.py +++ b/tests/test_adversary.py @@ -13,63 +13,47 @@ import mart from mart.attack import Adversary, Perturber +import pytorch_lightning as pl def test_adversary(input_data, target_data, perturbation): - composer = mart.attack.composer.Additive() enforcer = Mock() - perturber = Mock(return_value=perturbation) - optimizer = Mock() - max_iters = 3 - gain = Mock() + perturber = Mock(return_value=perturbation + input_data) + attacker = Mock(max_epochs=0, limit_train_batches=1, fit_loop=Mock(max_epochs=0)) adversary = Adversary( - composer=composer, enforcer=enforcer, perturber=perturber, - optimizer=optimizer, - max_iters=max_iters, - gain=gain, + attacker=attacker, ) - output_data = adversary(input_data, target_data) + output_data = adversary(input=input_data, target=target_data) - optimizer.assert_not_called() - gain.assert_not_called() - perturber.assert_called_once() - # The enforcer is only called when model is not None. + # The enforcer and attacker should only be called when model is not None. 
enforcer.assert_not_called() + attacker.fit.assert_not_called() + assert attacker.fit_loop.max_epochs == 0 + + perturber.assert_called_once() + torch.testing.assert_close(output_data, input_data + perturbation) def test_adversary_with_model(input_data, target_data, perturbation): - composer = mart.attack.composer.Additive() enforcer = Mock() - initializer = Mock() - parameter_groups = Mock(return_value=[]) - perturber = Mock(return_value=perturbation, parameter_groups=parameter_groups) - optimizer = Mock() - max_iters = 3 - model = Mock(return_value={}) - gain = Mock(return_value=torch.tensor(0.0, requires_grad=True)) + perturber = Mock(return_value=input_data + perturbation) + attacker = Mock(max_epochs=0, limit_train_batches=1, fit_loop=Mock(max_epochs=0)) adversary = Adversary( - composer=composer, enforcer=enforcer, perturber=perturber, - optimizer=optimizer, - max_iters=3, - gain=gain, + attacker=attacker, ) - output_data = adversary(input_data, target_data, model=model) + output_data = adversary(input=input_data, target=target_data, model=None, sequence=None) - parameter_groups.assert_called_once() - optimizer.assert_called_once() # The enforcer is only called when model is not None. enforcer.assert_called_once() - # max_iters+1 because Adversary examines one last time - assert gain.call_count == max_iters + 1 - assert model.call_count == max_iters + 1 + attacker.fit.assert_called_once() # Once with model=None to get perturbation. # When model=model, perturber.initialize_parameters() is called. 
@@ -78,69 +62,47 @@ def test_adversary_with_model(input_data, target_data, perturbation): torch.testing.assert_close(output_data, input_data + perturbation) -def test_adversary_perturber_hidden_params(input_data, target_data): - initializer = Mock() - perturber = Perturber(initializer) - - composer = mart.attack.composer.Additive() +def test_adversary_hidden_params(input_data, target_data, perturbation): enforcer = Mock() - optimizer = Mock() - gain = Mock(return_value=torch.tensor(0.0, requires_grad=True)) - model = Mock(return_value={}) + perturber = Mock(return_value=input_data + perturbation) + attacker = Mock(max_epochs=0, limit_train_batches=1, fit_loop=Mock(max_epochs=0)) adversary = Adversary( - composer=composer, enforcer=enforcer, perturber=perturber, - optimizer=optimizer, - max_iters=1, - gain=gain, + attacker=attacker, ) - output_data = adversary(input_data, target_data, model=model) + + output_data = adversary(input=input_data, target=target_data, model=None, sequence=None) # Adversarial perturbation should not be updated by a regular training optimizer. params = [p for p in adversary.parameters()] assert len(params) == 0 - # Adversarial perturbation should not be saved to the model checkpoint. + # Adversarial perturbation should not have any state dict items state_dict = adversary.state_dict() - assert "perturber.perturbation" not in state_dict + assert len(state_dict) == 0 -def test_adversary_perturbation(input_data, target_data): - composer = mart.attack.composer.Additive() +def test_adversary_perturbation(input_data, target_data, perturbation): enforcer = Mock() - optimizer = partial(SGD, lr=1.0, maximize=True) - - def gain(logits): - return logits.mean() - - # Perturbation initialized as zero. 
- def initializer(x): - torch.nn.init.constant_(x, 0) - - perturber = Perturber(initializer) + perturber = Mock(return_value=input_data + perturbation) + attacker = Mock(max_epochs=0, limit_train_batches=1, fit_loop=Mock(max_epochs=0)) adversary = Adversary( - composer=composer, enforcer=enforcer, perturber=perturber, - optimizer=optimizer, - max_iters=1, - gain=gain, + attacker=attacker, ) - def model(input, target, model=None, **kwargs): - return {"logits": adversary(input, target)} + _ = adversary(input=input_data, target=target_data, model=None, sequence=None) + output_data = adversary(input=input_data, target=target_data) - output1 = adversary(input_data.requires_grad_(), target_data, model=model) - pert1 = perturber.perturbation.clone() - output2 = adversary(input_data.requires_grad_(), target_data, model=model) - pert2 = perturber.perturbation.clone() + # The enforcer is only called when model is not None. + enforcer.assert_called_once() + attacker.fit.assert_called_once() - # The perturbation from multiple runs should be the same. - torch.testing.assert_close(pert1, pert2) + # Once with model and sequence and once without + assert perturber.call_count == 2 - # Simulate a new batch of data of different size. 
- new_input_data = torch.cat([input_data, input_data]) - output3 = adversary(new_input_data, target_data, model=model) + torch.testing.assert_close(output_data, input_data + perturbation) diff --git a/tests/test_perturber.py b/tests/test_perturber.py index a3d2d196..c26473bf 100644 --- a/tests/test_perturber.py +++ b/tests/test_perturber.py @@ -10,36 +10,103 @@ import pytest import torch +import mart +from mart.attack.adversary import Adversary from mart.attack.perturber import Perturber -def test_perturber_repr(input_data, target_data): - initializer = Mock() - gradient_modifier = Mock() +def test_forward(input_data, target_data): + initializer = mart.attack.initializer.Constant(1337) + projector = Mock() + composer = mart.attack.composer.Additive() + + perturber = Perturber(initializer=initializer, optimizer=None, composer=composer, projector=projector) + + for _ in range(2): + output_data = perturber(input=input_data, target=target_data) + + torch.testing.assert_close(output_data, input_data + 1337) + + # perturber needs to project and compose perturbation on every call + assert projector.call_count == 2 + + +def test_configure_optimizers(input_data, target_data): + initializer = mart.attack.initializer.Constant(1337) + optimizer = Mock() projector = Mock() - perturber = Perturber(initializer, gradient_modifier, projector) - - # get additive perturber representation - perturbation = torch.nn.UninitializedBuffer() - expected_repr = ( - f"{repr(perturbation)}, initializer={initializer}," - f"gradient_modifier={gradient_modifier}, projector={projector}" - ) - representation = perturber.extra_repr() - assert expected_repr == representation - - # generate again the perturber with an initialized - # perturbation - perturber.on_run_start(adversary=None, input=input_data, target=target_data, model=None) - representation = perturber.extra_repr() - assert expected_repr != representation - - -def test_perturber_forward(input_data, target_data): - initializer = Mock() - 
perturber = Perturber(initializer) - - perturber.on_run_start(adversary=None, input=input_data, target=target_data, model=None) - output = perturber(input_data, target_data) - expected_output = perturber.perturbation - torch.testing.assert_close(output, expected_output, equal_nan=True) + composer = mart.attack.composer.Additive() + + perturber = Perturber(initializer=initializer, optimizer=optimizer, composer=composer, projector=projector) + + for _ in range(2): + perturber.configure_optimizers() + perturber(input=input_data, target=target_data) + + assert optimizer.call_count == 2 + assert projector.call_count == 2 + + +def test_training_step(input_data, target_data): + initializer = mart.attack.initializer.Constant(1337) + optimizer = Mock() + projector = Mock() + composer = mart.attack.composer.Additive() + gain = Mock(shape=[]) + model = Mock(return_value={"loss": gain}) + + perturber = Perturber(initializer=initializer, optimizer=optimizer, composer=composer, projector=projector) + + output = perturber.training_step({"input": input_data, "target": target_data, "model": model}, 0) + + assert output == gain + + +def test_training_step_with_many_gain(input_data, target_data): + initializer = mart.attack.initializer.Constant(1337) + optimizer = Mock() + projector = Mock() + composer = mart.attack.composer.Additive() + gain = torch.tensor([1234, 5678]) + model = Mock(return_value={"loss": gain}) + + perturber = Perturber(initializer=initializer, optimizer=optimizer, composer=composer, projector=projector) + + output = perturber.training_step({"input": input_data, "target": target_data, "model": model}, 0) + + assert output == gain.sum() + + +def test_training_step_with_objective(input_data, target_data): + initializer = mart.attack.initializer.Constant(1337) + optimizer = Mock() + projector = Mock() + composer = mart.attack.composer.Additive() + gain = torch.tensor([1234, 5678]) + model = Mock(return_value={"loss": gain}) + objective = 
Mock(return_value=torch.tensor([True, False], dtype=torch.bool)) + + perturber = Perturber(initializer=initializer, optimizer=optimizer, composer=composer, projector=projector, objective=objective) + + output = perturber.training_step({"input": input_data, "target": target_data, "model": model}, 0) + + assert output == gain[1] + + objective.assert_called_once() + + +def test_configure_gradient_clipping(): + initializer = mart.attack.initializer.Constant(1337) + projector = Mock() + composer = mart.attack.composer.Additive() + optimizer = Mock(param_groups=[{"params": Mock()}, {"params": Mock()}]) + gradient_modifier = Mock() + + perturber = Perturber(optimizer=optimizer, gradient_modifier=gradient_modifier, initializer=None, composer=None, projector=None) + # We need to mock a trainer since LightningModule does some checks + perturber.trainer = Mock(gradient_clip_val=1., gradient_clip_algorithm="norm") + + perturber.configure_gradient_clipping(optimizer, 0) + + # Once for each parameter in the optimizer + assert gradient_modifier.call_count == 2 From 020f99bfaf748c68da14b57de33eb679a522b11e Mon Sep 17 00:00:00 2001 From: Cory Cornelius Date: Thu, 23 Mar 2023 13:42:04 -0700 Subject: [PATCH 069/163] Remove batch tests --- tests/test_batch.py | 91 --------------------------------------------- 1 file changed, 91 deletions(-) delete mode 100644 tests/test_batch.py diff --git a/tests/test_batch.py b/tests/test_batch.py deleted file mode 100644 index 0cbb67a1..00000000 --- a/tests/test_batch.py +++ /dev/null @@ -1,91 +0,0 @@ -# -# Copyright (C) 2022 Intel Corporation -# -# SPDX-License-Identifier: BSD-3-Clause -# - -from unittest.mock import Mock, patch - -import pytest -import torch - -from mart.attack import Perturber - - -@pytest.fixture(scope="function") -def perturber_batch(): - # function to mock perturbation - def perturbation(input, target): - return input + torch.ones(*input.shape) - - # setup batch mock - perturber = Mock(name="perturber_mock", spec=Perturber, 
side_effect=perturbation) - perturber_factory = Mock(return_value=perturber) - - batch = BatchPerturber(perturber_factory) - - return batch - - -@pytest.fixture(scope="function") -def input_data_batch(): - batch_size = 2 - image_size = (3, 32, 32) - - input_data = {} - input_data["image_batch"] = torch.zeros(batch_size, *image_size) - input_data["image_batch_list"] = [torch.zeros(*image_size) for _ in range(batch_size)] - input_data["target"] = {"perturbable_mask": torch.ones(*image_size)} - - return input_data - - -def test_batch_run_start(perturber_batch, input_data_batch): - assert isinstance(perturber_batch, BatchPerturber) - - # start perturber batch - adversary = Mock() - model = Mock() - perturber_batch.on_run_start( - adversary, input_data_batch["image_batch"], input_data_batch["target"], model - ) - - batch_size, _, _, _ = input_data_batch["image_batch"].shape - assert len(perturber_batch.perturbers) == batch_size - - -def test_batch_forward(perturber_batch, input_data_batch): - assert isinstance(perturber_batch, BatchPerturber) - - # start perturber batch - adversary = Mock() - model = Mock() - perturber_batch.on_run_start( - adversary, input_data_batch["image_batch"], input_data_batch["target"], model - ) - - perturbed_images = perturber_batch(input_data_batch["image_batch"], input_data_batch["target"]) - expected = torch.ones(*perturbed_images.shape) - torch.testing.assert_close(perturbed_images, expected) - - -def test_tuple_batch_forward(perturber_batch, input_data_batch): - assert isinstance(perturber_batch, BatchPerturber) - - # start perturber batch - adversary = Mock() - model = Mock() - perturber_batch.on_run_start( - adversary, input_data_batch["image_batch_list"], input_data_batch["target"], model - ) - - perturbed_images = perturber_batch( - input_data_batch["image_batch_list"], input_data_batch["target"] - ) - expected = [ - torch.ones(*input_data_batch["image_batch_list"][0].shape) - for _ in range(len(input_data_batch["image_batch_list"])) 
- ] - - for output, expected_output in zip(expected, perturbed_images): - torch.testing.assert_close(output, expected_output) From 7332f74cacd748411f2a30993185e8c8c8462595 Mon Sep 17 00:00:00 2001 From: Cory Cornelius Date: Thu, 23 Mar 2023 14:04:44 -0700 Subject: [PATCH 070/163] style --- mart/attack/perturber.py | 4 +++- tests/test_adversary.py | 2 +- tests/test_perturber.py | 46 +++++++++++++++++++++++++++++++--------- 3 files changed, 40 insertions(+), 12 deletions(-) diff --git a/mart/attack/perturber.py b/mart/attack/perturber.py index 2498b80b..b87b3211 100644 --- a/mart/attack/perturber.py +++ b/mart/attack/perturber.py @@ -98,7 +98,9 @@ def configure_gradient_clipping( self, optimizer, optimizer_idx, gradient_clip_val=None, gradient_clip_algorithm=None ): # Configuring gradient clipping in pl.Trainer is still useful, so use it. - super().configure_gradient_clipping(optimizer, optimizer_idx, gradient_clip_val, gradient_clip_algorithm) + super().configure_gradient_clipping( + optimizer, optimizer_idx, gradient_clip_val, gradient_clip_algorithm + ) for group in optimizer.param_groups: self.gradient_modifier(group["params"]) diff --git a/tests/test_adversary.py b/tests/test_adversary.py index bcb45031..2232e12d 100644 --- a/tests/test_adversary.py +++ b/tests/test_adversary.py @@ -7,13 +7,13 @@ from functools import partial from unittest.mock import Mock +import pytorch_lightning as pl import torch from torch.optim import SGD import mart from mart.attack import Adversary, Perturber -import pytorch_lightning as pl def test_adversary(input_data, target_data, perturbation): enforcer = Mock() diff --git a/tests/test_perturber.py b/tests/test_perturber.py index c26473bf..b1f39993 100644 --- a/tests/test_perturber.py +++ b/tests/test_perturber.py @@ -20,7 +20,9 @@ def test_forward(input_data, target_data): projector = Mock() composer = mart.attack.composer.Additive() - perturber = Perturber(initializer=initializer, optimizer=None, composer=composer, 
projector=projector) + perturber = Perturber( + initializer=initializer, optimizer=None, composer=composer, projector=projector + ) for _ in range(2): output_data = perturber(input=input_data, target=target_data) @@ -37,7 +39,9 @@ def test_configure_optimizers(input_data, target_data): projector = Mock() composer = mart.attack.composer.Additive() - perturber = Perturber(initializer=initializer, optimizer=optimizer, composer=composer, projector=projector) + perturber = Perturber( + initializer=initializer, optimizer=optimizer, composer=composer, projector=projector + ) for _ in range(2): perturber.configure_optimizers() @@ -55,9 +59,13 @@ def test_training_step(input_data, target_data): gain = Mock(shape=[]) model = Mock(return_value={"loss": gain}) - perturber = Perturber(initializer=initializer, optimizer=optimizer, composer=composer, projector=projector) + perturber = Perturber( + initializer=initializer, optimizer=optimizer, composer=composer, projector=projector + ) - output = perturber.training_step({"input": input_data, "target": target_data, "model": model}, 0) + output = perturber.training_step( + {"input": input_data, "target": target_data, "model": model}, 0 + ) assert output == gain @@ -70,9 +78,13 @@ def test_training_step_with_many_gain(input_data, target_data): gain = torch.tensor([1234, 5678]) model = Mock(return_value={"loss": gain}) - perturber = Perturber(initializer=initializer, optimizer=optimizer, composer=composer, projector=projector) + perturber = Perturber( + initializer=initializer, optimizer=optimizer, composer=composer, projector=projector + ) - output = perturber.training_step({"input": input_data, "target": target_data, "model": model}, 0) + output = perturber.training_step( + {"input": input_data, "target": target_data, "model": model}, 0 + ) assert output == gain.sum() @@ -86,9 +98,17 @@ def test_training_step_with_objective(input_data, target_data): model = Mock(return_value={"loss": gain}) objective = 
Mock(return_value=torch.tensor([True, False], dtype=torch.bool)) - perturber = Perturber(initializer=initializer, optimizer=optimizer, composer=composer, projector=projector, objective=objective) + perturber = Perturber( + initializer=initializer, + optimizer=optimizer, + composer=composer, + projector=projector, + objective=objective, + ) - output = perturber.training_step({"input": input_data, "target": target_data, "model": model}, 0) + output = perturber.training_step( + {"input": input_data, "target": target_data, "model": model}, 0 + ) assert output == gain[1] @@ -102,9 +122,15 @@ def test_configure_gradient_clipping(): optimizer = Mock(param_groups=[{"params": Mock()}, {"params": Mock()}]) gradient_modifier = Mock() - perturber = Perturber(optimizer=optimizer, gradient_modifier=gradient_modifier, initializer=None, composer=None, projector=None) + perturber = Perturber( + optimizer=optimizer, + gradient_modifier=gradient_modifier, + initializer=None, + composer=None, + projector=None, + ) # We need to mock a trainer since LightningModule does some checks - perturber.trainer = Mock(gradient_clip_val=1., gradient_clip_algorithm="norm") + perturber.trainer = Mock(gradient_clip_val=1.0, gradient_clip_algorithm="norm") perturber.configure_gradient_clipping(optimizer, 0) From 260fd3dd2237f9f0892bd4bcbfa52a6919eb1c0b Mon Sep 17 00:00:00 2001 From: Cory Cornelius Date: Thu, 23 Mar 2023 14:12:48 -0700 Subject: [PATCH 071/163] Late bind trainer to input device --- mart/attack/adversary.py | 58 ++++++++++++++++++++++++++++------------ 1 file changed, 41 insertions(+), 17 deletions(-) diff --git a/mart/attack/adversary.py b/mart/attack/adversary.py index f88beccc..d6f8ab26 100644 --- a/mart/attack/adversary.py +++ b/mart/attack/adversary.py @@ -6,6 +6,7 @@ from __future__ import annotations +from functools import partial from itertools import cycle import pytorch_lightning as pl @@ -39,23 +40,28 @@ def __init__( """ super().__init__() - self.attacker = attacker or 
pl.Trainer( - accelerator="auto", # FIXME: we need to get this on the same device as input... - num_sanity_val_steps=0, - logger=False, - max_epochs=0, - limit_train_batches=kwargs.pop("max_iters", 10), - callbacks=list(kwargs.pop("callbacks", {}).values()), - enable_model_summary=False, - enable_checkpointing=False, - enable_progress_bar=False, - ) - - # We feed the same batch to the attack every time so we treat each step as an - # attack iteration. As such, attackers must only run for 1 epoch and must limit - # the number of attack steps via limit_train_batches. - assert self.attacker.max_epochs == 0 - assert self.attacker.limit_train_batches > 0 + self.attacker = attacker + + if self.attacker is None: + # Enable attack to be late bound in forward + self.attacker = partial( + pl.Trainer, + num_sanity_val_steps=0, + logger=False, + max_epochs=0, + limit_train_batches=kwargs.pop("max_iters", 10), + callbacks=kwargs.pop("callbacks", {}), + enable_model_summary=False, + enable_checkpointing=False, + enable_progress_bar=False, + ) + + else: + # We feed the same batch to the attack every time so we treat each step as an + # attack iteration. As such, attackers must only run for 1 epoch and must limit + # the number of attack steps via limit_train_batches. + assert self.attacker.max_epochs == 0 + assert self.attacker.limit_train_batches > 0 self.perturber = perturber or Perturber(**kwargs) self.enforcer = enforcer @@ -67,6 +73,24 @@ def forward(self, **batch): # Adversary lives inside the model, we also need the remaining sequence to be able to # get a loss. 
if "model" in batch and "sequence" in batch: + # Late bind attacker on same device as input + if isinstance(self.attacker, partial): + device = batch["input"].device + + if device.type == "cuda": + accelerator = "gpu" + devices = [device.index] + + elif device.type == "cpu": + accelerator = "cpu" + devices = None + + else: + accelerator = device.type + devices = [device.index] + + self.attacker = self.attacker(accelerator=accelerator, devices=devices) + # Attack, aka fit a perturbation, for one epoch by cycling over the same input batch. # We use Trainer.limit_train_batches to control the number of attack iterations. self.attacker.fit_loop.max_epochs += 1 From cd47df668fd8ca206c5f5ae3a9a0862fe4cc0b18 Mon Sep 17 00:00:00 2001 From: Cory Cornelius Date: Thu, 23 Mar 2023 14:22:25 -0700 Subject: [PATCH 072/163] fix visualizer test --- tests/test_visualizer.py | 10 +++++++--- 1 file changed, 7 insertions(+), 3 deletions(-) diff --git a/tests/test_visualizer.py b/tests/test_visualizer.py index 5a269db2..c4abb4dd 100644 --- a/tests/test_visualizer.py +++ b/tests/test_visualizer.py @@ -19,15 +19,19 @@ def test_visualizer_run_end(input_data, target_data, perturbation, tmp_path): target_list = [target_data] # simulate an addition perturbation - def perturb(input, target, model): + def perturb(input): result = [sample + perturbation for sample in input] return result - model = Mock() + trainer = Mock() + model = Mock(return_value=perturb(input_list)) + outputs = Mock() + batch = {"input": input_list, "target": target_list} adversary = Mock(spec=Adversary, side_effect=perturb) visualizer = PerturbedImageVisualizer(folder) - visualizer.on_run_end(adversary=adversary, input=input_list, target=target_list, model=model) + visualizer.on_train_batch_end(trainer, model, outputs, batch, 0) + visualizer.on_train_end(trainer, model) # verify that the visualizer created the JPG file expected_output_path = folder / target_data["file_name"] From 0519b5ad5db6b36653b26a3bbdc47f7616a4404d Mon 
Sep 17 00:00:00 2001 From: Cory Cornelius Date: Thu, 23 Mar 2023 15:52:04 -0700 Subject: [PATCH 073/163] bugfix --- mart/attack/adversary.py | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/mart/attack/adversary.py b/mart/attack/adversary.py index d6f8ab26..6d1fb4ba 100644 --- a/mart/attack/adversary.py +++ b/mart/attack/adversary.py @@ -75,7 +75,12 @@ def forward(self, **batch): if "model" in batch and "sequence" in batch: # Late bind attacker on same device as input if isinstance(self.attacker, partial): - device = batch["input"].device + inputs = batch["input"] + + if isinstance(inputs, tuple): + inputs = inputs[0] + + device = inputs.device if device.type == "cuda": accelerator = "gpu" From 4024852b55a52c145ac6c8a8506a13c788e7c05d Mon Sep 17 00:00:00 2001 From: Cory Cornelius Date: Thu, 23 Mar 2023 15:58:29 -0700 Subject: [PATCH 074/163] bugfix --- mart/attack/adversary.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/mart/attack/adversary.py b/mart/attack/adversary.py index 6d1fb4ba..b229e59a 100644 --- a/mart/attack/adversary.py +++ b/mart/attack/adversary.py @@ -50,7 +50,7 @@ def __init__( logger=False, max_epochs=0, limit_train_batches=kwargs.pop("max_iters", 10), - callbacks=kwargs.pop("callbacks", {}), + callbacks=list(kwargs.pop("callbacks", {}).values()), # dict to list of values enable_model_summary=False, enable_checkpointing=False, enable_progress_bar=False, From 1980d99476f486e6129239106b80d154d928f73b Mon Sep 17 00:00:00 2001 From: Cory Cornelius Date: Thu, 23 Mar 2023 16:00:48 -0700 Subject: [PATCH 075/163] disable progress bar --- mart/configs/attack/object_detection_mask_adversary.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/mart/configs/attack/object_detection_mask_adversary.yaml b/mart/configs/attack/object_detection_mask_adversary.yaml index aa664819..d18cb3a6 100644 --- a/mart/configs/attack/object_detection_mask_adversary.yaml +++ 
b/mart/configs/attack/object_detection_mask_adversary.yaml @@ -4,7 +4,7 @@ defaults: - initializer: constant - gradient_modifier: sign - projector: mask_range - - callbacks: [progress_bar, image_visualizer] + - callbacks: [image_visualizer] - objective: zero_ap - gain: rcnn_training_loss - composer: batch_overlay From 9809362a92f73e4fd59534e52689a7017d91fafe Mon Sep 17 00:00:00 2001 From: Cory Cornelius Date: Thu, 23 Mar 2023 18:33:25 -0700 Subject: [PATCH 076/163] bugfix --- mart/attack/callbacks/visualizer.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/mart/attack/callbacks/visualizer.py b/mart/attack/callbacks/visualizer.py index 14bf9e12..3354321e 100644 --- a/mart/attack/callbacks/visualizer.py +++ b/mart/attack/callbacks/visualizer.py @@ -32,7 +32,7 @@ def on_train_batch_end(self, trainer, model, outputs, batch, batch_idx): def on_train_end(self, trainer, model): # FIXME: We should really just save this to outputs instead of recomputing adv_input - adv_input = model(self.input, self.target) + adv_input = model(input=self.input, target=self.target) for img, tgt in zip(adv_input, self.target): fname = tgt["file_name"] From bad9c10d773c4168d917f5ceba05cb99cbe68eb6 Mon Sep 17 00:00:00 2001 From: Cory Cornelius Date: Thu, 23 Mar 2023 18:36:26 -0700 Subject: [PATCH 077/163] Add loss to object detection outputs --- .../object_detection_mask_adversary.yaml | 3 ++- .../model/torchvision_faster_rcnn.yaml | 22 +++++++++++++++++++ 2 files changed, 24 insertions(+), 1 deletion(-) diff --git a/mart/configs/attack/object_detection_mask_adversary.yaml b/mart/configs/attack/object_detection_mask_adversary.yaml index d18cb3a6..d2754585 100644 --- a/mart/configs/attack/object_detection_mask_adversary.yaml +++ b/mart/configs/attack/object_detection_mask_adversary.yaml @@ -6,7 +6,6 @@ defaults: - projector: mask_range - callbacks: [image_visualizer] - objective: zero_ap - - gain: rcnn_training_loss - composer: batch_overlay - enforcer: batch - 
enforcer/constraints: [mask, pixel_range] @@ -17,5 +16,7 @@ optimizer: max_iters: 5 +gain: "loss" + initializer: constant: 127 diff --git a/mart/configs/model/torchvision_faster_rcnn.yaml b/mart/configs/model/torchvision_faster_rcnn.yaml index 0187b99f..8c62182f 100644 --- a/mart/configs/model/torchvision_faster_rcnn.yaml +++ b/mart/configs/model/torchvision_faster_rcnn.yaml @@ -50,10 +50,21 @@ validation_sequence: losses_and_detections: ["preprocessor", "target"] seq030: + loss: + # Sum up the losses. + [ + "losses_and_detections.training.loss_objectness", + "losses_and_detections.training.loss_rpn_box_reg", + "losses_and_detections.training.loss_classifier", + "losses_and_detections.training.loss_box_reg", + ] + + seq040: output: { "preds": "losses_and_detections.eval", "target": "target", + "loss": "loss", "rpn_loss.loss_objectness": "losses_and_detections.training.loss_objectness", "rpn_loss.loss_rpn_box_reg": "losses_and_detections.training.loss_rpn_box_reg", "box_loss.loss_classifier": "losses_and_detections.training.loss_classifier", @@ -68,10 +79,21 @@ test_sequence: losses_and_detections: ["preprocessor", "target"] seq030: + loss: + # Sum up the losses. 
+ [ + "losses_and_detections.training.loss_objectness", + "losses_and_detections.training.loss_rpn_box_reg", + "losses_and_detections.training.loss_classifier", + "losses_and_detections.training.loss_box_reg", + ] + + seq040: output: { "preds": "losses_and_detections.eval", "target": "target", + "loss": "loss", "rpn_loss.loss_objectness": "losses_and_detections.training.loss_objectness", "rpn_loss.loss_rpn_box_reg": "losses_and_detections.training.loss_rpn_box_reg", "box_loss.loss_classifier": "losses_and_detections.training.loss_classifier", From c1cb07f71fb58b81ff33967e81a75c487a79d55d Mon Sep 17 00:00:00 2001 From: Cory Cornelius Date: Fri, 24 Mar 2023 07:38:11 -0700 Subject: [PATCH 078/163] comment --- mart/attack/adversary.py | 1 + 1 file changed, 1 insertion(+) diff --git a/mart/attack/adversary.py b/mart/attack/adversary.py index b229e59a..535be9db 100644 --- a/mart/attack/adversary.py +++ b/mart/attack/adversary.py @@ -74,6 +74,7 @@ def forward(self, **batch): # get a loss. if "model" in batch and "sequence" in batch: # Late bind attacker on same device as input + # FIXME: It would be nice if we could do something like: self.attacker.configure_accelerator() if isinstance(self.attacker, partial): inputs = batch["input"] From b6afdd14b276987d821905a1844ec7c4df611621 Mon Sep 17 00:00:00 2001 From: Cory Cornelius Date: Fri, 24 Mar 2023 08:51:42 -0700 Subject: [PATCH 079/163] Make Adversary and Perturber tuple-aware --- mart/attack/adversary.py | 77 ++++++++++++------- mart/attack/perturber.py | 54 +++++++++---- .../attack/composer/batch_additive.yaml | 5 -- .../attack/composer/batch_overlay.yaml | 5 -- mart/configs/attack/enforcer/batch.yaml | 4 - .../object_detection_mask_adversary.yaml | 4 +- tests/test_perturber.py | 4 + 7 files changed, 92 insertions(+), 61 deletions(-) delete mode 100644 mart/configs/attack/composer/batch_additive.yaml delete mode 100644 mart/configs/attack/composer/batch_overlay.yaml delete mode 100644 
mart/configs/attack/enforcer/batch.yaml diff --git a/mart/attack/adversary.py b/mart/attack/adversary.py index 535be9db..50c78fe5 100644 --- a/mart/attack/adversary.py +++ b/mart/attack/adversary.py @@ -8,6 +8,7 @@ from functools import partial from itertools import cycle +from typing import Any import pytorch_lightning as pl import torch @@ -67,46 +68,66 @@ def __init__( self.enforcer = enforcer @silent() - def forward(self, **batch): + def forward(self, *, input: torch.Tensor | tuple, **batch): # Adversary lives within a sequence of model. To signal the adversary should attack, one # must pass a model to attack when calling the adversary. Since we do not know where the # Adversary lives inside the model, we also need the remaining sequence to be able to # get a loss. if "model" in batch and "sequence" in batch: - # Late bind attacker on same device as input - # FIXME: It would be nice if we could do something like: self.attacker.configure_accelerator() - if isinstance(self.attacker, partial): - inputs = batch["input"] + self._attack(input=input, **batch) - if isinstance(inputs, tuple): - inputs = inputs[0] + # Always use perturb the current input. + input_adv = self.perturber(input=input, **batch) - device = inputs.device + # Enforce constraints after the attack optimization ends. + if "model" in batch and "sequence" in batch: + self._enforce(input_adv, input=input, **batch) - if device.type == "cuda": - accelerator = "gpu" - devices = [device.index] + return input_adv - elif device.type == "cpu": - accelerator = "cpu" - devices = None + def _attack(self, input: torch.Tensor | tuple, **kwargs): + batch = {"input": input, **kwargs} - else: - accelerator = device.type - devices = [device.index] + # Attack, aka fit a perturbation, for one epoch by cycling over the same input batch. + # We use Trainer.limit_train_batches to control the number of attack iterations. 
+ attacker = self._initialize_attack(input) + attacker.fit_loop.max_epochs += 1 + attacker.fit(self.perturber, train_dataloaders=cycle([batch])) - self.attacker = self.attacker(accelerator=accelerator, devices=devices) + def _initialize_attack(self, input: torch.Tensor | tuple): + # Configure perturber to use batch inputs + self.perturber.configure_perturbation(input) - # Attack, aka fit a perturbation, for one epoch by cycling over the same input batch. - # We use Trainer.limit_train_batches to control the number of attack iterations. - self.attacker.fit_loop.max_epochs += 1 - self.attacker.fit(self.perturber, train_dataloaders=cycle([batch])) + if not isinstance(self.attacker, partial): + return self.attacker - # Always use perturb the current input. - input_adv = self.perturber(**batch) + # Convert torch.device to PL accelerator + device = input[0].device if isinstance(input, tuple) else input.device - if "model" in batch and "sequence" in batch: - # We only enforce constraints after the attack optimization ends. 
- self.enforcer(input_adv, **batch) + if device.type == "cuda": + accelerator = "gpu" + devices = [device.index] + elif device.type == "cpu": + accelerator = "cpu" + devices = None + else: + accelerator = device.type + devices = [device.index] - return input_adv + self.attacker = self.attacker(accelerator=accelerator, devices=devices) + + return self.attacker + + def _enforce( + self, + input_adv: torch.Tensor | tuple, + *, + input: torch.Tensor | tuple, + target: torch.Tensor | dict[str, Any] | tuple, + **kwargs, + ): + if not isinstance(input_adv, tuple): + self.enforcer(input_adv, input=input, target=target) + else: + for inp_adv, inp, tar in zip(input_adv, input, target): + self.enforcer(inp_adv, input=inp, target=tar) diff --git a/mart/attack/perturber.py b/mart/attack/perturber.py index b87b3211..24ae6d78 100644 --- a/mart/attack/perturber.py +++ b/mart/attack/perturber.py @@ -10,6 +10,7 @@ import pytorch_lightning as pl import torch +from pytorch_lightning.utilities.exceptions import MisconfigurationException from .gradient_modifier import GradientModifier from .projector import Projector @@ -57,17 +58,30 @@ def __init__( self.gain_output = gain self.objective_fn = objective - self._reset() + self.perturbation = None - def _reset(self): - self.perturbation = torch.nn.UninitializedBuffer(requires_grad=True) + def configure_perturbation(self, input: torch.Tensor | tuple): + def create_and_initialize(inp): + pert = torch.empty_like(inp) + self.initializer(pert) + return pert + + if not isinstance(input, tuple): + self.perturbation = create_and_initialize(input) + else: + self.perturbation = tuple(create_and_initialize(inp) for inp in input) def configure_optimizers(self): - # Reset perturbation each time fit is called - # FIXME: It would be nice if we didn't have to do this every fit. - self._reset() + if self.perturbation is None: + raise MisconfigurationException( + "You need to call the Perturber.configure_perturbation before fit." 
+ ) + + params = self.perturbation + if not isinstance(params, tuple): + params = (params,) - return self.optimizer_fn([self.perturbation]) + return self.optimizer_fn(params) def training_step(self, batch, batch_idx): # copy batch since we modify it and it is used internally @@ -112,15 +126,21 @@ def forward( target: torch.Tensor | dict[str, Any] | tuple, **kwargs, ): - # Materialize perturbation and initialize it - if torch.nn.parameter.is_lazy(self.perturbation): - self.perturbation.materialize(input.shape, device=input.device, dtype=torch.float32) - self.initializer(self.perturbation) - - # Project perturbation... - self.projector(self.perturbation, input, target) - - # Compose adversarial input. - input_adv = self.composer(self.perturbation, input=input, target=target) + if self.perturbation is None: + raise MisconfigurationException( + "You need to call the Perturber.configure_perturbation before forward." + ) + + def project_and_compose(pert, inp, tar): + self.projector(pert, inp, tar) + return self.composer(pert, input=inp, target=tar) + + if not isinstance(self.perturbation, tuple): + input_adv = project_and_compose(self.perturbation, input, target) + else: + input_adv = tuple( + project_and_compose(pert, inp, tar) + for pert, inp, tar in zip(self.perturbation, input, target) + ) return input_adv diff --git a/mart/configs/attack/composer/batch_additive.yaml b/mart/configs/attack/composer/batch_additive.yaml deleted file mode 100644 index 49ea1f8f..00000000 --- a/mart/configs/attack/composer/batch_additive.yaml +++ /dev/null @@ -1,5 +0,0 @@ -defaults: - - /attack/composer@composer: additive - -_target_: mart.attack.BatchComposer -composer: ??? 
diff --git a/mart/configs/attack/composer/batch_overlay.yaml b/mart/configs/attack/composer/batch_overlay.yaml deleted file mode 100644 index f69ce0ce..00000000 --- a/mart/configs/attack/composer/batch_overlay.yaml +++ /dev/null @@ -1,5 +0,0 @@ -defaults: - - /attack/composer@composer: overlay - -_target_: mart.attack.BatchComposer -composer: ??? diff --git a/mart/configs/attack/enforcer/batch.yaml b/mart/configs/attack/enforcer/batch.yaml deleted file mode 100644 index 5cc39350..00000000 --- a/mart/configs/attack/enforcer/batch.yaml +++ /dev/null @@ -1,4 +0,0 @@ -defaults: - - constraints: null - -_target_: mart.attack.enforcer.BatchEnforcer diff --git a/mart/configs/attack/object_detection_mask_adversary.yaml b/mart/configs/attack/object_detection_mask_adversary.yaml index d2754585..10041fdd 100644 --- a/mart/configs/attack/object_detection_mask_adversary.yaml +++ b/mart/configs/attack/object_detection_mask_adversary.yaml @@ -6,8 +6,8 @@ defaults: - projector: mask_range - callbacks: [image_visualizer] - objective: zero_ap - - composer: batch_overlay - - enforcer: batch + - composer: overlay + - enforcer: default - enforcer/constraints: [mask, pixel_range] # Make a 5-step attack for the demonstration purpose. 
diff --git a/tests/test_perturber.py b/tests/test_perturber.py index b1f39993..e25252dc 100644 --- a/tests/test_perturber.py +++ b/tests/test_perturber.py @@ -24,6 +24,8 @@ def test_forward(input_data, target_data): initializer=initializer, optimizer=None, composer=composer, projector=projector ) + perturber.configure_perturbation(input_data) + for _ in range(2): output_data = perturber(input=input_data, target=target_data) @@ -43,6 +45,8 @@ def test_configure_optimizers(input_data, target_data): initializer=initializer, optimizer=optimizer, composer=composer, projector=projector ) + perturber.configure_perturbation(input_data) + for _ in range(2): perturber.configure_optimizers() perturber(input=input_data, target=target_data) From 8e7bc211df71cacddc69a34e7da809357f46c6d6 Mon Sep 17 00:00:00 2001 From: Cory Cornelius Date: Fri, 24 Mar 2023 09:00:08 -0700 Subject: [PATCH 080/163] comment --- mart/attack/perturber.py | 1 + 1 file changed, 1 insertion(+) diff --git a/mart/attack/perturber.py b/mart/attack/perturber.py index 24ae6d78..eac67cef 100644 --- a/mart/attack/perturber.py +++ b/mart/attack/perturber.py @@ -79,6 +79,7 @@ def configure_optimizers(self): params = self.perturbation if not isinstance(params, tuple): + # FIXME: Should we treat the batch dimension as independent parameters? params = (params,) return self.optimizer_fn(params) From 5181dcf3162692455e6dc3462a1e545101bd9147 Mon Sep 17 00:00:00 2001 From: Weilin Xu Date: Fri, 24 Mar 2023 09:45:20 -0700 Subject: [PATCH 081/163] Fix gain. 
--- mart/configs/attack/classification_eps8_pgd10_step1.yaml | 2 ++ 1 file changed, 2 insertions(+) diff --git a/mart/configs/attack/classification_eps8_pgd10_step1.yaml b/mart/configs/attack/classification_eps8_pgd10_step1.yaml index ab5ff843..2b6241b9 100644 --- a/mart/configs/attack/classification_eps8_pgd10_step1.yaml +++ b/mart/configs/attack/classification_eps8_pgd10_step1.yaml @@ -20,6 +20,8 @@ optimizer: max_iters: 10 +gain: "loss" + initializer: eps: 8 From fb2f323feda675d1e51dcd4df2dc674160624ebf Mon Sep 17 00:00:00 2001 From: Weilin Xu Date: Fri, 24 Mar 2023 10:02:02 -0700 Subject: [PATCH 082/163] Make composer and enforcer aware of modalities. --- mart/attack/composer.py | 8 ++++++-- mart/attack/enforcer.py | 4 +++- 2 files changed, 9 insertions(+), 3 deletions(-) diff --git a/mart/attack/composer.py b/mart/attack/composer.py index c3f1c738..04948ff3 100644 --- a/mart/attack/composer.py +++ b/mart/attack/composer.py @@ -43,8 +43,11 @@ def forward( output = [] for input_i, target_i, perturbation_i in zip(input, target, perturbation): - output_i = self.composer(perturbation_i, input=input_i, target=target_i, **kwargs) - output.append(output_i) + # FIXME: Make it modality-aware. + output_i = self.composer( + perturbation_i["rgb"], input=input_i["rgb"], target=target_i, **kwargs + ) + output.append({"rgb": output_i}) if isinstance(input, torch.Tensor): output = torch.stack(output) @@ -82,6 +85,7 @@ def forward( # Convert mask to a Tensor with same torch.dtype and torch.device as input, # because some data modules (e.g. Armory) gives binary mask. 
+ # FIXME: input can be a dictionary {"rgb": tensor} mask = mask.to(input) return input * (1 - mask) + perturbation * mask diff --git a/mart/attack/enforcer.py b/mart/attack/enforcer.py index 2a1a9c15..1506a230 100644 --- a/mart/attack/enforcer.py +++ b/mart/attack/enforcer.py @@ -96,6 +96,8 @@ def __call__( *, input: torch.Tensor | tuple, target: torch.Tensor | dict[str, Any] | tuple, + **kwargs, ) -> torch.Tensor | tuple: for input_adv_i, input_i, target_i in zip(input_adv, input, target): - self._check_constraints(input_adv_i, input=input_i, target=target_i) + # FIXME: Make it modality-aware. + self._check_constraints(input_adv_i["rgb"], input=input_i["rgb"], target=target_i) From 927a0a9e010f5ced026bbcbd7a4aafef3a0fd1f9 Mon Sep 17 00:00:00 2001 From: Weilin Xu Date: Fri, 24 Mar 2023 10:03:16 -0700 Subject: [PATCH 083/163] Make adversary aware of modalities. --- mart/attack/adversary.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/mart/attack/adversary.py b/mart/attack/adversary.py index b229e59a..70e04e69 100644 --- a/mart/attack/adversary.py +++ b/mart/attack/adversary.py @@ -78,7 +78,8 @@ def forward(self, **batch): inputs = batch["input"] if isinstance(inputs, tuple): - inputs = inputs[0] + # FIXME: Make it modality-aware to get a tensor. + inputs = inputs[0]["rgb"] device = inputs.device From be97fae1ba6aef32d6e358ad997a1b96a95ad1a8 Mon Sep 17 00:00:00 2001 From: Weilin Xu Date: Fri, 24 Mar 2023 10:04:34 -0700 Subject: [PATCH 084/163] Add PerturbationManager for managing tensors and param groups. 
--- mart/attack/perturbation_manager.py | 112 ++++++++++++++++++++++++++++ tests/test_perturbation_manager.py | 60 +++++++++++++++ 2 files changed, 172 insertions(+) create mode 100644 mart/attack/perturbation_manager.py create mode 100644 tests/test_perturbation_manager.py diff --git a/mart/attack/perturbation_manager.py b/mart/attack/perturbation_manager.py new file mode 100644 index 00000000..85a8257c --- /dev/null +++ b/mart/attack/perturbation_manager.py @@ -0,0 +1,112 @@ +# +# Copyright (C) 2022 Intel Corporation +# +# SPDX-License-Identifier: BSD-3-Clause +# + +from __future__ import annotations + +import itertools +from typing import Callable + +import torch + + +class PerturbationManager: + def __init__( + self, + *, + initializer: Callable | dict, + gradient_modifier: Callable | dict | None = None, + projector: Callable | dict | None = None, + optim_params: dict | None = None, + ) -> None: + + if not isinstance(initializer, dict): + initializer = {None: initializer} + + if gradient_modifier is None and not isinstance(gradient_modifier, dict): + gradient_modifier = {None: gradient_modifier} + + if projector is not None and not isinstance(projector, dict): + projector = {None: projector} + + self.initializer = initializer + self.gradient_modifier = gradient_modifier + self.projector = projector + + if optim_params is None: + optim_params = {None: {}} | {modality: {} for modality in self.initializer.keys()} + self.optim_params = optim_params + + self._perturbation = None + + @property + def perturbation(self): + # Return perturbation that is homomorphic with input, even if the underlying perturbation could be its sub-components. + return self._perturbation + + def initialize(self, input): + # Create raw perturbation that is used to produce parameter groups for optimization. 
+ self._perturbation = self._initialize(input) + + def _initialize(self, input, modality=None): + """Recursively materialize and initialize perturbation that is homomorphic as input; Hook + gradient modifiers.""" + if isinstance(input, torch.Tensor): + # Materialize. + pert = torch.zeros_like(input, requires_grad=True) + + # Initialize. + self.initializer[modality](pert) + + # Gradient modifier hook. + # TODO: self.gradient_modifier[modality](pert) + if self.gradient_modifier is not None: + pert.register_hook(lambda grad: grad.sign()) + + return pert + elif isinstance(input, dict): + return {modality: self._initialize(inp, modality) for modality, inp in input.items()} + elif isinstance(input, list): + return [self._initialize(inp) for inp in input] + elif isinstance(input, tuple): + return tuple(self._initialize(inp) for inp in input) + + def project(self, input, target): + if self.projector is not None: + self._project(self._perturbation, input, target) + + def _project(self, perturbation, input, target, modality=None): + """Recursively project perturbation tensors that may hide behind dictionaries, list or + tuple.""" + if isinstance(input, torch.Tensor): + self.projector[modality](perturbation, input, target) + elif isinstance(input, dict): + for modality_i, input_i in input.items(): + self._project(perturbation[modality_i], input_i, target, modality=modality_i) + elif isinstance(input, list) or isinstance(input, tuple): + for perturbation_i, input_i, target_i in zip(perturbation, input, target): + self._project(perturbation_i, input_i, target_i, modality=modality) + + def parameter_groups(self): + param_groups = self._parameter_groups(self._perturbation) + return param_groups + + def _parameter_groups(self, pert, modality=None): + """Return parameter groups as a list of dictionaries.""" + + if isinstance(pert, torch.Tensor): + return [{"params": pert} | self.optim_params[modality]] + elif isinstance(pert, dict): + ret = [self._parameter_groups(pert_i, modality) 
for modality, pert_i in pert.items()] + return list(itertools.chain.from_iterable(ret)) + elif isinstance(pert, list) or isinstance(pert, tuple): + param_list = [] + for pert_i in pert: + param_list.extend(self._parameter_groups(pert_i)) + return param_list + + def __call__(self, input): + self.initialize(input) + return self.perturbation diff --git a/tests/test_perturbation_manager.py b/tests/test_perturbation_manager.py new file mode 100644 index 00000000..09bf7713 --- /dev/null +++ b/tests/test_perturbation_manager.py @@ -0,0 +1,60 @@ +from typing import Iterable + +import torch + +from mart.attack.initializer import Constant +from mart.attack.perturbation_manager import PerturbationManager + + +def test_perturbation_tensor(): + input_data = torch.tensor([1.0, 2.0]) + initializer = Constant(constant=0) + + pert_manager = PerturbationManager(initializer=initializer) + + pert = pert_manager(input_data) + assert isinstance(pert, torch.Tensor) + assert pert.shape == pert.shape + assert (pert == 0).all() + + param_groups = pert_manager.parameter_groups() + assert isinstance(param_groups, Iterable) + assert param_groups[0]["params"].requires_grad + + +def test_perturbation_dict(): + input_data = {"rgb": torch.tensor([1.0, 2.0]), "depth": torch.tensor([1.0, 2.0])} + initializer = {"rgb": Constant(constant=0), "depth": Constant(constant=1)} + pert_manager = PerturbationManager(initializer=initializer) + + pert = pert_manager(input_data) + assert isinstance(pert, dict) + assert (pert["rgb"] == 0).all() + assert (pert["depth"] == 1).all() + + param_groups = pert_manager.parameter_groups() + assert len(param_groups) == 2 + param_groups = list(param_groups) + assert param_groups[0]["params"].requires_grad + # assert (param_groups[0]["params"] == 0).all() + + +def test_perturbation_tuple_dict(): + input_data = ( + {"rgb": torch.tensor([1.0, 2.0]), "depth": torch.tensor([3.0, 4.0])}, + {"rgb": torch.tensor([-1.0, -2.0]), "depth": torch.tensor([-3.0, -4.0])}, + ) + initializer 
= {"rgb": Constant(constant=0), "depth": Constant(constant=1)} + pert_manager = PerturbationManager(initializer=initializer) + + pert = pert_manager(input_data) + assert isinstance(pert, tuple) + assert (pert[0]["rgb"] == 0).all() + assert (pert[0]["depth"] == 1).all() + assert (pert[1]["rgb"] == 0).all() + assert (pert[1]["depth"] == 1).all() + + param_groups = pert_manager.parameter_groups() + assert len(param_groups) == 4 + param_groups = list(param_groups) + assert param_groups[0]["params"].requires_grad From f42ea996dc299652361841de9e9f2dad4044145e Mon Sep 17 00:00:00 2001 From: Weilin Xu Date: Fri, 24 Mar 2023 10:05:42 -0700 Subject: [PATCH 085/163] Add loss module in object detection. --- mart/configs/model/torchvision_faster_rcnn.yaml | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/mart/configs/model/torchvision_faster_rcnn.yaml b/mart/configs/model/torchvision_faster_rcnn.yaml index 0187b99f..8a1dded6 100644 --- a/mart/configs/model/torchvision_faster_rcnn.yaml +++ b/mart/configs/model/torchvision_faster_rcnn.yaml @@ -68,6 +68,16 @@ test_sequence: losses_and_detections: ["preprocessor", "target"] seq030: + loss: + # Sum up the losses. + [ + "losses_and_detections.training.loss_objectness", + "losses_and_detections.training.loss_rpn_box_reg", + "losses_and_detections.training.loss_classifier", + "losses_and_detections.training.loss_box_reg", + ] + + seq040: output: { "preds": "losses_and_detections.eval", @@ -76,6 +86,7 @@ test_sequence: "rpn_loss.loss_rpn_box_reg": "losses_and_detections.training.loss_rpn_box_reg", "box_loss.loss_classifier": "losses_and_detections.training.loss_classifier", "box_loss.loss_box_reg": "losses_and_detections.training.loss_box_reg", + "loss": "loss", } modules: From 2a46b3cd8440ddbdc5d5a6b2affced19658f110c Mon Sep 17 00:00:00 2001 From: Weilin Xu Date: Fri, 24 Mar 2023 10:07:18 -0700 Subject: [PATCH 086/163] Use perturbation manager in Perturber. 
--- mart/attack/adversary.py | 4 ++++ mart/attack/perturber.py | 51 ++++++++++++++++++---------------------- 2 files changed, 27 insertions(+), 28 deletions(-) diff --git a/mart/attack/adversary.py b/mart/attack/adversary.py index 70e04e69..64138fef 100644 --- a/mart/attack/adversary.py +++ b/mart/attack/adversary.py @@ -100,6 +100,10 @@ def forward(self, **batch): # Attack, aka fit a perturbation, for one epoch by cycling over the same input batch. # We use Trainer.limit_train_batches to control the number of attack iterations. self.attacker.fit_loop.max_epochs += 1 + + # Initialize perturbation with input. + self.perturber.initialize(**batch) + self.attacker.fit(self.perturber, train_dataloaders=cycle([batch])) # Always use perturb the current input. diff --git a/mart/attack/perturber.py b/mart/attack/perturber.py index b87b3211..23da11e7 100644 --- a/mart/attack/perturber.py +++ b/mart/attack/perturber.py @@ -12,6 +12,7 @@ import torch from .gradient_modifier import GradientModifier +from .perturbation_manager import PerturbationManager from .projector import Projector if TYPE_CHECKING: @@ -35,6 +36,7 @@ def __init__( projector: Projector | None = None, gain: str = "loss", objective: Objective | None = None, + optim_params: dict | None = None, ): """_summary_ @@ -49,25 +51,34 @@ def __init__( """ super().__init__() - self.initializer = initializer self.optimizer_fn = optimizer self.composer = composer - self.gradient_modifier = gradient_modifier or GradientModifier() - self.projector = projector or Projector() self.gain_output = gain self.objective_fn = objective - self._reset() + # An object manage the perturbation in both the tensor and the parameter form. + # FIXME: gradient_modifier should be a hook operating on .grad directly. 
+ self.pert_manager = PerturbationManager( + initializer=initializer, + gradient_modifier=gradient_modifier, + projector=projector, + optim_params=optim_params, + ) - def _reset(self): - self.perturbation = torch.nn.UninitializedBuffer(requires_grad=True) + def initialize(self, *, input, **kwargs): + self.pert_manager.initialize(input) - def configure_optimizers(self): - # Reset perturbation each time fit is called - # FIXME: It would be nice if we didn't have to do this every fit. - self._reset() + def project(self, input, target): + return self.pert_manager.project(input, target) - return self.optimizer_fn([self.perturbation]) + @property + def perturbation(self): + return self.pert_manager.perturbation + + def configure_optimizers(self): + # Parameter initialization is done in Adversary before fit() by invoking initialize(input). + param_groups = self.pert_manager.parameter_groups() + return self.optimizer_fn(param_groups) def training_step(self, batch, batch_idx): # copy batch since we modify it and it is used internally @@ -94,17 +105,6 @@ def training_step(self, batch, batch_idx): return gain - def configure_gradient_clipping( - self, optimizer, optimizer_idx, gradient_clip_val=None, gradient_clip_algorithm=None - ): - # Configuring gradient clipping in pl.Trainer is still useful, so use it. - super().configure_gradient_clipping( - optimizer, optimizer_idx, gradient_clip_val, gradient_clip_algorithm - ) - - for group in optimizer.param_groups: - self.gradient_modifier(group["params"]) - def forward( self, *, @@ -112,13 +112,8 @@ def forward( target: torch.Tensor | dict[str, Any] | tuple, **kwargs, ): - # Materialize perturbation and initialize it - if torch.nn.parameter.is_lazy(self.perturbation): - self.perturbation.materialize(input.shape, device=input.device, dtype=torch.float32) - self.initializer(self.perturbation) - # Project perturbation... - self.projector(self.perturbation, input, target) + self.project(input, target) # Compose adversarial input. 
input_adv = self.composer(self.perturbation, input=input, target=target) From 4f22113d1cd264794fbc865f427a11176d11a4f5 Mon Sep 17 00:00:00 2001 From: Weilin Xu Date: Fri, 24 Mar 2023 10:07:49 -0700 Subject: [PATCH 087/163] Update object detection adversary config. --- .../attack/object_detection_mask_adversary.yaml | 14 ++++++++------ 1 file changed, 8 insertions(+), 6 deletions(-) diff --git a/mart/configs/attack/object_detection_mask_adversary.yaml b/mart/configs/attack/object_detection_mask_adversary.yaml index aa664819..696844c1 100644 --- a/mart/configs/attack/object_detection_mask_adversary.yaml +++ b/mart/configs/attack/object_detection_mask_adversary.yaml @@ -1,12 +1,11 @@ defaults: - adversary - optimizer: sgd - - initializer: constant - - gradient_modifier: sign - - projector: mask_range - - callbacks: [progress_bar, image_visualizer] + - initializer@initializer.rgb: constant + - gradient_modifier@gradient_modifier.rgb: sign + - projector@projector.rgb: mask_range + # - callbacks: [progress_bar, image_visualizer] - objective: zero_ap - - gain: rcnn_training_loss - composer: batch_overlay - enforcer: batch - enforcer/constraints: [mask, pixel_range] @@ -17,5 +16,8 @@ optimizer: max_iters: 5 +gain: "loss" + initializer: - constant: 127 + rgb: + constant: 127 From c6bd29fef1c28e199b1202e90b52f2729195c93f Mon Sep 17 00:00:00 2001 From: Cory Cornelius Date: Fri, 24 Mar 2023 10:43:33 -0700 Subject: [PATCH 088/163] Update tests and fix bug --- mart/attack/perturber.py | 6 ++-- tests/test_perturber.py | 62 ++++++++++++++++++++++++++++++++++++++++ 2 files changed, 65 insertions(+), 3 deletions(-) diff --git a/mart/attack/perturber.py b/mart/attack/perturber.py index eac67cef..a95e4932 100644 --- a/mart/attack/perturber.py +++ b/mart/attack/perturber.py @@ -62,7 +62,7 @@ def __init__( def configure_perturbation(self, input: torch.Tensor | tuple): def create_and_initialize(inp): - pert = torch.empty_like(inp) + pert = torch.empty_like(inp, dtype=torch.float, 
requires_grad=True) self.initializer(pert) return pert @@ -74,7 +74,7 @@ def create_and_initialize(inp): def configure_optimizers(self): if self.perturbation is None: raise MisconfigurationException( - "You need to call the Perturber.configure_perturbation before fit." + "You need to call the configure_perturbation before fit." ) params = self.perturbation @@ -129,7 +129,7 @@ def forward( ): if self.perturbation is None: raise MisconfigurationException( - "You need to call the Perturber.configure_perturbation before forward." + "You need to call the configure_perturbation before forward." ) def project_and_compose(pert, inp, tar): diff --git a/tests/test_perturber.py b/tests/test_perturber.py index e25252dc..341eb12c 100644 --- a/tests/test_perturber.py +++ b/tests/test_perturber.py @@ -9,12 +9,28 @@ import pytest import torch +from pytorch_lightning.utilities.exceptions import MisconfigurationException import mart from mart.attack.adversary import Adversary from mart.attack.perturber import Perturber +def test_configure_perturbation(input_data): + initializer = Mock() + projector = Mock() + composer = Mock() + + perturber = Perturber( + initializer=initializer, optimizer=None, composer=composer, projector=projector + ) + + perturber.configure_perturbation(input_data) + + initializer.assert_called_once() + projector.assert_not_called() + composer.assert_not_called() + def test_forward(input_data, target_data): initializer = mart.attack.initializer.Constant(1337) projector = Mock() @@ -35,6 +51,19 @@ def test_forward(input_data, target_data): assert projector.call_count == 2 +def test_forward_fails(input_data, target_data): + initializer = mart.attack.initializer.Constant(1337) + projector = Mock() + composer = mart.attack.composer.Additive() + + perturber = Perturber( + initializer=initializer, optimizer=None, composer=composer, projector=projector + ) + + with pytest.raises(MisconfigurationException): + output_data = perturber(input=input_data, target=target_data) 
+ + def test_configure_optimizers(input_data, target_data): initializer = mart.attack.initializer.Constant(1337) optimizer = Mock() @@ -55,6 +84,39 @@ def test_configure_optimizers(input_data, target_data): assert projector.call_count == 2 +def test_configure_optimizers_fails(): + initializer = mart.attack.initializer.Constant(1337) + optimizer = Mock() + projector = Mock() + composer = mart.attack.composer.Additive() + + perturber = Perturber( + initializer=initializer, optimizer=optimizer, composer=composer, projector=projector + ) + + with pytest.raises(MisconfigurationException): + perturber.configure_optimizers() + + +def test_optimizer_parameters_with_gradient(input_data, target_data): + initializer = mart.attack.initializer.Constant(1337) + optimizer = lambda params: torch.optim.SGD(params, lr=0) + projector = Mock() + composer = mart.attack.composer.Additive() + + perturber = Perturber( + initializer=initializer, optimizer=optimizer, composer=composer, projector=projector + ) + + perturber.configure_perturbation(input_data) + opt = perturber.configure_optimizers() + + # Make sure each parameter in optimizer requires a gradient + for param_group in opt.param_groups: + for param in param_group["params"]: + assert param.requires_grad + + def test_training_step(input_data, target_data): initializer = mart.attack.initializer.Constant(1337) optimizer = Mock() From 739e4dbc3446d4de66ae24ab5689c6dfe2b1ce2d Mon Sep 17 00:00:00 2001 From: Cory Cornelius Date: Fri, 24 Mar 2023 10:50:08 -0700 Subject: [PATCH 089/163] style --- tests/test_perturber.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/tests/test_perturber.py b/tests/test_perturber.py index 341eb12c..a23d025e 100644 --- a/tests/test_perturber.py +++ b/tests/test_perturber.py @@ -5,6 +5,7 @@ # import importlib +from functools import partial from unittest.mock import Mock, patch import pytest @@ -31,6 +32,7 @@ def test_configure_perturbation(input_data): projector.assert_not_called() 
composer.assert_not_called() + def test_forward(input_data, target_data): initializer = mart.attack.initializer.Constant(1337) projector = Mock() @@ -100,7 +102,7 @@ def test_configure_optimizers_fails(): def test_optimizer_parameters_with_gradient(input_data, target_data): initializer = mart.attack.initializer.Constant(1337) - optimizer = lambda params: torch.optim.SGD(params, lr=0) + optimizer = partial(torch.optim.SGD, lr=0) projector = Mock() composer = mart.attack.composer.Additive() From e581452ba1f5d3e2253c0ddf8bacd8512fd6e4cd Mon Sep 17 00:00:00 2001 From: Cory Cornelius Date: Fri, 24 Mar 2023 11:13:26 -0700 Subject: [PATCH 090/163] Remove BatchEnforcer and BatchComposer --- mart/attack/composer.py | 30 ------------------------------ mart/attack/enforcer.py | 13 ------------- 2 files changed, 43 deletions(-) diff --git a/mart/attack/composer.py b/mart/attack/composer.py index c3f1c738..6fff22e0 100644 --- a/mart/attack/composer.py +++ b/mart/attack/composer.py @@ -11,8 +11,6 @@ import torch -__all__ = ["BatchComposer"] - class Composer(torch.nn.Module, abc.ABC): @abc.abstractclassmethod @@ -26,34 +24,6 @@ def forward( raise NotImplementedError -class BatchComposer(Composer): - def __init__(self, composer: Composer): - super().__init__() - - self.composer = composer - - def forward( - self, - perturbation: torch.Tensor | tuple, - *, - input: torch.Tensor | tuple, - target: torch.Tensor | dict[str, Any] | tuple, - **kwargs, - ) -> torch.Tensor | tuple: - output = [] - - for input_i, target_i, perturbation_i in zip(input, target, perturbation): - output_i = self.composer(perturbation_i, input=input_i, target=target_i, **kwargs) - output.append(output_i) - - if isinstance(input, torch.Tensor): - output = torch.stack(output) - else: - output = tuple(output) - - return output - - class Additive(Composer): """We assume an adversary adds perturbation to the input.""" diff --git a/mart/attack/enforcer.py b/mart/attack/enforcer.py index 2a1a9c15..5e120532 100644 --- 
a/mart/attack/enforcer.py +++ b/mart/attack/enforcer.py @@ -86,16 +86,3 @@ def _check_constraints(self, input_adv, *, input, target): @torch.no_grad() def __call__(self, input_adv, *, input, target, **kwargs): self._check_constraints(input_adv, input=input, target=target) - - -class BatchEnforcer(Enforcer): - @torch.no_grad() - def __call__( - self, - input_adv: torch.Tensor | tuple, - *, - input: torch.Tensor | tuple, - target: torch.Tensor | dict[str, Any] | tuple, - ) -> torch.Tensor | tuple: - for input_adv_i, input_i, target_i in zip(input_adv, input, target): - self._check_constraints(input_adv_i, input=input_i, target=target_i) From 911998f471e1370f497e9ce789f4d267194fab2e Mon Sep 17 00:00:00 2001 From: Cory Cornelius Date: Fri, 24 Mar 2023 11:32:56 -0700 Subject: [PATCH 091/163] Revert to old gain functionality --- mart/attack/perturber.py | 10 +++++---- .../attack/classification_eps1.75_fgsm.yaml | 2 -- .../classification_eps2_pgd10_step1.yaml | 2 -- .../object_detection_mask_adversary.yaml | 3 +-- mart/configs/model/classifier.yaml | 10 ++------- .../model/torchvision_faster_rcnn.yaml | 22 ------------------- 6 files changed, 9 insertions(+), 40 deletions(-) diff --git a/mart/attack/perturber.py b/mart/attack/perturber.py index a95e4932..49fa9cad 100644 --- a/mart/attack/perturber.py +++ b/mart/attack/perturber.py @@ -17,6 +17,7 @@ if TYPE_CHECKING: from .composer import Composer + from .gain import Gain from .initializer import Initializer from .objective import Objective @@ -32,9 +33,9 @@ def __init__( initializer: Initializer, optimizer: Callable, composer: Composer, + gain: Gain, gradient_modifier: GradientModifier | None = None, projector: Projector | None = None, - gain: str = "loss", objective: Objective | None = None, ): """_summary_ @@ -43,9 +44,9 @@ def __init__( initializer (Initializer): To initialize the perturbation. optimizer (torch.optim.Optimizer): A PyTorch optimizer. 
composer (Composer): A module which composes adversarial input from input and perturbation. + gain (Gain): An adversarial gain function, which is a differentiable estimate of adversarial objective. gradient_modifier (GradientModifier): To modify the gradient of perturbation. projector (Projector): To project the perturbation into some space. - gain (str): Which output to use as an adversarial gain function, which is a differentiable estimate of adversarial objective. (default: loss) objective (Objective): A function for computing adversarial objective, which returns True or False. Optional. """ super().__init__() @@ -55,7 +56,7 @@ def __init__( self.composer = composer self.gradient_modifier = gradient_modifier or GradientModifier() self.projector = projector or Projector() - self.gain_output = gain + self.gain_fn = gain self.objective_fn = objective self.perturbation = None @@ -94,7 +95,8 @@ def training_step(self, batch, batch_idx): # FIXME: This should really be just `return outputs`. But this might require a new sequence? # FIXME: Everything below here should live in the model as modules. - gain = outputs[self.gain_output] + # Use CallWith to dispatch **outputs. + gain = self.gain_fn(**outputs) # objective_fn is optional, because adversaries may never reach their objective. 
if self.objective_fn is not None: diff --git a/mart/configs/attack/classification_eps1.75_fgsm.yaml b/mart/configs/attack/classification_eps1.75_fgsm.yaml index d54b3546..3009a725 100644 --- a/mart/configs/attack/classification_eps1.75_fgsm.yaml +++ b/mart/configs/attack/classification_eps1.75_fgsm.yaml @@ -20,8 +20,6 @@ optimizer: max_iters: 1 -gain: "loss" - initializer: constant: 0 diff --git a/mart/configs/attack/classification_eps2_pgd10_step1.yaml b/mart/configs/attack/classification_eps2_pgd10_step1.yaml index a4413ef1..9c6be04b 100644 --- a/mart/configs/attack/classification_eps2_pgd10_step1.yaml +++ b/mart/configs/attack/classification_eps2_pgd10_step1.yaml @@ -20,8 +20,6 @@ optimizer: max_iters: 10 -gain: "loss" - initializer: eps: 2 diff --git a/mart/configs/attack/object_detection_mask_adversary.yaml b/mart/configs/attack/object_detection_mask_adversary.yaml index 10041fdd..e069b45c 100644 --- a/mart/configs/attack/object_detection_mask_adversary.yaml +++ b/mart/configs/attack/object_detection_mask_adversary.yaml @@ -6,6 +6,7 @@ defaults: - projector: mask_range - callbacks: [image_visualizer] - objective: zero_ap + - gain: rcnn_training_loss - composer: overlay - enforcer: default - enforcer/constraints: [mask, pixel_range] @@ -16,7 +17,5 @@ optimizer: max_iters: 5 -gain: "loss" - initializer: constant: 127 diff --git a/mart/configs/model/classifier.yaml b/mart/configs/model/classifier.yaml index 50a3fff0..ad664989 100644 --- a/mart/configs/model/classifier.yaml +++ b/mart/configs/model/classifier.yaml @@ -34,16 +34,12 @@ validation_sequence: - preprocessor: tensor: input - logits: ["preprocessor"] - - loss: - input: logits - target: target - preds: input: logits - output: preds: preds target: target logits: logits - loss: loss # The simplified version. # We treat a list as the `_call_with_args_` parameter. 
@@ -53,11 +49,9 @@ test_sequence: seq020: logits: ["preprocessor"] seq030: - loss: ["logits", "target"] - seq040: preds: ["logits"] - seq050: - output: { preds: preds, target: target, logits: logits, loss: loss } + seq040: + output: { preds: preds, target: target, logits: logits } modules: preprocessor: ??? diff --git a/mart/configs/model/torchvision_faster_rcnn.yaml b/mart/configs/model/torchvision_faster_rcnn.yaml index 8c62182f..0187b99f 100644 --- a/mart/configs/model/torchvision_faster_rcnn.yaml +++ b/mart/configs/model/torchvision_faster_rcnn.yaml @@ -50,21 +50,10 @@ validation_sequence: losses_and_detections: ["preprocessor", "target"] seq030: - loss: - # Sum up the losses. - [ - "losses_and_detections.training.loss_objectness", - "losses_and_detections.training.loss_rpn_box_reg", - "losses_and_detections.training.loss_classifier", - "losses_and_detections.training.loss_box_reg", - ] - - seq040: output: { "preds": "losses_and_detections.eval", "target": "target", - "loss": "loss", "rpn_loss.loss_objectness": "losses_and_detections.training.loss_objectness", "rpn_loss.loss_rpn_box_reg": "losses_and_detections.training.loss_rpn_box_reg", "box_loss.loss_classifier": "losses_and_detections.training.loss_classifier", @@ -79,21 +68,10 @@ test_sequence: losses_and_detections: ["preprocessor", "target"] seq030: - loss: - # Sum up the losses. 
- [ - "losses_and_detections.training.loss_objectness", - "losses_and_detections.training.loss_rpn_box_reg", - "losses_and_detections.training.loss_classifier", - "losses_and_detections.training.loss_box_reg", - ] - - seq040: output: { "preds": "losses_and_detections.eval", "target": "target", - "loss": "loss", "rpn_loss.loss_objectness": "losses_and_detections.training.loss_objectness", "rpn_loss.loss_rpn_box_reg": "losses_and_detections.training.loss_rpn_box_reg", "box_loss.loss_classifier": "losses_and_detections.training.loss_classifier", From 77f9828139c31900817cef122593db55b6056284 Mon Sep 17 00:00:00 2001 From: Cory Cornelius Date: Fri, 24 Mar 2023 11:34:02 -0700 Subject: [PATCH 092/163] Revert change to enforcer --- mart/attack/enforcer.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/mart/attack/enforcer.py b/mart/attack/enforcer.py index 5e120532..c3b52046 100644 --- a/mart/attack/enforcer.py +++ b/mart/attack/enforcer.py @@ -84,5 +84,5 @@ def _check_constraints(self, input_adv, *, input, target): constraint(input_adv, input=input, target=target) @torch.no_grad() - def __call__(self, input_adv, *, input, target, **kwargs): + def __call__(self, input_adv, *, input, target): self._check_constraints(input_adv, input=input, target=target) From d435d1e89974cabf743e45692aba13d99332b788 Mon Sep 17 00:00:00 2001 From: Cory Cornelius Date: Fri, 24 Mar 2023 12:09:04 -0700 Subject: [PATCH 093/163] fix perturber tests to take gain --- tests/test_perturber.py | 67 ++++++++++++++++++++++++++++++----------- 1 file changed, 50 insertions(+), 17 deletions(-) diff --git a/tests/test_perturber.py b/tests/test_perturber.py index a23d025e..66a75e55 100644 --- a/tests/test_perturber.py +++ b/tests/test_perturber.py @@ -21,9 +21,10 @@ def test_configure_perturbation(input_data): initializer = Mock() projector = Mock() composer = Mock() + gain = Mock() perturber = Perturber( - initializer=initializer, optimizer=None, composer=composer, projector=projector + 
initializer=initializer, optimizer=None, composer=composer, projector=projector, gain=gain ) perturber.configure_perturbation(input_data) @@ -31,15 +32,17 @@ def test_configure_perturbation(input_data): initializer.assert_called_once() projector.assert_not_called() composer.assert_not_called() + gain.assert_not_called() def test_forward(input_data, target_data): initializer = mart.attack.initializer.Constant(1337) projector = Mock() composer = mart.attack.composer.Additive() + gain = Mock() perturber = Perturber( - initializer=initializer, optimizer=None, composer=composer, projector=projector + initializer=initializer, optimizer=None, composer=composer, projector=projector, gain=gain ) perturber.configure_perturbation(input_data) @@ -51,15 +54,17 @@ def test_forward(input_data, target_data): # perturber needs to project and compose perturbation on every call assert projector.call_count == 2 + gain.assert_not_called() def test_forward_fails(input_data, target_data): initializer = mart.attack.initializer.Constant(1337) projector = Mock() composer = mart.attack.composer.Additive() + gain = Mock() perturber = Perturber( - initializer=initializer, optimizer=None, composer=composer, projector=projector + initializer=initializer, optimizer=None, composer=composer, projector=projector, gain=gain ) with pytest.raises(MisconfigurationException): @@ -71,9 +76,14 @@ def test_configure_optimizers(input_data, target_data): optimizer = Mock() projector = Mock() composer = mart.attack.composer.Additive() + gain = Mock() perturber = Perturber( - initializer=initializer, optimizer=optimizer, composer=composer, projector=projector + initializer=initializer, + optimizer=optimizer, + composer=composer, + projector=projector, + gain=gain, ) perturber.configure_perturbation(input_data) @@ -84,6 +94,7 @@ def test_configure_optimizers(input_data, target_data): assert optimizer.call_count == 2 assert projector.call_count == 2 + gain.assert_not_called() def 
test_configure_optimizers_fails(): @@ -91,9 +102,14 @@ def test_configure_optimizers_fails(): optimizer = Mock() projector = Mock() composer = mart.attack.composer.Additive() + gain = Mock() perturber = Perturber( - initializer=initializer, optimizer=optimizer, composer=composer, projector=projector + initializer=initializer, + optimizer=optimizer, + composer=composer, + projector=projector, + gain=gain, ) with pytest.raises(MisconfigurationException): @@ -105,9 +121,14 @@ def test_optimizer_parameters_with_gradient(input_data, target_data): optimizer = partial(torch.optim.SGD, lr=0) projector = Mock() composer = mart.attack.composer.Additive() + gain = Mock() perturber = Perturber( - initializer=initializer, optimizer=optimizer, composer=composer, projector=projector + initializer=initializer, + optimizer=optimizer, + composer=composer, + projector=projector, + gain=gain, ) perturber.configure_perturbation(input_data) @@ -124,18 +145,23 @@ def test_training_step(input_data, target_data): optimizer = Mock() projector = Mock() composer = mart.attack.composer.Additive() - gain = Mock(shape=[]) - model = Mock(return_value={"loss": gain}) + gain = Mock(return_value=torch.tensor(1337)) + model = Mock(return_value={}) perturber = Perturber( - initializer=initializer, optimizer=optimizer, composer=composer, projector=projector + initializer=initializer, + optimizer=optimizer, + composer=composer, + projector=projector, + gain=gain, ) output = perturber.training_step( {"input": input_data, "target": target_data, "model": model}, 0 ) - assert output == gain + gain.assert_called_once() + assert output == 1337 def test_training_step_with_many_gain(input_data, target_data): @@ -143,18 +169,22 @@ def test_training_step_with_many_gain(input_data, target_data): optimizer = Mock() projector = Mock() composer = mart.attack.composer.Additive() - gain = torch.tensor([1234, 5678]) - model = Mock(return_value={"loss": gain}) + gain = Mock(return_value=torch.tensor([1234, 5678])) + 
model = Mock(return_value={}) perturber = Perturber( - initializer=initializer, optimizer=optimizer, composer=composer, projector=projector + initializer=initializer, + optimizer=optimizer, + composer=composer, + projector=projector, + gain=gain, ) output = perturber.training_step( {"input": input_data, "target": target_data, "model": model}, 0 ) - assert output == gain.sum() + assert output == 1234 + 5678 def test_training_step_with_objective(input_data, target_data): @@ -162,8 +192,8 @@ def test_training_step_with_objective(input_data, target_data): optimizer = Mock() projector = Mock() composer = mart.attack.composer.Additive() - gain = torch.tensor([1234, 5678]) - model = Mock(return_value={"loss": gain}) + gain = Mock(return_value=torch.tensor([1234, 5678])) + model = Mock(return_value={}) objective = Mock(return_value=torch.tensor([True, False], dtype=torch.bool)) perturber = Perturber( @@ -172,13 +202,14 @@ def test_training_step_with_objective(input_data, target_data): composer=composer, projector=projector, objective=objective, + gain=gain, ) output = perturber.training_step( {"input": input_data, "target": target_data, "model": model}, 0 ) - assert output == gain[1] + assert output == 5678 objective.assert_called_once() @@ -189,6 +220,7 @@ def test_configure_gradient_clipping(): composer = mart.attack.composer.Additive() optimizer = Mock(param_groups=[{"params": Mock()}, {"params": Mock()}]) gradient_modifier = Mock() + gain = Mock() perturber = Perturber( optimizer=optimizer, @@ -196,6 +228,7 @@ def test_configure_gradient_clipping(): initializer=None, composer=None, projector=None, + gain=gain, ) # We need to mock a trainer since LightningModule does some checks perturber.trainer = Mock(gradient_clip_val=1.0, gradient_clip_algorithm="norm") From 16654875cd83a533bbce5d09c2fdb9056525a36b Mon Sep 17 00:00:00 2001 From: Weilin Xu Date: Fri, 24 Mar 2023 15:39:25 -0700 Subject: [PATCH 094/163] Cleaning. 
--- mart/attack/perturbation_manager.py | 20 +++++++++++++------- 1 file changed, 13 insertions(+), 7 deletions(-) diff --git a/mart/attack/perturbation_manager.py b/mart/attack/perturbation_manager.py index 85a8257c..42328bdd 100644 --- a/mart/attack/perturbation_manager.py +++ b/mart/attack/perturbation_manager.py @@ -22,21 +22,27 @@ def __init__( optim_params: dict | None = None, ) -> None: + # In case gradient_modifier or projector is None. + def nop(*args, **kwargs): + pass + + gradient_modifier = gradient_modifier or nop + projector = projector or nop + + # Backward compatibility, in case modality is unknown, and not given in input. if not isinstance(initializer, dict): initializer = {None: initializer} - - if gradient_modifier is None and not isinstance(gradient_modifier, dict): + if not isinstance(gradient_modifier, dict): gradient_modifier = {None: gradient_modifier} - - if projector is not None and not isinstance(projector, dict): + if not isinstance(projector, dict): projector = {None: projector} + # In case optimization parameters are not given. + optim_params = optim_params or {modality: {} for modality in initializer.keys()} + self.initializer = initializer self.gradient_modifier = gradient_modifier self.projector = projector - - if optim_params is None: - optim_params = {None: {}} | {modality: {} for modality in self.initializer.keys()} self.optim_params = optim_params self._perturbation = None From 28ea18e7e5202c497b1e8f78d0fe247e22647c76 Mon Sep 17 00:00:00 2001 From: Weilin Xu Date: Fri, 24 Mar 2023 15:48:03 -0700 Subject: [PATCH 095/163] Comments. 
--- mart/attack/perturbation_manager.py | 14 +++++++++++--- 1 file changed, 11 insertions(+), 3 deletions(-) diff --git a/mart/attack/perturbation_manager.py b/mart/attack/perturbation_manager.py index 42328bdd..648f6f5a 100644 --- a/mart/attack/perturbation_manager.py +++ b/mart/attack/perturbation_manager.py @@ -49,11 +49,18 @@ def nop(*args, **kwargs): @property def perturbation(self): - # Return perturbation that is homomorphic with input, even if the underlying perturbation could be its sub-components. + """Return perturbation that is homomorphic with input.""" + # TODO: Compose perturbation from sub-componenets. return self._perturbation def initialize(self, input): - # Create raw perturbation that is used to produce parameter groups for optimization. + """Create and initialize raw perturbation components. + + With raw perturbation components, we can + 1. compose perturbation that is homomorphic to input. + 2. compose parameter groups for optimization. + """ + # TODO: Raw perturbation is not necessarily homorphic with input. self._perturbation = self._initialize(input) def _initialize(self, input, modality=None): @@ -67,7 +74,8 @@ def _initialize(self, input, modality=None): self.initializer[modality](pert) # Gradient modifier hook. - # TODO: self.gradient_modifier[modality](pert) + # FIXME: use actual gradient modifier, self.gradient_modifier[modality](pert) + # The current implementation of gradient modifiers is not hookable. if self.gradient_modifier is not None: pert.register_hook(lambda grad: grad.sign()) From 5ff8eea6da66100eb641c545d1eee152fddbf46d Mon Sep 17 00:00:00 2001 From: Weilin Xu Date: Fri, 24 Mar 2023 15:53:52 -0700 Subject: [PATCH 096/163] Cleaning. 
--- mart/attack/perturbation_manager.py | 4 +++- mart/attack/perturber.py | 7 +++++-- 2 files changed, 8 insertions(+), 3 deletions(-) diff --git a/mart/attack/perturbation_manager.py b/mart/attack/perturbation_manager.py index 648f6f5a..50fced56 100644 --- a/mart/attack/perturbation_manager.py +++ b/mart/attack/perturbation_manager.py @@ -103,17 +103,19 @@ def _project(self, perturbation, input, target, modality=None): for perturbation_i, input_i, target_i in zip(perturbation, input, target): self._project(perturbation_i, input_i, target_i, modality=modality) + @property def parameter_groups(self): param_groups = self._parameter_groups(self._perturbation) return param_groups def _parameter_groups(self, pert, modality=None): - """Return parameter groups as a list of dictionaries.""" + """Recursively return parameter groups as a list of dictionaries.""" if isinstance(pert, torch.Tensor): return [{"params": pert} | self.optim_params[modality]] elif isinstance(pert, dict): ret = [self._parameter_groups(pert_i, modality) for modality, pert_i in pert.items()] + # Concatenate a list of lists. return list(itertools.chain.from_iterable(ret)) elif isinstance(pert, list) or isinstance(pert, tuple): param_list = [] diff --git a/mart/attack/perturber.py b/mart/attack/perturber.py index 23da11e7..6e734238 100644 --- a/mart/attack/perturber.py +++ b/mart/attack/perturber.py @@ -75,10 +75,13 @@ def project(self, input, target): def perturbation(self): return self.pert_manager.perturbation + @property + def parameter_groups(self): + return self.pert_manager.parameter_groups + def configure_optimizers(self): # Parameter initialization is done in Adversary before fit() by invoking initialize(input). 
- param_groups = self.pert_manager.parameter_groups() - return self.optimizer_fn(param_groups) + return self.optimizer_fn(self.parameter_groups) def training_step(self, batch, batch_idx): # copy batch since we modify it and it is used internally From 80d796ac4ef2c75e81ee17a13bd33d6d72197818 Mon Sep 17 00:00:00 2001 From: Weilin Xu Date: Fri, 24 Mar 2023 16:16:38 -0700 Subject: [PATCH 097/163] Add ModalityComposer that is aware of modalities and the dictionary/list/tuple input structure. --- mart/attack/composer.py | 44 +++++++++++++++++++++- mart/configs/attack/composer/modality.yaml | 2 + 2 files changed, 45 insertions(+), 1 deletion(-) create mode 100644 mart/configs/attack/composer/modality.yaml diff --git a/mart/attack/composer.py b/mart/attack/composer.py index 04948ff3..3858482b 100644 --- a/mart/attack/composer.py +++ b/mart/attack/composer.py @@ -11,7 +11,7 @@ import torch -__all__ = ["BatchComposer"] +__all__ = ["BatchComposer", "ModalityComposer"] class Composer(torch.nn.Module, abc.ABC): @@ -57,6 +57,48 @@ def forward( return output +class ModalityComposer(Composer): + def __init__(self, sub_composers: dict | Composer): + super().__init__() + + # Backward compatibility, in case modality is unknown, and not given in input. 
+        if isinstance(sub_composers, Composer):
+            sub_composers = {None: sub_composers}
+
+        self.sub_composers = sub_composers
+
+    def _compose(self, perturbation, *, input, target, modality=None):
+        """Recursively compose output from perturbation and input."""
+        if isinstance(perturbation, torch.Tensor):
+            output = self.sub_composers[modality](perturbation, input=input, target=target)
+            return output
+        elif isinstance(perturbation, dict):
+            output = {}
+            for modality, pert in perturbation.items():
+                output[modality] = self._compose(
+                    pert, input=input[modality], target=target, modality=modality
+                )
+            return output
+        elif isinstance(perturbation, list) or isinstance(perturbation, tuple):
+            output = []
+            for pert_i, input_i, target_i in zip(perturbation, input, target):
+                output.append(self._compose(pert_i, input=input_i, target=target_i))
+            if isinstance(perturbation, tuple):
+                output = tuple(output)
+            return output
+
+    def forward(
+        self,
+        perturbation: torch.Tensor | tuple,
+        *,
+        input: torch.Tensor | tuple,
+        target: torch.Tensor | dict[str, Any] | tuple,
+        **kwargs,
+    ) -> torch.Tensor | tuple:
+        output = self._compose(perturbation, input=input, target=target)
+        return output
+
+
 class Additive(Composer):
     """We assume an adversary adds perturbation to the input."""

diff --git a/mart/configs/attack/composer/modality.yaml b/mart/configs/attack/composer/modality.yaml
new file mode 100644
index 00000000..5e90a3b8
--- /dev/null
+++ b/mart/configs/attack/composer/modality.yaml
@@ -0,0 +1,2 @@
+_target_: mart.attack.ModalityComposer
+sub_composers: ???

From 49a96206a19ce4b94dc5676eac8b561c15828ccd Mon Sep 17 00:00:00 2001
From: Weilin Xu
Date: Fri, 24 Mar 2023 16:17:23 -0700
Subject: [PATCH 098/163] Configure adversary to use modality_composer.
--- mart/configs/attack/object_detection_mask_adversary.yaml | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/mart/configs/attack/object_detection_mask_adversary.yaml b/mart/configs/attack/object_detection_mask_adversary.yaml index 696844c1..79193267 100644 --- a/mart/configs/attack/object_detection_mask_adversary.yaml +++ b/mart/configs/attack/object_detection_mask_adversary.yaml @@ -6,7 +6,8 @@ defaults: - projector@projector.rgb: mask_range # - callbacks: [progress_bar, image_visualizer] - objective: zero_ap - - composer: batch_overlay + - composer: modality + - composer@composer.sub_composers.rgb: overlay - enforcer: batch - enforcer/constraints: [mask, pixel_range] From 7a8c01bf7528902e2198ab7341c242665b823ca6 Mon Sep 17 00:00:00 2001 From: Weilin Xu Date: Fri, 24 Mar 2023 16:18:57 -0700 Subject: [PATCH 099/163] Remove BatchComposer. --- mart/attack/composer.py | 33 +------------------ .../attack/composer/batch_additive.yaml | 5 --- .../attack/composer/batch_overlay.yaml | 5 --- 3 files changed, 1 insertion(+), 42 deletions(-) delete mode 100644 mart/configs/attack/composer/batch_additive.yaml delete mode 100644 mart/configs/attack/composer/batch_overlay.yaml diff --git a/mart/attack/composer.py b/mart/attack/composer.py index 3858482b..19299e36 100644 --- a/mart/attack/composer.py +++ b/mart/attack/composer.py @@ -11,7 +11,7 @@ import torch -__all__ = ["BatchComposer", "ModalityComposer"] +__all__ = ["ModalityComposer"] class Composer(torch.nn.Module, abc.ABC): @@ -26,37 +26,6 @@ def forward( raise NotImplementedError -class BatchComposer(Composer): - def __init__(self, composer: Composer): - super().__init__() - - self.composer = composer - - def forward( - self, - perturbation: torch.Tensor | tuple, - *, - input: torch.Tensor | tuple, - target: torch.Tensor | dict[str, Any] | tuple, - **kwargs, - ) -> torch.Tensor | tuple: - output = [] - - for input_i, target_i, perturbation_i in zip(input, target, perturbation): - # FIXME: Make it 
modality-aware. - output_i = self.composer( - perturbation_i["rgb"], input=input_i["rgb"], target=target_i, **kwargs - ) - output.append({"rgb": output_i}) - - if isinstance(input, torch.Tensor): - output = torch.stack(output) - else: - output = tuple(output) - - return output - - class ModalityComposer(Composer): def __init__(self, sub_composers: dict | Composer): super().__init__() diff --git a/mart/configs/attack/composer/batch_additive.yaml b/mart/configs/attack/composer/batch_additive.yaml deleted file mode 100644 index 49ea1f8f..00000000 --- a/mart/configs/attack/composer/batch_additive.yaml +++ /dev/null @@ -1,5 +0,0 @@ -defaults: - - /attack/composer@composer: additive - -_target_: mart.attack.BatchComposer -composer: ??? diff --git a/mart/configs/attack/composer/batch_overlay.yaml b/mart/configs/attack/composer/batch_overlay.yaml deleted file mode 100644 index f69ce0ce..00000000 --- a/mart/configs/attack/composer/batch_overlay.yaml +++ /dev/null @@ -1,5 +0,0 @@ -defaults: - - /attack/composer@composer: overlay - -_target_: mart.attack.BatchComposer -composer: ??? From ac0f42de9288dc88aea4b8e7d0e0c97976e0b491 Mon Sep 17 00:00:00 2001 From: Weilin Xu Date: Fri, 24 Mar 2023 16:21:40 -0700 Subject: [PATCH 100/163] Cleaning. --- mart/attack/composer.py | 7 +------ 1 file changed, 1 insertion(+), 6 deletions(-) diff --git a/mart/attack/composer.py b/mart/attack/composer.py index 19299e36..da8f284d 100644 --- a/mart/attack/composer.py +++ b/mart/attack/composer.py @@ -27,13 +27,8 @@ def forward( class ModalityComposer(Composer): - def __init__(self, sub_composers: dict | Composer): + def __init__(self, sub_composers: dict[str, Composer]): super().__init__() - - # Backward compatibility, in case modality is unknown, and not given in input. 
- if isinstance(sub_composers, Composer): - sub_composers = {None: sub_composers} - self.sub_composers = sub_composers def _compose(self, perturbation, *, input, target, modality=None): From 427142ec8f04f5a691bda19d9f4336f8772396d1 Mon Sep 17 00:00:00 2001 From: Weilin Xu Date: Fri, 24 Mar 2023 16:50:40 -0700 Subject: [PATCH 101/163] Add ModalityEnforcer. --- mart/attack/enforcer.py | 22 ++++++++++++++++++++++ mart/configs/attack/enforcer/modality.yaml | 2 ++ 2 files changed, 24 insertions(+) create mode 100644 mart/configs/attack/enforcer/modality.yaml diff --git a/mart/attack/enforcer.py b/mart/attack/enforcer.py index 1506a230..5d9d7e5d 100644 --- a/mart/attack/enforcer.py +++ b/mart/attack/enforcer.py @@ -11,6 +11,8 @@ import torch +__all__ = ["ModalityEnforcer"] + class ConstraintViolated(Exception): pass @@ -88,6 +90,26 @@ def __call__(self, input_adv, *, input, target, **kwargs): self._check_constraints(input_adv, input=input, target=target) +class ModalityEnforcer(Enforcer): + def __init__(self, sub_enforcers=None) -> None: + self.sub_enforcers = sub_enforcers + + def _enforce(self, input_adv, *, input, target, modality=None): + if isinstance(input_adv, torch.Tensor): + self.sub_enforcers[modality](input_adv, input=input, target=target) + elif isinstance(input_adv, dict): + for modality in input_adv: + self._enforce( + input_adv[modality], input=input[modality], target=target, modality=modality + ) + elif isinstance(input_adv, list) or isinstance(input_adv, tuple): + for input_adv_i, input_i, target_i in zip(input_adv, input, target): + self._enforce(input_adv_i, input=input_i, target=target_i) + + def __call__(self, input_adv, *, input, target, **kwargs): + self._enforce(input_adv, input=input, target=target) + + class BatchEnforcer(Enforcer): @torch.no_grad() def __call__( diff --git a/mart/configs/attack/enforcer/modality.yaml b/mart/configs/attack/enforcer/modality.yaml new file mode 100644 index 00000000..d3013fc1 --- /dev/null +++ 
b/mart/configs/attack/enforcer/modality.yaml @@ -0,0 +1,2 @@ +_target_: mart.attack.ModalityEnforcer +sub_enforcers: ??? From 1231b374e0bcc254b444c021796d224f0e486107 Mon Sep 17 00:00:00 2001 From: Weilin Xu Date: Fri, 24 Mar 2023 16:51:18 -0700 Subject: [PATCH 102/163] Configure adversary to use modality enforcer. --- mart/configs/attack/object_detection_mask_adversary.yaml | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/mart/configs/attack/object_detection_mask_adversary.yaml b/mart/configs/attack/object_detection_mask_adversary.yaml index 79193267..f7dca659 100644 --- a/mart/configs/attack/object_detection_mask_adversary.yaml +++ b/mart/configs/attack/object_detection_mask_adversary.yaml @@ -8,8 +8,10 @@ defaults: - objective: zero_ap - composer: modality - composer@composer.sub_composers.rgb: overlay - - enforcer: batch - - enforcer/constraints: [mask, pixel_range] + - enforcer: modality + - enforcer@enforcer.sub_enforcers.rgb: default + - enforcer/constraints@enforcer.sub_enforcers.rgb.constraints: + [mask, pixel_range] # Make a 5-step attack for the demonstration purpose. optimizer: From 5f7bec808d26ea0779d527ce86bc37df5da2e1c8 Mon Sep 17 00:00:00 2001 From: Weilin Xu Date: Fri, 24 Mar 2023 17:22:12 -0700 Subject: [PATCH 103/163] Add mart.utils.get_tensor_device(). 
--- mart/utils/utils.py | 21 +++++++++++++++++++++ 1 file changed, 21 insertions(+) diff --git a/mart/utils/utils.py b/mart/utils/utils.py index 80609331..2c04805f 100644 --- a/mart/utils/utils.py +++ b/mart/utils/utils.py @@ -7,6 +7,7 @@ from typing import Any, Callable, Dict, List, Tuple import hydra +import torch from hydra.core.hydra_config import HydraConfig from omegaconf import DictConfig, OmegaConf from pytorch_lightning import Callback @@ -26,6 +27,7 @@ "log_hyperparameters", "save_file", "task_wrapper", + "get_tensor_device", ] log = pylogger.get_pylogger(__name__) @@ -272,3 +274,22 @@ def get_resume_checkpoint(config: DictConfig) -> Tuple[DictConfig]: config = hydra.compose(config_name, overrides=overrides) return config + + +def get_tensor_device(data): + """Get device of the first tensor hidden in dict/list/tuple.""" + + def _get_first_tensor(data): + if isinstance(data, torch.Tensor): + return data + elif isinstance(data, list) or isinstance(data, tuple) or isinstance(data, dict): + data = data.values() if isinstance(data, dict) else data + for sub_data in data: + ret = _get_first_tensor(sub_data) + if ret is not None: + return ret + else: + return None + + tensor = _get_first_tensor(data) + return tensor.device From e7a84432dd461a2ccaf9cfe6de6a4c8835622db3 Mon Sep 17 00:00:00 2001 From: Weilin Xu Date: Fri, 24 Mar 2023 17:22:54 -0700 Subject: [PATCH 104/163] Use mart.utils.get_tensor_device() in Adversary. 
--- mart/attack/adversary.py | 9 ++------- 1 file changed, 2 insertions(+), 7 deletions(-) diff --git a/mart/attack/adversary.py b/mart/attack/adversary.py index 64138fef..afcd2803 100644 --- a/mart/attack/adversary.py +++ b/mart/attack/adversary.py @@ -12,7 +12,7 @@ import pytorch_lightning as pl import torch -from mart.utils import silent +from mart.utils import get_tensor_device, silent from .enforcer import Enforcer from .perturber import Perturber @@ -76,12 +76,7 @@ def forward(self, **batch): # Late bind attacker on same device as input if isinstance(self.attacker, partial): inputs = batch["input"] - - if isinstance(inputs, tuple): - # FIXME: Make it modality-aware to get a tensor. - inputs = inputs[0]["rgb"] - - device = inputs.device + device = get_tensor_device(inputs) if device.type == "cuda": accelerator = "gpu" From b2dd91019f7282edd6425c05549faaa09e2a3c17 Mon Sep 17 00:00:00 2001 From: Weilin Xu Date: Fri, 24 Mar 2023 17:23:37 -0700 Subject: [PATCH 105/163] Comment. --- mart/attack/adversary.py | 1 + 1 file changed, 1 insertion(+) diff --git a/mart/attack/adversary.py b/mart/attack/adversary.py index afcd2803..1491c1b7 100644 --- a/mart/attack/adversary.py +++ b/mart/attack/adversary.py @@ -76,6 +76,7 @@ def forward(self, **batch): # Late bind attacker on same device as input if isinstance(self.attacker, partial): inputs = batch["input"] + # The tensor may hide behind dict/list/tuple. device = get_tensor_device(inputs) if device.type == "cuda": From f170c0361df51007612c2f2b9b639306a2ba4239 Mon Sep 17 00:00:00 2001 From: Weilin Xu Date: Fri, 24 Mar 2023 17:33:20 -0700 Subject: [PATCH 106/163] Backward compatibility for COCO without modalities. 
--- mart/attack/composer.py | 7 ++++++- mart/attack/enforcer.py | 7 ++++++- 2 files changed, 12 insertions(+), 2 deletions(-) diff --git a/mart/attack/composer.py b/mart/attack/composer.py index da8f284d..c8ba3617 100644 --- a/mart/attack/composer.py +++ b/mart/attack/composer.py @@ -27,8 +27,13 @@ def forward( class ModalityComposer(Composer): - def __init__(self, sub_composers: dict[str, Composer]): + def __init__(self, sub_composers: dict[str, Composer] | Composer): super().__init__() + + # Backward compatibility for datasets which do not have modality tokens. + if isinstance(sub_composers, Composer): + sub_composers = {None: sub_composers} + self.sub_composers = sub_composers def _compose(self, perturbation, *, input, target, modality=None): diff --git a/mart/attack/enforcer.py b/mart/attack/enforcer.py index 5d9d7e5d..3fad161b 100644 --- a/mart/attack/enforcer.py +++ b/mart/attack/enforcer.py @@ -91,7 +91,12 @@ def __call__(self, input_adv, *, input, target, **kwargs): class ModalityEnforcer(Enforcer): - def __init__(self, sub_enforcers=None) -> None: + def __init__(self, sub_enforcers: dict | Enforcer) -> None: + + # Backward compatibility for datasets which do not have modality tokens. + if isinstance(sub_enforcers, Enforcer): + sub_enforcers = {None: sub_enforcers} + self.sub_enforcers = sub_enforcers def _enforce(self, input_adv, *, input, target, modality=None): From 6a4d45b886b0790eab17f7a7043793572b1cf6aa Mon Sep 17 00:00:00 2001 From: Weilin Xu Date: Fri, 24 Mar 2023 17:34:15 -0700 Subject: [PATCH 107/163] Create separate adversary configs for dataset with modalities. 
--- .../object_detection_mask_adversary.yaml | 16 +++++------- .../object_detection_rgb_mask_adversary.yaml | 26 +++++++++++++++++++ 2 files changed, 33 insertions(+), 9 deletions(-) create mode 100644 mart/configs/attack/object_detection_rgb_mask_adversary.yaml diff --git a/mart/configs/attack/object_detection_mask_adversary.yaml b/mart/configs/attack/object_detection_mask_adversary.yaml index f7dca659..b7173a96 100644 --- a/mart/configs/attack/object_detection_mask_adversary.yaml +++ b/mart/configs/attack/object_detection_mask_adversary.yaml @@ -1,17 +1,16 @@ defaults: - adversary - optimizer: sgd - - initializer@initializer.rgb: constant - - gradient_modifier@gradient_modifier.rgb: sign - - projector@projector.rgb: mask_range + - initializer: constant + - gradient_modifier: sign + - projector: mask_range # - callbacks: [progress_bar, image_visualizer] - objective: zero_ap - composer: modality - - composer@composer.sub_composers.rgb: overlay + - composer@composer.sub_composers: overlay - enforcer: modality - - enforcer@enforcer.sub_enforcers.rgb: default - - enforcer/constraints@enforcer.sub_enforcers.rgb.constraints: - [mask, pixel_range] + - enforcer@enforcer.sub_enforcers: default + - enforcer/constraints@enforcer.sub_enforcers.constraints: [mask, pixel_range] # Make a 5-step attack for the demonstration purpose. 
optimizer: @@ -22,5 +21,4 @@ max_iters: 5 gain: "loss" initializer: - rgb: - constant: 127 + constant: 127 diff --git a/mart/configs/attack/object_detection_rgb_mask_adversary.yaml b/mart/configs/attack/object_detection_rgb_mask_adversary.yaml new file mode 100644 index 00000000..f7dca659 --- /dev/null +++ b/mart/configs/attack/object_detection_rgb_mask_adversary.yaml @@ -0,0 +1,26 @@ +defaults: + - adversary + - optimizer: sgd + - initializer@initializer.rgb: constant + - gradient_modifier@gradient_modifier.rgb: sign + - projector@projector.rgb: mask_range + # - callbacks: [progress_bar, image_visualizer] + - objective: zero_ap + - composer: modality + - composer@composer.sub_composers.rgb: overlay + - enforcer: modality + - enforcer@enforcer.sub_enforcers.rgb: default + - enforcer/constraints@enforcer.sub_enforcers.rgb.constraints: + [mask, pixel_range] + +# Make a 5-step attack for the demonstration purpose. +optimizer: + lr: 55 + +max_iters: 5 + +gain: "loss" + +initializer: + rgb: + constant: 127 From ec8bc4b473a8ecb38189219d98fccd80cd421287 Mon Sep 17 00:00:00 2001 From: Weilin Xu Date: Fri, 24 Mar 2023 17:36:08 -0700 Subject: [PATCH 108/163] Fix tests. 
--- tests/test_perturbation_manager.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/tests/test_perturbation_manager.py b/tests/test_perturbation_manager.py index 09bf7713..59d6eb24 100644 --- a/tests/test_perturbation_manager.py +++ b/tests/test_perturbation_manager.py @@ -17,7 +17,7 @@ def test_perturbation_tensor(): assert pert.shape == pert.shape assert (pert == 0).all() - param_groups = pert_manager.parameter_groups() + param_groups = pert_manager.parameter_groups assert isinstance(param_groups, Iterable) assert param_groups[0]["params"].requires_grad @@ -32,7 +32,7 @@ def test_perturbation_dict(): assert (pert["rgb"] == 0).all() assert (pert["depth"] == 1).all() - param_groups = pert_manager.parameter_groups() + param_groups = pert_manager.parameter_groups assert len(param_groups) == 2 param_groups = list(param_groups) assert param_groups[0]["params"].requires_grad @@ -54,7 +54,7 @@ def test_perturbation_tuple_dict(): assert (pert[1]["rgb"] == 0).all() assert (pert[1]["depth"] == 1).all() - param_groups = pert_manager.parameter_groups() + param_groups = pert_manager.parameter_groups assert len(param_groups) == 4 param_groups = list(param_groups) assert param_groups[0]["params"].requires_grad From ed3eda1d5001318db6e31843ef6e3d8410368ed7 Mon Sep 17 00:00:00 2001 From: Weilin Xu Date: Fri, 24 Mar 2023 17:42:18 -0700 Subject: [PATCH 109/163] Bug fix. --- mart/attack/perturber.py | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/mart/attack/perturber.py b/mart/attack/perturber.py index 6e734238..67bcee62 100644 --- a/mart/attack/perturber.py +++ b/mart/attack/perturber.py @@ -115,6 +115,11 @@ def forward( target: torch.Tensor | dict[str, Any] | tuple, **kwargs, ): + if self.perturbation is None: + # TODO: raise Exception? + # The optimizer won't get parameters earlier if this happens. + self.initialize(input=input, target=target) + # Project perturbation... 
self.project(input, target) From aa08118ec74542405bddfb7ea4d44dd9c542497d Mon Sep 17 00:00:00 2001 From: Cory Cornelius Date: Mon, 27 Mar 2023 07:55:35 -0700 Subject: [PATCH 110/163] cleanup --- mart/attack/adversary.py | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/mart/attack/adversary.py b/mart/attack/adversary.py index 50c78fe5..3428f656 100644 --- a/mart/attack/adversary.py +++ b/mart/attack/adversary.py @@ -8,16 +8,18 @@ from functools import partial from itertools import cycle -from typing import Any +from typing import TYPE_CHECKING, Any import pytorch_lightning as pl import torch from mart.utils import silent -from .enforcer import Enforcer from .perturber import Perturber +if TYPE_CHECKING: + from .enforcer import Enforcer + __all__ = ["Adversary"] From 74f8fab4b54c03bd1d9b63d74dfad85531345ca2 Mon Sep 17 00:00:00 2001 From: Cory Cornelius Date: Mon, 27 Mar 2023 08:30:58 -0700 Subject: [PATCH 111/163] Place Trainer on same device as Perturber --- mart/attack/adversary.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/mart/attack/adversary.py b/mart/attack/adversary.py index 3428f656..8ca3d63d 100644 --- a/mart/attack/adversary.py +++ b/mart/attack/adversary.py @@ -104,7 +104,7 @@ def _initialize_attack(self, input: torch.Tensor | tuple): return self.attacker # Convert torch.device to PL accelerator - device = input[0].device if isinstance(input, tuple) else input.device + device = self.perturber.device if device.type == "cuda": accelerator = "gpu" From f1c2a9df8467a3adda4a658a0e31afd145076bc3 Mon Sep 17 00:00:00 2001 From: Cory Cornelius Date: Tue, 28 Mar 2023 11:23:59 -0700 Subject: [PATCH 112/163] Make composer, enforcer and projector tuple aware --- mart/attack/adversary.py | 36 ++++-------- mart/attack/composer.py | 40 +++++++------ mart/attack/enforcer.py | 53 ++++++++++++----- mart/attack/perturber.py | 22 +------ mart/attack/projector.py | 121 +++++++++++++++------------------------ 5 files changed, 122 
insertions(+), 150 deletions(-) diff --git a/mart/attack/adversary.py b/mart/attack/adversary.py index 8ca3d63d..c950241b 100644 --- a/mart/attack/adversary.py +++ b/mart/attack/adversary.py @@ -70,36 +70,36 @@ def __init__( self.enforcer = enforcer @silent() - def forward(self, *, input: torch.Tensor | tuple, **batch): + def forward(self, **batch): # Adversary lives within a sequence of model. To signal the adversary should attack, one # must pass a model to attack when calling the adversary. Since we do not know where the # Adversary lives inside the model, we also need the remaining sequence to be able to # get a loss. if "model" in batch and "sequence" in batch: - self._attack(input=input, **batch) + self._attack(**batch) # Always use perturb the current input. - input_adv = self.perturber(input=input, **batch) + input_adv = self.perturber(**batch) # Enforce constraints after the attack optimization ends. if "model" in batch and "sequence" in batch: - self._enforce(input_adv, input=input, **batch) + self.enforcer(input_adv, **batch) return input_adv - def _attack(self, input: torch.Tensor | tuple, **kwargs): - batch = {"input": input, **kwargs} + def _attack(self, input, **batch): + batch = {"input": input, **batch} + + # Configure and reset perturber to use batch inputs + self.perturber.configure_perturbation(input) # Attack, aka fit a perturbation, for one epoch by cycling over the same input batch. # We use Trainer.limit_train_batches to control the number of attack iterations. 
- attacker = self._initialize_attack(input) + attacker = self._get_attacker(input) attacker.fit_loop.max_epochs += 1 attacker.fit(self.perturber, train_dataloaders=cycle([batch])) - def _initialize_attack(self, input: torch.Tensor | tuple): - # Configure perturber to use batch inputs - self.perturber.configure_perturbation(input) - + def _get_attacker(self, input): if not isinstance(self.attacker, partial): return self.attacker @@ -119,17 +119,3 @@ def _initialize_attack(self, input: torch.Tensor | tuple): self.attacker = self.attacker(accelerator=accelerator, devices=devices) return self.attacker - - def _enforce( - self, - input_adv: torch.Tensor | tuple, - *, - input: torch.Tensor | tuple, - target: torch.Tensor | dict[str, Any] | tuple, - **kwargs, - ): - if not isinstance(input_adv, tuple): - self.enforcer(input_adv, input=input, target=target) - else: - for inp_adv, inp, tar in zip(input_adv, input, target): - self.enforcer(inp_adv, input=inp, target=tar) diff --git a/mart/attack/composer.py b/mart/attack/composer.py index 6fff22e0..ddfdc45b 100644 --- a/mart/attack/composer.py +++ b/mart/attack/composer.py @@ -12,41 +12,47 @@ import torch -class Composer(torch.nn.Module, abc.ABC): - @abc.abstractclassmethod - def forward( +class Composer(abc.ABC): + def __call__( self, perturbation: torch.Tensor | tuple, *, input: torch.Tensor | tuple, target: torch.Tensor | dict[str, Any] | tuple, + **kwargs, ) -> torch.Tensor | tuple: - raise NotImplementedError - - -class Additive(Composer): - """We assume an adversary adds perturbation to the input.""" - - def forward( + if isinstance(perturbation, tuple): + input_adv = tuple( + self.compose(perturbation_i, input=input_i, target=target_i) + for perturbation_i, input_i, target_i in zip(perturbation, input, target) + ) + else: + input_adv = self.compose(perturbation, input=input, target=target) + + return input_adv + + @abc.abstractmethod + def compose( self, perturbation: torch.Tensor, *, input: torch.Tensor, target: 
torch.Tensor | dict[str, Any], ) -> torch.Tensor: + raise NotImplementedError + + +class Additive(Composer): + """We assume an adversary adds perturbation to the input.""" + + def compose(self, perturbation, *, input, target): return input + perturbation class Overlay(Composer): """We assume an adversary overlays a patch to the input.""" - def forward( - self, - perturbation: torch.Tensor, - *, - input: torch.Tensor, - target: torch.Tensor | dict[str, Any], - ) -> torch.Tensor: + def compose(self, perturbation, *, input, target): # True is mutable, False is immutable. mask = target["perturbable_mask"] diff --git a/mart/attack/enforcer.py b/mart/attack/enforcer.py index c3b52046..4d4a1364 100644 --- a/mart/attack/enforcer.py +++ b/mart/attack/enforcer.py @@ -17,8 +17,27 @@ class ConstraintViolated(Exception): class Constraint(abc.ABC): - @abc.abstractclassmethod - def __call__(self, input_adv, *, input, target) -> None: + def __call__( + self, + input_adv: torch.Tensor | tuple, + *, + input: torch.Tensor | tuple, + target: torch.Tensor | dict[str, Any] | tuple, + ) -> None: + if isinstance(input_adv, tuple): + for input_adv_i, input_i, target_i in zip(input_adv, input, target): + self.verify(input_adv_i, input=input_i, target=target_i) + else: + self.verify(input_adv, input=input, target=target) + + @abc.abstractmethod + def verify( + self, + input_adv: torch.Tensor, + *, + input: torch.Tensor, + target: torch.Tensor | dict[str, Any], + ) -> None: raise NotImplementedError @@ -27,19 +46,21 @@ def __init__(self, min, max): self.min = min self.max = max - def __call__(self, input_adv, *, input, target): + def verify(self, input_adv, *, input, target): if torch.any(input_adv < self.min) or torch.any(input_adv > self.max): raise ConstraintViolated(f"Adversarial input is outside [{self.min}, {self.max}].") class Lp(Constraint): - def __init__(self, eps: float, p: int | float | None = torch.inf, dim=None, keepdim=False): + def __init__( + self, eps: float, p: int | float 
= torch.inf, dim: int | None = None, keepdim: bool = False + ): self.p = p self.eps = eps self.dim = dim self.keepdim = keepdim - def __call__(self, input_adv, *, input, target): + def verify(self, input_adv, *, input, target): perturbation = input_adv - input norm_vals = perturbation.norm(p=self.p, dim=self.dim, keepdim=self.keepdim) norm_max = norm_vals.max() @@ -50,12 +71,12 @@ def __call__(self, input_adv, *, input, target): class Integer(Constraint): - def __init__(self, rtol=0, atol=0, equal_nan=False): + def __init__(self, rtol: float = 0.0, atol: float = 0.0, equal_nan: bool = False): self.rtol = rtol self.atol = atol self.equal_nan = equal_nan - def __call__(self, input_adv, *, input, target): + def verify(self, input_adv, *, input, target): if not torch.isclose( input_adv, input_adv.round(), rtol=self.rtol, atol=self.atol, equal_nan=self.equal_nan ).all(): @@ -63,7 +84,7 @@ def __call__(self, input_adv, *, input, target): class Mask(Constraint): - def __call__(self, input_adv, *, input, target): + def verify(self, input_adv, *, input, target): # True/1 is mutable, False/0 is immutable. 
# mask.shape=(H, W) mask = target["perturbable_mask"] @@ -76,13 +97,17 @@ def __call__(self, input_adv, *, input, target): class Enforcer: - def __init__(self, constraints=None) -> None: + def __init__(self, constraints: dict[str, Constraint] | None = None) -> None: self.constraints = constraints or {} - def _check_constraints(self, input_adv, *, input, target): + @torch.no_grad() + def __call__( + self, + input_adv: torch.Tensor | tuple, + *, + input: torch.Tensor | tuple, + target: torch.Tensor | dict[str, Any] | tuple, + **kwargs, + ) -> None: for constraint in self.constraints.values(): constraint(input_adv, input=input, target=target) - - @torch.no_grad() - def __call__(self, input_adv, *, input, target): - self._check_constraints(input_adv, input=input, target=target) diff --git a/mart/attack/perturber.py b/mart/attack/perturber.py index 49fa9cad..8f38ec13 100644 --- a/mart/attack/perturber.py +++ b/mart/attack/perturber.py @@ -122,28 +122,12 @@ def configure_gradient_clipping( for group in optimizer.param_groups: self.gradient_modifier(group["params"]) - def forward( - self, - *, - input: torch.Tensor | tuple, - target: torch.Tensor | dict[str, Any] | tuple, - **kwargs, - ): + def forward(self, **batch): if self.perturbation is None: raise MisconfigurationException( "You need to call the configure_perturbation before forward." 
) - def project_and_compose(pert, inp, tar): - self.projector(pert, inp, tar) - return self.composer(pert, input=inp, target=tar) - - if not isinstance(self.perturbation, tuple): - input_adv = project_and_compose(self.perturbation, input, target) - else: - input_adv = tuple( - project_and_compose(pert, inp, tar) - for pert, inp, tar in zip(self.perturbation, input, target) - ) + self.projector(self.perturbation, **batch) - return input_adv + return self.composer(self.perturbation, **batch) diff --git a/mart/attack/projector.py b/mart/attack/projector.py index 4c360688..92391c67 100644 --- a/mart/attack/projector.py +++ b/mart/attack/projector.py @@ -4,23 +4,37 @@ # SPDX-License-Identifier: BSD-3-Clause # -import abc -from typing import Any, Dict, List, Optional, Union +from __future__ import annotations -import torch +from typing import Any -__all__ = ["Projector"] +import torch -class Projector(abc.ABC): +class Projector: """A projector modifies nn.Parameter's data.""" @torch.no_grad() def __call__( self, - tensor: torch.Tensor, - input: torch.Tensor, - target: Union[torch.Tensor, Dict[str, Any]], + perturbation: torch.Tensor | tuple, + *, + input: torch.Tensor | tuple, + target: torch.Tensor | dict[str, Any] | tuple, + **kwargs, + ) -> None: + if isinstance(perturbation, tuple): + for perturbation_i, input_i, target_i in zip(perturbation, input, target): + self.project(perturbation_i, input=input_i, target=target_i) + else: + self.project(perturbation, input=input, target=target) + + def project( + self, + perturbation: torch.Tensor | tuple, + *, + input: torch.Tensor | tuple, + target: torch.Tensor | dict[str, Any] | tuple, ) -> None: pass @@ -28,18 +42,20 @@ def __call__( class Compose(Projector): """Apply a list of perturbation modifier.""" - def __init__(self, projectors: List[Projector]): + def __init__(self, projectors: list[Projector]): self.projectors = projectors @torch.no_grad() def __call__( self, - tensor: torch.Tensor, - input: torch.Tensor, - 
target: Union[torch.Tensor, Dict[str, Any]], + perturbation: torch.Tensor | tuple, + *, + input: torch.Tensor | tuple, + target: torch.Tensor | dict[str, Any] | tuple, + **kwargs, ) -> None: for projector in self.projectors: - projector(tensor, input, target) + projector(perturbation, input=input, target=target) def __repr__(self): projector_names = [repr(p) for p in self.projectors] @@ -49,26 +65,15 @@ def __repr__(self): class Range(Projector): """Clamp the perturbation so that the output is range-constrained.""" - def __init__( - self, - quantize: Optional[bool] = False, - min: Optional[Union[int, float]] = 0, - max: Optional[Union[int, float]] = 255, - ): + def __init__(self, quantize: bool = False, min: int | float = 0, max: int | float = 255): self.quantize = quantize self.min = min self.max = max - @torch.no_grad() - def __call__( - self, - tensor: torch.Tensor, - input: torch.Tensor, - target: Union[torch.Tensor, Dict[str, Any]], - ) -> None: + def project(self, perturbation, *, input, target): if self.quantize: - tensor.round_() - tensor.clamp_(self.min, self.max) + perturbation.round_() + perturbation.clamp_(self.min, self.max) def __repr__(self): return ( @@ -82,26 +87,15 @@ class RangeAdditive(Projector): The projector assumes an additive perturbation threat model. 
""" - def __init__( - self, - quantize: Optional[bool] = False, - min: Optional[Union[int, float]] = 0, - max: Optional[Union[int, float]] = 255, - ): + def __init__(self, quantize: bool = False, min: int | float = 0, max: int | float = 255): self.quantize = quantize self.min = min self.max = max - @torch.no_grad() - def __call__( - self, - tensor: torch.Tensor, - input: torch.Tensor, - target: Union[torch.Tensor, Dict[str, Any]], - ) -> None: + def project(self, perturbation, *, input, target): if self.quantize: - tensor.round_() - tensor.clamp_(self.min - input, self.max - input) + perturbation.round_() + perturbation.clamp_(self.min - input, self.max - input) def __repr__(self): return ( @@ -112,7 +106,7 @@ def __repr__(self): class Lp(Projector): """Project perturbations to Lp norm, only if the Lp norm is larger than eps.""" - def __init__(self, eps: float, p: Optional[Union[int, float]] = torch.inf): + def __init__(self, eps: int | float, p: int | float = torch.inf): """_summary_ Args: @@ -123,55 +117,32 @@ def __init__(self, eps: float, p: Optional[Union[int, float]] = torch.inf): self.p = p self.eps = eps - @torch.no_grad() - def __call__( - self, - tensor: torch.Tensor, - input: torch.Tensor, - target: Union[torch.Tensor, Dict[str, Any]], - ) -> None: - pert_norm = tensor.norm(p=self.p) + def project(self, perturbation, *, input, target): + pert_norm = perturbation.norm(p=self.p) if pert_norm > self.eps: # We only upper-bound the norm. 
- tensor.mul_(self.eps / pert_norm) + perturbation.mul_(self.eps / pert_norm) class LinfAdditiveRange(Projector): """Make sure the perturbation is within the Linf norm ball, and "input + perturbation" is within the [min, max] range.""" - def __init__( - self, - eps: float, - min: Optional[Union[int, float]] = 0, - max: Optional[Union[int, float]] = 255, - ): + def __init__(self, eps: int | float, min: int | float = 0, max: int | float = 255): self.eps = eps self.min = min self.max = max - @torch.no_grad() - def __call__( - self, - tensor: torch.Tensor, - input: torch.Tensor, - target: Union[torch.Tensor, Dict[str, Any]], - ) -> None: + def project(self, perturbation, *, input, target): eps_min = (input - self.eps).clamp(self.min, self.max) - input eps_max = (input + self.eps).clamp(self.min, self.max) - input - tensor.clamp_(eps_min, eps_max) + perturbation.clamp_(eps_min, eps_max) class Mask(Projector): - @torch.no_grad() - def __call__( - self, - tensor: torch.Tensor, - input: torch.Tensor, - target: Union[torch.Tensor, Dict[str, Any]], - ) -> None: - tensor.mul_(target["perturbable_mask"]) + def project(self, perturbation, *, input, target): + perturbation.mul_(target["perturbable_mask"]) def __repr__(self): return f"{self.__class__.__name__}()" From c5020b52898ac6a2667eb13066b352cfae71d7ec Mon Sep 17 00:00:00 2001 From: Cory Cornelius Date: Tue, 28 Mar 2023 11:35:30 -0700 Subject: [PATCH 113/163] fix projector tests --- tests/test_projector.py | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/tests/test_projector.py b/tests/test_projector.py index 9983fa4a..a397a98c 100644 --- a/tests/test_projector.py +++ b/tests/test_projector.py @@ -36,7 +36,7 @@ def test_range_projector_repr(): @pytest.mark.parametrize("max", [10, 100, 110]) def test_range_projector(quantize, min, max, input_data, target_data, perturbation): projector = Range(quantize, min, max) - projector(perturbation, input_data, target_data) + projector(perturbation, 
input=input_data, target=target_data) assert torch.max(perturbation) <= max assert torch.min(perturbation) >= min @@ -61,7 +61,7 @@ def test_range_additive_projector(quantize, min, max, input_data, target_data, p expected_perturbation = torch.clone(perturbation) projector = RangeAdditive(quantize, min, max) - projector(perturbation, input_data, target_data) + projector(perturbation, input=input_data, target=target_data) # modify expected_perturbation if quantize: @@ -78,7 +78,7 @@ def test_lp_projector(eps, p, input_data, target_data, perturbation): expected_perturbation = torch.clone(perturbation) projector = Lp(eps, p) - projector(perturbation, input_data, target_data) + projector(perturbation, input=input_data, target=target_data) # modify expected_perturbation pert_norm = expected_perturbation.norm(p=p) @@ -95,7 +95,7 @@ def test_linf_additive_range_projector(min, max, eps, input_data, target_data, p expected_perturbation = torch.clone(perturbation) projector = LinfAdditiveRange(eps, min, max) - projector(perturbation, input_data, target_data) + projector(perturbation, input=input_data, target=target_data) # get expected result eps_min = (input_data - eps).clamp(min, max) - input_data @@ -117,7 +117,7 @@ def test_mask_projector(input_data, target_data, perturbation): expected_perturbation = torch.clone(perturbation) projector = Mask() - projector(perturbation, input_data, target_data) + projector(perturbation, input=input_data, target=target_data) # get expected output expected_perturbation.mul_(target_data["perturbable_mask"]) @@ -156,7 +156,7 @@ def test_compose(input_data, target_data): compose = Compose(projectors) tensor = Mock() tensor.norm.return_value = 10 - compose(tensor, input_data, target_data) + compose(tensor, input=input_data, target=target_data) # RangeProjector, RangeAdditiveProjector, and LinfAdditiveRangeProjector calls `clamp_` assert tensor.clamp_.call_count == 3 From 8773d7fc6f201b01f5825438abe85362e9bca031 Mon Sep 17 00:00:00 2001 From: 
Cory Cornelius Date: Tue, 28 Mar 2023 11:51:39 -0700 Subject: [PATCH 114/163] Gracefully fail when input is a dict --- mart/attack/perturber.py | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/mart/attack/perturber.py b/mart/attack/perturber.py index 8f38ec13..42ab7e01 100644 --- a/mart/attack/perturber.py +++ b/mart/attack/perturber.py @@ -67,10 +67,12 @@ def create_and_initialize(inp): self.initializer(pert) return pert - if not isinstance(input, tuple): - self.perturbation = create_and_initialize(input) - else: + if isinstance(input, tuple): self.perturbation = tuple(create_and_initialize(inp) for inp in input) + elif isinstance(input, dict): + raise NotImplementedError + else: + self.perturbation = create_and_initialize(input) def configure_optimizers(self): if self.perturbation is None: From d88c4587914243c00dcd0b3774c9ffed74d822e2 Mon Sep 17 00:00:00 2001 From: Cory Cornelius Date: Tue, 28 Mar 2023 13:13:39 -0700 Subject: [PATCH 115/163] Make Projector batch aware --- mart/attack/perturber/perturber.py | 2 +- mart/attack/projector.py | 121 +++++++++++------------------ 2 files changed, 47 insertions(+), 76 deletions(-) diff --git a/mart/attack/perturber/perturber.py b/mart/attack/perturber/perturber.py index d7eed81f..e8394f4f 100644 --- a/mart/attack/perturber/perturber.py +++ b/mart/attack/perturber/perturber.py @@ -55,7 +55,7 @@ def projector_wrapper(perturber_module, args): raise ValueError("Perturbation must be initialized") input, target = args - return projector(perturber_module.perturbation, input, target) + return projector(perturber_module.perturbation, input=input, target=target) # Will be called before forward() is called. 
if projector is not None: diff --git a/mart/attack/projector.py b/mart/attack/projector.py index 4c360688..92391c67 100644 --- a/mart/attack/projector.py +++ b/mart/attack/projector.py @@ -4,23 +4,37 @@ # SPDX-License-Identifier: BSD-3-Clause # -import abc -from typing import Any, Dict, List, Optional, Union +from __future__ import annotations -import torch +from typing import Any -__all__ = ["Projector"] +import torch -class Projector(abc.ABC): +class Projector: """A projector modifies nn.Parameter's data.""" @torch.no_grad() def __call__( self, - tensor: torch.Tensor, - input: torch.Tensor, - target: Union[torch.Tensor, Dict[str, Any]], + perturbation: torch.Tensor | tuple, + *, + input: torch.Tensor | tuple, + target: torch.Tensor | dict[str, Any] | tuple, + **kwargs, + ) -> None: + if isinstance(perturbation, tuple): + for perturbation_i, input_i, target_i in zip(perturbation, input, target): + self.project(perturbation_i, input=input_i, target=target_i) + else: + self.project(perturbation, input=input, target=target) + + def project( + self, + perturbation: torch.Tensor | tuple, + *, + input: torch.Tensor | tuple, + target: torch.Tensor | dict[str, Any] | tuple, ) -> None: pass @@ -28,18 +42,20 @@ def __call__( class Compose(Projector): """Apply a list of perturbation modifier.""" - def __init__(self, projectors: List[Projector]): + def __init__(self, projectors: list[Projector]): self.projectors = projectors @torch.no_grad() def __call__( self, - tensor: torch.Tensor, - input: torch.Tensor, - target: Union[torch.Tensor, Dict[str, Any]], + perturbation: torch.Tensor | tuple, + *, + input: torch.Tensor | tuple, + target: torch.Tensor | dict[str, Any] | tuple, + **kwargs, ) -> None: for projector in self.projectors: - projector(tensor, input, target) + projector(perturbation, input=input, target=target) def __repr__(self): projector_names = [repr(p) for p in self.projectors] @@ -49,26 +65,15 @@ def __repr__(self): class Range(Projector): """Clamp the 
perturbation so that the output is range-constrained.""" - def __init__( - self, - quantize: Optional[bool] = False, - min: Optional[Union[int, float]] = 0, - max: Optional[Union[int, float]] = 255, - ): + def __init__(self, quantize: bool = False, min: int | float = 0, max: int | float = 255): self.quantize = quantize self.min = min self.max = max - @torch.no_grad() - def __call__( - self, - tensor: torch.Tensor, - input: torch.Tensor, - target: Union[torch.Tensor, Dict[str, Any]], - ) -> None: + def project(self, perturbation, *, input, target): if self.quantize: - tensor.round_() - tensor.clamp_(self.min, self.max) + perturbation.round_() + perturbation.clamp_(self.min, self.max) def __repr__(self): return ( @@ -82,26 +87,15 @@ class RangeAdditive(Projector): The projector assumes an additive perturbation threat model. """ - def __init__( - self, - quantize: Optional[bool] = False, - min: Optional[Union[int, float]] = 0, - max: Optional[Union[int, float]] = 255, - ): + def __init__(self, quantize: bool = False, min: int | float = 0, max: int | float = 255): self.quantize = quantize self.min = min self.max = max - @torch.no_grad() - def __call__( - self, - tensor: torch.Tensor, - input: torch.Tensor, - target: Union[torch.Tensor, Dict[str, Any]], - ) -> None: + def project(self, perturbation, *, input, target): if self.quantize: - tensor.round_() - tensor.clamp_(self.min - input, self.max - input) + perturbation.round_() + perturbation.clamp_(self.min - input, self.max - input) def __repr__(self): return ( @@ -112,7 +106,7 @@ def __repr__(self): class Lp(Projector): """Project perturbations to Lp norm, only if the Lp norm is larger than eps.""" - def __init__(self, eps: float, p: Optional[Union[int, float]] = torch.inf): + def __init__(self, eps: int | float, p: int | float = torch.inf): """_summary_ Args: @@ -123,55 +117,32 @@ def __init__(self, eps: float, p: Optional[Union[int, float]] = torch.inf): self.p = p self.eps = eps - @torch.no_grad() - def __call__( 
- self, - tensor: torch.Tensor, - input: torch.Tensor, - target: Union[torch.Tensor, Dict[str, Any]], - ) -> None: - pert_norm = tensor.norm(p=self.p) + def project(self, perturbation, *, input, target): + pert_norm = perturbation.norm(p=self.p) if pert_norm > self.eps: # We only upper-bound the norm. - tensor.mul_(self.eps / pert_norm) + perturbation.mul_(self.eps / pert_norm) class LinfAdditiveRange(Projector): """Make sure the perturbation is within the Linf norm ball, and "input + perturbation" is within the [min, max] range.""" - def __init__( - self, - eps: float, - min: Optional[Union[int, float]] = 0, - max: Optional[Union[int, float]] = 255, - ): + def __init__(self, eps: int | float, min: int | float = 0, max: int | float = 255): self.eps = eps self.min = min self.max = max - @torch.no_grad() - def __call__( - self, - tensor: torch.Tensor, - input: torch.Tensor, - target: Union[torch.Tensor, Dict[str, Any]], - ) -> None: + def project(self, perturbation, *, input, target): eps_min = (input - self.eps).clamp(self.min, self.max) - input eps_max = (input + self.eps).clamp(self.min, self.max) - input - tensor.clamp_(eps_min, eps_max) + perturbation.clamp_(eps_min, eps_max) class Mask(Projector): - @torch.no_grad() - def __call__( - self, - tensor: torch.Tensor, - input: torch.Tensor, - target: Union[torch.Tensor, Dict[str, Any]], - ) -> None: - tensor.mul_(target["perturbable_mask"]) + def project(self, perturbation, *, input, target): + perturbation.mul_(target["perturbable_mask"]) def __repr__(self): return f"{self.__class__.__name__}()" From 13b8e32b5804a059b6ba6bb74ed65fdaf39dec97 Mon Sep 17 00:00:00 2001 From: Cory Cornelius Date: Tue, 28 Mar 2023 13:19:04 -0700 Subject: [PATCH 116/163] Update projector tests --- tests/test_projector.py | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/tests/test_projector.py b/tests/test_projector.py index 9983fa4a..a397a98c 100644 --- a/tests/test_projector.py +++ b/tests/test_projector.py 
@@ -36,7 +36,7 @@ def test_range_projector_repr(): @pytest.mark.parametrize("max", [10, 100, 110]) def test_range_projector(quantize, min, max, input_data, target_data, perturbation): projector = Range(quantize, min, max) - projector(perturbation, input_data, target_data) + projector(perturbation, input=input_data, target=target_data) assert torch.max(perturbation) <= max assert torch.min(perturbation) >= min @@ -61,7 +61,7 @@ def test_range_additive_projector(quantize, min, max, input_data, target_data, p expected_perturbation = torch.clone(perturbation) projector = RangeAdditive(quantize, min, max) - projector(perturbation, input_data, target_data) + projector(perturbation, input=input_data, target=target_data) # modify expected_perturbation if quantize: @@ -78,7 +78,7 @@ def test_lp_projector(eps, p, input_data, target_data, perturbation): expected_perturbation = torch.clone(perturbation) projector = Lp(eps, p) - projector(perturbation, input_data, target_data) + projector(perturbation, input=input_data, target=target_data) # modify expected_perturbation pert_norm = expected_perturbation.norm(p=p) @@ -95,7 +95,7 @@ def test_linf_additive_range_projector(min, max, eps, input_data, target_data, p expected_perturbation = torch.clone(perturbation) projector = LinfAdditiveRange(eps, min, max) - projector(perturbation, input_data, target_data) + projector(perturbation, input=input_data, target=target_data) # get expected result eps_min = (input_data - eps).clamp(min, max) - input_data @@ -117,7 +117,7 @@ def test_mask_projector(input_data, target_data, perturbation): expected_perturbation = torch.clone(perturbation) projector = Mask() - projector(perturbation, input_data, target_data) + projector(perturbation, input=input_data, target=target_data) # get expected output expected_perturbation.mul_(target_data["perturbable_mask"]) @@ -156,7 +156,7 @@ def test_compose(input_data, target_data): compose = Compose(projectors) tensor = Mock() tensor.norm.return_value = 10 - 
compose(tensor, input_data, target_data) + compose(tensor, input=input_data, target=target_data) # RangeProjector, RangeAdditiveProjector, and LinfAdditiveRangeProjector calls `clamp_` assert tensor.clamp_.call_count == 3 From 5c5a56c062ec059754dee7033286f200658d358b Mon Sep 17 00:00:00 2001 From: Weilin Xu Date: Tue, 28 Mar 2023 16:28:09 -0700 Subject: [PATCH 117/163] Fix merge errors. --- mart/attack/adversary.py | 2 +- mart/attack/composer.py | 12 +++++++++++- mart/attack/perturbation_manager.py | 2 +- .../attack/object_detection_rgb_mask_adversary.yaml | 6 +++--- 4 files changed, 16 insertions(+), 6 deletions(-) diff --git a/mart/attack/adversary.py b/mart/attack/adversary.py index 2a0c7caa..af33fb9a 100644 --- a/mart/attack/adversary.py +++ b/mart/attack/adversary.py @@ -91,7 +91,7 @@ def _attack(self, input, **batch): batch = {"input": input, **batch} # Configure and reset perturber to use batch inputs - self.perturber.configure_perturbation(input) + self.perturber.initialize(**batch) # Attack, aka fit a perturbation, for one epoch by cycling over the same input batch. # We use Trainer.limit_train_batches to control the number of attack iterations. diff --git a/mart/attack/composer.py b/mart/attack/composer.py index 791fdecf..381339ac 100644 --- a/mart/attack/composer.py +++ b/mart/attack/composer.py @@ -96,7 +96,7 @@ def _compose(self, perturbation, *, input, target, modality=None): output = tuple(output) return output - def forward( + def __call__( self, perturbation: torch.Tensor | tuple, *, @@ -106,3 +106,13 @@ def forward( ) -> torch.Tensor | tuple: output = self._compose(perturbation, input=input, target=target) return output + + # We have to implement an abstract method... 
+ def compose( + self, + perturbation: torch.Tensor, + *, + input: torch.Tensor, + target: torch.Tensor | dict[str, Any], + ) -> torch.Tensor: + pass diff --git a/mart/attack/perturbation_manager.py b/mart/attack/perturbation_manager.py index 50fced56..a37a2da3 100644 --- a/mart/attack/perturbation_manager.py +++ b/mart/attack/perturbation_manager.py @@ -95,7 +95,7 @@ def _project(self, perturbation, input, target, modality=None): """Recursively project perturbation tensors that may hide behind dictionaries, list or tuple.""" if isinstance(input, torch.Tensor): - self.projector[modality](perturbation, input, target) + self.projector[modality](perturbation, input=input, target=target) elif isinstance(input, dict): for modality_i, input_i in input.items(): self._project(perturbation[modality_i], input_i, target, modality=modality_i) diff --git a/mart/configs/attack/object_detection_rgb_mask_adversary.yaml b/mart/configs/attack/object_detection_rgb_mask_adversary.yaml index f7dca659..34b3afc2 100644 --- a/mart/configs/attack/object_detection_rgb_mask_adversary.yaml +++ b/mart/configs/attack/object_detection_rgb_mask_adversary.yaml @@ -4,8 +4,10 @@ defaults: - initializer@initializer.rgb: constant - gradient_modifier@gradient_modifier.rgb: sign - projector@projector.rgb: mask_range - # - callbacks: [progress_bar, image_visualizer] + # TODO: Modality-aware visualizer + # - callbacks: [image_visualizer] - objective: zero_ap + - gain: rcnn_training_loss - composer: modality - composer@composer.sub_composers.rgb: overlay - enforcer: modality @@ -19,8 +21,6 @@ optimizer: max_iters: 5 -gain: "loss" - initializer: rgb: constant: 127 From a3e5114cecb3aeb048029233d05de089bbbeea60 Mon Sep 17 00:00:00 2001 From: Weilin Xu Date: Tue, 28 Mar 2023 16:31:41 -0700 Subject: [PATCH 118/163] Remove get_tensor_device() because we now get device from LightningModule. 
--- mart/attack/adversary.py | 2 +- mart/utils/utils.py | 20 -------------------- 2 files changed, 1 insertion(+), 21 deletions(-) diff --git a/mart/attack/adversary.py b/mart/attack/adversary.py index af33fb9a..99dea5f9 100644 --- a/mart/attack/adversary.py +++ b/mart/attack/adversary.py @@ -13,7 +13,7 @@ import pytorch_lightning as pl import torch -from mart.utils import get_tensor_device, silent +from mart.utils import silent from .perturber import Perturber diff --git a/mart/utils/utils.py b/mart/utils/utils.py index 2c04805f..26949d93 100644 --- a/mart/utils/utils.py +++ b/mart/utils/utils.py @@ -27,7 +27,6 @@ "log_hyperparameters", "save_file", "task_wrapper", - "get_tensor_device", ] log = pylogger.get_pylogger(__name__) @@ -274,22 +273,3 @@ def get_resume_checkpoint(config: DictConfig) -> Tuple[DictConfig]: config = hydra.compose(config_name, overrides=overrides) return config - - -def get_tensor_device(data): - """Get device of the first tensor hidden in dict/list/tuple.""" - - def _get_first_tensor(data): - if isinstance(data, torch.Tensor): - return data - elif isinstance(data, list) or isinstance(data, tuple) or isinstance(data, dict): - data = data.values() if isinstance(data, dict) else data - for sub_data in data: - ret = _get_first_tensor(sub_data) - if ret is not None: - return ret - else: - return None - - tensor = _get_first_tensor(data) - return tensor.device From 645c005c7fbd897015c972cbb71de76e56aaa6d8 Mon Sep 17 00:00:00 2001 From: Weilin Xu Date: Tue, 28 Mar 2023 16:32:12 -0700 Subject: [PATCH 119/163] Cleaning. 
--- mart/utils/utils.py | 1 - 1 file changed, 1 deletion(-) diff --git a/mart/utils/utils.py b/mart/utils/utils.py index 26949d93..80609331 100644 --- a/mart/utils/utils.py +++ b/mart/utils/utils.py @@ -7,7 +7,6 @@ from typing import Any, Callable, Dict, List, Tuple import hydra -import torch from hydra.core.hydra_config import HydraConfig from omegaconf import DictConfig, OmegaConf from pytorch_lightning import Callback From 764220fbe01c7cd8b189da8aaf33f7b4da522e03 Mon Sep 17 00:00:00 2001 From: Weilin Xu Date: Wed, 29 Mar 2023 11:17:40 -0700 Subject: [PATCH 120/163] Simplify ModalityEnforcer's configuration. --- mart/attack/enforcer.py | 14 +++++--------- mart/configs/attack/enforcer/modality.yaml | 1 - .../object_detection_rgb_mask_adversary.yaml | 4 +--- 3 files changed, 6 insertions(+), 13 deletions(-) diff --git a/mart/attack/enforcer.py b/mart/attack/enforcer.py index bda97822..d19d4e7c 100644 --- a/mart/attack/enforcer.py +++ b/mart/attack/enforcer.py @@ -116,17 +116,13 @@ def __call__( class ModalityEnforcer(Enforcer): - def __init__(self, sub_enforcers: dict | Enforcer) -> None: + def __init__(self, **modality_constraints: dict[str, dict[str, Constraint]]) -> None: + self.modality_constraints = modality_constraints - # Backward compatibility for datasets which do not have modality tokens. 
- if isinstance(sub_enforcers, Enforcer): - sub_enforcers = {None: sub_enforcers} - - self.sub_enforcers = sub_enforcers - - def _enforce(self, input_adv, *, input, target, modality=None): + def _enforce(self, input_adv, *, input, target, modality="default"): if isinstance(input_adv, torch.Tensor): - self.sub_enforcers[modality](input_adv, input=input, target=target) + for constraint in self.modality_constraints[modality].values(): + constraint(input_adv, input=input, target=target) elif isinstance(input_adv, dict): for modality in input_adv: self._enforce( diff --git a/mart/configs/attack/enforcer/modality.yaml b/mart/configs/attack/enforcer/modality.yaml index d3013fc1..e5d7c6ac 100644 --- a/mart/configs/attack/enforcer/modality.yaml +++ b/mart/configs/attack/enforcer/modality.yaml @@ -1,2 +1 @@ _target_: mart.attack.ModalityEnforcer -sub_enforcers: ??? diff --git a/mart/configs/attack/object_detection_rgb_mask_adversary.yaml b/mart/configs/attack/object_detection_rgb_mask_adversary.yaml index 34b3afc2..41d3528d 100644 --- a/mart/configs/attack/object_detection_rgb_mask_adversary.yaml +++ b/mart/configs/attack/object_detection_rgb_mask_adversary.yaml @@ -11,9 +11,7 @@ defaults: - composer: modality - composer@composer.sub_composers.rgb: overlay - enforcer: modality - - enforcer@enforcer.sub_enforcers.rgb: default - - enforcer/constraints@enforcer.sub_enforcers.rgb.constraints: - [mask, pixel_range] + - enforcer/constraints@enforcer.rgb: [mask, pixel_range] # Make a 5-step attack for the demonstration purpose. optimizer: From fd4c2821ccd88e2f0b9a2b1e4dfcbd29abf5219f Mon Sep 17 00:00:00 2001 From: Weilin Xu Date: Wed, 29 Mar 2023 11:22:33 -0700 Subject: [PATCH 121/163] Simplify config of ModalityComposer. 
--- mart/attack/composer.py | 15 +++++---------- mart/configs/attack/composer/modality.yaml | 1 - .../object_detection_rgb_mask_adversary.yaml | 2 +- 3 files changed, 6 insertions(+), 12 deletions(-) diff --git a/mart/attack/composer.py b/mart/attack/composer.py index 381339ac..ae665a2a 100644 --- a/mart/attack/composer.py +++ b/mart/attack/composer.py @@ -67,19 +67,14 @@ def compose(self, perturbation, *, input, target): class ModalityComposer(Composer): - def __init__(self, sub_composers: dict[str, Composer] | Composer): - super().__init__() + def __init__(self, **modality_composers): + self.modality_composers = modality_composers - # Backward compatibility for datasets which do not have modality tokens. - if isinstance(sub_composers, Composer): - sub_composers = {None: sub_composers} - - self.sub_composers = sub_composers - - def _compose(self, perturbation, *, input, target, modality=None): + def _compose(self, perturbation, *, input, target, modality="default"): """Recursively compose output from perturbation and input.""" if isinstance(perturbation, torch.Tensor): - output = self.sub_composers[modality](perturbation, input=input, target=target) + composer = self.modality_composers[modality] + output = composer(perturbation, input=input, target=target) return output elif isinstance(perturbation, dict): output = {} diff --git a/mart/configs/attack/composer/modality.yaml b/mart/configs/attack/composer/modality.yaml index 5e90a3b8..e84f946c 100644 --- a/mart/configs/attack/composer/modality.yaml +++ b/mart/configs/attack/composer/modality.yaml @@ -1,2 +1 @@ _target_: mart.attack.ModalityComposer -sub_composers: ??? 
diff --git a/mart/configs/attack/object_detection_rgb_mask_adversary.yaml b/mart/configs/attack/object_detection_rgb_mask_adversary.yaml index 41d3528d..8c6f96ca 100644 --- a/mart/configs/attack/object_detection_rgb_mask_adversary.yaml +++ b/mart/configs/attack/object_detection_rgb_mask_adversary.yaml @@ -9,7 +9,7 @@ defaults: - objective: zero_ap - gain: rcnn_training_loss - composer: modality - - composer@composer.sub_composers.rgb: overlay + - composer@composer.rgb: overlay - enforcer: modality - enforcer/constraints@enforcer.rgb: [mask, pixel_range] From 86a34522dbec729744c80ba7fbf13a3c879c5d6c Mon Sep 17 00:00:00 2001 From: Weilin Xu Date: Wed, 29 Mar 2023 11:23:13 -0700 Subject: [PATCH 122/163] Update config for non-modality adversary. --- mart/configs/attack/object_detection_mask_adversary.yaml | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/mart/configs/attack/object_detection_mask_adversary.yaml b/mart/configs/attack/object_detection_mask_adversary.yaml index 1b693954..4dcc5ddc 100644 --- a/mart/configs/attack/object_detection_mask_adversary.yaml +++ b/mart/configs/attack/object_detection_mask_adversary.yaml @@ -8,10 +8,9 @@ defaults: - objective: zero_ap - gain: rcnn_training_loss - composer: modality - - composer@composer.sub_composers: overlay + - composer@composer.default: overlay - enforcer: modality - - enforcer@enforcer.sub_enforcers: default - - enforcer/constraints@enforcer.sub_enforcers.constraints: [mask, pixel_range] + - enforcer/constraints@enforcer.default: [mask, pixel_range] # Make a 5-step attack for the demonstration purpose. optimizer: From 13cd5e6eb453fc18d04f065fef6c5f0b4f2257f2 Mon Sep 17 00:00:00 2001 From: Weilin Xu Date: Wed, 29 Mar 2023 17:51:45 -0700 Subject: [PATCH 123/163] Resume previous API. 
--- mart/attack/adversary.py | 2 +- mart/attack/perturber.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/mart/attack/adversary.py b/mart/attack/adversary.py index 99dea5f9..c950241b 100644 --- a/mart/attack/adversary.py +++ b/mart/attack/adversary.py @@ -91,7 +91,7 @@ def _attack(self, input, **batch): batch = {"input": input, **batch} # Configure and reset perturber to use batch inputs - self.perturber.initialize(**batch) + self.perturber.configure_perturbation(input) # Attack, aka fit a perturbation, for one epoch by cycling over the same input batch. # We use Trainer.limit_train_batches to control the number of attack iterations. diff --git a/mart/attack/perturber.py b/mart/attack/perturber.py index 044e271c..95a0d86c 100644 --- a/mart/attack/perturber.py +++ b/mart/attack/perturber.py @@ -67,7 +67,7 @@ def __init__( optim_params=optim_params, ) - def initialize(self, *, input, **kwargs): + def configure_perturbation(self, input): self.pert_manager.initialize(input) def project(self, input, target): From 676f9b508dca7a462f59a7d730c1cb53cd84c686 Mon Sep 17 00:00:00 2001 From: Weilin Xu Date: Wed, 29 Mar 2023 20:54:29 -0700 Subject: [PATCH 124/163] Integrate PerturbationManager into Perturber. 
--- mart/attack/perturbation_manager.py | 128 ---------------------------- mart/attack/perturber.py | 106 +++++++++++++++++++---- tests/test_perturbation_manager.py | 24 +++--- 3 files changed, 104 insertions(+), 154 deletions(-) delete mode 100644 mart/attack/perturbation_manager.py diff --git a/mart/attack/perturbation_manager.py b/mart/attack/perturbation_manager.py deleted file mode 100644 index a37a2da3..00000000 --- a/mart/attack/perturbation_manager.py +++ /dev/null @@ -1,128 +0,0 @@ -# -# Copyright (C) 2022 Intel Corporation -# -# SPDX-License-Identifier: BSD-3-Clause -# - -from __future__ import annotations - -import itertools -from typing import Callable - -import torch - - -class PerturbationManager: - def __init__( - self, - *, - initializer: Callable | dict, - gradient_modifier: Callable | dict | None = None, - projector: Callable | dict | None = None, - optim_params: dict | None = None, - ) -> None: - - # In case gradient_modifier or projector is None. - def nop(*args, **kwargs): - pass - - gradient_modifier = gradient_modifier or nop - projector = projector or nop - - # Backward compatibility, in case modality is unknown, and not given in input. - if not isinstance(initializer, dict): - initializer = {None: initializer} - if not isinstance(gradient_modifier, dict): - gradient_modifier = {None: gradient_modifier} - if not isinstance(projector, dict): - projector = {None: projector} - - # In case optimization parameters are not given. - optim_params = optim_params or {modality: {} for modality in initializer.keys()} - - self.initializer = initializer - self.gradient_modifier = gradient_modifier - self.projector = projector - self.optim_params = optim_params - - self._perturbation = None - - @property - def perturbation(self): - """Return perturbation that is homomorphic with input.""" - # TODO: Compose perturbation from sub-componenets. - return self._perturbation - - def initialize(self, input): - """Create and initialize raw perturbation components. 
- - With raw perturbation components, we can - 1. compose perturbation that is homomorphic to input. - 2. compose parameter groups for optimization. - """ - # TODO: Raw perturbation is not necessarily homorphic with input. - self._perturbation = self._initialize(input) - - def _initialize(self, input, modality=None): - """Recursively materialize and initialize perturbation that is homomorphic as input; Hook - gradient modifiers.""" - if isinstance(input, torch.Tensor): - # Materialize. - pert = torch.zeros_like(input, requires_grad=True) - - # Initialize. - self.initializer[modality](pert) - - # Gradient modifier hook. - # FIXME: use actual gradient modifier, self.gradient_modifier[modality](pert) - # The current implementation of gradient modifiers is not hookable. - if self.gradient_modifier is not None: - pert.register_hook(lambda grad: grad.sign()) - - return pert - elif isinstance(input, dict): - return {modality: self._initialize(inp, modality) for modality, inp in input.items()} - elif isinstance(input, list): - return [self._initialize(inp) for inp in input] - elif isinstance(input, tuple): - return tuple(self._initialize(inp) for inp in input) - - def project(self, input, target): - if self.projector is not None: - self._project(self._perturbation, input, target) - - def _project(self, perturbation, input, target, modality=None): - """Recursively project perturbation tensors that may hide behind dictionaries, list or - tuple.""" - if isinstance(input, torch.Tensor): - self.projector[modality](perturbation, input=input, target=target) - elif isinstance(input, dict): - for modality_i, input_i in input.items(): - self._project(perturbation[modality_i], input_i, target, modality=modality_i) - elif isinstance(input, list) or isinstance(input, tuple): - for perturbation_i, input_i, target_i in zip(perturbation, input, target): - self._project(perturbation_i, input_i, target_i, modality=modality) - - @property - def parameter_groups(self): - param_groups = 
self._parameter_groups(self._perturbation) - return param_groups - - def _parameter_groups(self, pert, modality=None): - """Recursively return parameter groups as a list of dictionaries.""" - - if isinstance(pert, torch.Tensor): - return [{"params": pert} | self.optim_params[modality]] - elif isinstance(pert, dict): - ret = [self._parameter_groups(pert_i, modality) for modality, pert_i in pert.items()] - # Concatenate a list of lists. - return list(itertools.chain.from_iterable(ret)) - elif isinstance(pert, list) or isinstance(pert, tuple): - param_list = [] - for pert_i in pert: - param_list.extend(self._parameter_groups(pert_i)) - return param_list - - def __call__(self, input): - self.initialize(input) - return self.perturbation diff --git a/mart/attack/perturber.py b/mart/attack/perturber.py index 95a0d86c..d6562754 100644 --- a/mart/attack/perturber.py +++ b/mart/attack/perturber.py @@ -6,6 +6,7 @@ from __future__ import annotations +import itertools from typing import TYPE_CHECKING, Any, Callable import pytorch_lightning as pl @@ -13,7 +14,6 @@ from pytorch_lightning.utilities.exceptions import MisconfigurationException from .gradient_modifier import GradientModifier -from .perturbation_manager import PerturbationManager from .projector import Projector if TYPE_CHECKING: @@ -58,28 +58,102 @@ def __init__( self.gain_fn = gain self.objective_fn = objective - # An object manage the perturbation in both the tensor and the parameter form. # FIXME: gradient_modifier should be a hook operating on .grad directly. - self.pert_manager = PerturbationManager( - initializer=initializer, - gradient_modifier=gradient_modifier, - projector=projector, - optim_params=optim_params, - ) - def configure_perturbation(self, input): - self.pert_manager.initialize(input) + # In case gradient_modifier or projector is None. 
+ def nop(*args, **kwargs): + pass - def project(self, input, target): - return self.pert_manager.project(input, target) + gradient_modifier = gradient_modifier or nop + projector = projector or nop - @property - def perturbation(self): - return self.pert_manager.perturbation + # Backward compatibility, in case modality is unknown, and not given in input. + if not isinstance(initializer, dict): + initializer = {"default": initializer} + if not isinstance(gradient_modifier, dict): + gradient_modifier = {"default": gradient_modifier} + if not isinstance(projector, dict): + projector = {"default": projector} + + # In case optimization parameters are not given. + optim_params = optim_params or {modality: {} for modality in initializer.keys()} + + self.initializer = initializer + self.gradient_modifier = gradient_modifier + self.projector = projector + self.optim_params = optim_params + + self.perturbation = None + + def configure_perturbation(self, input): + # Recursively configure perturbation in tensor. + self.perturbation = self._configure_perturbation(input) + + def _configure_perturbation(self, input, modality="default"): + """Recursively create and initialize perturbation that is homomorphic as input; Hook + gradient modifiers.""" + if isinstance(input, torch.Tensor): + # Create. + pert = torch.empty_like(input, requires_grad=True) + + # Initialize. + self.initializer[modality](pert) + + # Gradient modifier hook. + # FIXME: use actual gradient modifier, self.gradient_modifier[modality](pert) + # The current implementation of gradient modifiers is not hookable. 
+ if self.gradient_modifier is not None: + pert.register_hook(lambda grad: grad.sign()) + + return pert + elif isinstance(input, dict): + return { + modality: self._configure_perturbation(inp, modality) + for modality, inp in input.items() + } + elif isinstance(input, list): + return [self._configure_perturbation(inp) for inp in input] + elif isinstance(input, tuple): + return tuple(self._configure_perturbation(inp) for inp in input) @property def parameter_groups(self): - return self.pert_manager.parameter_groups + """Extract parameter groups for optimization from perturbation tensor(s).""" + param_groups = self._parameter_groups(self.perturbation) + return param_groups + + def _parameter_groups(self, pert, modality="default"): + """Recursively return parameter groups as a list of dictionaries.""" + + if isinstance(pert, torch.Tensor): + return [{"params": pert} | self.optim_params[modality]] + elif isinstance(pert, dict): + ret = [self._parameter_groups(pert_i, modality) for modality, pert_i in pert.items()] + # Concatenate a list of lists. 
+ return list(itertools.chain.from_iterable(ret)) + elif isinstance(pert, list) or isinstance(pert, tuple): + param_list = [] + for pert_i in pert: + param_list.extend(self._parameter_groups(pert_i)) + return param_list + + def project(self, input, target): + if self.projector is not None: + self._project(self.perturbation, input=input, target=target) + + def _project(self, perturbation, *, input, target, modality="default"): + """Recursively project perturbation tensors that may hide behind dictionaries, list or + tuple.""" + if isinstance(input, torch.Tensor): + self.projector[modality](perturbation, input=input, target=target) + elif isinstance(input, dict): + for modality_i, input_i in input.items(): + self._project( + perturbation[modality_i], input=input_i, target=target, modality=modality_i + ) + elif isinstance(input, list) or isinstance(input, tuple): + for perturbation_i, input_i, target_i in zip(perturbation, input, target): + self._project(perturbation_i, input=input_i, target=target_i, modality=modality) def configure_optimizers(self): # Parameter initialization is done in Adversary before fit() by invoking initialize(input). 
diff --git a/tests/test_perturbation_manager.py b/tests/test_perturbation_manager.py index 59d6eb24..9dd9fc16 100644 --- a/tests/test_perturbation_manager.py +++ b/tests/test_perturbation_manager.py @@ -1,23 +1,25 @@ from typing import Iterable +from unittest.mock import Mock import torch from mart.attack.initializer import Constant -from mart.attack.perturbation_manager import PerturbationManager +from mart.attack.perturber import Perturber def test_perturbation_tensor(): input_data = torch.tensor([1.0, 2.0]) initializer = Constant(constant=0) - pert_manager = PerturbationManager(initializer=initializer) + perturber = Perturber(initializer=initializer, optimizer=Mock(), composer=Mock(), gain=Mock()) - pert = pert_manager(input_data) + perturber.configure_perturbation(input_data) + pert = perturber.perturbation assert isinstance(pert, torch.Tensor) assert pert.shape == pert.shape assert (pert == 0).all() - param_groups = pert_manager.parameter_groups + param_groups = perturber.parameter_groups assert isinstance(param_groups, Iterable) assert param_groups[0]["params"].requires_grad @@ -25,14 +27,15 @@ def test_perturbation_tensor(): def test_perturbation_dict(): input_data = {"rgb": torch.tensor([1.0, 2.0]), "depth": torch.tensor([1.0, 2.0])} initializer = {"rgb": Constant(constant=0), "depth": Constant(constant=1)} - pert_manager = PerturbationManager(initializer=initializer) + perturber = Perturber(initializer=initializer, optimizer=Mock(), composer=Mock(), gain=Mock()) - pert = pert_manager(input_data) + perturber.configure_perturbation(input_data) + pert = perturber.perturbation assert isinstance(pert, dict) assert (pert["rgb"] == 0).all() assert (pert["depth"] == 1).all() - param_groups = pert_manager.parameter_groups + param_groups = perturber.parameter_groups assert len(param_groups) == 2 param_groups = list(param_groups) assert param_groups[0]["params"].requires_grad @@ -45,16 +48,17 @@ def test_perturbation_tuple_dict(): {"rgb": torch.tensor([-1.0, -2.0]), 
"depth": torch.tensor([-3.0, -4.0])}, ) initializer = {"rgb": Constant(constant=0), "depth": Constant(constant=1)} - pert_manager = PerturbationManager(initializer=initializer) + perturber = Perturber(initializer=initializer, optimizer=Mock(), composer=Mock(), gain=Mock()) - pert = pert_manager(input_data) + perturber.configure_perturbation(input_data) + pert = perturber.perturbation assert isinstance(pert, tuple) assert (pert[0]["rgb"] == 0).all() assert (pert[0]["depth"] == 1).all() assert (pert[1]["rgb"] == 0).all() assert (pert[1]["depth"] == 1).all() - param_groups = pert_manager.parameter_groups + param_groups = perturber.parameter_groups assert len(param_groups) == 4 param_groups = list(param_groups) assert param_groups[0]["params"].requires_grad From 543f9fc306e974123ceda7d741573d2a0e78598d Mon Sep 17 00:00:00 2001 From: Weilin Xu Date: Wed, 29 Mar 2023 20:57:36 -0700 Subject: [PATCH 125/163] Restore perturbation runtim checking. --- mart/attack/perturber.py | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/mart/attack/perturber.py b/mart/attack/perturber.py index d6562754..422cc5bd 100644 --- a/mart/attack/perturber.py +++ b/mart/attack/perturber.py @@ -156,7 +156,11 @@ def _project(self, perturbation, *, input, target, modality="default"): self._project(perturbation_i, input=input_i, target=target_i, modality=modality) def configure_optimizers(self): - # Parameter initialization is done in Adversary before fit() by invoking initialize(input). + # parameter_groups is generated from perturbation. + if self.perturbation is None: + raise MisconfigurationException( + "You need to call the configure_perturbation before fit." + ) return self.optimizer_fn(self.parameter_groups) def training_step(self, batch, batch_idx): From a2132bff91fa04d64d6bc59f255edaddc93ee3db Mon Sep 17 00:00:00 2001 From: Weilin Xu Date: Wed, 29 Mar 2023 20:59:57 -0700 Subject: [PATCH 126/163] Update API. 
--- mart/attack/perturber.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/mart/attack/perturber.py b/mart/attack/perturber.py index 422cc5bd..71a67683 100644 --- a/mart/attack/perturber.py +++ b/mart/attack/perturber.py @@ -137,7 +137,7 @@ def _parameter_groups(self, pert, modality="default"): param_list.extend(self._parameter_groups(pert_i)) return param_list - def project(self, input, target): + def project(self, *, input, target, **kwargs): if self.projector is not None: self._project(self.perturbation, input=input, target=target) @@ -202,7 +202,7 @@ def forward( ) # Project perturbation... - self.project(input, target) + self.project(input=input, target=target) # Compose adversarial input. input_adv = self.composer(self.perturbation, input=input, target=target) From 1911ce66a92f812d317a66125eaa7a2b3b443eb0 Mon Sep 17 00:00:00 2001 From: Weilin Xu Date: Thu, 30 Mar 2023 17:51:17 -0700 Subject: [PATCH 127/163] Add a reusable modality_dispatch(). --- mart/utils/__init__.py | 1 + mart/utils/modality_dispatch.py | 55 +++++++++++++++++++++++++++++++++ 2 files changed, 56 insertions(+) create mode 100644 mart/utils/modality_dispatch.py diff --git a/mart/utils/__init__.py b/mart/utils/__init__.py index 50e71b3d..4c1ae708 100644 --- a/mart/utils/__init__.py +++ b/mart/utils/__init__.py @@ -1,5 +1,6 @@ from .adapters import * from .export import * +from .modality_dispatch import * from .monkey_patch import * from .pylogger import * from .rich_utils import * diff --git a/mart/utils/modality_dispatch.py b/mart/utils/modality_dispatch.py new file mode 100644 index 00000000..dc30a5f9 --- /dev/null +++ b/mart/utils/modality_dispatch.py @@ -0,0 +1,55 @@ +# +# Copyright (C) 2022 Intel Corporation +# +# SPDX-License-Identifier: BSD-3-Clause +# + +from __future__ import annotations + +from typing import Any, Callable + +import torch +from torch import Tensor + +__all__ = ["modality_dispatch"] + + +def modality_dispatch( + modality_func: dict[str, 
Callable], + data: Tensor | tuple | list[Tensor] | dict[str, Tensor], + *, + input: Tensor | tuple | list[Tensor] | dict[str, Tensor], + target: torch.Tensor | dict[str, Any], + modality: str = "default", +): + """Recursively dispatch data and input to functions of the same modality.""" + + assert type(data) == type(input) + + if isinstance(input, torch.Tensor): + return modality_func[modality](data, input=input, target=target) + elif isinstance(input, dict): + # The dict input has modalities specified in keys, passing them recursively. + output = {} + for modality in input.keys(): + output[modality] = modality_dispatch( + modality_func, + data[modality], + input=input[modality], + target=target, + modality=modality, + ) + return output + elif isinstance(input, (list, tuple)): + # The list or tuple input is a collection of sub-input and sub-target. + output = [] + for data_i, input_i, target_i in zip(data, input, target): + output_i = modality_dispatch( + modality_func, data_i, input=input_i, target=target_i, modality=modality + ) + output.append(output_i) + if isinstance(input, tuple): + output = tuple(output) + return output + else: + raise ValueError(f"Unsupported data type of input: {type(input)}.") From 604ae6480d1f38d98fe2a8fa4dcaa75a05bd06da Mon Sep 17 00:00:00 2001 From: Weilin Xu Date: Thu, 30 Mar 2023 17:58:40 -0700 Subject: [PATCH 128/163] Use modality_dispatch() for projector and composer. 
--- mart/attack/perturber.py | 23 +++++++------------ .../object_detection_rgb_mask_adversary.yaml | 1 - 2 files changed, 8 insertions(+), 16 deletions(-) diff --git a/mart/attack/perturber.py b/mart/attack/perturber.py index 71a67683..3d7b8a45 100644 --- a/mart/attack/perturber.py +++ b/mart/attack/perturber.py @@ -13,6 +13,8 @@ import torch from pytorch_lightning.utilities.exceptions import MisconfigurationException +from mart.utils import modality_dispatch + from .gradient_modifier import GradientModifier from .projector import Projector @@ -139,21 +141,12 @@ def _parameter_groups(self, pert, modality="default"): def project(self, *, input, target, **kwargs): if self.projector is not None: - self._project(self.perturbation, input=input, target=target) + modality_dispatch( + self.projector, self.perturbation, input=input, target=target, modality="default" + ) - def _project(self, perturbation, *, input, target, modality="default"): - """Recursively project perturbation tensors that may hide behind dictionaries, list or - tuple.""" - if isinstance(input, torch.Tensor): - self.projector[modality](perturbation, input=input, target=target) - elif isinstance(input, dict): - for modality_i, input_i in input.items(): - self._project( - perturbation[modality_i], input=input_i, target=target, modality=modality_i - ) - elif isinstance(input, list) or isinstance(input, tuple): - for perturbation_i, input_i, target_i in zip(perturbation, input, target): - self._project(perturbation_i, input=input_i, target=target_i, modality=modality) + def compose(self, *, input, target): + return modality_dispatch(self.composer, self.perturbation, input=input, target=target) def configure_optimizers(self): # parameter_groups is generated from perturbation. @@ -205,6 +198,6 @@ def forward( self.project(input=input, target=target) # Compose adversarial input. 
- input_adv = self.composer(self.perturbation, input=input, target=target) + input_adv = self.compose(input=input, target=target) return input_adv diff --git a/mart/configs/attack/object_detection_rgb_mask_adversary.yaml b/mart/configs/attack/object_detection_rgb_mask_adversary.yaml index 9cf68e41..d7819733 100644 --- a/mart/configs/attack/object_detection_rgb_mask_adversary.yaml +++ b/mart/configs/attack/object_detection_rgb_mask_adversary.yaml @@ -8,7 +8,6 @@ defaults: # - callbacks: [image_visualizer] - objective: zero_ap - gain: rcnn_training_loss - - composer: modality - composer@composer.rgb: overlay - enforcer: default - enforcer/constraints@enforcer.rgb: [mask, pixel_range] From ece768ee267215927e1f6522ecc7ff0132f3f3e8 Mon Sep 17 00:00:00 2001 From: Weilin Xu Date: Thu, 30 Mar 2023 18:01:09 -0700 Subject: [PATCH 129/163] Remove modality-aware composer. --- mart/attack/composer.py | 50 ---------------------- mart/configs/attack/composer/modality.yaml | 1 - 2 files changed, 51 deletions(-) delete mode 100644 mart/configs/attack/composer/modality.yaml diff --git a/mart/attack/composer.py b/mart/attack/composer.py index ae665a2a..ddfdc45b 100644 --- a/mart/attack/composer.py +++ b/mart/attack/composer.py @@ -11,8 +11,6 @@ import torch -__all__ = ["ModalityComposer"] - class Composer(abc.ABC): def __call__( @@ -60,54 +58,6 @@ def compose(self, perturbation, *, input, target): # Convert mask to a Tensor with same torch.dtype and torch.device as input, # because some data modules (e.g. Armory) gives binary mask. 
- # FIXME: input can be a dictionary {"rgb": tensor} mask = mask.to(input) return input * (1 - mask) + perturbation * mask - - -class ModalityComposer(Composer): - def __init__(self, **modality_composers): - self.modality_composers = modality_composers - - def _compose(self, perturbation, *, input, target, modality="default"): - """Recursively compose output from perturbation and input.""" - if isinstance(perturbation, torch.Tensor): - composer = self.modality_composers[modality] - output = composer(perturbation, input=input, target=target) - return output - elif isinstance(perturbation, dict): - output = {} - for modality, pert in perturbation.items(): - output[modality] = self._compose( - pert, input=input[modality], target=target, modality=modality - ) - return output - elif isinstance(perturbation, list) or isinstance(perturbation, tuple): - output = [] - for pert_i, input_i, target_i in zip(perturbation, input, target): - output.append(self._compose(pert_i, input=input_i, target=target_i)) - if isinstance(perturbation, tuple): - output = tuple(output) - return output - - def __call__( - self, - perturbation: torch.Tensor | tuple, - *, - input: torch.Tensor | tuple, - target: torch.Tensor | dict[str, Any] | tuple, - **kwargs, - ) -> torch.Tensor | tuple: - output = self._compose(perturbation, input=input, target=target) - return output - - # We have to implement an abstract method... 
- def compose( - self, - perturbation: torch.Tensor, - *, - input: torch.Tensor, - target: torch.Tensor | dict[str, Any], - ) -> torch.Tensor: - pass diff --git a/mart/configs/attack/composer/modality.yaml b/mart/configs/attack/composer/modality.yaml deleted file mode 100644 index e84f946c..00000000 --- a/mart/configs/attack/composer/modality.yaml +++ /dev/null @@ -1 +0,0 @@ -_target_: mart.attack.ModalityComposer From 1c7054bf38980a914ace3b3d3ecfc91e90079bab Mon Sep 17 00:00:00 2001 From: Weilin Xu Date: Thu, 30 Mar 2023 18:06:05 -0700 Subject: [PATCH 130/163] Use modality_dispatch in Enforcer. --- mart/attack/enforcer.py | 55 ++++++++++++++++++----------------------- 1 file changed, 24 insertions(+), 31 deletions(-) diff --git a/mart/attack/enforcer.py b/mart/attack/enforcer.py index babc44e6..8c9fd742 100644 --- a/mart/attack/enforcer.py +++ b/mart/attack/enforcer.py @@ -7,9 +7,13 @@ from __future__ import annotations import abc +from functools import partial from typing import Any import torch +from torch import Tensor + +from mart.utils import modality_dispatch __all__ = ["Enforcer"] @@ -21,20 +25,20 @@ class ConstraintViolated(Exception): class Constraint(abc.ABC): def __call__( self, - input_adv: torch.Tensor, + input_adv: Tensor, *, - input: torch.Tensor, - target: torch.Tensor | dict[str, Any], + input: Tensor, + target: Tensor | dict[str, Any], ) -> None: self.verify(input_adv, input=input, target=target) @abc.abstractmethod def verify( self, - input_adv: torch.Tensor, + input_adv: Tensor, *, - input: torch.Tensor, - target: torch.Tensor | dict[str, Any], + input: Tensor, + target: Tensor | dict[str, Any], ) -> None: raise NotImplementedError @@ -97,14 +101,19 @@ def verify(self, input_adv, *, input, target): class Enforcer: def __init__(self, **modality_constraints: dict[str, dict[str, Constraint]]) -> None: self.modality_constraints = modality_constraints + # Prepare for modality_dispatch(). 
+ self.modality_func = { + modality: partial(self._enforce, modality=modality) + for modality in self.modality_constraints + } @torch.no_grad() def _enforce( self, - input_adv: torch.Tensor, + input_adv: Tensor, *, - input: torch.Tensor, - target: torch.Tensor | dict[str, Any], + input: Tensor, + target: Tensor | dict[str, Any], modality: str, ): for constraint in self.modality_constraints[modality].values(): @@ -112,28 +121,12 @@ def _enforce( def __call__( self, - input_adv: torch.Tensor | tuple | list[torch.Tensor] | dict[str, torch.Tensor], + input_adv: Tensor | list[Tensor] | list[dict[str, Tensor]], *, - input: torch.Tensor | tuple | list[torch.Tensor] | dict[str, torch.Tensor], - target: torch.Tensor | dict[str, Any], - modality: str = "constraints", + input: Tensor | list[Tensor] | list[dict[str, Tensor]], + target: Tensor | dict[str, Any], **kwargs, ): - assert type(input_adv) == type(input) - - if isinstance(input_adv, torch.Tensor): - # Finally we can verify constraints on tensor, per its modality. - # Set modality="constraints" by default, so that it is backward compatible with existing configs without modalities. - self._enforce(input_adv, input=input, target=target, modality=modality) - elif isinstance(input_adv, dict): - # The dict input has modalities specified in keys, passing them recursively. - for modality in input_adv: - self(input_adv[modality], input=input[modality], target=target, modality=modality) - elif isinstance(input_adv, (list, tuple)): - # We assume a modality-dictionary only contains tensors, but not list/tuple. - assert modality == "constraints" - # The list or tuple input is a collection of sub-input and sub-target. 
- for input_adv_i, input_i, target_i in zip(input_adv, input, target): - self(input_adv_i, input=input_i, target=target_i, modality=modality) - else: - raise ValueError(f"Unsupported data type of input_adv: {type(input_adv)}.") + modality_dispatch( + self.modality_func, input_adv, input=input, target=target, modality="constraints" + ) From 5f576859c43760007c442aa89b50ed08ae5306de Mon Sep 17 00:00:00 2001 From: Weilin Xu Date: Thu, 30 Mar 2023 18:08:06 -0700 Subject: [PATCH 131/163] Remove dead code. --- mart/configs/attack/enforcer/modality.yaml | 1 - 1 file changed, 1 deletion(-) delete mode 100644 mart/configs/attack/enforcer/modality.yaml diff --git a/mart/configs/attack/enforcer/modality.yaml b/mart/configs/attack/enforcer/modality.yaml deleted file mode 100644 index e5d7c6ac..00000000 --- a/mart/configs/attack/enforcer/modality.yaml +++ /dev/null @@ -1 +0,0 @@ -_target_: mart.attack.ModalityEnforcer From 7db9f8cbc6137b93e9953cf6db8f1fa90d709cac Mon Sep 17 00:00:00 2001 From: Weilin Xu Date: Thu, 30 Mar 2023 18:09:36 -0700 Subject: [PATCH 132/163] Fix config. --- mart/configs/attack/object_detection_mask_adversary.yaml | 1 - 1 file changed, 1 deletion(-) diff --git a/mart/configs/attack/object_detection_mask_adversary.yaml b/mart/configs/attack/object_detection_mask_adversary.yaml index 519a4fb3..659a6945 100644 --- a/mart/configs/attack/object_detection_mask_adversary.yaml +++ b/mart/configs/attack/object_detection_mask_adversary.yaml @@ -7,7 +7,6 @@ defaults: - callbacks: [image_visualizer] - objective: zero_ap - gain: rcnn_training_loss - - composer: modality - composer@composer.default: overlay - enforcer: default - enforcer/constraints: [mask, pixel_range] From ae2c4d854bac73dda053bdaeee87ca7a43f1f4c5 Mon Sep 17 00:00:00 2001 From: Weilin Xu Date: Thu, 30 Mar 2023 18:21:56 -0700 Subject: [PATCH 133/163] Use modality_dispatch for initializer and gradient_modifier. 
--- mart/attack/perturber.py | 30 +++++++++++------------------- 1 file changed, 11 insertions(+), 19 deletions(-) diff --git a/mart/attack/perturber.py b/mart/attack/perturber.py index 3d7b8a45..feec9bea 100644 --- a/mart/attack/perturber.py +++ b/mart/attack/perturber.py @@ -7,6 +7,7 @@ from __future__ import annotations import itertools +from functools import partial from typing import TYPE_CHECKING, Any, Callable import pytorch_lightning as pl @@ -88,16 +89,8 @@ def nop(*args, **kwargs): self.perturbation = None def configure_perturbation(self, input): - # Recursively configure perturbation in tensor. - self.perturbation = self._configure_perturbation(input) - - def _configure_perturbation(self, input, modality="default"): - """Recursively create and initialize perturbation that is homomorphic as input; Hook - gradient modifiers.""" - if isinstance(input, torch.Tensor): - # Create. + def create_init_grad(data, *, input, target, modality="default"): pert = torch.empty_like(input, requires_grad=True) - # Initialize. self.initializer[modality](pert) @@ -106,17 +99,16 @@ def _configure_perturbation(self, input, modality="default"): # The current implementation of gradient modifiers is not hookable. if self.gradient_modifier is not None: pert.register_hook(lambda grad: grad.sign()) - return pert - elif isinstance(input, dict): - return { - modality: self._configure_perturbation(inp, modality) - for modality, inp in input.items() - } - elif isinstance(input, list): - return [self._configure_perturbation(inp) for inp in input] - elif isinstance(input, tuple): - return tuple(self._configure_perturbation(inp) for inp in input) + + modality_func = { + modality: partial(create_init_grad, modality=modality) for modality in self.initializer + } + + # Recursively configure perturbation in tensor. 
+ self.perturbation = modality_dispatch( + modality_func, input, input=input, target=input, modality="default" + ) @property def parameter_groups(self): From 2cfa4bba53522c2ae3126b891bae3582794dbba5 Mon Sep 17 00:00:00 2001 From: Weilin Xu Date: Thu, 30 Mar 2023 18:23:55 -0700 Subject: [PATCH 134/163] Type annotation. --- mart/attack/perturber.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/mart/attack/perturber.py b/mart/attack/perturber.py index feec9bea..9803aa4a 100644 --- a/mart/attack/perturber.py +++ b/mart/attack/perturber.py @@ -88,7 +88,7 @@ def nop(*args, **kwargs): self.perturbation = None - def configure_perturbation(self, input): + def configure_perturbation(self, input: torch.Tensor | tuple | tuple[dict[str, torch.Tensor]]): def create_init_grad(data, *, input, target, modality="default"): pert = torch.empty_like(input, requires_grad=True) # Initialize. From 998d345500e6f9d7143b3d75bc5565705c93121d Mon Sep 17 00:00:00 2001 From: Weilin Xu Date: Thu, 30 Mar 2023 18:25:24 -0700 Subject: [PATCH 135/163] Remove dead code. --- mart/configs/attack/classification_eps8_pgd10_step1.yaml | 2 -- 1 file changed, 2 deletions(-) diff --git a/mart/configs/attack/classification_eps8_pgd10_step1.yaml b/mart/configs/attack/classification_eps8_pgd10_step1.yaml index 2b6241b9..ab5ff843 100644 --- a/mart/configs/attack/classification_eps8_pgd10_step1.yaml +++ b/mart/configs/attack/classification_eps8_pgd10_step1.yaml @@ -20,8 +20,6 @@ optimizer: max_iters: 10 -gain: "loss" - initializer: eps: 8 From 71a29a31b645d97bd171e554df56b83f9a0566e3 Mon Sep 17 00:00:00 2001 From: Weilin Xu Date: Thu, 30 Mar 2023 18:26:19 -0700 Subject: [PATCH 136/163] Fix config. 
--- mart/configs/attack/object_detection_mask_adversary.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/mart/configs/attack/object_detection_mask_adversary.yaml b/mart/configs/attack/object_detection_mask_adversary.yaml index 659a6945..e069b45c 100644 --- a/mart/configs/attack/object_detection_mask_adversary.yaml +++ b/mart/configs/attack/object_detection_mask_adversary.yaml @@ -7,7 +7,7 @@ defaults: - callbacks: [image_visualizer] - objective: zero_ap - gain: rcnn_training_loss - - composer@composer.default: overlay + - composer: overlay - enforcer: default - enforcer/constraints: [mask, pixel_range] From b9a44b232d14d27d895038f5959d29ea47ad70c7 Mon Sep 17 00:00:00 2001 From: Weilin Xu Date: Fri, 31 Mar 2023 09:57:55 -0700 Subject: [PATCH 137/163] Comment. --- mart/attack/perturber.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/mart/attack/perturber.py b/mart/attack/perturber.py index 9803aa4a..2f74a98d 100644 --- a/mart/attack/perturber.py +++ b/mart/attack/perturber.py @@ -78,7 +78,7 @@ def nop(*args, **kwargs): if not isinstance(projector, dict): projector = {"default": projector} - # In case optimization parameters are not given. + # Backward compatibility, in case optimization parameters are not given. optim_params = optim_params or {modality: {} for modality in initializer.keys()} self.initializer = initializer @@ -101,6 +101,7 @@ def create_init_grad(data, *, input, target, modality="default"): pert.register_hook(lambda grad: grad.sign()) return pert + # Make a dictionary of modality-function. modality_func = { modality: partial(create_init_grad, modality=modality) for modality in self.initializer } From 9a705dbf654f4d4b71ef425348ae938461b0c174 Mon Sep 17 00:00:00 2001 From: Weilin Xu Date: Fri, 31 Mar 2023 10:11:14 -0700 Subject: [PATCH 138/163] Improve _parameter_groups(). 
--- mart/attack/perturber.py | 16 +++++++++++----- 1 file changed, 11 insertions(+), 5 deletions(-) diff --git a/mart/attack/perturber.py b/mart/attack/perturber.py index 2f74a98d..5187b87e 100644 --- a/mart/attack/perturber.py +++ b/mart/attack/perturber.py @@ -121,16 +121,22 @@ def _parameter_groups(self, pert, modality="default"): """Recursively return parameter groups as a list of dictionaries.""" if isinstance(pert, torch.Tensor): + # Return a list of dictionary instead of a dictionary, easier to extend later. return [{"params": pert} | self.optim_params[modality]] elif isinstance(pert, dict): - ret = [self._parameter_groups(pert_i, modality) for modality, pert_i in pert.items()] - # Concatenate a list of lists. - return list(itertools.chain.from_iterable(ret)) - elif isinstance(pert, list) or isinstance(pert, tuple): + param_list = [] + for modality, pert_i in pert.items(): + ret_modality = self._parameter_groups(pert_i, modality=modality) + param_list.extend(ret_modality) + return param_list + elif isinstance(pert, (list, tuple)): param_list = [] for pert_i in pert: - param_list.extend(self._parameter_groups(pert_i)) + ret_i = self._parameter_groups(pert_i, modality=modality) + param_list.extend(ret_i) return param_list + else: + raise ValueError(f"Unsupported data type of input: {type(pert)}.") def project(self, *, input, target, **kwargs): if self.projector is not None: From 0b2cdd5f12f336f6f77f2cf0f296279ecf1f4938 Mon Sep 17 00:00:00 2001 From: Weilin Xu Date: Fri, 31 Mar 2023 10:13:14 -0700 Subject: [PATCH 139/163] Rename test cases. 
--- tests/test_perturbation_manager.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/tests/test_perturbation_manager.py b/tests/test_perturbation_manager.py index 9dd9fc16..59b0fbee 100644 --- a/tests/test_perturbation_manager.py +++ b/tests/test_perturbation_manager.py @@ -7,7 +7,7 @@ from mart.attack.perturber import Perturber -def test_perturbation_tensor(): +def test_perturbation_tensor_to_param_groups(): input_data = torch.tensor([1.0, 2.0]) initializer = Constant(constant=0) @@ -24,7 +24,7 @@ def test_perturbation_tensor(): assert param_groups[0]["params"].requires_grad -def test_perturbation_dict(): +def test_perturbation_dict_to_param_groups(): input_data = {"rgb": torch.tensor([1.0, 2.0]), "depth": torch.tensor([1.0, 2.0])} initializer = {"rgb": Constant(constant=0), "depth": Constant(constant=1)} perturber = Perturber(initializer=initializer, optimizer=Mock(), composer=Mock(), gain=Mock()) @@ -42,7 +42,7 @@ def test_perturbation_dict(): # assert (param_groups[0]["params"] == 0).all() -def test_perturbation_tuple_dict(): +def test_perturbation_tuple_dict_to_param_groups(): input_data = ( {"rgb": torch.tensor([1.0, 2.0]), "depth": torch.tensor([3.0, 4.0])}, {"rgb": torch.tensor([-1.0, -2.0]), "depth": torch.tensor([-3.0, -4.0])}, From 7b1149ab78f118235b65e667484f881d45509d59 Mon Sep 17 00:00:00 2001 From: Weilin Xu Date: Fri, 31 Mar 2023 10:15:24 -0700 Subject: [PATCH 140/163] Merge test cases. 
--- tests/test_perturbation_manager.py | 64 ------------------------------ tests/test_perturber.py | 63 +++++++++++++++++++++++++++-- 2 files changed, 60 insertions(+), 67 deletions(-) delete mode 100644 tests/test_perturbation_manager.py diff --git a/tests/test_perturbation_manager.py b/tests/test_perturbation_manager.py deleted file mode 100644 index 59b0fbee..00000000 --- a/tests/test_perturbation_manager.py +++ /dev/null @@ -1,64 +0,0 @@ -from typing import Iterable -from unittest.mock import Mock - -import torch - -from mart.attack.initializer import Constant -from mart.attack.perturber import Perturber - - -def test_perturbation_tensor_to_param_groups(): - input_data = torch.tensor([1.0, 2.0]) - initializer = Constant(constant=0) - - perturber = Perturber(initializer=initializer, optimizer=Mock(), composer=Mock(), gain=Mock()) - - perturber.configure_perturbation(input_data) - pert = perturber.perturbation - assert isinstance(pert, torch.Tensor) - assert pert.shape == pert.shape - assert (pert == 0).all() - - param_groups = perturber.parameter_groups - assert isinstance(param_groups, Iterable) - assert param_groups[0]["params"].requires_grad - - -def test_perturbation_dict_to_param_groups(): - input_data = {"rgb": torch.tensor([1.0, 2.0]), "depth": torch.tensor([1.0, 2.0])} - initializer = {"rgb": Constant(constant=0), "depth": Constant(constant=1)} - perturber = Perturber(initializer=initializer, optimizer=Mock(), composer=Mock(), gain=Mock()) - - perturber.configure_perturbation(input_data) - pert = perturber.perturbation - assert isinstance(pert, dict) - assert (pert["rgb"] == 0).all() - assert (pert["depth"] == 1).all() - - param_groups = perturber.parameter_groups - assert len(param_groups) == 2 - param_groups = list(param_groups) - assert param_groups[0]["params"].requires_grad - # assert (param_groups[0]["params"] == 0).all() - - -def test_perturbation_tuple_dict_to_param_groups(): - input_data = ( - {"rgb": torch.tensor([1.0, 2.0]), "depth": 
torch.tensor([3.0, 4.0])}, - {"rgb": torch.tensor([-1.0, -2.0]), "depth": torch.tensor([-3.0, -4.0])}, - ) - initializer = {"rgb": Constant(constant=0), "depth": Constant(constant=1)} - perturber = Perturber(initializer=initializer, optimizer=Mock(), composer=Mock(), gain=Mock()) - - perturber.configure_perturbation(input_data) - pert = perturber.perturbation - assert isinstance(pert, tuple) - assert (pert[0]["rgb"] == 0).all() - assert (pert[0]["depth"] == 1).all() - assert (pert[1]["rgb"] == 0).all() - assert (pert[1]["depth"] == 1).all() - - param_groups = perturber.parameter_groups - assert len(param_groups) == 4 - param_groups = list(param_groups) - assert param_groups[0]["params"].requires_grad diff --git a/tests/test_perturber.py b/tests/test_perturber.py index 66a75e55..2ac3abd2 100644 --- a/tests/test_perturber.py +++ b/tests/test_perturber.py @@ -4,16 +4,16 @@ # SPDX-License-Identifier: BSD-3-Clause # -import importlib from functools import partial -from unittest.mock import Mock, patch +from typing import Iterable +from unittest.mock import Mock import pytest import torch from pytorch_lightning.utilities.exceptions import MisconfigurationException import mart -from mart.attack.adversary import Adversary +from mart.attack.initializer import Constant from mart.attack.perturber import Perturber @@ -237,3 +237,60 @@ def test_configure_gradient_clipping(): # Once for each parameter in the optimizer assert gradient_modifier.call_count == 2 + + +def test_perturbation_tensor_to_param_groups(): + input_data = torch.tensor([1.0, 2.0]) + initializer = Constant(constant=0) + + perturber = Perturber(initializer=initializer, optimizer=Mock(), composer=Mock(), gain=Mock()) + + perturber.configure_perturbation(input_data) + pert = perturber.perturbation + assert isinstance(pert, torch.Tensor) + assert pert.shape == pert.shape + assert (pert == 0).all() + + param_groups = perturber.parameter_groups + assert isinstance(param_groups, Iterable) + assert 
param_groups[0]["params"].requires_grad + + +def test_perturbation_dict_to_param_groups(): + input_data = {"rgb": torch.tensor([1.0, 2.0]), "depth": torch.tensor([1.0, 2.0])} + initializer = {"rgb": Constant(constant=0), "depth": Constant(constant=1)} + perturber = Perturber(initializer=initializer, optimizer=Mock(), composer=Mock(), gain=Mock()) + + perturber.configure_perturbation(input_data) + pert = perturber.perturbation + assert isinstance(pert, dict) + assert (pert["rgb"] == 0).all() + assert (pert["depth"] == 1).all() + + param_groups = perturber.parameter_groups + assert len(param_groups) == 2 + param_groups = list(param_groups) + assert param_groups[0]["params"].requires_grad + # assert (param_groups[0]["params"] == 0).all() + + +def test_perturbation_tuple_dict_to_param_groups(): + input_data = ( + {"rgb": torch.tensor([1.0, 2.0]), "depth": torch.tensor([3.0, 4.0])}, + {"rgb": torch.tensor([-1.0, -2.0]), "depth": torch.tensor([-3.0, -4.0])}, + ) + initializer = {"rgb": Constant(constant=0), "depth": Constant(constant=1)} + perturber = Perturber(initializer=initializer, optimizer=Mock(), composer=Mock(), gain=Mock()) + + perturber.configure_perturbation(input_data) + pert = perturber.perturbation + assert isinstance(pert, tuple) + assert (pert[0]["rgb"] == 0).all() + assert (pert[0]["depth"] == 1).all() + assert (pert[1]["rgb"] == 0).all() + assert (pert[1]["depth"] == 1).all() + + param_groups = perturber.parameter_groups + assert len(param_groups) == 4 + param_groups = list(param_groups) + assert param_groups[0]["params"].requires_grad From 8d3e6d8eb488697ae6b96ed36dc5931e072e31c9 Mon Sep 17 00:00:00 2001 From: Weilin Xu Date: Fri, 31 Mar 2023 10:18:01 -0700 Subject: [PATCH 141/163] Remove unused imports. 
--- mart/attack/perturber.py | 1 - 1 file changed, 1 deletion(-) diff --git a/mart/attack/perturber.py b/mart/attack/perturber.py index 5187b87e..b82c8248 100644 --- a/mart/attack/perturber.py +++ b/mart/attack/perturber.py @@ -6,7 +6,6 @@ from __future__ import annotations -import itertools from functools import partial from typing import TYPE_CHECKING, Any, Callable From 483521f83bb46d139699b72a26806de8559d6ff7 Mon Sep 17 00:00:00 2001 From: Weilin Xu Date: Fri, 31 Mar 2023 10:23:15 -0700 Subject: [PATCH 142/163] Make a similar Perturber.forward(). --- mart/attack/perturber.py | 20 +++++++------------- 1 file changed, 7 insertions(+), 13 deletions(-) diff --git a/mart/attack/perturber.py b/mart/attack/perturber.py index b82c8248..7b5a159e 100644 --- a/mart/attack/perturber.py +++ b/mart/attack/perturber.py @@ -137,14 +137,14 @@ def _parameter_groups(self, pert, modality="default"): else: raise ValueError(f"Unsupported data type of input: {type(pert)}.") - def project(self, *, input, target, **kwargs): + def project(self, perturbation, *, input, target, **kwargs): if self.projector is not None: modality_dispatch( - self.projector, self.perturbation, input=input, target=target, modality="default" + self.projector, perturbation, input=input, target=target, modality="default" ) - def compose(self, *, input, target): - return modality_dispatch(self.composer, self.perturbation, input=input, target=target) + def compose(self, perturbation, *, input, target, **kwargs): + return modality_dispatch(self.composer, perturbation, input=input, target=target) def configure_optimizers(self): # parameter_groups is generated from perturbation. 
@@ -180,22 +180,16 @@ def training_step(self, batch, batch_idx): return gain - def forward( - self, - *, - input: torch.Tensor | tuple, - target: torch.Tensor | dict[str, Any] | tuple, - **kwargs, - ): + def forward(self, **batch): if self.perturbation is None: raise MisconfigurationException( "You need to call the configure_perturbation before forward." ) # Project perturbation... - self.project(input=input, target=target) + self.project(self.perturbation, **batch) # Compose adversarial input. - input_adv = self.compose(input=input, target=target) + input_adv = self.compose(self.perturbation, **batch) return input_adv From 4a17a3585e232164fd59df467311f53a63b04f6e Mon Sep 17 00:00:00 2001 From: Weilin Xu Date: Fri, 31 Mar 2023 10:35:48 -0700 Subject: [PATCH 143/163] Comment. --- mart/attack/perturber.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/mart/attack/perturber.py b/mart/attack/perturber.py index 7b5a159e..dc744842 100644 --- a/mart/attack/perturber.py +++ b/mart/attack/perturber.py @@ -89,6 +89,7 @@ def nop(*args, **kwargs): def configure_perturbation(self, input: torch.Tensor | tuple | tuple[dict[str, torch.Tensor]]): def create_init_grad(data, *, input, target, modality="default"): + # Though data and target are not used, they are required placeholders for modality_dispatch(). pert = torch.empty_like(input, requires_grad=True) # Initialize. self.initializer[modality](pert) @@ -106,6 +107,7 @@ def create_init_grad(data, *, input, target, modality="default"): } # Recursively configure perturbation in tensor. + # Though only input=input is used, we have to fill the placeholders of data and target. self.perturbation = modality_dispatch( modality_func, input, input=input, target=input, modality="default" ) From a02de28c5c20c6d99201311deb0b885017d5c2d0 Mon Sep 17 00:00:00 2001 From: Weilin Xu Date: Fri, 31 Mar 2023 10:37:57 -0700 Subject: [PATCH 144/163] Comment. 
--- mart/utils/modality_dispatch.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/mart/utils/modality_dispatch.py b/mart/utils/modality_dispatch.py index dc30a5f9..a2e6f7a2 100644 --- a/mart/utils/modality_dispatch.py +++ b/mart/utils/modality_dispatch.py @@ -22,7 +22,10 @@ def modality_dispatch( target: torch.Tensor | dict[str, Any], modality: str = "default", ): - """Recursively dispatch data and input to functions of the same modality.""" + """Recursively dispatch data and input/target to functions of the same modality. + + The function returns an object that is homomorphic to input and data. + """ assert type(data) == type(input) From 68923bceba091a7f09e62d530f70608744045ea8 Mon Sep 17 00:00:00 2001 From: Weilin Xu Date: Fri, 31 Mar 2023 10:58:39 -0700 Subject: [PATCH 145/163] Revert gradient_modifier. --- mart/attack/perturber.py | 40 ++++++++++++++++++++++++++-------------- 1 file changed, 26 insertions(+), 14 deletions(-) diff --git a/mart/attack/perturber.py b/mart/attack/perturber.py index dc744842..20263b00 100644 --- a/mart/attack/perturber.py +++ b/mart/attack/perturber.py @@ -7,7 +7,7 @@ from __future__ import annotations from functools import partial -from typing import TYPE_CHECKING, Any, Callable +from typing import TYPE_CHECKING, Callable import pytorch_lightning as pl import torch @@ -55,13 +55,11 @@ def __init__( """ super().__init__() + # Modality-neutral objects. self.optimizer_fn = optimizer - self.composer = composer self.gain_fn = gain self.objective_fn = objective - # FIXME: gradient_modifier should be a hook operating on .grad directly. - # In case gradient_modifier or projector is None. def nop(*args, **kwargs): pass @@ -69,6 +67,7 @@ def nop(*args, **kwargs): gradient_modifier = gradient_modifier or nop projector = projector or nop + # Modality-specific objects. # Backward compatibility, in case modality is unknown, and not given in input. 
if not isinstance(initializer, dict): initializer = {"default": initializer} @@ -76,34 +75,34 @@ def nop(*args, **kwargs): gradient_modifier = {"default": gradient_modifier} if not isinstance(projector, dict): projector = {"default": projector} + if not isinstance(composer, dict): + composer = {"default": composer} # Backward compatibility, in case optimization parameters are not given. - optim_params = optim_params or {modality: {} for modality in initializer.keys()} + if optim_params is None: + optim_params = {modality: {} for modality in initializer.keys()} + # Modality-specific objects. self.initializer = initializer self.gradient_modifier = gradient_modifier self.projector = projector + self.composer = composer self.optim_params = optim_params self.perturbation = None def configure_perturbation(self, input: torch.Tensor | tuple | tuple[dict[str, torch.Tensor]]): - def create_init_grad(data, *, input, target, modality="default"): + def create_and_initialize(data, *, input, target, modality="default"): # Though data and target are not used, they are required placeholders for modality_dispatch(). pert = torch.empty_like(input, requires_grad=True) # Initialize. self.initializer[modality](pert) - - # Gradient modifier hook. - # FIXME: use actual gradient modifier, self.gradient_modifier[modality](pert) - # The current implementation of gradient modifiers is not hookable. - if self.gradient_modifier is not None: - pert.register_hook(lambda grad: grad.sign()) return pert # Make a dictionary of modality-function. modality_func = { - modality: partial(create_init_grad, modality=modality) for modality in self.initializer + modality: partial(create_and_initialize, modality=modality) + for modality in self.initializer } # Recursively configure perturbation in tensor. @@ -123,7 +122,8 @@ def _parameter_groups(self, pert, modality="default"): if isinstance(pert, torch.Tensor): # Return a list of dictionary instead of a dictionary, easier to extend later. 
- return [{"params": pert} | self.optim_params[modality]] + # Add the modality notation so that we can perform gradient modification later. + return [{"params": pert, "modality": modality} | self.optim_params[modality]] elif isinstance(pert, dict): param_list = [] for modality, pert_i in pert.items(): @@ -182,6 +182,18 @@ def training_step(self, batch, batch_idx): return gain + def configure_gradient_clipping( + self, optimizer, optimizer_idx, gradient_clip_val=None, gradient_clip_algorithm=None + ): + # Configuring gradient clipping in pl.Trainer is still useful, so use it. + super().configure_gradient_clipping( + optimizer, optimizer_idx, gradient_clip_val, gradient_clip_algorithm + ) + + for group in optimizer.param_groups: + modality = "default" if "modality" not in group else group["modality"] + self.gradient_modifier[modality](group["params"]) + def forward(self, **batch): if self.perturbation is None: raise MisconfigurationException( From 33acb979268b4d1c6522279c5198268a0b7f2c46 Mon Sep 17 00:00:00 2001 From: Weilin Xu Date: Fri, 31 Mar 2023 11:03:49 -0700 Subject: [PATCH 146/163] Comment. --- mart/attack/perturber.py | 3 --- 1 file changed, 3 deletions(-) diff --git a/mart/attack/perturber.py b/mart/attack/perturber.py index 20263b00..db2a0ba0 100644 --- a/mart/attack/perturber.py +++ b/mart/attack/perturber.py @@ -200,10 +200,7 @@ def forward(self, **batch): "You need to call the configure_perturbation before forward." ) - # Project perturbation... self.project(self.perturbation, **batch) - - # Compose adversarial input. input_adv = self.compose(self.perturbation, **batch) return input_adv From 3c6a7f70869b7ea290466bfa7d9a44dcb013acbf Mon Sep 17 00:00:00 2001 From: Weilin Xu Date: Fri, 31 Mar 2023 11:16:17 -0700 Subject: [PATCH 147/163] Make image_visualizer modality-aware. 
--- mart/attack/callbacks/visualizer.py | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/mart/attack/callbacks/visualizer.py b/mart/attack/callbacks/visualizer.py index 3354321e..c1f0e35b 100644 --- a/mart/attack/callbacks/visualizer.py +++ b/mart/attack/callbacks/visualizer.py @@ -15,11 +15,12 @@ class PerturbedImageVisualizer(Callback): """Save adversarial images as files.""" - def __init__(self, folder): + def __init__(self, folder, modality="rgb"): super().__init__() # FIXME: This should use the Trainer's logging directory. self.folder = folder + self.modality = modality self.convert = ToPILImage() if not os.path.isdir(self.folder): @@ -35,6 +36,9 @@ def on_train_end(self, trainer, model): adv_input = model(input=self.input, target=self.target) for img, tgt in zip(adv_input, self.target): + # Modality aware. + if isinstance(img, dict): + img = img[self.modality] fname = tgt["file_name"] fpath = os.path.join(self.folder, fname) im = self.convert(img / 255) From b0707237c191e19afc8528aed2825b1f05b686f1 Mon Sep 17 00:00:00 2001 From: Weilin Xu Date: Fri, 31 Mar 2023 11:16:44 -0700 Subject: [PATCH 148/163] Enable visualizer. 
--- mart/configs/attack/object_detection_rgb_mask_adversary.yaml | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/mart/configs/attack/object_detection_rgb_mask_adversary.yaml b/mart/configs/attack/object_detection_rgb_mask_adversary.yaml index d7819733..1fc21948 100644 --- a/mart/configs/attack/object_detection_rgb_mask_adversary.yaml +++ b/mart/configs/attack/object_detection_rgb_mask_adversary.yaml @@ -4,8 +4,7 @@ defaults: - initializer@initializer.rgb: constant - gradient_modifier@gradient_modifier.rgb: sign - projector@projector.rgb: mask_range - # TODO: Modality-aware visualizer - # - callbacks: [image_visualizer] + - callbacks: [image_visualizer] - objective: zero_ap - gain: rcnn_training_loss - composer@composer.rgb: overlay From 3a86b693f997abf8fffad7065522cae82ed167a4 Mon Sep 17 00:00:00 2001 From: Weilin Xu Date: Fri, 31 Mar 2023 15:30:41 -0700 Subject: [PATCH 149/163] Clean up nop(). --- mart/attack/perturber.py | 16 ++++++---------- 1 file changed, 6 insertions(+), 10 deletions(-) diff --git a/mart/attack/perturber.py b/mart/attack/perturber.py index db2a0ba0..a6e589fa 100644 --- a/mart/attack/perturber.py +++ b/mart/attack/perturber.py @@ -60,12 +60,9 @@ def __init__( self.gain_fn = gain self.objective_fn = objective - # In case gradient_modifier or projector is None. - def nop(*args, **kwargs): - pass - - gradient_modifier = gradient_modifier or nop - projector = projector or nop + # Replace None with nop(). + gradient_modifier = gradient_modifier or GradientModifier() + projector = projector or Projector() # Modality-specific objects. # Backward compatibility, in case modality is unknown, and not given in input. 
@@ -140,10 +137,9 @@ def _parameter_groups(self, pert, modality="default"): raise ValueError(f"Unsupported data type of input: {type(pert)}.") def project(self, perturbation, *, input, target, **kwargs): - if self.projector is not None: - modality_dispatch( - self.projector, perturbation, input=input, target=target, modality="default" - ) + modality_dispatch( + self.projector, perturbation, input=input, target=target, modality="default" + ) def compose(self, perturbation, *, input, target, **kwargs): return modality_dispatch(self.composer, perturbation, input=input, target=target) From 7bbe3479feda6b9f9c6deb4419a3a043f0deeef4 Mon Sep 17 00:00:00 2001 From: Weilin Xu Date: Fri, 31 Mar 2023 15:33:41 -0700 Subject: [PATCH 150/163] Cleanup. --- mart/attack/perturber.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/mart/attack/perturber.py b/mart/attack/perturber.py index a6e589fa..44f8c276 100644 --- a/mart/attack/perturber.py +++ b/mart/attack/perturber.py @@ -92,7 +92,6 @@ def configure_perturbation(self, input: torch.Tensor | tuple | tuple[dict[str, t def create_and_initialize(data, *, input, target, modality="default"): # Though data and target are not used, they are required placeholders for modality_dispatch(). pert = torch.empty_like(input, requires_grad=True) - # Initialize. self.initializer[modality](pert) return pert @@ -142,7 +141,9 @@ def project(self, perturbation, *, input, target, **kwargs): ) def compose(self, perturbation, *, input, target, **kwargs): - return modality_dispatch(self.composer, perturbation, input=input, target=target) + return modality_dispatch( + self.composer, perturbation, input=input, target=target, modality="default" + ) def configure_optimizers(self): # parameter_groups is generated from perturbation. From b86cdc99f2b9c97abc4dc13d969349d580483235 Mon Sep 17 00:00:00 2001 From: Weilin Xu Date: Mon, 3 Apr 2023 10:08:19 -0700 Subject: [PATCH 151/163] project() -> project_(). 
--- mart/attack/perturber.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/mart/attack/perturber.py b/mart/attack/perturber.py index 44f8c276..0d0ce15e 100644 --- a/mart/attack/perturber.py +++ b/mart/attack/perturber.py @@ -135,7 +135,8 @@ def _parameter_groups(self, pert, modality="default"): else: raise ValueError(f"Unsupported data type of input: {type(pert)}.") - def project(self, perturbation, *, input, target, **kwargs): + def project_(self, perturbation, *, input, target, **kwargs): + """In-place projection.""" modality_dispatch( self.projector, perturbation, input=input, target=target, modality="default" ) @@ -197,7 +198,7 @@ def forward(self, **batch): "You need to call the configure_perturbation before forward." ) - self.project(self.perturbation, **batch) + self.project_(self.perturbation, **batch) input_adv = self.compose(self.perturbation, **batch) return input_adv From b68d3081791e6081b781d15975d323531730ee2c Mon Sep 17 00:00:00 2001 From: Weilin Xu Date: Mon, 3 Apr 2023 10:11:55 -0700 Subject: [PATCH 152/163] Remove property decorator for parameter_groups(). --- mart/attack/perturber.py | 3 +-- tests/test_perturber.py | 6 +++--- 2 files changed, 4 insertions(+), 5 deletions(-) diff --git a/mart/attack/perturber.py b/mart/attack/perturber.py index 0d0ce15e..f50e8576 100644 --- a/mart/attack/perturber.py +++ b/mart/attack/perturber.py @@ -107,7 +107,6 @@ def create_and_initialize(data, *, input, target, modality="default"): modality_func, input, input=input, target=input, modality="default" ) - @property def parameter_groups(self): """Extract parameter groups for optimization from perturbation tensor(s).""" param_groups = self._parameter_groups(self.perturbation) @@ -152,7 +151,7 @@ def configure_optimizers(self): raise MisconfigurationException( "You need to call the configure_perturbation before fit." 
) - return self.optimizer_fn(self.parameter_groups) + return self.optimizer_fn(self.parameter_groups()) def training_step(self, batch, batch_idx): # copy batch since we modify it and it is used internally diff --git a/tests/test_perturber.py b/tests/test_perturber.py index 2ac3abd2..96e8954b 100644 --- a/tests/test_perturber.py +++ b/tests/test_perturber.py @@ -251,7 +251,7 @@ def test_perturbation_tensor_to_param_groups(): assert pert.shape == pert.shape assert (pert == 0).all() - param_groups = perturber.parameter_groups + param_groups = perturber.parameter_groups() assert isinstance(param_groups, Iterable) assert param_groups[0]["params"].requires_grad @@ -267,7 +267,7 @@ def test_perturbation_dict_to_param_groups(): assert (pert["rgb"] == 0).all() assert (pert["depth"] == 1).all() - param_groups = perturber.parameter_groups + param_groups = perturber.parameter_groups() assert len(param_groups) == 2 param_groups = list(param_groups) assert param_groups[0]["params"].requires_grad @@ -290,7 +290,7 @@ def test_perturbation_tuple_dict_to_param_groups(): assert (pert[1]["rgb"] == 0).all() assert (pert[1]["depth"] == 1).all() - param_groups = perturber.parameter_groups + param_groups = perturber.parameter_groups() assert len(param_groups) == 4 param_groups = list(param_groups) assert param_groups[0]["params"].requires_grad From e9613072383201621d7df13ba19009f613785be3 Mon Sep 17 00:00:00 2001 From: Weilin Xu Date: Mon, 3 Apr 2023 10:13:50 -0700 Subject: [PATCH 153/163] Make sure we create float perturbation tensors. 
--- mart/attack/perturber.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/mart/attack/perturber.py b/mart/attack/perturber.py index f50e8576..5255c573 100644 --- a/mart/attack/perturber.py +++ b/mart/attack/perturber.py @@ -91,7 +91,7 @@ def __init__( def configure_perturbation(self, input: torch.Tensor | tuple | tuple[dict[str, torch.Tensor]]): def create_and_initialize(data, *, input, target, modality="default"): # Though data and target are not used, they are required placeholders for modality_dispatch(). - pert = torch.empty_like(input, requires_grad=True) + pert = torch.empty_like(input, dtype=torch.float, requires_grad=True) self.initializer[modality](pert) return pert From aa95721deec3b732ab9ec0a6e2da426a6f1ef651 Mon Sep 17 00:00:00 2001 From: Weilin Xu Date: Mon, 3 Apr 2023 10:15:54 -0700 Subject: [PATCH 154/163] Update type annotation. --- mart/attack/perturber.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/mart/attack/perturber.py b/mart/attack/perturber.py index 5255c573..16cc35fd 100644 --- a/mart/attack/perturber.py +++ b/mart/attack/perturber.py @@ -33,12 +33,12 @@ class Perturber(pl.LightningModule): def __init__( self, *, - initializer: Initializer, + initializer: Initializer | dict[str, Initializer], optimizer: Callable, - composer: Composer, + composer: Composer | dict[str, Composer], gain: Gain, - gradient_modifier: GradientModifier | None = None, - projector: Projector | None = None, + gradient_modifier: GradientModifier | dict[str, GradientModifier] | None = None, + projector: Projector | dict[str, Projector] | None = None, objective: Objective | None = None, optim_params: dict | None = None, ): From 877a00eb921bee8afb4d46e95dc456aea8167c8e Mon Sep 17 00:00:00 2001 From: Weilin Xu Date: Mon, 3 Apr 2023 10:47:28 -0700 Subject: [PATCH 155/163] Fix type annotation and comment. 
--- mart/attack/perturber.py | 23 ++++++++++++----------- 1 file changed, 12 insertions(+), 11 deletions(-) diff --git a/mart/attack/perturber.py b/mart/attack/perturber.py index 16cc35fd..68cc532d 100644 --- a/mart/attack/perturber.py +++ b/mart/attack/perturber.py @@ -7,7 +7,7 @@ from __future__ import annotations from functools import partial -from typing import TYPE_CHECKING, Callable +from typing import TYPE_CHECKING, Any, Callable import pytorch_lightning as pl import torch @@ -33,25 +33,26 @@ class Perturber(pl.LightningModule): def __init__( self, *, - initializer: Initializer | dict[str, Initializer], optimizer: Callable, - composer: Composer | dict[str, Composer], gain: Gain, + composer: Composer | dict[str, Composer], + initializer: Initializer | dict[str, Initializer], gradient_modifier: GradientModifier | dict[str, GradientModifier] | None = None, projector: Projector | dict[str, Projector] | None = None, objective: Objective | None = None, - optim_params: dict | None = None, + optim_params: dict[str, dict[str, Any]] | None = None, ): """_summary_ Args: - initializer (Initializer): To initialize the perturbation. - optimizer (torch.optim.Optimizer): A PyTorch optimizer. - composer (Composer): A module which composes adversarial input from input and perturbation. - gain (Gain): An adversarial gain function, which is a differentiable estimate of adversarial objective. - gradient_modifier (GradientModifier): To modify the gradient of perturbation. - projector (Projector): To project the perturbation into some space. - objective (Objective): A function for computing adversarial objective, which returns True or False. Optional. + optimizer: A partial of PyTorch optimizer that awaits parameters to optimize. + gain: An adversarial gain function, which is a differentiable estimate of adversarial objective. + composer: A module which composes adversarial input from input and perturbation. Modality-aware. + initializer: To initialize the perturbation. 
Modality-aware. + gradient_modifier: To modify the gradient of perturbation. Modality-aware. + projector: To project the perturbation into some space. Modality-aware. + objective: A function for computing adversarial objective, which returns True or False. Optional. + optim_params: A dictionary of optimization hyper-parameters. E.g. {"rgb": {"lr": 0.1}}. """ super().__init__() From 6bd039905b1b78addc8852fe2b4d98360423ecbe Mon Sep 17 00:00:00 2001 From: Weilin Xu Date: Mon, 3 Apr 2023 11:03:51 -0700 Subject: [PATCH 156/163] =?UTF-8?q?Extend=20modality=5Fdispatch()=20to=20s?= =?UTF-8?q?upport=20modality=5Ffunc(modality=3D=3F).?= --- mart/attack/enforcer.py | 7 +------ mart/attack/perturber.py | 8 +------- mart/utils/modality_dispatch.py | 9 +++++++-- 3 files changed, 9 insertions(+), 15 deletions(-) diff --git a/mart/attack/enforcer.py b/mart/attack/enforcer.py index 8c9fd742..8f0be705 100644 --- a/mart/attack/enforcer.py +++ b/mart/attack/enforcer.py @@ -101,11 +101,6 @@ def verify(self, input_adv, *, input, target): class Enforcer: def __init__(self, **modality_constraints: dict[str, dict[str, Constraint]]) -> None: self.modality_constraints = modality_constraints - # Prepare for modality_dispatch(). - self.modality_func = { - modality: partial(self._enforce, modality=modality) - for modality in self.modality_constraints - } @torch.no_grad() def _enforce( @@ -128,5 +123,5 @@ def __call__( **kwargs, ): modality_dispatch( - self.modality_func, input_adv, input=input, target=target, modality="constraints" + self._enforce, input_adv, input=input, target=target, modality="constraints" ) diff --git a/mart/attack/perturber.py b/mart/attack/perturber.py index 68cc532d..a7409db9 100644 --- a/mart/attack/perturber.py +++ b/mart/attack/perturber.py @@ -96,16 +96,10 @@ def create_and_initialize(data, *, input, target, modality="default"): self.initializer[modality](pert) return pert - # Make a dictionary of modality-function. 
- modality_func = { - modality: partial(create_and_initialize, modality=modality) - for modality in self.initializer - } - # Recursively configure perturbation in tensor. # Though only input=input is used, we have to fill the placeholders of data and target. self.perturbation = modality_dispatch( - modality_func, input, input=input, target=input, modality="default" + create_and_initialize, input, input=input, target=input, modality="default" ) def parameter_groups(self): diff --git a/mart/utils/modality_dispatch.py b/mart/utils/modality_dispatch.py index a2e6f7a2..fcc9c551 100644 --- a/mart/utils/modality_dispatch.py +++ b/mart/utils/modality_dispatch.py @@ -15,7 +15,7 @@ def modality_dispatch( - modality_func: dict[str, Callable], + modality_func: Callable | dict[str, Callable], data: Tensor | tuple | list[Tensor] | dict[str, Tensor], *, input: Tensor | tuple | list[Tensor] | dict[str, Tensor], @@ -30,7 +30,12 @@ def modality_dispatch( assert type(data) == type(input) if isinstance(input, torch.Tensor): - return modality_func[modality](data, input=input, target=target) + if isinstance(modality_func, dict): + # A dictionary of Callable indexed by modality. + return modality_func[modality](data, input=input, target=target) + else: + # A Callable with modality=? as a keyword argument. + return modality_func(data, input=input, target=target, modality=modality) elif isinstance(input, dict): # The dict input has modalities specified in keys, passing them recursively. 
output = {} From f33dbc8dc2d6d72016a0c13ab3a44e1872d01bc4 Mon Sep 17 00:00:00 2001 From: Weilin Xu Date: Mon, 3 Apr 2023 11:16:08 -0700 Subject: [PATCH 157/163] MODALITY_DEFAULT = "default" --- mart/attack/perturber.py | 28 +++++++++++++++++----------- 1 file changed, 17 insertions(+), 11 deletions(-) diff --git a/mart/attack/perturber.py b/mart/attack/perturber.py index a7409db9..a95bcbf0 100644 --- a/mart/attack/perturber.py +++ b/mart/attack/perturber.py @@ -30,6 +30,8 @@ class Perturber(pl.LightningModule): """Peturbation optimization module.""" + MODALITY_DEFAULT = "default" + def __init__( self, *, @@ -68,13 +70,13 @@ def __init__( # Modality-specific objects. # Backward compatibility, in case modality is unknown, and not given in input. if not isinstance(initializer, dict): - initializer = {"default": initializer} + initializer = {self.MODALITY_DEFAULT: initializer} if not isinstance(gradient_modifier, dict): - gradient_modifier = {"default": gradient_modifier} + gradient_modifier = {self.MODALITY_DEFAULT: gradient_modifier} if not isinstance(projector, dict): - projector = {"default": projector} + projector = {self.MODALITY_DEFAULT: projector} if not isinstance(composer, dict): - composer = {"default": composer} + composer = {self.MODALITY_DEFAULT: composer} # Backward compatibility, in case optimization parameters are not given. if optim_params is None: @@ -90,7 +92,7 @@ def __init__( self.perturbation = None def configure_perturbation(self, input: torch.Tensor | tuple | tuple[dict[str, torch.Tensor]]): - def create_and_initialize(data, *, input, target, modality="default"): + def create_and_initialize(data, *, input, target, modality): # Though data and target are not used, they are required placeholders for modality_dispatch(). 
pert = torch.empty_like(input, dtype=torch.float, requires_grad=True) self.initializer[modality](pert) @@ -99,15 +101,15 @@ def create_and_initialize(data, *, input, target, modality="default"): # Recursively configure perturbation in tensor. # Though only input=input is used, we have to fill the placeholders of data and target. self.perturbation = modality_dispatch( - create_and_initialize, input, input=input, target=input, modality="default" + create_and_initialize, input, input=input, target=input, modality=self.MODALITY_DEFAULT ) def parameter_groups(self): """Extract parameter groups for optimization from perturbation tensor(s).""" - param_groups = self._parameter_groups(self.perturbation) + param_groups = self._parameter_groups(self.perturbation, modality=self.MODALITY_DEFAULT) return param_groups - def _parameter_groups(self, pert, modality="default"): + def _parameter_groups(self, pert, *, modality): """Recursively return parameter groups as a list of dictionaries.""" if isinstance(pert, torch.Tensor): @@ -132,12 +134,16 @@ def _parameter_groups(self, pert, modality="default"): def project_(self, perturbation, *, input, target, **kwargs): """In-place projection.""" modality_dispatch( - self.projector, perturbation, input=input, target=target, modality="default" + self.projector, + perturbation, + input=input, + target=target, + modality=self.MODALITY_DEFAULT, ) def compose(self, perturbation, *, input, target, **kwargs): return modality_dispatch( - self.composer, perturbation, input=input, target=target, modality="default" + self.composer, perturbation, input=input, target=target, modality=self.MODALITY_DEFAULT ) def configure_optimizers(self): @@ -183,7 +189,7 @@ def configure_gradient_clipping( ) for group in optimizer.param_groups: - modality = "default" if "modality" not in group else group["modality"] + modality = self.MODALITY_DEFAULT if "modality" not in group else group["modality"] self.gradient_modifier[modality](group["params"]) def forward(self, 
**batch): From 9eedad8a9620e4ef152e3e2d920452e36574cdc7 Mon Sep 17 00:00:00 2001 From: Weilin Xu Date: Mon, 3 Apr 2023 11:18:33 -0700 Subject: [PATCH 158/163] Clean up. --- mart/attack/perturber.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/mart/attack/perturber.py b/mart/attack/perturber.py index a95bcbf0..80aaca2a 100644 --- a/mart/attack/perturber.py +++ b/mart/attack/perturber.py @@ -6,7 +6,6 @@ from __future__ import annotations -from functools import partial from typing import TYPE_CHECKING, Any, Callable import pytorch_lightning as pl @@ -94,6 +93,7 @@ def __init__( def configure_perturbation(self, input: torch.Tensor | tuple | tuple[dict[str, torch.Tensor]]): def create_and_initialize(data, *, input, target, modality): # Though data and target are not used, they are required placeholders for modality_dispatch(). + # TODO: we don't want an integer tensor, but make sure it does not affect mixed precision training. pert = torch.empty_like(input, dtype=torch.float, requires_grad=True) self.initializer[modality](pert) return pert From 3710d488f47cd617fd17b86946ef6eb292d32b45 Mon Sep 17 00:00:00 2001 From: Weilin Xu Date: Mon, 3 Apr 2023 11:21:52 -0700 Subject: [PATCH 159/163] Clean up. 
--- mart/attack/enforcer.py | 1 - 1 file changed, 1 deletion(-) diff --git a/mart/attack/enforcer.py b/mart/attack/enforcer.py index 8f0be705..427d4891 100644 --- a/mart/attack/enforcer.py +++ b/mart/attack/enforcer.py @@ -7,7 +7,6 @@ from __future__ import annotations import abc -from functools import partial from typing import Any import torch From 9ce0ea2774cbfaee4bfd4ebf7eeae8d0d493955c Mon Sep 17 00:00:00 2001 From: Weilin Xu Date: Mon, 3 Apr 2023 15:15:24 -0700 Subject: [PATCH 160/163] Allow Adversary not in the first place of a sequence --- mart/attack/adversary.py | 4 ++-- mart/attack/perturber.py | 3 ++- 2 files changed, 4 insertions(+), 3 deletions(-) diff --git a/mart/attack/adversary.py b/mart/attack/adversary.py index c950241b..d158f6af 100644 --- a/mart/attack/adversary.py +++ b/mart/attack/adversary.py @@ -75,14 +75,14 @@ def forward(self, **batch): # must pass a model to attack when calling the adversary. Since we do not know where the # Adversary lives inside the model, we also need the remaining sequence to be able to # get a loss. - if "model" in batch and "sequence" in batch: + if "model" in batch and batch["model"] is not None and "sequence" in batch: self._attack(**batch) # Always use perturb the current input. input_adv = self.perturber(**batch) # Enforce constraints after the attack optimization ends. - if "model" in batch and "sequence" in batch: + if "model" in batch and batch["model"] is not None and "sequence" in batch: self.enforcer(input_adv, **batch) return input_adv diff --git a/mart/attack/perturber.py b/mart/attack/perturber.py index 80aaca2a..bfa4d7b0 100644 --- a/mart/attack/perturber.py +++ b/mart/attack/perturber.py @@ -160,7 +160,8 @@ def training_step(self, batch, batch_idx): # We need to evaluate the perturbation against the whole model, so call it normally to get a gain. 
model = batch.pop("model") - outputs = model(**batch) + # When an Adversary takes input from another module in the sequence, we would have to specify kwargs of Adversary, and model would be a required kwarg. + outputs = model(**batch, model=None) # FIXME: This should really be just `return outputs`. But this might require a new sequence? # FIXME: Everything below here should live in the model as modules. From 655f4e0f66348890e8c7f5eddba9e5feef4a14a1 Mon Sep 17 00:00:00 2001 From: Weilin Xu Date: Mon, 3 Apr 2023 15:20:16 -0700 Subject: [PATCH 161/163] Fix tests. --- tests/test_adversary.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tests/test_adversary.py b/tests/test_adversary.py index 2232e12d..c14e1569 100644 --- a/tests/test_adversary.py +++ b/tests/test_adversary.py @@ -49,7 +49,7 @@ def test_adversary_with_model(input_data, target_data, perturbation): attacker=attacker, ) - output_data = adversary(input=input_data, target=target_data, model=None, sequence=None) + output_data = adversary(input=input_data, target=target_data, model=Mock(), sequence=None) # The enforcer is only called when model is not None. enforcer.assert_called_once() @@ -95,7 +95,7 @@ def test_adversary_perturbation(input_data, target_data, perturbation): attacker=attacker, ) - _ = adversary(input=input_data, target=target_data, model=None, sequence=None) + _ = adversary(input=input_data, target=target_data, model=Mock(), sequence=None) output_data = adversary(input=input_data, target=target_data) # The enforcer is only called when model is not None. From 48cb56f982804f25317c0428fc6f4ba7abd333d8 Mon Sep 17 00:00:00 2001 From: Weilin Xu Date: Mon, 3 Apr 2023 15:36:34 -0700 Subject: [PATCH 162/163] Allow target is None in modality_dispatch(). 
--- mart/utils/modality_dispatch.py | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/mart/utils/modality_dispatch.py b/mart/utils/modality_dispatch.py index fcc9c551..6ea6206e 100644 --- a/mart/utils/modality_dispatch.py +++ b/mart/utils/modality_dispatch.py @@ -6,6 +6,7 @@ from __future__ import annotations +from itertools import cycle from typing import Any, Callable import torch @@ -19,7 +20,7 @@ def modality_dispatch( data: Tensor | tuple | list[Tensor] | dict[str, Tensor], *, input: Tensor | tuple | list[Tensor] | dict[str, Tensor], - target: torch.Tensor | dict[str, Any], + target: torch.Tensor | dict[str, Any] | list[dict[str, Any]] | None, modality: str = "default", ): """Recursively dispatch data and input/target to functions of the same modality. @@ -28,6 +29,9 @@ def modality_dispatch( """ assert type(data) == type(input) + if target is None: + # Make target zips well with input. + target = cycle([None]) if isinstance(input, torch.Tensor): if isinstance(modality_func, dict): From fa27723319acf0dcc065bb7daad5784f199bbd60 Mon Sep 17 00:00:00 2001 From: Weilin Xu Date: Mon, 3 Apr 2023 15:36:52 -0700 Subject: [PATCH 163/163] Remove target=input. --- mart/attack/perturber.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/mart/attack/perturber.py b/mart/attack/perturber.py index bfa4d7b0..5ba079b8 100644 --- a/mart/attack/perturber.py +++ b/mart/attack/perturber.py @@ -101,7 +101,7 @@ def create_and_initialize(data, *, input, target, modality): # Recursively configure perturbation in tensor. # Though only input=input is used, we have to fill the placeholders of data and target. self.perturbation = modality_dispatch( - create_and_initialize, input, input=input, target=input, modality=self.MODALITY_DEFAULT + create_and_initialize, input, input=input, target=None, modality=self.MODALITY_DEFAULT ) def parameter_groups(self):