From 48e3be991b263c29d9934caeed9e899cf8286bde Mon Sep 17 00:00:00 2001 From: Cory Cornelius Date: Mon, 12 Jun 2023 16:05:47 -0700 Subject: [PATCH 01/12] Make *_step_log dicts where the key is the logging name and value is the output key --- mart/models/modular.py | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/mart/models/modular.py b/mart/models/modular.py index eb5dd934..1fe4be99 100644 --- a/mart/models/modular.py +++ b/mart/models/modular.py @@ -70,13 +70,13 @@ def __init__( self.lr_scheduler = lr_scheduler - self.training_step_log = training_step_log or ["loss"] + self.training_step_log = training_step_log or {} self.training_metrics = training_metrics - self.validation_step_log = validation_step_log or [] + self.validation_step_log = validation_step_log or {} self.validation_metrics = validation_metrics - self.test_step_log = test_step_log or [] + self.test_step_log = test_step_log or {} self.test_metrics = test_metrics # Load state dict for specified modules. We flatten it because Hydra @@ -115,8 +115,8 @@ def training_step(self, batch, batch_idx): input, target = batch output = self(input=input, target=target, model=self.model, step="training") - for name in self.training_step_log: - self.log(f"training/{name}", output[name]) + for log_name, output_key in self.training_step_log.items(): + self.log(f"training/{log_name}", output[output_key], sync_dist=True) assert "loss" in output return output @@ -149,8 +149,8 @@ def validation_step(self, batch, batch_idx): input, target = batch output = self(input=input, target=target, model=self.model, step="validation") - for name in self.validation_step_log: - self.log(f"validation/{name}", output[name]) + for log_name, output_key in self.validation_step_log.items(): + self.log(f"validation/{log_name}", output[output_key], sync_dist=True) return output @@ -175,8 +175,8 @@ def test_step(self, batch, batch_idx): input, target = batch output = self(input=input, target=target, model=self.model, step="test") - for name in self.test_step_log: - self.log(f"test/{name}", output[name]) + for log_name, output_key in self.test_step_log.items(): + self.log(f"test/{log_name}", output[output_key], sync_dist=True) return output From 01a20664b2011c20727d1158b1b7476dc9d44779 Mon Sep 17 00:00:00 2001 From: Cory Cornelius Date: Tue, 13 Jun 2023 13:57:49 -0700 Subject: [PATCH 02/12] Fix configs --- mart/configs/model/torchvision_faster_rcnn.yaml | 11 ++++------- mart/configs/model/torchvision_object_detection.yaml | 3 ++- mart/configs/model/torchvision_retinanet.yaml | 4 +++- 3 files changed, 9 insertions(+), 9 deletions(-) diff --git a/mart/configs/model/torchvision_faster_rcnn.yaml b/mart/configs/model/torchvision_faster_rcnn.yaml index c5237184..bc0ce228 100644 --- a/mart/configs/model/torchvision_faster_rcnn.yaml +++ b/mart/configs/model/torchvision_faster_rcnn.yaml @@ -4,13 +4,10 @@ defaults: # log all losses separately in training. 
training_step_log: - [ - "rpn_loss.loss_objectness", - "rpn_loss.loss_rpn_box_reg", - "box_loss.loss_classifier", - "box_loss.loss_box_reg", - "loss", - ] + rpn_loss_objectness: "rpn_loss.loss_objectness" + rpn_loss_rpn_box_reg: "rpn_loss.loss_rpn_box_reg" + box_loss_classifier: "box_loss.loss_classifier" + box_loss_box_reg: "box_loss.loss_box_reg" training_sequence: seq010: diff --git a/mart/configs/model/torchvision_object_detection.yaml b/mart/configs/model/torchvision_object_detection.yaml index a1495dad..c81930a8 100644 --- a/mart/configs/model/torchvision_object_detection.yaml +++ b/mart/configs/model/torchvision_object_detection.yaml @@ -3,7 +3,8 @@ defaults: - modular - /model/modules@modules.preprocessor: tuple_normalizer -training_step_log: ??? +training_step_log: + loss: "loss" training_sequence: ??? diff --git a/mart/configs/model/torchvision_retinanet.yaml b/mart/configs/model/torchvision_retinanet.yaml index 4c45917c..695263a2 100644 --- a/mart/configs/model/torchvision_retinanet.yaml +++ b/mart/configs/model/torchvision_retinanet.yaml @@ -3,7 +3,9 @@ defaults: - torchvision_object_detection # log all losses separately in training. -training_step_log: ["loss_classifier", "loss_box_reg"] +training_step_log: + loss_classifier: "loss_classifier" + loss_box_reg: "loss_box_reg" training_sequence: - preprocessor: ["input"] From df1d0b266483ce62fc076573a94449574d082a6c Mon Sep 17 00:00:00 2001 From: Cory Cornelius Date: Tue, 13 Jun 2023 13:57:58 -0700 Subject: [PATCH 03/12] remove sync_dist --- mart/models/modular.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/mart/models/modular.py b/mart/models/modular.py index 1fe4be99..e09f2c9d 100644 --- a/mart/models/modular.py +++ b/mart/models/modular.py @@ -116,7 +116,7 @@ def training_step(self, batch, batch_idx): output = self(input=input, target=target, model=self.model, step="training") for log_name, output_key in self.training_step_log.items(): - self.log(f"training/{log_name}", output[output_key], sync_dist=True) + self.log(f"training/{log_name}", output[output_key]) assert "loss" in output return output @@ -150,7 +150,7 @@ def validation_step(self, batch, batch_idx): output = self(input=input, target=target, model=self.model, step="validation") for log_name, output_key in self.validation_step_log.items(): - self.log(f"validation/{log_name}", output[output_key], sync_dist=True) + self.log(f"validation/{log_name}", output[output_key]) return output @@ -176,7 +176,7 @@ def test_step(self, batch, batch_idx): output = self(input=input, target=target, model=self.model, step="test") for log_name, output_key in self.test_step_log.items(): - self.log(f"test/{log_name}", output[output_key], sync_dist=True) + self.log(f"test/{log_name}", output[output_key]) return output From 14f4d1fa9f437849655e820f287885864939bf07 Mon Sep 17 00:00:00 2001 From: Cory Cornelius Date: Tue, 13 Jun 2023 14:05:53 -0700 Subject: [PATCH 04/12] backwards compatibility --- mart/models/modular.py | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/mart/models/modular.py b/mart/models/modular.py index e09f2c9d..b24ce6ae 100644 --- a/mart/models/modular.py +++ b/mart/models/modular.py @@ -70,12 +70,21 @@ def __init__( self.lr_scheduler = lr_scheduler + # Be backwards compatible by turning list into dict where each item is its own key-value + if isinstance(training_step_log, (list, tuple)): + training_step_log = { item: item for item in training_step_log} self.training_step_log = training_step_log or {} self.training_metrics = 
training_metrics + # Be backwards compatible by turning list into dict where each item is its own key-value + if isinstance(validation_step_log, (list, tuple)): + validation_step_log = { item: item for item in validation_step_log} self.validation_step_log = validation_step_log or {} self.validation_metrics = validation_metrics + # Be backwards compatible by turning list into dict where each item is its own key-value + if isinstance(test_step_log, (list, tuple)): + test_step_log = { item: item for item in test_step_log} self.test_step_log = test_step_log or {} self.test_metrics = test_metrics From 2e30587274d99afe8f8dc255da44af68a30a05ef Mon Sep 17 00:00:00 2001 From: Cory Cornelius Date: Tue, 13 Jun 2023 14:06:03 -0700 Subject: [PATCH 05/12] Revert "Fix configs" This reverts commit 01a20664b2011c20727d1158b1b7476dc9d44779. --- mart/configs/model/torchvision_faster_rcnn.yaml | 11 +++++++---- mart/configs/model/torchvision_object_detection.yaml | 3 +-- mart/configs/model/torchvision_retinanet.yaml | 4 +--- 3 files changed, 9 insertions(+), 9 deletions(-) diff --git a/mart/configs/model/torchvision_faster_rcnn.yaml b/mart/configs/model/torchvision_faster_rcnn.yaml index bc0ce228..c5237184 100644 --- a/mart/configs/model/torchvision_faster_rcnn.yaml +++ b/mart/configs/model/torchvision_faster_rcnn.yaml @@ -4,10 +4,13 @@ defaults: # log all losses separately in training. training_step_log: - rpn_loss_objectness: "rpn_loss.loss_objectness" - rpn_loss_rpn_box_reg: "rpn_loss.loss_rpn_box_reg" - box_loss_classifier: "box_loss.loss_classifier" - box_loss_box_reg: "box_loss.loss_box_reg" + [ + "rpn_loss.loss_objectness", + "rpn_loss.loss_rpn_box_reg", + "box_loss.loss_classifier", + "box_loss.loss_box_reg", + "loss", + ] training_sequence: seq010: diff --git a/mart/configs/model/torchvision_object_detection.yaml b/mart/configs/model/torchvision_object_detection.yaml index c81930a8..a1495dad 100644 --- a/mart/configs/model/torchvision_object_detection.yaml +++ b/mart/configs/model/torchvision_object_detection.yaml @@ -3,8 +3,7 @@ defaults: - modular - /model/modules@modules.preprocessor: tuple_normalizer -training_step_log: - loss: "loss" +training_step_log: ??? training_sequence: ??? diff --git a/mart/configs/model/torchvision_retinanet.yaml b/mart/configs/model/torchvision_retinanet.yaml index 695263a2..4c45917c 100644 --- a/mart/configs/model/torchvision_retinanet.yaml +++ b/mart/configs/model/torchvision_retinanet.yaml @@ -3,9 +3,7 @@ defaults: - torchvision_object_detection # log all losses separately in training. 
-training_step_log: - loss_classifier: "loss_classifier" - loss_box_reg: "loss_box_reg" +training_step_log: ["loss_classifier", "loss_box_reg"] training_sequence: - preprocessor: ["input"] From 6fef148cb94379eeb4cef983080b90740e5f0bc1 Mon Sep 17 00:00:00 2001 From: Cory Cornelius Date: Tue, 13 Jun 2023 14:08:48 -0700 Subject: [PATCH 06/12] style --- mart/models/modular.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/mart/models/modular.py b/mart/models/modular.py index b24ce6ae..d1d2752c 100644 --- a/mart/models/modular.py +++ b/mart/models/modular.py @@ -72,19 +72,19 @@ def __init__( # Be backwards compatible by turning list into dict where each item is its own key-value if isinstance(training_step_log, (list, tuple)): - training_step_log = { item: item for item in training_step_log} + training_step_log = {item: item for item in training_step_log} self.training_step_log = training_step_log or {} self.training_metrics = training_metrics # Be backwards compatible by turning list into dict where each item is its own key-value if isinstance(validation_step_log, (list, tuple)): - validation_step_log = { item: item for item in validation_step_log} + validation_step_log = {item: item for item in validation_step_log} self.validation_step_log = validation_step_log or {} self.validation_metrics = validation_metrics # Be backwards compatible by turning list into dict where each item is its own key-value if isinstance(test_step_log, (list, tuple)): - test_step_log = { item: item for item in test_step_log} + test_step_log = {item: item for item in test_step_log} self.test_step_log = test_step_log or {} self.test_metrics = test_metrics From c4e0d78813a85fc7f6176ce67def781635b7b3de Mon Sep 17 00:00:00 2001 From: Cory Cornelius Date: Mon, 12 Jun 2023 16:04:53 -0700 Subject: [PATCH 07/12] Make metric logging keys configurable --- mart/models/modular.py | 37 ++++++++++++++++--------------------- 1 file changed, 16 insertions(+), 21 deletions(-) diff --git a/mart/models/modular.py b/mart/models/modular.py index eb5dd934..4fcfe783 100644 --- a/mart/models/modular.py +++ b/mart/models/modular.py @@ -36,6 +36,9 @@ def __init__( test_step_log=None, test_metrics=None, load_state_dict=None, + output_loss_key="loss", + output_preds_key="preds", + output_target_key="target", ): super().__init__() @@ -88,6 +91,10 @@ def __init__( logger.info(f"Loading state_dict {path} for {module.__class__.__name__}...") module.load_state_dict(torch.load(path, map_location="cpu")) + self.output_loss_key = output_loss_key + self.output_preds_key = output_preds_key + self.output_target_key = output_target_key + def configure_optimizers(self): config = {} config["optimizer"] = self.optimizer_fn(self.model) @@ -118,19 +125,15 @@ def training_step(self, batch, batch_idx): for name in self.training_step_log: self.log(f"training/{name}", output[name]) - assert "loss" in output - return output - - def training_step_end(self, output): if self.training_metrics is not None: # Some models only return loss in the training mode. - if "preds" not in output or "target" not in output: + if self.output_preds_key not in output or self.output_target_key not in output: raise ValueError( - "You have specified training_metrics, but the model does not return preds and target during training. You can either nullify training_metrics or configure the model to return preds and target in the training output." 
+ f"You have specified training_metrics, but the model does not return {self.output_preds_key} or {self.output_target_key} during training. You can either nullify training_metrics or configure the model to return {self.output_preds_key} and {self.output_target_key} in the training output." ) - self.training_metrics(output["preds"], output["target"]) - loss = output.pop("loss") - return loss + self.training_metrics(output[self.output_preds_key], output[self.output_target_key]) + + return output[self.output_loss_key] def training_epoch_end(self, outputs): if self.training_metrics is not None: @@ -152,13 +155,9 @@ def validation_step(self, batch, batch_idx): for name in self.validation_step_log: self.log(f"validation/{name}", output[name]) - return output + self.validation_metrics(output[self.output_preds_key], output[self.output_target_key]) - def validation_step_end(self, output): - self.validation_metrics(output["preds"], output["target"]) - - # I don't know why this is required to prevent CUDA memory leak in validaiton and test. (Not required in training.) - output.clear() + return None def validation_epoch_end(self, outputs): metrics = self.validation_metrics.compute() @@ -178,13 +177,9 @@ def test_step(self, batch, batch_idx): for name in self.test_step_log: self.log(f"test/{name}", output[name]) - return output - - def test_step_end(self, output): - self.test_metrics(output["preds"], output["target"]) + self.test_metrics(output[self.output_preds_key], output[self.output_target_key]) - # I don't know why this is required to prevent CUDA memory leak in validaiton and test. (Not required in training.) - output.clear() + return None def test_epoch_end(self, outputs): metrics = self.test_metrics.compute() From 508798ca12d98d2ce757bcb18779b7c3fe474cdd Mon Sep 17 00:00:00 2001 From: Cory Cornelius Date: Tue, 13 Jun 2023 14:15:36 -0700 Subject: [PATCH 08/12] cleanup --- mart/models/modular.py | 20 ++++++++++++++++---- 1 file changed, 16 insertions(+), 4 deletions(-) diff --git a/mart/models/modular.py b/mart/models/modular.py index 4fcfe783..d663dda3 100644 --- a/mart/models/modular.py +++ b/mart/models/modular.py @@ -125,6 +125,10 @@ def training_step(self, batch, batch_idx): for name in self.training_step_log: self.log(f"training/{name}", output[name]) + assert "loss" in output + return output + + def training_step_end(self, output): if self.training_metrics is not None: # Some models only return loss in the training mode. if self.output_preds_key not in output or self.output_target_key not in output: @@ -132,8 +136,8 @@ def training_step(self, batch, batch_idx): f"You have specified training_metrics, but the model does not return {self.output_preds_key} or {self.output_target_key} during training. You can either nullify training_metrics or configure the model to return {self.output_preds_key} and {self.output_target_key} in the training output." 
) self.training_metrics(output[self.output_preds_key], output[self.output_target_key]) - - return output[self.output_loss_key] + loss = output.pop(self.output_loss_key) + return loss def training_epoch_end(self, outputs): if self.training_metrics is not None: @@ -155,9 +159,13 @@ def validation_step(self, batch, batch_idx): for name in self.validation_step_log: self.log(f"validation/{name}", output[name]) + return output + + def validation_step_end(self, output): self.validation_metrics(output[self.output_preds_key], output[self.output_target_key]) - return None + # I don't know why this is required to prevent CUDA memory leak in validaiton and test. (Not required in training.) + output.clear() def validation_epoch_end(self, outputs): metrics = self.validation_metrics.compute() @@ -177,9 +185,13 @@ def test_step(self, batch, batch_idx): for name in self.test_step_log: self.log(f"test/{name}", output[name]) + return output + + def test_step_end(self, output): self.test_metrics(output[self.output_preds_key], output[self.output_target_key]) - return None + # I don't know why this is required to prevent CUDA memory leak in validaiton and test. (Not required in training.) + output.clear() def test_epoch_end(self, outputs): metrics = self.test_metrics.compute() From fc770e81d7783edf7e0ef7bf04b4b12a25a2eaa0 Mon Sep 17 00:00:00 2001 From: Cory Cornelius Date: Tue, 13 Jun 2023 17:12:51 -0700 Subject: [PATCH 09/12] Remove *_step_end --- mart/models/modular.py | 20 ++++---------------- 1 file changed, 4 insertions(+), 16 deletions(-) diff --git a/mart/models/modular.py b/mart/models/modular.py index d663dda3..4fcfe783 100644 --- a/mart/models/modular.py +++ b/mart/models/modular.py @@ -125,10 +125,6 @@ def training_step(self, batch, batch_idx): for name in self.training_step_log: self.log(f"training/{name}", output[name]) - assert "loss" in output - return output - - def training_step_end(self, output): if self.training_metrics is not None: # Some models only return loss in the training mode. if self.output_preds_key not in output or self.output_target_key not in output: @@ -136,8 +132,8 @@ def training_step_end(self, output): f"You have specified training_metrics, but the model does not return {self.output_preds_key} or {self.output_target_key} during training. You can either nullify training_metrics or configure the model to return {self.output_preds_key} and {self.output_target_key} in the training output." ) self.training_metrics(output[self.output_preds_key], output[self.output_target_key]) - loss = output.pop(self.output_loss_key) - return loss + + return output[self.output_loss_key] def training_epoch_end(self, outputs): if self.training_metrics is not None: @@ -159,13 +155,9 @@ def validation_step(self, batch, batch_idx): for name in self.validation_step_log: self.log(f"validation/{name}", output[name]) - return output - - def validation_step_end(self, output): self.validation_metrics(output[self.output_preds_key], output[self.output_target_key]) - # I don't know why this is required to prevent CUDA memory leak in validaiton and test. (Not required in training.) 
- output.clear() + return None def validation_epoch_end(self, outputs): metrics = self.validation_metrics.compute() @@ -185,13 +177,9 @@ def test_step(self, batch, batch_idx): for name in self.test_step_log: self.log(f"test/{name}", output[name]) - return output - - def test_step_end(self, output): self.test_metrics(output[self.output_preds_key], output[self.output_target_key]) - # I don't know why this is required to prevent CUDA memory leak in validaiton and test. (Not required in training.) - output.clear() + return None def test_epoch_end(self, outputs): metrics = self.test_metrics.compute() From c31f4deddb65f6008f8c570bafd3ed2bf1c33d77 Mon Sep 17 00:00:00 2001 From: Cory Cornelius Date: Mon, 12 Jun 2023 15:56:55 -0700 Subject: [PATCH 10/12] Don't require output module with SequentialDict --- mart/nn/nn.py | 7 ++----- 1 file changed, 2 insertions(+), 5 deletions(-) diff --git a/mart/nn/nn.py b/mart/nn/nn.py index 93b0f07f..754e8657 100644 --- a/mart/nn/nn.py +++ b/mart/nn/nn.py @@ -49,10 +49,6 @@ class SequentialDict(torch.nn.ModuleDict): """ def __init__(self, modules, sequences=None): - - if "output" not in modules: - raise ValueError("Modules must have an module named 'output'") - super().__init__(modules) self._sequences = { @@ -121,7 +117,8 @@ def forward(self, step=None, sequence=None, **kwargs): # Pop the executed module to proceed with the sequence sequence.popitem(last=False) - return kwargs["output"] + # return kwargs as DotDict + return DotDict(kwargs) class ReturnKwargs(torch.nn.Module): From 549f705c3a71e58cb014fd3ffe564f717580578a Mon Sep 17 00:00:00 2001 From: Cory Cornelius Date: Tue, 13 Jun 2023 17:42:07 -0700 Subject: [PATCH 11/12] fix configs and tests --- .../attack/gain/rcnn_training_loss.yaml | 8 ++-- mart/configs/model/classifier.yaml | 17 ------- .../model/torchvision_faster_rcnn.yaml | 46 ++----------------- .../model/torchvision_object_detection.yaml | 10 ++-- mart/configs/model/torchvision_retinanet.yaml | 27 ++--------- tests/test_experiments.py | 6 +-- 6 files changed, 18 insertions(+), 96 deletions(-) diff --git a/mart/configs/attack/gain/rcnn_training_loss.yaml b/mart/configs/attack/gain/rcnn_training_loss.yaml index eb7abb9c..9ed8671b 100644 --- a/mart/configs/attack/gain/rcnn_training_loss.yaml +++ b/mart/configs/attack/gain/rcnn_training_loss.yaml @@ -2,8 +2,8 @@ _target_: mart.nn.CallWith module: _target_: mart.nn.Sum arg_keys: - - rpn_loss.loss_objectness - - rpn_loss.loss_rpn_box_reg - - box_loss.loss_classifier - - box_loss.loss_box_reg + - "losses_and_detections.training.loss_objectness" + - "losses_and_detections.training.loss_rpn_box_reg" + - "losses_and_detections.training.loss_classifier" + - "losses_and_detections.training.loss_box_reg" kwarg_keys: null diff --git a/mart/configs/model/classifier.yaml b/mart/configs/model/classifier.yaml index ad664989..df1a9c5b 100644 --- a/mart/configs/model/classifier.yaml +++ b/mart/configs/model/classifier.yaml @@ -17,14 +17,6 @@ training_sequence: seq040: preds: _call_with_args_: ["logits"] - seq050: - output: - { - "preds": "preds", - "target": "target", - "logits": "logits", - "loss": "loss", - } # The kwargs-centric version. # We may use *args as **kwargs to avoid the lengthy _call_with_args_. @@ -36,10 +28,6 @@ validation_sequence: - logits: ["preprocessor"] - preds: input: logits - - output: - preds: preds - target: target - logits: logits # The simplified version. # We treat a list as the `_call_with_args_` parameter. 
@@ -50,8 +38,6 @@ test_sequence: logits: ["preprocessor"] seq030: preds: ["logits"] - seq040: - output: { preds: preds, target: target, logits: logits } modules: preprocessor: ??? @@ -64,6 +50,3 @@ modules: preds: _target_: torch.nn.Softmax dim: 1 - - output: - _target_: mart.nn.ReturnKwargs diff --git a/mart/configs/model/torchvision_faster_rcnn.yaml b/mart/configs/model/torchvision_faster_rcnn.yaml index c5237184..65200579 100644 --- a/mart/configs/model/torchvision_faster_rcnn.yaml +++ b/mart/configs/model/torchvision_faster_rcnn.yaml @@ -4,13 +4,10 @@ defaults: # log all losses separately in training. training_step_log: - [ - "rpn_loss.loss_objectness", - "rpn_loss.loss_rpn_box_reg", - "box_loss.loss_classifier", - "box_loss.loss_box_reg", - "loss", - ] + loss_objectness: "losses_and_detections.training.loss_objectness" + loss_rpn_box_reg: "losses_and_detections.training.loss_rpn_box_reg" + loss_classifier: "losses_and_detections.training.loss_classifier" + loss_box_reg: "losses_and_detections.training.loss_box_reg" training_sequence: seq010: @@ -29,19 +26,6 @@ training_sequence: "losses_and_detections.training.loss_box_reg", ] - seq040: - output: - # Output all losses for logging, defined in model.training_step_log - { - "preds": "losses_and_detections.eval", - "target": "target", - "loss": "loss", - "rpn_loss.loss_objectness": "losses_and_detections.training.loss_objectness", - "rpn_loss.loss_rpn_box_reg": "losses_and_detections.training.loss_rpn_box_reg", - "box_loss.loss_classifier": "losses_and_detections.training.loss_classifier", - "box_loss.loss_box_reg": "losses_and_detections.training.loss_box_reg", - } - validation_sequence: seq010: preprocessor: ["input"] @@ -49,17 +33,6 @@ validation_sequence: seq020: losses_and_detections: ["preprocessor", "target"] - seq030: - output: - { - "preds": "losses_and_detections.eval", - "target": "target", - "rpn_loss.loss_objectness": "losses_and_detections.training.loss_objectness", - "rpn_loss.loss_rpn_box_reg": "losses_and_detections.training.loss_rpn_box_reg", - "box_loss.loss_classifier": "losses_and_detections.training.loss_classifier", - "box_loss.loss_box_reg": "losses_and_detections.training.loss_box_reg", - } - test_sequence: seq010: preprocessor: ["input"] @@ -67,17 +40,6 @@ test_sequence: seq020: losses_and_detections: ["preprocessor", "target"] - seq030: - output: - { - "preds": "losses_and_detections.eval", - "target": "target", - "rpn_loss.loss_objectness": "losses_and_detections.training.loss_objectness", - "rpn_loss.loss_rpn_box_reg": "losses_and_detections.training.loss_rpn_box_reg", - "box_loss.loss_classifier": "losses_and_detections.training.loss_classifier", - "box_loss.loss_box_reg": "losses_and_detections.training.loss_box_reg", - } - modules: losses_and_detections: # 17s: DualModeGeneralizedRCNN diff --git a/mart/configs/model/torchvision_object_detection.yaml b/mart/configs/model/torchvision_object_detection.yaml index a1495dad..1bbd678c 100644 --- a/mart/configs/model/torchvision_object_detection.yaml +++ b/mart/configs/model/torchvision_object_detection.yaml @@ -3,14 +3,15 @@ defaults: - modular - /model/modules@modules.preprocessor: tuple_normalizer -training_step_log: ??? +training_step_log: + loss: "loss" training_sequence: ??? - validation_sequence: ??? - test_sequence: ??? +output_preds_key: "losses_and_detections.eval" + modules: losses_and_detections: # Return losses in the training mode and predictions in the eval mode in one pass. 
@@ -19,6 +20,3 @@ modules: loss: _target_: mart.nn.Sum - - output: - _target_: mart.nn.ReturnKwargs diff --git a/mart/configs/model/torchvision_retinanet.yaml b/mart/configs/model/torchvision_retinanet.yaml index 4c45917c..34b66945 100644 --- a/mart/configs/model/torchvision_retinanet.yaml +++ b/mart/configs/model/torchvision_retinanet.yaml @@ -3,7 +3,9 @@ defaults: - torchvision_object_detection # log all losses separately in training. -training_step_log: ["loss_classifier", "loss_box_reg"] +training_step_log: + loss_classifier: "losses_and_detections.training.classification" + loss_box_reg: "losses_and_detections.training.bbox_regression" training_sequence: - preprocessor: ["input"] @@ -14,37 +16,14 @@ training_sequence: "losses_and_detections.training.classification", "losses_and_detections.training.bbox_regression", ] - - output: - # Output all losses for logging, defined in model.training_step_log - { - "preds": "losses_and_detections.eval", - "target": "target", - "loss": "loss", - "loss_classifier": "losses_and_detections.training.classification", - "loss_box_reg": "losses_and_detections.training.bbox_regression", - } validation_sequence: - preprocessor: ["input"] - losses_and_detections: ["preprocessor", "target"] - - output: - { - "preds": "losses_and_detections.eval", - "target": "target", - "loss_classifier": "losses_and_detections.training.classification", - "loss_box_reg": "losses_and_detections.training.bbox_regression", - } test_sequence: - preprocessor: ["input"] - losses_and_detections: ["preprocessor", "target"] - - output: - { - "preds": "losses_and_detections.eval", - "target": "target", - "loss_classifier": "losses_and_detections.training.classification", - "loss_box_reg": "losses_and_detections.training.bbox_regression", - } modules: losses_and_detections: diff --git a/tests/test_experiments.py b/tests/test_experiments.py index d128c1df..cf4ffea7 100644 --- a/tests/test_experiments.py +++ b/tests/test_experiments.py @@ -209,7 +209,7 @@ def test_coco_fasterrcnn_experiment(coco_cfg, tmp_path): "-m", "experiment=COCO_TorchvisionFasterRCNN", "hydra.sweep.dir=" + str(tmp_path), - "optimized_metric=training/rpn_loss.loss_objectness", + "optimized_metric=training/loss_objectness", ] + overrides run_sh_command(command) @@ -224,7 +224,7 @@ def test_coco_fasterrcnn_adv_experiment(coco_cfg, tmp_path): "-m", "experiment=COCO_TorchvisionFasterRCNN_Adv", "hydra.sweep.dir=" + str(tmp_path), - "optimized_metric=training/rpn_loss.loss_objectness", + "optimized_metric=training/loss_objectness", ] + overrides run_sh_command(command) @@ -256,7 +256,7 @@ def test_armory_carla_fasterrcnn_experiment(carla_cfg, tmp_path): "experiment=ArmoryCarlaOverObjDet_TorchvisionFasterRCNN", "+attack@model.modules.input_adv_test=object_detection_mask_adversary", "hydra.sweep.dir=" + str(tmp_path), - "optimized_metric=training/rpn_loss.loss_objectness", + "optimized_metric=training/loss_objectness", ] + overrides run_sh_command(command) From 5e7381743d018ab27d3ed61fb033244192b2ebbf Mon Sep 17 00:00:00 2001 From: Cory Cornelius Date: Tue, 13 Jun 2023 17:55:33 -0700 Subject: [PATCH 12/12] Generalize attack objectives --- mart/configs/attack/objective/misclassification.yaml | 4 ++-- mart/configs/attack/objective/object_detection_missed.yaml | 2 +- mart/configs/attack/objective/zero_ap.yaml | 4 ++-- mart/configs/model/modular.yaml | 3 +++ 4 files changed, 8 insertions(+), 5 deletions(-) diff --git a/mart/configs/attack/objective/misclassification.yaml b/mart/configs/attack/objective/misclassification.yaml 
index e2e9b819..82e055cd 100644 --- a/mart/configs/attack/objective/misclassification.yaml +++ b/mart/configs/attack/objective/misclassification.yaml @@ -2,6 +2,6 @@ _target_: mart.nn.CallWith module: _target_: mart.attack.objective.Mispredict arg_keys: - - preds - - target + - ${model.output_preds_key} + - ${model.output_target_key} kwarg_keys: null diff --git a/mart/configs/attack/objective/object_detection_missed.yaml b/mart/configs/attack/objective/object_detection_missed.yaml index dec2410c..7ebb1dc3 100644 --- a/mart/configs/attack/objective/object_detection_missed.yaml +++ b/mart/configs/attack/objective/object_detection_missed.yaml @@ -3,5 +3,5 @@ module: _target_: mart.attack.objective.Missed confidence_threshold: 0.0 arg_keys: - - preds + - ${model.output_preds_key} kwarg_keys: null diff --git a/mart/configs/attack/objective/zero_ap.yaml b/mart/configs/attack/objective/zero_ap.yaml index 6a43f77d..91dc5b96 100644 --- a/mart/configs/attack/objective/zero_ap.yaml +++ b/mart/configs/attack/objective/zero_ap.yaml @@ -4,6 +4,6 @@ module: iou_threshold: 0.5 confidence_threshold: 0.0 arg_keys: - - preds - - target + - ${model.output_preds_key} + - ${model.output_target_key} kwarg_keys: null diff --git a/mart/configs/model/modular.yaml b/mart/configs/model/modular.yaml index f4a6976f..6c137a53 100644 --- a/mart/configs/model/modular.yaml +++ b/mart/configs/model/modular.yaml @@ -1,6 +1,9 @@ _target_: mart.models.LitModular _convert_: all +output_preds_key: "preds" +output_target_key: "target" + modules: ??? optimizer: ???
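
Taken together, patches 01, 04, and 06 settle on one convention for the *_step_log options: a dict that maps a logging name to the key under which that value appears in the model output, with plain lists still accepted and normalized for backwards compatibility. The following is a minimal standalone sketch of that convention, not the actual mart.models.LitModular code:

def normalize_step_log(step_log):
    # Backwards compatible: a list such as ["loss"] becomes {"loss": "loss"},
    # i.e. each item serves as both the logging name and the output key.
    if isinstance(step_log, (list, tuple)):
        step_log = {item: item for item in step_log}
    return step_log or {}

# Inside training_step the dict is then consumed as:
#   for log_name, output_key in self.training_step_log.items():
#       self.log(f"training/{log_name}", output[output_key])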
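
Patch 10 makes SequentialDict.forward return its kwargs wrapped in a DotDict instead of requiring a dedicated "output" module, which is what lets patch 11 drop the output mappings from the configs and reference nested values directly with dotted keys such as "losses_and_detections.training.loss_objectness". The DotDict implementation itself is not shown in this series; the sketch below is a hypothetical stand-in that assumes a dotted key is resolved by walking nested mappings one segment at a time:

class DottedLookup(dict):
    # Hypothetical stand-in for mart.nn.DotDict (not shown in this series);
    # assumes "a.b.c" walks nested mappings one segment at a time.
    def __getitem__(self, key):
        first, _, rest = key.partition(".")
        value = dict.__getitem__(self, first)
        for part in rest.split(".") if rest else []:
            value = value[part]
        return value

output = DottedLookup(
    {"loss": 1.23, "losses_and_detections": {"training": {"loss_objectness": 0.5}}}
)
assert output["losses_and_detections.training.loss_objectness"] == 0.5

Top-level lookups and membership tests behave as with a plain dict, so only the dotted keys used by *_step_log and the attack gain need the extended lookup.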
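
Patch 12 removes the hard-coded "preds"/"target" names from the attack objectives and interpolates them from the model config instead, so a model such as torchvision_object_detection.yaml can point the objectives at "losses_and_detections.eval". The ${model.output_preds_key} references are resolved by OmegaConf, the config library underneath Hydra; the snippet below is a standalone illustration of that mechanism (not MART code), assuming omegaconf is installed:

from omegaconf import OmegaConf

cfg = OmegaConf.create(
    {
        "model": {
            "output_preds_key": "losses_and_detections.eval",
            "output_target_key": "target",
        },
        "objective": {
            "arg_keys": ["${model.output_preds_key}", "${model.output_target_key}"]
        },
    }
)
# resolve=True expands the interpolations against the root config.
resolved = OmegaConf.to_container(cfg, resolve=True)
assert resolved["objective"]["arg_keys"] == ["losses_and_detections.eval", "target"]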