diff --git a/encoder.py b/encoder.py
index d9dccb1c4a3d855f58b1fd6243604bdcb9b842a6..1b77a669bf05430d4035d56004c66ab13f11f264 100644
--- a/encoder.py
+++ b/encoder.py
@@ -248,14 +248,14 @@ class EncoderAlgorithm(QgsProcessingAlgorithm):
             'ViT base DINO',
             'ViT tiny Imagenet (smallest)',
             'ViT base MAE',
-            # 'SAM',
+            'SAM',
             '--Empty--'
             ]
         self.timm_backbone_opt = [
             'vit_base_patch16_224.dino',
             'vit_tiny_patch16_224.augreg_in21k',
             'vit_base_patch16_224.mae',
-            # 'samvit_base_patch16.sa1b',
+            'samvit_base_patch16.sa1b',
             ]
         self.addParameter (
             QgsProcessingParameterEnum(
diff --git a/tests/test_encoder.py b/tests/test_encoder.py
index 948c45340a76b37048acaf39be23d9a008db5f72..cc3dd41a06da89bec0744ecef887dc2be5fe1a16 100644
--- a/tests/test_encoder.py
+++ b/tests/test_encoder.py
@@ -98,13 +98,13 @@ class TestEncoderAlgorithm(unittest.TestCase):
             'vit_base_patch16_224.dino',
             'vit_tiny_patch16_224.augreg_in21k',
             'vit_base_patch16_224.mae',
-            # 'samvit_base_patch16.sa1b',
+            'samvit_base_patch16.sa1b',
             ]
         expected_output_size = [
             torch.Size([1,197,768]),
             torch.Size([1,197,192]),
             torch.Size([1,197,768]),
-            # torch.Size([1, 256, 64, 64]),
+            torch.Size([1, 256, 64, 64]),
             ]
         for arch, exp_feat_size in zip(archs, expected_output_size):
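
The test change reflects that the SAM backbone does not produce the usual ViT token sequence. The snippet below is a minimal sketch (not part of the patch) of how the shapes could be checked standalone with timm; it assumes a timm release that ships the 'samvit_base_patch16.sa1b' config with its default 1024x1024 input size, and exact shapes may differ across timm versions.

# Sketch only: compares forward_features output of a plain ViT and the SAM ViT.
import timm
import torch

# Plain ViTs return a token sequence: (batch, 1 CLS token + 14*14 patch tokens, dim).
vit = timm.create_model('vit_base_patch16_224.mae', pretrained=False)
tokens = vit.forward_features(torch.randn(1, 3, 224, 224))
print(tokens.shape)  # expected: torch.Size([1, 197, 768])

# The SAM image encoder keeps a spatial layout and projects it through a neck,
# so forward_features yields a (batch, channels, H/16, W/16) feature map.
sam = timm.create_model('samvit_base_patch16.sa1b', pretrained=False)
fmap = sam.forward_features(torch.randn(1, 3, 1024, 1024))
print(fmap.shape)  # expected: torch.Size([1, 256, 64, 64])

This difference is why the expected_output_size list in tests/test_encoder.py gains a 4-D entry while the other backbones keep 3-D token shapes.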