diff --git a/PyTorch/dev/cv/image_classification/DIN_ID2837_for_PyTorch/layer.py b/PyTorch/dev/cv/image_classification/DIN_ID2837_for_PyTorch/layer.py
index ca24a1b63bdff8c460286ef6a7c1a7cfb8a33c1a..1ef051546344bc1ad44974e118fa2d99f0ec0e2a 100644
--- a/PyTorch/dev/cv/image_classification/DIN_ID2837_for_PyTorch/layer.py
+++ b/PyTorch/dev/cv/image_classification/DIN_ID2837_for_PyTorch/layer.py
@@ -122,7 +122,7 @@ class AttentionLayer( nn.Module):
         combination = torch.cat( [ fact, query, fact * query, query - fact ], dim = 2)
         scores = self.model( combination).squeeze()
-        scores = torch.where( mask == 1, scores, torch.ones_like( scores) * ( -2 ** 31 ) )
+        scores = torch.where( mask == 1, scores, torch.ones_like( scores) * ( -65504 ) )
         scores = ( scores.softmax( dim = -1) * mask ).view( (B , 1, T))