From 15c1f1f6471344e7cd03aa97d9763b89f8471090 Mon Sep 17 00:00:00 2001
From: fhw
Date: Wed, 10 Jul 2024 19:55:47 +0800
Subject: [PATCH] ffn + Dropout

---
 mmpose/models/backbones/vit_sam.py | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)

diff --git a/mmpose/models/backbones/vit_sam.py b/mmpose/models/backbones/vit_sam.py
index 7858034..70f8c82 100644
--- a/mmpose/models/backbones/vit_sam.py
+++ b/mmpose/models/backbones/vit_sam.py
@@ -259,7 +259,7 @@ class CustomAttentionFFN(nn.Module):
             nn.Linear(dim, dim * 4),
             nn.GELU(),
             nn.Linear(dim * 4, dim),
-            nn.DropPath(proj_drop)
+            nn.Dropout(proj_drop)
         )
         self.norm1 = nn.LayerNorm(dim)
         self.norm2 = nn.LayerNorm(dim)
@@ -330,8 +330,8 @@ class ViTSam(BaseBackbone):
                 param.requires_grad = False
 
         # cross-attention
-        # self.cross_attn = Cross_Attention(embed_dim, num_heads=num_heads, qkv_bias=qkv_bias, \
-        #     qk_scale=qk_scale, attn_drop=attn_drop_rate, proj_drop=drop_rate)
+        self.cross_attn = Cross_Attention(embed_dim, num_heads=num_heads, qkv_bias=qkv_bias, \
+            qk_scale=qk_scale, attn_drop=attn_drop_rate, proj_drop=drop_rate)
 
         # vit_token goes through self-attention, then cross-attention with sam_token; the result is passed through an FFN
         # self.custom_attn_ffn = CustomAttentionFFN(embed_dim, num_heads=num_heads, qkv_bias=qkv_bias, \
@@ -342,7 +342,7 @@
             nn.Linear(embed_dim, embed_dim * 4),
             nn.GELU(),
             nn.Linear(embed_dim * 4, embed_dim),
-            nn.DropPath(drop_rate)
+            nn.Dropout(drop_rate)
         )
 
         self.sam_norm = norm_layer(embed_dim)
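
Note on the change itself: torch.nn does not provide a DropPath module (stochastic depth comes from timm), so nn.DropPath(...) inside these nn.Sequential blocks would fail as soon as the module is constructed; nn.Dropout is the element-wise regularizer substituted here. Below is a minimal sketch of the FFN block as it reads after the patch; the name sam_ffn and the values embed_dim = 768, drop_rate = 0.1 are illustrative only, not taken from the repo.

    import torch
    import torch.nn as nn

    embed_dim, drop_rate = 768, 0.1   # illustrative values, not from the repo

    # FFN block after the patch: nn.Dropout replaces nn.DropPath, which does
    # not exist in torch.nn and would raise AttributeError at construction.
    sam_ffn = nn.Sequential(
        nn.Linear(embed_dim, embed_dim * 4),
        nn.GELU(),
        nn.Linear(embed_dim * 4, embed_dim),
        nn.Dropout(drop_rate),
    )

    x = torch.randn(2, 196, embed_dim)   # (batch, tokens, embed_dim)
    print(sam_ffn(x).shape)              # torch.Size([2, 196, 768])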