From 5494206a04b86f95aef5b0aa61c8658985e6459e Mon Sep 17 00:00:00 2001
From: thanhvc3
Date: Mon, 29 Apr 2024 17:06:55 +0700
Subject: [PATCH] try modify swin

---
 models.py | 45 +++++++++++++++++++++++++++++++++++++++------
 1 file changed, 39 insertions(+), 6 deletions(-)

diff --git a/models.py b/models.py
index 40ea5c1..04795de 100644
--- a/models.py
+++ b/models.py
@@ -547,6 +547,7 @@ class FouriER(torch.nn.Module):
         down_patch_size=3
         down_stride=2
         down_pad=1
+        window_size = 4
         num_classes=self.p.embed_dim
         for i in range(len(layers)):
             stage = basic_blocks(embed_dims[i], i, layers,
@@ -556,7 +557,8 @@ class FouriER(torch.nn.Module):
                                  drop_path_rate=drop_path_rate,
                                  use_layer_scale=use_layer_scale,
                                  layer_scale_init_value=layer_scale_init_value,
-                                 num_heads=num_heads[i])
+                                 num_heads=num_heads[i], input_resolution=(image_h // (2**i), image_w // (2**i)),
+                                 window_size=window_size, shift_size=0 if (i % 2 == 0) else window_size // 2)
             network.append(stage)
             if i >= len(layers) - 1:
                 break
@@ -734,7 +736,7 @@ def basic_blocks(dim, index, layers,
                  pool_size=3, mlp_ratio=4.,
                  act_layer=nn.GELU, norm_layer=GroupNorm,
                  drop_rate=.0, drop_path_rate=0.,
-                 use_layer_scale=True, layer_scale_init_value=1e-5, num_heads = 4):
+                 use_layer_scale=True, layer_scale_init_value=1e-5, num_heads=4, input_resolution=None, window_size=4, shift_size=2):
     """
     generate PoolFormer blocks for a stage
     return: PoolFormer blocks
@@ -749,7 +751,8 @@ def basic_blocks(dim, index, layers,
             drop=drop_rate, drop_path=block_dpr,
             use_layer_scale=use_layer_scale,
             layer_scale_init_value=layer_scale_init_value,
-            num_heads=num_heads
+            num_heads=num_heads, input_resolution=input_resolution,
+            window_size=window_size, shift_size=shift_size
             ))
     blocks = nn.Sequential(*blocks)
 
@@ -933,14 +936,15 @@ class PoolFormerBlock(nn.Module):
     def __init__(self, dim, pool_size=3, mlp_ratio=4.,
                  act_layer=nn.GELU, norm_layer=GroupNorm,
                  drop=0., drop_path=0., num_heads=4,
-                 use_layer_scale=True, layer_scale_init_value=1e-5):
+                 use_layer_scale=True, layer_scale_init_value=1e-5, input_resolution=None, window_size=4, shift_size=2):
 
         super().__init__()
 
         self.norm1 = norm_layer(dim)
         #self.token_mixer = Pooling(pool_size=pool_size)
         # self.token_mixer = FNetBlock()
-        self.window_size = 4
-        self.attn_mask = None
+        self.window_size = window_size
+        self.shift_size = shift_size
+        self.input_resolution = input_resolution
         self.token_mixer = WindowAttention(dim=dim, window_size=to_2tuple(self.window_size), num_heads=num_heads, attn_drop=0.1, proj_drop=0.2)
         self.norm2 = norm_layer(dim)
@@ -957,6 +961,31 @@ class PoolFormerBlock(nn.Module):
                 layer_scale_init_value * torch.ones((dim)), requires_grad=True)
             self.layer_scale_2 = nn.Parameter(
                 layer_scale_init_value * torch.ones((dim)), requires_grad=True)
+
+        if self.shift_size > 0:
+            # calculate attention mask for SW-MSA
+            H, W = self.input_resolution
+            img_mask = torch.zeros((1, H, W, 1))  # 1 H W 1
+            h_slices = (slice(0, -self.window_size),
+                        slice(-self.window_size, -self.shift_size),
+                        slice(-self.shift_size, None))
+            w_slices = (slice(0, -self.window_size),
+                        slice(-self.window_size, -self.shift_size),
+                        slice(-self.shift_size, None))
+            cnt = 0
+            for h in h_slices:
+                for w in w_slices:
+                    img_mask[:, h, w, :] = cnt
+                    cnt += 1
+
+            mask_windows = window_partition(img_mask, self.window_size)  # nW, window_size, window_size, 1
+            mask_windows = mask_windows.view(-1, self.window_size * self.window_size)
+            attn_mask = mask_windows.unsqueeze(1) - mask_windows.unsqueeze(2)
+            attn_mask = attn_mask.masked_fill(attn_mask != 0, float(-100.0)).masked_fill(attn_mask == 0, float(0.0))
+        else:
+            attn_mask = None
+
+        self.register_buffer("attn_mask", attn_mask)
 
     def forward(self, x):
         B, C, H, W = x.shape
@@ -965,6 +994,10 @@ class PoolFormerBlock(nn.Module):
         attn_windows = self.token_mixer(x_windows, mask=self.attn_mask)
         attn_windows = attn_windows.view(-1, self.window_size, self.window_size, C)
         x_attn = window_reverse(attn_windows, self.window_size, H, W)
+        if self.shift_size > 0:
+            x = torch.roll(x_attn, shifts=(self.shift_size, self.shift_size), dims=(1, 2))  # roll features back after shifted-window attention
+        else:
+            x = x_attn
         if self.use_layer_scale:
             x = x + self.drop_path(
                 self.layer_scale_1.unsqueeze(-1).unsqueeze(-1)
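
Reviewer note: below is a minimal, self-contained sketch of the SW-MSA mask construction that this patch adds to PoolFormerBlock.__init__, useful for sanity-checking it outside models.py. The window_partition helper here is written to match the Swin Transformer reference implementation that models.py is assumed to provide; the build_sw_msa_mask name and the resolution, window, and shift values are illustrative only.

    import torch

    def window_partition(x, window_size):
        # split (B, H, W, C) feature maps into (num_windows * B, ws, ws, C) tiles
        B, H, W, C = x.shape
        x = x.view(B, H // window_size, window_size, W // window_size, window_size, C)
        return x.permute(0, 1, 3, 2, 4, 5).contiguous().view(-1, window_size, window_size, C)

    def build_sw_msa_mask(input_resolution, window_size, shift_size):
        # label each pixel with the region it belongs to after the cyclic shift
        H, W = input_resolution
        img_mask = torch.zeros((1, H, W, 1))
        region_slices = (slice(0, -window_size),
                         slice(-window_size, -shift_size),
                         slice(-shift_size, None))
        cnt = 0
        for h in region_slices:
            for w in region_slices:
                img_mask[:, h, w, :] = cnt
                cnt += 1
        # positions in the same window but from different regions must not attend
        # to each other: their pairwise region-id difference is nonzero
        mask_windows = window_partition(img_mask, window_size).view(-1, window_size * window_size)
        attn_mask = mask_windows.unsqueeze(1) - mask_windows.unsqueeze(2)
        return attn_mask.masked_fill(attn_mask != 0, -100.0).masked_fill(attn_mask == 0, 0.0)

    mask = build_sw_msa_mask(input_resolution=(8, 8), window_size=4, shift_size=2)
    print(mask.shape)                          # torch.Size([4, 16, 16]): nW, ws*ws, ws*ws
    print((mask == -100.0).flatten(1).any(1))  # only windows crossing the wrap-around seam are masked

For this mask to be meaningful, Swin applies torch.roll(x, shifts=(-shift_size, -shift_size), dims=(1, 2)) before window_partition and the inverse roll afterwards; the forward hunk above only shows the inverse roll after window_reverse, so the forward shift is assumed to happen where x_windows is computed, which is not visible in this diff.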