[Draft] multi-head output
import torch.nn as nn
import torch.nn.functional as F


class ConResNet(nn.Module):
    """backbone + projection head"""
    def __init__(self, name='resnet50', head='mlp', feat_dim=128,
                 selfcon_pos=[False, False, False], selfcon_arch='resnet',
                 selfcon_size='same', dataset=''):
        super(ConResNet, self).__init__()
        # model_dict maps a backbone name to (builder function, feature dim)
        model_fun, dim_in = model_dict[name]
        self.encoder = model_fun(selfcon_pos=selfcon_pos,
                                 selfcon_arch=selfcon_arch,
                                 selfcon_size=selfcon_size,
                                 dataset=dataset)
        if head == 'linear':
            self.head = nn.Linear(dim_in, feat_dim)
            # one sub-head per enabled intermediate exit; wrapped in
            # nn.ModuleList so its parameters are registered (a plain
            # Python list would silently skip them)
            sub_heads = []
            for pos in selfcon_pos:
                if pos:
                    sub_heads.append(nn.Linear(dim_in, feat_dim))
            self.sub_heads = nn.ModuleList(sub_heads)
        elif head == 'mlp':
            self.head = nn.Sequential(
                nn.Linear(dim_in, dim_in),
                nn.ReLU(inplace=True),
                nn.Linear(dim_in, feat_dim)
            )
            heads = []
            for pos in selfcon_pos:
                if pos:
                    heads.append(nn.Sequential(
                        nn.Linear(dim_in, dim_in),
                        nn.ReLU(inplace=True),
                        nn.Linear(dim_in, feat_dim)
                    ))
            self.sub_heads = nn.ModuleList(heads)
        else:
            raise NotImplementedError(
                'head not supported: {}'.format(head))

    def forward(self, x):
        # the encoder returns (list of intermediate features, final feature)
        sub_feat, feat = self.encoder(x)
        print(sub_feat, feat)          # debug: raw encoder outputs
        print("******")
        sh_feat = []
        for sf, sub_head in zip(sub_feat, self.sub_heads):
            print(sf, sub_head, sub_feat, self.sub_heads)
            # project each intermediate feature and L2-normalize per row
            sh_feat.append(F.normalize(sub_head(sf), dim=1))
        print("***")
        print(sh_feat)                 # debug: normalized sub-head features
        print("*************************")
        input()                        # pause so the trace below can be read
        feat = F.normalize(self.head(feat), dim=1)
        return sh_feat, feat
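A minimal smoke test of the multi-head wiring, assuming everything lives in one script. ToyEncoder and the model_dict entry here are hypothetical stand-ins for the repo's real backbone builders, which return the same (sub_feature_list, final_feature) pair:

import torch
import torch.nn as nn

# Hypothetical stand-in for the repo's backbone: returns one intermediate
# feature per True entry in selfcon_pos, plus the final feature, mimicking
# the real encoder's interface.
class ToyEncoder(nn.Module):
    def __init__(self, selfcon_pos=[False, True, False], dim_in=2048, **kwargs):
        super().__init__()
        self.n_sub, self.dim_in = sum(selfcon_pos), dim_in
    def forward(self, x):
        b = x.size(0)
        return ([torch.randn(b, self.dim_in) for _ in range(self.n_sub)],
                torch.randn(b, self.dim_in))

model_dict = {'resnet50': (ToyEncoder, 2048)}  # assumed (builder, dim_in) layout

model = ConResNet(name='resnet50', head='mlp', selfcon_pos=[False, True, False])
sub_out, out = model(torch.randn(4, 3, 32, 32))   # press Enter at the input() pause
print(len(sub_out), sub_out[0].shape, out.shape)  # 1 torch.Size([4, 128]) torch.Size([4, 128])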
Core Code:
sub_feat, feat = self.encoder(x)
print(sub_feat, feat)
print("******")
sh_feat = []
for sf, sub_head in zip(sub_feat, self.sub_heads):
    print(sf, sub_head, sub_feat, self.sub_heads)
    sh_feat.append(F.normalize(sub_head(sf), dim=1))
print("***")
print(sh_feat)
print("*************************")
input()
feat = F.normalize(self.head(feat), dim=1)
return sh_feat, feat
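The loop zips each intermediate feature with its own projection head, so the number of enabled sub-heads must match the number of intermediate features the encoder emits. F.normalize(t, dim=1) divides each row by its L2 norm, putting every projected feature on the unit hypersphere; a quick check of just that call:

import torch
import torch.nn.functional as F

t = torch.tensor([[3.0, 4.0],
                  [1.0, 0.0]])
u = F.normalize(t, dim=1)   # each row divided by its L2 norm
print(u)                    # tensor([[0.6000, 0.8000], [1.0000, 0.0000]])
print(u.norm(dim=1))        # tensor([1., 1.]) -- unit-length rows

In the trace below, only one entry of selfcon_pos is True, so sub_feat holds a single tensor and sub_heads a single MLP.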
Files already downloaded and verified
[tensor([[0.3709, 0.4060, 0.4357,  ..., 0.4369, 0.7116, 0.6154],
        [0.3255, 0.8886, 0.4587,  ..., 0.3097, 0.6247, 0.6515],
        [0.3265, 0.5572, 0.5465,  ..., 0.1894, 0.5648, 0.7019],
        ...,
        [0.3285, 0.7454, 0.3656,  ..., 0.4739, 0.4305, 0.3940],
        [4.2099, 2.0158, 2.8063,  ..., 3.5441, 2.7423, 1.8907],
        [5.0028, 4.0435, 6.1597,  ..., 6.2176, 2.4172, 1.5914]],
       device='cuda:0', grad_fn=<ReshapeAliasBackward0>)] tensor([[0.5324, 0.5882, 0.8503,  ..., 0.7225, 0.8470, 0.3471],
        [0.7275, 0.6198, 0.7865,  ..., 0.7751, 0.9618, 0.8199],
        [0.7426, 0.7385, 0.9845,  ..., 0.4950, 0.7898, 0.4606],
        ...,
        [0.6489, 0.7440, 1.0151,  ..., 0.5916, 1.0089, 0.2878],
        [0.6675, 1.0148, 1.7714,  ..., 1.4871, 0.8820, 5.0314],
        [0.7732, 0.9582, 2.3896,  ..., 1.7296, 0.9024, 5.8410]],
       device='cuda:0', grad_fn=<ReshapeAliasBackward0>)
******
tensor([[0.3709, 0.4060, 0.4357,  ..., 0.4369, 0.7116, 0.6154],
        [0.3255, 0.8886, 0.4587,  ..., 0.3097, 0.6247, 0.6515],
        [0.3265, 0.5572, 0.5465,  ..., 0.1894, 0.5648, 0.7019],
        ...,
        [0.3285, 0.7454, 0.3656,  ..., 0.4739, 0.4305, 0.3940],
        [4.2099, 2.0158, 2.8063,  ..., 3.5441, 2.7423, 1.8907],
        [5.0028, 4.0435, 6.1597,  ..., 6.2176, 2.4172, 1.5914]],
       device='cuda:0', grad_fn=<ReshapeAliasBackward0>) Sequential(
  (0): Linear(in_features=2048, out_features=2048, bias=True)
  (1): ReLU(inplace=True)
  (2): Linear(in_features=2048, out_features=128, bias=True)
) [tensor([[0.3709, 0.4060, 0.4357,  ..., 0.4369, 0.7116, 0.6154],
        [0.3255, 0.8886, 0.4587,  ..., 0.3097, 0.6247, 0.6515],
        [0.3265, 0.5572, 0.5465,  ..., 0.1894, 0.5648, 0.7019],
        ...,
        [0.3285, 0.7454, 0.3656,  ..., 0.4739, 0.4305, 0.3940],
        [4.2099, 2.0158, 2.8063,  ..., 3.5441, 2.7423, 1.8907],
        [5.0028, 4.0435, 6.1597,  ..., 6.2176, 2.4172, 1.5914]],
       device='cuda:0', grad_fn=<ReshapeAliasBackward0>)] ModuleList(
  (0): Sequential(
    (0): Linear(in_features=2048, out_features=2048, bias=True)
    (1): ReLU(inplace=True)
    (2): Linear(in_features=2048, out_features=128, bias=True)
  )
)
***
[tensor([[ 0.0400,  0.0202,  0.2806,  ..., -0.0758,  0.0742, -0.0301],
        [ 0.0687,  0.0602,  0.2580,  ..., -0.0850,  0.0880, -0.0687],
        [ 0.0607, -0.0127,  0.2398,  ..., -0.0618,  0.0845, -0.0610],
        ...,
        [ 0.0440,  0.0067,  0.2463,  ..., -0.0795,  0.0996, -0.0472],
        [ 0.1166,  0.0371,  0.1152,  ..., -0.1231, -0.0744,  0.0168],
        [ 0.0630,  0.0231,  0.1265,  ..., -0.1107, -0.0084, -0.0108]],
       device='cuda:0', grad_fn=<DivBackward0>)]
*************************
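The grad_fn=<DivBackward0> on the final tensor confirms the division by the norm ran. A hypothetical extra debug line, placed just before the input() pause in forward(), would verify that every row of each sub-head feature is unit length:

for t in sh_feat:
    print(t.norm(dim=1))  # each entry should print 1.0 up to float rounding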