How should I set the parameters to use MobileViT on CIFAR-10?
I changed the input size (CIFAR-10 images are 32x32), but the model's internal feature-map sizes no longer match the patch size, so the patch-rearrange step fails.
For example:
EinopsError: Shape mismatch, can't divide axis of length 2 in chunks of 4
During handling of the above exception, another exception occurred:
EinopsError Traceback (most recent call last)
C:\Users\ADMINI~1\AppData\Local\Temp/ipykernel_16468/2105769576.py in
44 torch.cuda.empty_cache()
45 if __name__ == "__main__":
---> 46 main()
C:\Users\ADMINI~1\AppData\Local\Temp/ipykernel_16468/2105769576.py in main()
31
32 for epoch in range(1, total_epoch+1):
---> 33 train_one_epoch(model,
34 train_dataloader,
35 criterion,
C:\Users\ADMINI~1\AppData\Local\Temp/ipykernel_16468/4152541084.py in train_one_epoch(model, dataloader, criterion, optimizer, epoch, total_epoch, report_freq)
70 label = data[1].to(device)
71
---> 72 out = model(image)
73 loss = criterion(out, label)
74
D:\anaconda\envs\CoAtnet\lib\site-packages\torch\nn\modules\module.py in _call_impl(self, *input, **kwargs)
1100 if not (self._backward_hooks or self._forward_hooks or self._forward_pre_hooks or _global_backward_hooks
1101 or _global_forward_hooks or _global_forward_pre_hooks):
-> 1102 return forward_call(*input, **kwargs)
1103 # Do not call functions when jit is used
1104 full_backward_hooks, non_full_backward_hooks = [], []
E:\code\awesome_lightweight_networks-main\light_cnns\Transformer\mobile_vit.py in forward(self, x)
216
217 x = self.mv25
--> 218 x = self.mvit1
219
220 x = self.mv26
D:\anaconda\envs\CoAtnet\lib\site-packages\torch\nn\modules\module.py in _call_impl(self, *input, **kwargs)
1100 if not (self._backward_hooks or self._forward_hooks or self._forward_pre_hooks or _global_backward_hooks
1101 or _global_forward_hooks or _global_forward_pre_hooks):
-> 1102 return forward_call(*input, **kwargs)
1103 # Do not call functions when jit is used
1104 full_backward_hooks, non_full_backward_hooks = [], []
E:\code\awesome_lightweight_networks-main\light_cnns\Transformer\mobile_vit.py in forward(self, x)
162 # Global representations
163 _, _, h, w = x.shape
--> 164 x = rearrange(x, 'b d (h ph) (w pw) -> b (ph pw) (h w) d', ph=self.ph, pw=self.pw)
165 x = self.transformer(x)
166 x = rearrange(x, 'b (ph pw) (h w) d -> b d (h ph) (w pw)', h=h // self.ph, w=w // self.pw, ph=self.ph,
D:\anaconda\envs\CoAtnet\lib\site-packages\einops\einops.py in rearrange(tensor, pattern, **axes_lengths)
450 raise TypeError("Rearrange can't be applied to an empty list")
451 tensor = get_backend(tensor[0]).stack_on_zeroth_dimension(tensor)
--> 452 return reduce(tensor, pattern, reduction='rearrange', **axes_lengths)
453
454
D:\anaconda\envs\CoAtnet\lib\site-packages\einops\einops.py in reduce(tensor, pattern, reduction, **axes_lengths)
388 message += '\n Input is list. '
389 message += 'Additional info: {}.'.format(axes_lengths)
--> 390 raise EinopsError(message + '\n {}'.format(e))
391
392
EinopsError: Error while processing rearrange-reduction pattern "b d (h ph) (w pw) -> b (ph pw) (h w) d".
Input tensor shape: torch.Size([200, 80, 2, 2]). Additional info: {'ph': 4, 'pw': 4}.
Shape mismatch, can't divide axis of length 2 in chunks of 4
(i.e., by the time the input reaches this MobileViT block, the feature map is only 2x2, which cannot be split into 4x4 patches — with 32x32 inputs, either reduce the patch size ph=pw to 2 or 1, or reduce the number of downsampling stages before this block.)