I changed this line as suggested to use the GPU:
# This can be changed to a GPU, e.g. 'cuda:0'.
dev = torch.device('cuda:0')
Then I tried to execute the notebook and got the following error message:
---------------------------------------------------------------------------
RuntimeError Traceback (most recent call last)
/tmp/ipykernel_11/3257249919.py in <module>
1 import torch.nn.functional as F
2
----> 3 z_logits = enc(x)
4 z = torch.argmax(z_logits, axis=1)
5 z = F.one_hot(z, num_classes=enc.vocab_size).permute(0, 3, 1, 2).float()
/opt/conda/lib/python3.8/site-packages/torch/nn/modules/module.py in _call_impl(self, *input, **kwargs)
1108 if not (self._backward_hooks or self._forward_hooks or self._forward_pre_hooks or _global_backward_hooks
1109 or _global_forward_hooks or _global_forward_pre_hooks):
-> 1110 return forward_call(*input, **kwargs)
1111 # Do not call functions when jit is used
1112 full_backward_hooks, non_full_backward_hooks = [], []
/opt/conda/lib/python3.8/site-packages/dall_e/encoder.py in forward(self, x)
91 raise ValueError('input must have dtype torch.float32')
92
---> 93 return self.blocks(x)
/opt/conda/lib/python3.8/site-packages/torch/nn/modules/module.py in _call_impl(self, *input, **kwargs)
1108 if not (self._backward_hooks or self._forward_hooks or self._forward_pre_hooks or _global_backward_hooks
1109 or _global_forward_hooks or _global_forward_pre_hooks):
-> 1110 return forward_call(*input, **kwargs)
1111 # Do not call functions when jit is used
1112 full_backward_hooks, non_full_backward_hooks = [], []
/opt/conda/lib/python3.8/site-packages/torch/nn/modules/container.py in forward(self, input)
139 def forward(self, input):
140 for module in self:
--> 141 input = module(input)
142 return input
143
/opt/conda/lib/python3.8/site-packages/torch/nn/modules/module.py in _call_impl(self, *input, **kwargs)
1108 if not (self._backward_hooks or self._forward_hooks or self._forward_pre_hooks or _global_backward_hooks
1109 or _global_forward_hooks or _global_forward_pre_hooks):
-> 1110 return forward_call(*input, **kwargs)
1111 # Do not call functions when jit is used
1112 full_backward_hooks, non_full_backward_hooks = [], []
/opt/conda/lib/python3.8/site-packages/dall_e/utils.py in forward(self, x)
41 w, b = self.w, self.b
42
---> 43 return F.conv2d(x, w, b, padding=(self.kw - 1) // 2)
44
45 def map_pixels(x: torch.Tensor) -> torch.Tensor:
RuntimeError: Expected all tensors to be on the same device, but found at least two devices, cpu and cuda:0! (when checking argument for argument weight in method wrapper___slow_conv2d_forward)
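For reference, the failing check is on the weight argument inside the CPU conv2d wrapper, which suggests the encoder's weights were moved to cuda:0 while the input tensor x is still on the CPU. A minimal sketch of the fix, assuming the notebook's variable names (enc, x, dev) and that x is the CPU tensor returned by preprocess, is to move the input onto the same device before the forward pass:

import torch
import torch.nn.functional as F

# Assumption: enc was loaded onto dev (cuda:0) and x came from
# preprocess(), which returns a CPU tensor. Move the input to the
# same device as the model's weights.
x = x.to(dev)

z_logits = enc(x)  # weights and input now share a device
z = torch.argmax(z_logits, axis=1)
z = F.one_hot(z, num_classes=enc.vocab_size).permute(0, 3, 1, 2).float()

More generally, every tensor that participates in the forward pass has to live on the device the model was loaded onto; changing dev alone does not move tensors that were created before (or independently of) that change.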