Hello,
I encountered the following error when attempting to execute the example from notebook 3.
It's the same code run in a newly initialized environment, so I couldn't figure out what might be causing this problem.
Traceback (most recent call last):
File "C:\research\feat\feat\detector.py", line 787, in process_frame
detected_faces = self.detect_faces(frame=frames)
File "C:\research\feat\feat\detector.py", line 325, in detect_faces
faces, poses = self.face_detector(frame)
File "C:\research\feat\feat\facepose_detectors\img2pose\img2pose_test.py", line 99, in __call__
preds = self.scale_and_predict(img)
File "C:\research\feat\feat\facepose_detectors\img2pose\img2pose_test.py", line 132, in scale_and_predict
preds = self.predict(img, border_size, scale)
File "C:\research\feat\feat\facepose_detectors\img2pose\img2pose_test.py", line 169, in predict
pred = self.model.predict([self.transform(img)])[0]
File "C:\research\feat\feat\facepose_detectors\img2pose\img2pose_model.py", line 107, in predict
predictions = self.run_model(imgs)
File "C:\research\feat\feat\facepose_detectors\img2pose\img2pose_model.py", line 95, in run_model
outputs = self.fpn_model(imgs, targets)
File "C:\Users\owner\anaconda3\envs\py38\lib\site-packages\torch\nn\modules\module.py", line 1110, in _call_impl
return forward_call(*input, **kwargs)
File "C:\Users\owner\anaconda3\envs\py38\lib\site-packages\torch\nn\parallel\data_parallel.py", line 168, in forward
outputs = self.parallel_apply(replicas, inputs, kwargs)
File "C:\Users\owner\anaconda3\envs\py38\lib\site-packages\torch\nn\parallel\data_parallel.py", line 178, in parallel_apply
return parallel_apply(replicas, inputs, kwargs, self.device_ids[:len(replicas)])
File "C:\Users\owner\anaconda3\envs\py38\lib\site-packages\torch\nn\parallel\parallel_apply.py", line 86, in parallel_apply
output.reraise()
File "C:\Users\owner\anaconda3\envs\py38\lib\site-packages\torch\_utils.py", line 457, in reraise
raise exception
RuntimeError: Caught RuntimeError in replica 0 on device 0.
Original Traceback (most recent call last):
File "C:\Users\owner\anaconda3\envs\py38\lib\site-packages\torch\nn\parallel\parallel_apply.py", line 61, in _worker
output = module(*input, **kwargs)
File "C:\Users\owner\anaconda3\envs\py38\lib\site-packages\torch\nn\modules\module.py", line 1110, in _call_impl
return forward_call(*input, **kwargs)
File "C:\research\feat\feat\facepose_detectors\img2pose\deps\generalized_rcnn.py", line 59, in forward
images, targets = self.transform(images, targets)
File "C:\Users\owner\anaconda3\envs\py38\lib\site-packages\torch\nn\modules\module.py", line 1110, in _call_impl
return forward_call(*input, **kwargs)
File "C:\Users\owner\anaconda3\envs\py38\lib\site-packages\torchvision\models\detection\transform.py", line 127, in forward
image = self.normalize(image)
File "C:\Users\owner\anaconda3\envs\py38\lib\site-packages\torchvision\models\detection\transform.py", line 152, in normalize
return (image - mean[:, None, None]) / std[:, None, None]
RuntimeError: The size of tensor a (2) must match the size of tensor b (3) at non-singleton dimension 0
exception occurred in the batch
Since singleframe4error=FALSE, giving up this entire batch result
Traceback (most recent call last):
File "C:\research\feat\feat\detector.py", line 787, in process_frame
detected_faces = self.detect_faces(frame=frames)
File "C:\research\feat\feat\detector.py", line 325, in detect_faces
faces, poses = self.face_detector(frame)
File "C:\research\feat\feat\facepose_detectors\img2pose\img2pose_test.py", line 99, in __call__
preds = self.scale_and_predict(img)
File "C:\research\feat\feat\facepose_detectors\img2pose\img2pose_test.py", line 132, in scale_and_predict
preds = self.predict(img, border_size, scale)
File "C:\research\feat\feat\facepose_detectors\img2pose\img2pose_test.py", line 169, in predict
pred = self.model.predict([self.transform(img)])[0]
File "C:\research\feat\feat\facepose_detectors\img2pose\img2pose_model.py", line 107, in predict
predictions = self.run_model(imgs)
File "C:\research\feat\feat\facepose_detectors\img2pose\img2pose_model.py", line 95, in run_model
outputs = self.fpn_model(imgs, targets)
File "C:\Users\owner\anaconda3\envs\py38\lib\site-packages\torch\nn\modules\module.py", line 1110, in _call_impl
return forward_call(*input, **kwargs)
File "C:\Users\owner\anaconda3\envs\py38\lib\site-packages\torch\nn\parallel\data_parallel.py", line 168, in forward
outputs = self.parallel_apply(replicas, inputs, kwargs)
File "C:\Users\owner\anaconda3\envs\py38\lib\site-packages\torch\nn\parallel\data_parallel.py", line 178, in parallel_apply
return parallel_apply(replicas, inputs, kwargs, self.device_ids[:len(replicas)])
File "C:\Users\owner\anaconda3\envs\py38\lib\site-packages\torch\nn\parallel\parallel_apply.py", line 86, in parallel_apply
output.reraise()
File "C:\Users\owner\anaconda3\envs\py38\lib\site-packages\torch\_utils.py", line 457, in reraise
raise exception
RuntimeError: Caught RuntimeError in replica 0 on device 0.
Original Traceback (most recent call last):
File "C:\Users\owner\anaconda3\envs\py38\lib\site-packages\torch\nn\parallel\parallel_apply.py", line 61, in _worker
output = module(*input, **kwargs)
File "C:\Users\owner\anaconda3\envs\py38\lib\site-packages\torch\nn\modules\module.py", line 1110, in _call_impl
return forward_call(*input, **kwargs)
File "C:\research\feat\feat\facepose_detectors\img2pose\deps\generalized_rcnn.py", line 59, in forward
images, targets = self.transform(images, targets)
File "C:\Users\owner\anaconda3\envs\py38\lib\site-packages\torch\nn\modules\module.py", line 1110, in _call_impl
return forward_call(*input, **kwargs)
File "C:\Users\owner\anaconda3\envs\py38\lib\site-packages\torchvision\models\detection\transform.py", line 127, in forward
image = self.normalize(image)
File "C:\Users\owner\anaconda3\envs\py38\lib\site-packages\torchvision\models\detection\transform.py", line 152, in normalize
return (image - mean[:, None, None]) / std[:, None, None]
RuntimeError: The size of tensor a (2) must match the size of tensor b (3) at non-singleton dimension 0
exception occurred in the batch
Since singleframe4error=FALSE, giving up this entire batch result
Traceback (most recent call last):
File "C:\research\feat\feat\detector.py", line 787, in process_frame
detected_faces = self.detect_faces(frame=frames)
File "C:\research\feat\feat\detector.py", line 325, in detect_faces
faces, poses = self.face_detector(frame)
File "C:\research\feat\feat\facepose_detectors\img2pose\img2pose_test.py", line 99, in __call__
preds = self.scale_and_predict(img)
File "C:\research\feat\feat\facepose_detectors\img2pose\img2pose_test.py", line 132, in scale_and_predict
preds = self.predict(img, border_size, scale)
File "C:\research\feat\feat\facepose_detectors\img2pose\img2pose_test.py", line 169, in predict
pred = self.model.predict([self.transform(img)])[0]
File "C:\research\feat\feat\facepose_detectors\img2pose\img2pose_model.py", line 107, in predict
predictions = self.run_model(imgs)
File "C:\research\feat\feat\facepose_detectors\img2pose\img2pose_model.py", line 95, in run_model
outputs = self.fpn_model(imgs, targets)
File "C:\Users\owner\anaconda3\envs\py38\lib\site-packages\torch\nn\modules\module.py", line 1110, in _call_impl
return forward_call(*input, **kwargs)
File "C:\Users\owner\anaconda3\envs\py38\lib\site-packages\torch\nn\parallel\data_parallel.py", line 168, in forward
outputs = self.parallel_apply(replicas, inputs, kwargs)
File "C:\Users\owner\anaconda3\envs\py38\lib\site-packages\torch\nn\parallel\data_parallel.py", line 178, in parallel_apply
return parallel_apply(replicas, inputs, kwargs, self.device_ids[:len(replicas)])
File "C:\Users\owner\anaconda3\envs\py38\lib\site-packages\torch\nn\parallel\parallel_apply.py", line 86, in parallel_apply
output.reraise()
File "C:\Users\owner\anaconda3\envs\py38\lib\site-packages\torch\_utils.py", line 457, in reraise
raise exception
RuntimeError: Caught RuntimeError in replica 0 on device 0.
Original Traceback (most recent call last):
File "C:\Users\owner\anaconda3\envs\py38\lib\site-packages\torch\nn\parallel\parallel_apply.py", line 61, in _worker
output = module(*input, **kwargs)
File "C:\Users\owner\anaconda3\envs\py38\lib\site-packages\torch\nn\modules\module.py", line 1110, in _call_impl
return forward_call(*input, **kwargs)
File "C:\research\feat\feat\facepose_detectors\img2pose\deps\generalized_rcnn.py", line 59, in forward
images, targets = self.transform(images, targets)
File "C:\Users\owner\anaconda3\envs\py38\lib\site-packages\torch\nn\modules\module.py", line 1110, in _call_impl
return forward_call(*input, **kwargs)
File "C:\Users\owner\anaconda3\envs\py38\lib\site-packages\torchvision\models\detection\transform.py", line 127, in forward
image = self.normalize(image)
File "C:\Users\owner\anaconda3\envs\py38\lib\site-packages\torchvision\models\detection\transform.py", line 152, in normalize
return (image - mean[:, None, None]) / std[:, None, None]
RuntimeError: The size of tensor a (2) must match the size of tensor b (3) at non-singleton dimension 0
exception occurred in the batch
Since singleframe4error=FALSE, giving up this entire batch result
Traceback (most recent call last):
File "C:\research\feat\feat\detector.py", line 787, in process_frame
detected_faces = self.detect_faces(frame=frames)
File "C:\research\feat\feat\detector.py", line 325, in detect_faces
faces, poses = self.face_detector(frame)
File "C:\research\feat\feat\facepose_detectors\img2pose\img2pose_test.py", line 99, in __call__
preds = self.scale_and_predict(img)
File "C:\research\feat\feat\facepose_detectors\img2pose\img2pose_test.py", line 132, in scale_and_predict
preds = self.predict(img, border_size, scale)
File "C:\research\feat\feat\facepose_detectors\img2pose\img2pose_test.py", line 169, in predict
pred = self.model.predict([self.transform(img)])[0]
File "C:\research\feat\feat\facepose_detectors\img2pose\img2pose_model.py", line 107, in predict
predictions = self.run_model(imgs)
File "C:\research\feat\feat\facepose_detectors\img2pose\img2pose_model.py", line 95, in run_model
outputs = self.fpn_model(imgs, targets)
File "C:\Users\owner\anaconda3\envs\py38\lib\site-packages\torch\nn\modules\module.py", line 1110, in _call_impl
return forward_call(*input, **kwargs)
File "C:\Users\owner\anaconda3\envs\py38\lib\site-packages\torch\nn\parallel\data_parallel.py", line 168, in forward
outputs = self.parallel_apply(replicas, inputs, kwargs)
File "C:\Users\owner\anaconda3\envs\py38\lib\site-packages\torch\nn\parallel\data_parallel.py", line 178, in parallel_apply
return parallel_apply(replicas, inputs, kwargs, self.device_ids[:len(replicas)])
File "C:\Users\owner\anaconda3\envs\py38\lib\site-packages\torch\nn\parallel\parallel_apply.py", line 86, in parallel_apply
output.reraise()
File "C:\Users\owner\anaconda3\envs\py38\lib\site-packages\torch\_utils.py", line 457, in reraise
raise exception
RuntimeError: Caught RuntimeError in replica 0 on device 0.
Original Traceback (most recent call last):
File "C:\Users\owner\anaconda3\envs\py38\lib\site-packages\torch\nn\parallel\parallel_apply.py", line 61, in _worker
output = module(*input, **kwargs)
File "C:\Users\owner\anaconda3\envs\py38\lib\site-packages\torch\nn\modules\module.py", line 1110, in _call_impl
return forward_call(*input, **kwargs)
File "C:\research\feat\feat\facepose_detectors\img2pose\deps\generalized_rcnn.py", line 59, in forward
images, targets = self.transform(images, targets)
File "C:\Users\owner\anaconda3\envs\py38\lib\site-packages\torch\nn\modules\module.py", line 1110, in _call_impl
return forward_call(*input, **kwargs)
File "C:\Users\owner\anaconda3\envs\py38\lib\site-packages\torchvision\models\detection\transform.py", line 127, in forward
image = self.normalize(image)
File "C:\Users\owner\anaconda3\envs\py38\lib\site-packages\torchvision\models\detection\transform.py", line 152, in normalize
return (image - mean[:, None, None]) / std[:, None, None]
RuntimeError: The size of tensor a (2) must match the size of tensor b (3) at non-singleton dimension 0
exception occurred in the batch
Since singleframe4error=FALSE, giving up this entire batch result
Traceback (most recent call last):
File "C:\research\feat\feat\detector.py", line 787, in process_frame
detected_faces = self.detect_faces(frame=frames)
File "C:\research\feat\feat\detector.py", line 325, in detect_faces
faces, poses = self.face_detector(frame)
File "C:\research\feat\feat\facepose_detectors\img2pose\img2pose_test.py", line 99, in __call__
preds = self.scale_and_predict(img)
File "C:\research\feat\feat\facepose_detectors\img2pose\img2pose_test.py", line 132, in scale_and_predict
preds = self.predict(img, border_size, scale)
File "C:\research\feat\feat\facepose_detectors\img2pose\img2pose_test.py", line 169, in predict
pred = self.model.predict([self.transform(img)])[0]
File "C:\research\feat\feat\facepose_detectors\img2pose\img2pose_model.py", line 107, in predict
predictions = self.run_model(imgs)
File "C:\research\feat\feat\facepose_detectors\img2pose\img2pose_model.py", line 95, in run_model
outputs = self.fpn_model(imgs, targets)
File "C:\Users\owner\anaconda3\envs\py38\lib\site-packages\torch\nn\modules\module.py", line 1110, in _call_impl
return forward_call(*input, **kwargs)
File "C:\Users\owner\anaconda3\envs\py38\lib\site-packages\torch\nn\parallel\data_parallel.py", line 168, in forward
outputs = self.parallel_apply(replicas, inputs, kwargs)
File "C:\Users\owner\anaconda3\envs\py38\lib\site-packages\torch\nn\parallel\data_parallel.py", line 178, in parallel_apply
return parallel_apply(replicas, inputs, kwargs, self.device_ids[:len(replicas)])
File "C:\Users\owner\anaconda3\envs\py38\lib\site-packages\torch\nn\parallel\parallel_apply.py", line 86, in parallel_apply
output.reraise()
File "C:\Users\owner\anaconda3\envs\py38\lib\site-packages\torch\_utils.py", line 457, in reraise
raise exception
RuntimeError: Caught RuntimeError in replica 0 on device 0.
Original Traceback (most recent call last):
File "C:\Users\owner\anaconda3\envs\py38\lib\site-packages\torch\nn\parallel\parallel_apply.py", line 61, in _worker
output = module(*input, **kwargs)
File "C:\Users\owner\anaconda3\envs\py38\lib\site-packages\torch\nn\modules\module.py", line 1110, in _call_impl
return forward_call(*input, **kwargs)
File "C:\research\feat\feat\facepose_detectors\img2pose\deps\generalized_rcnn.py", line 59, in forward
images, targets = self.transform(images, targets)
File "C:\Users\owner\anaconda3\envs\py38\lib\site-packages\torch\nn\modules\module.py", line 1110, in _call_impl
return forward_call(*input, **kwargs)
File "C:\Users\owner\anaconda3\envs\py38\lib\site-packages\torchvision\models\detection\transform.py", line 127, in forward
image = self.normalize(image)
File "C:\Users\owner\anaconda3\envs\py38\lib\site-packages\torchvision\models\detection\transform.py", line 152, in normalize
return (image - mean[:, None, None]) / std[:, None, None]
RuntimeError: The size of tensor a (2) must match the size of tensor b (3) at non-singleton dimension 0
exception occurred in the batch
Since singleframe4error=FALSE, giving up this entire batch result