Upgrading onnx (or torch) to the latest version makes the pytest suite fail. Concretely, running

pip install onnx onnxruntime --upgrade

reports

Successfully installed onnx-1.10.2 onnxruntime-1.9.0

and with these versions the test pipeline fails.
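As a stopgap, pinning both packages below the freshly installed versions should restore the previously passing environment. This assumes the failures come from the upgrade alone; I have not bisected the exact versions:

pip install "onnx<1.10.2" "onnxruntime<1.9.0"

The failing run: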
================================================================================================================================== test session starts ===================================================================================================================================
platform linux -- Python 3.9.7, pytest-6.2.5, py-1.11.0, pluggy-1.0.0
rootdir: <me>/Documents/travail/programs/onnx-pytorch
plugins: dash-2.0.0
collected 88 items
onnx_pytorch/tests/test_base.py .F.................F..................s................................................. [100%]
======================================================================================================================================== FAILURES ========================================================================================================================================
_________________________________________________________________________________________________________________ TestBase.test_conv_batchnorm_maxpool_flatten_add_relu __________________________________________________________________________________________________________________
self = <onnx_pytorch.tests.test_base.TestBase object at 0x7fce8a666880>
  def test_conv_batchnorm_maxpool_flatten_add_relu(self):
    reset_model(13)
    nps = [np.random.randn(1, 3, 224, 224).astype(np.float32)]
    inputs = Input(*nps)
    conv_node = Conv(inputs[0],
                     np.random.randn(32, 3, 3, 3).astype(np.float32),
                     np.random.randn(32).astype(np.float32))
    bn_node = BatchNormalization(
        conv_node,
        np.ones(32,).astype(np.float32),
        np.zeros(32,).astype(np.float32),
        np.random.randn(32).astype(np.float32),
        np.abs(np.random.randn(32).astype(np.float32)),
    )
    max_pool_node = MaxPool(bn_node,
                            kernel_shape=(3, 3),
                            strides=(2, 2),
                            pads=(0, 0, 1, 1))
    flatten_node = Flatten(max_pool_node, axis=1)
    add_node = Add(flatten_node, np.random.randn(1).astype(np.float32))
    relu_node = Relu(add_node)
    Output(relu_node)
>   self._run(list(zip(inputs, nps)))
onnx_pytorch/tests/test_base.py:103:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
self = <onnx_pytorch.tests.test_base.TestBase object at 0x7fce8a666880>
inputs_np = [('_t_Input_0', array([[[[ 1.0018734 , -0.62048906, 1.2765806 , ..., 0.25725722,
-1.1847678 , 1.8534303 ]... [-0.86980325, -0.2758593 , 0.05530448, ..., 0.2182875 ,
0.33060816, 0.6260562 ]]]], dtype=float32))]
  def _run(self, inputs_np):
    inputs_np_dict = {k: v for k, v in inputs_np if k != ""}
    model = onnx.ModelProto()
    model.CopyFrom(omm.model)
    sess_options = onnxruntime.SessionOptions()
    session = onnxruntime.InferenceSession(model.SerializeToString(),
                                           sess_options)
    ort_outputs = session.run(None, inputs_np_dict)
    model.graph.ClearField("value_info")
    initializers = {i.name: i for i in model.graph.initializer}
    for i in model.graph.input:
      if i.name in initializers:
        continue
      for idx, d in enumerate(i.type.tensor_type.shape.dim):
        if d.dim_param != "":
          d.ClearField("dim_param")
          d.dim_value = inputs_np_dict[i.name].shape[idx]
    try:
      model = SymbolicShapeInference.infer_shapes(model, 2**31 - 1, True, True,
                                                  1)
    except:
      logging.warning("Shape infer by onnxruntime failed.")
    with TemporaryDirectory() as tmpdir:
      clear_op_code_generator()
      model_code_generator = code_gen.get_model_code_generator(
          model,
          output_dir=tmpdir,
          tensor_inplace=True,
          simplify_names=True,
          shape_infer=False)
      model_code_generator.run()
      spec = importlib.util.spec_from_file_location(
          "model", os.path.join(tmpdir, "model.py"))
      mod = importlib.util.module_from_spec(spec)
      spec.loader.exec_module(mod)
      pt_outputs = mod.test_run_model(
          [torch.from_numpy(v) for k, v in inputs_np if k != ""])
      if type(pt_outputs) == torch.Tensor:
        pt_outputs = [pt_outputs.detach().numpy()]
      elif type(pt_outputs) in (list, tuple):
        pt_outputs = [o.detach().numpy() for o in pt_outputs]
      for l, r in zip(ort_outputs, pt_outputs):
>       assert np.allclose(l, r, atol=1e-4, rtol=1e-4, equal_nan=True)
E assert False
E + where False = <function allclose at 0x7fcee3f60550>(array([[1.3416731 , 0.8318468 , 0.6191998 , ..., 1.1701062 , 0.6089205 ,\n 0.57694536]], dtype=float32), array([[10.049213 , 6.957016 , 5.667273 , ..., 10.965231 , 7.2742968,\n 7.0639963]], dtype=float32), atol=0.0001, rtol=0.0001, equal_nan=True)
E + where <function allclose at 0x7fcee3f60550> = np.allclose
onnx_pytorch/tests/test_base.py:67: AssertionError
---------------------------------------------------------------------------------------------------------------------------------- Captured stdout call ----------------------------------------------------------------------------------------------------------------------------------
# Autogenerated by onnx-pytorch.
import glob
import os
import math
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import torchvision


class Model(nn.Module):

  def __init__(self):
    super(Model, self).__init__()
    self._vars = nn.ParameterDict()
    self._regularizer_params = []
    for b in glob.glob(
        os.path.join(os.path.dirname(__file__), "variables", "*.npy")):
      v = torch.from_numpy(np.load(b))
      requires_grad = v.dtype.is_floating_point or v.dtype.is_complex
      self._vars[os.path.basename(b)[:-4]] = nn.Parameter(v, requires_grad=requires_grad)
    self.n_Conv_0 = nn.Conv2d(**{'groups': 1, 'dilation': 1, 'out_channels': 32, 'padding': 0, 'kernel_size': (3, 3), 'stride': 1, 'in_channels': 3, 'bias': True})
    self.n_Conv_0.weight.data = self._vars["t_0"]
    self.n_Conv_0.bias.data = self._vars["t_1"]
    self.n_BatchNormalization_0 = nn.BatchNorm2d(**{'num_features': 32, 'eps': 9.999999747378752e-06, 'momentum': 0.8999999761581421})
    self.n_BatchNormalization_0.weight.data = self._vars["t_2"]
    self.n_BatchNormalization_0.bias.data = self._vars["t_3"]
    self.n_BatchNormalization_0.running_mean.data = self._vars["t_4"]
    self.n_BatchNormalization_0.running_var.data = self._vars["t_5"]
    self.n_MaxPool_0 = nn.MaxPool2d(**{'dilation': 1, 'kernel_size': [3, 3], 'ceil_mode': False, 'stride': [2, 2], 'return_indices': True})
    self.n_Flatten_0 = nn.Flatten(**{'start_dim': 1})

  def forward(self, *inputs):
    t_7, = inputs
    t_8 = self.n_Conv_0(t_7)
    t_9 = self.n_BatchNormalization_0(t_8)
    t_9 = F.pad(t_9, [0, 1, 0, 1], value=float('-inf'))
    t_14, t_15 = self.n_MaxPool_0(t_9)
    t_16 = self.n_Flatten_0(t_14)
    t_17 = torch.add(t_16, self._vars["t_6"])
    t_18 = F.relu(t_17)
    return t_18

  def compatible_auto_pad(self, input, kernel_spatial_shape, nn_mod, auto_pad=None, **kwargs):
    input_spatial_shape = input.shape[2:]
    d = len(input_spatial_shape)
    strides = nn_mod.stride
    dilations = nn_mod.dilation
    output_spatial_shape = [math.ceil(float(l) / float(r)) for l, r in zip(input.shape[2:], strides)]
    pt_padding = [0] * 2 * d
    pad_shape = [0] * d
    for i in range(d):
      pad_shape[i] = (output_spatial_shape[i] - 1) * strides[i] + ((kernel_spatial_shape[i] - 1) * dilations[i] + 1) - input_spatial_shape[i]
      mean = pad_shape[i] // 2
      if auto_pad == b"SAME_UPPER":
        l, r = pad_shape[i] - mean, mean
      else:
        l, r = mean, pad_shape[i] - mean
      pt_padding.insert(0, r)
      pt_padding.insert(0, l)
    return F.pad(input, pt_padding)


@torch.no_grad()
def test_run_model(inputs=[torch.from_numpy(np.random.randn(*[1, 3, 224, 224]).astype(np.float32))]):
  model = Model()
  model.eval()
  rs = model(*inputs)
  print(rs)
  return rs

tensor([[10.04921341,  6.95701599,  5.66727304,  ..., 10.96523094,
          7.27429676,  7.06399632]])
----------------------------------------------------------------------------------------------------------------------------------- Captured log call ------------------------------------------------------------------------------------------------------------------------------------
WARNING root:__init__.py:41 Cannot get default value for dilations of MaxPool.
WARNING root:__init__.py:41 Cannot get default value for kernel_shape of MaxPool.
WARNING root:__init__.py:41 Cannot get default value for pads of MaxPool.
WARNING root:__init__.py:41 Cannot get default value for strides of MaxPool.
WARNING root:MaxPool.py:47 MaxPool with asymmetric padding will get incorrect indices.
___________________________________________________________________________________________________________________________ TestBase.test_batch_normalization ____________________________________________________________________________________________________________________________
self = <onnx_pytorch.tests.test_base.TestBase object at 0x7fce88ce44c0>
  def test_batch_normalization(self):
    reset_model(13)
    nps = [np.random.randn(1, 32, 3, 3).astype(np.float32)]
    inputs = Input(*nps)
    Output(BatchNormalization(
        inputs[0],
        np.ones(32,).astype(np.float32),
        np.zeros(32,).astype(np.float32),
        np.random.randn(32).astype(np.float32),
        np.abs(np.random.randn(32).astype(np.float32)),
    ),
           output_num=1)
>   self._run(list(zip(inputs, nps)))
onnx_pytorch/tests/test_base.py:239:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
self = <onnx_pytorch.tests.test_base.TestBase object at 0x7fce88ce44c0>
inputs_np = [('_t_Input_0', array([[[[ 6.35267049e-02, 5.02886951e-01, -6.22651100e-01],
[ 1.44260633e+00, 1.56048670e-...51401734e-01, 5.14413416e-01],
[-1.90268409e+00, -7.60383308e-02, 2.99409509e-01]]]],
dtype=float32))]
  def _run(self, inputs_np):
    inputs_np_dict = {k: v for k, v in inputs_np if k != ""}
    model = onnx.ModelProto()
    model.CopyFrom(omm.model)
    sess_options = onnxruntime.SessionOptions()
    session = onnxruntime.InferenceSession(model.SerializeToString(),
                                           sess_options)
    ort_outputs = session.run(None, inputs_np_dict)
    model.graph.ClearField("value_info")
    initializers = {i.name: i for i in model.graph.initializer}
    for i in model.graph.input:
      if i.name in initializers:
        continue
      for idx, d in enumerate(i.type.tensor_type.shape.dim):
        if d.dim_param != "":
          d.ClearField("dim_param")
          d.dim_value = inputs_np_dict[i.name].shape[idx]
    try:
      model = SymbolicShapeInference.infer_shapes(model, 2**31 - 1, True, True,
                                                  1)
    except:
      logging.warning("Shape infer by onnxruntime failed.")
    with TemporaryDirectory() as tmpdir:
      clear_op_code_generator()
      model_code_generator = code_gen.get_model_code_generator(
          model,
          output_dir=tmpdir,
          tensor_inplace=True,
          simplify_names=True,
          shape_infer=False)
      model_code_generator.run()
      spec = importlib.util.spec_from_file_location(
          "model", os.path.join(tmpdir, "model.py"))
      mod = importlib.util.module_from_spec(spec)
      spec.loader.exec_module(mod)
      pt_outputs = mod.test_run_model(
          [torch.from_numpy(v) for k, v in inputs_np if k != ""])
      if type(pt_outputs) == torch.Tensor:
        pt_outputs = [pt_outputs.detach().numpy()]
      elif type(pt_outputs) in (list, tuple):
        pt_outputs = [o.detach().numpy() for o in pt_outputs]
      for l, r in zip(ort_outputs, pt_outputs):
>       assert np.allclose(l, r, atol=1e-4, rtol=1e-4, equal_nan=True)
E assert False
E + where False = <function allclose at 0x7fcee3f60550>(array([[[[-0.13030988, 0.44412366, -1.0274405 ],\n [ 1.6727427 , -0.00934371, -0.14003941],\n [ 1.48930...,\n [ 0.7121257 , -0.5435372 , 0.5330533 ],\n [-1.9084809 , -0.06336791, 0.31587568]]]], dtype=float32), array([[[[ 1.03302915e-02, 4.43110734e-01, -6.65571392e-01],\n [ 1.36875701e+00, 1.01466656e-01, 3.00002005e...8.79306126e+00, 1.40610695e+01],\n [ 2.11407280e+00, 1.11426420e+01, 1.29983692e+01]]]],\n dtype=float32), atol=0.0001, rtol=0.0001, equal_nan=True)
E + where <function allclose at 0x7fcee3f60550> = np.allclose
onnx_pytorch/tests/test_base.py:67: AssertionError
---------------------------------------------------------------------------------------------------------------------------------- Captured stdout call ----------------------------------------------------------------------------------------------------------------------------------
# Autogenerated by onnx-pytorch.
import glob
import os
import math
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import torchvision


class Model(nn.Module):

  def __init__(self):
    super(Model, self).__init__()
    self._vars = nn.ParameterDict()
    self._regularizer_params = []
    for b in glob.glob(
        os.path.join(os.path.dirname(__file__), "variables", "*.npy")):
      v = torch.from_numpy(np.load(b))
      requires_grad = v.dtype.is_floating_point or v.dtype.is_complex
      self._vars[os.path.basename(b)[:-4]] = nn.Parameter(v, requires_grad=requires_grad)
    self.n_BatchNormalization_0 = nn.BatchNorm2d(**{'num_features': 32, 'eps': 9.999999747378752e-06, 'momentum': 0.8999999761581421})
    self.n_BatchNormalization_0.weight.data = self._vars["t_0"]
    self.n_BatchNormalization_0.bias.data = self._vars["t_1"]
    self.n_BatchNormalization_0.running_mean.data = self._vars["t_2"]
    self.n_BatchNormalization_0.running_var.data = self._vars["t_3"]

  def forward(self, *inputs):
    t_4, = inputs
    t_5 = self.n_BatchNormalization_0(t_4)
    return t_5


@torch.no_grad()
def test_run_model(inputs=[torch.from_numpy(np.random.randn(*[1, 32, 3, 3]).astype(np.float32))]):
  model = Model()
  model.eval()
  rs = model(*inputs)
  print(rs)
  return rs
tensor([[[[ 1.03302915e-02, 4.43110734e-01, -6.65571392e-01],
[ 1.36875701e+00, 1.01466656e-01, 3.00002005e-03],
[ 1.23055291e+00, -6.36751056e-01, -8.78339052e-01]],
[[-4.64856595e-01, 1.01388752e+00, 2.45039845e+00],
[-1.51369238e+00, -7.56639481e-01, -1.26973033e+00],
[ 3.04206324e+00, -1.07024908e+00, 1.22984998e-01]],
[[-2.69752383e-01, -9.64242399e-01, -2.14787436e+00],
[-3.66215348e-01, -7.90006399e-01, -1.19138491e+00],
[-6.34383440e-01, 4.39469069e-01, -1.50392938e+00]],
[[ 5.44885218e-01, 1.98177516e+00, 2.14701653e+00],
[ 2.57987189e+00, 6.98854351e+00, 5.21536064e+00],
[-1.14435458e+00, 1.33780324e+00, 3.80742407e+00]],
[[-1.26968300e+00, -4.35954601e-01, 5.31747639e-01],
[-2.33643723e+00, -2.31319714e+00, -1.69136405e+00],
[-1.01814747e+00, -1.30057871e+00, 1.37861446e-01]],
[[-7.35616326e-01, -1.18806839e+00, -1.10327315e+00],
[-1.21497869e+00, 2.44642749e-01, -1.08295512e+00],
[-7.17091501e-01, -2.20478797e+00, -1.50086403e+00]],
[[-3.56589526e-01, -1.32543945e+00, -3.12406365e-02],
[-7.59021521e-01, 8.00770998e-01, -1.86119422e-01],
[-2.47674465e-01, 3.34041089e-01, 4.68768179e-01]],
[[-3.02949500e+00, -9.34190691e-01, -6.01976514e-01],
[-1.39591777e+00, 9.02901888e-01, -1.70761660e-01],
[-7.49238193e-01, -8.39863300e-01, -1.61441386e+00]],
[[ 5.27461350e-01, -1.29779911e+00, -1.84558618e+00],
[-1.37622201e+00, -2.75002476e-02, -4.80182886e-01],
[-1.48854208e+00, -2.23460600e-01, -1.37674761e+00]],
[[ 8.06057811e-01, 8.74002814e-01, -1.36947542e-01],
[ 1.77069342e+00, 1.01755619e+00, 3.84808660e-01],
[ 6.74725831e-01, 3.76408148e+00, 2.22828791e-01]],
[[ 3.71400404e+00, 2.69624019e+00, 1.77703583e+00],
[ 2.33299780e+00, 2.48477370e-01, 3.29037476e+00],
[ 1.03505504e+00, 2.66409278e+00, 3.81201744e+00]],
[[ 1.02166690e-01, -1.42813325e-01, -4.73593771e-01],
[-2.43843883e-01, 4.17272627e-01, 8.99561644e-01],
[-7.05574870e-01, 2.67669708e-01, 5.22894859e-01]],
[[-1.17352533e+00, -5.71887255e-01, -3.19737315e-01],
[-1.18356705e+00, -2.85988569e+00, -7.28449404e-01],
[-1.39273572e+00, -1.43941092e+00, -4.75017697e-01]],
[[-9.16496933e-01, -1.37783527e+00, 1.75405681e+00],
[-2.10685277e+00, -1.30036724e+00, 2.50304151e+00],
[ 3.88478422e+00, 8.30973566e-01, 3.44308519e+00]],
[[-1.08552837e+00, -1.35483885e+00, 9.10718501e-01],
[ 7.22618103e-01, -3.82872492e-01, 3.09645385e-01],
[ 1.25192356e+00, 1.48433483e+00, -7.20467627e-01]],
[[ 2.90476012e+00, 2.38905120e+00, 3.20962930e+00],
[ 4.72063154e-01, 1.03854692e+00, 1.42332995e+00],
[-2.65931457e-01, 2.61525941e+00, 1.36843193e+00]],
[[ 2.29905200e+00, 7.33413887e+00, -2.16392994e+01],
[-9.26441479e+00, -4.63282776e+00, 8.38395882e+00],
[-6.14768124e+00, -1.39623775e+01, -5.33043909e+00]],
[[-1.18203115e+00, 7.83545434e-01, -1.33013463e+00],
[ 1.55748868e+00, 2.99707323e-01, -1.74411178e-01],
[-3.15904379e-01, -1.27137268e+00, 2.87169278e-01]],
[[ 2.82064867e+00, -3.11068088e-01, -7.12420881e-01],
[ 1.99217871e-01, 8.75358164e-01, 5.74787557e-01],
[ 1.21458745e+00, 1.32562840e+00, 1.46251321e-01]],
[[-2.08626246e+00, -1.01060474e+00, -1.84688258e+00],
[-1.30853727e-01, -7.70996749e-01, 7.53721535e-01],
[ 1.19904697e+00, -1.62641481e-01, -8.22388411e-01]],
[[ 1.33589315e+00, 3.14021409e-01, 2.48438573e+00],
[-2.21844530e+00, 5.82929230e+00, 2.27573776e+00],
[ 5.50253439e+00, 2.19331694e+00, 4.72958851e+00]],
[[-1.88447189e+00, -9.36176181e-01, -1.94018316e+00],
[-1.43561804e+00, -4.47861242e+00, -3.19850969e+00],
[-9.75790977e-01, -2.53019547e+00, -2.31218606e-01]],
[[ 1.56031847e+00, -8.49840164e-01, 2.18206739e+00],
[ 1.86757004e+00, -9.00376320e-01, -3.14888433e-02],
[-2.60793537e-01, 3.81440073e-01, 1.87343729e+00]],
[[-2.49012423e+00, -1.80255661e+01, -1.39246368e+01],
[-7.12090111e+00, -1.14031210e+01, -3.02313328e+00],
[-5.08311844e+00, -7.04758024e+00, -8.73173904e+00]],
[[-3.17438930e-01, -5.40359974e-01, -8.29769790e-01],
[-2.39079952e+00, -7.72985220e-01, -1.00527453e+00],
[-4.49523091e-01, -1.43823814e+00, -8.15485835e-01]],
[[-1.75956070e+00, -3.46495295e+00, -5.70724130e-01],
[-1.35396278e+00, -1.52985775e+00, -9.15392518e-01],
[ 1.32145539e-01, -1.15701056e+00, -3.28669786e+00]],
[[ 9.83868241e-01, 1.86329472e+00, 3.16185784e+00],
[ 3.53541660e+00, 3.46067637e-01, -4.36942726e-01],
[ 8.96343887e-01, 1.15589023e+00, 1.66808695e-01]],
[[ 1.45385325e+00, -2.57331681e+00, 2.47062397e+00],
[ 5.09636497e+00, -4.55582333e+00, 6.47839642e+00],
[ 6.10593510e+00, 8.07678998e-01, 2.03531766e+00]],
[[-7.87889004e+00, 2.15410185e+00, -1.72434068e+00],
[-4.13584518e+00, -5.07564878e+00, -7.04525948e+00],
[-4.00902462e+00, 6.43981886e+00, 4.90088892e+00]],
[[-8.97298872e-01, -6.58248663e-01, 3.97185832e-01],
[ 1.26078165e+00, -5.88805914e-01, -1.58723903e+00],
[ 1.83342293e-01, 5.42823195e-01, -8.95587146e-01]],
[[-2.58091998e+00, 1.56836367e+00, 4.73235160e-01],
[ 6.95867360e-01, 3.10397220e+00, 8.56488526e-01],
[-5.79270065e-01, -1.23413563e+00, 2.25809479e+00]],
[[ 1.47533607e+01, 5.50610733e+00, 1.87684441e+01],
[ 1.49373131e+01, 8.79306126e+00, 1.40610695e+01],
[ 2.11407280e+00, 1.11426420e+01, 1.29983692e+01]]]])
==================================================================================================================================== warnings summary ====================================================================================================================================
../../../../anaconda3/envs/onnx-pytorch/lib/python3.9/site-packages/onnx/mapping.py:27
<me>/anaconda3/envs/onnx-pytorch/lib/python3.9/site-packages/onnx/mapping.py:27: DeprecationWarning: `np.object` is a deprecated alias for the builtin `object`. To silence this warning, use `object` by itself. Doing this will not modify any behavior and is safe.
Deprecated in NumPy 1.20; for more details and guidance: https://numpy.org/devdocs/release/1.20.0-notes.html#deprecations
int(TensorProto.STRING): np.dtype(np.object)
onnx_pytorch/tests/test_base.py: 186 warnings
<me>/anaconda3/envs/onnx-pytorch/lib/python3.9/site-packages/onnx/numpy_helper.py:93: DeprecationWarning: `np.object` is a deprecated alias for the builtin `object`. To silence this warning, use `object` by itself. Doing this will not modify any behavior and is safe.
Deprecated in NumPy 1.20; for more details and guidance: https://numpy.org/devdocs/release/1.20.0-notes.html#deprecations
if arr.dtype == np.object:
onnx_pytorch/tests/test_base.py::TestBase::test_conv_batchnorm_maxpool_flatten_add_relu
<me>/anaconda3/envs/onnx-pytorch/lib/python3.9/site-packages/onnx/helper.py:365: DeprecationWarning: Using or importing the ABCs from 'collections' instead of from 'collections.abc' is deprecated since Python 3.3, and in 3.10 it will stop working
is_iterable = isinstance(value, collections.Iterable)
onnx_pytorch/tests/test_base.py::TestBase::test_and
onnx_pytorch/tests/test_base.py::TestBase::test_and
/tmp/tmpdcjl7rk5/model.py:33: DeprecationWarning: `np.bool` is a deprecated alias for the builtin `bool`. To silence this warning, use `bool` by itself. Doing this will not modify any behavior and is safe. If you specifically wanted the numpy scalar type, use `np.bool_` here.
Deprecated in NumPy 1.20; for more details and guidance: https://numpy.org/devdocs/release/1.20.0-notes.html#deprecations
onnx_pytorch/tests/test_base.py::TestBase::test_non_zero
/tmp/tmpxjta2pa8/model.py:33: DeprecationWarning: `np.bool` is a deprecated alias for the builtin `bool`. To silence this warning, use `bool` by itself. Doing this will not modify any behavior and is safe. If you specifically wanted the numpy scalar type, use `np.bool_` here.
Deprecated in NumPy 1.20; for more details and guidance: https://numpy.org/devdocs/release/1.20.0-notes.html#deprecations
onnx_pytorch/tests/test_base.py::TestBase::test_resize_downsample_sizes_linear_pytorch_half_pixel
onnx_pytorch/tests/test_base.py::TestBase::test_resize_pt_bilinear
<me>/anaconda3/envs/onnx-pytorch/lib/python3.9/site-packages/torch/nn/functional.py:3454: UserWarning: Default upsampling behavior when mode=bilinear is changed to align_corners=False since 0.4.0. Please specify align_corners=True if the old behavior is desired. See the documentation of nn.Upsample for details.
warnings.warn(
-- Docs: https://docs.pytest.org/en/stable/warnings.html
================================================================================================================================ short test summary info =================================================================================================================================
FAILED onnx_pytorch/tests/test_base.py::TestBase::test_conv_batchnorm_maxpool_flatten_add_relu - assert False
FAILED onnx_pytorch/tests/test_base.py::TestBase::test_batch_normalization - assert False
================================================================================================================= 2 failed, 85 passed, 1 skipped, 193 warnings in 1.50s ==================================================================================================================
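Both failures implicate BatchNormalization: onnxruntime and the generated PyTorch module disagree by far more than the 1e-4 tolerance. Below is a minimal sketch (my own scaffolding, not code from this repo) that isolates the divergence from onnx-pytorch's code generator. It runs a single-node opset-13 BatchNormalization model through onnxruntime and compares against an eval-mode nn.BatchNorm2d with the same parameters; if the two already disagree here, the regression is on the onnx/onnxruntime side, and if they agree, the generated code path is the culprit.

# Minimal isolation check; all names and shapes below are my own and
# simply mirror test_batch_normalization, not onnx-pytorch internals.
import numpy as np
import onnx
import onnxruntime
import torch
from onnx import TensorProto, helper, numpy_helper

C = 32
x = np.random.randn(1, C, 3, 3).astype(np.float32)
scale = np.ones(C, dtype=np.float32)
bias = np.zeros(C, dtype=np.float32)
mean = np.random.randn(C).astype(np.float32)
var = np.abs(np.random.randn(C)).astype(np.float32)

# One BatchNormalization node at opset 13; a single output Y means
# inference-mode normalization with the supplied running stats.
node = helper.make_node(
    "BatchNormalization", ["X", "scale", "B", "mean", "var"], ["Y"],
    epsilon=1e-5)
graph = helper.make_graph(
    [node], "bn_check",
    [helper.make_tensor_value_info("X", TensorProto.FLOAT, [1, C, 3, 3])],
    [helper.make_tensor_value_info("Y", TensorProto.FLOAT, [1, C, 3, 3])],
    initializer=[
        numpy_helper.from_array(scale, "scale"),
        numpy_helper.from_array(bias, "B"),
        numpy_helper.from_array(mean, "mean"),
        numpy_helper.from_array(var, "var"),
    ])
model = helper.make_model(graph, opset_imports=[helper.make_opsetid("", 13)])
onnx.checker.check_model(model)

sess = onnxruntime.InferenceSession(model.SerializeToString())
ort_out = sess.run(None, {"X": x})[0]

# The same normalization in PyTorch, eval mode so running stats are used.
bn = torch.nn.BatchNorm2d(C, eps=1e-5)
bn.weight.data = torch.from_numpy(scale)
bn.bias.data = torch.from_numpy(bias)
bn.running_mean.data = torch.from_numpy(mean)
bn.running_var.data = torch.from_numpy(var)
bn.eval()
with torch.no_grad():
  pt_out = bn(torch.from_numpy(x)).numpy()

print("match:", np.allclose(ort_out, pt_out, atol=1e-4, rtol=1e-4))

The eps/momentum values in the generated code should not matter for this comparison: eval-mode BatchNorm2d never touches momentum, and both sides above use the same epsilon.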