mldl@mldlUB1604:~/ub16_prj/e2e-model-learning/power_sched$ python3 main.py --save .
setGPU: Setting GPU to: 0
0 0.07094819098711014 0.01747439242899418
1 0.0608951672911644 0.015597431920468807
2 0.04586026072502136 0.012867853045463562
3 0.03554176539182663 0.011823796667158604
..................................
997 0.002964276820421219 0.010087293572723866
998 0.0029753427952528 0.010150546208024025
999 0.0028919086325913668 0.011296983808279037
ERROR:root:An unexpected error occurred while tokenizing input
The following traceback may be corrupted or invalid
The error message is: ('EOF in multi-line string', (48, 66))
TypeError Traceback (most recent call last)
/usr/local/lib/python3.5/dist-packages/qpth-0.0.6-py3.5.egg/qpth/solvers/pdipm/batch.py in pre_factor_kkt(Q=
( 0 ,.,.) =
1055.5512 0.0000 0.0000...ch.cuda.DoubleTensor of size 2553x24x24 (GPU 0)]
, G=
( 0 ,.,.) =
1 -1 0 ... 0 0 0
...ch.cuda.DoubleTensor of size 2553x46x24 (GPU 0)]
, A=[torch.cuda.DoubleTensor with no dimension]
)
357 try:
--> 358 Q_LU = Q.btrifact(pivot=False)
Q_LU = undefined
Q.btrifact = <built-in method btrifact of torch.cuda.DoubleTensor object at 0x7f2709a5c7c8>
global pivot = undefined
359 except:
TypeError: btrifact received an invalid combination of arguments - got (pivot=bool, ), but expected one of:
- ()
didn't match because some of the keywords were incorrect: pivot
- (torch.cuda.IntTensor info)
During handling of the above exception, another exception occurred:
RuntimeError Traceback (most recent call last)
/home/mldl/ub16_prj/e2e-model-learning/power_sched/main.py in <module>()
151
152 if __name__=='__main__':
--> 153 main()
global main = <function main at 0x7f2709e1e620>
/home/mldl/ub16_prj/e2e-model-learning/power_sched/main.py in main()
71 model_rmse = nets.run_rmse_net(
72 model_rmse, variables_rmse, X_train, Y_train)
---> 73 nets.eval_net("rmse_net", model_rmse, variables_rmse, params, save_folder)
global nets.eval_net = <function eval_net at 0x7f270ec02510>
model_rmse = Net (
(lin): Linear (149 -> 24)
(net): Sequential (
(0): Linear (149 -> 200)
(1): BatchNorm1d(200, eps=1e-05, momentum=0.1, affine=True)
(2): ReLU ()
(3): Dropout (p = 0.2)
(4): Linear (200 -> 200)
(5): BatchNorm1d(200, eps=1e-05, momentum=0.1, affine=True)
(6): ReLU ()
(7): Dropout (p = 0.2)
(8): Linear (200 -> 24)
)
)
variables_rmse = {'X_train_': Variable containing:
-4.0186e-01 7.3033e-02 3.3559e-02 ... -1.6573e-01 4.8663e-02 -6.3155e-01
4.1893e-01 4.7020e-01 5.5965e-01 ... -1.6573e-01 7.2976e-02 -6.3155e-01
9.7893e-01 1.0885e+00 1.3457e+00 ... -1.6573e-01 9.7268e-02 -6.3155e-01
... ⋱ ...
-1.0027e+00 -9.6148e-01 -9.6799e-01 ... -1.3273e-01 -1.4598e-01 1.5815e+00
-6.2014e-01 -6.1378e-01 -6.5050e-01 ... -1.3273e-01 -1.2164e-01 1.5815e+00
-8.7676e-01 -8.8478e-01 -8.8041e-01 ... -1.3273e-01 -9.7281e-02 -6.3233e-01
[torch.cuda.FloatTensor of size 2553x149 (GPU 0)]
, 'X_test_': Variable containing:
-0.1722 -0.0922 -0.0812 ... -0.1327 -0.0729 -0.6323
0.0751 0.1430 0.2965 ... -0.1327 -0.0485 -0.6323
0.5183 0.4600 0.7180 ... -0.1327 -0.0241 -0.6323
... ⋱ ...
-1.0261 -1.1711 -1.2253 ... -0.1327 -1.4622 -0.6323
-1.2920 -1.3552 -1.4059 ... -0.1327 -1.4635 -0.6323
-1.0121 -1.1967 -1.2472 ... -0.1327 -1.4645 -0.6323
[torch.cuda.FloatTensor of size 639x149 (GPU 0)]
, 'Y_test_': Variable containing:
1.5750 1.5000 1.4730 ... 1.8880 1.8380 1.7480
1.6700 1.5620 1.5500 ... 1.8470 1.8250 1.7090
1.6560 1.5600 1.5430 ... 1.7330 1.6720 1.5990
... ⋱ ...
1.2820 1.2070 1.1620 ... 1.6300 1.5460 1.4420
1.3420 1.2380 1.1910 ... 1.5690 1.4880 1.3880
1.2810 1.2810 1.2810 ... 1.2810 1.2810 1.2810
[torch.cuda.FloatTensor of size 639x24 (GPU 0)]
, 'Y_train_': Variable containing:
1.6384 1.5479 1.5014 ... 2.0454 2.0098 1.8751
1.7482 1.6577 1.6313 ... 2.0735 2.0128 1.9111
1.7923 1.7135 1.6666 ... 1.9348 1.9248 1.8024
... ⋱ ...
1.4260 1.3520 1.3000 ... 1.5640 1.5180 1.4510
1.3710 1.2990 1.2580 ... 1.6990 1.6590 1.6090
1.5220 1.4540 1.4040 ... 1.8120 1.7540 1.6820
[torch.cuda.FloatTensor of size 2553x24 (GPU 0)]
}
params = {'c_ramp': 0.4, 'n': 24, 'gamma_over': 0.5, 'gamma_under': 50}
save_folder = './0'
74
75 # Randomly construct hold-out set for task net training.
/home/mldl/ub16_prj/e2e-model-learning/power_sched/nets.py in eval_net(which='rmse_net', model=Net (
(lin): Linear (149 -> 24)
(net): Seque...opout (p = 0.2)
(8): Linear (200 -> 24)
)
), variables={'X_test_': Variable containing:
-0.1722 -0.0922 -0.0812 .....[torch.cuda.FloatTensor of size 639x149 (GPU 0)]
, 'X_train_': Variable containing:
-4.0186e-01 7.3033e-02 3....torch.cuda.FloatTensor of size 2553x149 (GPU 0)]
, 'Y_test_': Variable containing:
1.5750 1.5000 1.4730 .....
[torch.cuda.FloatTensor of size 639x24 (GPU 0)]
, 'Y_train_': Variable containing:
1.6384 1.5479 1.5014 .....[torch.cuda.FloatTensor of size 2553x24 (GPU 0)]
}, params={'c_ramp': 0.4, 'gamma_over': 0.5, 'gamma_under': 50, 'n': 24}, save_folder='./0')
142
143 # Eval model on task loss
--> 144 Y_sched_train = solver(mu_pred_train.double(), sig_pred_train.double())
Y_sched_train = undefined
solver = SolveScheduling (
)
mu_pred_train.double = <bound method Variable.double of Variable containing:
1.6559 1.5848 1.5492 ... 2.0905 2.0244 1.8988
1.7428 1.6612 1.6278 ... 2.0502 1.9847 1.8801
1.7963 1.7084 1.6635 ... 1.9366 1.8893 1.7936
... ⋱ ...
1.4143 1.3457 1.3031 ... 1.6152 1.5751 1.5050
1.3843 1.3180 1.2787 ... 1.6053 1.5626 1.4952
1.5086 1.4384 1.4004 ... 1.8398 1.7719 1.6712
[torch.cuda.FloatTensor of size 2553x24 (GPU 0)]
sig_pred_train.double = <bound method Variable.double of Variable containing:
1.00000e-02 *
1.9104 2.3216 2.4676 ... 6.8909 6.7232 6.3667
1.9104 2.3216 2.4676 ... 6.8909 6.7232 6.3667
1.9104 2.3216 2.4676 ... 6.8909 6.7232 6.3667
... ⋱ ...
1.9104 2.3216 2.4676 ... 6.8909 6.7232 6.3667
1.9104 2.3216 2.4676 ... 6.8909 6.7232 6.3667
1.9104 2.3216 2.4676 ... 6.8909 6.7232 6.3667
[torch.cuda.FloatTensor of size 2553x24 (GPU 0)]
145 train_loss_task = task_loss(
146 Y_sched_train.float(), variables['Y_train_'], params)
/usr/local/lib/python3.5/dist-packages/torch/nn/modules/module.py in __call__(self=SolveScheduling (
), *input=(Variable containing:
1.6559 1.5848 1.5492 .....torch.cuda.DoubleTensor of size 2553x24 (GPU 0)]
, Variable containing:
1.00000e-02 *
1.9104 2.32...torch.cuda.DoubleTensor of size 2553x24 (GPU 0)]
), **kwargs={})
204
205 def __call__(self, *input, **kwargs):
--> 206 result = self.forward(*input, **kwargs)
result = undefined
self.forward = <bound method SolveScheduling.forward of SolveScheduling (
)>
input = (Variable containing:
1.6559 1.5848 1.5492 ... 2.0905 2.0244 1.8988
1.7428 1.6612 1.6278 ... 2.0502 1.9847 1.8801
1.7963 1.7084 1.6635 ... 1.9366 1.8893 1.7936
... ⋱ ...
1.4143 1.3457 1.3031 ... 1.6152 1.5751 1.5050
1.3843 1.3180 1.2787 ... 1.6053 1.5626 1.4952
1.5086 1.4384 1.4004 ... 1.8398 1.7719 1.6712
[torch.cuda.DoubleTensor of size 2553x24 (GPU 0)]
, Variable containing:
1.00000e-02 *
1.9104 2.3216 2.4676 ... 6.8909 6.7232 6.3667
1.9104 2.3216 2.4676 ... 6.8909 6.7232 6.3667
1.9104 2.3216 2.4676 ... 6.8909 6.7232 6.3667
... ⋱ ...
1.9104 2.3216 2.4676 ... 6.8909 6.7232 6.3667
1.9104 2.3216 2.4676 ... 6.8909 6.7232 6.3667
1.9104 2.3216 2.4676 ... 6.8909 6.7232 6.3667
[torch.cuda.DoubleTensor of size 2553x24 (GPU 0)]
)
kwargs = {}
207 for hook in self._forward_hooks.values():
208 hook_result = hook(self, input, result)
/home/mldl/ub16_prj/e2e-model-learning/power_sched/model_classes.py in forward(self=SolveScheduling (
), mu=Variable containing:
1.6559 1.5848 1.5492 .....torch.cuda.DoubleTensor of size 2553x24 (GPU 0)]
, sig=Variable containing:
1.00000e-02 *
1.9104 2.32...torch.cuda.DoubleTensor of size 2553x24 (GPU 0)]
)
150 d2g = GQuadraticApprox(self.params["gamma_under"],
151 self.params["gamma_over"])(z0, mu0, sig0)
--> 152 z0_new = SolveSchedulingQP(self.params)(z0, mu0, dg, d2g)
z0_new = undefined
global SolveSchedulingQP = <class 'model_classes.SolveSchedulingQP'>
self.params = {'c_ramp': 0.4, 'n': 24, 'gamma_over': 0.5, 'gamma_under': 50}
z0 = Variable containing:
1.6559 1.5848 1.5492 ... 2.0905 2.0244 1.8988
1.7428 1.6612 1.6278 ... 2.0502 1.9847 1.8801
1.7963 1.7084 1.6635 ... 1.9366 1.8893 1.7936
... ⋱ ...
1.4143 1.3457 1.3031 ... 1.6152 1.5751 1.5050
1.3843 1.3180 1.2787 ... 1.6053 1.5626 1.4952
1.5086 1.4384 1.4004 ... 1.8398 1.7719 1.6712
[torch.cuda.DoubleTensor of size 2553x24 (GPU 0)]
mu0 = Variable containing:
1.6559 1.5848 1.5492 ... 2.0905 2.0244 1.8988
1.7428 1.6612 1.6278 ... 2.0502 1.9847 1.8801
1.7963 1.7084 1.6635 ... 1.9366 1.8893 1.7936
... ⋱ ...
1.4143 1.3457 1.3031 ... 1.6152 1.5751 1.5050
1.3843 1.3180 1.2787 ... 1.6053 1.5626 1.4952
1.5086 1.4384 1.4004 ... 1.8398 1.7719 1.6712
[torch.cuda.DoubleTensor of size 2553x24 (GPU 0)]
dg = Variable containing:
-24.7500 -24.7500 -24.7500 ... -24.7500 -24.7500 -24.7500
-24.7500 -24.7500 -24.7500 ... -24.7500 -24.7500 -24.7500
-24.7500 -24.7500 -24.7500 ... -24.7500 -24.7500 -24.7500
... ⋱ ...
-24.7500 -24.7500 -24.7500 ... -24.7500 -24.7500 -24.7500
-24.7500 -24.7500 -24.7500 ... -24.7500 -24.7500 -24.7500
-24.7500 -24.7500 -24.7500 ... -24.7500 -24.7500 -24.7500
[torch.cuda.DoubleTensor of size 2553x24 (GPU 0)]
d2g = Variable containing:
1054.5512 867.7901 816.4561 ... 292.3651 299.6580 316.4354
1054.5512 867.7901 816.4561 ... 292.3651 299.6580 316.4354
1054.5512 867.7901 816.4561 ... 292.3651 299.6580 316.4354
... ⋱ ...
1054.5512 867.7901 816.4561 ... 292.3651 299.6580 316.4354
1054.5512 867.7901 816.4561 ... 292.3651 299.6580 316.4354
1054.5512 867.7901 816.4561 ... 292.3651 299.6580 316.4354
[torch.cuda.DoubleTensor of size 2553x24 (GPU 0)]
153 solution_diff = (z0-z0_new).norm().data[0]
154 print("+ SQP Iter: {}, Solution diff = {}".format(i, solution_diff))
/usr/local/lib/python3.5/dist-packages/torch/nn/modules/module.py in __call__(self=SolveSchedulingQP (
), *input=(Variable containing:
1.6559 1.5848 1.5492 .....torch.cuda.DoubleTensor of size 2553x24 (GPU 0)]
, Variable containing:
1.6559 1.5848 1.5492 .....torch.cuda.DoubleTensor of size 2553x24 (GPU 0)]
, Variable containing:
-24.7500 -24.7500 -24.7500 ...torch.cuda.DoubleTensor of size 2553x24 (GPU 0)]
, Variable containing:
1054.5512 867.7901 816...torch.cuda.DoubleTensor of size 2553x24 (GPU 0)]
), **kwargs={})
204
205 def __call__(self, *input, **kwargs):
--> 206 result = self.forward(*input, **kwargs)
result = undefined
self.forward = <bound method SolveSchedulingQP.forward of SolveSchedulingQP (
)>
input = (Variable containing:
1.6559 1.5848 1.5492 ... 2.0905 2.0244 1.8988
1.7428 1.6612 1.6278 ... 2.0502 1.9847 1.8801
1.7963 1.7084 1.6635 ... 1.9366 1.8893 1.7936
... ⋱ ...
1.4143 1.3457 1.3031 ... 1.6152 1.5751 1.5050
1.3843 1.3180 1.2787 ... 1.6053 1.5626 1.4952
1.5086 1.4384 1.4004 ... 1.8398 1.7719 1.6712
[torch.cuda.DoubleTensor of size 2553x24 (GPU 0)]
, Variable containing:
1.6559 1.5848 1.5492 ... 2.0905 2.0244 1.8988
1.7428 1.6612 1.6278 ... 2.0502 1.9847 1.8801
1.7963 1.7084 1.6635 ... 1.9366 1.8893 1.7936
... ⋱ ...
1.4143 1.3457 1.3031 ... 1.6152 1.5751 1.5050
1.3843 1.3180 1.2787 ... 1.6053 1.5626 1.4952
1.5086 1.4384 1.4004 ... 1.8398 1.7719 1.6712
[torch.cuda.DoubleTensor of size 2553x24 (GPU 0)]
, Variable containing:
-24.7500 -24.7500 -24.7500 ... -24.7500 -24.7500 -24.7500
-24.7500 -24.7500 -24.7500 ... -24.7500 -24.7500 -24.7500
-24.7500 -24.7500 -24.7500 ... -24.7500 -24.7500 -24.7500
... ⋱ ...
-24.7500 -24.7500 -24.7500 ... -24.7500 -24.7500 -24.7500
-24.7500 -24.7500 -24.7500 ... -24.7500 -24.7500 -24.7500
-24.7500 -24.7500 -24.7500 ... -24.7500 -24.7500 -24.7500
[torch.cuda.DoubleTensor of size 2553x24 (GPU 0)]
, Variable containing:
1054.5512 867.7901 816.4561 ... 292.3651 299.6580 316.4354
1054.5512 867.7901 816.4561 ... 292.3651 299.6580 316.4354
1054.5512 867.7901 816.4561 ... 292.3651 299.6580 316.4354
... ⋱ ...
1054.5512 867.7901 816.4561 ... 292.3651 299.6580 316.4354
1054.5512 867.7901 816.4561 ... 292.3651 299.6580 316.4354
1054.5512 867.7901 816.4561 ... 292.3651 299.6580 316.4354
[torch.cuda.DoubleTensor of size 2553x24 (GPU 0)]
)
kwargs = {}
207 for hook in self._forward_hooks.values():
208 hook_result = hook(self, input, result)
/home/mldl/ub16_prj/e2e-model-learning/power_sched/model_classes.py in forward(self=SolveSchedulingQP (
), z0=Variable containing:
1.6559 1.5848 1.5492 .....torch.cuda.DoubleTensor of size 2553x24 (GPU 0)]
, mu=Variable containing:
1.6559 1.5848 1.5492 .....torch.cuda.DoubleTensor of size 2553x24 (GPU 0)]
, dg=Variable containing:
-24.7500 -24.7500 -24.7500 ...torch.cuda.DoubleTensor of size 2553x24 (GPU 0)]
, d2g=Variable containing:
1054.5512 867.7901 816...torch.cuda.DoubleTensor of size 2553x24 (GPU 0)]
)
118 h = self.h.unsqueeze(0).expand(nBatch, self.h.size(0))
119
--> 120 out = QPFunction(verbose=False)(Q, p, G, h, self.e, self.e)
out = undefined
global QPFunction = <class 'qpth.qp.QPFunction'>
global verbose = undefined
Q = Variable containing:
( 0 ,.,.) =
1055.5512 0.0000 0.0000 ... 0.0000 0.0000 0.0000
0.0000 868.7901 0.0000 ... 0.0000 0.0000 0.0000
0.0000 0.0000 817.4561 ... 0.0000 0.0000 0.0000
... ⋱ ...
0.0000 0.0000 0.0000 ... 293.3651 0.0000 0.0000
0.0000 0.0000 0.0000 ... 0.0000 300.6580 0.0000
0.0000 0.0000 0.0000 ... 0.0000 0.0000 317.4354
( 1 ,.,.) =
1055.5512 0.0000 0.0000 ... 0.0000 0.0000 0.0000
0.0000 868.7901 0.0000 ... 0.0000 0.0000 0.0000
0.0000 0.0000 817.4561 ... 0.0000 0.0000 0.0000
... ⋱ ...
0.0000 0.0000 0.0000 ... 293.3651 0.0000 0.0000
0.0000 0.0000 0.0000 ... 0.0000 300.6580 0.0000
0.0000 0.0000 0.0000 ... 0.0000 0.0000 317.4354
( 2 ,.,.) =
1055.5512 0.0000 0.0000 ... 0.0000 0.0000 0.0000
0.0000 868.7901 0.0000 ... 0.0000 0.0000 0.0000
0.0000 0.0000 817.4561 ... 0.0000 0.0000 0.0000
... ⋱ ...
0.0000 0.0000 0.0000 ... 293.3651 0.0000 0.0000
0.0000 0.0000 0.0000 ... 0.0000 300.6580 0.0000
0.0000 0.0000 0.0000 ... 0.0000 0.0000 317.4354
...
(2550,.,.) =
1055.5512 0.0000 0.0000 ... 0.0000 0.0000 0.0000
0.0000 868.7901 0.0000 ... 0.0000 0.0000 0.0000
0.0000 0.0000 817.4561 ... 0.0000 0.0000 0.0000
... ⋱ ...
0.0000 0.0000 0.0000 ... 293.3651 0.0000 0.0000
0.0000 0.0000 0.0000 ... 0.0000 300.6580 0.0000
0.0000 0.0000 0.0000 ... 0.0000 0.0000 317.4354
(2551,.,.) =
1055.5512 0.0000 0.0000 ... 0.0000 0.0000 0.0000
0.0000 868.7901 0.0000 ... 0.0000 0.0000 0.0000
0.0000 0.0000 817.4561 ... 0.0000 0.0000 0.0000
... ⋱ ...
0.0000 0.0000 0.0000 ... 293.3651 0.0000 0.0000
0.0000 0.0000 0.0000 ... 0.0000 300.6580 0.0000
0.0000 0.0000 0.0000 ... 0.0000 0.0000 317.4354
(2552,.,.) =
1055.5512 0.0000 0.0000 ... 0.0000 0.0000 0.0000
0.0000 868.7901 0.0000 ... 0.0000 0.0000 0.0000
0.0000 0.0000 817.4561 ... 0.0000 0.0000 0.0000
... ⋱ ...
0.0000 0.0000 0.0000 ... 293.3651 0.0000 0.0000
0.0000 0.0000 0.0000 ... 0.0000 300.6580 0.0000
0.0000 0.0000 0.0000 ... 0.0000 0.0000 317.4354
[torch.cuda.DoubleTensor of size 2553x24x24 (GPU 0)]
p = Variable containing:
-1772.5911 -1401.5665 -1291.1130 ... -638.0358 -633.3941 -627.4944
-1864.3215 -1468.0045 -1355.4388 ... -626.1972 -621.4645 -621.5749
-1920.8175 -1508.9659 -1384.5890 ... -592.8813 -592.7801 -594.1101
... ⋱ ...
-1517.6264 -1193.8847 -1090.0132 ... -498.5796 -498.3313 -502.4959
-1485.9994 -1169.8336 -1070.0472 ... -495.6922 -494.5639 -499.3930
-1617.1337 -1274.4051 -1169.5335 ... -564.4880 -557.4778 -555.2510
[torch.cuda.DoubleTensor of size 2553x24 (GPU 0)]
G = Variable containing:
( 0 ,.,.) =
1 -1 0 ... 0 0 0
0 1 -1 ... 0 0 0
0 0 1 ... 0 0 0
... ⋱ ...
-0 -0 -0 ... 1 -0 -0
-0 -0 -0 ... -1 1 -0
-0 -0 -0 ... -0 -1 1
( 1 ,.,.) =
1 -1 0 ... 0 0 0
0 1 -1 ... 0 0 0
0 0 1 ... 0 0 0
... ⋱ ...
-0 -0 -0 ... 1 -0 -0
-0 -0 -0 ... -1 1 -0
-0 -0 -0 ... -0 -1 1
( 2 ,.,.) =
1 -1 0 ... 0 0 0
0 1 -1 ... 0 0 0
0 0 1 ... 0 0 0
... ⋱ ...
-0 -0 -0 ... 1 -0 -0
-0 -0 -0 ... -1 1 -0
-0 -0 -0 ... -0 -1 1
...
(2550,.,.) =
1 -1 0 ... 0 0 0
0 1 -1 ... 0 0 0
0 0 1 ... 0 0 0
... ⋱ ...
-0 -0 -0 ... 1 -0 -0
-0 -0 -0 ... -1 1 -0
-0 -0 -0 ... -0 -1 1
(2551,.,.) =
1 -1 0 ... 0 0 0
0 1 -1 ... 0 0 0
0 0 1 ... 0 0 0
... ⋱ ...
-0 -0 -0 ... 1 -0 -0
-0 -0 -0 ... -1 1 -0
-0 -0 -0 ... -0 -1 1
(2552,.,.) =
1 -1 0 ... 0 0 0
0 1 -1 ... 0 0 0
0 0 1 ... 0 0 0
... ⋱ ...
-0 -0 -0 ... 1 -0 -0
-0 -0 -0 ... -1 1 -0
-0 -0 -0 ... -0 -1 1
[torch.cuda.DoubleTensor of size 2553x46x24 (GPU 0)]
h = Variable containing:
0.4000 0.4000 0.4000 ... 0.4000 0.4000 0.4000
0.4000 0.4000 0.4000 ... 0.4000 0.4000 0.4000
0.4000 0.4000 0.4000 ... 0.4000 0.4000 0.4000
... ⋱ ...
0.4000 0.4000 0.4000 ... 0.4000 0.4000 0.4000
0.4000 0.4000 0.4000 ... 0.4000 0.4000 0.4000
0.4000 0.4000 0.4000 ... 0.4000 0.4000 0.4000
[torch.cuda.DoubleTensor of size 2553x46 (GPU 0)]
self.e = Variable containing:[torch.cuda.DoubleTensor with no dimension]
121 return out
122
/usr/local/lib/python3.5/dist-packages/qpth-0.0.6-py3.5.egg/qpth/qp.py in forward(self=<qpth.qp.QPFunction object>, Q_=
( 0 ,.,.) =
1055.5512 0.0000 0.0000...ch.cuda.DoubleTensor of size 2553x24x24 (GPU 0)]
, p_=
-1772.5911 -1401.5665 -1291.1130 ... -638.03...torch.cuda.DoubleTensor of size 2553x24 (GPU 0)]
, G_=
( 0 ,.,.) =
1 -1 0 ... 0 0 0
...ch.cuda.DoubleTensor of size 2553x46x24 (GPU 0)]
, h_=
0.4000 0.4000 0.4000 ... 0.4000 0.4000 ...torch.cuda.DoubleTensor of size 2553x46 (GPU 0)]
, A_=[torch.cuda.DoubleTensor with no dimension]
, b_=[torch.cuda.DoubleTensor with no dimension]
)
89
90 if self.solver == QPSolvers.PDIPM_BATCHED:
---> 91 self.Q_LU, self.S_LU, self.R = pdipm_b.pre_factor_kkt(Q, G, A)
self.Q_LU = undefined
self.S_LU = undefined
self.R = undefined
global pdipm_b.pre_factor_kkt = <function pre_factor_kkt at 0x7f270ebf3620>
Q =
( 0 ,.,.) =
1055.5512 0.0000 0.0000 ... 0.0000 0.0000 0.0000
0.0000 868.7901 0.0000 ... 0.0000 0.0000 0.0000
0.0000 0.0000 817.4561 ... 0.0000 0.0000 0.0000
... ⋱ ...
0.0000 0.0000 0.0000 ... 293.3651 0.0000 0.0000
0.0000 0.0000 0.0000 ... 0.0000 300.6580 0.0000
0.0000 0.0000 0.0000 ... 0.0000 0.0000 317.4354
( 1 ,.,.) =
1055.5512 0.0000 0.0000 ... 0.0000 0.0000 0.0000
0.0000 868.7901 0.0000 ... 0.0000 0.0000 0.0000
0.0000 0.0000 817.4561 ... 0.0000 0.0000 0.0000
... ⋱ ...
0.0000 0.0000 0.0000 ... 293.3651 0.0000 0.0000
0.0000 0.0000 0.0000 ... 0.0000 300.6580 0.0000
0.0000 0.0000 0.0000 ... 0.0000 0.0000 317.4354
( 2 ,.,.) =
1055.5512 0.0000 0.0000 ... 0.0000 0.0000 0.0000
0.0000 868.7901 0.0000 ... 0.0000 0.0000 0.0000
0.0000 0.0000 817.4561 ... 0.0000 0.0000 0.0000
... ⋱ ...
0.0000 0.0000 0.0000 ... 293.3651 0.0000 0.0000
0.0000 0.0000 0.0000 ... 0.0000 300.6580 0.0000
0.0000 0.0000 0.0000 ... 0.0000 0.0000 317.4354
...
(2550,.,.) =
1055.5512 0.0000 0.0000 ... 0.0000 0.0000 0.0000
0.0000 868.7901 0.0000 ... 0.0000 0.0000 0.0000
0.0000 0.0000 817.4561 ... 0.0000 0.0000 0.0000
... ⋱ ...
0.0000 0.0000 0.0000 ... 293.3651 0.0000 0.0000
0.0000 0.0000 0.0000 ... 0.0000 300.6580 0.0000
0.0000 0.0000 0.0000 ... 0.0000 0.0000 317.4354
(2551,.,.) =
1055.5512 0.0000 0.0000 ... 0.0000 0.0000 0.0000
0.0000 868.7901 0.0000 ... 0.0000 0.0000 0.0000
0.0000 0.0000 817.4561 ... 0.0000 0.0000 0.0000
... ⋱ ...
0.0000 0.0000 0.0000 ... 293.3651 0.0000 0.0000
0.0000 0.0000 0.0000 ... 0.0000 300.6580 0.0000
0.0000 0.0000 0.0000 ... 0.0000 0.0000 317.4354
(2552,.,.) =
1055.5512 0.0000 0.0000 ... 0.0000 0.0000 0.0000
0.0000 868.7901 0.0000 ... 0.0000 0.0000 0.0000
0.0000 0.0000 817.4561 ... 0.0000 0.0000 0.0000
... ⋱ ...
0.0000 0.0000 0.0000 ... 293.3651 0.0000 0.0000
0.0000 0.0000 0.0000 ... 0.0000 300.6580 0.0000
0.0000 0.0000 0.0000 ... 0.0000 0.0000 317.4354
[torch.cuda.DoubleTensor of size 2553x24x24 (GPU 0)]
G =
( 0 ,.,.) =
1 -1 0 ... 0 0 0
0 1 -1 ... 0 0 0
0 0 1 ... 0 0 0
... ⋱ ...
-0 -0 -0 ... 1 -0 -0
-0 -0 -0 ... -1 1 -0
-0 -0 -0 ... -0 -1 1
( 1 ,.,.) =
1 -1 0 ... 0 0 0
0 1 -1 ... 0 0 0
0 0 1 ... 0 0 0
... ⋱ ...
-0 -0 -0 ... 1 -0 -0
-0 -0 -0 ... -1 1 -0
-0 -0 -0 ... -0 -1 1
( 2 ,.,.) =
1 -1 0 ... 0 0 0
0 1 -1 ... 0 0 0
0 0 1 ... 0 0 0
... ⋱ ...
-0 -0 -0 ... 1 -0 -0
-0 -0 -0 ... -1 1 -0
-0 -0 -0 ... -0 -1 1
...
(2550,.,.) =
1 -1 0 ... 0 0 0
0 1 -1 ... 0 0 0
0 0 1 ... 0 0 0
... ⋱ ...
-0 -0 -0 ... 1 -0 -0
-0 -0 -0 ... -1 1 -0
-0 -0 -0 ... -0 -1 1
(2551,.,.) =
1 -1 0 ... 0 0 0
0 1 -1 ... 0 0 0
0 0 1 ... 0 0 0
... ⋱ ...
-0 -0 -0 ... 1 -0 -0
-0 -0 -0 ... -1 1 -0
-0 -0 -0 ... -0 -1 1
(2552,.,.) =
1 -1 0 ... 0 0 0
0 1 -1 ... 0 0 0
0 0 1 ... 0 0 0
... ⋱ ...
-0 -0 -0 ... 1 -0 -0
-0 -0 -0 ... -1 1 -0
-0 -0 -0 ... -0 -1 1
[torch.cuda.DoubleTensor of size 2553x46x24 (GPU 0)]
A = [torch.cuda.DoubleTensor with no dimension]
92 zhats, self.nus, self.lams, self.slacks = pdipm_b.forward(
93 Q, p, G, h, A, b, self.Q_LU, self.S_LU, self.R,
/usr/local/lib/python3.5/dist-packages/qpth-0.0.6-py3.5.egg/qpth/solvers/pdipm/batch.py in pre_factor_kkt(Q=
( 0 ,.,.) =
1055.5512 0.0000 0.0000...ch.cuda.DoubleTensor of size 2553x24x24 (GPU 0)]
, G=
( 0 ,.,.) =
1 -1 0 ... 0 0 0
...ch.cuda.DoubleTensor of size 2553x46x24 (GPU 0)]
, A=[torch.cuda.DoubleTensor with no dimension]
)
362 Please make sure that your Q matrix is PSD and has
363 a non-zero diagonal.
--> 364 """)
global Factor = undefined
global the = undefined
global U22 = undefined
global block = undefined
global that = undefined
global we = undefined
global can = undefined
global only = undefined
global do = undefined
global after = undefined
global know = undefined
global D = undefined
365
366 # S = [ A Q^{-1} A^T A Q^{-1} G^T ]
RuntimeError:
qpth Error: Cannot perform LU factorization on Q.
Please make sure that your Q matrix is PSD and has
a non-zero diagonal.
/usr/local/lib/python3.5/dist-packages/qpth-0.0.6-py3.5.egg/qpth/solvers/pdipm/batch.py(364)pre_factor_kkt()
362 Please make sure that your Q matrix is PSD and has
363 a non-zero diagonal.
--> 364 """)
365
366 # S = [ A Q^{-1} A^T A Q^{-1} G^T ]
ipdb>