Hi there!
I'm trying to implement a NormalWishart-Normal model with Edward. I think the model representation is OK, but what do you think? Here is the code:
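To be explicit, the generative process I have in mind is the usual Normal-inverse-Wishart construction: sigma ~ InverseWishart(v, W), mu | sigma ~ Normal(m, sigma / k), and x_n | mu, sigma ~ Normal(mu, sigma) for n = 1, ..., N. That is what the data-generation block below samples from; in the Edward model further down I put a Wishart prior directly on the covariance, and I am not completely sure that is the right way to express it.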
# -*- coding: UTF-8 -*-
"""
NormalWishart-Normal Model
Posterior inference with Edward BBVI
[DOING]
"""
import edward as ed
import matplotlib.cm as cm
import matplotlib.pyplot as plt
import numpy as np
import tensorflow as tf
from edward.models import MultivariateNormalFull, WishartCholesky
from scipy.stats import invwishart
N = 1000
D = 2
# Data generation
# NIW Inverse Wishart hyperparameters
v = 3.
W = np.array([[20., 25.], [25., 40.]])  # scale matrix, must be symmetric positive definite
sigma = invwishart.rvs(v, W)
# NIW Normal hyperparameters
m = np.array([1., 1.])
k = 0.8
mu = np.random.multivariate_normal(m, sigma / k)
xn_data = np.random.multivariate_normal(mu, sigma, N)
plt.scatter(xn_data[:, 0], xn_data[:, 1], cmap=cm.gist_rainbow, s=5)
plt.show()
print('mu={}'.format(mu))
print('sigma={}'.format(sigma))
# Prior definition
v_prior = tf.Variable(3., dtype=tf.float64, trainable=False)
W_prior = tf.Variable(np.array([[1., 0.], [0., 1.]]),
                      dtype=tf.float64, trainable=False)
m_prior = tf.Variable(np.array([0.5, 0.5]), dtype=tf.float64, trainable=False)
k_prior = tf.Variable(0.6, dtype=tf.float64, trainable=False)
print('***** PRIORS *****')
print('v_prior: {}'.format(v_prior))
print('W_prior: {}'.format(W_prior))
print('m_prior: {}'.format(m_prior))
print('k_prior: {}'.format(k_prior))
# Posterior inference
# Probabilistic model
sigma = WishartCholesky(df=v_prior, scale=W_prior)
mu = MultivariateNormalFull(m_prior, k_prior * sigma)
xn = MultivariateNormalFull(tf.reshape(tf.tile(mu, [N]), [N, D]),
                            tf.reshape(tf.tile(sigma, [N, 1]), [N, D, D]))
print('***** PROBABILISTIC MODEL *****')
print('mu: {}'.format(mu))
print('sigma: {}'.format(sigma))
print('xn: {}'.format(xn))
# Variational model
qmu = MultivariateNormalFull(
    tf.Variable(tf.random_normal([D], dtype=tf.float64), name='v1'),
    tf.nn.softplus(
        tf.Variable(tf.random_normal([D, D], dtype=tf.float64), name='v2')))
qsigma = WishartCholesky(
    df=tf.nn.softplus(
        tf.Variable(tf.random_normal([], dtype=tf.float64), name='v3')),
    scale=tf.nn.softplus(
        tf.Variable(tf.random_normal([D, D], dtype=tf.float64), name='v4')))
print('***** VARIATIONAL MODEL *****')
print('qmu: {}'.format(qmu))
print('qsigma: {}'.format(qsigma))
# Inference
print('xn_data dtype: {}'.format(xn_data.dtype))
inference = ed.KLqp({mu: qmu, sigma: qsigma}, data={xn: xn_data})
inference.run(n_iter=2000, n_samples=20)
However, when I run it I get what looks like a type error:
File "NW_normal_edward.py", line 78, in <module>
inference.run(n_iter=2000, n_samples=20)
File "/home/alberto/.virtualenvs/GMM/local/lib/python2.7/site-packages/edward/inferences/inference.py", line 218, in run
self.initialize(*args, **kwargs)
File "/home/alberto/.virtualenvs/GMM/local/lib/python2.7/site-packages/edward/inferences/klqp.py", line 66, in initialize
return super(KLqp, self).initialize(*args, **kwargs)
File "/home/alberto/.virtualenvs/GMM/local/lib/python2.7/site-packages/edward/inferences/variational_inference.py", line 70, in initialize
self.loss, grads_and_vars = self.build_loss_and_gradients(var_list)
File "/home/alberto/.virtualenvs/GMM/local/lib/python2.7/site-packages/edward/inferences/klqp.py", line 108, in build_loss_and_gradients
return build_reparam_loss_and_gradients(self, var_list)
File "/home/alberto/.virtualenvs/GMM/local/lib/python2.7/site-packages/edward/inferences/klqp.py", line 343, in build_reparam_loss_and_gradients
z_copy = copy(z, dict_swap, scope=scope)
File "/home/alberto/.virtualenvs/GMM/local/lib/python2.7/site-packages/edward/util/random_variables.py", line 176, in copy
new_rv = rv.__class__(*args, **kwargs)
File "/home/alberto/.virtualenvs/GMM/local/lib/python2.7/site-packages/edward/models/random_variable.py", line 62, in __init__
super(RandomVariable, self).__init__(*args, **kwargs)
File "/home/alberto/.virtualenvs/GMM/local/lib/python2.7/site-packages/tensorflow/contrib/distributions/python/ops/wishart.py", line 521, in __init__
name=ns)
File "/home/alberto/.virtualenvs/GMM/local/lib/python2.7/site-packages/tensorflow/contrib/distributions/python/ops/wishart.py", line 125, in __init__
dtype=self._scale_operator_pd.dtype, name="dimension")
File "/home/alberto/.virtualenvs/GMM/local/lib/python2.7/site-packages/tensorflow/python/framework/ops.py", line 651, in convert_to_tensor
as_ref=False)
File "/home/alberto/.virtualenvs/GMM/local/lib/python2.7/site-packages/tensorflow/python/framework/ops.py", line 730, in internal_convert_to_tensor
dtype.name, ret.dtype.name))
RuntimeError: dimension: Conversion function <function _constant_tensor_conversion_function at 0x7f35210922a8> for type <type 'object'> returned incompatible dtype: requested = float64_ref, actual = float64
Do you think this is a problem with TensorFlow's WishartCholesky? Do you have any example of a model that uses the Wishart distribution in Edward?
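For what it is worth, the traceback complains about float64_ref vs float64, so one thing I am thinking of trying (just a guess, not a confirmed fix) is to pass the fixed hyperparameters as plain constant tensors instead of non-trainable tf.Variables, so that WishartCholesky never receives a ref-typed tensor when Edward copies the random variables. A minimal sketch of that change (same model, only the prior definitions differ):
# Hedged sketch: define the fixed hyperparameters as constant tensors
# (dtype float64) instead of non-trainable tf.Variables (dtype float64_ref).
import numpy as np
import tensorflow as tf
from edward.models import MultivariateNormalFull, WishartCholesky

N = 1000
D = 2

v_prior = tf.constant(3., dtype=tf.float64)
W_prior = tf.constant(np.eye(D), dtype=tf.float64)
m_prior = tf.constant(np.array([0.5, 0.5]), dtype=tf.float64)
k_prior = tf.constant(0.6, dtype=tf.float64)

# Same probabilistic model as above, built from the constant hyperparameters.
sigma = WishartCholesky(df=v_prior, scale=W_prior)
mu = MultivariateNormalFull(m_prior, k_prior * sigma)
xn = MultivariateNormalFull(tf.reshape(tf.tile(mu, [N]), [N, D]),
                            tf.reshape(tf.tile(sigma, [N, 1]), [N, D, D]))
I have not verified that this gets past the error, so any pointer to a working Wishart example would still be very welcome.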