Adding logdet #1777

Merged: 9 commits, merged on Feb 17, 2017
18 changes: 16 additions & 2 deletions pymc3/distributions/multivariate.py
@@ -12,6 +12,8 @@
 from theano.tensor.nlinalg import det, matrix_inverse, trace
 
 import pymc3 as pm
+
+from pymc3.theanof import logdet
 from . import transforms
 from .distribution import Continuous, Discrete, draw_values, generate_samples
 from ..model import Deterministic
@@ -83,14 +85,23 @@ class MvNormal(Continuous):
         Covariance matrix.
     tau : array, optional
         Precision matrix.
+
+    Flags
+    -----
+    gpu_compat : bool, default False. LogDet is not GPU compatible yet.
+        If set to True, the GPU-compatible (but numerically unstable)
+        log(det) is used instead.
+
     """
 
-    def __init__(self, mu, cov=None, tau=None, *args, **kwargs):
+    def __init__(self, mu, cov=None, tau=None, gpu_compat=False, *args, **kwargs):
         super(MvNormal, self).__init__(*args, **kwargs)
         self.mean = self.median = self.mode = self.mu = mu = tt.as_tensor_variable(mu)
         tau, cov = get_tau_cov(mu, tau=tau, cov=cov)
         self.tau = tt.as_tensor_variable(tau)
         self.cov = tt.as_tensor_variable(cov)
+        self.gpu_compat = gpu_compat
+        if gpu_compat is False and theano.config.device == 'gpu':
+            warnings.warn("The function used is not GPU compatible. Please check the gpu_compat flag")
 
     def random(self, point=None, size=None):
         mu, cov = draw_values([self.mu, self.cov], point=point)

@@ -113,7 +124,10 @@ def logp(self, value):
         delta = value - mu
         k = tau.shape[0]
 
-        result = k * tt.log(2 * np.pi) + tt.log(1. / det(tau))
+        if self.gpu_compat:
+            result = k * tt.log(2 * np.pi) + tt.log(1. / det(tau))
Review comment (Member): The first term can be moved up top; then the if-clause would just do -logdet(tau) or -tt.log(det(tau)).
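
A minimal sketch of that suggested refactor (illustrative only; the merged diff below keeps the duplicated first term):

    result = k * tt.log(2 * np.pi)
    if self.gpu_compat:
        result -= tt.log(det(tau))
    else:
        result -= logdet(tau)
    result += (delta.dot(tau) * delta).sum(axis=delta.ndim - 1)
    return -1 / 2. * result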

Review comment (Member): You can use this code in the future; it worked for me.

+        else:
+            result = k * tt.log(2 * np.pi) - logdet(tau)
         result += (delta.dot(tau) * delta).sum(axis=delta.ndim - 1)
         return -1 / 2. * result

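For illustration, a hypothetical usage sketch of the new flag (model and variable names are invented; assumes the PyMC3 3.0-era API):

    import numpy as np
    import pymc3 as pm

    with pm.Model():
        mu = np.zeros(3)
        cov = np.eye(3)
        # Default path: the numerically stable, CPU-only LogDet op
        x = pm.MvNormal('x', mu=mu, cov=cov, shape=3)
        # GPU-compatible but numerically unstable log(det) path
        y = pm.MvNormal('y', mu=mu, cov=cov, shape=3, gpu_compat=True)
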
31 changes: 30 additions & 1 deletion pymc3/tests/test_theanof.py
@@ -3,7 +3,9 @@
 import numpy as np
 import theano
 from ..theanof import DataGenerator, GeneratorOp, generator
+from theano.tests import unittest_tools as utt
+from pymc3.theanof import LogDet, logdet
 from .helpers import SeededTest
 
 def integers():
     i = 0

@@ -18,6 +20,33 @@ def integers_ndim(ndim):
         yield np.ones((2,) * ndim) * i
         i += 1
 
+class TestLogDet(SeededTest):
+
+    def setUp(self):
+        utt.seed_rng()
+        self.op_class = LogDet
+        self.op = logdet
+
+    def validate(self, input_mat):
+        x = theano.tensor.matrix()
+        f = theano.function([x], self.op(x))
+        out = f(input_mat)
+        svd_diag = np.linalg.svd(input_mat, compute_uv=False)
+        numpy_out = np.sum(np.log(np.abs(svd_diag)))
+
+        # Compare the result computed to the expected value.
+        utt.assert_allclose(numpy_out, out)
+
+        # Test gradient:
+        utt.verify_grad(self.op, [input_mat])
+
+    def test_basic(self):
+        # Calls validate with different params
+        test_case_1 = np.random.randn(3, 3) / np.sqrt(3)
+        test_case_2 = np.random.randn(10, 10) / np.sqrt(10)
+        self.validate(test_case_1.astype(theano.config.floatX))
+        self.validate(test_case_2.astype(theano.config.floatX))
 
 
 class TestGenerator(unittest.TestCase):
     def test_basic(self):
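The SVD-based log-determinant that this test validates matters because det itself overflows long before its logarithm does; a small NumPy illustration (the matrix here is an invented example, not from the PR):

    import numpy as np

    A = 1e3 * np.eye(200)                    # det(A) = 1e600, too large for float64
    print(np.linalg.det(A))                  # inf
    s = np.linalg.svd(A, compute_uv=False)
    print(np.sum(np.log(np.abs(s))))         # ~1381.55, finite
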
41 changes: 38 additions & 3 deletions pymc3/theanof.py
@@ -1,18 +1,20 @@
 import numpy as np
 import theano
 
 from .vartypes import typefilter, continuous_types
 from theano import theano, scalar, tensor as tt
 from theano.gof.graph import inputs
-from theano.gof import Op
+from theano.gof import Op, Apply
 from theano.configparser import change_flags
+from theano.tensor.nlinalg import matrix_inverse
 from .memoize import memoize
 from .blocking import ArrayOrdering
 from .data import DataGenerator
 
 __all__ = ['gradient', 'hessian', 'hessian_diag', 'inputvars',
            'cont_inputs', 'floatX', 'jacobian',
            'CallableTensor', 'join_nonshared_inputs',
-           'make_shared_replacements', 'generator']
+           'make_shared_replacements', 'generator', 'LogDet', 'logdet']
 
 
 def inputvars(a):
@@ -59,11 +61,44 @@ def floatX(X):
 Theano derivative functions
 """
 
+class LogDet(Op):
+    """Compute the logarithm of the absolute determinant of a square
+    matrix M, log(abs(det(M))), on the CPU. Avoids det(M) overflow or
+    underflow.
+
+    Note: once Theano PR #3959 (https://github.com/Theano/Theano/pull/3959/)
+    by harpone is merged, this must be removed.
+    """
+    def make_node(self, x):
+        x = theano.tensor.as_tensor_variable(x)
+        o = theano.tensor.scalar(dtype=x.dtype)
+        return Apply(self, [x], [o])
+
+    def perform(self, node, inputs, outputs):
Review comment (Member): The signature of the overridden method changed; it should be perform(self, node, inputs, output_storage, params=None) for consistency.
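
A sketch of the method under the requested signature (following the reviewer's wording; not part of the merged diff):

    def perform(self, node, inputs, output_storage, params=None):
        (x,) = inputs
        (z,) = output_storage
        s = np.linalg.svd(x, compute_uv=False)
        z[0] = np.asarray(np.sum(np.log(np.abs(s))), dtype=x.dtype)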

Review comment (Member): That's not a crucial thing, so take it into account only if you decide to move logdet to math.py.

+        try:
+            (x,) = inputs
+            (z,) = outputs
+            s = np.linalg.svd(x, compute_uv=False)
+            log_det = np.sum(np.log(np.abs(s)))
+            z[0] = np.asarray(log_det, dtype=x.dtype)
+        except Exception:
+            print('Failed to compute logdet of {}.'.format(x))
+            raise
+
+    def grad(self, inputs, g_outputs):
+        [gz] = g_outputs
+        [x] = inputs
+        return [gz * matrix_inverse(x).T]
+
+    def __str__(self):
+        return "LogDet"
+
+logdet = LogDet()

 def gradient1(f, v):
     """flat gradient of f wrt v"""
     return tt.flatten(tt.grad(f, v, disconnected_inputs='warn'))
 
 
 empty_gradient = tt.zeros(0, dtype='float32')


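The grad method above uses the identity d log|det(X)| / dX = inverse(X).T, which is what verify_grad exercises in the tests. As a closing illustration, a quick comparison of the new op against NumPy's slogdet (a sketch assuming a CPU Theano backend):

    import numpy as np
    import theano
    import theano.tensor as tt
    from pymc3.theanof import logdet

    x = tt.matrix('x')
    f = theano.function([x], logdet(x))

    A = np.random.randn(4, 4).astype(theano.config.floatX)
    sign, ref = np.linalg.slogdet(A)   # log(abs(det(A))), computed via LU
    assert np.allclose(f(A), ref)      # LogDet computes the same value via SVD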