|
27 | 27 | from aesara.tensor.random.op import RandomVariable
|
28 | 28 |
|
29 | 29 | from pymc.aesaraf import constant_fold, floatX, intX
|
30 |
| -from pymc.distributions import distribution, multivariate |
31 |
| -from pymc.distributions.continuous import Flat, Normal, get_tau_sigma |
| 30 | +from pymc.distributions import distribution |
| 31 | +from pymc.distributions.continuous import Normal, get_tau_sigma |
32 | 32 | from pymc.distributions.distribution import (
|
33 | 33 | Distribution,
|
34 | 34 | SymbolicRandomVariable,
|
35 | 35 | _moment,
|
36 | 36 | moment,
|
37 | 37 | )
|
38 | 38 | from pymc.distributions.logprob import ignore_logprob, logp
|
| 39 | +from pymc.distributions.multivariate import MvNormal, MvStudentT |
39 | 40 | from pymc.distributions.shape_utils import (
|
40 | 41 | _change_dist_size,
|
41 | 42 | change_dist_size,
|
42 | 43 | get_support_shape,
|
43 | 44 | get_support_shape_1d,
|
44 |
| - to_tuple, |
45 | 45 | )
|
46 | 46 | from pymc.exceptions import NotConstantValueError
|
47 | 47 | from pymc.util import check_dist_not_registered
|
@@ -300,6 +300,118 @@ def get_dists(cls, mu=0.0, sigma=1.0, *, init_dist=None, **kwargs):
|
300 | 300 | return init_dist, innovation_dist, kwargs
|
301 | 301 |
|
302 | 302 |
|
class MvGaussianRandomWalk(PredefinedRandomWalk):
    r"""Random Walk with Multivariate Normal innovations.

    Parameters
    ----------
    mu : tensor_like of float
        innovation drift
    cov : tensor_like of float
        pos def matrix, innovation covariance matrix
    tau : tensor_like of float
        pos def matrix, inverse covariance matrix
    chol : tensor_like of float
        Cholesky decomposition of covariance matrix
    lower : bool, default=True
        Whether the cholesky factor is given as a lower triangular matrix.
    init_dist : distribution
        Unnamed multivariate distribution of the initial value. Unnamed refers to distributions
        created with the ``.dist()`` API.

        .. warning:: init_dist will be cloned, rendering it independent of the one passed as input.

    steps : int, optional
        Number of steps in Random Walk (steps > 0). Only needed if shape is not
        provided.

    Notes
    -----
    Only one of cov, tau or chol is required.

    """

    @classmethod
    def get_dists(cls, mu, *, cov=None, tau=None, chol=None, lower=True, init_dist=None, **kwargs):
        """Build the ``(init_dist, innovation_dist, kwargs)`` triple for the walk.

        Returns the distribution of the initial value, the per-step
        multivariate-normal innovation distribution, and the remaining
        keyword arguments (forwarded to the RandomWalk constructor).
        """
        # Backwards-compatibility shim: the parameter was renamed from
        # `init` to `init_dist`; honor the old spelling but warn.
        if "init" in kwargs:
            warnings.warn(
                "init parameter is now called init_dist. Using init will raise an error "
                "in a future release.",
                FutureWarning,
            )
            init_dist = kwargs.pop("init")

        if init_dist is None:
            warnings.warn(
                "Initial distribution not specified, defaulting to `MvNormal.dist(0, I*100)`. "
                "You can specify an init_dist manually to suppress this warning.",
                UserWarning,
            )
            # Diffuse default prior over the event dimension (last axis of mu):
            # zero mean and covariance 100 * I.
            init_dist = MvNormal.dist(at.zeros_like(mu.shape[-1]), at.eye(mu.shape[-1]) * 100)

        innovation_dist = MvNormal.dist(mu=mu, cov=cov, tau=tau, chol=chol, lower=lower)
        return init_dist, innovation_dist, kwargs
| 355 | + |
class MvStudentTRandomWalk(PredefinedRandomWalk):
    r"""Multivariate Random Walk with StudentT innovations.

    Parameters
    ----------
    nu : int
        degrees of freedom
    mu : tensor_like of float
        innovation drift
    scale : tensor_like of float
        pos def matrix, innovation covariance matrix
    tau : tensor_like of float
        pos def matrix, inverse covariance matrix
    chol : tensor_like of float
        Cholesky decomposition of covariance matrix
    lower : bool, default=True
        Whether the cholesky factor is given as a lower triangular matrix.
    init_dist : distribution
        Unnamed multivariate distribution of the initial value. Unnamed refers to distributions
        created with the ``.dist()`` API.

        .. warning:: init_dist will be cloned, rendering it independent of the one passed as input.

    steps : int, optional
        Number of steps in Random Walk (steps > 0). Only needed if shape is not
        provided.

    Notes
    -----
    Only one of cov, tau or chol is required.

    """

    @classmethod
    def get_dists(
        cls, *, nu, mu, scale=None, tau=None, chol=None, lower=True, init_dist=None, **kwargs
    ):
        """Build the ``(init_dist, innovation_dist, kwargs)`` triple for the walk.

        Returns the distribution of the initial value, the per-step
        multivariate-StudentT innovation distribution, and the remaining
        keyword arguments (forwarded to the RandomWalk constructor).
        """
        # Backwards-compatibility shim: the parameter was renamed from
        # `init` to `init_dist`; honor the old spelling but warn.
        if "init" in kwargs:
            warnings.warn(
                "init parameter is now called init_dist. Using init will raise an error "
                "in a future release.",
                FutureWarning,
            )
            init_dist = kwargs.pop("init")

        if init_dist is None:
            warnings.warn(
                "Initial distribution not specified, defaulting to `MvNormal.dist(0, I*100)`. "
                "You can specify an init_dist manually to suppress this warning.",
                UserWarning,
            )
            # Diffuse default prior over the event dimension (last axis of mu):
            # zero mean and covariance 100 * I.
            init_dist = MvNormal.dist(at.zeros_like(mu.shape[-1]), at.eye(mu.shape[-1]) * 100)

        # `cov` is accepted via kwargs and forwarded so MvStudentT can raise
        # its own error if multiple scale parametrizations are given.
        innovation_dist = MvStudentT.dist(
            nu=nu, mu=mu, scale=scale, tau=tau, chol=chol, lower=lower, cov=kwargs.pop("cov", None)
        )
        return init_dist, innovation_dist, kwargs
| 413 | + |
| 414 | + |
303 | 415 | class AutoRegressiveRV(SymbolicRandomVariable):
|
304 | 416 | """A placeholder used to specify a log-likelihood for an AR sub-graph."""
|
305 | 417 |
|
@@ -817,171 +929,3 @@ def logp(self, x):
|
817 | 929 |
|
818 | 930 | def _distr_parameters_for_repr(self):
|
819 | 931 | return ["dt"]
|
820 |
| - |
821 |
| - |
822 |
| -class MvGaussianRandomWalk(distribution.Continuous): |
823 |
| - r""" |
824 |
| - Multivariate Random Walk with Normal innovations |
825 |
| -
|
826 |
| - Parameters |
827 |
| - ---------- |
828 |
| - mu: tensor |
829 |
| - innovation drift, defaults to 0.0 |
830 |
| - cov: tensor |
831 |
| - pos def matrix, innovation covariance matrix |
832 |
| - tau: tensor |
833 |
| - pos def matrix, inverse covariance matrix |
834 |
| - chol: tensor |
835 |
| - Cholesky decomposition of covariance matrix |
836 |
| - init: distribution |
837 |
| - distribution for initial value (Defaults to Flat()) |
838 |
| -
|
839 |
| - Notes |
840 |
| - ----- |
841 |
| - Only one of cov, tau or chol is required. |
842 |
| -
|
843 |
| - """ |
844 |
| - |
845 |
| - def __new__(cls, *args, **kwargs): |
846 |
| - raise NotImplementedError(f"{cls.__name__} has not yet been ported to PyMC 4.0.") |
847 |
| - |
848 |
| - @classmethod |
849 |
| - def dist(cls, *args, **kwargs): |
850 |
| - raise NotImplementedError(f"{cls.__name__} has not yet been ported to PyMC 4.0.") |
851 |
| - |
852 |
| - def __init__( |
853 |
| - self, mu=0.0, cov=None, tau=None, chol=None, lower=True, init=None, *args, **kwargs |
854 |
| - ): |
855 |
| - super().__init__(*args, **kwargs) |
856 |
| - |
857 |
| - self.init = init or Flat.dist() |
858 |
| - self.innovArgs = (mu, cov, tau, chol, lower) |
859 |
| - self.innov = multivariate.MvNormal.dist(*self.innovArgs, shape=self.shape) |
860 |
| - self.mean = at.as_tensor_variable(0.0) |
861 |
| - |
862 |
| - def logp(self, x): |
863 |
| - """ |
864 |
| - Calculate log-probability of Multivariate Gaussian |
865 |
| - Random Walk distribution at specified value. |
866 |
| -
|
867 |
| - Parameters |
868 |
| - ---------- |
869 |
| - x: numeric |
870 |
| - Value for which log-probability is calculated. |
871 |
| -
|
872 |
| - Returns |
873 |
| - ------- |
874 |
| - TensorVariable |
875 |
| - """ |
876 |
| - |
877 |
| - if x.ndim == 1: |
878 |
| - x = x[np.newaxis, :] |
879 |
| - |
880 |
| - x_im1 = x[:-1] |
881 |
| - x_i = x[1:] |
882 |
| - |
883 |
| - return self.init.logp_sum(x[0]) + self.innov.logp_sum(x_i - x_im1) |
884 |
| - |
885 |
| - def _distr_parameters_for_repr(self): |
886 |
| - return ["mu", "cov"] |
887 |
| - |
888 |
| - def random(self, point=None, size=None): |
889 |
| - """ |
890 |
| - Draw random values from MvGaussianRandomWalk. |
891 |
| -
|
892 |
| - Parameters |
893 |
| - ---------- |
894 |
| - point: dict, optional |
895 |
| - Dict of variable values on which random values are to be |
896 |
| - conditioned (uses default point if not specified). |
897 |
| - size: int or tuple of ints, optional |
898 |
| - Desired size of random sample (returns one sample if not |
899 |
| - specified). |
900 |
| -
|
901 |
| - Returns |
902 |
| - ------- |
903 |
| - array |
904 |
| -
|
905 |
| -
|
906 |
| - Examples |
907 |
| - ------- |
908 |
| -
|
909 |
| - .. code-block:: python |
910 |
| -
|
911 |
| - with pm.Model(): |
912 |
| - mu = np.array([1.0, 0.0]) |
913 |
| - cov = np.array([[1.0, 0.0], |
914 |
| - [0.0, 2.0]]) |
915 |
| -
|
916 |
| - # draw one sample from a 2-dimensional Gaussian random walk with 10 timesteps |
917 |
| - sample = MvGaussianRandomWalk(mu, cov, shape=(10, 2)).random() |
918 |
| -
|
919 |
| - # draw three samples from a 2-dimensional Gaussian random walk with 10 timesteps |
920 |
| - sample = MvGaussianRandomWalk(mu, cov, shape=(10, 2)).random(size=3) |
921 |
| -
|
922 |
| - # draw four samples from a 2-dimensional Gaussian random walk with 10 timesteps, |
923 |
| - # indexed with a (2, 2) array |
924 |
| - sample = MvGaussianRandomWalk(mu, cov, shape=(10, 2)).random(size=(2, 2)) |
925 |
| -
|
926 |
| - """ |
927 |
| - |
928 |
| - # for each draw specified by the size input, we need to draw time_steps many |
929 |
| - # samples from MvNormal. |
930 |
| - |
931 |
| - size = to_tuple(size) |
932 |
| - multivariate_samples = self.innov.random(point=point, size=size) |
933 |
| - # this has shape (size, self.shape) |
934 |
| - if len(self.shape) == 2: |
935 |
| - # have time dimension in first slot of shape. Therefore the time |
936 |
| - # component can be accessed with the index equal to the length of size. |
937 |
| - time_axis = len(size) |
938 |
| - multivariate_samples = multivariate_samples.cumsum(axis=time_axis) |
939 |
| - if time_axis != 0: |
940 |
| - # this for loop covers the case where size is a tuple |
941 |
| - for idx in np.ndindex(size): |
942 |
| - multivariate_samples[idx] = ( |
943 |
| - multivariate_samples[idx] - multivariate_samples[idx][0] |
944 |
| - ) |
945 |
| - else: |
946 |
| - # size was passed as None |
947 |
| - multivariate_samples = multivariate_samples - multivariate_samples[0] |
948 |
| - |
949 |
| - # if the above statement fails, then only a spatial dimension was passed in for self.shape. |
950 |
| - # Therefore don't subtract off the initial value since otherwise you get all zeros |
951 |
| - # as your output. |
952 |
| - return multivariate_samples |
953 |
| - |
954 |
| - |
955 |
| -class MvStudentTRandomWalk(MvGaussianRandomWalk): |
956 |
| - r""" |
957 |
| - Multivariate Random Walk with StudentT innovations |
958 |
| -
|
959 |
| - Parameters |
960 |
| - ---------- |
961 |
| - nu: degrees of freedom |
962 |
| - mu: tensor |
963 |
| - innovation drift, defaults to 0.0 |
964 |
| - cov: tensor |
965 |
| - pos def matrix, innovation covariance matrix |
966 |
| - tau: tensor |
967 |
| - pos def matrix, inverse covariance matrix |
968 |
| - chol: tensor |
969 |
| - Cholesky decomposition of covariance matrix |
970 |
| - init: distribution |
971 |
| - distribution for initial value (Defaults to Flat()) |
972 |
| - """ |
973 |
| - |
974 |
| - def __new__(cls, *args, **kwargs): |
975 |
| - raise NotImplementedError(f"{cls.__name__} has not yet been ported to PyMC 4.0.") |
976 |
| - |
977 |
| - @classmethod |
978 |
| - def dist(cls, *args, **kwargs): |
979 |
| - raise NotImplementedError(f"{cls.__name__} has not yet been ported to PyMC 4.0.") |
980 |
| - |
981 |
| - def __init__(self, nu, *args, **kwargs): |
982 |
| - super().__init__(*args, **kwargs) |
983 |
| - self.nu = at.as_tensor_variable(nu) |
984 |
| - self.innov = multivariate.MvStudentT.dist(self.nu, None, *self.innovArgs) |
985 |
| - |
986 |
| - def _distr_parameters_for_repr(self): |
987 |
| - return ["nu", "mu", "cov"] |
0 commit comments