Commit 7af102d

Armavica authored and ricardoV94 committed
Fix broadcastable -> shape deprecations
1 parent 33c8004 commit 7af102d

7 files changed, +19 -33 lines

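For reference, every hunk below applies the same migration: the deprecated `broadcastable=` keyword of `aesara.shared` and `at.TensorType` is renamed to `shape=`, keeping the boolean broadcast pattern as the argument value. A minimal before/after sketch, assuming a recent Aesara release where the `shape` keyword is available; the toy array and variable names are illustrative, not from the commit:

import aesara
import numpy as np

x0 = np.zeros((1, 3))

# Old, deprecated spelling (the kind of call this commit removes):
#   s = aesara.shared(x0, "x_shared", broadcastable=(True, False))

# New spelling used throughout the commit: the same boolean pattern is
# passed through the `shape` keyword instead.
s = aesara.shared(x0, "x_shared", shape=(True, False))
print(s.broadcastable)  # (True, False): only the first axis broadcasts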

pymc/aesaraf.py (+1 -1)

@@ -619,7 +619,7 @@ def make_shared_replacements(point, vars, model):
     """
     othervars = set(model.value_vars) - set(vars)
     return {
-        var: aesara.shared(point[var.name], var.name + "_shared", broadcastable=var.broadcastable)
+        var: aesara.shared(point[var.name], var.name + "_shared", shape=var.broadcastable)
         for var in othervars
     }

pymc/distributions/continuous.py (+3 -3)

@@ -3976,9 +3976,9 @@ def make_node(self, x, h, z):
         x = at.as_tensor_variable(floatX(x))
         h = at.as_tensor_variable(floatX(h))
         z = at.as_tensor_variable(floatX(z))
-        shape = broadcast_shape(x, h, z)
-        broadcastable = [] if not shape else [False] * len(shape)
-        return Apply(self, [x, h, z], [at.TensorType(aesara.config.floatX, broadcastable)()])
+        bshape = broadcast_shape(x, h, z)
+        shape = [False] * len(bshape)
+        return Apply(self, [x, h, z], [at.TensorType(aesara.config.floatX, shape=shape)()])

     def perform(self, node, ins, outs):
         x, h, z = ins[0], ins[1], ins[2]
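The renamed variables keep the original semantics: `broadcast_shape` determines the output rank, and the output type is built with one non-broadcastable entry per dimension. A small sketch of that logic in isolation, assuming `broadcast_shape` is importable from `aesara.tensor.extra_ops`; the example variables are made up:

import aesara
import aesara.tensor as at
from aesara.tensor.extra_ops import broadcast_shape

x = at.vector("x")
h = at.scalar("h")
z = at.matrix("z")

# The broadcasted shape of the inputs fixes the output ndim; each entry of
# the type's shape is False, i.e. not known to be broadcastable.
bshape = broadcast_shape(x, h, z)
out_type = at.TensorType(aesara.config.floatX, shape=[False] * len(bshape))
print(out_type.ndim)  # 2: a matrix broadcast with a vector and a scalar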

pymc/distributions/multivariate.py (+1 -1)

@@ -834,7 +834,7 @@ class PosDefMatrix(Op):
     def make_node(self, x):
         x = at.as_tensor_variable(x)
         assert x.ndim == 2
-        o = TensorType(dtype="int8", broadcastable=[])()
+        o = TensorType(dtype="int8", shape=[])()
         return Apply(self, [x], [o])

     # Python implementation:

pymc/model.py (+1 -1)

@@ -362,7 +362,7 @@ def __init__(
         self._extra_vars_shared = {}
         for var, value in extra_vars_and_values.items():
             shared = aesara.shared(
-                value, var.name + "_shared__", broadcastable=[s == 1 for s in value.shape]
+                value, var.name + "_shared__", shape=[s == 1 for s in value.shape]
             )
             self._extra_vars_shared[var.name] = shared
             givens.append((var, shared))
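Here the broadcast pattern is not taken from an existing variable but derived from the concrete initial value: every length-1 axis is flagged as broadcastable. A tiny illustration of what that comprehension yields; the example array is hypothetical:

import numpy as np

value = np.zeros((1, 3))
# Length-1 axes become True (broadcastable), all others False.
print([s == 1 for s in value.shape])  # [True, False]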

pymc/smc/smc.py (+1 -1)

@@ -565,7 +565,7 @@ def _logp_forw(point, out_vars, in_vars, shared):
     new_in_vars = []
     for in_var in in_vars:
         if in_var.dtype in discrete_types:
-            float_var = at.TensorType("floatX", in_var.broadcastable)(in_var.name)
+            float_var = at.TensorType("floatX", in_var.shape)(in_var.name)
             new_in_vars.append(float_var)
             replace_int_input[in_var] = at.round(float_var).astype(in_var.dtype)
         else:

pymc/tests/test_sampling.py (+2 -2)

@@ -474,7 +474,7 @@ def test_choose_chains(n_points, tune, expected_length, expected_n_traces):
 @pytest.mark.xfail(condition=(aesara.config.floatX == "float32"), reason="Fails on float32")
 class TestNamedSampling(SeededTest):
     def test_shared_named(self):
-        G_var = shared(value=np.atleast_2d(1.0), broadcastable=(True, False), name="G")
+        G_var = shared(value=np.atleast_2d(1.0), shape=(True, False), name="G")

         with pm.Model():
             theta0 = pm.Normal(

@@ -491,7 +491,7 @@ def test_shared_named(self):
         assert np.isclose(res, 0.0)

     def test_shared_unnamed(self):
-        G_var = shared(value=np.atleast_2d(1.0), broadcastable=(True, False))
+        G_var = shared(value=np.atleast_2d(1.0), shape=(True, False))
         with pm.Model():
             theta0 = pm.Normal(
                 "theta0",

pymc/variational/updates.py (+10 -24)

@@ -276,7 +276,7 @@ def apply_momentum(updates, params=None, momentum=0.9):
     for param in params:
         value = param.get_value(borrow=True)
         velocity = aesara.shared(
-            np.zeros(value.shape, dtype=value.dtype), broadcastable=param.broadcastable
+            np.zeros(value.shape, dtype=value.dtype), shape=param.broadcastable
         )
         x = momentum * velocity + updates[param]
         updates[velocity] = x - param

@@ -391,7 +391,7 @@ def apply_nesterov_momentum(updates, params=None, momentum=0.9):
     for param in params:
         value = param.get_value(borrow=True)
         velocity = aesara.shared(
-            np.zeros(value.shape, dtype=value.dtype), broadcastable=param.broadcastable
+            np.zeros(value.shape, dtype=value.dtype), shape=param.broadcastable
         )
         x = momentum * velocity + updates[param] - param
         updates[velocity] = x

@@ -534,9 +534,7 @@ def adagrad(loss_or_grads=None, params=None, learning_rate=1.0, epsilon=1e-6):
     for param, grad in zip(params, grads):
         value = param.get_value(borrow=True)
-        accu = aesara.shared(
-            np.zeros(value.shape, dtype=value.dtype), broadcastable=param.broadcastable
-        )
+        accu = aesara.shared(np.zeros(value.shape, dtype=value.dtype), shape=param.broadcastable)
         accu_new = accu + grad**2
         updates[accu] = accu_new
         updates[param] = param - (learning_rate * grad / at.sqrt(accu_new + epsilon))

@@ -662,9 +660,7 @@ def rmsprop(loss_or_grads=None, params=None, learning_rate=1.0, rho=0.9, epsilon
     for param, grad in zip(params, grads):
         value = param.get_value(borrow=True)
-        accu = aesara.shared(
-            np.zeros(value.shape, dtype=value.dtype), broadcastable=param.broadcastable
-        )
+        accu = aesara.shared(np.zeros(value.shape, dtype=value.dtype), shape=param.broadcastable)
         accu_new = rho * accu + (one - rho) * grad**2
         updates[accu] = accu_new
         updates[param] = param - (learning_rate * grad / at.sqrt(accu_new + epsilon))

@@ -755,12 +751,10 @@ def adadelta(loss_or_grads=None, params=None, learning_rate=1.0, rho=0.95, epsil
     for param, grad in zip(params, grads):
         value = param.get_value(borrow=True)
         # accu: accumulate gradient magnitudes
-        accu = aesara.shared(
-            np.zeros(value.shape, dtype=value.dtype), broadcastable=param.broadcastable
-        )
+        accu = aesara.shared(np.zeros(value.shape, dtype=value.dtype), shape=param.broadcastable)
         # delta_accu: accumulate update magnitudes (recursively!)
         delta_accu = aesara.shared(
-            np.zeros(value.shape, dtype=value.dtype), broadcastable=param.broadcastable
+            np.zeros(value.shape, dtype=value.dtype), shape=param.broadcastable
         )

         # update accu (as in rmsprop)

@@ -850,12 +844,8 @@ def adam(
     for param, g_t in zip(params, all_grads):
         value = param.get_value(borrow=True)
-        m_prev = aesara.shared(
-            np.zeros(value.shape, dtype=value.dtype), broadcastable=param.broadcastable
-        )
-        v_prev = aesara.shared(
-            np.zeros(value.shape, dtype=value.dtype), broadcastable=param.broadcastable
-        )
+        m_prev = aesara.shared(np.zeros(value.shape, dtype=value.dtype), shape=param.broadcastable)
+        v_prev = aesara.shared(np.zeros(value.shape, dtype=value.dtype), shape=param.broadcastable)

         m_t = beta1 * m_prev + (one - beta1) * g_t
         v_t = beta2 * v_prev + (one - beta2) * g_t**2

@@ -938,12 +928,8 @@ def adamax(
     for param, g_t in zip(params, all_grads):
         value = param.get_value(borrow=True)
-        m_prev = aesara.shared(
-            np.zeros(value.shape, dtype=value.dtype), broadcastable=param.broadcastable
-        )
-        u_prev = aesara.shared(
-            np.zeros(value.shape, dtype=value.dtype), broadcastable=param.broadcastable
-        )
+        m_prev = aesara.shared(np.zeros(value.shape, dtype=value.dtype), shape=param.broadcastable)
+        u_prev = aesara.shared(np.zeros(value.shape, dtype=value.dtype), shape=param.broadcastable)

         m_t = beta1 * m_prev + (one - beta1) * g_t
         u_t = at.maximum(beta2 * u_prev, abs(g_t))
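Since every hunk in this file touches the optimizer state initialisation, a short usage sketch may help. Only the module path and the `adagrad(loss_or_grads, params, learning_rate, epsilon)` signature come from the diff above; the toy loss, the parameter `w`, the learning rate, and the step count are illustrative assumptions:

import aesara
import aesara.tensor as at
import numpy as np
from pymc.variational.updates import adagrad

# A toy quadratic loss over a single shared parameter.
w = aesara.shared(np.zeros(3, dtype=aesara.config.floatX), name="w")
loss = at.sum((w - 1.0) ** 2)

# adagrad builds one accumulator shared variable per parameter (the lines this
# commit rewrites) and returns an update mapping for aesara.function.
updates = adagrad(loss, [w], learning_rate=0.1)
step = aesara.function([], loss, updates=updates)

for _ in range(200):
    step()
print(w.get_value())  # moves toward [1., 1., 1.]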
