
Commit ca84850

ColCarroll authored and tensorflower-gardener committed
Remove some deprecations:

- (since 2022-06-23) `observations_mask` from `tfd.GaussianProcessRegressionModel`: use `observations_is_missing` (with the opposite sense) instead.
- (since 2022-06-23) `fixed_inputs_mask` from `tfp.math.psd_kernels.SchurComplement`: use `fixed_inputs_is_missing` (with the opposite sense) instead.
- (since 2022-03-01) `variational_loss_fn` from `tfp.vi.fit_surrogate_posterior`: use the `discrepancy_fn` argument to specify a custom divergence, or pass a custom loss directly to `tfp.math.minimize`.
- (since 2022-11-01) `bias_variance` and `slope_variance` from `tfp.math.psd_kernels.Polynomial` and `tfp.math.psd_kernels.Linear`: use `bias_amplitude` and `slope_amplitude` instead.
- (since 2021-11-01) `use_pfor_to_compute_jacobian` from `tfp.math.ode.base.Solver`: it had no effect.
- (since 2020-01-01) `scale_identity_multiplier` from `tfd.MultivariateNormalDiag`: using `scale_diag=np.ones([batch_shape, event_shape]) * np.array(scale_identity_multiplier)[..., None]` will generally work.
- (since 2019-05-22) `step_size_update_fn` from `tfp.mcmc.HamiltonianMonteCarlo`: use `tfp.mcmc.SimpleStepSizeAdaptation` instead.

Migration sketches for several of these appear below.

PiperOrigin-RevId: 503277939
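For downstream code, the renames are mechanical. The following is a minimal migration sketch, not taken from the commit itself: the data, kernel choices, and target log-prob are placeholders, and it assumes a TFP build that includes this change.

import numpy as np
import tensorflow as tf
import tensorflow_probability as tfp

tfd = tfp.distributions
psd_kernels = tfp.math.psd_kernels

# Polynomial/Linear: the old `bias_variance`/`slope_variance` arguments were
# already squared inside the kernel, so switching to the amplitude names is a
# pure rename with no rescaling of values.
kernel = psd_kernels.Polynomial(bias_amplitude=1., slope_amplitude=2.,
                                exponent=3.)

# SchurComplement (and, analogously, GaussianProcessRegressionModel's
# `observations_is_missing`): the new flag has the opposite sense of the old
# mask, so negate it. `mask` here is a placeholder.
mask = tf.constant([True, True, False])
schur = psd_kernels.SchurComplement(
    base_kernel=psd_kernels.ExponentiatedQuadratic(),
    fixed_inputs=tf.random.normal([3, 2]),
    fixed_inputs_is_missing=~mask)

# MultivariateNormalDiag: replace `scale_identity_multiplier=2.` with an
# explicit `scale_diag`, per the recipe in the message above.
mvn = tfd.MultivariateNormalDiag(
    loc=np.zeros([4, 3]), scale_diag=np.ones([4, 3]) * 2.)

# HamiltonianMonteCarlo: replace `step_size_update_fn` with the
# SimpleStepSizeAdaptation transition-kernel wrapper.
hmc = tfp.mcmc.HamiltonianMonteCarlo(
    target_log_prob_fn=lambda x: -x**2, step_size=0.1, num_leapfrog_steps=3)
adaptive_hmc = tfp.mcmc.SimpleStepSizeAdaptation(
    inner_kernel=hmc, num_adaptation_steps=100)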
1 parent 6e7cd3d commit ca84850

14 files changed: +614 -1164 lines

tensorflow_probability/examples/jupyter_notebooks/Linear_Mixed_Effects_Models.ipynb (+182 -233): Large diffs are not rendered by default.

tensorflow_probability/examples/jupyter_notebooks/Multilevel_Modeling_Primer.ipynb (+322 -562): Large diffs are not rendered by default.

tensorflow_probability/examples/jupyter_notebooks/Understanding_TensorFlow_Distributions_Shapes.ipynb (+72 -137): Large diffs are not rendered by default.

tensorflow_probability/python/experimental/linalg/linear_operator_psd_kernel_test.py (+2 -2)

@@ -277,8 +277,8 @@ def kernel_fn(eq_params, poly_params):
     kernel_args = (
         dict(length_scale=tf.random.uniform([], .5, 1.5, dtype=tf.float64),
              amplitude=tf.random.uniform([], 1.5, 2.5, dtype=tf.float64)),
-        dict(bias_variance=tf.random.uniform([feature_dim], .5, 1.5,
-                                             dtype=tf.float64),
+        dict(bias_amplitude=tf.random.uniform([feature_dim], .5, 1.5,
+                                              dtype=tf.float64),
              shift=tf.random.normal([feature_dim], dtype=tf.float64)))

     x1 = tf.random.normal([5, feature_dim], dtype=tf.float64)
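For context, a standalone sketch of the renamed keyword using the public API (the test above uses internal imports instead; values here are arbitrary):

import tensorflow as tf
import tensorflow_probability as tfp

# `bias_amplitude` is a drop-in replacement for the deprecated
# `bias_variance` keyword; the sampled values need no rescaling, because the
# old argument was already squared inside the kernel (see polynomial.py below).
poly = tfp.math.psd_kernels.Polynomial(
    bias_amplitude=tf.random.uniform([], .5, 1.5, dtype=tf.float64),
    shift=tf.random.normal([3], dtype=tf.float64))
x1 = tf.random.normal([5, 3], dtype=tf.float64)
x2 = tf.random.normal([7, 3], dtype=tf.float64)
print(poly.matrix(x1, x2).shape)  # (5, 7)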

tensorflow_probability/python/experimental/mcmc/sample_sequential_monte_carlo_test.py (+4 -6)

@@ -104,6 +104,7 @@ def testMixtureTargetLogProb(self, make_kernel_fn, optimal_accept):
     mixture_weight = tf.constant([w, 1. - w], tf.float64)
     mu = np.ones(nd) * .5
     component_loc = tf.cast(np.asarray([mu, -mu]), tf.float64)
+    scale_diag = tf.tile(tf.constant([[.1], [.2]], dtype=tf.float64), [1, nd])

     proposal = sample.Sample(
         normal.Normal(tf.constant(0., tf.float64), 10.), sample_shape=nd)
@@ -112,8 +113,7 @@ def testMixtureTargetLogProb(self, make_kernel_fn, optimal_accept):
     likelihood_dist = mixture_same_family.MixtureSameFamily(
         mixture_distribution=categorical.Categorical(probs=mixture_weight),
         components_distribution=mvn_diag.MultivariateNormalDiag(
-            loc=component_loc,
-            scale_diag=[[.1], [.2]] * tf.ones_like(component_loc)))
+            loc=component_loc, scale_diag=scale_diag))

     # Uniform prior
     init_log_prob = tf.zeros_like(proposal.log_prob(init_state))
@@ -144,19 +144,17 @@ def testMixtureMultiBatch(self):
     mu = np.ones(nd) * .5
     loc = tf.cast(np.asarray([mu, -mu]), tf.float64)
     component_loc = tf.repeat(loc[tf.newaxis, ...], n_batch, axis=0)
+    scale_diag = tf.tile(tf.constant([[.1], [.2]], dtype=tf.float64), [1, nd])

     likelihood_dist = mixture_same_family.MixtureSameFamily(
         mixture_distribution=categorical.Categorical(probs=mixture_weight),
         components_distribution=mvn_diag.MultivariateNormalDiag(
-            loc=component_loc,
-            scale_diag=tf.constant([[.1], [.2]], tf.float64) *
-            tf.ones_like(component_loc)))
+            loc=component_loc, scale_diag=scale_diag))

     proposal = sample.Sample(
         normal.Normal(tf.constant(0., tf.float64), 10.), sample_shape=nd)
     init_state = proposal.sample([5000, n_batch], seed=seed)
     log_prob_fn = likelihood_dist.log_prob
-    print(log_prob_fn(init_state).shape)

     # Uniform prior
     init_log_prob = tf.zeros_like(log_prob_fn(init_state))
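The `tf.tile` form is value-identical to the broadcast expression it replaces; a quick standalone check (assuming eager execution):

import numpy as np
import tensorflow as tf

nd = 4
mu = np.ones(nd) * .5
component_loc = tf.cast(np.asarray([mu, -mu]), tf.float64)

# Old: broadcast the [2, 1] scales against a ones-like [2, nd] tensor.
old = tf.constant([[.1], [.2]], tf.float64) * tf.ones_like(component_loc)
# New: tile the [2, 1] scales explicitly to shape [2, nd].
new = tf.tile(tf.constant([[.1], [.2]], dtype=tf.float64), [1, nd])

np.testing.assert_allclose(old.numpy(), new.numpy())  # same [2, nd] values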

tensorflow_probability/python/math/ode/base.py (+1 -8)

@@ -26,7 +26,6 @@
 from tensorflow_probability.python.math import gradient as tfp_gradient
 from tensorflow_probability.python.math.ode import runge_kutta_util as rk_util
 from tensorflow_probability.python.math.ode import util
-from tensorflow.python.util import deprecation  # pylint: disable=g-direct-tensorflow-import

 # TODO(b/138303336): Support MATLAB-style events.

@@ -42,16 +41,10 @@
 class Solver(object):
   """Base class for an ODE solver."""

-  @deprecation.deprecated_args(
-      '2021-11-01',
-      'use_pfor_to_compute_jacobian is deprecated, and does nothing.',
-      'use_pfor_to_compute_jacobian')
   def __init__(self,
                make_adjoint_solver_fn,
                validate_args,
-               name,
-               use_pfor_to_compute_jacobian=True):
-    del use_pfor_to_compute_jacobian
+               name):
     self._validate_args = validate_args
     self._name = name
     if make_adjoint_solver_fn is None:
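Since the argument was a no-op, callers migrate by simply deleting it. A hedged sketch with one of the concrete solvers (assuming `tfp.math.ode.DormandPrince`, which builds on this base class):

import tensorflow as tf
import tensorflow_probability as tfp

# If your code passed `use_pfor_to_compute_jacobian=...` to a solver
# constructor, drop the argument; an unknown keyword now raises a TypeError.
solver = tfp.math.ode.DormandPrince()
results = solver.solve(
    ode_fn=lambda t, y: -y,  # dy/dt = -y, so y(t) = y0 * exp(-t)
    initial_time=0.,
    initial_state=tf.constant(1., dtype=tf.float64),
    solution_times=[0.5, 1.])
print(results.states)  # approximately [0.607, 0.368]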

tensorflow_probability/python/math/psd_kernels/polynomial.py (+5 -83)

@@ -22,7 +22,6 @@
 from tensorflow_probability.python.internal import tensor_util
 from tensorflow_probability.python.math.psd_kernels import positive_semidefinite_kernel as psd_kernel
 from tensorflow_probability.python.math.psd_kernels.internal import util
-from tensorflow.python.util import deprecation  # pylint: disable=g-direct-tensorflow-import

 __all__ = [
     'Constant',
@@ -55,14 +54,7 @@ class Polynomial(psd_kernel.AutoCompositeTensorPsdKernel):

   """

-  @deprecation.deprecated_args(
-      '2022-11-01',
-      '`bias_variance` and `slope_variance` are deprecated. Please use '
-      '`bias_amplitude` and `slope_amplitude` instead.',
-      'bias_variance', 'slope_variance')
   def __init__(self,
-               bias_variance=None,
-               slope_variance=None,
                bias_amplitude=None,
                slope_amplitude=None,
                shift=None,
@@ -74,18 +66,6 @@ def __init__(self,
     """Construct a Polynomial kernel instance.

     Args:
-      bias_variance: Deprecated. Non-negative floating point `Tensor` that
-        controls the variance from the origin. If bias = 0, there is no
-        variance and the fitted function goes through the origin. Must be
-        broadcastable with `slope_variance`, `shift`, `exponent`, and inputs
-        to `apply` and `matrix` methods. A value of `None` is treated like 0.
-        Default Value: `None`
-      slope_variance: Deprecated. Non-negative floating point `Tensor` that
-        controls the variance of the regression line slope that is the basis
-        for the polynomial. Must be broadcastable with `bias_variance`, `shift`,
-        `exponent`, and inputs to `apply` and `matrix` methods. A value of
-        `None` is treated like 1.
-        Default Value: `None`
       bias_amplitude: Non-negative floating point `Tensor` that controls the
         stddev from the origin. If bias = 0, there is no stddev and the
         fitted function goes through the origin. Must be broadcastable with
@@ -124,16 +104,10 @@ def __init__(self,
     parameters = dict(locals()) if parameters is None else parameters
     with tf.name_scope(name):
       dtype = util.maybe_get_common_dtype(
-          [bias_variance,
-           slope_variance,
-           bias_amplitude,
+          [bias_amplitude,
            slope_amplitude,
            shift,
            exponent])
-      self._bias_variance = tensor_util.convert_nonref_to_tensor(
-          bias_variance, name='bias_variance', dtype=dtype)
-      self._slope_variance = tensor_util.convert_nonref_to_tensor(
-          slope_variance, name='slope_variance', dtype=dtype)
       self._bias_amplitude = tensor_util.convert_nonref_to_tensor(
           bias_amplitude, name='bias_amplitude', dtype=dtype)
       self._slope_amplitude = tensor_util.convert_nonref_to_tensor(
@@ -156,30 +130,14 @@ def _parameter_properties(cls, dtype):
         bias_amplitude=parameter_properties.ParameterProperties(
             default_constraining_bijector_fn=(
                 lambda: softplus.Softplus(low=dtype_util.eps(dtype)))),
-        bias_variance=parameter_properties.ParameterProperties(
-            default_constraining_bijector_fn=(
-                lambda: softplus.Softplus(low=dtype_util.eps(dtype)))),
         exponent=parameter_properties.ParameterProperties(
             default_constraining_bijector_fn=(
                 lambda: softplus.Softplus(low=dtype_util.eps(dtype)))),
         slope_amplitude=parameter_properties.ParameterProperties(
             default_constraining_bijector_fn=(
                 lambda: softplus.Softplus(low=dtype_util.eps(dtype)))),
-        slope_variance=parameter_properties.ParameterProperties(
-            default_constraining_bijector_fn=(
-                lambda: softplus.Softplus(low=dtype_util.eps(dtype)))),
         shift=parameter_properties.ParameterProperties())

-  @property
-  def bias_variance(self):
-    """Variance on bias parameter."""
-    return self._bias_variance
-
-  @property
-  def slope_variance(self):
-    """Variance on slope parameter."""
-    return self._slope_variance
-
   @property
   def bias_amplitude(self):
     """Stddev on bias parameter."""
@@ -200,16 +158,6 @@ def exponent(self):
     """Exponent of the polynomial term."""
     return self._exponent

-  def _get_bias_amplitude(self):
-    if self.bias_amplitude is not None:
-      return self.bias_amplitude
-    return self.bias_variance
-
-  def _get_slope_amplitude(self):
-    if self.slope_amplitude is not None:
-      return self.slope_amplitude
-    return self.slope_variance
-
   def _apply(self, x1, x2, example_ndims=0):
     if self.shift is None:
       dot_prod = util.sum_rightmost_ndims_preserving_shape(
@@ -226,13 +174,13 @@ def _apply(self, x1, x2, example_ndims=0):
       exponent = util.pad_shape_with_ones(exponent, example_ndims)
       dot_prod = dot_prod ** exponent

-    slope_amplitude = self._get_slope_amplitude()
+    slope_amplitude = self.slope_amplitude
     if slope_amplitude is not None:
       slope_amplitude = tf.convert_to_tensor(slope_amplitude)
       slope_amplitude = util.pad_shape_with_ones(slope_amplitude, example_ndims)
       dot_prod = dot_prod * slope_amplitude**2.

-    bias_amplitude = self._get_bias_amplitude()
+    bias_amplitude = self.bias_amplitude
     if bias_amplitude is not None:
       bias_amplitude = tf.convert_to_tensor(bias_amplitude)
       bias_amplitude = util.pad_shape_with_ones(bias_amplitude, example_ndims)
@@ -247,8 +195,8 @@ def _parameter_control_dependencies(self, is_init):
     ok_to_check = lambda x: (  # pylint:disable=g-long-lambda
         x is not None) and (is_init != tensor_util.is_ref(x))

-    bias_amplitude = self._get_bias_amplitude()
-    slope_amplitude = self._get_slope_amplitude()
+    bias_amplitude = self.bias_amplitude
+    slope_amplitude = self.slope_amplitude

     if ok_to_check(self.exponent):
       exponent = tf.convert_to_tensor(self.exponent)
@@ -296,14 +244,7 @@ class Linear(Polynomial):
   ```
   """

-  @deprecation.deprecated_args(
-      '2022-11-01',
-      '`bias_variance` and `slope_variance` are deprecated. Please use '
-      '`bias_amplitude` and `slope_amplitude` instead.',
-      'bias_variance', 'slope_variance')
   def __init__(self,
-               bias_variance=None,
-               slope_variance=None,
                bias_amplitude=None,
                slope_amplitude=None,
                shift=None,
@@ -314,17 +255,6 @@ def __init__(self,
     """Construct a Linear kernel instance.

     Args:
-      bias_variance: Positive floating point `Tensor` that controls the variance
-        from the origin. If bias = 0, there is no variance and the fitted
-        function goes through the origin (also known as the homogeneous linear
-        kernel). Must be broadcastable with `slope_variance`, `shift` and inputs
-        to `apply` and `matrix` methods. A value of `None` is treated like 0.
-        Default Value: `None`
-      slope_variance: Positive floating point `Tensor` that controls the
-        variance of the regression line slope. Must be broadcastable with
-        `bias_variance`, `shift`, and inputs to `apply` and `matrix` methods. A
-        value of `None` is treated like 1.
-        Default Value: `None`
       bias_amplitude: Non-negative floating point `Tensor` that controls the
         stddev from the origin. If bias = 0, there is no stddev and the
         fitted function goes through the origin. Must be broadcastable with
@@ -354,8 +284,6 @@ def __init__(self,
     """
     parameters = dict(locals()) if parameters is None else parameters
     super(Linear, self).__init__(
-        bias_variance=bias_variance,
-        slope_variance=slope_variance,
         bias_amplitude=bias_amplitude,
         slope_amplitude=slope_amplitude,
         shift=shift,
@@ -372,15 +300,9 @@ def _parameter_properties(cls, dtype):
         bias_amplitude=parameter_properties.ParameterProperties(
             default_constraining_bijector_fn=(
                 lambda: softplus.Softplus(low=dtype_util.eps(dtype)))),
-        bias_variance=parameter_properties.ParameterProperties(
-            default_constraining_bijector_fn=(
-                lambda: softplus.Softplus(low=dtype_util.eps(dtype)))),
         slope_amplitude=parameter_properties.ParameterProperties(
             default_constraining_bijector_fn=(
                 lambda: softplus.Softplus(low=dtype_util.eps(dtype)))),
-        slope_variance=parameter_properties.ParameterProperties(
-            default_constraining_bijector_fn=(
-                lambda: softplus.Softplus(low=dtype_util.eps(dtype)))),
         shift=parameter_properties.ParameterProperties())
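Reading `_apply` above with the surviving parameter names, the kernel computes k(x, y) = bias_amplitude**2 + slope_amplitude**2 * ((x - shift) . (y - shift))**exponent, which is why dropping the `variance` names needs no value changes. A small numeric check of that reading (values arbitrary):

import tensorflow as tf
import tensorflow_probability as tfp

bias, slope, shift, exponent = 0.5, 2., 1., 3.
k = tfp.math.psd_kernels.Polynomial(
    bias_amplitude=bias, slope_amplitude=slope, shift=shift, exponent=exponent)

x = tf.constant([2., 3.])
y = tf.constant([4., 5.])
# Mirror _apply by hand: dot product of shifted inputs, then exponent,
# then the squared amplitudes.
manual = bias**2 + slope**2 * tf.reduce_sum((x - shift) * (y - shift))**exponent
print(k.apply(x, y).numpy(), manual.numpy())  # both 5324.25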

tensorflow_probability/python/math/psd_kernels/schur_complement.py (-45)

@@ -22,7 +22,6 @@
 from tensorflow_probability.python.internal import tensor_util
 from tensorflow_probability.python.math.psd_kernels import positive_semidefinite_kernel as psd_kernel
 from tensorflow_probability.python.math.psd_kernels.internal import util
-from tensorflow.python.util import deprecation  # pylint: disable=g-direct-tensorflow-import


 __all__ = [
@@ -177,15 +176,9 @@ def posterior_mean_fn(x):
   """
   # pylint:disable=invalid-name

-  @deprecation.deprecated_args(
-      '2022-06-23',
-      ('The `fixed_inputs_mask` flag is deprecated; instead use '
-       '`fixed_inputs_is_missing` (with the opposite sense).'),
-      'fixed_inputs_mask')
   def __init__(self,
                base_kernel,
                fixed_inputs,
-               fixed_inputs_mask=None,
                fixed_inputs_is_missing=None,
                diag_shift=None,
                cholesky_fn=None,
@@ -216,10 +209,6 @@ def __init__(self,
         computing the Cholesky decomposition of the k(Z, Z) matrix. The batch
         shape elements of `fixed_inputs` must be broadcast compatible with
         `base_kernel.batch_shape`.
-      fixed_inputs_mask: Deprecated. A boolean Tensor of shape `[..., N]`. When
-        `mask` is not None and an element of `mask` is `False`, this kernel
-        will return values computed as if the divisor matrix did not contain the
-        corresponding row or column.
       fixed_inputs_is_missing: A boolean Tensor of shape `[..., N]`.
         When `is_missing` is not None and an element of `is_missing` is `True`,
         this kernel will return values computed as if the divisor matrix did
@@ -268,12 +257,6 @@ def __init__(self,
     self._fixed_inputs = nest_util.convert_to_nested_tensor(
         fixed_inputs, dtype=dtype, name='fixed_inputs', convert_ref=False,
         allow_packing=True)
-    if ((fixed_inputs_mask is not None) and
-        (fixed_inputs_is_missing is not None)):
-      raise ValueError('Expected at most one of `fixed_inputs_mask` or '
-                       '`fixed_inputs_is_missing`')
-    self._fixed_inputs_mask = tensor_util.convert_nonref_to_tensor(
-        fixed_inputs_mask, dtype=tf.bool, name='fixed_inputs_mask')
     self._fixed_inputs_is_missing = tensor_util.convert_nonref_to_tensor(
         fixed_inputs_is_missing,
         dtype=tf.bool, name='fixed_inputs_is_missing')
@@ -297,15 +280,9 @@ def __init__(self,
         parameters=parameters)

   @staticmethod
-  @deprecation.deprecated_args(
-      '2022-06-23',
-      ('The `fixed_inputs_mask` flag is deprecated; instead use '
-       '`fixed_inputs_is_missing` (with the opposite sense).'),
-      'fixed_inputs_mask')
   def with_precomputed_divisor(
       base_kernel,
       fixed_inputs,
-      fixed_inputs_mask=None,
       fixed_inputs_is_missing=None,
       diag_shift=None,
       cholesky_fn=None,
@@ -345,10 +322,6 @@ def with_precomputed_divisor(
         computing the Cholesky decomposition of the k(Z, Z) matrix. The batch
         shape elements of `fixed_inputs` must be broadcast compatible with
         `base_kernel.batch_shape`.
-      fixed_inputs_mask: Deprecated. A boolean Tensor of shape `[..., N]`. When
-        `mask` is not None and an element of `mask` is `False`, the returned
-        kernel will return values computed as if the divisor matrix did not
-        contain the corresponding row or column.
       fixed_inputs_is_missing: A boolean Tensor of shape `[..., N]`. When
         `is_missing` is not None and an element of `is_missing` is `True`, the
         returned kernel will return values computed as if the divisor matrix
@@ -381,13 +354,6 @@ def with_precomputed_divisor(
       float_dtype = dtype
     fixed_inputs = nest_util.convert_to_nested_tensor(
         fixed_inputs, dtype=dtype, allow_packing=True)
-    if ((fixed_inputs_mask is not None) and
-        (fixed_inputs_is_missing is not None)):
-      raise ValueError('Expected at most one of `fixed_inputs_mask` or '
-                       '`fixed_inputs_is_missing`')
-    if fixed_inputs_mask is not None:
-      fixed_inputs_is_missing = ~tf.convert_to_tensor(
-          fixed_inputs_mask, tf.bool)
     if fixed_inputs_is_missing is not None:
       fixed_inputs_is_missing = tf.convert_to_tensor(
           fixed_inputs_is_missing, tf.bool)
@@ -434,8 +400,6 @@ def _get_fixed_inputs_is_missing(self):
     fixed_inputs_is_missing = self._fixed_inputs_is_missing
     if fixed_inputs_is_missing is not None:
       fixed_inputs_is_missing = tf.convert_to_tensor(fixed_inputs_is_missing)
-    if self._fixed_inputs_mask is not None:
-      fixed_inputs_is_missing = ~tf.convert_to_tensor(self._fixed_inputs_mask)
     return fixed_inputs_is_missing

   def _apply(self, x1, x2, example_ndims):
@@ -561,8 +525,6 @@ def _parameter_properties(cls, dtype):
         fixed_inputs=parameter_properties.ParameterProperties(
             event_ndims=lambda self: tf.nest.map_structure(  # pylint: disable=g-long-lambda
                 lambda nd: nd + 1, self.base_kernel.feature_ndims)),
-        fixed_inputs_mask=parameter_properties.ParameterProperties(
-            event_ndims=1),
         fixed_inputs_is_missing=parameter_properties.ParameterProperties(
             event_ndims=1),
         diag_shift=parameter_properties.ParameterProperties(
@@ -601,14 +563,7 @@ def _divisor_matrix_cholesky(
   def divisor_matrix_cholesky(
       self,
       fixed_inputs=None,
-      fixed_inputs_mask=None,
      fixed_inputs_is_missing=None):
     if self._precomputed_divisor_matrix_cholesky is not None:
       return self._precomputed_divisor_matrix_cholesky
-    if ((fixed_inputs_mask is not None) and
-        (fixed_inputs_is_missing is not None)):
-      raise ValueError('Expected only one of `fixed_inputs_mask` or '
-                       '`fixed_inputs_is_missing` to be set.')
-    if fixed_inputs_mask is not None:
-      fixed_inputs_is_missing = ~tf.convert_to_tensor(fixed_inputs_mask)
     return self._divisor_matrix_cholesky(fixed_inputs, fixed_inputs_is_missing)
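A migration sketch for this file's change, assuming a kept-points mask in the old sense (base kernel and data are placeholders):

import tensorflow as tf
import tensorflow_probability as tfp

psd_kernels = tfp.math.psd_kernels

fixed_inputs = tf.random.normal([4, 2], dtype=tf.float64)
# Old sense (`fixed_inputs_mask`): True meant "keep this fixed input".
mask = tf.constant([True, False, True, True])

# New sense: True marks a *missing* fixed input, so negate the old mask,
# exactly as the removed compatibility shim above did.
schur = psd_kernels.SchurComplement(
    base_kernel=psd_kernels.ExponentiatedQuadratic(
        amplitude=tf.constant(1., tf.float64)),
    fixed_inputs=fixed_inputs,
    fixed_inputs_is_missing=~mask)

x = tf.random.normal([3, 2], dtype=tf.float64)
print(schur.matrix(x, x).shape)  # (3, 3)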
