Merge pull request #2076 from GPflow/develop
Release 2.8.1
uri-granta authored Jun 27, 2023
2 parents 024d381 + a602aff commit 8549323
Showing 10 changed files with 46 additions and 16 deletions.
4 changes: 2 additions & 2 deletions .circleci/config.yml
@@ -12,13 +12,13 @@ parameters:
default: "2.4.0"
max_tf_ver:
type: string
default: "2.10.0"
default: "2.12.0"
min_tfp_ver:
type: string
default: "0.12.0"
max_tfp_ver:
type: string
default: "0.18.0"
default: "0.20.0"
min_venv_dir:
type: string
default: min_venv
3 changes: 3 additions & 0 deletions .gitignore
@@ -77,3 +77,6 @@ target/

# TensorFlow logs
events.out.tfevents.*

# pyenv
.python-version
1 change: 1 addition & 0 deletions CONTRIBUTORS.md
@@ -88,5 +88,6 @@ Because GitHub's [graph of contributors](http://github.com/GPflow/GPflow/graphs/
[@sethaxen](https://github.com/sethaxen)
[@khurram-ghani](https://github.com/khurram-ghani)
[@partev](https://github.com/partev)
[@uri-granta](https://github.com/uri-granta)

Add yourself when you first contribute to GPflow's code, tests, or documentation!
15 changes: 15 additions & 0 deletions RELEASE.md
@@ -66,6 +66,21 @@ This release contains contributions from:
<INSERT>, <NAME>, <HERE>, <USING>, <GITHUB>, <HANDLE>


# Release 2.8.1

A small fix to ensure support for (and testing with) TensorFlow 2.12.

## Bug Fixes and Other Changes

* Support and test with TensorFlow 2.12

## Thanks to our Contributors

This release contains contributions from:

uri-granta


# Release 2.8.0

The main focus of this release is to provide users control over arguments for `tf.function`
2 changes: 1 addition & 1 deletion VERSION
@@ -1 +1 @@
-2.8.0
+2.8.1
2 changes: 1 addition & 1 deletion doc/sphinx/conf.py
@@ -24,7 +24,7 @@
author = "The GPflow Contributors"

# The full version, including alpha/beta/rc tags
release = "2.8.0"
release = "2.8.1"

# -- General configuration ---------------------------------------------------

4 changes: 4 additions & 0 deletions doc/sphinx/installation.rst
@@ -25,6 +25,10 @@ explicitly install specific versions of these.
+---------------------+---------------------------------+
| 2.10.*              | 0.18.*                          |
+---------------------+---------------------------------+
| 2.11.*              | 0.19.*                          |
+---------------------+---------------------------------+
| 2.12.*              | 0.20.*                          |
+---------------------+---------------------------------+

Second, a word of warning about new Mac computers. On new Mac machines you will need to install
``tensorflow-macos`` instead of the regular ``tensorflow``.
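The added rows extend the supported TensorFlow / TensorFlow Probability pairings up to 2.12 / 0.20. A minimal sketch (not part of the diff) for checking which pairing an environment actually has, to compare against the table above:

```python
# Print the installed TensorFlow / TensorFlow Probability versions so they
# can be compared against the compatibility table above.
import tensorflow as tf
import tensorflow_probability as tfp

print("TensorFlow:", tf.__version__)               # e.g. 2.12.x ...
print("TensorFlow Probability:", tfp.__version__)  # ... pairs with 0.20.x
```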
20 changes: 13 additions & 7 deletions doc/sphinx/notebooks/advanced/natural_gradients.pct.py
@@ -22,6 +22,12 @@
import numpy as np
import tensorflow as tf

try:
# use legacy Adam optimizer to support old TF versions
from tensorflow.keras.optimizers.legacy import Adam
except ImportError:
from tensorflow.keras.optimizers import Adam

import gpflow
from gpflow import set_trainable
from gpflow.ci_utils import reduce_in_tests
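In recent TensorFlow versions (roughly 2.11 onwards) `tf.keras.optimizers.Adam` points to a new optimizer implementation, while the previous one is kept under `tensorflow.keras.optimizers.legacy`; older versions lack the `legacy` module, hence the import guard above. A minimal usage sketch under that assumption (the variable, learning rate and loss below are made up for illustration):

```python
import tensorflow as tf

# Re-using the import guard from the diff above so the sketch is self-contained.
try:
    from tensorflow.keras.optimizers.legacy import Adam
except ImportError:
    from tensorflow.keras.optimizers import Adam

x = tf.Variable(3.0)  # made-up variable, just to show the optimizer API
opt = Adam(learning_rate=0.1)
opt.minimize(lambda: (x - 1.0) ** 2, var_list=[x])  # callable loss, one step
print(x.numpy())  # x has moved slightly towards 1.0
```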
@@ -115,8 +121,8 @@
set_trainable(vgp.q_mu, False)
set_trainable(vgp.q_sqrt, False)

-adam_opt_for_vgp = tf.optimizers.Adam(adam_learning_rate)
-adam_opt_for_gpr = tf.optimizers.Adam(adam_learning_rate)
+adam_opt_for_vgp = Adam(adam_learning_rate)
+adam_opt_for_gpr = Adam(adam_learning_rate)

# %%
for i in range(iterations):
@@ -243,15 +249,15 @@
)

# ordinary gradients with Adam for SVGP
-ordinary_adam_opt = tf.optimizers.Adam(adam_learning_rate)
+ordinary_adam_opt = Adam(adam_learning_rate)

# NatGrads and Adam for SVGP
# Stop Adam from optimizing the variational parameters
set_trainable(svgp_natgrad.q_mu, False)
set_trainable(svgp_natgrad.q_sqrt, False)

# Create the optimize_tensors for SVGP
-natgrad_adam_opt = tf.optimizers.Adam(adam_learning_rate)
+natgrad_adam_opt = Adam(adam_learning_rate)

natgrad_opt = NaturalGradient(gamma=0.1)
variational_params = [(svgp_natgrad.q_mu, svgp_natgrad.q_sqrt)]
@@ -333,15 +339,15 @@
)

# ordinary gradients with Adam for VGP with Bernoulli likelihood
-adam_opt = tf.optimizers.Adam(adam_learning_rate)
+adam_opt = Adam(adam_learning_rate)

# NatGrads and Adam for VGP with Bernoulli likelihood
# Stop Adam from optimizing the variational parameters
set_trainable(vgp_bernoulli_natgrad.q_mu, False)
set_trainable(vgp_bernoulli_natgrad.q_sqrt, False)

# Create the optimize_tensors for VGP with natural gradients
-natgrad_adam_opt = tf.optimizers.Adam(adam_learning_rate)
+natgrad_adam_opt = Adam(adam_learning_rate)
natgrad_opt = NaturalGradient(gamma=0.1)
variational_params = [
(vgp_bernoulli_natgrad.q_mu, vgp_bernoulli_natgrad.q_sqrt)
@@ -392,7 +398,7 @@
set_trainable(vgp_bernoulli_natgrads_xi.q_sqrt, False)

# Create the optimize_tensors for VGP with Bernoulli likelihood
-adam_opt = tf.optimizers.Adam(adam_learning_rate)
+adam_opt = Adam(adam_learning_rate)
natgrad_opt = NaturalGradient(gamma=0.01)

variational_params = [
@@ -314,11 +314,11 @@ def _conditional_fused(self, Xnew, full_cov, full_output_cov):
fmean = tf.matmul(A, f, transpose_a=True)

if q_sqrt is not None:
-if q_sqrt.get_shape().ndims == 2:
+if q_sqrt.shape.ndims == 2:
# LTA = A * tf.expand_dims(q_sqrt, 2) # K x M x N
# won't work # make ticket for this?
raise NotImplementedError
-elif q_sqrt.get_shape().ndims == 3:
+elif q_sqrt.shape.ndims == 3:
# L = tf.matrix_band_part(tf.transpose(q_sqrt, (2, 0, 1)), -1, 0) # K x M x M

# K x M x N
@@ -331,8 +331,7 @@ def _conditional_fused(self, Xnew, full_cov, full_output_cov):
ATL = tf.matmul(A, q_sqrt, transpose_a=True)
else:
raise ValueError(
"Bad dimension for q_sqrt: %s"
% str(q_sqrt.get_shape().ndims)
"Bad dimension for q_sqrt: %s" % str(q_sqrt.shape.ndims)
)
if full_cov:
# fvar = fvar + tf.matmul(LTA, LTA, transpose_a=True) # K x N x N
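A side note on the change above: for an ordinary tensor, `.shape` and the older `.get_shape()` return the same static `TensorShape`, so the `ndims` checks behave identically; a tiny illustrative check (the shape below is made up):

```python
import tensorflow as tf

q_sqrt = tf.zeros([3, 5, 5])  # hypothetical [K, M, M] tensor
assert q_sqrt.shape.ndims == 3
assert q_sqrt.get_shape().ndims == 3  # legacy spelling of the same thing
```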
4 changes: 3 additions & 1 deletion gpflow/optimizers/natgrad.py
@@ -201,6 +201,8 @@ def __init__(
"""
name = self.__class__.__name__ if name is None else name
super().__init__(name)
# explicitly store name (as TF <2.12 stores it differently from TF 2.12)
self.natgrad_name = name
self.gamma = gamma
self.xi_transform = xi_transform

@@ -263,7 +265,7 @@ def _natgrad_steps(
q_mu_grads, q_sqrt_grads = tape.gradient(loss, [q_mu_vars, q_sqrt_vars])
# NOTE that these are the gradients in *unconstrained* space

with tf.name_scope(f"{self._name}/natural_gradient_steps"):
with tf.name_scope(f"{self.natgrad_name}/natural_gradient_steps"):
for q_mu_grad, q_sqrt_grad, q_mu, q_sqrt, xi_transform in zip(
q_mu_grads, q_sqrt_grads, q_mus, q_sqrts, xis
):
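The stored `natgrad_name` replaces the TF-private `self._name`, whose internal handling changed in TF 2.12, so the name scope stays stable across versions. A rough sketch of the pattern on a hypothetical class (not GPflow's actual optimizer):

```python
import tensorflow as tf

class MyStepper(tf.Module):
    """Hypothetical class illustrating the 'store your own name' pattern."""

    def __init__(self, name=None):
        name = self.__class__.__name__ if name is None else name
        super().__init__(name=name)
        # Keep an explicit copy rather than reading a TF-private attribute later.
        self.stepper_name = name

    def step(self) -> None:
        with tf.name_scope(f"{self.stepper_name}/natural_gradient_steps"):
            pass  # the actual parameter updates would run inside this scope
```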
