From 20c7e41d12300cf63587a24b3ede1f25ffb6a416 Mon Sep 17 00:00:00 2001 From: T2T Team Date: Wed, 29 Nov 2017 15:21:31 -0800 Subject: [PATCH 01/11] Discrete autoencoder with VQ-VAE as in https://arxiv.org/abs/1711.00937. PiperOrigin-RevId: 177371794 --- README.md | 7 +- setup.py | 1 - tensor2tensor/bin/t2t-bleu | 200 ------------------------ tensor2tensor/bin/t2t-datagen | 0 tensor2tensor/bin/t2t-decoder | 7 +- tensor2tensor/bin/t2t-make-tf-configs | 0 tensor2tensor/bin/t2t-trainer | 0 tensor2tensor/models/transformer_vae.py | 21 ++- tensor2tensor/utils/bleu_hook.py | 68 +------- tensor2tensor/utils/bleu_hook_test.py | 8 +- tensor2tensor/utils/decoding.py | 4 +- 11 files changed, 27 insertions(+), 289 deletions(-) delete mode 100755 tensor2tensor/bin/t2t-bleu mode change 100755 => 100644 tensor2tensor/bin/t2t-datagen mode change 100755 => 100644 tensor2tensor/bin/t2t-decoder mode change 100755 => 100644 tensor2tensor/bin/t2t-make-tf-configs mode change 100755 => 100644 tensor2tensor/bin/t2t-trainer diff --git a/README.md b/README.md index c125ce3bd..9525e9bcb 100644 --- a/README.md +++ b/README.md @@ -126,12 +126,9 @@ t2t-decoder \ --output_dir=$TRAIN_DIR \ --decode_hparams="beam_size=$BEAM_SIZE,alpha=$ALPHA" \ --decode_from_file=$DECODE_FILE - --decode_to_file=translation.en -``` - -# Eval BLEU -t2t-bleu --translation=translation.en --reference=ref-translation.de +cat $DECODE_FILE.$MODEL.$HPARAMS.beam$BEAM_SIZE.alpha$ALPHA.decodes +``` --- diff --git a/setup.py b/setup.py index 3acde12ab..5027918af 100644 --- a/setup.py +++ b/setup.py @@ -24,7 +24,6 @@ 'tensor2tensor/bin/t2t-datagen', 'tensor2tensor/bin/t2t-decoder', 'tensor2tensor/bin/t2t-make-tf-configs', - 'tensor2tensor/bin/t2t-bleu', ], install_requires=[ 'bz2file', diff --git a/tensor2tensor/bin/t2t-bleu b/tensor2tensor/bin/t2t-bleu deleted file mode 100755 index 60623857f..000000000 --- a/tensor2tensor/bin/t2t-bleu +++ /dev/null @@ -1,200 +0,0 @@ -#!/usr/bin/env python -# coding=utf-8 -# Copyright 2017 The Tensor2Tensor Authors. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -"""Evaluate BLEU score for all checkpoints in a given directory. - -This script can be used in two ways. - -To evaluate an already translated file: -`t2t-bleu --translation=my-wmt13.de --reference=wmt13_deen.de` - -To evaluate all checkpoints in a given directory: -`t2t-bleu - --model_dir=t2t_train - --data_dir=t2t_data - --translations_dir=my-translations - --problems=translate_ende_wmt32k - --hparams_set=transformer_big_single_gpu - --source=wmt13_deen.en - --reference=wmt13_deen.de` - -In addition to the above-mentioned compulsory parameters, -there are optional parameters: - - * bleu_variant: cased (case-sensitive), uncased, both (default). - * translations_dir: Where to store the translated files? Default="translations". - * even_subdir: Where in the model_dir to store the even file? Default="", - which means TensorBoard will show it as the same run as the training, but it will warn - about "more than one metagraph event per run". 
event_subdir can be used e.g. if running - this script several times with different `--decode_hparams="beam_size=$BEAM_SIZE,alpha=$ALPHA"`. - * tag_suffix: Default="", so the tags will be BLEU_cased and BLEU_uncased. Again, tag_suffix - can be used e.g. for different beam sizes if these should be plotted in different graphs. - * min_steps: Don't evaluate checkpoints with less steps. - Default=-1 means check the `last_evaluated_step.txt` file, which contains the number of steps - of the last successfully evaluated checkpoint. - * report_zero: Store BLEU=0 and guess its time based on flags.txt. Default=True. - This is useful, so TensorBoard reports correct relative time for the remaining checkpoints. - This flag is set to False if min_steps is > 0. - * wait_secs: Wait upto N seconds for a new checkpoint. Default=0. - This is useful for continuous evaluation of a running training, - in which case this should be equal to save_checkpoints_secs plus some reserve. -""" -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function -import os -import time -from collections import namedtuple -from tensor2tensor.utils import decoding -from tensor2tensor.utils import trainer_utils -from tensor2tensor.utils import usr_dir -from tensor2tensor.utils import bleu_hook -import tensorflow as tf - -flags = tf.flags -FLAGS = flags.FLAGS - -# t2t-bleu specific options -flags.DEFINE_string("bleu_variant", "both", "Possible values: cased(case-sensitive), uncased, both(default).") -flags.DEFINE_string("model_dir", "", "Directory to load model checkpoints from.") -flags.DEFINE_string("translation", None, "Path to the MT system translation file") -flags.DEFINE_string("source", None, "Path to the source-language file to be translated") -flags.DEFINE_string("reference", None, "Path to the reference translation file") -flags.DEFINE_string("translations_dir", "translations", "Where to store the translated files") -flags.DEFINE_string("event_subdir", "", "Where in model_dir to store the event file") -flags.DEFINE_string("tag_suffix", "", "What to add to BLEU_cased and BLEU_uncased tags. Default=''.") -flags.DEFINE_integer("min_steps", -1, "Don't evaluate checkpoints with less steps.") -flags.DEFINE_integer("wait_secs", 0, "Wait upto N seconds for a new checkpoint, cf. save_checkpoints_secs.") -flags.DEFINE_bool("report_zero", None, "Store BLEU=0 and guess its time based on flags.txt") - -# options derived from t2t-decode -flags.DEFINE_integer("decode_shards", 1, "Number of decoding replicas.") -flags.DEFINE_string("t2t_usr_dir", "", - "Path to a Python module that will be imported. The " - "__init__.py file should include the necessary imports. " - "The imported files should contain registrations, " - "e.g. 
@registry.register_model calls, that will then be " - "available to the t2t-decoder.") -flags.DEFINE_string("master", "", "Address of TensorFlow master.") -flags.DEFINE_string("schedule", "train_and_evaluate", - "Must be train_and_evaluate for decoding.") - -Model = namedtuple('Model', 'filename time steps') - - -def read_checkpoints_list(model_dir, min_steps): - models = [Model(x[:-6], os.path.getctime(x), int(x[:-6].rsplit('-')[-1])) - for x in tf.gfile.Glob(os.path.join(model_dir, 'model.ckpt-*.index'))] - return sorted((x for x in models if x.steps > min_steps), key=lambda x: x.steps) - -def main(_): - tf.logging.set_verbosity(tf.logging.INFO) - if FLAGS.translation: - if FLAGS.model_dir: - raise ValueError('Cannot specify both --translation and --model_dir.') - if FLAGS.bleu_variant in ('uncased', 'both'): - bleu = 100 * bleu_hook.bleu_wrapper(FLAGS.reference, FLAGS.translation, case_sensitive=False) - print("BLEU_uncased = %6.2f" % bleu) - if FLAGS.bleu_variant in ('cased', 'both'): - bleu = 100 * bleu_hook.bleu_wrapper(FLAGS.reference, FLAGS.translation, case_sensitive=True) - print("BLEU_cased = %6.2f" % bleu) - return - - usr_dir.import_usr_dir(FLAGS.t2t_usr_dir) - FLAGS.model = FLAGS.model or 'transformer' - FLAGS.output_dir = FLAGS.model_dir - trainer_utils.log_registry() - trainer_utils.validate_flags() - assert FLAGS.schedule == "train_and_evaluate" - data_dir = os.path.expanduser(FLAGS.data_dir) - model_dir = os.path.expanduser(FLAGS.model_dir) - - hparams = trainer_utils.create_hparams( - FLAGS.hparams_set, data_dir, passed_hparams=FLAGS.hparams) - trainer_utils.add_problem_hparams(hparams, FLAGS.problems) - estimator, _ = trainer_utils.create_experiment_components( - data_dir=data_dir, - model_name=FLAGS.model, - hparams=hparams, - run_config=trainer_utils.create_run_config(model_dir)) - - decode_hp = decoding.decode_hparams(FLAGS.decode_hparams) - decode_hp.add_hparam("shards", FLAGS.decode_shards) - decode_hp.add_hparam("shard_id", FLAGS.worker_id) - - os.makedirs(FLAGS.translations_dir, exist_ok=True) - translated_base_file = os.path.join(FLAGS.translations_dir, FLAGS.problems) - event_dir = os.path.join(FLAGS.model_dir, FLAGS.event_subdir) - last_step_file = os.path.join(event_dir, 'last_evaluated_step.txt') - if FLAGS.min_steps == -1: - try: - with open(last_step_file) as ls_file: - FLAGS.min_steps = int(ls_file.read()) - except FileNotFoundError: - FLAGS.min_steps = 0 - if FLAGS.report_zero is None: - FLAGS.report_zero = FLAGS.min_steps == 0 - - models = read_checkpoints_list(model_dir, FLAGS.min_steps) - tf.logging.info("Found %d models with steps: %s" % (len(models), ", ".join(str(x.steps) for x in models))) - - writer = tf.summary.FileWriter(event_dir) - if FLAGS.report_zero: - start_time = os.path.getctime(os.path.join(model_dir, 'flags.txt')) - values = [] - if FLAGS.bleu_variant in ('uncased', 'both'): - values.append(tf.Summary.Value(tag='BLEU_uncased' + FLAGS.tag_suffix, simple_value=0)) - if FLAGS.bleu_variant in ('cased', 'both'): - values.append(tf.Summary.Value(tag='BLEU_cased' + FLAGS.tag_suffix, simple_value=0)) - writer.add_event(tf.summary.Event(summary=tf.Summary(value=values), wall_time=start_time, step=0)) - - exit_time = time.time() + FLAGS.wait_secs - min_steps = FLAGS.min_steps - while True: - if not models and FLAGS.wait_secs: - tf.logging.info('All checkpoints evaluated. 
Waiting till %s if a new checkpoint appears' % time.asctime(time.localtime(exit_time))) - while True: - time.sleep(10) - models = read_checkpoints_list(model_dir, min_steps) - if models or time.time() > exit_time: - break - if not models: - return - - model = models.pop(0) - exit_time, min_steps = model.time + FLAGS.wait_secs, model.steps - tf.logging.info("Evaluating " + model.filename) - out_file = translated_base_file + '-' + str(model.steps) - tf.logging.set_verbosity(tf.logging.ERROR) # decode_from_file logs all the translations as INFO - decoding.decode_from_file(estimator, FLAGS.source, decode_hp, out_file, checkpoint_path=model.filename) - tf.logging.set_verbosity(tf.logging.INFO) - values = [] - if FLAGS.bleu_variant in ('uncased', 'both'): - bleu = 100 * bleu_hook.bleu_wrapper(FLAGS.reference, out_file, case_sensitive=False) - values.append(tf.Summary.Value(tag='BLEU_uncased' + FLAGS.tag_suffix, simple_value=bleu)) - tf.logging.info("%s: BLEU_uncased = %6.2f" % (model.filename, bleu)) - if FLAGS.bleu_variant in ('cased', 'both'): - bleu = 100 * bleu_hook.bleu_wrapper(FLAGS.reference, out_file, case_sensitive=True) - values.append(tf.Summary.Value(tag='BLEU_cased' + FLAGS.tag_suffix, simple_value=bleu)) - tf.logging.info("%s: BLEU_cased = %6.2f" % (model.filename, bleu)) - writer.add_event(tf.summary.Event(summary=tf.Summary(value=values), wall_time=model.time, step=model.steps)) - writer.flush() - with open(last_step_file, 'w') as ls_file: - ls_file.write(str(model.steps) + '\n') - - -if __name__ == "__main__": - tf.app.run() diff --git a/tensor2tensor/bin/t2t-datagen b/tensor2tensor/bin/t2t-datagen old mode 100755 new mode 100644 diff --git a/tensor2tensor/bin/t2t-decoder b/tensor2tensor/bin/t2t-decoder old mode 100755 new mode 100644 index 4c83610b3..712cb45ce --- a/tensor2tensor/bin/t2t-decoder +++ b/tensor2tensor/bin/t2t-decoder @@ -46,10 +46,7 @@ import tensorflow as tf flags = tf.flags FLAGS = flags.FLAGS -flags.DEFINE_string("output_dir", "", - "Training directory where the latest checkpoint is used.") -flags.DEFINE_string("checkpoint_path", None, - "Path to the model checkpoint. 
Overrides output_dir.") +flags.DEFINE_string("output_dir", "", "Training directory to load from.") flags.DEFINE_string("decode_from_file", None, "Path to the source file for decoding") flags.DEFINE_string("decode_to_file", None, @@ -93,7 +90,7 @@ def main(_): decoding.decode_interactively(estimator, decode_hp) elif FLAGS.decode_from_file: decoding.decode_from_file(estimator, FLAGS.decode_from_file, decode_hp, - FLAGS.decode_to_file, checkpoint_path=FLAGS.checkpoint_path) + FLAGS.decode_to_file) else: decoding.decode_from_dataset( estimator, diff --git a/tensor2tensor/bin/t2t-make-tf-configs b/tensor2tensor/bin/t2t-make-tf-configs old mode 100755 new mode 100644 diff --git a/tensor2tensor/bin/t2t-trainer b/tensor2tensor/bin/t2t-trainer old mode 100755 new mode 100644 diff --git a/tensor2tensor/models/transformer_vae.py b/tensor2tensor/models/transformer_vae.py index 0bb5efea9..4a7290c23 100644 --- a/tensor2tensor/models/transformer_vae.py +++ b/tensor2tensor/models/transformer_vae.py @@ -147,8 +147,9 @@ def nearest(x, means, hparams): transpose_b=True) _, nearest_idx = tf.nn.top_k(- dist, k=1) nearest_hot = tf.one_hot(tf.squeeze(nearest_idx, axis=1), hparams.v_size) - nearest_hot = tf.reshape(nearest_hot, [tf.shape(x)[0], tf.shape(x)[1], - tf.shape(x)[2], hparams.v_size]) + shape = common_layers.shape_list(x) + shape[-1] = hparams.v_size + nearest_hot = tf.reshape(nearest_hot, shape=shape) return tf.stop_gradient(nearest_hot) @@ -156,8 +157,12 @@ def kmeans(x, means, hparams, name): with tf.variable_scope(name): x_means_hot = nearest(x, means, hparams) x_means = tf.gather(means, tf.argmax(x_means_hot, axis=-1)) - kl = tf.reduce_sum(tf.square(x - x_means), axis=-1) - return x_means_hot, tf.reduce_mean(kl) # * 10.0 + x_flat = tf.reshape(x, [-1, hparams.hidden_size]) + kl = tf.reduce_mean(tf.reduce_sum(tf.square(x_flat - x_means), axis=-1)) + reg_loss1 = tf.nn.l2_loss((tf.stop_gradient(x) - x_means)) + reg_loss2 = hparams.beta * tf.nn.l2_loss((x - tf.stop_gradient(x_means))) + l = kl + reg_loss1 + reg_loss2 + return x_means_hot, x_means, l def bit_to_int(x_bit, nbits): @@ -233,6 +238,12 @@ def embed(x): _, hot, l = dae(x, hparams, name) c = tf.argmax(hot, axis=-1) h1 = tf.layers.dense(hot, hparams.hidden_size, name="dae_dense") + if hparams.bottleneck_kind == "vq-vae": + means = tf.get_variable(name="means", shape=[hparams.v_size, + hparams.hidden_size]) + x_means_hot, x_means, l = kmeans(x, means, hparams, name="vq-vae-kmeans") + h1 = x_means + c = tf.argmax(x_means_hot, axis=-1) h2 = tf.layers.dense(tf.nn.relu(h1), filter_size, name="vch2") res = tf.layers.dense(tf.nn.relu(h2), hparams.hidden_size, name="vcfin") return res, c, l, embed @@ -500,6 +511,8 @@ def transformer_ae_small(): hparams.add_hparam("decode_autoregressive", True) hparams.add_hparam("do_vae", True) hparams.add_hparam("bit_vae", True) + hparams.add_hparam("beta", 0.25) + hparams.kl_warmup_steps = 150000 return hparams diff --git a/tensor2tensor/utils/bleu_hook.py b/tensor2tensor/utils/bleu_hook.py index 270c44788..20a7c8426 100644 --- a/tensor2tensor/utils/bleu_hook.py +++ b/tensor2tensor/utils/bleu_hook.py @@ -20,9 +20,6 @@ import collections import math -import re -import sys -import unicodedata # Dependency imports @@ -30,7 +27,6 @@ # pylint: disable=redefined-builtin from six.moves import xrange from six.moves import zip -import six # pylint: enable=redefined-builtin import tensorflow as tf @@ -96,17 +92,10 @@ def compute_bleu(reference_corpus, matches_by_order[len(ngram) - 1] += overlap[ngram] for ngram in 
translation_ngram_counts: possible_matches_by_order[len(ngram)-1] += translation_ngram_counts[ngram] - assert reference_length, "no reference provided" - assert translation_length, "no translation provided" precisions = [0] * max_order - smooth = 1.0 for i in xrange(0, max_order): if possible_matches_by_order[i] > 0: - if matches_by_order[i] > 0: - precisions[i] = matches_by_order[i] / possible_matches_by_order[i] - else: - smooth *= 2 - precisions[i] = 1.0 / (smooth * possible_matches_by_order[i]) + precisions[i] = matches_by_order[i] / possible_matches_by_order[i] else: precisions[i] = 0.0 @@ -142,58 +131,3 @@ def bleu_score(predictions, labels, **unused_kwargs): bleu = tf.py_func(compute_bleu, (labels, outputs), tf.float32) return bleu, tf.constant(1.0) - - -class UnicodeRegex: - """Ad-hoc hack to recognize all punctuation and symbols. - - without dependening on https://pypi.python.org/pypi/regex/.""" - def _property_chars(prefix): - return ''.join(six.unichr(x) for x in range(sys.maxunicode) - if unicodedata.category(six.unichr(x)).startswith(prefix)) - punctuation = _property_chars('P') - nondigit_punct_re = re.compile(r'([^\d])([' + punctuation + r'])') - punct_nondigit_re = re.compile(r'([' + punctuation + r'])([^\d])') - symbol_re = re.compile('([' + _property_chars('S') + '])') - - -def bleu_tokenize(string): - r"""Tokenize a string following the official BLEU implementation. - - See https://github.com/moses-smt/mosesdecoder/blob/master/scripts/generic/mteval-v14.pl#L954-L983 - In our case, the input string is expected to be just one line - and no HTML entities de-escaping is needed. - So we just tokenize on punctuation and symbols, - except when a punctuation is preceded and followed by a digit - (e.g. a comma/dot as a thousand/decimal separator). - - Note that a numer (e.g. a year) followed by a dot at the end of sentence is NOT tokenized, - i.e. the dot stays with the number because `s/(\p{P})(\P{N})/ $1 $2/g` - does not match this case (unless we add a space after each sentence). - However, this error is already in the original mteval-v14.pl - and we want to be consistent with it. - - Args: - string: the input string - - Returns: - a list of tokens - """ - string = UnicodeRegex.nondigit_punct_re.sub(r'\1 \2 ', string) - string = UnicodeRegex.punct_nondigit_re.sub(r' \1 \2', string) - string = UnicodeRegex.symbol_re.sub(r' \1 ', string) - return string.split() - - -def bleu_wrapper(ref_filename, hyp_filename, case_sensitive=False): - """Compute BLEU for two files (reference and hypothesis translation).""" - # TODO: Does anyone care about Python2 compatibility? 
- ref_lines = open(ref_filename, 'rt', encoding='utf-8').read().splitlines() - hyp_lines = open(hyp_filename, 'rt', encoding='utf-8').read().splitlines() - assert len(ref_lines) == len(hyp_lines) - if not case_sensitive: - ref_lines = [x.lower() for x in ref_lines] - hyp_lines = [x.lower() for x in hyp_lines] - ref_tokens = [bleu_tokenize(x) for x in ref_lines] - hyp_tokens = [bleu_tokenize(x) for x in hyp_lines] - return compute_bleu(ref_tokens, hyp_tokens) diff --git a/tensor2tensor/utils/bleu_hook_test.py b/tensor2tensor/utils/bleu_hook_test.py index f5976941f..bf08174f8 100644 --- a/tensor2tensor/utils/bleu_hook_test.py +++ b/tensor2tensor/utils/bleu_hook_test.py @@ -39,9 +39,8 @@ def testComputeNotEqual(self): translation_corpus = [[1, 2, 3, 4]] reference_corpus = [[5, 6, 7, 8]] bleu = bleu_hook.compute_bleu(reference_corpus, translation_corpus) - # The smoothing prevents 0 for small corpora - actual_bleu = 0.0798679 - self.assertAllClose(bleu, actual_bleu, atol=1e-03) + actual_bleu = 0.0 + self.assertEqual(bleu, actual_bleu) def testComputeMultipleBatch(self): translation_corpus = [[1, 2, 3, 4], [5, 6, 7, 0]] @@ -54,9 +53,8 @@ def testComputeMultipleNgrams(self): reference_corpus = [[1, 2, 1, 13], [12, 6, 7, 4, 8, 9, 10]] translation_corpus = [[1, 2, 1, 3], [5, 6, 7, 4]] bleu = bleu_hook.compute_bleu(reference_corpus, translation_corpus) - actual_bleu = 0.3436 + actual_bleu = 0.486 self.assertAllClose(bleu, actual_bleu, atol=1e-03) - if __name__ == '__main__': tf.test.main() diff --git a/tensor2tensor/utils/decoding.py b/tensor2tensor/utils/decoding.py index 426110ad8..d0913e0e1 100644 --- a/tensor2tensor/utils/decoding.py +++ b/tensor2tensor/utils/decoding.py @@ -200,7 +200,7 @@ def decode_from_dataset(estimator, tf.logging.info("Completed inference on %d samples." % num_predictions) # pylint: disable=undefined-loop-variable -def decode_from_file(estimator, filename, decode_hp, decode_to_file=None, checkpoint_path=None): +def decode_from_file(estimator, filename, decode_hp, decode_to_file=None): """Compute predictions on entries in filename and write them out.""" if not decode_hp.batch_size: decode_hp.batch_size = 32 @@ -230,7 +230,7 @@ def input_fn(): return _decode_input_tensor_to_features_dict(example, hparams) decodes = [] - result_iter = estimator.predict(input_fn, checkpoint_path=checkpoint_path) + result_iter = estimator.predict(input_fn) for result in result_iter: if decode_hp.return_beams: beam_decodes = [] From c9144dfa5f514cab529f487b069415daee5e211e Mon Sep 17 00:00:00 2001 From: Noam Shazeer Date: Thu, 30 Nov 2017 11:23:07 -0800 Subject: [PATCH 02/11] Packed datasets - combine examples to constant length for efficient TPU training. Modify transformer to keep the packed-together examples from attending to one another. 
PiperOrigin-RevId: 177481956 --- .../data_generators/generator_utils.py | 163 ++++++++++++------ tensor2tensor/data_generators/inspect.py | 14 +- tensor2tensor/data_generators/lm1b.py | 8 +- tensor2tensor/data_generators/problem.py | 60 +++++-- .../data_generators/translate_ende.py | 16 +- tensor2tensor/layers/common_attention.py | 18 ++ tensor2tensor/layers/common_layers.py | 12 +- tensor2tensor/models/transformer.py | 129 ++++++++++---- 8 files changed, 304 insertions(+), 116 deletions(-) diff --git a/tensor2tensor/data_generators/generator_utils.py b/tensor2tensor/data_generators/generator_utils.py index aa55ccb13..2d21da2ba 100644 --- a/tensor2tensor/data_generators/generator_utils.py +++ b/tensor2tensor/data_generators/generator_utils.py @@ -449,66 +449,131 @@ def shuffle_dataset(filenames): tf.gfile.Remove(fname) -def combine_examples_no_inputs(examples, max_length): - """Combine examples into longer examples. +class SequencePacker(object): + """Helper for constructing a packed example of sequence examples. - Concatenate targets to form target sequences with length up to max_length. - Target sequences longer than max_length are chopped into multiple sequences. + See comments to pack_examples() + """ - Args: - examples: a generator returning feature dictionaries. - max_length: an integer. + def __init__(self, first_sequence, spacing=2): + self._spacing = spacing + self._ids = first_sequence[:] + self._segmentation = [1] * len(first_sequence) + self._position = range(len(first_sequence)) - Yields: - feature dictionaries. - """ - partial = [] - for example in examples: - x = example["targets"] - if len(x) + len(partial) > max_length: - if partial: - yield {"inputs": [0], "targets": partial} - partial = [] - if len(x) > max_length: - num_fragments = len(x) // max_length - for i in xrange(num_fragments): - yield {"inputs": [0], "targets": x[max_length * i:max_length * (i + 1)]} - partial = x[max_length * num_fragments:] - else: - partial += x - if partial: - yield {"inputs": [0], "targets": partial} + def add(self, ids): + padding = [0] * self._spacing + self._ids.extend(padding + ids) + next_segment_num = self._segmentation[-1] + 1 if self._segmentation else 1 + self._segmentation.extend(padding + [next_segment_num] * len(ids)) + self._position.extend(padding + range(len(ids))) + + def can_fit(self, ids, packed_length): + return len(self._ids) + self._spacing + len(ids) <= packed_length + def to_dict(self): + return {"inputs": [0], + "targets": self._ids, + "targets_segmentation": self._segmentation, + "targets_position": self._position} -def combine_examples_with_inputs(examples, max_length): - """Combine examples into longer examples. - We combine multiple examples by concatenating the inputs and concatenating - the targets. Sequences where the inputs or the targets are too long are - emitted as singletons (not chopped). +class SequencePairPacker(object): + """Helper for packing sequence-to-sequence examples into bigger examples. 
+ + See comments to pack_examples() + """ + + def __init__(self, first_sequence_pair, spacing=2): + self._inputs = SequencePacker(first_sequence_pair[0], spacing) + self._targets = SequencePacker(first_sequence_pair[1], spacing) + + def add(self, pair): + self._inputs.add(pair[0]) + self._targets.add(pair[1]) + + def can_fit(self, pair, packed_length): + return (self._inputs.can_fit(pair[0], packed_length) and + self._targets.can_fit(pair[1], packed_length)) + + def to_dict(self): + ret = self._targets.to_dict() + inputs_dict = self._inputs.to_dict() + ret["inputs"] = inputs_dict["targets"] + ret["inputs_segmentation"] = inputs_dict["targets_segmentation"] + ret["inputs_position"] = inputs_dict["targets_position"] + return ret + + +def pack_examples(examples, + has_inputs, + packed_length=256, + spacing=2, + queue_size=10, + chop_long_sequences=False): + """Pack examples into longer examples. + + If has_inputs=False, we are packing single-sequence examples with + targets only and no inputs. + + In this case, we concatenate the targets from several examples to form + each new example. We insert a number of zeros for spacing between the + original sequences. This is to help the sequences stay separate + under convolutions. If chop_long_sequences is set, then any input sequence + longer than packed_length gets chopped up into multiple examples. Otherwise, + long sequences are emitted as singletons. + + If has_inputs=True, then we are packing sequence-to-sequence + examples. We combine several examples by concatenating the inputs + (as above) and concatenating the targets (as above). Chopping of + long sequences is not supported. + + The packed examples are represented as dictionaries containing: + "inputs", "targets": the packed sequences described above + "inputs_segmentation", "targets_segmentation": + Sequences aligned with "inputs", "targets" specifying to which original + sequence each position belongs. Numbering starts from 1, and 0 is used + for spacing. This information is useful for preventing attention across + segments. + e.g. [1 1 1 1 1 1 0 0 2 2 2 0 0 3 3 3 3 3 0 0 4 4 4] + "inputs_position", "targets_position": + Sequences aligned with "inputs", "targets" specifying position within + the original sequence. This is useful for positional encodings. + e.g. [0 1 2 3 4 5 0 0 0 1 2 0 0 0 1 2 3 4 0 0 0 1 2] Args: examples: a generator returning feature dictionaries. - max_length: an integer. + has_inputs: a boolean + packed_length: an integer + spacing: an integer + queue_size: an integer + chop_long_sequences: a boolean Yields: feature dictionaries. 
""" - partial_a = [] - partial_b = [] + packer = SequencePairPacker if has_inputs else SequencePacker + combined = [] for example in examples: - a = example["inputs"] - b = example["targets"] - if (len(a) + len(partial_a) > max_length or - len(b) + len(partial_b) > max_length): - if partial_a or partial_b: - yield {"inputs": partial_a, "targets": partial_b} - partial_a = [] - partial_b = [] - if len(a) > max_length or len(b) > max_length: - yield {"inputs": a, "targets": b} - else: - partial_a += a - partial_b += b - if partial_a or partial_b: - yield {"inputs": partial_a, "targets": partial_b} + x = ((example["inputs"], example["targets"]) + if has_inputs else example["targets"]) + if chop_long_sequences and len(x) > packed_length: + assert not has_inputs + num_fragments = len(x) // packed_length + for i in xrange(num_fragments): + yield packer( + x[packed_length * i:packed_length * (i + 1)], spacing).to_dict() + x = x[packed_length * num_fragments:] + added = False + for c in combined: + if c.can_fit(x, packed_length): + c.add(x) + added = True + break + if not added: + if len(combined) == queue_size: + yield combined[0].to_dict() + combined = combined[1:] + combined.append(packer(x, spacing)) + for c in combined: + yield c.to_dict() diff --git a/tensor2tensor/data_generators/inspect.py b/tensor2tensor/data_generators/inspect.py index c84f00606..0293ca9c4 100644 --- a/tensor2tensor/data_generators/inspect.py +++ b/tensor2tensor/data_generators/inspect.py @@ -40,6 +40,7 @@ tf.flags.DEFINE_string("input_filename", "", "input filename") tf.flags.DEFINE_bool("print_inputs", False, "Print decoded inputs to stdout") tf.flags.DEFINE_bool("print_targets", False, "Print decoded targets to stdout") +tf.flags.DEFINE_bool("print_all", False, "Print all fields") FLAGS = tf.flags.FLAGS @@ -75,12 +76,15 @@ def main(_): total_sequences += 1 max_input_length = max(max_input_length, len(inputs)) max_target_length = max(max_target_length, len(targets)) + if FLAGS.print_all: + for k, v in x.features.feature.iteritems(): + print("%s: %s" % (k, v.int64_list.value)) - tf.logging.info("total_sequences: %d", total_sequences) - tf.logging.info("total_input_tokens: %d", total_input_tokens) - tf.logging.info("total_target_tokens: %d", total_target_tokens) - tf.logging.info("max_input_length: %d", max_input_length) - tf.logging.info("max_target_length: %d", max_target_length) + print("total_sequences: %d" % total_sequences) + print("total_input_tokens: %d" % total_input_tokens) + print("total_target_tokens: %d" % total_target_tokens) + print("max_input_length: %d" % max_input_length) + print("max_target_length: %d" % max_target_length) if __name__ == "__main__": diff --git a/tensor2tensor/data_generators/lm1b.py b/tensor2tensor/data_generators/lm1b.py index 3fa7d7e47..cd0eb8e3c 100644 --- a/tensor2tensor/data_generators/lm1b.py +++ b/tensor2tensor/data_generators/lm1b.py @@ -224,11 +224,11 @@ def generator(self, data_dir, tmp_dir, is_training): @registry.register_problem -class LanguagemodelLm1b8kConcat512(LanguagemodelLm1b32k): +class LanguagemodelLm1b8kPacked(LanguagemodelLm1b32k): """A language model on the 1B words corpus. 8k vocabualry. - Training/eval examples are concatenated to a maximum length of 512. + Training/eval examples are concatenated to a maximum length of 256. Happy TPU Training. 
@@ -241,8 +241,8 @@ def targeted_vocab_size(self): return 2**13 # 8192 @property - def combine_to_length(self): - return 512 + def packed_length(self): + return 256 @registry.register_problem diff --git a/tensor2tensor/data_generators/problem.py b/tensor2tensor/data_generators/problem.py index d2e30cbff..d80cc01da 100644 --- a/tensor2tensor/data_generators/problem.py +++ b/tensor2tensor/data_generators/problem.py @@ -670,20 +670,15 @@ def generator(self, data_dir, tmp_dir, is_training): """ raise NotImplementedError() - def maybe_combine_examples(self, generator): - if self.combine_to_length: - if self.has_inputs: - return generator_utils.combine_examples_with_inputs( - generator, self.combine_to_length) - else: - return generator_utils.combine_examples_no_inputs( - generator, self.combine_to_length) - else: - return generator - @property - def combine_to_length(self): - """An optional integer. Concatenate examples into bigger examples.""" + def packed_length(self): + """Pack multiple examples into a single example of constant length. + + This is useful for TPU training. See generator_utils.pack_examples(). + + Returns: + an optional integer + """ return None @property @@ -723,6 +718,15 @@ def use_subword_tokenizer(self): def has_inputs(self): return True # Set to False for language models. + def _maybe_pack_examples(self, generator): + """Helper to generate_data().""" + if self.packed_length: + return generator_utils.pack_examples( + generator, self.has_inputs, self.packed_length, + chop_long_sequences=not self.has_inputs) + else: + return generator + def generate_data(self, data_dir, tmp_dir, task_id=-1): train_paths = self.training_filepaths( data_dir, self.num_shards, shuffled=False) @@ -731,14 +735,14 @@ def generate_data(self, data_dir, tmp_dir, task_id=-1): if self.use_train_shards_for_dev: all_paths = train_paths + dev_paths generator_utils.generate_files( - self.maybe_combine_examples(self.generator(data_dir, tmp_dir, True)), + self._maybe_pack_examples(self.generator(data_dir, tmp_dir, True)), all_paths) generator_utils.shuffle_dataset(all_paths) else: generator_utils.generate_dataset_and_shuffle( - self.maybe_combine_examples(self.generator(data_dir, tmp_dir, True)), + self._maybe_pack_examples(self.generator(data_dir, tmp_dir, True)), train_paths, - self.maybe_combine_examples(self.generator(data_dir, tmp_dir, False)), + self._maybe_pack_examples(self.generator(data_dir, tmp_dir, False)), dev_paths) def feature_encoders(self, data_dir): @@ -770,6 +774,30 @@ def hparams(self, defaults, unused_model_hparams): p.target_space_id = self.target_space_id if self.is_character_level: p.loss_multiplier = 2.0 + if self.packed_length: + identity = (registry.Modalities.GENERIC, None) + if self.has_inputs: + p.input_modality["inputs_segmentation"] = identity + p.input_modality["inputs_position"] = identity + p.input_modality["targets_segmentation"] = identity + p.input_modality["targets_position"] = identity + + def example_reading_spec(self): + data_fields = { + "targets": tf.VarLenFeature(tf.int64) + } + if self.has_inputs: + data_fields["inputs"] = tf.VarLenFeature(tf.int64) + + if self.packed_length: + if self.has_inputs: + data_fields["inputs_segmentation"] = tf.VarLenFeature(tf.int64) + data_fields["inputs_position"] = tf.VarLenFeature(tf.int64) + data_fields["targets_segmentation"] = tf.VarLenFeature(tf.int64) + data_fields["targets_position"] = tf.VarLenFeature(tf.int64) + + data_items_to_decoders = None + return (data_fields, data_items_to_decoders) def eval_metrics(self): return [ 
diff --git a/tensor2tensor/data_generators/translate_ende.py b/tensor2tensor/data_generators/translate_ende.py index 2dc8e3a00..bbd502fdd 100644 --- a/tensor2tensor/data_generators/translate_ende.py +++ b/tensor2tensor/data_generators/translate_ende.py @@ -117,15 +117,15 @@ def target_space_id(self): @registry.register_problem -class TranslateEndeWmtBpe32kConcat512(TranslateEndeWmtBpe32k): +class TranslateEndeWmtBpe32kPacked(TranslateEndeWmtBpe32k): """Problem spec for WMT En-De translation, BPE version. - Training/eval examples are concatenated to a maximum length of 512. + Training/eval examples are concatenated to a maximum length of 256. """ @property - def combine_to_length(self): - return 512 + def packed_length(self): + return 256 @registry.register_problem @@ -168,6 +168,14 @@ def targeted_vocab_size(self): return 2**15 # 32768 +@registry.register_problem +class TranslateEndeWmt32kPacked(TranslateEndeWmt32k): + + @property + def packed_length(self): + return 256 + + @registry.register_problem class TranslateEndeWmtCharacters(translate.TranslateProblem): """Problem spec for WMT En-De translation.""" diff --git a/tensor2tensor/layers/common_attention.py b/tensor2tensor/layers/common_attention.py index f0bbaa39e..23cf074af 100644 --- a/tensor2tensor/layers/common_attention.py +++ b/tensor2tensor/layers/common_attention.py @@ -613,6 +613,24 @@ def attention_bias_lower_triangle(length): return attention_bias_local(length, -1, 0) +@expert_utils.add_name_scope() +def attention_bias_same_segment(query_segment_id, memory_segment_id): + """Create an bias tensor to be added to attention logits. + + Positions with the same segment_ids can see each other. + + Args: + query_segment_id: a float `Tensor` with shape [batch, query_length]. + memory_segment_id: a float `Tensor` with shape [batch, memory_length]. + + Returns: + a `Tensor` with shape [batch, 1, query_length, memory_length]. + """ + ret = tf.to_float(tf.not_equal(tf.expand_dims(query_segment_id, 2), + tf.expand_dims(memory_segment_id, 1))) * -1e9 + return tf.expand_dims(ret, axis=1) + + @expert_utils.add_name_scope() def attention_bias_ignore_padding(memory_padding): """Create an bias tensor to be added to attention logits. diff --git a/tensor2tensor/layers/common_layers.py b/tensor2tensor/layers/common_layers.py index f04d27f1d..ca8a28b99 100644 --- a/tensor2tensor/layers/common_layers.py +++ b/tensor2tensor/layers/common_layers.py @@ -1265,13 +1265,12 @@ def relu_density_logit(x, reduce_dims): return scaled -def maybe_zero_out_padding(inputs, kernel_size, padding, nonpadding_mask): +def maybe_zero_out_padding(inputs, kernel_size, nonpadding_mask): """If necessary, zero out inputs to a conv for padding positions. Args: inputs: a Tensor with shape [batch, length, ...] kernel_size: an integer or pair of integers - padding: a string, e.g. 
"SAME" nonpadding_mask: a Tensor with shape [batch, length] Returns: @@ -1279,7 +1278,6 @@ def maybe_zero_out_padding(inputs, kernel_size, padding, nonpadding_mask): """ if (kernel_size != 1 and kernel_size != (1, 1) and - padding == "SAME" and nonpadding_mask is not None): while nonpadding_mask.get_shape().ndims < inputs.get_shape().ndims: nonpadding_mask = tf.expand_dims(nonpadding_mask, -1) @@ -1310,13 +1308,13 @@ def conv_relu_conv(inputs, """Hidden layer with RELU activation followed by linear projection.""" with tf.variable_scope(name, "conv_relu_conv", [inputs]): inputs = maybe_zero_out_padding( - inputs, first_kernel_size, padding, nonpadding_mask) + inputs, first_kernel_size, nonpadding_mask) h = tpu_conv1d(inputs, filter_size, first_kernel_size, padding=padding, name="conv1") h = tf.nn.relu(h) if dropout != 0.0: h = tf.nn.dropout(h, 1.0 - dropout) - h = maybe_zero_out_padding(h, second_kernel_size, padding, nonpadding_mask) + h = maybe_zero_out_padding(h, second_kernel_size, nonpadding_mask) return tpu_conv1d(h, output_size, second_kernel_size, padding=padding, name="conv2") @@ -1333,7 +1331,7 @@ def sepconv_relu_sepconv(inputs, """Hidden layer with RELU activation followed by linear projection.""" with tf.variable_scope(name, "sepconv_relu_sepconv", [inputs]): inputs = maybe_zero_out_padding( - inputs, first_kernel_size, padding, nonpadding_mask) + inputs, first_kernel_size, nonpadding_mask) if inputs.get_shape().ndims == 3: is_3d = True inputs = tf.expand_dims(inputs, 2) @@ -1344,7 +1342,7 @@ def sepconv_relu_sepconv(inputs, padding=padding, name="conv1") if dropout != 0.0: h = tf.nn.dropout(h, 1.0 - dropout) - h = maybe_zero_out_padding(h, second_kernel_size, padding, nonpadding_mask) + h = maybe_zero_out_padding(h, second_kernel_size, nonpadding_mask) ret = separable_conv( h, output_size, second_kernel_size, padding=padding, name="conv2") if is_3d: diff --git a/tensor2tensor/models/transformer.py b/tensor2tensor/models/transformer.py index d345155f9..099a226b3 100644 --- a/tensor2tensor/models/transformer.py +++ b/tensor2tensor/models/transformer.py @@ -44,13 +44,15 @@ class Transformer(t2t_model.T2TModel): """Attention net. See file docstring.""" - def encode(self, inputs, target_space, hparams): + def encode(self, inputs, target_space, hparams, features=None): """Encode transformer inputs. Args: inputs: Transformer inputs [batch_size, input_length, hidden_dim] target_space: scalar, target space ID. hparams: hyperparmeters for model. + features: optionally pass the entire features dictionary as well. + This is needed now for "packed" datasets. Returns: Tuple of: @@ -62,13 +64,15 @@ def encode(self, inputs, target_space, hparams): inputs = common_layers.flatten4d3d(inputs) encoder_input, self_attention_bias, encoder_decoder_attention_bias = ( - transformer_prepare_encoder(inputs, target_space, hparams)) + transformer_prepare_encoder( + inputs, target_space, hparams, features=features)) encoder_input = tf.nn.dropout(encoder_input, 1.0 - hparams.layer_prepostprocess_dropout) - encoder_output = transformer_encoder(encoder_input, self_attention_bias, - hparams) + encoder_output = transformer_encoder( + encoder_input, self_attention_bias, + hparams, nonpadding=_features_to_nonpadding(features, "inputs")) return encoder_output, encoder_decoder_attention_bias @@ -78,7 +82,8 @@ def decode(self, encoder_decoder_attention_bias, decoder_self_attention_bias, hparams, - cache=None): + cache=None, + nonpadding=None): """Decode Transformer outputs from encoder representation. 
Args: @@ -93,6 +98,7 @@ def decode(self, hparams: hyperparmeters for model. cache: dict, containing tensors which are the results of previous attentions, used for fast decoding. + nonpadding: optional Tensor with shape [batch_size, decoder_length] Returns: Final decoder representation. [batch_size, decoder_length, hidden_dim] @@ -106,7 +112,8 @@ def decode(self, decoder_self_attention_bias, encoder_decoder_attention_bias, hparams, - cache=cache) + cache=cache, + nonpadding=nonpadding) if hparams.use_tpu and hparams.mode == tf.estimator.ModeKeys.TRAIN: # TPU does not react kindly to extra dimensions. @@ -136,17 +143,18 @@ def model_fn_body(self, features): if inputs is not None: target_space = features["target_space_id"] encoder_output, encoder_decoder_attention_bias = self.encode( - inputs, target_space, hparams) + inputs, target_space, hparams, features=features) targets = features["targets"] targets = common_layers.flatten4d3d(targets) decoder_input, decoder_self_attention_bias = transformer_prepare_decoder( - targets, hparams) + targets, hparams, features=features) return self.decode(decoder_input, encoder_output, encoder_decoder_attention_bias, - decoder_self_attention_bias, hparams) + decoder_self_attention_bias, hparams, + nonpadding=_features_to_nonpadding(features, "targets")) def _greedy_infer(self, features, decode_length): """Fast version of greedy decoding. @@ -248,7 +256,8 @@ def _fast_decode(self, inputs = input_modality.bottom_sharded(inputs, dp) with tf.variable_scope("body"): encoder_output, encoder_decoder_attention_bias = dp( - self.encode, inputs, features["target_space_id"], hparams) + self.encode, inputs, features["target_space_id"], hparams, + features=features) encoder_output = encoder_output[0] encoder_decoder_attention_bias = encoder_decoder_attention_bias[0] @@ -300,9 +309,10 @@ def symbols_to_logits_fn(ids, i, cache): bias = decoder_self_attention_bias[:, :, i:i + 1, :i + 1] with tf.variable_scope("body"): - body_outputs = dp(self.decode, targets, cache["encoder_output"], - cache["encoder_decoder_attention_bias"], bias, - hparams, cache) + body_outputs = dp( + self.decode, targets, cache["encoder_output"], + cache["encoder_decoder_attention_bias"], bias, hparams, cache, + nonpadding=_features_to_nonpadding(features, "targets")) with tf.variable_scope(target_modality.name): logits = target_modality.top_sharded(body_outputs, None, dp)[0] @@ -396,20 +406,30 @@ def model_fn_body(self, features): encoder_input = tf.nn.dropout(encoder_input, 1.0 - hparams.layer_prepostprocess_dropout) - encoder_output = transformer_encoder(encoder_input, - encoder_self_attention_bias, hparams) + encoder_output = transformer_encoder( + encoder_input, encoder_self_attention_bias, hparams, + nonpadding=_features_to_nonpadding(features, "inputs")) encoder_output = tf.expand_dims(encoder_output, 2) return encoder_output -def transformer_prepare_encoder(inputs, target_space, hparams): +def _features_to_nonpadding(features, inputs_or_targets="inputs"): + key = inputs_or_targets + "_segmentation" + if features and key in features: + return tf.minimum(features[key], 1.0) + return None + + +def transformer_prepare_encoder(inputs, target_space, hparams, features=None): """Prepare one shard of the model for the encoder. Args: inputs: a Tensor. target_space: a Tensor. hparams: run hyperparameters + features: optionally pass the entire features dictionary as well. + This is needed now for "packed" datasets. 
Returns: encoder_input: a Tensor, bottom of encoder stack @@ -419,11 +439,24 @@ def transformer_prepare_encoder(inputs, target_space, hparams): """ ishape_static = inputs.shape.as_list() encoder_input = inputs - encoder_padding = common_attention.embedding_to_padding(encoder_input) - ignore_padding = common_attention.attention_bias_ignore_padding( - encoder_padding) - encoder_self_attention_bias = ignore_padding - encoder_decoder_attention_bias = ignore_padding + if features and "inputs_segmentation" in features: + # Packed dataset. Keep the examples from seeing each other. + inputs_segmentation = features["inputs_segmentation"] + inputs_position = features["inputs_position"] + targets_segmentation = features["targets_segmentation"] + encoder_self_attention_bias = common_attention.attention_bias_same_segment( + inputs_segmentation, inputs_segmentation) + encoder_decoder_attention_bias = ( + common_attention.attention_bias_same_segment( + targets_segmentation, inputs_segmentation)) + else: + # Usual case - not a packed dataset. + encoder_padding = common_attention.embedding_to_padding(encoder_input) + ignore_padding = common_attention.attention_bias_ignore_padding( + encoder_padding) + encoder_self_attention_bias = ignore_padding + encoder_decoder_attention_bias = ignore_padding + inputs_position = None if hparams.proximity_bias: encoder_self_attention_bias += common_attention.attention_bias_proximal( common_layers.shape_list(inputs)[1]) @@ -434,17 +467,23 @@ def transformer_prepare_encoder(inputs, target_space, hparams): emb_target_space = tf.reshape(emb_target_space, [1, 1, -1]) encoder_input += emb_target_space if hparams.pos == "timing": - encoder_input = common_attention.add_timing_signal_1d(encoder_input) + if inputs_position is not None: + encoder_input = common_attention.add_timing_signal_1d_given_position( + encoder_input, inputs_position) + else: + encoder_input = common_attention.add_timing_signal_1d(encoder_input) return (encoder_input, encoder_self_attention_bias, encoder_decoder_attention_bias) -def transformer_prepare_decoder(targets, hparams): +def transformer_prepare_decoder(targets, hparams, features=None): """Prepare one shard of the model for the decoder. Args: targets: a Tensor. hparams: run hyperparameters + features: optionally pass the entire features dictionary as well. + This is needed now for "packed" datasets. Returns: decoder_input: a Tensor, bottom of decoder stack @@ -453,19 +492,32 @@ def transformer_prepare_decoder(targets, hparams): decoder_self_attention_bias = ( common_attention.attention_bias_lower_triangle( common_layers.shape_list(targets)[1])) + if features and "targets_segmentation" in features: + # "Packed" dataset - keep the examples from seeing each other. 
+ targets_segmentation = features["targets_segmentation"] + targets_position = features["targets_position"] + decoder_self_attention_bias += common_attention.attention_bias_same_segment( + targets_segmentation, targets_segmentation) + else: + targets_position = None if hparams.proximity_bias: decoder_self_attention_bias += common_attention.attention_bias_proximal( common_layers.shape_list(targets)[1]) decoder_input = common_layers.shift_right_3d(targets) if hparams.pos == "timing": - decoder_input = common_attention.add_timing_signal_1d(decoder_input) + if targets_position is not None: + decoder_input = common_attention.add_timing_signal_1d_given_position( + decoder_input, targets_position) + else: + decoder_input = common_attention.add_timing_signal_1d(decoder_input) return (decoder_input, decoder_self_attention_bias) def transformer_encoder(encoder_input, encoder_self_attention_bias, hparams, - name="encoder"): + name="encoder", + nonpadding=None): """A stack of transformer layers. Args: @@ -474,15 +526,24 @@ def transformer_encoder(encoder_input, (see common_attention.attention_bias()) hparams: hyperparameters for model name: a string + nonpadding: optional Tensor with shape [batch_size, encoder_length] + indicating what positions are not padding. This must either be + passed in, which we do for "packed" datasets, or inferred from + encoder_self_attention_bias. The knowledge about padding is used + for pad_remover(efficiency) and to mask out padding in convoltutional + layers. Returns: y: a Tensors """ x = encoder_input with tf.variable_scope(name): - # TODO(noam): We should pass in the padding directly. - padding = common_attention.attention_bias_to_padding( - encoder_self_attention_bias) + if nonpadding is not None: + padding = 1.0 - nonpadding + else: + padding = common_attention.attention_bias_to_padding( + encoder_self_attention_bias) + nonpadding = 1.0 - padding pad_remover = None if hparams.use_pad_remover: pad_remover = expert_utils.PadRemover(padding) @@ -505,7 +566,7 @@ def transformer_encoder(encoder_input, with tf.variable_scope("ffn"): y = transformer_ffn_layer( common_layers.layer_preprocess(x, hparams), hparams, pad_remover, - conv_padding="SAME", nonpadding_mask=1.0 - padding) + conv_padding="SAME", nonpadding_mask=nonpadding) x = common_layers.layer_postprocess(x, y, hparams) # if normalization is done in layer_preprocess, then it shuold also be done # on the output, since the output can grow very large, being the sum of @@ -519,7 +580,8 @@ def transformer_decoder(decoder_input, encoder_decoder_attention_bias, hparams, cache=None, - name="decoder"): + name="decoder", + nonpadding=None): """A stack of transformer layers. Args: @@ -533,6 +595,11 @@ def transformer_decoder(decoder_input, cache: dict, containing tensors which are the results of previous attentions, used for fast decoding. name: a string + nonpadding: optional Tensor with shape [batch_size, encoder_length] + indicating what positions are not padding. This is used + to mask out padding in convoltutional layers. We generally only + need this mask for "packed" datasets, because for ordinary datasets, + no padding is ever followed by nonpadding. 
Returns: y: a Tensors @@ -572,7 +639,7 @@ def transformer_decoder(decoder_input, with tf.variable_scope("ffn"): y = transformer_ffn_layer( common_layers.layer_preprocess(x, hparams), hparams, - conv_padding="LEFT") + conv_padding="LEFT", nonpadding_mask=nonpadding) x = common_layers.layer_postprocess(x, y, hparams) # if normalization is done in layer_preprocess, then it shuold also be done # on the output, since the output can grow very large, being the sum of From 7f3ef1ea3f97d81ed2ee36382788a3e2406409e2 Mon Sep 17 00:00:00 2001 From: Noam Shazeer Date: Thu, 30 Nov 2017 12:00:54 -0800 Subject: [PATCH 03/11] Make Parallelism object use reuse=True by default. Solves tpu checkpoint compatibility bug. PiperOrigin-RevId: 177487398 --- tensor2tensor/layers/modalities_test.py | 6 +++--- tensor2tensor/utils/devices.py | 1 - tensor2tensor/utils/expert_utils.py | 7 ++++--- tensor2tensor/utils/t2t_model.py | 4 ++-- 4 files changed, 9 insertions(+), 9 deletions(-) diff --git a/tensor2tensor/layers/modalities_test.py b/tensor2tensor/layers/modalities_test.py index 574ddc77c..f5f7b8998 100644 --- a/tensor2tensor/layers/modalities_test.py +++ b/tensor2tensor/layers/modalities_test.py @@ -49,7 +49,7 @@ def testSymbolModalityInputs(self): vocab_size, size=(batch_size, length, 1, 1)) m = modalities.SymbolModality(model_hparams, vocab_size) data_parallelism = expert_utils.Parallelism( - ["/device:CPU:0"] * num_datashards, reuse=True) + ["/device:CPU:0"] * num_datashards) with self.test_session() as session: xs = tf.split(x, num_datashards) sharded_output = m.bottom_sharded(xs, data_parallelism) @@ -82,7 +82,7 @@ def testSymbolModalityTargets(self): vocab_size, size=(batch_size, length, height, 1)) m = modalities.SymbolModality(model_hparams, vocab_size) data_parallelism = expert_utils.Parallelism( - ["/device:CPU:0"] * num_datashards, reuse=True) + ["/device:CPU:0"] * num_datashards) with self.test_session() as session: sharded_body_output = tf.split(tf.to_float(body_output), num_datashards) sharded_targets = tf.split(targets, num_datashards) @@ -120,7 +120,7 @@ def testSymbolModalityTargetsFactored(self): vocab_size, size=(batch_size, length, height, 1)) m = modalities.SymbolModality(model_hparams, vocab_size) data_parallelism = expert_utils.Parallelism( - ["/device:CPU:0"] * num_datashards, reuse=True) + ["/device:CPU:0"] * num_datashards) with self.test_session() as session: sharded_body_output = tf.split(tf.to_float(body_output), num_datashards) sharded_targets = tf.split(targets, num_datashards) diff --git a/tensor2tensor/utils/devices.py b/tensor2tensor/utils/devices.py index cf1f5fb25..490366cab 100644 --- a/tensor2tensor/utils/devices.py +++ b/tensor2tensor/utils/devices.py @@ -147,6 +147,5 @@ def _replica_device_setter(worker_device): tf.logging.info("caching_devices: %s", caching_devices) return eu.Parallelism( datashard_devices, - reuse=True, caching_devices=caching_devices, daisy_chain_variables=hparams.daisy_chain_variables) diff --git a/tensor2tensor/utils/expert_utils.py b/tensor2tensor/utils/expert_utils.py index 7d4912bc6..8fe5479da 100644 --- a/tensor2tensor/utils/expert_utils.py +++ b/tensor2tensor/utils/expert_utils.py @@ -129,7 +129,7 @@ class Parallelism(object): def __init__(self, device_names_or_functions, - reuse=None, + reuse=True, caching_devices=None, daisy_chain_variables=False): """Create a Parallelism. @@ -945,7 +945,8 @@ def distributed_moe(data_parallelism, # We use the default of reuse=False. Otherwise, the experts would all # use the same variables. 
ep = Parallelism( - [expert_devices[i % len(expert_devices)] for i in xrange(num_experts)]) + [expert_devices[i % len(expert_devices)] for i in xrange(num_experts)], + reuse=None) # Experts expect 2d input tensors, so flatten the batch dimension and all # spatial dimensions together. xs_flat = dp(tf.reshape, xs, [[-1, input_size]] * dp.n) @@ -1034,7 +1035,7 @@ def local_moe(x, v = flatten_all_but_last(v) expert_kwargs[k] = dispatcher.dispatch(v) - ep = Parallelism([DEFAULT_DEV_STRING] * num_experts) + ep = Parallelism([DEFAULT_DEV_STRING] * num_experts, reuse=None) expert_outputs = ep(expert_fn, **expert_kwargs) y_flat = dispatcher.combine(expert_outputs) diff --git a/tensor2tensor/utils/t2t_model.py b/tensor2tensor/utils/t2t_model.py index ff7584b07..0f7b865b6 100644 --- a/tensor2tensor/utils/t2t_model.py +++ b/tensor2tensor/utils/t2t_model.py @@ -75,7 +75,7 @@ def __init__(self, super(T2TModel, self).__init__( trainable=mode == tf.estimator.ModeKeys.TRAIN, name=name) if data_parallelism is None: - data_parallelism = eu.Parallelism([""], reuse=True) + data_parallelism = eu.Parallelism([""]) if ps_devices is None: ps_devices = [""] if problem_hparams is None: @@ -971,7 +971,7 @@ def _create_data_parallelism(num_gpus=1, data_shard_devices += ["cpu:0"] assert len(data_shard_devices) == num_shards tf.logging.info("Data parallel devices: %s", data_shard_devices) - return eu.Parallelism(data_shard_devices, reuse=True) + return eu.Parallelism(data_shard_devices) # These metrics are implemented with py_funcs and therefore do no work with TPU From 01030eb6f9f8052114a0eb3fd91e0862da05ada9 Mon Sep 17 00:00:00 2001 From: Noam Shazeer Date: Thu, 30 Nov 2017 12:01:01 -0800 Subject: [PATCH 04/11] Remove TranslateEndeWmtBpe32kPacked. We have TranslateEndeWmt32kPacked. PiperOrigin-RevId: 177487419 --- tensor2tensor/data_generators/translate_ende.py | 12 ------------ 1 file changed, 12 deletions(-) diff --git a/tensor2tensor/data_generators/translate_ende.py b/tensor2tensor/data_generators/translate_ende.py index bbd502fdd..2124be32a 100644 --- a/tensor2tensor/data_generators/translate_ende.py +++ b/tensor2tensor/data_generators/translate_ende.py @@ -116,18 +116,6 @@ def target_space_id(self): return problem.SpaceID.DE_BPE_TOK -@registry.register_problem -class TranslateEndeWmtBpe32kPacked(TranslateEndeWmtBpe32k): - """Problem spec for WMT En-De translation, BPE version. - - Training/eval examples are concatenated to a maximum length of 256. - """ - - @property - def packed_length(self): - return 256 - - @registry.register_problem class TranslateEndeWmt8k(translate.TranslateProblem): """Problem spec for WMT En-De translation.""" From 24c1fd755ebb4a3f1b81310b3560b18f1cd911bb Mon Sep 17 00:00:00 2001 From: Lukasz Kaiser Date: Thu, 30 Nov 2017 14:05:55 -0800 Subject: [PATCH 05/11] Clean up transformer_vae and add refining. 
PiperOrigin-RevId: 177505082 --- tensor2tensor/models/transformer_vae.py | 197 +++++++++++++----------- 1 file changed, 111 insertions(+), 86 deletions(-) diff --git a/tensor2tensor/models/transformer_vae.py b/tensor2tensor/models/transformer_vae.py index 4a7290c23..140959c34 100644 --- a/tensor2tensor/models/transformer_vae.py +++ b/tensor2tensor/models/transformer_vae.py @@ -32,6 +32,9 @@ import tensorflow as tf +_DO_SUMMARIES = True + + def residual_conv(x, repeat, k, hparams, name, reuse=None): """A stack of convolution blocks with residual connections.""" with tf.variable_scope(name, reuse=reuse): @@ -110,7 +113,8 @@ def dae(x, hparams, name): s = tf.nn.softmax((logsm + gumbel_samples) / temperature) m = tf.nn.softmax(m) kl = - tf.reduce_max(logsm, axis=-1) - tf.summary.histogram("max-log", tf.reshape(kl, [-1])) + if _DO_SUMMARIES: + tf.summary.histogram("max-log", tf.reshape(kl, [-1])) # Calculate the argmax and construct hot vectors. maxvec = tf.reshape(tf.argmax(m, axis=-1), [-1]) maxvhot = tf.stop_gradient(tf.one_hot(maxvec, hparams.v_size)) @@ -134,7 +138,9 @@ def vae(x, z_size, name): z = mu + tf.exp(log_sigma / 2) * epsilon kl = 0.5 * tf.reduce_mean( tf.exp(log_sigma) + tf.square(mu) - 1. - log_sigma, axis=-1) - return z, tf.reduce_mean(kl), mu, log_sigma + free_bits = z_size // 2 + kl_loss = tf.maximum(tf.reduce_mean(kl) - free_bits, 0.0) + return z, kl_loss, mu, log_sigma def nearest(x, means, hparams): @@ -187,35 +193,39 @@ def int_to_bit(x_int, nbits): def bottleneck(x, hparams, filter_size, name): """Bottleneck.""" - def embed1(x): - if hparams.bottleneck_kind == "semhash": - c = int_to_bit(x, c_size) - h1a = tf.layers.dense(c, filter_size, name="vch1a") - h1b = tf.layers.dense(1.0 - c, filter_size, name="vch1b") - return h1a + h1b - elif hparams.bottleneck_kind == "gumbel-softmax": - hot = tf.one_hot(x, hparams.v_size) - with tf.variable_scope(name, reuse=True): - return tf.layers.dense(hot, hparams.hidden_size, name="dae_dense") - def embed(x): + """Embedding function; must be compatible with the code later.""" with tf.variable_scope(name, reuse=True): - h1 = embed1(x) + if hparams.bottleneck_kind == "semhash": + c = int_to_bit(x, z_size) + h1a = tf.layers.dense(c, filter_size, name="vch1a") + h1b = tf.layers.dense(1.0 - c, filter_size, name="vch1b") + h1 = h1a + h1b + elif hparams.bottleneck_kind == "gumbel-softmax": + hot = tf.one_hot(x, hparams.v_size) + h1 = tf.layers.dense(hot, hparams.hidden_size, name="dae_dense") + elif hparams.bottleneck_kind == "vq-vae": + means = tf.get_variable(name="means", + shape=[hparams.v_size, hparams.hidden_size]) + h1 = tf.gather(means, x) + h2 = tf.layers.dense(tf.nn.relu(h1), filter_size, name="vch2") - res = tf.layers.dense(tf.nn.relu(h2), hparams.hidden_size, name="vcfin") - return res + return tf.layers.dense(tf.nn.relu(h2), hparams.hidden_size, name="vcfin") with tf.variable_scope(name): - c_size = hparams.c_size + z_size = hparams.z_size l = tf.constant(0.0) if hparams.bottleneck_kind == "dense": - c = tf.layers.dense(x, c_size, name="vcc") + c = tf.layers.dense(x, z_size, name="vcc") + h1 = tf.layers.dense(c, filter_size, name="vch1") + if hparams.bottleneck_kind == "vae": + c, l, _, _ = vae(x, z_size, "vae") h1 = tf.layers.dense(c, filter_size, name="vch1") if hparams.bottleneck_kind == "semhash": - c = tf.layers.dense(x, c_size, name="vcc") + c = tf.layers.dense(x, z_size, name="vcc") y_clean = common_layers.saturating_sigmoid(c) - tf.summary.histogram("y_clean", tf.reshape(y_clean, [-1])) - # l = tf.reduce_mean(y_clean * 
(1.0 - y_clean)) + if _DO_SUMMARIES: + tf.summary.histogram("y_clean", tf.reshape(y_clean, [-1])) if hparams.noise_dev > 0 and hparams.mode == tf.estimator.ModeKeys.TRAIN: dev = hparams.noise_dev noise = tf.truncated_normal(tf.shape(c), mean=0.0, stddev=dev) @@ -233,7 +243,7 @@ def embed(x): h1b = tf.layers.dense(1.0 - c, filter_size, name="vch1b") h1 = h1a + h1b dx = tf.to_int32(tf.stop_gradient(d)) - c = bit_to_int(dx, c_size) + c = bit_to_int(dx, z_size) if hparams.bottleneck_kind == "gumbel-softmax": _, hot, l = dae(x, hparams, name) c = tf.argmax(hot, axis=-1) @@ -331,43 +341,54 @@ def next_bit(t_bit, i): def ae_transformer_internal(inputs, targets, target_space, hparams, beam_size, cache=None, predict_mask=1.0): """AE Transformer, main step used for training.""" - hparams.z_size = hparams.hidden_size - with tf.variable_scope("ae_transformer"): - # Prepare inputs, targets, k. - orig_targets = targets - batch_size = tf.shape(orig_targets)[0] - targets = tf.reshape(targets, [batch_size, -1, 1, hparams.hidden_size]) - k = hparams.num_compress_steps - - # Encoder. - if inputs is not None: - inputs = common_layers.flatten4d3d(inputs) - inputs, ed = encode(inputs, target_space, hparams, "input_enc") - else: - ed = None - - # Autoencoding. - losses = {"vc": tf.constant(0.0), "sm": tf.constant(0.0)} - if hparams.do_ae: - targets, _ = common_layers.pad_to_same_length( - targets, targets, final_length_divisible_by=2**k) - targets_c = compress(targets, False, hparams, "compress") - if hparams.mode != tf.estimator.ModeKeys.PREDICT: - # Compress and bottleneck. - t_c, t_bit, vc_loss, _ = bottleneck(targets_c, hparams, 2*2048, "vc") + # Summaries break with the do_refine cond, turn them off in that case. + global _DO_SUMMARIES + if hparams.do_refine: + _DO_SUMMARIES = False + + # Prepare. + orig_targets = targets + batch_size = tf.shape(orig_targets)[0] + targets = tf.reshape(targets, [batch_size, -1, 1, hparams.hidden_size]) + + # Encoder. + if inputs is not None: + inputs = common_layers.flatten4d3d(inputs) + inputs, ed = encode(inputs, target_space, hparams, "input_enc") + else: + ed = None + + # Autoencoding. + losses = {"extra": tf.constant(0.0), "latent_pred": tf.constant(0.0)} + if hparams.do_ae: + max_targets_len_from_inputs = tf.concat([inputs, inputs], axis=1) + targets, _ = common_layers.pad_to_same_length( + targets, max_targets_len_from_inputs, + final_length_divisible_by=2**hparams.num_compress_steps) + targets_c = compress(targets, False, hparams, "compress") + if hparams.mode != tf.estimator.ModeKeys.PREDICT: + # Compress and bottleneck. + t_c, t_bit, vc_loss, _ = bottleneck(targets_c, hparams, 2*2048, "vc") + if _DO_SUMMARIES: tf.summary.histogram("bit0", tf.reshape(t_bit[:, 0, :], [-1])) - pc = common_layers.inverse_exp_decay(hparams.startup_steps) * 0.95 - pc = pc if hparams.mode == tf.estimator.ModeKeys.TRAIN else 1.0 - cond = tf.less(tf.random_uniform([]), pc) - t_c = tf.cond(cond, lambda: t_c, lambda: targets_c) - losses["vc"] = vc_loss * tf.to_float(cond) - # Extra loss predicting latent code from input. + pc = common_layers.inverse_exp_decay(hparams.startup_steps) * 0.95 + pc = pc if hparams.mode == tf.estimator.ModeKeys.TRAIN else 1.0 + cond = tf.less(tf.random_uniform([]), pc) + t_c = tf.cond(cond, lambda: t_c, lambda: targets_c) + losses["extra"] = vc_loss * tf.to_float(cond) + # Extra loss predicting latent code from input. Discrete only. 
+ if hparams.bottleneck_kind not in ["dense", "vae"]: t_pred = decode_transformer( inputs, ed, tf.stop_gradient(t_c), hparams, "extra") t_pred = tf.layers.dense(t_pred, 2**16, name="extra_logits") - losses["sm"] = tf.nn.sparse_softmax_cross_entropy_with_logits( + losses["latent_pred"] = tf.nn.sparse_softmax_cross_entropy_with_logits( labels=t_bit, logits=t_pred) - losses["sm"] = tf.reduce_mean(losses["sm"]) * 0.5 * tf.to_float(cond) + losses["latent_pred"] = tf.reduce_mean( + losses["latent_pred"]) * 0.5 * tf.to_float(cond) + else: + if hparams.bottleneck_kind in ["dense", "vae"]: + targets_rand = tf.random_uniform(tf.shape(targets_c)) + t_c, _, _, _ = bottleneck(targets_rand, hparams, 2*2048, "vc") else: latent_len = tf.shape(targets_c)[1] _, _, _, embed = bottleneck(targets_c, hparams, 2*2048, "vc") @@ -378,33 +399,39 @@ def ae_transformer_internal(inputs, targets, target_space, hparams, cache = tf.reshape(cache, [1, latent_len, 1]) cache = tf.tile(cache, [beam_size, 1, 1]) t_c = embed(cache) - # Postprocess. - d = t_c - pos = tf.get_variable("pos", [1, 1000, 1, hparams.hidden_size]) - pos = pos[:, :tf.shape(t_c)[1] + 1, :, :] - t_c = tf.pad(t_c, [[0, 0], [1, 0], [0, 0], [0, 0]]) + pos - - # Masking. - if hparams.do_mask: - masking = common_layers.inverse_lin_decay(100000) - masking *= common_layers.inverse_exp_decay(25000) # Not much at start. + # Postprocess. + d = t_c + pos = tf.get_variable("pos", [1, 1000, 1, hparams.hidden_size]) + pos = pos[:, :tf.shape(t_c)[1] + 1, :, :] + t_c = tf.pad(t_c, [[0, 0], [1, 0], [0, 0], [0, 0]]) + pos + + # Masking. + if hparams.do_mask: + masking = common_layers.inverse_lin_decay(100000) + masking *= common_layers.inverse_exp_decay(25000) # Not much at start. + if not hparams.do_refine: masking -= tf.random_uniform([]) * 0.3 - masking = tf.minimum(tf.maximum(masking, 0.0), 1.0) - if hparams.mode == tf.estimator.ModeKeys.PREDICT: - masking = predict_mask - mask = tf.less(masking, tf.random_uniform(tf.shape(targets)[:-1])) - mask = tf.expand_dims(tf.to_float(mask), 3) - for i in xrange(hparams.num_compress_steps): - j = hparams.num_compress_steps - i - 1 - d = residual_conv(d, 1, (3, 1), hparams, "decompress_rc_%d" % j) - d = decompress_step(d, hparams, i > 0, False, "decompress_%d" % j) - targets = mask * targets + (1.0 - mask) * d - targets = tf.concat([tf.reverse(t_c, [1]), targets], axis=1) - - res = decode_transformer(inputs, ed, targets, hparams, "decoder") - if hparams.do_ae: - res = res[:, tf.shape(t_c)[1]:, :, :] - return res, losses, cache + masking = tf.minimum(tf.maximum(masking, 0.0), 1.0) + if hparams.mode == tf.estimator.ModeKeys.PREDICT: + masking = predict_mask + mask = tf.less(masking, tf.random_uniform(tf.shape(targets)[:-1])) + mask = tf.expand_dims(tf.to_float(mask), 3) + for i in xrange(hparams.num_compress_steps): + j = hparams.num_compress_steps - i - 1 + d = residual_conv(d, 1, (3, 1), hparams, "decompress_rc_%d" % j) + d = decompress_step(d, hparams, i > 0, False, "decompress_%d" % j) + targets = mask * targets + (1.0 - mask) * d + targets = tf.concat([tf.reverse(t_c, [1]), targets], axis=1) + + res = decode_transformer(inputs, ed, targets, hparams, "decoder") + if hparams.do_ae: + res = res[:, tf.shape(t_c)[1]:, :, :] + if hparams.do_mask and hparams.do_refine: + def refine_res(): + return residual_conv(res, 1, (5, 1), hparams, "refine") + all_masked = tf.less(tf.reduce_sum(mask), 0.1) + res = tf.cond(all_masked, refine_res, lambda: res) + return res, losses, cache @registry.register_model @@ -466,7 +493,7 @@ def infer(self, 
features=None, decode_length=50, beam_size=1, top_beams=1, else: batch_size = tf.shape(features["inputs"])[0] length = tf.shape(features["inputs"])[1] - target_length = tf.to_int32(1.3 * tf.to_float(length)) + target_length = tf.to_int32(2.0 * tf.to_float(length)) initial_output = tf.zeros((batch_size, target_length, 1, 1), dtype=tf.int64) @@ -489,15 +516,15 @@ def transformer_ae_small(): hparams.hidden_size = 384 hparams.filter_size = 2048 hparams.label_smoothing = 0.0 - hparams.add_hparam("c_size", 16) + hparams.add_hparam("z_size", 16) hparams.add_hparam("noise_dev", 1.0) hparams.add_hparam("d_mix", 0.5) - # Bottleneck kinds supported: dense, semhash, gumbel-softmax. + # Bottleneck kinds supported: dense, vae, semhash, gumbel-softmax, vq-vae. hparams.add_hparam("bottleneck_kind", "semhash") hparams.add_hparam("do_ae", True) hparams.add_hparam("do_mask", True) + hparams.add_hparam("do_refine", True) hparams.add_hparam("drop_inputs", False) - hparams.add_hparam("z_size", 128) hparams.add_hparam("v_size", 1024*64) hparams.add_hparam("max_context_length", 64) hparams.add_hparam("num_compress_steps", 3) @@ -522,8 +549,6 @@ def transformer_ae_cifar(): hparams = transformer_ae_small() hparams.hidden_size = 256 hparams.filter_size = 512 - hparams.z_size = 256 # 64 - hparams.z_size2 = 0 # 16 hparams.batch_size = 1024 * 4 hparams.num_compress_steps = 2 hparams.v_size = 1024 * 16 From aa2c0b733f730d31852a34e62c4c72d99d1c9a15 Mon Sep 17 00:00:00 2001 From: Ryan Sepassi Date: Thu, 30 Nov 2017 18:30:07 -0800 Subject: [PATCH 06/11] T2T depends on TF 1.4+, daisy_chain_getter bug fix, some Eager-mode improvements/fixes PiperOrigin-RevId: 177538074 --- docs/example_life.md | 2 +- setup.py | 10 +- .../data_generators/generator_utils.py | 13 + tensor2tensor/data_generators/image.py | 40 +- tensor2tensor/data_generators/problem.py | 13 +- tensor2tensor/layers/rev_block.py | 2 +- tensor2tensor/notebooks/hello_t2t.ipynb | 891 ++++++++++++++++++ tensor2tensor/utils/data_reader.py | 22 +- tensor2tensor/utils/expert_utils.py | 24 +- tensor2tensor/utils/t2t_model.py | 51 +- 10 files changed, 998 insertions(+), 70 deletions(-) create mode 100644 tensor2tensor/notebooks/hello_t2t.ipynb diff --git a/docs/example_life.md b/docs/example_life.md index f3b18a817..ce6948b05 100644 --- a/docs/example_life.md +++ b/docs/example_life.md @@ -75,7 +75,7 @@ hooks in the `Problem` class and the model's `HParams` object (typically registered in the model's file and specified by the `--hparams_set` flag). The entire input pipeline is implemented with the new `tf.data.Dataset` API -(previously `tf.contrib.data.Dataset`). +(previously `tf.data.Dataset`). The key function in the codebase for the input pipeline is [`data_reader.input_pipeline`](https://github.com/tensorflow/tensor2tensor/tree/master/tensor2tensor/utils/data_reader.py). 
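Note: the doc fix above points at the input pipeline now being built on `tf.data.Dataset`; the rest of this commit updates the call sites accordingly (`num_parallel_calls` instead of `num_threads` on `map`, an explicit `prefetch` instead of `output_buffer_size`). A minimal sketch of the resulting pattern, with placeholder names for the file list and parser rather than the real `Problem.dataset` signature:

```python
import tensorflow as tf

def make_dataset(data_files, decode_record, num_threads=4, output_buffer_size=256):
  """Sketch of the TF 1.4 tf.data style this change moves to (names illustrative)."""
  dataset = tf.data.TFRecordDataset(data_files)
  # map() no longer takes num_threads/output_buffer_size keywords in TF 1.4:
  dataset = dataset.map(decode_record, num_parallel_calls=num_threads)
  # Buffering is now an explicit transformation on the dataset.
  dataset = dataset.prefetch(output_buffer_size)
  return dataset
```

Bucketing by sequence length is handled separately via `tf.contrib.data.group_by_window`, as in the `data_reader.py` hunk later in this commit.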
diff --git a/setup.py b/setup.py index 5027918af..34a94965c 100644 --- a/setup.py +++ b/setup.py @@ -15,8 +15,7 @@ package_data={ 'tensor2tensor.data_generators': ['test_data/*'], 'tensor2tensor.visualization': [ - 'attention.js', - 'TransformerVisualization.ipynb' + 'attention.js', 'TransformerVisualization.ipynb' ], }, scripts=[ @@ -34,8 +33,8 @@ 'six', ], extras_require={ - 'tensorflow': ['tensorflow>=1.3.0'], - 'tensorflow_gpu': ['tensorflow-gpu>=1.3.0'], + 'tensorflow': ['tensorflow>=1.4.0'], + 'tensorflow_gpu': ['tensorflow-gpu>=1.4.0'], 'tests': ['pytest', 'h5py', 'mock'], }, classifiers=[ @@ -45,4 +44,5 @@ 'License :: OSI Approved :: Apache Software License', 'Topic :: Scientific/Engineering :: Artificial Intelligence', ], - keywords='tensorflow machine learning',) + keywords='tensorflow machine learning', +) diff --git a/tensor2tensor/data_generators/generator_utils.py b/tensor2tensor/data_generators/generator_utils.py index 2d21da2ba..236d43772 100644 --- a/tensor2tensor/data_generators/generator_utils.py +++ b/tensor2tensor/data_generators/generator_utils.py @@ -125,6 +125,13 @@ def shard_filepath(fname, num_shards): ] +def outputs_exist(filenames): + for out_fname in filenames: + out_fname = out_fname.replace(UNSHUFFLED_SUFFIX, "") + if tf.gfile.Exists(out_fname): + return out_fname + + def generate_files(generator, output_filenames, max_cases=None): """Generate cases from a generator and save as TFRecord files. @@ -137,6 +144,9 @@ def generate_files(generator, output_filenames, max_cases=None): max_cases: maximum number of cases to get from the generator; if None (default), we use the generator until StopIteration is raised. """ + if outputs_exist(output_filenames): + tf.logging.info("Skipping generator because outputs files exist") + return num_shards = len(output_filenames) writers = [tf.python_io.TFRecordWriter(fname) for fname in output_filenames] counter, shard = 0, 0 @@ -440,6 +450,9 @@ def generate_dataset_and_shuffle(train_gen, def shuffle_dataset(filenames): + if outputs_exist(filenames): + tf.logging.info("Skipping shuffle because output files exist") + return tf.logging.info("Shuffling data...") for fname in filenames: records = read_records(fname) diff --git a/tensor2tensor/data_generators/image.py b/tensor2tensor/data_generators/image.py index e5d378b52..70bca2d60 100644 --- a/tensor2tensor/data_generators/image.py +++ b/tensor2tensor/data_generators/image.py @@ -42,6 +42,8 @@ import tensorflow as tf +from tensorflow.python.eager import context + def resize_by_area(img, size): """image resize function used by quite a few image problems.""" @@ -463,6 +465,21 @@ def hparams(self, defaults, unused_model_hparams): p.target_space_id = 1 +def _encoded_images(images): + if context.in_eager_mode(): + for image in images: + yield tf.image.encode_png(image).numpy() + else: + (width, height, channels) = images[0].shape + with tf.Graph().as_default(): + image_t = tf.placeholder(dtype=tf.uint8, shape=(width, height, channels)) + encoded_image_t = tf.image.encode_png(image_t) + with tf.Session() as sess: + for image in images: + enc_string = sess.run(encoded_image_t, feed_dict={image_t: image}) + yield enc_string + + def image_generator(images, labels): """Generator for images that takes image and labels lists and creates pngs. 
@@ -484,20 +501,15 @@ def image_generator(images, labels): """ if not images: raise ValueError("Must provide some images for the generator.") - (width, height, channels) = images[0].shape - with tf.Graph().as_default(): - image_t = tf.placeholder(dtype=tf.uint8, shape=(width, height, channels)) - encoded_image_t = tf.image.encode_png(image_t) - with tf.Session() as sess: - for (image, label) in zip(images, labels): - enc_string = sess.run(encoded_image_t, feed_dict={image_t: image}) - yield { - "image/encoded": [enc_string], - "image/format": ["png"], - "image/class/label": [int(label)], - "image/height": [height], - "image/width": [width] - } + width, height, _ = images[0].shape + for (enc_image, label) in zip(_encoded_images(images), labels): + yield { + "image/encoded": [enc_image], + "image/format": ["png"], + "image/class/label": [int(label)], + "image/height": [height], + "image/width": [width] + } # URLs and filenames for MNIST data. diff --git a/tensor2tensor/data_generators/problem.py b/tensor2tensor/data_generators/problem.py index d80cc01da..6a1a7208e 100644 --- a/tensor2tensor/data_generators/problem.py +++ b/tensor2tensor/data_generators/problem.py @@ -382,7 +382,7 @@ def dataset(self, data_filepattern) if shuffle_files or shuffle_files is None and is_training: random.shuffle(data_files) - dataset = tf.contrib.data.TFRecordDataset(data_files) + dataset = tf.data.TFRecordDataset(data_files) def decode_record(record): """Serialized Example to dict of .""" @@ -399,13 +399,12 @@ def _preprocess(example): self.maybe_copy_features(example) return example - dataset = dataset.map(decode_record, num_threads=num_threads) + dataset = dataset.map(decode_record, num_parallel_calls=num_threads) if preprocess: - dataset = dataset.map( - _preprocess, - num_threads=num_threads, - output_buffer_size=output_buffer_size) + dataset = dataset.map(_preprocess, num_parallel_calls=num_threads) + if output_buffer_size: + dataset = dataset.prefetch(output_buffer_size) return dataset @@ -517,7 +516,7 @@ def define_shapes(example): dataset = self.dataset( mode=mode, data_dir=data_dir, num_threads=num_threads, hparams=hparams) dataset = dataset.map( - data_reader.cast_int64_to_int32, num_threads=num_threads) + data_reader.cast_int64_to_int32, num_parallel_calls=num_threads) if is_training: dataset = dataset.repeat(None) diff --git a/tensor2tensor/layers/rev_block.py b/tensor2tensor/layers/rev_block.py index eaeb55921..88bf622ab 100644 --- a/tensor2tensor/layers/rev_block.py +++ b/tensor2tensor/layers/rev_block.py @@ -399,7 +399,7 @@ def grad_fn(inputs, variables, outputs, output_grads): @common_layers.fn_with_custom_grad(grad_fn) def fn_with_recompute(*args): cached_vs.append(tf.get_variable_scope()) - # TODO(rsepassi): Rm conditional in TF 1.4 + # TODO(rsepassi): Rm conditional in TF 1.5 if hasattr(tf.contrib.framework, "current_arg_scope"): cached_arg_scope.append(tf.contrib.framework.current_arg_scope()) else: diff --git a/tensor2tensor/notebooks/hello_t2t.ipynb b/tensor2tensor/notebooks/hello_t2t.ipynb new file mode 100644 index 000000000..86070da40 --- /dev/null +++ b/tensor2tensor/notebooks/hello_t2t.ipynb @@ -0,0 +1,891 @@ +{ + "nbformat": 4, + "nbformat_minor": 0, + "metadata": { + "colab": { + "name": "T2T with TF Eager", + "version": "0.3.2", + "views": {}, + "default_view": {}, + "provenance": [], + "collapsed_sections": [] + } + }, + "cells": [ + { + "metadata": { + "id": "s19ucTii_wYb", + "colab_type": "code", + "colab": { + "autoexec": { + "startup": false, + "wait_interval": 0 + } + } + }, + 
"source": [ + "# Copyright 2017 Google LLC.\n", + "\n", + "# Licensed under the Apache License, Version 2.0 (the \"License\");\n", + "# you may not use this file except in compliance with the License.\n", + "# You may obtain a copy of the License at\n", + "\n", + "# https://www.apache.org/licenses/LICENSE-2.0\n", + "\n", + "# Unless required by applicable law or agreed to in writing, software\n", + "# distributed under the License is distributed on an \"AS IS\" BASIS,\n", + "# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n", + "# See the License for the specific language governing permissions and\n", + "# limitations under the License." + ], + "cell_type": "code", + "execution_count": 0, + "outputs": [] + }, + { + "metadata": { + "id": "OPGni6fuvoTj", + "colab_type": "code", + "colab": { + "autoexec": { + "startup": false, + "wait_interval": 0 + } + } + }, + "source": [ + "# Install deps\n", + "!pip install -q \"tensor2tensor-dev==1.3.1.dev5\" tf-nightly" + ], + "cell_type": "code", + "execution_count": 0, + "outputs": [] + }, + { + "metadata": { + "id": "oILRLCWN_16u", + "colab_type": "code", + "colab": { + "autoexec": { + "startup": false, + "wait_interval": 0 + } + } + }, + "source": [ + "import tensorflow as tf\n", + "import matplotlib.pyplot as plt\n", + "import numpy as np\n", + "import os\n", + "\n", + "from tensor2tensor import problems\n", + "from tensor2tensor.utils import t2t_model\n", + "from tensor2tensor.utils import trainer_utils\n", + "from tensor2tensor.utils import registry\n", + "from tensor2tensor.utils import metrics\n", + "\n", + "# Enable TF Eager execution\n", + "from tensorflow.contrib.eager.python import tfe\n", + "tfe.enable_eager_execution()\n", + "\n", + "# Other setup\n", + "Modes = tf.estimator.ModeKeys\n", + "\n", + "# Setup some directories\n", + "data_dir = os.path.expanduser(\"~/t2t/data\")\n", + "tmp_dir = os.path.expanduser(\"~/t2t/tmp\")\n", + "train_dir = os.path.expanduser(\"~/t2t/train\")\n", + "checkpoint_dir = os.path.expanduser(\"~/t2t/checkpoints\")\n", + "tf.gfile.MakeDirs(data_dir)\n", + "tf.gfile.MakeDirs(tmp_dir)\n", + "tf.gfile.MakeDirs(train_dir)\n", + "tf.gfile.MakeDirs(checkpoint_dir)" + ], + "cell_type": "code", + "execution_count": 0, + "outputs": [] + }, + { + "metadata": { + "id": "gXL7_bVH49Kl", + "colab_type": "text" + }, + "source": [ + "# Translate from English to French with a pre-trained model" + ], + "cell_type": "markdown" + }, + { + "metadata": { + "id": "Q2CYCYjZTlZs", + "colab_type": "code", + "colab": { + "autoexec": { + "startup": false, + "wait_interval": 0 + }, + "output_extras": [ + { + "item_id": 5 + } + ], + "base_uri": "https://localhost:8080/", + "height": 136 + }, + "outputId": "9d08dd17-a3a1-49ba-930c-a07f11ea24e3", + "executionInfo": { + "status": "ok", + "timestamp": 1512092524785, + "user_tz": 480, + "elapsed": 17914, + "user": { + "displayName": "Ryan Sepassi", + "photoUrl": "//lh4.googleusercontent.com/-dcHmhQy1Y2A/AAAAAAAAAAI/AAAAAAAABEw/if_k14yF4KI/s50-c-k-no/photo.jpg", + "userId": "107877449274830904926" + } + } + }, + "source": [ + "# Translation\n", + "enfr_problem = registry.problem(\"translate_enfr_wmt_small32k\")\n", + "enfr_problem.generate_data(data_dir, tmp_dir) " + ], + "cell_type": "code", + "execution_count": 3, + "outputs": [ + { + "output_type": "stream", + "text": [ + "INFO:tensorflow:Found vocab file: /content/t2t/data/vocab.enfr.32768\n", + "INFO:tensorflow:Not downloading, file already found: /content/t2t/tmp/baseline-1M-enfr.tgz\n", + "INFO:tensorflow:Found 
vocab file: /content/t2t/data/vocab.enfr.32768\n", + "INFO:tensorflow:Not downloading, file already found: /content/t2t/tmp/baseline-1M-enfr.tgz\n", + "INFO:tensorflow:Skipping generator because outputs files exist\n", + "INFO:tensorflow:Skipping generator because outputs files exist\n", + "INFO:tensorflow:Skipping shuffle because output files exist\n" + ], + "name": "stdout" + } + ] + }, + { + "metadata": { + "id": "g2aQW7Z6TOEu", + "colab_type": "code", + "colab": { + "autoexec": { + "startup": false, + "wait_interval": 0 + }, + "output_extras": [ + { + "item_id": 2 + } + ], + "base_uri": "https://localhost:8080/", + "height": 170 + }, + "outputId": "8196348d-747e-4b33-9b7c-742d8041d0b7", + "executionInfo": { + "status": "ok", + "timestamp": 1512092525545, + "user_tz": 480, + "elapsed": 732, + "user": { + "displayName": "Ryan Sepassi", + "photoUrl": "//lh4.googleusercontent.com/-dcHmhQy1Y2A/AAAAAAAAAAI/AAAAAAAABEw/if_k14yF4KI/s50-c-k-no/photo.jpg", + "userId": "107877449274830904926" + } + } + }, + "source": [ + "example = tfe.Iterator(enfr_problem.dataset(Modes.TRAIN, data_dir)).next()\n", + "inputs = [int(x) for x in example[\"inputs\"].numpy()] # Cast to ints.\n", + "targets = [int(x) for x in example[\"targets\"].numpy()] # Cast to ints.\n", + "\n", + "encoders = enfr_problem.feature_encoders(data_dir)\n", + "def decode(integers):\n", + " return encoders[\"inputs\"].decode(np.squeeze(integers))\n", + "\n", + "# Example inputs as int-tensor.\n", + "print(\"Inputs, encoded:\")\n", + "print(inputs)\n", + "print(\"Inputs, decoded:\")\n", + "# Example inputs as a sentence.\n", + "print(decode(inputs))\n", + "# Example targets as int-tensor.\n", + "print(\"Targets, encoded:\")\n", + "print(targets)\n", + "# Example targets as a sentence.\n", + "print(\"Targets, decoded:\")\n", + "print(decode(targets))" + ], + "cell_type": "code", + "execution_count": 4, + "outputs": [ + { + "output_type": "stream", + "text": [ + "INFO:tensorflow:Reading data files from /content/t2t/data/translate_enfr_wmt_small32k-train*\n", + "Inputs, encoded:\n", + "[47, 254, 17, 280, 7, 219, 4, 696, 158, 8, 4, 2085, 135, 4, 246, 3930, 3, 780, 4, 696, 158, 8, 4, 2085, 11, 5281, 5010, 31, 2679, 8, 4, 2085, 2, 1]\n", + "Inputs, decoded:\n", + "The first is how to take the resources out of the ground -- the economic processes, taking the resources out of the ground and putting assets on top of the ground.\n", + "Targets, encoded:\n", + "[113, 699, 131, 5, 24, 6, 477, 571, 27599, 27580, 27584, 27586, 24058, 18, 1018, 37, 4663, 135, 15, 739, 360, 3, 131, 5, 24, 22, 5, 27599, 27580, 27584, 27586, 24058, 18, 1018, 37, 4663, 14, 27, 8388, 20, 2477, 16, 12, 5, 1348, 1374, 2, 1]\n", + "Targets, decoded:\n", + "Le premier c'est de savoir comment extraire les ressources du sol -- le processus économique, c'est d'extraire les ressources du sol et en retirer des avantages à l'air libre.\n" + ], + "name": "stdout" + } + ] + }, + { + "metadata": { + "id": "9l6hDQbrRUYV", + "colab_type": "code", + "colab": { + "autoexec": { + "startup": false, + "wait_interval": 0 + } + } + }, + "source": [ + "# Create hparams and the T2TModel object.\n", + "model_name = \"transformer\"\n", + "hparams_set = \"transformer_base\"\n", + "\n", + "hparams = trainer_utils.create_hparams(hparams_set, data_dir)\n", + "hparams.use_eager_mode = True\n", + "trainer_utils.add_problem_hparams(hparams, \"translate_enfr_wmt32k\")\n", + "\n", + "# NOTE: Only create the model once when restoring from a checkpoint; it's a\n", + "# Layer and so subsequent instantiations 
will have different variable scopes\n", + "# that will not match the checkpoint.\n", + "model = registry.model(model_name)(hparams, Modes.PREDICT)" + ], + "cell_type": "code", + "execution_count": 0, + "outputs": [] + }, + { + "metadata": { + "id": "FEwNUVlMYOJi", + "colab_type": "code", + "colab": { + "autoexec": { + "startup": false, + "wait_interval": 0 + } + } + }, + "source": [ + "# Copy the pretrained checkpoint locally\n", + "gs_ckpt_dir = \"gs://tensor2tensor-checkpoints/\"\n", + "ckpt_name = \"transformer_enfr_test\"\n", + "gs_ckpt = os.path.join(gs_ckpt_dir, ckpt_name)\n", + "local_ckpt = os.path.join(checkpoint_dir, ckpt_name)\n", + "!gsutil -q cp -R {gs_ckpt} {local_ckpt}\n", + "ckpt_path = tf.train.latest_checkpoint(local_ckpt)\n", + "ckpt_path" + ], + "cell_type": "code", + "execution_count": 0, + "outputs": [] + }, + { + "metadata": { + "id": "3O-8E9d6TtuJ", + "colab_type": "code", + "colab": { + "autoexec": { + "startup": false, + "wait_interval": 0 + }, + "output_extras": [ + { + "item_id": 3 + } + ], + "base_uri": "https://localhost:8080/", + "height": 139 + }, + "outputId": "d7883ce2-d90f-440c-b6b3-16ecffab481c", + "executionInfo": { + "status": "ok", + "timestamp": 1512092689851, + "user_tz": 480, + "elapsed": 141849, + "user": { + "displayName": "Ryan Sepassi", + "photoUrl": "//lh4.googleusercontent.com/-dcHmhQy1Y2A/AAAAAAAAAAI/AAAAAAAABEw/if_k14yF4KI/s50-c-k-no/photo.jpg", + "userId": "107877449274830904926" + } + } + }, + "source": [ + "# Restore and translate!\n", + "\n", + "def encode(input_str):\n", + " # Encode from raw string to ints using problem encoders.\n", + " inputs = encoders[\"inputs\"].encode(input_str) + [1] # add EOS id\n", + " batch_inputs = tf.reshape(inputs, [1, -1, 1, 1]) # Make it 4D.\n", + " # TODO: rm target_space_id\n", + " features_dict = {\"inputs\": batch_inputs,\n", + " \"target_space_id\": tf.constant(hparams.problems[0].target_space_id)}\n", + " return features_dict\n", + "\n", + "\n", + "inputs = \"This is a cat.\"\n", + "\n", + "# Restore from checkpoint and run inference\n", + "with tfe.restore_variables_on_create(ckpt_path):\n", + " samples = model.infer(encode(inputs), beam_size=1)\n", + "\n", + "print(\"Inputs: %s\" % inputs)\n", + "print(\"Outputs: %s\" % decode(samples))" + ], + "cell_type": "code", + "execution_count": 7, + "outputs": [ + { + "output_type": "stream", + "text": [ + "INFO:tensorflow:Greedy Decoding\n", + "WARNING:tensorflow:From /usr/local/lib/python2.7/dist-packages/tensor2tensor/layers/common_layers.py:487: calling reduce_mean (from tensorflow.python.ops.math_ops) with keep_dims is deprecated and will be removed in a future version.\n", + "Instructions for updating:\n", + "keep_dims is deprecated, use keepdims instead\n", + "Inputs: This is a cat.\n", + "Outputs: chairpersons solidité Istanbul individuelles cassava, «salle mutuelles détaillée adoptée cravate dépit 750 820 procédés Afghan permettraient capture fasse numérique bans got éthiciens regretteras célébrer January impressed Precisely saison complicité opérée flung ıhostiles Thinking voudrait auxiliaires holding multilateral focalisé réussisaient Steagall dons reminds researching promette assigned anachronique IPCC fatigue irresponsables homologue reprennent After formulent finit\n" + ], + "name": "stdout" + } + ] + }, + { + "metadata": { + "id": "i7BZuO7T5BB4", + "colab_type": "text" + }, + "source": [ + "# Train a custom model on MNIST" + ], + "cell_type": "markdown" + }, + { + "metadata": { + "id": "RYDMO4zArgkz", + "colab_type": "code", + "colab": { + 
"autoexec": { + "startup": false, + "wait_interval": 0 + }, + "output_extras": [ + { + "item_id": 1 + } + ], + "base_uri": "https://localhost:8080/", + "height": 1224 + }, + "outputId": "73452116-72c6-4327-9f83-84be584c3e6f", + "executionInfo": { + "status": "ok", + "timestamp": 1512092690339, + "user_tz": 480, + "elapsed": 456, + "user": { + "displayName": "Ryan Sepassi", + "photoUrl": "//lh4.googleusercontent.com/-dcHmhQy1Y2A/AAAAAAAAAAI/AAAAAAAABEw/if_k14yF4KI/s50-c-k-no/photo.jpg", + "userId": "107877449274830904926" + } + } + }, + "source": [ + "# Lots of problems available\n", + "problems.available()" + ], + "cell_type": "code", + "execution_count": 8, + "outputs": [ + { + "output_type": "execute_result", + "data": { + "text/plain": [ + "['algorithmic_addition_binary40',\n", + " 'algorithmic_addition_decimal40',\n", + " 'algorithmic_cipher_shift200',\n", + " 'algorithmic_cipher_shift5',\n", + " 'algorithmic_cipher_vigenere200',\n", + " 'algorithmic_cipher_vigenere5',\n", + " 'algorithmic_identity_binary40',\n", + " 'algorithmic_identity_decimal40',\n", + " 'algorithmic_multiplication_binary40',\n", + " 'algorithmic_multiplication_decimal40',\n", + " 'algorithmic_reverse_binary40',\n", + " 'algorithmic_reverse_binary40_test',\n", + " 'algorithmic_reverse_decimal40',\n", + " 'algorithmic_reverse_nlplike32k',\n", + " 'algorithmic_reverse_nlplike8k',\n", + " 'algorithmic_shift_decimal40',\n", + " 'audio_timit_characters_tune',\n", + " 'audio_timit_tokens8k_test',\n", + " 'audio_timit_tokens8k_tune',\n", + " 'image_celeba_tune',\n", + " 'image_cifar10',\n", + " 'image_cifar10_plain',\n", + " 'image_cifar10_plain8',\n", + " 'image_cifar10_tune',\n", + " 'image_fsns',\n", + " 'image_imagenet',\n", + " 'image_imagenet224',\n", + " 'image_imagenet32',\n", + " 'image_imagenet64',\n", + " 'image_mnist',\n", + " 'image_mnist_tune',\n", + " 'image_ms_coco_characters',\n", + " 'image_ms_coco_tokens32k',\n", + " 'image_ms_coco_tokens8k',\n", + " 'img2img_cifar10',\n", + " 'img2img_imagenet',\n", + " 'languagemodel_lm1b32k',\n", + " 'languagemodel_lm1b8k_packed',\n", + " 'languagemodel_lm1b_characters',\n", + " 'languagemodel_ptb10k',\n", + " 'languagemodel_ptb_characters',\n", + " 'languagemodel_wiki_full32k',\n", + " 'languagemodel_wiki_scramble128',\n", + " 'languagemodel_wiki_scramble1k50',\n", + " 'languagemodel_wiki_scramble8k50',\n", + " 'librispeech',\n", + " 'multinli_matched',\n", + " 'multinli_mismatched',\n", + " 'ocr_test',\n", + " 'parsing_english_ptb16k',\n", + " 'parsing_english_ptb8k',\n", + " 'parsing_icelandic16k',\n", + " 'programming_desc2code_cpp',\n", + " 'programming_desc2code_py',\n", + " 'sentiment_imdb',\n", + " 'summarize_cnn_dailymail32k',\n", + " 'translate_encs_wmt32k',\n", + " 'translate_encs_wmt_characters',\n", + " 'translate_ende_wmt32k',\n", + " 'translate_ende_wmt32k_packed',\n", + " 'translate_ende_wmt8k',\n", + " 'translate_ende_wmt_bpe32k',\n", + " 'translate_ende_wmt_characters',\n", + " 'translate_enfr_wmt32k',\n", + " 'translate_enfr_wmt8k',\n", + " 'translate_enfr_wmt_characters',\n", + " 'translate_enfr_wmt_small32k',\n", + " 'translate_enfr_wmt_small8k',\n", + " 'translate_enfr_wmt_small_characters',\n", + " 'translate_enmk_setimes32k',\n", + " 'translate_enzh_wmt8k']" + ] + }, + "metadata": { + "tags": [] + }, + "execution_count": 8 + } + ] + }, + { + "metadata": { + "id": "JKc2uSk6WX5e", + "colab_type": "code", + "colab": { + "autoexec": { + "startup": false, + "wait_interval": 0 + }, + "output_extras": [ + { + "item_id": 2 + } + ], + "base_uri": 
"https://localhost:8080/", + "height": 204 + }, + "outputId": "9fe602a6-6b67-4d4e-82dd-2c0c11f16d14", + "executionInfo": { + "status": "ok", + "timestamp": 1512092691265, + "user_tz": 480, + "elapsed": 839, + "user": { + "displayName": "Ryan Sepassi", + "photoUrl": "//lh4.googleusercontent.com/-dcHmhQy1Y2A/AAAAAAAAAAI/AAAAAAAABEw/if_k14yF4KI/s50-c-k-no/photo.jpg", + "userId": "107877449274830904926" + } + } + }, + "source": [ + "# Create the MNIST problem and generate the data\n", + "\n", + "mnist_problem = problems.problem(\"image_mnist\")\n", + "# Generate data\n", + "mnist_problem.generate_data(data_dir, tmp_dir)" + ], + "cell_type": "code", + "execution_count": 9, + "outputs": [ + { + "output_type": "stream", + "text": [ + "INFO:tensorflow:Not downloading, file already found: /content/t2t/tmp/train-images-idx3-ubyte.gz\n", + "INFO:tensorflow:Not downloading, file already found: /content/t2t/tmp/train-labels-idx1-ubyte.gz\n", + "INFO:tensorflow:Not downloading, file already found: /content/t2t/tmp/t10k-images-idx3-ubyte.gz\n", + "INFO:tensorflow:Not downloading, file already found: /content/t2t/tmp/t10k-labels-idx1-ubyte.gz\n", + "INFO:tensorflow:Not downloading, file already found: /content/t2t/tmp/train-images-idx3-ubyte.gz\n", + "INFO:tensorflow:Not downloading, file already found: /content/t2t/tmp/train-labels-idx1-ubyte.gz\n", + "INFO:tensorflow:Not downloading, file already found: /content/t2t/tmp/t10k-images-idx3-ubyte.gz\n", + "INFO:tensorflow:Not downloading, file already found: /content/t2t/tmp/t10k-labels-idx1-ubyte.gz\n", + "INFO:tensorflow:Skipping generator because outputs files exist\n", + "INFO:tensorflow:Skipping generator because outputs files exist\n", + "INFO:tensorflow:Skipping shuffle because output files exist\n" + ], + "name": "stdout" + } + ] + }, + { + "metadata": { + "id": "VW6HCRANFPYV", + "colab_type": "code", + "colab": { + "autoexec": { + "startup": false, + "wait_interval": 0 + }, + "output_extras": [ + { + "item_id": 1 + }, + { + "item_id": 2 + } + ], + "base_uri": "https://localhost:8080/", + "height": 381 + }, + "outputId": "7b76feb3-2237-4669-d632-3ef69e04815d", + "executionInfo": { + "status": "ok", + "timestamp": 1512092691915, + "user_tz": 480, + "elapsed": 620, + "user": { + "displayName": "Ryan Sepassi", + "photoUrl": "//lh4.googleusercontent.com/-dcHmhQy1Y2A/AAAAAAAAAAI/AAAAAAAABEw/if_k14yF4KI/s50-c-k-no/photo.jpg", + "userId": "107877449274830904926" + } + } + }, + "source": [ + "# Get the tf.data.Dataset from Problem.dataset\n", + "mnist_example = tfe.Iterator(mnist_problem.dataset(Modes.TRAIN, data_dir)).next()\n", + "image = mnist_example[\"inputs\"]\n", + "label = mnist_example[\"targets\"]\n", + "\n", + "plt.imshow(image.numpy()[:, :, 0].astype(np.float32), cmap=plt.get_cmap('gray'))\n", + "print(\"Label: %d\" % label.numpy())" + ], + "cell_type": "code", + "execution_count": 10, + "outputs": [ + { + "output_type": "stream", + "text": [ + "INFO:tensorflow:Reading data files from /content/t2t/data/image_mnist-train*\n", + "Label: 5\n" + ], + "name": "stdout" + }, + { + "output_type": "display_data", + "data": { + "image/png": 
"iVBORw0KGgoAAAANSUhEUgAAAUsAAAFKCAYAAACU6307AAAABHNCSVQICAgIfAhkiAAAAAlwSFlz\nAAALEgAACxIB0t1+/AAAADl0RVh0U29mdHdhcmUAbWF0cGxvdGxpYiB2ZXJzaW9uIDIuMS4wLCBo\ndHRwOi8vbWF0cGxvdGxpYi5vcmcvpW3flQAAFBBJREFUeJzt3X9MlfX7x/HXESI5S4cQUK4ffjJc\nLHCr1IXmD9TZbJVKtZLUudmmFU50zpj54w+3VHS11Fpo4pZYnY3W5swGOddyDShJzePWQNuMmSIo\nU5hoiuf7R4tv2Dmci+PhnHMfn4+NP877vM/7XFc3vbzvc5/7xuXz+XwCAPRqQLQLAAAnICwBwICw\nBAADwhIADAhLADAgLAHAwhcBkvz+HD9+POBzTv2Jx57itS96cs5PpPrqjSsS37N0uVx+x30+X8Dn\nnCoee5Lisy96co5I9dVbHCaGuuh7772nY8eOyeVyaeXKlRo5cmSoSwFAzAspLH/66SedPn1aHo9H\np06d0sqVK+XxeMJdGwDEjJBO8NTU1Gjq1KmSpOHDh+vSpUvq6OgIa2EAEEtC2rNsbW3V448/3v04\nNTVVLS0tuueee/zOP378uHJycvw+F4GPTCMuHnuS4rMvenKOaPcV8meW/xasidzc3ICvi7cPo+Ox\nJyk++6In54iFEzwhHYZnZGSotbW1+/H58+eVnp4eylIA4AghheW4ceNUVVUlSTpx4oQyMjICHoID\nQDwI6TD8ySef1OOPP67XXntNLpdLa9euDXddABBT+FJ6mMVjT1J89kVPzuHYzywB4E5DWAKAAWEJ\nAAaEJQAYEJYAYEBYAoABYQkABoQlABgQlgBgQFgCgAFhCQAGhCUAGBCWAGBAWAKAAWEJAAaEJQAY\nEJYAYEBYAoABYQkABoQlABgQlgBgQFgCgAFhCQAGhCUAGBCWAGBAWAKAAWEJAAaEJQAYEJYAYEBY\nAoABYQkABoQlABgQlgBgQFgCgAFhCQAGhCUAGBCWAGBAWAKAAWEJAAaEJQAYEJYAYEBYAoBBYrQL\nAIBQpKammuZdvHgxLO/HniUAGIS0Z1lXV6clS5YoKytLkjRixAitXr06rIUBQCwJ+TB8zJgx2rJl\nSzhrAYCYxWE4ABiEHJYnT57UokWLNHv2bP3444/hrAkAYo7L5/P5+vqi5uZm1dfXa/r06WpqatK8\nefNUXV2tpKQkv/O9Xq9ycnJuu1gAiJaQwvJWL7/8sj744AM9+OCD/t/E5fI77vP5Aj7nVPHYkxSf\nfdGTc/jrqz++OtRbHIZ0GL53717t3LlTktTS0qILFy4oMzMzlKUAwBFC2rPs6OjQ8uXLdfnyZV2/\nfl1FRUWaOHFi4Ddhz9Lx4rEvenKOWNizDMtheDCEpfPFY1/05ByxEJZc7gggJMOGDTPNe+KJJ8xr\n/nOhiz8rVqzo8bioqMi05kMPPWR+/97wPUsAMCAsAcCAsAQAA8ISAAwISwAwICwBwICwBAADwhIA\nDAhLADAgLAHAgMsdgRjidrvNc7Ozs/2OP/XUUz0eFxQUmNd8+eWXzXMD3ZLxVoHuc+vPkSNHAj73\nyiuv9Hj88ccfm9cNB/YsAcCAsAQAA8ISAAwISwAwICwBwICwBAADwhIADAhLADAgLAHAgL/uGGbx\n2JPk7L6GDx/ud/zkyZN69NFHe4xZ/wjXmDFjzO//zDPPmOfeevVNb9LT0/8zNmDAAN28ebPHWHt7\nu3nN3377zTz3q6++Ms07dOiQec3a2lq/45H6/estDtmzBAADwhIADAhLADAgLAHAgLAEAAPCEgAM\nCEsAMCAsAcCAsAQAA8ISAAy43DHMnN7TrZf//aOxsVFZWVndj8ePH29ec9SoUea5qamppnl5eXnm\nNTMyMvyOJycnq7Ozs8fY3XffbVrzjz/+ML//8ePHzXO/++4781x/lyZWV1dr2rRpPcaOHj1qXrOl\npcU8N5K43BEAHIKwBAADwhIADAhLADAgLAHAgLAEAAPCEgAMCEsAMCAsAcCAsAQAAy53DLNY7GnS\npEnmuZWVlX7H09LSdOHChe7H1ssSpcDb35+amhrTvP3795vXPHLkiN/xffv26fnnn+8x5vV6TWue\nPn3a/P6RFIu/f+HgmMsdGxoaNHXqVFVUVEiSzp49q7lz56qwsFBLlizRX3/9FZ5KASBGBQ3LK1eu\naN26dT1uXLBlyxYVFhbq888/18MPPxxwbwQA4kXQsExKStKOHTt63Lmlrq5OU6ZMkSTl5+ebD50A\nwKkSg05ITFRiYs9pnZ2dSkpKkvT3Z1mxelsnAAiXoGEZjOX80PHjx5WTkxPy650mHnuS/v6Hsb9Z\n71PZl/tZ9mbfvn1hWSeWxOvvX7T7Ciks3W63rl69qoEDB6q5uTngzVX/kZub63c8Hs/cxWJPnA3n\nbLjTOeZs+K3Gjh2rqqoqSX/fmbkvd80GACcKumfp9Xq1ceNGnTlzRomJiaqqqtLmzZtVUlIij8ej\noUOHaubMmZGoFQCiJmhY5uTkaPfu3f8Z37VrV78UBACx6LZP8CD2tbW1medeunTJ73haWlqP54YM\nGWJe89q1a+a5b775pmnesWPHzGv25ptvvgnLOoh/XBsOAAaEJQAYEJYAYEBYAoABYQkABoQlABgQ\nlgBgQFgCgAFhCQAGhCUAGHC54x3g119/Nc/95Zdf/I4/8sgjPZ574IEHzGsGupepP6dOnTLPBSKJ\nPUsAMCAsAcCAsAQAA8ISAAwISwAwICwBwICwBAADwhIADAhLADAgLAHAgMsd7wBvv/22eW5BQYHp\nucLCQvOaXMKIeMCeJQAYEJYAYEBYAoABYQkABoQlABgQlgBgQFgCgAFhCQAGhCUAGHAFzx3gxRdf\nDPua8+fPN8+dMGGCee7FixdN87766ivzmkePHjXPBQJhzxIADAhLADAgLAHAgLAEAAPCEgAMCEsA\nMCAsAcCAsAQAA8ISAAwISwAw4HLHO8CFCxfMc7///nu/45MnT+7x3LBhw8xrZmdnm+empKSY5r37\n7rvmNT/66KOAz23durXH46VLl5rWvHHjhvn9ER/YswQAA1NYNjQ0aOrUqaqoqJAklZSU6IUXXtDc\nuXM1d+7cgHsjABAvgh6GX7lyRevWrVNeXl6P8WXLlik/P7/fCgOAWBJ0zzIpKUk7duxQRkZGJOoB\ngJjk8vl8PsvErVu3asiQIZozZ45KSkrU0tKi69evKy0tTatXr1ZqamrA13q9XuXk5IStaACItJDO\nhs+YMUMpKSnKzs7W9u3btW3bNq1Zsybg/NzcXL/jPp9PLpcrlBJiViz29MUXX5jnBjqCmDx5sg4e\nPNj9eOjQoeY1k5OTzXOtZ8MHDx5sXjPQ2fCioiJt27atx5jTz4bH4u9fOESqr972HUM6G56Xl9f9\ndZDJkyeroaEhtMoAwCFCCsvFixerqalJklRXV6esrKyw
FgUAsSboYbjX69XGjRt15swZJSYmqqqq\nSnPmzFFxcbGSk5Pldru1fv36SNQKAFETNCxzcnK0e/fu/4w/++yz/VIQAMQi89nw23qTAB/MxuOH\n0fHYkxS5vu677z7TvOXLl5vXDHTSZsCAAbp582aPsXnz5pnW3LNnj/n9I4nfv9t/n0C43BEADAhL\nADAgLAHAgLAEAAPCEgAMCEsAMCAsAcCAsAQAA8ISAAwISwAw4K87IqacO3fONG/Dhg3mNa33qJSk\nQYMGmefizsKeJQAYEJYAYEBYAoABYQkABoQlABgQlgBgQFgCgAFhCQAGhCUAGHAFDxzp4YcfjnYJ\nuMOwZwkABoQlABgQlgBgQFgCgAFhCQAGhCUAGBCWAGBAWAKAAWEJAAaEJQAYcLljjMnMzDTNa25u\n7udKYtuaNWv6Zd07/b8rAmPPEgAMCEsAMCAsAcCAsAQAA8ISAAwISwAwICwBwICwBAADwhIADAhL\nADDgcscYs2fPHtO84uJi85perzfUcsIiMdH+a/buu++a5r3wwgvmNcvLy/2OL1iwQLt27eox9vXX\nX5vXxZ3F9FtcWlqq+vp63bhxQwsXLlRubq5WrFihrq4upaena9OmTUpKSurvWgEgaoKGZW1trRob\nG+XxeNTW1qZZs2YpLy9PhYWFmj59ut5//31VVlaqsLAwEvUCQFQE/cxy9OjR+vDDDyVJgwcPVmdn\np+rq6jRlyhRJUn5+vmpqavq3SgCIsqBhmZCQILfbLUmqrKzUhAkT1NnZ2X3YnZaWppaWlv6tEgCi\nzOXz+XyWiQcOHFBZWZnKy8s1bdq07r3J06dP65133tGXX34Z8LVer1c5OTnhqRgAosB0gufQoUP6\n5JNP9Omnn2rQoEFyu926evWqBg4cqObmZmVkZPT6+tzcXL/jPp9PLper71XHsNvt6cCBA6Z5kT4b\nfjt99cfZ8LVr15rX7O1s+M6dO3uMvfHGG+Z1Y1E8/j8lRa6v3vYdgx6Gt7e3q7S0VGVlZUpJSZEk\njR07VlVVVZKk6upqjR8/PkylAkBsCvpP/v79+9XW1tZjT2bDhg1atWqVPB6Phg4dqpkzZ/ZrkQAQ\nbUHD8tVXX9Wrr776n/Fbv8wLAPHMfILntt4kwGcN8fj5yu32dPPmTdO8SZMmmdf84YcfQqzm/93a\n16hRo8yvfeedd8xzCwoKTPMOHz5sXnPGjBl+x8+ePav777+/x9i5c+fM68aiePx/SnLIZ5YAAMIS\nAEwISwAwICwBwICwBAADwhIADAhLADAgLAHAgLAEAAPCEgAM+INlMebnn382zfv222/Na/7+++/m\nuR0dHQGf+/cd8ftyuWNXV5d57meffWaat2TJEvOaly9fDvic0y9vROSwZwkABoQlABgQlgBgQFgC\ngAFhCQAGhCUAGBCWAGBAWAKAAWEJAAaEJQAYcLljjJk9e7Zp3oIFC8xrPvvss+a5f/75p+m50tJS\n85rl5eXmuadOnTLPBSKJPUsAMCAsAcCAsAQAA8ISAAwISwAwICwBwICwBAADwhIADAhLADBw+Xw+\nX7+/icvld9zn8wV8zqnisScpPvuiJ+eIVF+9xSF7lgBgQFgCgAFhCQAGhCUAGBCWAGBAWAKAAWEJ\nAAaEJQAYEJYAYEBYAoABYQkABqa/7lhaWqr6+nrduHFDCxcu1MGDB3XixAmlpKRI+vsvDU6aNKk/\n6wSAqAoalrW1tWpsbJTH41FbW5tmzZqlp59+WsuWLVN+fn4kagSAqAsalqNHj9bIkSMlSYMHD1Zn\nZ6e6urr6vTAAiCV9ukWbx+PR4cOHlZCQoJaWFl2/fl1paWlavXq1UlNTA78Jt2hzvHjsi56cIxZu\n0WYOywMHDqisrEzl5eXyer1KSUlRdna2tm/frnPnzmnNmjUBX+v1epWTk9P3ygEgVvgMfvjhB99L\nL73ka2tr+89zjY2Nvtdff73X10vy+9Pbc079icee4rUvenLOT6T66k3Qrw61t7ertLRUZWVl3We/\nFy9erKamJklSXV2dsrKygi0DAI4W9ATP/v371dbWpuLi4u6xgoICFRcXKzk5WW63W+vXr+/XIgEg\n2vgbPGEWjz1J8dkXPTlHpPrqLQ65ggcADAhLADAgLAHAgLAEAAPCEgAMCEsAMCAsAcCAsAQAA8IS\nAAwISwAwICwBwICwBAADwhIADAhLADAgLAHAgLAEAAPCEgAMCEsAMCAsAcCAsAQAA8ISAAwISwAw\niMifwgUAp2PPEgAMCEsAMCAsAcCAsAQAA8ISAAwISwAwSIzGm7733ns6duyYXC6XVq5cqZEjR0aj\njLCqq6vTkiVLlJWVJUkaMWKEVq9eHeWqQtfQ0KC33npL8+fP15w5c3T27FmtWLFCXV1dSk9P16ZN\nm5SUlBTtMvvk1p5KSkp04sQJpaSkSJIWLFigSZMmRbfIPiotLVV9fb1u3LihhQsXKjc31/HbSfpv\nXwcPHoz6top4WP700086ffq0PB6PTp06pZUrV8rj8US6jH4xZswYbdmyJdpl3LYrV65o3bp1ysvL\n6x7bsmWLCgsLNX36dL3//vuqrKxUYWFhFKvsG389SdKyZcuUn58fpapuT21trRobG+XxeNTW1qZZ\ns2YpLy/P0dtJ8t/X008/HfVtFfHD8JqaGk2dOlWSNHz4cF26dEkdHR2RLgO9SEpK0o4dO5SRkdE9\nVldXpylTpkiS8vPzVVNTE63yQuKvJ6cbPXq0PvzwQ0nS4MGD1dnZ6fjtJPnvq6urK8pVRSEsW1tb\nNWTIkO7HqampamlpiXQZ/eLkyZNatGiRZs+erR9//DHa5YQsMTFRAwcO7DHW2dnZfTiXlpbmuG3m\nrydJqqio0Lx587R06VJdvHgxCpWFLiEhQW63W5JUWVmpCRMmOH47Sf77SkhIiPq2ispnlv8WL1db\nDhs2TEVFRZo+fbqampo0b948VVdXO/LzomDiZZvNmDFDKSkpys7O1vbt27Vt2zatWbMm2mX12YED\nB1RZWany8nJNmzate9zp2+nffXm93qhvq4jvWWZkZKi1tbX78fnz55Wenh7pMsIuMzNTzz33nFwu\nlx566CHde++9am5ujnZZYeN2u3X16lVJUnNzc1wczubl5Sk7O1uSNHnyZDU0NES5or47dOiQPvnk\nE+3YsUODBg2Km+10a1+xsK0iHpbjxo1TVVWVJOnEiRPKyMjQPffcE+kywm7v3r3auXOnJKmlpUUX\nLlxQZmZmlKsKn7Fjx3Zvt+rqao0fPz7KFd2+xYsXq6mpSdLfn8n+800Gp2hvb1dpaanKysq6zxLH\nw3by11csbKuo3HVo8+bNOnz4sFwul9auXavHHnss0iWEXUdHh5YvX67Lly/r+vXrKioq0sSJE6Nd\nVki8Xq82btyoM2fOKDExUZmZmdq8ebNKSkp07do1DR06VOvXr9ddd90V7VLN/PU0Z84cbd++XcnJ\nyXK73Vq/fr3
S0tKiXaqZx+PR1q1b9b///a97bMOGDVq1apVjt5Pkv6+CggJVVFREdVtxizYAMOAK\nHgAwICwBwICwBAADwhIADAhLADAgLAHAgLAEAAPCEgAM/g8DO834LYDKmQAAAABJRU5ErkJggg==\n", + "text/plain": [ + "" + ] + }, + "metadata": { + "tags": [] + } + } + ] + }, + { + "metadata": { + "id": "WkFUEs7ZOA79", + "colab_type": "code", + "colab": { + "autoexec": { + "startup": false, + "wait_interval": 0 + }, + "output_extras": [ + { + "item_id": 1 + } + ], + "base_uri": "https://localhost:8080/", + "height": 408 + }, + "outputId": "f56d417d-0b2e-4b4d-e1ea-6e6b233a609b", + "executionInfo": { + "status": "ok", + "timestamp": 1512092692257, + "user_tz": 480, + "elapsed": 279, + "user": { + "displayName": "Ryan Sepassi", + "photoUrl": "//lh4.googleusercontent.com/-dcHmhQy1Y2A/AAAAAAAAAAI/AAAAAAAABEw/if_k14yF4KI/s50-c-k-no/photo.jpg", + "userId": "107877449274830904926" + } + } + }, + "source": [ + "# Lots of models available\n", + "registry.list_models()" + ], + "cell_type": "code", + "execution_count": 11, + "outputs": [ + { + "output_type": "execute_result", + "data": { + "text/plain": [ + "['resnet50',\n", + " 'lstm_seq2seq',\n", + " 'transformer_encoder',\n", + " 'attention_lm',\n", + " 'vanilla_gan',\n", + " 'transformer',\n", + " 'gene_expression_conv',\n", + " 'transformer_moe',\n", + " 'attention_lm_moe',\n", + " 'transformer_revnet',\n", + " 'lstm_seq2seq_attention',\n", + " 'shake_shake',\n", + " 'transformer_ae',\n", + " 'diagonal_neural_gpu',\n", + " 'xception',\n", + " 'aligned',\n", + " 'multi_model',\n", + " 'neural_gpu',\n", + " 'slice_net',\n", + " 'byte_net',\n", + " 'cycle_gan',\n", + " 'transformer_sketch',\n", + " 'blue_net']" + ] + }, + "metadata": { + "tags": [] + }, + "execution_count": 11 + } + ] + }, + { + "metadata": { + "id": "-H25oG91YQj3", + "colab_type": "code", + "colab": { + "autoexec": { + "startup": false, + "wait_interval": 0 + } + } + }, + "source": [ + "# Create your own model\n", + "\n", + "class MySimpleModel(t2t_model.T2TModel):\n", + "\n", + " def model_fn_body(self, features):\n", + " inputs = features[\"inputs\"]\n", + " filters = self.hparams.hidden_size\n", + " h1 = tf.layers.conv2d(inputs, filters,\n", + " kernel_size=(5, 5), strides=(2, 2))\n", + " h2 = tf.layers.conv2d(tf.nn.relu(h1), filters,\n", + " kernel_size=(5, 5), strides=(2, 2))\n", + " return tf.layers.conv2d(tf.nn.relu(h2), filters,\n", + " kernel_size=(3, 3))\n", + "\n", + "hparams = trainer_utils.create_hparams(\"basic_1\", data_dir)\n", + "hparams.hidden_size = 64\n", + "hparams.use_eager_mode = True\n", + "trainer_utils.add_problem_hparams(hparams, \"image_mnist\")\n", + "model = MySimpleModel(hparams, Modes.TRAIN)" + ], + "cell_type": "code", + "execution_count": 0, + "outputs": [] + }, + { + "metadata": { + "id": "AWVd2I7PYz6H", + "colab_type": "code", + "colab": { + "autoexec": { + "startup": false, + "wait_interval": 0 + }, + "output_extras": [ + { + "item_id": 12 + } + ], + "base_uri": "https://localhost:8080/", + "height": 357 + }, + "outputId": "5acd846f-7d5e-45b9-85b7-e8a93389630a", + "executionInfo": { + "status": "ok", + "timestamp": 1512092812219, + "user_tz": 480, + "elapsed": 119560, + "user": { + "displayName": "Ryan Sepassi", + "photoUrl": "//lh4.googleusercontent.com/-dcHmhQy1Y2A/AAAAAAAAAAI/AAAAAAAABEw/if_k14yF4KI/s50-c-k-no/photo.jpg", + "userId": "107877449274830904926" + } + } + }, + "source": [ + "# Train\n", + "\n", + "hparams.learning_rate = 0.0001\n", + "optimizer = tf.train.MomentumOptimizer(\n", + " hparams.learning_rate, momentum=hparams.optimizer_momentum_momentum)\n", + "\n", + "# In Eager mode, 
opt.minimize must be passed a function that produces the loss\n", + "def loss_function(features):\n", + " _, losses = model(features)\n", + " return losses[\"training\"]\n", + "\n", + "NUM_STEPS = 500\n", + "BATCH_SIZE = 128\n", + "\n", + "# Repeat and batch the data\n", + "mnist_train_dataset = mnist_problem.dataset(Modes.TRAIN, data_dir)\n", + "mnist_train_dataset = mnist_train_dataset.repeat(None).batch(BATCH_SIZE)\n", + "\n", + "# Training loop\n", + "for count, example in enumerate(tfe.Iterator(mnist_train_dataset)):\n", + " if count % 50 == 0:\n", + " loss = loss_function(example)\n", + " print(\"Step: %d, Loss: %.3f\" % (count, loss.numpy()))\n", + " if count >= NUM_STEPS:\n", + " break\n", + "\n", + " example[\"targets\"] = tf.reshape(example[\"targets\"], [BATCH_SIZE, 1, 1, 1]) # Make it 4D.\n", + " optimizer.minimize(lambda: loss_function(example),\n", + " global_step=tf.train.get_or_create_global_step())" + ], + "cell_type": "code", + "execution_count": 13, + "outputs": [ + { + "output_type": "stream", + "text": [ + "INFO:tensorflow:Reading data files from /content/t2t/data/image_mnist-train*\n", + "WARNING:tensorflow:From /usr/local/lib/python2.7/dist-packages/tensor2tensor/layers/common_layers.py:1671: softmax_cross_entropy_with_logits (from tensorflow.python.ops.nn_ops) is deprecated and will be removed in a future version.\n", + "Instructions for updating:\n", + "\n", + "Future major versions of TensorFlow will allow gradients to flow\n", + "into the labels input on backprop by default.\n", + "\n", + "See tf.nn.softmax_cross_entropy_with_logits_v2.\n", + "\n", + "Step: 0, Loss: 669.337\n", + "Step: 50, Loss: 681.818\n", + "Step: 100, Loss: 672.086\n", + "Step: 150, Loss: 696.411\n", + "Step: 200, Loss: 687.108\n", + "Step: 250, Loss: 679.670\n", + "Step: 300, Loss: 686.915\n", + "Step: 350, Loss: 687.450\n", + "Step: 400, Loss: 680.961\n", + "Step: 450, Loss: 685.741\n", + "Step: 500, Loss: 690.723\n" + ], + "name": "stdout" + } + ] + }, + { + "metadata": { + "id": "CIFlkiVOd8jO", + "colab_type": "code", + "colab": { + "autoexec": { + "startup": false, + "wait_interval": 0 + }, + "output_extras": [ + { + "item_id": 2 + } + ], + "base_uri": "https://localhost:8080/", + "height": 51 + }, + "outputId": "29223ecd-c5ae-401b-e518-97b06fafb530", + "executionInfo": { + "status": "ok", + "timestamp": 1512092815393, + "user_tz": 480, + "elapsed": 3149, + "user": { + "displayName": "Ryan Sepassi", + "photoUrl": "//lh4.googleusercontent.com/-dcHmhQy1Y2A/AAAAAAAAAAI/AAAAAAAABEw/if_k14yF4KI/s50-c-k-no/photo.jpg", + "userId": "107877449274830904926" + } + } + }, + "source": [ + "model.set_mode(Modes.EVAL)\n", + "mnist_eval_dataset = mnist_problem.dataset(Modes.EVAL, data_dir)\n", + "all_perplexities = []\n", + "all_accuracies = []\n", + "for count, example in enumerate(tfe.Iterator(mnist_eval_dataset)):\n", + " if count >= 100:\n", + " break\n", + "\n", + " batch_inputs = tf.reshape(example[\"inputs\"], [1, 28, 28, 3]) # Make it 4D.\n", + " batch_targets = tf.reshape(example[\"targets\"], [1, 1, 1, 1]) # Make it 4D.\n", + " features = {\"inputs\": batch_inputs, \"targets\": batch_targets}\n", + "\n", + " # Call the model.\n", + " predictions, _ = model(features)\n", + " \n", + " # Calculate and append the metrics\n", + " all_perplexities.extend(metrics.padded_neg_log_perplexity(predictions, features[\"targets\"]))\n", + " all_accuracies.extend(metrics.padded_accuracy(predictions, features[\"targets\"]))\n", + "\n", + "# Print out metrics on the dataset\n", + "print(\"Accuracy: %.2f\" % 
tf.reduce_mean(tf.concat(all_accuracies, axis=1)).numpy())" + ], + "cell_type": "code", + "execution_count": 14, + "outputs": [ + { + "output_type": "stream", + "text": [ + "INFO:tensorflow:Reading data files from /content/t2t/data/image_mnist-dev*\n", + "Accuracy: 0.49\n" + ], + "name": "stdout" + } + ] + } + ] +} \ No newline at end of file diff --git a/tensor2tensor/utils/data_reader.py b/tensor2tensor/utils/data_reader.py index 092aa5628..2736a0c45 100644 --- a/tensor2tensor/utils/data_reader.py +++ b/tensor2tensor/utils/data_reader.py @@ -106,7 +106,7 @@ def input_pipeline(problem, hparams=hparams, dataset_split=dataset_split, shard=shard) - dataset = dataset.map(cast_int64_to_int32, num_threads=num_threads) + dataset = dataset.map(cast_int64_to_int32, num_parallel_calls=num_threads) dataset = dataset.filter( functools.partial( example_valid_size, @@ -132,12 +132,6 @@ def input_pipeline(problem, batching_scheme["window_size"], padded_shapes=batching_scheme["padded_shapes"]) - # We reshuffle the batches to prevent many long-sequence batches at once. - # TODO(rsepassi): Rm hasattr call once new dynamic window size functionality - # is in a stable TF release. - if (batching_scheme["shuffle_queue_size"] is not None and - not hasattr(dataset, "apply")): - dataset = dataset.shuffle(batching_scheme["shuffle_queue_size"]) batched_examples = dataset.make_one_shot_iterator().get_next() return batched_examples @@ -182,6 +176,7 @@ def bucket_by_sequence_length(dataset, Returns: Dataset of padded and batched examples. """ + del window_size with tf.name_scope("bucket_by_seq_length"): def example_to_bucket_id(example): @@ -209,16 +204,9 @@ def batching_fn(bucket_id, grouped_dataset): batch_size = batch_sizes[bucket_id] return padded_batch(grouped_dataset, batch_size, padded_shapes) - # TODO(rsepassi): Rm branch once the new group_by_window functionality is in - # a stable TF release. - if hasattr(dataset, "apply"): - # If the Dataset supports dynamic window size, use it. - dataset = dataset.apply( - tf.contrib.data.group_by_window(example_to_bucket_id, batching_fn, - None, window_size_fn)) - else: - dataset = dataset.group_by_window(example_to_bucket_id, batching_fn, - window_size) + dataset = dataset.apply( + tf.contrib.data.group_by_window(example_to_bucket_id, batching_fn, None, + window_size_fn)) return dataset diff --git a/tensor2tensor/utils/expert_utils.py b/tensor2tensor/utils/expert_utils.py index 8fe5479da..fed1af719 100644 --- a/tensor2tensor/utils/expert_utils.py +++ b/tensor2tensor/utils/expert_utils.py @@ -33,6 +33,7 @@ from six.moves import zip # pylint: disable=redefined-builtin import tensorflow as tf +from tensorflow.python.eager import context from tensorflow.python.framework import function DEFAULT_DEV_STRING = "existing_device" @@ -186,6 +187,7 @@ def __call__(self, fn, *args, **kwargs): # Now make the parallel call. 
outputs = [] cache = {} + tensor_to_var = {} for i in xrange(self.n): def daisy_chain_getter(getter, name, *args, **kwargs): @@ -196,11 +198,16 @@ def daisy_chain_getter(getter, name, *args, **kwargs): return cache[device_var_key] if name in cache: # if we have it on a different device, copy it from the last device - v = tf.identity(cache[name]) + last_device_v = cache[name] + var = tensor_to_var[last_device_v] + v = tf.identity(last_device_v) else: var = getter(name, *args, **kwargs) v = tf.identity(var._ref()) # pylint: disable=protected-access - _add_variable_proxy_methods(var, v) + + # keep track of the original variable + tensor_to_var[v] = var + _add_variable_proxy_methods(tensor_to_var[v], v) # update the cache cache[name] = v cache[device_var_key] = v @@ -546,9 +553,10 @@ def remove(self, x): x, indices=self.nonpad_ids, ) - # This is a hack but for some reason, gather_nd return a tensor of - # undefined shape, so the shape is set up manually - x.set_shape([None] + x_shape[1:]) + if not context.in_eager_mode(): + # This is a hack but for some reason, gather_nd return a tensor of + # undefined shape, so the shape is set up manually + x.set_shape([None] + x_shape[1:]) return x def restore(self, x): @@ -894,14 +902,16 @@ def my_fn(x): def reshape_like(a, b): """Reshapes a to match the shape of b in all but the last dimension.""" ret = tf.reshape(a, tf.concat([tf.shape(b)[:-1], tf.shape(a)[-1:]], 0)) - ret.set_shape(b.get_shape().as_list()[:-1] + a.get_shape().as_list()[-1:]) + if not context.in_eager_mode(): + ret.set_shape(b.get_shape().as_list()[:-1] + a.get_shape().as_list()[-1:]) return ret def flatten_all_but_last(a): """Flatten all dimensions of a except the last.""" ret = tf.reshape(a, [-1, tf.shape(a)[-1]]) - ret.set_shape([None] + a.get_shape().as_list()[-1:]) + if not context.in_eager_mode(): + ret.set_shape([None] + a.get_shape().as_list()[-1:]) return ret diff --git a/tensor2tensor/utils/t2t_model.py b/tensor2tensor/utils/t2t_model.py index 0f7b865b6..c49bdbaf1 100644 --- a/tensor2tensor/utils/t2t_model.py +++ b/tensor2tensor/utils/t2t_model.py @@ -175,6 +175,20 @@ def eval_autoregressive(self, features=None, decode_length=50): features, decode_length=decode_length) return logits, losses + def _fill_problem_hparams_features(self, features): + if features is None: + return + problem_hparams = self._problem_hparams + if "problem_choice" not in features: + features["problem_choice"] = tf.constant( + self._problem_idx, name="problem_choice") + if "input_space_id" not in features: + features["input_space_id"] = tf.constant( + problem_hparams.input_space_id, name="input_space_id") + if "target_space_id" not in features: + features["target_space_id"] = tf.constant( + problem_hparams.target_space_id, name="target_space_id") + def infer(self, features=None, decode_length=50, @@ -203,6 +217,7 @@ def infer(self, tf.logging.warn("Beam searching for a model with no inputs.") if not self.has_input and self.hparams.sampling_method != "random": tf.logging.warn("Non-random sampling for a model with no inputs.") + self._fill_problem_hparams_features(features) target_modality = self.hparams.problems[self._problem_idx].target_modality if target_modality.is_class_modality: @@ -370,7 +385,8 @@ def _slow_greedy_infer(self, features, decode_length): def infer_step(recent_output, recent_logits, unused_loss): """Inference step.""" - recent_output.set_shape([None, None, None, 1]) + if not self.hparams.use_eager_mode: + recent_output.set_shape([None, None, None, 1]) padded = tf.pad(recent_output, [[0, 
0], [0, 1], [0, 0], [0, 0]]) features["targets"] = padded # This is inefficient in that it generates samples at all timesteps, @@ -385,7 +401,8 @@ def infer_step(recent_output, recent_logits, unused_loss): common_layers.shape_list(recent_output)[1], :, :] cur_sample = tf.to_int64(tf.expand_dims(cur_sample, axis=1)) samples = tf.concat([recent_output, cur_sample], axis=1) - samples.set_shape([None, None, None, 1]) + if not self.hparams.use_eager_mode: + samples.set_shape([None, None, None, 1]) # Assuming we have one shard for logits. logits = tf.concat([recent_logits, logits[:, -1:]], 1) @@ -416,7 +433,8 @@ def infer_step(recent_output, recent_logits, unused_loss): result = initial_output # tensor of shape [batch_size, time, 1, 1, vocab_size] logits = tf.zeros((batch_size, 0, 1, 1, target_modality.top_dimensionality)) - logits.set_shape([None, None, None, None, None]) + if not self.hparams.use_eager_mode: + logits.set_shape([None, None, None, None, None]) loss = 0.0 def while_exit_cond(result, logits, loss): # pylint: disable=unused-argument @@ -662,20 +680,13 @@ def sampled_results(): tf.less(tf.random_uniform([]), prob), sampled_results, lambda: (sharded_logits, losses)) - tf.logging.info("This model_fn took %.3f sec." % (time.time() - start_time)) + if not self.hparams.use_eager_mode: + tf.logging.info("This model_fn took %.3f sec." % + (time.time() - start_time)) return sharded_logits, losses def call(self, inputs_dict, skip=False, force_full_predict=False): - problem_hparams = self._problem_hparams - if "problem_choice" not in inputs_dict: - inputs_dict["problem_choice"] = tf.constant( - self._problem_idx, name="problem_choice") - if "input_space_id" not in inputs_dict: - inputs_dict["input_space_id"] = tf.constant( - problem_hparams.input_space_id, name="input_space_id") - if "target_space_id" not in inputs_dict: - inputs_dict["target_space_id"] = tf.constant( - problem_hparams.target_space_id, name="target_space_id") + self._fill_problem_hparams_features(inputs_dict) sharded_logits, losses = self._model_fn( inputs_dict, skip=skip, force_full_predict=force_full_predict) return tf.concat(sharded_logits, 0), losses @@ -701,8 +712,10 @@ def model_fn_body_sharded(self, sharded_features): } for d in xrange(self._num_datashards)] output = self._data_parallelism( - _with_timing(self.model_fn_body, "model_fn_body"), - datashard_to_features) + _with_timing( + self.model_fn_body, + "model_fn_body", + silent=self.hparams.use_eager_mode), datashard_to_features) if isinstance(output, tuple): losses_sharded = output[1] if isinstance(losses_sharded[0], dict): @@ -919,12 +932,14 @@ def _warn_changed_modality_type(new_name, old_name, feature_name): feature_name, old_type, old_name, new_type, new_name) -def _with_timing(fn, msg): +def _with_timing(fn, msg, silent=False): def fn_with_timing(*args, **kwargs): start_time = time.time() res = fn(*args, **kwargs) - tf.logging.info("Doing %s took %.3f sec." % (msg, time.time() - start_time)) + if not silent: + tf.logging.info("Doing %s took %.3f sec." 
% (msg, + time.time() - start_time)) return res return fn_with_timing From b1abcf4fa7f9e363c07686abacc30134537458d9 Mon Sep 17 00:00:00 2001 From: Ryan Sepassi Date: Thu, 30 Nov 2017 18:57:49 -0800 Subject: [PATCH 07/11] v1.3.1 PiperOrigin-RevId: 177540047 --- setup.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/setup.py b/setup.py index 34a94965c..94f44c137 100644 --- a/setup.py +++ b/setup.py @@ -5,7 +5,7 @@ setup( name='tensor2tensor', - version='1.3.0', + version='1.3.1', description='Tensor2Tensor', author='Google Inc.', author_email='no-reply@google.com', From c93a188a76a60ebeb0d7b3ba6f050338120aa807 Mon Sep 17 00:00:00 2001 From: Lukasz Kaiser Date: Thu, 30 Nov 2017 20:57:56 -0800 Subject: [PATCH 08/11] New BLEU cleanup and small correction to VAE. PiperOrigin-RevId: 177547599 --- tensor2tensor/models/transformer_vae.py | 2 +- tensor2tensor/utils/bleu_hook.py | 66 +++++++++++++++++++++++++ tensor2tensor/utils/bleu_hook_test.py | 7 +-- 3 files changed, 71 insertions(+), 4 deletions(-) diff --git a/tensor2tensor/models/transformer_vae.py b/tensor2tensor/models/transformer_vae.py index 140959c34..be21fca1a 100644 --- a/tensor2tensor/models/transformer_vae.py +++ b/tensor2tensor/models/transformer_vae.py @@ -139,7 +139,7 @@ def vae(x, z_size, name): kl = 0.5 * tf.reduce_mean( tf.exp(log_sigma) + tf.square(mu) - 1. - log_sigma, axis=-1) free_bits = z_size // 2 - kl_loss = tf.maximum(tf.reduce_mean(kl) - free_bits, 0.0) + kl_loss = tf.reduce_mean(tf.maximum(kl - free_bits, 0.0)) return z, kl_loss, mu, log_sigma diff --git a/tensor2tensor/utils/bleu_hook.py b/tensor2tensor/utils/bleu_hook.py index 20a7c8426..49b31c1bb 100644 --- a/tensor2tensor/utils/bleu_hook.py +++ b/tensor2tensor/utils/bleu_hook.py @@ -20,10 +20,14 @@ import collections import math +import re +import sys +import unicodedata # Dependency imports import numpy as np +import six # pylint: disable=redefined-builtin from six.moves import xrange from six.moves import zip @@ -93,9 +97,15 @@ def compute_bleu(reference_corpus, for ngram in translation_ngram_counts: possible_matches_by_order[len(ngram)-1] += translation_ngram_counts[ngram] precisions = [0] * max_order + smooth = 1.0 for i in xrange(0, max_order): if possible_matches_by_order[i] > 0: precisions[i] = matches_by_order[i] / possible_matches_by_order[i] + if matches_by_order[i] > 0: + precisions[i] = matches_by_order[i] / possible_matches_by_order[i] + else: + smooth *= 2 + precisions[i] = 1.0 / (smooth * possible_matches_by_order[i]) else: precisions[i] = 0.0 @@ -131,3 +141,59 @@ def bleu_score(predictions, labels, **unused_kwargs): bleu = tf.py_func(compute_bleu, (labels, outputs), tf.float32) return bleu, tf.constant(1.0) + + +class UnicodeRegex(object): + """Ad-hoc hack to recognize all punctuation and symbols.""" + + def __init__(self): + def _property_chars(prefix): + return ''.join(six.unichr(x) for x in range(sys.maxunicode) + if unicodedata.category(six.unichr(x)).startswith(prefix)) + punctuation = self._property_chars('P') + self.nondigit_punct_re = re.compile(r'([^\d])([' + punctuation + r'])') + self.punct_nondigit_re = re.compile(r'([' + punctuation + r'])([^\d])') + self.symbol_re = re.compile('([' + _property_chars('S') + '])') + + +def bleu_tokenize(string): + r"""Tokenize a string following the official BLEU implementation. 
+
+  See https://github.com/moses-smt/mosesdecoder/blob/master/scripts/generic/mteval-v14.pl#L954-L983
+  In our case, the input string is expected to be just one line
+  and no HTML entities de-escaping is needed.
+  So we just tokenize on punctuation and symbols,
+  except when a punctuation is preceded and followed by a digit
+  (e.g. a comma/dot as a thousand/decimal separator).
+
+  Note that a number (e.g. a year) followed by a dot at the end of a sentence
+  is NOT tokenized,
+  i.e. the dot stays with the number because `s/(\p{P})(\P{N})/ $1 $2/g`
+  does not match this case (unless we add a space after each sentence).
+  However, this error is already in the original mteval-v14.pl
+  and we want to be consistent with it.
+
+  Args:
+    string: the input string
+
+  Returns:
+    a list of tokens
+  """
+  string = UnicodeRegex.nondigit_punct_re.sub(r'\1 \2 ', string)
+  string = UnicodeRegex.punct_nondigit_re.sub(r' \1 \2', string)
+  string = UnicodeRegex.symbol_re.sub(r' \1 ', string)
+  return string.split()
+
+
+def bleu_wrapper(ref_filename, hyp_filename, case_sensitive=False):
+  """Compute BLEU for two files (reference and hypothesis translation)."""
+  ref_lines = open(ref_filename).read().splitlines()
+  hyp_lines = open(hyp_filename).read().splitlines()
+  assert len(ref_lines) == len(hyp_lines)
+  if not case_sensitive:
+    ref_lines = [x.lower() for x in ref_lines]
+    hyp_lines = [x.lower() for x in hyp_lines]
+  ref_tokens = [bleu_tokenize(x) for x in ref_lines]
+  hyp_tokens = [bleu_tokenize(x) for x in hyp_lines]
+  return compute_bleu(ref_tokens, hyp_tokens)
diff --git a/tensor2tensor/utils/bleu_hook_test.py b/tensor2tensor/utils/bleu_hook_test.py
index bf08174f8..e4f3a18a9 100644
--- a/tensor2tensor/utils/bleu_hook_test.py
+++ b/tensor2tensor/utils/bleu_hook_test.py
@@ -39,8 +39,9 @@ def testComputeNotEqual(self):
     translation_corpus = [[1, 2, 3, 4]]
     reference_corpus = [[5, 6, 7, 8]]
     bleu = bleu_hook.compute_bleu(reference_corpus, translation_corpus)
-    actual_bleu = 0.0
-    self.assertEqual(bleu, actual_bleu)
+    # The smoothing prevents 0 for small corpora
+    actual_bleu = 0.0798679
+    self.assertAllClose(bleu, actual_bleu, atol=1e-03)
 
   def testComputeMultipleBatch(self):
     translation_corpus = [[1, 2, 3, 4], [5, 6, 7, 0]]
@@ -53,7 +54,7 @@ def testComputeMultipleNgrams(self):
     reference_corpus = [[1, 2, 1, 13], [12, 6, 7, 4, 8, 9, 10]]
     translation_corpus = [[1, 2, 1, 3], [5, 6, 7, 4]]
     bleu = bleu_hook.compute_bleu(reference_corpus, translation_corpus)
-    actual_bleu = 0.486
+    actual_bleu = 0.3436
     self.assertAllClose(bleu, actual_bleu, atol=1e-03)
 
 if __name__ == '__main__':

From e133a1af7439eaa32d9ebd8edef7d1e6b88b0a8c Mon Sep 17 00:00:00 2001
From: Niki Parmar
Date: Thu, 30 Nov 2017 23:03:32 -0800
Subject: [PATCH 09/11] Enable Transformer fast decoding in eager mode

PiperOrigin-RevId: 177554962
---
 tensor2tensor/models/transformer.py | 32 ++++++++++------------------
 1 file changed, 11 insertions(+), 21 deletions(-)

diff --git a/tensor2tensor/models/transformer.py b/tensor2tensor/models/transformer.py
index 099a226b3..f2b693e95 100644
--- a/tensor2tensor/models/transformer.py
+++ b/tensor2tensor/models/transformer.py
@@ -171,14 +171,9 @@ def _greedy_infer(self, features, decode_length):
     Raises:
       NotImplementedError: If there are multiple data shards.
     """
-    # TODO(nikip): Remove slow decoding for eager. Eager mode doesn't work
-    # with accessing _shape which is used in fast decoding currently.
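For reference, the effect of the smoothing added to compute_bleu in the bleu_hook.py change above is that n-gram orders with zero matches contribute 1 / (smooth * possible_matches) instead of zeroing the whole score, which is exactly what the updated tests assert. A minimal sketch of exercising it directly on already-tokenized corpora; the token IDs below are arbitrary illustrations, not real vocabulary entries:

    from tensor2tensor.utils import bleu_hook

    # One reference/translation pair that shares no 4-gram: without smoothing
    # the 4-gram precision would be zero and BLEU would collapse to 0.0.
    reference_corpus = [[1, 2, 3, 4]]
    translation_corpus = [[1, 2, 3, 5]]
    bleu = bleu_hook.compute_bleu(reference_corpus, translation_corpus)
    print("smoothed BLEU = %.4f" % bleu)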
- if self._hparams.use_eager_mode: - return self._slow_greedy_infer(features, decode_length) - else: - with tf.variable_scope(self.name): - decoded_ids, _ = self._fast_decode(features, decode_length) - return decoded_ids, None, None + with tf.variable_scope(self.name): + decoded_ids, _ = self._fast_decode(features, decode_length) + return decoded_ids, None, None def _beam_decode(self, features, decode_length, beam_size, top_beams, alpha): """Beam search decoding. @@ -194,16 +189,10 @@ def _beam_decode(self, features, decode_length, beam_size, top_beams, alpha): Returns: samples: an integer `Tensor`. Top samples from the beam search """ - # TODO(nikip): Remove slow decoding for eager. Eager mode doesn't work - # with accessing _shape which is used in fast decoding currently. - if self._hparams.use_eager_mode: - return self._beam_decode_slow( - features, decode_length, beam_size, top_beams, alpha) - else: - with tf.variable_scope(self.name): - decoded_ids, scores = self._fast_decode(features, decode_length, - beam_size, top_beams, alpha) - return {"outputs": decoded_ids, "scores": scores} + with tf.variable_scope(self.name): + decoded_ids, scores = self._fast_decode(features, decode_length, + beam_size, top_beams, alpha) + return {"outputs": decoded_ids, "scores": scores} def _fast_decode(self, features, @@ -335,9 +324,10 @@ def symbols_to_logits_fn(ids, i, cache): # Note: Tensor.set_shape() does not work here since it merges shape info. # TODO(llion); Find a more robust solution. # pylint: disable=protected-access - for layer in cache: - cache[layer]["k"]._shape = tf.TensorShape([None, None, key_channels]) - cache[layer]["v"]._shape = tf.TensorShape([None, None, value_channels]) + if not self._hparams.use_eager_mode: + for layer in cache: + cache[layer]["k"]._shape = tf.TensorShape([None, None, key_channels]) + cache[layer]["v"]._shape = tf.TensorShape([None, None, value_channels]) # pylint: enable=protected-access cache["encoder_output"] = encoder_output cache["encoder_decoder_attention_bias"] = encoder_decoder_attention_bias From 654f74eeb82170bbb555b83af6ff1e60f39eafd7 Mon Sep 17 00:00:00 2001 From: Niki Parmar Date: Fri, 1 Dec 2017 13:47:45 -0800 Subject: [PATCH 10/11] Fix decoding and training issues in external colab. PiperOrigin-RevId: 177635374 --- tensor2tensor/notebooks/hello_t2t.ipynb | 685 +++++------------------- 1 file changed, 139 insertions(+), 546 deletions(-) diff --git a/tensor2tensor/notebooks/hello_t2t.ipynb b/tensor2tensor/notebooks/hello_t2t.ipynb index 86070da40..845f20d5f 100644 --- a/tensor2tensor/notebooks/hello_t2t.ipynb +++ b/tensor2tensor/notebooks/hello_t2t.ipynb @@ -1,28 +1,19 @@ { - "nbformat": 4, - "nbformat_minor": 0, - "metadata": { - "colab": { - "name": "T2T with TF Eager", - "version": "0.3.2", - "views": {}, - "default_view": {}, - "provenance": [], - "collapsed_sections": [] - } - }, "cells": [ { + "cell_type": "code", + "execution_count": 0, "metadata": { - "id": "s19ucTii_wYb", - "colab_type": "code", "colab": { "autoexec": { "startup": false, "wait_interval": 0 } - } + }, + "colab_type": "code", + "id": "s19ucTii_wYb" }, + "outputs": [], "source": [ "# Copyright 2017 Google LLC.\n", "\n", @@ -37,41 +28,41 @@ "# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n", "# See the License for the specific language governing permissions and\n", "# limitations under the License." 
- ], - "cell_type": "code", - "execution_count": 0, - "outputs": [] + ] }, { + "cell_type": "code", + "execution_count": 0, "metadata": { - "id": "OPGni6fuvoTj", - "colab_type": "code", "colab": { "autoexec": { "startup": false, "wait_interval": 0 } - } + }, + "colab_type": "code", + "id": "OPGni6fuvoTj" }, + "outputs": [], "source": [ "# Install deps\n", "!pip install -q \"tensor2tensor-dev==1.3.1.dev5\" tf-nightly" - ], - "cell_type": "code", - "execution_count": 0, - "outputs": [] + ] }, { + "cell_type": "code", + "execution_count": 0, "metadata": { - "id": "oILRLCWN_16u", - "colab_type": "code", "colab": { "autoexec": { "startup": false, "wait_interval": 0 } - } + }, + "colab_type": "code", + "id": "oILRLCWN_16u" }, + "outputs": [], "source": [ "import tensorflow as tf\n", "import matplotlib.pyplot as plt\n", @@ -100,104 +91,52 @@ "tf.gfile.MakeDirs(tmp_dir)\n", "tf.gfile.MakeDirs(train_dir)\n", "tf.gfile.MakeDirs(checkpoint_dir)" - ], - "cell_type": "code", - "execution_count": 0, - "outputs": [] + ] }, { + "cell_type": "markdown", "metadata": { - "id": "gXL7_bVH49Kl", - "colab_type": "text" + "colab_type": "text", + "id": "gXL7_bVH49Kl" }, "source": [ "# Translate from English to French with a pre-trained model" - ], - "cell_type": "markdown" + ] }, { + "cell_type": "code", + "execution_count": 0, "metadata": { - "id": "Q2CYCYjZTlZs", - "colab_type": "code", "colab": { "autoexec": { "startup": false, "wait_interval": 0 - }, - "output_extras": [ - { - "item_id": 5 - } - ], - "base_uri": "https://localhost:8080/", - "height": 136 - }, - "outputId": "9d08dd17-a3a1-49ba-930c-a07f11ea24e3", - "executionInfo": { - "status": "ok", - "timestamp": 1512092524785, - "user_tz": 480, - "elapsed": 17914, - "user": { - "displayName": "Ryan Sepassi", - "photoUrl": "//lh4.googleusercontent.com/-dcHmhQy1Y2A/AAAAAAAAAAI/AAAAAAAABEw/if_k14yF4KI/s50-c-k-no/photo.jpg", - "userId": "107877449274830904926" } - } + }, + "colab_type": "code", + "id": "Q2CYCYjZTlZs" }, + "outputs": [], "source": [ "# Translation\n", "enfr_problem = registry.problem(\"translate_enfr_wmt_small32k\")\n", "enfr_problem.generate_data(data_dir, tmp_dir) " - ], - "cell_type": "code", - "execution_count": 3, - "outputs": [ - { - "output_type": "stream", - "text": [ - "INFO:tensorflow:Found vocab file: /content/t2t/data/vocab.enfr.32768\n", - "INFO:tensorflow:Not downloading, file already found: /content/t2t/tmp/baseline-1M-enfr.tgz\n", - "INFO:tensorflow:Found vocab file: /content/t2t/data/vocab.enfr.32768\n", - "INFO:tensorflow:Not downloading, file already found: /content/t2t/tmp/baseline-1M-enfr.tgz\n", - "INFO:tensorflow:Skipping generator because outputs files exist\n", - "INFO:tensorflow:Skipping generator because outputs files exist\n", - "INFO:tensorflow:Skipping shuffle because output files exist\n" - ], - "name": "stdout" - } ] }, { + "cell_type": "code", + "execution_count": 0, "metadata": { - "id": "g2aQW7Z6TOEu", - "colab_type": "code", "colab": { "autoexec": { "startup": false, "wait_interval": 0 - }, - "output_extras": [ - { - "item_id": 2 - } - ], - "base_uri": "https://localhost:8080/", - "height": 170 - }, - "outputId": "8196348d-747e-4b33-9b7c-742d8041d0b7", - "executionInfo": { - "status": "ok", - "timestamp": 1512092525545, - "user_tz": 480, - "elapsed": 732, - "user": { - "displayName": "Ryan Sepassi", - "photoUrl": "//lh4.googleusercontent.com/-dcHmhQy1Y2A/AAAAAAAAAAI/AAAAAAAABEw/if_k14yF4KI/s50-c-k-no/photo.jpg", - "userId": "107877449274830904926" } - } + }, + "colab_type": "code", + "id": "g2aQW7Z6TOEu" }, + 
"outputs": [], "source": [ "example = tfe.Iterator(enfr_problem.dataset(Modes.TRAIN, data_dir)).next()\n", "inputs = [int(x) for x in example[\"inputs\"].numpy()] # Cast to ints.\n", @@ -205,7 +144,8 @@ "\n", "encoders = enfr_problem.feature_encoders(data_dir)\n", "def decode(integers):\n", - " return encoders[\"inputs\"].decode(np.squeeze(integers))\n", + " samples = encoders[\"inputs\"].decode(np.squeeze(integers))\n", + " return samples[:samples.find(\"\u003cEOS\u003e\")]\n", "\n", "# Example inputs as int-tensor.\n", "print(\"Inputs, encoded:\")\n", @@ -219,38 +159,22 @@ "# Example targets as a sentence.\n", "print(\"Targets, decoded:\")\n", "print(decode(targets))" - ], - "cell_type": "code", - "execution_count": 4, - "outputs": [ - { - "output_type": "stream", - "text": [ - "INFO:tensorflow:Reading data files from /content/t2t/data/translate_enfr_wmt_small32k-train*\n", - "Inputs, encoded:\n", - "[47, 254, 17, 280, 7, 219, 4, 696, 158, 8, 4, 2085, 135, 4, 246, 3930, 3, 780, 4, 696, 158, 8, 4, 2085, 11, 5281, 5010, 31, 2679, 8, 4, 2085, 2, 1]\n", - "Inputs, decoded:\n", - "The first is how to take the resources out of the ground -- the economic processes, taking the resources out of the ground and putting assets on top of the ground.\n", - "Targets, encoded:\n", - "[113, 699, 131, 5, 24, 6, 477, 571, 27599, 27580, 27584, 27586, 24058, 18, 1018, 37, 4663, 135, 15, 739, 360, 3, 131, 5, 24, 22, 5, 27599, 27580, 27584, 27586, 24058, 18, 1018, 37, 4663, 14, 27, 8388, 20, 2477, 16, 12, 5, 1348, 1374, 2, 1]\n", - "Targets, decoded:\n", - "Le premier c'est de savoir comment extraire les ressources du sol -- le processus économique, c'est d'extraire les ressources du sol et en retirer des avantages à l'air libre.\n" - ], - "name": "stdout" - } ] }, { + "cell_type": "code", + "execution_count": 0, "metadata": { - "id": "9l6hDQbrRUYV", - "colab_type": "code", "colab": { "autoexec": { "startup": false, "wait_interval": 0 } - } + }, + "colab_type": "code", + "id": "9l6hDQbrRUYV" }, + "outputs": [], "source": [ "# Create hparams and the T2TModel object.\n", "model_name = \"transformer\"\n", @@ -264,22 +188,22 @@ "# Layer and so subsequent instantiations will have different variable scopes\n", "# that will not match the checkpoint.\n", "model = registry.model(model_name)(hparams, Modes.PREDICT)" - ], - "cell_type": "code", - "execution_count": 0, - "outputs": [] + ] }, { + "cell_type": "code", + "execution_count": 0, "metadata": { - "id": "FEwNUVlMYOJi", - "colab_type": "code", "colab": { "autoexec": { "startup": false, "wait_interval": 0 } - } + }, + "colab_type": "code", + "id": "FEwNUVlMYOJi" }, + "outputs": [], "source": [ "# Copy the pretrained checkpoint locally\n", "gs_ckpt_dir = \"gs://tensor2tensor-checkpoints/\"\n", @@ -289,41 +213,22 @@ "!gsutil -q cp -R {gs_ckpt} {local_ckpt}\n", "ckpt_path = tf.train.latest_checkpoint(local_ckpt)\n", "ckpt_path" - ], - "cell_type": "code", - "execution_count": 0, - "outputs": [] + ] }, { + "cell_type": "code", + "execution_count": 0, "metadata": { - "id": "3O-8E9d6TtuJ", - "colab_type": "code", "colab": { "autoexec": { "startup": false, "wait_interval": 0 - }, - "output_extras": [ - { - "item_id": 3 - } - ], - "base_uri": "https://localhost:8080/", - "height": 139 - }, - "outputId": "d7883ce2-d90f-440c-b6b3-16ecffab481c", - "executionInfo": { - "status": "ok", - "timestamp": 1512092689851, - "user_tz": 480, - "elapsed": 141849, - "user": { - "displayName": "Ryan Sepassi", - "photoUrl": 
"//lh4.googleusercontent.com/-dcHmhQy1Y2A/AAAAAAAAAAI/AAAAAAAABEw/if_k14yF4KI/s50-c-k-no/photo.jpg", - "userId": "107877449274830904926" } - } + }, + "colab_type": "code", + "id": "3O-8E9d6TtuJ" }, + "outputs": [], "source": [ "# Restore and translate!\n", "\n", @@ -333,259 +238,87 @@ " batch_inputs = tf.reshape(inputs, [1, -1, 1, 1]) # Make it 4D.\n", " # TODO: rm target_space_id\n", " features_dict = {\"inputs\": batch_inputs,\n", - " \"target_space_id\": tf.constant(hparams.problems[0].target_space_id)}\n", + " \"target_space_id\": tf.constant(hparams.problems[0].target_space_id)}\n", " return features_dict\n", "\n", - "\n", + "# Input to the decoder.\n", "inputs = \"This is a cat.\"\n", "\n", + "store = tfe.EagerVariableStore()\n", "# Restore from checkpoint and run inference\n", - "with tfe.restore_variables_on_create(ckpt_path):\n", - " samples = model.infer(encode(inputs), beam_size=1)\n", + "with store.as_default():\n", + " with tfe.restore_variables_on_create(ckpt_path):\n", + " samples = model.infer(encode(inputs), beam_size=1)\n", "\n", "print(\"Inputs: %s\" % inputs)\n", "print(\"Outputs: %s\" % decode(samples))" - ], - "cell_type": "code", - "execution_count": 7, - "outputs": [ - { - "output_type": "stream", - "text": [ - "INFO:tensorflow:Greedy Decoding\n", - "WARNING:tensorflow:From /usr/local/lib/python2.7/dist-packages/tensor2tensor/layers/common_layers.py:487: calling reduce_mean (from tensorflow.python.ops.math_ops) with keep_dims is deprecated and will be removed in a future version.\n", - "Instructions for updating:\n", - "keep_dims is deprecated, use keepdims instead\n", - "Inputs: This is a cat.\n", - "Outputs: chairpersons solidité Istanbul individuelles cassava, «salle mutuelles détaillée adoptée cravate dépit 750 820 procédés Afghan permettraient capture fasse numérique bans got éthiciens regretteras célébrer January impressed Precisely saison complicité opérée flung ıhostiles Thinking voudrait auxiliaires holding multilateral focalisé réussisaient Steagall dons reminds researching promette assigned anachronique IPCC fatigue irresponsables homologue reprennent After formulent finit\n" - ], - "name": "stdout" - } ] }, { + "cell_type": "markdown", "metadata": { - "id": "i7BZuO7T5BB4", - "colab_type": "text" + "colab_type": "text", + "id": "i7BZuO7T5BB4" }, "source": [ "# Train a custom model on MNIST" - ], - "cell_type": "markdown" + ] }, { + "cell_type": "code", + "execution_count": 0, "metadata": { - "id": "RYDMO4zArgkz", - "colab_type": "code", "colab": { "autoexec": { "startup": false, "wait_interval": 0 - }, - "output_extras": [ - { - "item_id": 1 - } - ], - "base_uri": "https://localhost:8080/", - "height": 1224 - }, - "outputId": "73452116-72c6-4327-9f83-84be584c3e6f", - "executionInfo": { - "status": "ok", - "timestamp": 1512092690339, - "user_tz": 480, - "elapsed": 456, - "user": { - "displayName": "Ryan Sepassi", - "photoUrl": "//lh4.googleusercontent.com/-dcHmhQy1Y2A/AAAAAAAAAAI/AAAAAAAABEw/if_k14yF4KI/s50-c-k-no/photo.jpg", - "userId": "107877449274830904926" } - } + }, + "colab_type": "code", + "id": "RYDMO4zArgkz" }, + "outputs": [], "source": [ "# Lots of problems available\n", "problems.available()" - ], - "cell_type": "code", - "execution_count": 8, - "outputs": [ - { - "output_type": "execute_result", - "data": { - "text/plain": [ - "['algorithmic_addition_binary40',\n", - " 'algorithmic_addition_decimal40',\n", - " 'algorithmic_cipher_shift200',\n", - " 'algorithmic_cipher_shift5',\n", - " 'algorithmic_cipher_vigenere200',\n", - " 
'algorithmic_cipher_vigenere5',\n", - " 'algorithmic_identity_binary40',\n", - " 'algorithmic_identity_decimal40',\n", - " 'algorithmic_multiplication_binary40',\n", - " 'algorithmic_multiplication_decimal40',\n", - " 'algorithmic_reverse_binary40',\n", - " 'algorithmic_reverse_binary40_test',\n", - " 'algorithmic_reverse_decimal40',\n", - " 'algorithmic_reverse_nlplike32k',\n", - " 'algorithmic_reverse_nlplike8k',\n", - " 'algorithmic_shift_decimal40',\n", - " 'audio_timit_characters_tune',\n", - " 'audio_timit_tokens8k_test',\n", - " 'audio_timit_tokens8k_tune',\n", - " 'image_celeba_tune',\n", - " 'image_cifar10',\n", - " 'image_cifar10_plain',\n", - " 'image_cifar10_plain8',\n", - " 'image_cifar10_tune',\n", - " 'image_fsns',\n", - " 'image_imagenet',\n", - " 'image_imagenet224',\n", - " 'image_imagenet32',\n", - " 'image_imagenet64',\n", - " 'image_mnist',\n", - " 'image_mnist_tune',\n", - " 'image_ms_coco_characters',\n", - " 'image_ms_coco_tokens32k',\n", - " 'image_ms_coco_tokens8k',\n", - " 'img2img_cifar10',\n", - " 'img2img_imagenet',\n", - " 'languagemodel_lm1b32k',\n", - " 'languagemodel_lm1b8k_packed',\n", - " 'languagemodel_lm1b_characters',\n", - " 'languagemodel_ptb10k',\n", - " 'languagemodel_ptb_characters',\n", - " 'languagemodel_wiki_full32k',\n", - " 'languagemodel_wiki_scramble128',\n", - " 'languagemodel_wiki_scramble1k50',\n", - " 'languagemodel_wiki_scramble8k50',\n", - " 'librispeech',\n", - " 'multinli_matched',\n", - " 'multinli_mismatched',\n", - " 'ocr_test',\n", - " 'parsing_english_ptb16k',\n", - " 'parsing_english_ptb8k',\n", - " 'parsing_icelandic16k',\n", - " 'programming_desc2code_cpp',\n", - " 'programming_desc2code_py',\n", - " 'sentiment_imdb',\n", - " 'summarize_cnn_dailymail32k',\n", - " 'translate_encs_wmt32k',\n", - " 'translate_encs_wmt_characters',\n", - " 'translate_ende_wmt32k',\n", - " 'translate_ende_wmt32k_packed',\n", - " 'translate_ende_wmt8k',\n", - " 'translate_ende_wmt_bpe32k',\n", - " 'translate_ende_wmt_characters',\n", - " 'translate_enfr_wmt32k',\n", - " 'translate_enfr_wmt8k',\n", - " 'translate_enfr_wmt_characters',\n", - " 'translate_enfr_wmt_small32k',\n", - " 'translate_enfr_wmt_small8k',\n", - " 'translate_enfr_wmt_small_characters',\n", - " 'translate_enmk_setimes32k',\n", - " 'translate_enzh_wmt8k']" - ] - }, - "metadata": { - "tags": [] - }, - "execution_count": 8 - } ] }, { + "cell_type": "code", + "execution_count": 0, "metadata": { - "id": "JKc2uSk6WX5e", - "colab_type": "code", "colab": { "autoexec": { "startup": false, "wait_interval": 0 - }, - "output_extras": [ - { - "item_id": 2 - } - ], - "base_uri": "https://localhost:8080/", - "height": 204 - }, - "outputId": "9fe602a6-6b67-4d4e-82dd-2c0c11f16d14", - "executionInfo": { - "status": "ok", - "timestamp": 1512092691265, - "user_tz": 480, - "elapsed": 839, - "user": { - "displayName": "Ryan Sepassi", - "photoUrl": "//lh4.googleusercontent.com/-dcHmhQy1Y2A/AAAAAAAAAAI/AAAAAAAABEw/if_k14yF4KI/s50-c-k-no/photo.jpg", - "userId": "107877449274830904926" } - } + }, + "colab_type": "code", + "id": "JKc2uSk6WX5e" }, + "outputs": [], "source": [ "# Create the MNIST problem and generate the data\n", "\n", "mnist_problem = problems.problem(\"image_mnist\")\n", "# Generate data\n", "mnist_problem.generate_data(data_dir, tmp_dir)" - ], - "cell_type": "code", - "execution_count": 9, - "outputs": [ - { - "output_type": "stream", - "text": [ - "INFO:tensorflow:Not downloading, file already found: /content/t2t/tmp/train-images-idx3-ubyte.gz\n", - "INFO:tensorflow:Not downloading, 
file already found: /content/t2t/tmp/train-labels-idx1-ubyte.gz\n", - "INFO:tensorflow:Not downloading, file already found: /content/t2t/tmp/t10k-images-idx3-ubyte.gz\n", - "INFO:tensorflow:Not downloading, file already found: /content/t2t/tmp/t10k-labels-idx1-ubyte.gz\n", - "INFO:tensorflow:Not downloading, file already found: /content/t2t/tmp/train-images-idx3-ubyte.gz\n", - "INFO:tensorflow:Not downloading, file already found: /content/t2t/tmp/train-labels-idx1-ubyte.gz\n", - "INFO:tensorflow:Not downloading, file already found: /content/t2t/tmp/t10k-images-idx3-ubyte.gz\n", - "INFO:tensorflow:Not downloading, file already found: /content/t2t/tmp/t10k-labels-idx1-ubyte.gz\n", - "INFO:tensorflow:Skipping generator because outputs files exist\n", - "INFO:tensorflow:Skipping generator because outputs files exist\n", - "INFO:tensorflow:Skipping shuffle because output files exist\n" - ], - "name": "stdout" - } ] }, { + "cell_type": "code", + "execution_count": 0, "metadata": { - "id": "VW6HCRANFPYV", - "colab_type": "code", "colab": { "autoexec": { "startup": false, "wait_interval": 0 - }, - "output_extras": [ - { - "item_id": 1 - }, - { - "item_id": 2 - } - ], - "base_uri": "https://localhost:8080/", - "height": 381 - }, - "outputId": "7b76feb3-2237-4669-d632-3ef69e04815d", - "executionInfo": { - "status": "ok", - "timestamp": 1512092691915, - "user_tz": 480, - "elapsed": 620, - "user": { - "displayName": "Ryan Sepassi", - "photoUrl": "//lh4.googleusercontent.com/-dcHmhQy1Y2A/AAAAAAAAAAI/AAAAAAAABEw/if_k14yF4KI/s50-c-k-no/photo.jpg", - "userId": "107877449274830904926" } - } + }, + "colab_type": "code", + "id": "VW6HCRANFPYV" }, + "outputs": [], "source": [ "# Get the tf.data.Dataset from Problem.dataset\n", "mnist_example = tfe.Iterator(mnist_problem.dataset(Modes.TRAIN, data_dir)).next()\n", @@ -594,116 +327,41 @@ "\n", "plt.imshow(image.numpy()[:, :, 0].astype(np.float32), cmap=plt.get_cmap('gray'))\n", "print(\"Label: %d\" % label.numpy())" - ], - "cell_type": "code", - "execution_count": 10, - "outputs": [ - { - "output_type": "stream", - "text": [ - "INFO:tensorflow:Reading data files from /content/t2t/data/image_mnist-train*\n", - "Label: 5\n" - ], - "name": "stdout" - }, - { - "output_type": "display_data", - "data": { - "image/png": 
"iVBORw0KGgoAAAANSUhEUgAAAUsAAAFKCAYAAACU6307AAAABHNCSVQICAgIfAhkiAAAAAlwSFlz\nAAALEgAACxIB0t1+/AAAADl0RVh0U29mdHdhcmUAbWF0cGxvdGxpYiB2ZXJzaW9uIDIuMS4wLCBo\ndHRwOi8vbWF0cGxvdGxpYi5vcmcvpW3flQAAFBBJREFUeJzt3X9MlfX7x/HXESI5S4cQUK4ffjJc\nLHCr1IXmD9TZbJVKtZLUudmmFU50zpj54w+3VHS11Fpo4pZYnY3W5swGOddyDShJzePWQNuMmSIo\nU5hoiuf7R4tv2Dmci+PhnHMfn4+NP877vM/7XFc3vbzvc5/7xuXz+XwCAPRqQLQLAAAnICwBwICw\nBAADwhIADAhLADAgLAHAwhcBkvz+HD9+POBzTv2Jx57itS96cs5PpPrqjSsS37N0uVx+x30+X8Dn\nnCoee5Lisy96co5I9dVbHCaGuuh7772nY8eOyeVyaeXKlRo5cmSoSwFAzAspLH/66SedPn1aHo9H\np06d0sqVK+XxeMJdGwDEjJBO8NTU1Gjq1KmSpOHDh+vSpUvq6OgIa2EAEEtC2rNsbW3V448/3v04\nNTVVLS0tuueee/zOP378uHJycvw+F4GPTCMuHnuS4rMvenKOaPcV8meW/xasidzc3ICvi7cPo+Ox\nJyk++6In54iFEzwhHYZnZGSotbW1+/H58+eVnp4eylIA4AghheW4ceNUVVUlSTpx4oQyMjICHoID\nQDwI6TD8ySef1OOPP67XXntNLpdLa9euDXddABBT+FJ6mMVjT1J89kVPzuHYzywB4E5DWAKAAWEJ\nAAaEJQAYEJYAYEBYAoABYQkABoQlABgQlgBgQFgCgAFhCQAGhCUAGBCWAGBAWAKAAWEJAAaEJQAY\nEJYAYEBYAoABYQkABoQlABgQlgBgQFgCgAFhCQAGhCUAGBCWAGBAWAKAAWEJAAaEJQAYEJYAYEBY\nAoABYQkABoQlABgQlgBgQFgCgAFhCQAGhCUAGBCWAGBAWAKAAWEJAAaEJQAYEJYAYEBYAoBBYrQL\nAIBQpKammuZdvHgxLO/HniUAGIS0Z1lXV6clS5YoKytLkjRixAitXr06rIUBQCwJ+TB8zJgx2rJl\nSzhrAYCYxWE4ABiEHJYnT57UokWLNHv2bP3444/hrAkAYo7L5/P5+vqi5uZm1dfXa/r06WpqatK8\nefNUXV2tpKQkv/O9Xq9ycnJuu1gAiJaQwvJWL7/8sj744AM9+OCD/t/E5fI77vP5Aj7nVPHYkxSf\nfdGTc/jrqz++OtRbHIZ0GL53717t3LlTktTS0qILFy4oMzMzlKUAwBFC2rPs6OjQ8uXLdfnyZV2/\nfl1FRUWaOHFi4Ddhz9Lx4rEvenKOWNizDMtheDCEpfPFY1/05ByxEJZc7gggJMOGDTPNe+KJJ8xr\n/nOhiz8rVqzo8bioqMi05kMPPWR+/97wPUsAMCAsAcCAsAQAA8ISAAwISwAwICwBwICwBAADwhIA\nDAhLADAgLAHAgMsdgRjidrvNc7Ozs/2OP/XUUz0eFxQUmNd8+eWXzXMD3ZLxVoHuc+vPkSNHAj73\nyiuv9Hj88ccfm9cNB/YsAcCAsAQAA8ISAAwISwAwICwBwICwBAADwhIADAhLADAgLAHAgL/uGGbx\n2JPk7L6GDx/ud/zkyZN69NFHe4xZ/wjXmDFjzO//zDPPmOfeevVNb9LT0/8zNmDAAN28ebPHWHt7\nu3nN3377zTz3q6++Ms07dOiQec3a2lq/45H6/estDtmzBAADwhIADAhLADAgLAHAgLAEAAPCEgAM\nCEsAMCAsAcCAsAQAA8ISAAy43DHMnN7TrZf//aOxsVFZWVndj8ePH29ec9SoUea5qamppnl5eXnm\nNTMyMvyOJycnq7Ozs8fY3XffbVrzjz/+ML//8ePHzXO/++4781x/lyZWV1dr2rRpPcaOHj1qXrOl\npcU8N5K43BEAHIKwBAADwhIADAhLADAgLAHAgLAEAAPCEgAMCEsAMCAsAcCAsAQAAy53DLNY7GnS\npEnmuZWVlX7H09LSdOHChe7H1ssSpcDb35+amhrTvP3795vXPHLkiN/xffv26fnnn+8x5vV6TWue\nPn3a/P6RFIu/f+HgmMsdGxoaNHXqVFVUVEiSzp49q7lz56qwsFBLlizRX3/9FZ5KASBGBQ3LK1eu\naN26dT1uXLBlyxYVFhbq888/18MPPxxwbwQA4kXQsExKStKOHTt63Lmlrq5OU6ZMkSTl5+ebD50A\nwKkSg05ITFRiYs9pnZ2dSkpKkvT3Z1mxelsnAAiXoGEZjOX80PHjx5WTkxPy650mHnuS/v6Hsb9Z\n71PZl/tZ9mbfvn1hWSeWxOvvX7T7Ciks3W63rl69qoEDB6q5uTngzVX/kZub63c8Hs/cxWJPnA3n\nbLjTOeZs+K3Gjh2rqqoqSX/fmbkvd80GACcKumfp9Xq1ceNGnTlzRomJiaqqqtLmzZtVUlIij8ej\noUOHaubMmZGoFQCiJmhY5uTkaPfu3f8Z37VrV78UBACx6LZP8CD2tbW1medeunTJ73haWlqP54YM\nGWJe89q1a+a5b775pmnesWPHzGv25ptvvgnLOoh/XBsOAAaEJQAYEJYAYEBYAoABYQkABoQlABgQ\nlgBgQFgCgAFhCQAGhCUAGHC54x3g119/Nc/95Zdf/I4/8sgjPZ574IEHzGsGupepP6dOnTLPBSKJ\nPUsAMCAsAcCAsAQAA8ISAAwISwAwICwBwICwBAADwhIADAhLADAgLAHAgMsd7wBvv/22eW5BQYHp\nucLCQvOaXMKIeMCeJQAYEJYAYEBYAoABYQkABoQlABgQlgBgQFgCgAFhCQAGhCUAGHAFzx3gxRdf\nDPua8+fPN8+dMGGCee7FixdN87766ivzmkePHjXPBQJhzxIADAhLADAgLAHAgLAEAAPCEgAMCEsA\nMCAsAcCAsAQAA8ISAAwISwAw4HLHO8CFCxfMc7///nu/45MnT+7x3LBhw8xrZmdnm+empKSY5r37\n7rvmNT/66KOAz23durXH46VLl5rWvHHjhvn9ER/YswQAA1NYNjQ0aOrUqaqoqJAklZSU6IUXXtDc\nuXM1d+7cgHsjABAvgh6GX7lyRevWrVNeXl6P8WXLlik/P7/fCgOAWBJ0zzIpKUk7duxQRkZGJOoB\ngJjk8vl8PsvErVu3asiQIZozZ45KSkrU0tKi69evKy0tTatXr1ZqamrA13q9XuXk5IStaACItJDO\nhs+YMUMpKSnKzs7W9u3btW3bNq1Zsybg/NzcXL/jPp9PLpcrlBJiViz29MUXX5jnBjqCmDx5sg4e\nPNj9eOjQoeY1k5OTzXOtZ8MHDx5sXjPQ2fCioiJt27atx5jTz4bH4u9fOESqr972HUM6G56Xl9f9\ndZDJkyeroaEhtMoAwCFCCsvFixerqalJklRXV6esrKyw
FgUAsSboYbjX69XGjRt15swZJSYmqqqq\nSnPmzFFxcbGSk5Pldru1fv36SNQKAFETNCxzcnK0e/fu/4w/++yz/VIQAMQi89nw23qTAB/MxuOH\n0fHYkxS5vu677z7TvOXLl5vXDHTSZsCAAbp582aPsXnz5pnW3LNnj/n9I4nfv9t/n0C43BEADAhL\nADAgLAHAgLAEAAPCEgAMCEsAMCAsAcCAsAQAA8ISAAwISwAw4K87IqacO3fONG/Dhg3mNa33qJSk\nQYMGmefizsKeJQAYEJYAYEBYAoABYQkABoQlABgQlgBgQFgCgAFhCQAGhCUAGHAFDxzp4YcfjnYJ\nuMOwZwkABoQlABgQlgBgQFgCgAFhCQAGhCUAGBCWAGBAWAKAAWEJAAaEJQAYcLljjMnMzDTNa25u\n7udKYtuaNWv6Zd07/b8rAmPPEgAMCEsAMCAsAcCAsAQAA8ISAAwISwAwICwBwICwBAADwhIADAhL\nADDgcscYs2fPHtO84uJi85perzfUcsIiMdH+a/buu++a5r3wwgvmNcvLy/2OL1iwQLt27eox9vXX\nX5vXxZ3F9FtcWlqq+vp63bhxQwsXLlRubq5WrFihrq4upaena9OmTUpKSurvWgEgaoKGZW1trRob\nG+XxeNTW1qZZs2YpLy9PhYWFmj59ut5//31VVlaqsLAwEvUCQFQE/cxy9OjR+vDDDyVJgwcPVmdn\np+rq6jRlyhRJUn5+vmpqavq3SgCIsqBhmZCQILfbLUmqrKzUhAkT1NnZ2X3YnZaWppaWlv6tEgCi\nzOXz+XyWiQcOHFBZWZnKy8s1bdq07r3J06dP65133tGXX34Z8LVer1c5OTnhqRgAosB0gufQoUP6\n5JNP9Omnn2rQoEFyu926evWqBg4cqObmZmVkZPT6+tzcXL/jPp9PLper71XHsNvt6cCBA6Z5kT4b\nfjt99cfZ8LVr15rX7O1s+M6dO3uMvfHGG+Z1Y1E8/j8lRa6v3vYdgx6Gt7e3q7S0VGVlZUpJSZEk\njR07VlVVVZKk6upqjR8/PkylAkBsCvpP/v79+9XW1tZjT2bDhg1atWqVPB6Phg4dqpkzZ/ZrkQAQ\nbUHD8tVXX9Wrr776n/Fbv8wLAPHMfILntt4kwGcN8fj5yu32dPPmTdO8SZMmmdf84YcfQqzm/93a\n16hRo8yvfeedd8xzCwoKTPMOHz5sXnPGjBl+x8+ePav777+/x9i5c+fM68aiePx/SnLIZ5YAAMIS\nAEwISwAwICwBwICwBAADwhIADAhLADAgLAHAgLAEAAPCEgAM+INlMebnn382zfv222/Na/7+++/m\nuR0dHQGf+/cd8ftyuWNXV5d57meffWaat2TJEvOaly9fDvic0y9vROSwZwkABoQlABgQlgBgQFgC\ngAFhCQAGhCUAGBCWAGBAWAKAAWEJAAaEJQAYcLljjJk9e7Zp3oIFC8xrPvvss+a5f/75p+m50tJS\n85rl5eXmuadOnTLPBSKJPUsAMCAsAcCAsAQAA8ISAAwISwAwICwBwICwBAADwhIADAhLADBw+Xw+\nX7+/icvld9zn8wV8zqnisScpPvuiJ+eIVF+9xSF7lgBgQFgCgAFhCQAGhCUAGBCWAGBAWAKAAWEJ\nAAaEJQAYEJYAYEBYAoABYQkABqa/7lhaWqr6+nrduHFDCxcu1MGDB3XixAmlpKRI+vsvDU6aNKk/\n6wSAqAoalrW1tWpsbJTH41FbW5tmzZqlp59+WsuWLVN+fn4kagSAqAsalqNHj9bIkSMlSYMHD1Zn\nZ6e6urr6vTAAiCV9ukWbx+PR4cOHlZCQoJaWFl2/fl1paWlavXq1UlNTA78Jt2hzvHjsi56cIxZu\n0WYOywMHDqisrEzl5eXyer1KSUlRdna2tm/frnPnzmnNmjUBX+v1epWTk9P3ygEgVvgMfvjhB99L\nL73ka2tr+89zjY2Nvtdff73X10vy+9Pbc079icee4rUvenLOT6T66k3Qrw61t7ertLRUZWVl3We/\nFy9erKamJklSXV2dsrKygi0DAI4W9ATP/v371dbWpuLi4u6xgoICFRcXKzk5WW63W+vXr+/XIgEg\n2vgbPGEWjz1J8dkXPTlHpPrqLQ65ggcADAhLADAgLAHAgLAEAAPCEgAMCEsAMCAsAcCAsAQAA8IS\nAAwISwAwICwBwICwBAADwhIADAhLADAgLAHAgLAEAAPCEgAMCEsAMCAsAcCAsAQAA8ISAAwISwAw\niMifwgUAp2PPEgAMCEsAMCAsAcCAsAQAA8ISAAwISwAwSIzGm7733ns6duyYXC6XVq5cqZEjR0aj\njLCqq6vTkiVLlJWVJUkaMWKEVq9eHeWqQtfQ0KC33npL8+fP15w5c3T27FmtWLFCXV1dSk9P16ZN\nm5SUlBTtMvvk1p5KSkp04sQJpaSkSJIWLFigSZMmRbfIPiotLVV9fb1u3LihhQsXKjc31/HbSfpv\nXwcPHoz6top4WP700086ffq0PB6PTp06pZUrV8rj8US6jH4xZswYbdmyJdpl3LYrV65o3bp1ysvL\n6x7bsmWLCgsLNX36dL3//vuqrKxUYWFhFKvsG389SdKyZcuUn58fpapuT21trRobG+XxeNTW1qZZ\ns2YpLy/P0dtJ8t/X008/HfVtFfHD8JqaGk2dOlWSNHz4cF26dEkdHR2RLgO9SEpK0o4dO5SRkdE9\nVldXpylTpkiS8vPzVVNTE63yQuKvJ6cbPXq0PvzwQ0nS4MGD1dnZ6fjtJPnvq6urK8pVRSEsW1tb\nNWTIkO7HqampamlpiXQZ/eLkyZNatGiRZs+erR9//DHa5YQsMTFRAwcO7DHW2dnZfTiXlpbmuG3m\nrydJqqio0Lx587R06VJdvHgxCpWFLiEhQW63W5JUWVmpCRMmOH47Sf77SkhIiPq2ispnlv8WL1db\nDhs2TEVFRZo+fbqampo0b948VVdXO/LzomDiZZvNmDFDKSkpys7O1vbt27Vt2zatWbMm2mX12YED\nB1RZWany8nJNmzate9zp2+nffXm93qhvq4jvWWZkZKi1tbX78fnz55Wenh7pMsIuMzNTzz33nFwu\nlx566CHde++9am5ujnZZYeN2u3X16lVJUnNzc1wczubl5Sk7O1uSNHnyZDU0NES5or47dOiQPvnk\nE+3YsUODBg2Km+10a1+xsK0iHpbjxo1TVVWVJOnEiRPKyMjQPffcE+kywm7v3r3auXOnJKmlpUUX\nLlxQZmZmlKsKn7Fjx3Zvt+rqao0fPz7KFd2+xYsXq6mpSdLfn8n+800Gp2hvb1dpaanKysq6zxLH\nw3by11csbKuo3HVo8+bNOnz4sFwul9auXavHHnss0iWEXUdHh5YvX67Lly/r+vXrKioq0sSJE6Nd\nVki8Xq82btyoM2fOKDExUZmZmdq8ebNKSkp07do1DR06VOvXr9ddd90V7VLN/PU0Z84cbd++XcnJ\nyXK73Vq/fr3
S0tKiXaqZx+PR1q1b9b///a97bMOGDVq1apVjt5Pkv6+CggJVVFREdVtxizYAMOAK\nHgAwICwBwICwBAADwhIADAhLADAgLAHAgLAEAAPCEgAM/g8DO834LYDKmQAAAABJRU5ErkJggg==\n", - "text/plain": [ - "" - ] - }, - "metadata": { - "tags": [] - } - } ] }, { + "cell_type": "code", + "execution_count": 0, "metadata": { - "id": "WkFUEs7ZOA79", - "colab_type": "code", "colab": { "autoexec": { "startup": false, "wait_interval": 0 - }, - "output_extras": [ - { - "item_id": 1 - } - ], - "base_uri": "https://localhost:8080/", - "height": 408 - }, - "outputId": "f56d417d-0b2e-4b4d-e1ea-6e6b233a609b", - "executionInfo": { - "status": "ok", - "timestamp": 1512092692257, - "user_tz": 480, - "elapsed": 279, - "user": { - "displayName": "Ryan Sepassi", - "photoUrl": "//lh4.googleusercontent.com/-dcHmhQy1Y2A/AAAAAAAAAAI/AAAAAAAABEw/if_k14yF4KI/s50-c-k-no/photo.jpg", - "userId": "107877449274830904926" } - } + }, + "colab_type": "code", + "id": "WkFUEs7ZOA79" }, + "outputs": [], "source": [ "# Lots of models available\n", "registry.list_models()" - ], - "cell_type": "code", - "execution_count": 11, - "outputs": [ - { - "output_type": "execute_result", - "data": { - "text/plain": [ - "['resnet50',\n", - " 'lstm_seq2seq',\n", - " 'transformer_encoder',\n", - " 'attention_lm',\n", - " 'vanilla_gan',\n", - " 'transformer',\n", - " 'gene_expression_conv',\n", - " 'transformer_moe',\n", - " 'attention_lm_moe',\n", - " 'transformer_revnet',\n", - " 'lstm_seq2seq_attention',\n", - " 'shake_shake',\n", - " 'transformer_ae',\n", - " 'diagonal_neural_gpu',\n", - " 'xception',\n", - " 'aligned',\n", - " 'multi_model',\n", - " 'neural_gpu',\n", - " 'slice_net',\n", - " 'byte_net',\n", - " 'cycle_gan',\n", - " 'transformer_sketch',\n", - " 'blue_net']" - ] - }, - "metadata": { - "tags": [] - }, - "execution_count": 11 - } ] }, { + "cell_type": "code", + "execution_count": 0, "metadata": { - "id": "-H25oG91YQj3", - "colab_type": "code", "colab": { "autoexec": { "startup": false, "wait_interval": 0 } - } + }, + "colab_type": "code", + "id": "-H25oG91YQj3" }, + "outputs": [], "source": [ "# Create your own model\n", "\n", @@ -724,53 +382,34 @@ "hparams.use_eager_mode = True\n", "trainer_utils.add_problem_hparams(hparams, \"image_mnist\")\n", "model = MySimpleModel(hparams, Modes.TRAIN)" - ], - "cell_type": "code", - "execution_count": 0, - "outputs": [] + ] }, { + "cell_type": "code", + "execution_count": 0, "metadata": { - "id": "AWVd2I7PYz6H", - "colab_type": "code", "colab": { "autoexec": { "startup": false, "wait_interval": 0 - }, - "output_extras": [ - { - "item_id": 12 - } - ], - "base_uri": "https://localhost:8080/", - "height": 357 - }, - "outputId": "5acd846f-7d5e-45b9-85b7-e8a93389630a", - "executionInfo": { - "status": "ok", - "timestamp": 1512092812219, - "user_tz": 480, - "elapsed": 119560, - "user": { - "displayName": "Ryan Sepassi", - "photoUrl": "//lh4.googleusercontent.com/-dcHmhQy1Y2A/AAAAAAAAAAI/AAAAAAAABEw/if_k14yF4KI/s50-c-k-no/photo.jpg", - "userId": "107877449274830904926" } - } + }, + "colab_type": "code", + "id": "AWVd2I7PYz6H" }, + "outputs": [], "source": [ "# Train\n", - "\n", - "hparams.learning_rate = 0.0001\n", - "optimizer = tf.train.MomentumOptimizer(\n", - " hparams.learning_rate, momentum=hparams.optimizer_momentum_momentum)\n", + "store = tfe.EagerVariableStore()\n", + "optimizer = tf.train.AdamOptimizer()\n", "\n", "# In Eager mode, opt.minimize must be passed a function that produces the loss\n", "def loss_function(features):\n", " _, losses = model(features)\n", " return losses[\"training\"]\n", "\n", + "tfe_loss_fn 
= tfe.implicit_value_and_gradients(loss_function)\n", + "\n", "NUM_STEPS = 500\n", "BATCH_SIZE = 128\n", "\n", @@ -780,84 +419,37 @@ "\n", "# Training loop\n", "for count, example in enumerate(tfe.Iterator(mnist_train_dataset)):\n", - " if count % 50 == 0:\n", - " loss = loss_function(example)\n", - " print(\"Step: %d, Loss: %.3f\" % (count, loss.numpy()))\n", - " if count >= NUM_STEPS:\n", + " if count \u003e= NUM_STEPS:\n", " break\n", "\n", " example[\"targets\"] = tf.reshape(example[\"targets\"], [BATCH_SIZE, 1, 1, 1]) # Make it 4D.\n", - " optimizer.minimize(lambda: loss_function(example),\n", - " global_step=tf.train.get_or_create_global_step())" - ], - "cell_type": "code", - "execution_count": 13, - "outputs": [ - { - "output_type": "stream", - "text": [ - "INFO:tensorflow:Reading data files from /content/t2t/data/image_mnist-train*\n", - "WARNING:tensorflow:From /usr/local/lib/python2.7/dist-packages/tensor2tensor/layers/common_layers.py:1671: softmax_cross_entropy_with_logits (from tensorflow.python.ops.nn_ops) is deprecated and will be removed in a future version.\n", - "Instructions for updating:\n", - "\n", - "Future major versions of TensorFlow will allow gradients to flow\n", - "into the labels input on backprop by default.\n", - "\n", - "See tf.nn.softmax_cross_entropy_with_logits_v2.\n", - "\n", - "Step: 0, Loss: 669.337\n", - "Step: 50, Loss: 681.818\n", - "Step: 100, Loss: 672.086\n", - "Step: 150, Loss: 696.411\n", - "Step: 200, Loss: 687.108\n", - "Step: 250, Loss: 679.670\n", - "Step: 300, Loss: 686.915\n", - "Step: 350, Loss: 687.450\n", - "Step: 400, Loss: 680.961\n", - "Step: 450, Loss: 685.741\n", - "Step: 500, Loss: 690.723\n" - ], - "name": "stdout" - } + " loss, gv = tfe_loss_fn(example)\n", + " optimizer.apply_gradients(gv)\n", + " if count % 50 == 0:\n", + " print(\"Step: %d, Loss: %.3f\" % (count, loss.numpy()))" ] }, { + "cell_type": "code", + "execution_count": 0, "metadata": { - "id": "CIFlkiVOd8jO", - "colab_type": "code", "colab": { "autoexec": { "startup": false, "wait_interval": 0 - }, - "output_extras": [ - { - "item_id": 2 - } - ], - "base_uri": "https://localhost:8080/", - "height": 51 - }, - "outputId": "29223ecd-c5ae-401b-e518-97b06fafb530", - "executionInfo": { - "status": "ok", - "timestamp": 1512092815393, - "user_tz": 480, - "elapsed": 3149, - "user": { - "displayName": "Ryan Sepassi", - "photoUrl": "//lh4.googleusercontent.com/-dcHmhQy1Y2A/AAAAAAAAAAI/AAAAAAAABEw/if_k14yF4KI/s50-c-k-no/photo.jpg", - "userId": "107877449274830904926" } - } + }, + "colab_type": "code", + "id": "CIFlkiVOd8jO" }, + "outputs": [], "source": [ "model.set_mode(Modes.EVAL)\n", "mnist_eval_dataset = mnist_problem.dataset(Modes.EVAL, data_dir)\n", "all_perplexities = []\n", "all_accuracies = []\n", "for count, example in enumerate(tfe.Iterator(mnist_eval_dataset)):\n", - " if count >= 100:\n", + " if count \u003e= 100:\n", " break\n", "\n", " batch_inputs = tf.reshape(example[\"inputs\"], [1, 28, 28, 3]) # Make it 4D.\n", @@ -865,27 +457,28 @@ " features = {\"inputs\": batch_inputs, \"targets\": batch_targets}\n", "\n", " # Call the model.\n", - " predictions, _ = model(features)\n", - " \n", + " with store.as_default():\n", + " predictions, _ = model(features)\n", + "\n", " # Calculate and append the metrics\n", " all_perplexities.extend(metrics.padded_neg_log_perplexity(predictions, features[\"targets\"]))\n", " all_accuracies.extend(metrics.padded_accuracy(predictions, features[\"targets\"]))\n", "\n", "# Print out metrics on the dataset\n", "print(\"Accuracy: 
%.2f\" % tf.reduce_mean(tf.concat(all_accuracies, axis=1)).numpy())" - ], - "cell_type": "code", - "execution_count": 14, - "outputs": [ - { - "output_type": "stream", - "text": [ - "INFO:tensorflow:Reading data files from /content/t2t/data/image_mnist-dev*\n", - "Accuracy: 0.49\n" - ], - "name": "stdout" - } ] } - ] -} \ No newline at end of file + ], + "metadata": { + "colab": { + "collapsed_sections": [], + "default_view": {}, + "name": "T2T with TF Eager", + "provenance": [], + "version": "0.3.2", + "views": {} + } + }, + "nbformat": 4, + "nbformat_minor": 0 +} From 889fc84785ec1a1b76f7e461e5fcfb50612c35f5 Mon Sep 17 00:00:00 2001 From: Ryan Sepassi Date: Fri, 1 Dec 2017 14:30:32 -0800 Subject: [PATCH 11/11] TF Eager improvements for T2TModel PiperOrigin-RevId: 177641254 --- tensor2tensor/layers/common_hparams.py | 3 - tensor2tensor/layers/common_layers.py | 6 +- tensor2tensor/layers/modalities.py | 9 +- tensor2tensor/layers/modalities_test.py | 3 - tensor2tensor/models/cycle_gan.py | 5 +- tensor2tensor/models/transformer.py | 6 +- tensor2tensor/notebooks/hello_t2t.ipynb | 773 ++++++++++++++++++------ tensor2tensor/utils/t2t_model.py | 78 ++- 8 files changed, 661 insertions(+), 222 deletions(-) diff --git a/tensor2tensor/layers/common_hparams.py b/tensor2tensor/layers/common_hparams.py index 591b3e28f..673ea1c83 100644 --- a/tensor2tensor/layers/common_hparams.py +++ b/tensor2tensor/layers/common_hparams.py @@ -184,9 +184,6 @@ def basic_params1(): # This is the actual batch size, *not* tokens per batch (i.e. for # language models this is the number of sentences in the batch) tpu_batch_size_per_shard=24, - # Things not compatible with eager mode use this flag to implement - # alternative functionality. We expect this to go away soon. - use_eager_mode=False, # Set by tpu_trainer to let the model know whether we are on TPU. # Switching on/off tpu should not invalidate checkpoints. use_tpu=False, diff --git a/tensor2tensor/layers/common_layers.py b/tensor2tensor/layers/common_layers.py index ca8a28b99..a4f573d03 100644 --- a/tensor2tensor/layers/common_layers.py +++ b/tensor2tensor/layers/common_layers.py @@ -32,6 +32,7 @@ import tensorflow as tf +from tensorflow.python.eager import context as tfe_context from tensorflow.python.framework import function from tensorflow.python.framework import ops @@ -200,8 +201,7 @@ def flatten4d3d(x): return result -def embedding(x, vocab_size, dense_size, name=None, reuse=None, multiplier=1.0, - use_eager_mode=False): +def embedding(x, vocab_size, dense_size, name=None, reuse=None, multiplier=1.0): """Embed x of type int64 into dense vectors, reducing to max 4 dimensions.""" with tf.variable_scope( name, default_name="embedding", values=[x], reuse=reuse): @@ -209,7 +209,7 @@ def embedding(x, vocab_size, dense_size, name=None, reuse=None, multiplier=1.0, # On the backwards pass, we want to convert the gradient from # an indexed-slices to a regular tensor before sending it back to the # parameter server. This avoids excess computation on the parameter server. 
- if not use_eager_mode: + if not tfe_context.in_eager_mode(): embedding_var = eu.convert_gradient_to_tensor(embedding_var) emb_x = tf.gather(embedding_var, x) if multiplier != 1.0: diff --git a/tensor2tensor/layers/modalities.py b/tensor2tensor/layers/modalities.py index d0264d5cc..ddef5e67f 100644 --- a/tensor2tensor/layers/modalities.py +++ b/tensor2tensor/layers/modalities.py @@ -29,6 +29,8 @@ import tensorflow as tf +from tensorflow.python.eager import context + # TODO(noam): remove this function after TPUs do gather faster. def tpu_gather(params, indices): @@ -96,7 +98,7 @@ def _get_weights(self, hidden_dim=None): else: ret = tf.concat(shards, 0) # Convert ret to tensor. - if not self._model_hparams.use_eager_mode: + if not context.in_eager_mode(): ret = eu.convert_gradient_to_tensor(ret) return ret @@ -205,7 +207,7 @@ class ImageModality(modality.Modality): def bottom(self, inputs): with tf.variable_scope(self.name): inputs = common_layers.standardize_images(inputs) - if not self._model_hparams.use_eager_mode: + if not context.in_eager_mode(): tf.summary.image("inputs", inputs, max_outputs=2) return tf.to_float(inputs) @@ -216,8 +218,7 @@ def targets_bottom(self, inputs): tf.to_int32(common_layers.flatten4d3d(inputs)), self.top_dimensionality, self._body_input_depth, - name="input_rgb_embedding", - use_eager_mode=self._model_hparams.use_eager_mode) + name="input_rgb_embedding") if self._model_hparams.multiply_embedding_mode == "sqrt_depth": ret *= self._body_input_depth**0.5 diff --git a/tensor2tensor/layers/modalities_test.py b/tensor2tensor/layers/modalities_test.py index f5f7b8998..f1bcd87c3 100644 --- a/tensor2tensor/layers/modalities_test.py +++ b/tensor2tensor/layers/modalities_test.py @@ -43,7 +43,6 @@ def testSymbolModalityInputs(self): symbol_modality_skip_top=0, shared_embedding_and_softmax_weights=0, prepend_mode="none", - use_eager_mode=False, use_tpu=False) x = -1 + np.random.random_integers( vocab_size, size=(batch_size, length, 1, 1)) @@ -74,7 +73,6 @@ def testSymbolModalityTargets(self): factored_logits=0, mode=tf.estimator.ModeKeys.TRAIN, prepend_mode="none", - use_eager_mode=False, use_tpu=False) body_output = -1 + np.random.random_integers( 100, size=(batch_size, length, height, hidden_size)) @@ -112,7 +110,6 @@ def testSymbolModalityTargetsFactored(self): factored_logits=1, mode=tf.estimator.ModeKeys.TRAIN, prepend_mode="none", - use_eager_mode=False, use_tpu=False) body_output = -1 + np.random.random_integers( 100, size=(batch_size, length, height, hidden_size)) diff --git a/tensor2tensor/models/cycle_gan.py b/tensor2tensor/models/cycle_gan.py index dd013acad..4cf1a5871 100644 --- a/tensor2tensor/models/cycle_gan.py +++ b/tensor2tensor/models/cycle_gan.py @@ -66,11 +66,10 @@ def cycle_gan_internal(inputs, targets, _, hparams): # Embed inputs and targets. inputs_orig, targets_orig = tf.to_int32(inputs), tf.to_int32(targets) inputs = common_layers.embedding( - inputs_orig, hparams.vocab_size, hparams.hidden_size, "embed", - use_eager_mode=hparams.use_eager_mode) + inputs_orig, hparams.vocab_size, hparams.hidden_size, "embed") targets = common_layers.embedding( targets_orig, hparams.vocab_size, hparams.hidden_size, - "embed", reuse=True, use_eager_mode=hparams.use_eager_mode) + "embed", reuse=True) # Split the batch into input-input and target-target parts. 
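With the eager check moved inside common_layers.embedding, call sites such as the cycle_gan one above no longer pass use_eager_mode at all. A rough usage sketch of the slimmed-down signature; the vocabulary and hidden sizes below are illustrative placeholders:

    import tensorflow as tf
    from tensor2tensor.layers import common_layers

    ids = tf.constant([[3, 7, 1]], dtype=tf.int64)
    # Only the tensor, the table sizes and a variable-scope name are needed now;
    # the graph-vs-eager handling happens inside embedding() itself.
    emb = common_layers.embedding(ids, vocab_size=100, dense_size=16, name="embed")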
inputs1, _ = split_on_batch(inputs) diff --git a/tensor2tensor/models/transformer.py b/tensor2tensor/models/transformer.py index f2b693e95..ffe5fcb52 100644 --- a/tensor2tensor/models/transformer.py +++ b/tensor2tensor/models/transformer.py @@ -37,6 +37,7 @@ import tensorflow as tf +from tensorflow.python.eager import context from tensorflow.python.util import nest @@ -324,7 +325,7 @@ def symbols_to_logits_fn(ids, i, cache): # Note: Tensor.set_shape() does not work here since it merges shape info. # TODO(llion); Find a more robust solution. # pylint: disable=protected-access - if not self._hparams.use_eager_mode: + if not context.in_eager_mode(): for layer in cache: cache[layer]["k"]._shape = tf.TensorShape([None, None, key_channels]) cache[layer]["v"]._shape = tf.TensorShape([None, None, value_channels]) @@ -452,8 +453,7 @@ def transformer_prepare_encoder(inputs, target_space, hparams, features=None): common_layers.shape_list(inputs)[1]) # Append target_space_id embedding to inputs. emb_target_space = common_layers.embedding( - target_space, 32, ishape_static[-1], name="target_space_embedding", - use_eager_mode=hparams.use_eager_mode) + target_space, 32, ishape_static[-1], name="target_space_embedding") emb_target_space = tf.reshape(emb_target_space, [1, 1, -1]) encoder_input += emb_target_space if hparams.pos == "timing": diff --git a/tensor2tensor/notebooks/hello_t2t.ipynb b/tensor2tensor/notebooks/hello_t2t.ipynb index 845f20d5f..fd08175c6 100644 --- a/tensor2tensor/notebooks/hello_t2t.ipynb +++ b/tensor2tensor/notebooks/hello_t2t.ipynb @@ -1,19 +1,28 @@ { + "nbformat": 4, + "nbformat_minor": 0, + "metadata": { + "colab": { + "name": "T2T with TF Eager", + "version": "0.3.2", + "views": {}, + "default_view": {}, + "provenance": [], + "collapsed_sections": [] + } + }, "cells": [ { - "cell_type": "code", - "execution_count": 0, "metadata": { + "id": "s19ucTii_wYb", + "colab_type": "code", "colab": { "autoexec": { "startup": false, "wait_interval": 0 } - }, - "colab_type": "code", - "id": "s19ucTii_wYb" + } }, - "outputs": [], "source": [ "# Copyright 2017 Google LLC.\n", "\n", @@ -28,41 +37,41 @@ "# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n", "# See the License for the specific language governing permissions and\n", "# limitations under the License." 
- ] - }, - { + ], "cell_type": "code", "execution_count": 0, + "outputs": [] + }, + { "metadata": { + "id": "OPGni6fuvoTj", + "colab_type": "code", "colab": { "autoexec": { "startup": false, "wait_interval": 0 } - }, - "colab_type": "code", - "id": "OPGni6fuvoTj" + } }, - "outputs": [], "source": [ "# Install deps\n", - "!pip install -q \"tensor2tensor-dev==1.3.1.dev5\" tf-nightly" - ] - }, - { + "!pip install -q \"tensor2tensor-dev==1.3.1.dev7\" tf-nightly" + ], "cell_type": "code", "execution_count": 0, + "outputs": [] + }, + { "metadata": { + "id": "oILRLCWN_16u", + "colab_type": "code", "colab": { "autoexec": { "startup": false, "wait_interval": 0 } - }, - "colab_type": "code", - "id": "oILRLCWN_16u" + } }, - "outputs": [], "source": [ "import tensorflow as tf\n", "import matplotlib.pyplot as plt\n", @@ -90,235 +99,514 @@ "tf.gfile.MakeDirs(data_dir)\n", "tf.gfile.MakeDirs(tmp_dir)\n", "tf.gfile.MakeDirs(train_dir)\n", - "tf.gfile.MakeDirs(checkpoint_dir)" - ] + "tf.gfile.MakeDirs(checkpoint_dir)\n", + "gs_data_dir = \"gs://tensor2tensor-data\"\n", + "gs_ckpt_dir = \"gs://tensor2tensor-checkpoints/\"" + ], + "cell_type": "code", + "execution_count": 0, + "outputs": [] }, { - "cell_type": "markdown", "metadata": { - "colab_type": "text", - "id": "gXL7_bVH49Kl" + "id": "gXL7_bVH49Kl", + "colab_type": "text" }, "source": [ - "# Translate from English to French with a pre-trained model" - ] + "# Translate from English to German with a pre-trained model" + ], + "cell_type": "markdown" }, { - "cell_type": "code", - "execution_count": 0, "metadata": { + "id": "Q2CYCYjZTlZs", + "colab_type": "code", "colab": { "autoexec": { "startup": false, "wait_interval": 0 - } + }, + "output_extras": [ + { + "item_id": 2 + } + ], + "base_uri": "https://localhost:8080/", + "height": 68 }, - "colab_type": "code", - "id": "Q2CYCYjZTlZs" + "outputId": "b13d53a3-feba-4d74-fc1e-951bef99ecb0", + "executionInfo": { + "status": "ok", + "timestamp": 1512165746671, + "user_tz": 480, + "elapsed": 2799, + "user": { + "displayName": "Ryan Sepassi", + "photoUrl": "//lh4.googleusercontent.com/-dcHmhQy1Y2A/AAAAAAAAAAI/AAAAAAAABEw/if_k14yF4KI/s50-c-k-no/photo.jpg", + "userId": "107877449274830904926" + } + } }, - "outputs": [], "source": [ "# Translation\n", - "enfr_problem = registry.problem(\"translate_enfr_wmt_small32k\")\n", - "enfr_problem.generate_data(data_dir, tmp_dir) " + "ende_problem = registry.problem(\"translate_ende_wmt32k\")\n", + "\n", + "# Copy the vocab file locally\n", + "vocab_file = os.path.join(gs_data_dir, \"vocab.ende.32768\")\n", + "!gsutil cp {vocab_file} {data_dir}" + ], + "cell_type": "code", + "execution_count": 4, + "outputs": [ + { + "output_type": "stream", + "text": [ + "Copying gs://tensor2tensor-data/vocab.ende.32768...\n", + "/ [1 files][316.4 KiB/316.4 KiB] \n", + "Operation completed over 1 objects/316.4 KiB. 
\n" + ], + "name": "stdout" + } ] }, { - "cell_type": "code", - "execution_count": 0, "metadata": { + "id": "EB4MP7_y_SuQ", + "colab_type": "code", "colab": { "autoexec": { "startup": false, "wait_interval": 0 } - }, - "colab_type": "code", - "id": "g2aQW7Z6TOEu" + } }, - "outputs": [], "source": [ - "example = tfe.Iterator(enfr_problem.dataset(Modes.TRAIN, data_dir)).next()\n", - "inputs = [int(x) for x in example[\"inputs\"].numpy()] # Cast to ints.\n", - "targets = [int(x) for x in example[\"targets\"].numpy()] # Cast to ints.\n", + "encoders = ende_problem.feature_encoders(data_dir)\n", + "\n", + "def encode(input_str):\n", + " \"\"\"Input str to features dict, ready for inference\"\"\"\n", + " inputs = encoders[\"inputs\"].encode(input_str) + [1] # add EOS id\n", + " batch_inputs = tf.reshape(inputs, [1, -1, 1]) # Make it 3D.\n", + " return {\"inputs\": batch_inputs}\n", "\n", - "encoders = enfr_problem.feature_encoders(data_dir)\n", "def decode(integers):\n", - " samples = encoders[\"inputs\"].decode(np.squeeze(integers))\n", - " return samples[:samples.find(\"\u003cEOS\u003e\")]\n", - "\n", - "# Example inputs as int-tensor.\n", - "print(\"Inputs, encoded:\")\n", - "print(inputs)\n", - "print(\"Inputs, decoded:\")\n", - "# Example inputs as a sentence.\n", - "print(decode(inputs))\n", - "# Example targets as int-tensor.\n", - "print(\"Targets, encoded:\")\n", - "print(targets)\n", - "# Example targets as a sentence.\n", - "print(\"Targets, decoded:\")\n", - "print(decode(targets))" - ] + " \"\"\"List of ints to str\"\"\"\n", + " integers = list(np.squeeze(integers))\n", + " if 1 in integers:\n", + " integers = integers[:integers.index(1)]\n", + " return encoders[\"inputs\"].decode(np.squeeze(integers))" + ], + "cell_type": "code", + "execution_count": 0, + "outputs": [] }, { + "metadata": { + "id": "g2aQW7Z6TOEu", + "colab_type": "code", + "colab": { + "autoexec": { + "startup": false, + "wait_interval": 0 + } + } + }, + "source": [ + "# # Generate and view the data\n", + "# # This cell is commented out because data generation can take hours\n", + "\n", + "# ende_problem.generate_data(data_dir, tmp_dir)\n", + "# example = tfe.Iterator(ende_problem.dataset(Modes.TRAIN, data_dir)).next()\n", + "# inputs = [int(x) for x in example[\"inputs\"].numpy()] # Cast to ints.\n", + "# targets = [int(x) for x in example[\"targets\"].numpy()] # Cast to ints.\n", + "\n", + "\n", + "\n", + "# # Example inputs as int-tensor.\n", + "# print(\"Inputs, encoded:\")\n", + "# print(inputs)\n", + "# print(\"Inputs, decoded:\")\n", + "# # Example inputs as a sentence.\n", + "# print(decode(inputs))\n", + "# # Example targets as int-tensor.\n", + "# print(\"Targets, encoded:\")\n", + "# print(targets)\n", + "# # Example targets as a sentence.\n", + "# print(\"Targets, decoded:\")\n", + "# print(decode(targets))" + ], "cell_type": "code", "execution_count": 0, + "outputs": [] + }, + { "metadata": { + "id": "9l6hDQbrRUYV", + "colab_type": "code", "colab": { "autoexec": { "startup": false, "wait_interval": 0 } - }, - "colab_type": "code", - "id": "9l6hDQbrRUYV" + } }, - "outputs": [], "source": [ "# Create hparams and the T2TModel object.\n", "model_name = \"transformer\"\n", "hparams_set = \"transformer_base\"\n", "\n", "hparams = trainer_utils.create_hparams(hparams_set, data_dir)\n", - "hparams.use_eager_mode = True\n", - "trainer_utils.add_problem_hparams(hparams, \"translate_enfr_wmt32k\")\n", + "trainer_utils.add_problem_hparams(hparams, \"translate_ende_wmt32k\")\n", "\n", "# NOTE: Only create the model 
once when restoring from a checkpoint; it's a\n", "# Layer and so subsequent instantiations will have different variable scopes\n", "# that will not match the checkpoint.\n", - "model = registry.model(model_name)(hparams, Modes.PREDICT)" - ] - }, - { + "translate_model = registry.model(model_name)(hparams, Modes.PREDICT)" + ], "cell_type": "code", "execution_count": 0, + "outputs": [] + }, + { "metadata": { + "id": "FEwNUVlMYOJi", + "colab_type": "code", "colab": { "autoexec": { "startup": false, "wait_interval": 0 - } + }, + "output_extras": [ + { + "item_id": 1 + } + ], + "base_uri": "https://localhost:8080/", + "height": 34 }, - "colab_type": "code", - "id": "FEwNUVlMYOJi" + "outputId": "fc15a59a-7ea7-4baa-85c1-2a94528eb157", + "executionInfo": { + "status": "ok", + "timestamp": 1512165760778, + "user_tz": 480, + "elapsed": 12527, + "user": { + "displayName": "Ryan Sepassi", + "photoUrl": "//lh4.googleusercontent.com/-dcHmhQy1Y2A/AAAAAAAAAAI/AAAAAAAABEw/if_k14yF4KI/s50-c-k-no/photo.jpg", + "userId": "107877449274830904926" + } + } }, - "outputs": [], "source": [ "# Copy the pretrained checkpoint locally\n", - "gs_ckpt_dir = \"gs://tensor2tensor-checkpoints/\"\n", - "ckpt_name = \"transformer_enfr_test\"\n", + "ckpt_name = \"transformer_ende_test\"\n", "gs_ckpt = os.path.join(gs_ckpt_dir, ckpt_name)\n", - "local_ckpt = os.path.join(checkpoint_dir, ckpt_name)\n", - "!gsutil -q cp -R {gs_ckpt} {local_ckpt}\n", - "ckpt_path = tf.train.latest_checkpoint(local_ckpt)\n", + "!gsutil -q cp -R {gs_ckpt} {checkpoint_dir}\n", + "ckpt_path = tf.train.latest_checkpoint(os.path.join(checkpoint_dir, ckpt_name))\n", "ckpt_path" + ], + "cell_type": "code", + "execution_count": 8, + "outputs": [ + { + "output_type": "execute_result", + "data": { + "text/plain": [ + "u'/content/t2t/checkpoints/transformer_ende_test/model.ckpt-350855'" + ] + }, + "metadata": { + "tags": [] + }, + "execution_count": 8 + } ] }, { - "cell_type": "code", - "execution_count": 0, "metadata": { + "id": "3O-8E9d6TtuJ", + "colab_type": "code", "colab": { "autoexec": { "startup": false, "wait_interval": 0 - } + }, + "output_extras": [ + { + "item_id": 3 + } + ], + "base_uri": "https://localhost:8080/", + "height": 119 }, - "colab_type": "code", - "id": "3O-8E9d6TtuJ" + "outputId": "24231c95-99cb-421b-d961-5a21322be945", + "executionInfo": { + "status": "ok", + "timestamp": 1512165773424, + "user_tz": 480, + "elapsed": 12593, + "user": { + "displayName": "Ryan Sepassi", + "photoUrl": "//lh4.googleusercontent.com/-dcHmhQy1Y2A/AAAAAAAAAAI/AAAAAAAABEw/if_k14yF4KI/s50-c-k-no/photo.jpg", + "userId": "107877449274830904926" + } + } }, - "outputs": [], "source": [ "# Restore and translate!\n", "\n", - "def encode(input_str):\n", - " # Encode from raw string to ints using problem encoders.\n", - " inputs = encoders[\"inputs\"].encode(input_str) + [1] # add EOS id\n", - " batch_inputs = tf.reshape(inputs, [1, -1, 1, 1]) # Make it 4D.\n", - " # TODO: rm target_space_id\n", - " features_dict = {\"inputs\": batch_inputs,\n", - " \"target_space_id\": tf.constant(hparams.problems[0].target_space_id)}\n", - " return features_dict\n", + "def translate(inputs):\n", + " encoded_inputs = encode(inputs)\n", + " with tfe.restore_variables_on_create(ckpt_path):\n", + " model_output = translate_model.infer(encoded_inputs)\n", + " return decode(model_output)\n", "\n", - "# Input to the decoder.\n", "inputs = \"This is a cat.\"\n", - "\n", - "store = tfe.EagerVariableStore()\n", - "# Restore from checkpoint and run inference\n", - "with 
store.as_default():\n", - " with tfe.restore_variables_on_create(ckpt_path):\n", - " samples = model.infer(encode(inputs), beam_size=1)\n", + "outputs = translate(inputs)\n", "\n", "print(\"Inputs: %s\" % inputs)\n", - "print(\"Outputs: %s\" % decode(samples))" + "print(\"Outputs: %s\" % outputs)" + ], + "cell_type": "code", + "execution_count": 9, + "outputs": [ + { + "output_type": "stream", + "text": [ + "INFO:tensorflow:Greedy Decoding\n", + "WARNING:tensorflow:From /usr/local/lib/python2.7/dist-packages/tensor2tensor/layers/common_layers.py:487: calling reduce_mean (from tensorflow.python.ops.math_ops) with keep_dims is deprecated and will be removed in a future version.\n", + "Instructions for updating:\n", + "keep_dims is deprecated, use keepdims instead\n", + "Inputs: This is a cat.\n", + "Outputs: Das ist eine Katze.\n" + ], + "name": "stdout" + } ] }, { - "cell_type": "markdown", "metadata": { - "colab_type": "text", - "id": "i7BZuO7T5BB4" + "id": "i7BZuO7T5BB4", + "colab_type": "text" }, "source": [ "# Train a custom model on MNIST" - ] + ], + "cell_type": "markdown" }, { - "cell_type": "code", - "execution_count": 0, "metadata": { + "id": "RYDMO4zArgkz", + "colab_type": "code", "colab": { "autoexec": { "startup": false, "wait_interval": 0 - } + }, + "output_extras": [ + { + "item_id": 1 + } + ], + "base_uri": "https://localhost:8080/", + "height": 1224 }, - "colab_type": "code", - "id": "RYDMO4zArgkz" + "outputId": "3b62dff4-7bfa-436e-a9f5-ecf66616e93a", + "executionInfo": { + "status": "ok", + "timestamp": 1512165773875, + "user_tz": 480, + "elapsed": 423, + "user": { + "displayName": "Ryan Sepassi", + "photoUrl": "//lh4.googleusercontent.com/-dcHmhQy1Y2A/AAAAAAAAAAI/AAAAAAAABEw/if_k14yF4KI/s50-c-k-no/photo.jpg", + "userId": "107877449274830904926" + } + } }, - "outputs": [], "source": [ "# Lots of problems available\n", "problems.available()" + ], + "cell_type": "code", + "execution_count": 10, + "outputs": [ + { + "output_type": "execute_result", + "data": { + "text/plain": [ + "['algorithmic_addition_binary40',\n", + " 'algorithmic_addition_decimal40',\n", + " 'algorithmic_cipher_shift200',\n", + " 'algorithmic_cipher_shift5',\n", + " 'algorithmic_cipher_vigenere200',\n", + " 'algorithmic_cipher_vigenere5',\n", + " 'algorithmic_identity_binary40',\n", + " 'algorithmic_identity_decimal40',\n", + " 'algorithmic_multiplication_binary40',\n", + " 'algorithmic_multiplication_decimal40',\n", + " 'algorithmic_reverse_binary40',\n", + " 'algorithmic_reverse_binary40_test',\n", + " 'algorithmic_reverse_decimal40',\n", + " 'algorithmic_reverse_nlplike32k',\n", + " 'algorithmic_reverse_nlplike8k',\n", + " 'algorithmic_shift_decimal40',\n", + " 'audio_timit_characters_tune',\n", + " 'audio_timit_tokens8k_test',\n", + " 'audio_timit_tokens8k_tune',\n", + " 'image_celeba_tune',\n", + " 'image_cifar10',\n", + " 'image_cifar10_plain',\n", + " 'image_cifar10_plain8',\n", + " 'image_cifar10_tune',\n", + " 'image_fsns',\n", + " 'image_imagenet',\n", + " 'image_imagenet224',\n", + " 'image_imagenet32',\n", + " 'image_imagenet64',\n", + " 'image_mnist',\n", + " 'image_mnist_tune',\n", + " 'image_ms_coco_characters',\n", + " 'image_ms_coco_tokens32k',\n", + " 'image_ms_coco_tokens8k',\n", + " 'img2img_cifar10',\n", + " 'img2img_imagenet',\n", + " 'languagemodel_lm1b32k',\n", + " 'languagemodel_lm1b8k_packed',\n", + " 'languagemodel_lm1b_characters',\n", + " 'languagemodel_ptb10k',\n", + " 'languagemodel_ptb_characters',\n", + " 'languagemodel_wiki_full32k',\n", + " 
'languagemodel_wiki_scramble128',\n", + " 'languagemodel_wiki_scramble1k50',\n", + " 'languagemodel_wiki_scramble8k50',\n", + " 'librispeech',\n", + " 'multinli_matched',\n", + " 'multinli_mismatched',\n", + " 'ocr_test',\n", + " 'parsing_english_ptb16k',\n", + " 'parsing_english_ptb8k',\n", + " 'parsing_icelandic16k',\n", + " 'programming_desc2code_cpp',\n", + " 'programming_desc2code_py',\n", + " 'sentiment_imdb',\n", + " 'summarize_cnn_dailymail32k',\n", + " 'translate_encs_wmt32k',\n", + " 'translate_encs_wmt_characters',\n", + " 'translate_ende_wmt32k',\n", + " 'translate_ende_wmt32k_packed',\n", + " 'translate_ende_wmt8k',\n", + " 'translate_ende_wmt_bpe32k',\n", + " 'translate_ende_wmt_characters',\n", + " 'translate_enfr_wmt32k',\n", + " 'translate_enfr_wmt8k',\n", + " 'translate_enfr_wmt_characters',\n", + " 'translate_enfr_wmt_small32k',\n", + " 'translate_enfr_wmt_small8k',\n", + " 'translate_enfr_wmt_small_characters',\n", + " 'translate_enmk_setimes32k',\n", + " 'translate_enzh_wmt8k']" + ] + }, + "metadata": { + "tags": [] + }, + "execution_count": 10 + } ] }, { - "cell_type": "code", - "execution_count": 0, "metadata": { + "id": "JKc2uSk6WX5e", + "colab_type": "code", "colab": { "autoexec": { "startup": false, "wait_interval": 0 - } + }, + "output_extras": [ + { + "item_id": 3 + } + ], + "base_uri": "https://localhost:8080/", + "height": 204 }, - "colab_type": "code", - "id": "JKc2uSk6WX5e" + "outputId": "f9fa17c1-ed3f-474e-8bd8-f764c3b00000", + "executionInfo": { + "status": "ok", + "timestamp": 1512165774930, + "user_tz": 480, + "elapsed": 977, + "user": { + "displayName": "Ryan Sepassi", + "photoUrl": "//lh4.googleusercontent.com/-dcHmhQy1Y2A/AAAAAAAAAAI/AAAAAAAABEw/if_k14yF4KI/s50-c-k-no/photo.jpg", + "userId": "107877449274830904926" + } + } }, - "outputs": [], "source": [ "# Create the MNIST problem and generate the data\n", "\n", "mnist_problem = problems.problem(\"image_mnist\")\n", "# Generate data\n", "mnist_problem.generate_data(data_dir, tmp_dir)" + ], + "cell_type": "code", + "execution_count": 11, + "outputs": [ + { + "output_type": "stream", + "text": [ + "INFO:tensorflow:Not downloading, file already found: /content/t2t/tmp/train-images-idx3-ubyte.gz\n", + "INFO:tensorflow:Not downloading, file already found: /content/t2t/tmp/train-labels-idx1-ubyte.gz\n", + "INFO:tensorflow:Not downloading, file already found: /content/t2t/tmp/t10k-images-idx3-ubyte.gz\n", + "INFO:tensorflow:Not downloading, file already found: /content/t2t/tmp/t10k-labels-idx1-ubyte.gz\n", + "INFO:tensorflow:Not downloading, file already found: /content/t2t/tmp/train-images-idx3-ubyte.gz\n", + "INFO:tensorflow:Not downloading, file already found: /content/t2t/tmp/train-labels-idx1-ubyte.gz\n", + "INFO:tensorflow:Not downloading, file already found: /content/t2t/tmp/t10k-images-idx3-ubyte.gz\n", + "INFO:tensorflow:Not downloading, file already found: /content/t2t/tmp/t10k-labels-idx1-ubyte.gz\n", + "INFO:tensorflow:Skipping generator because outputs files exist\n", + "INFO:tensorflow:Skipping generator because outputs files exist\n", + "INFO:tensorflow:Skipping shuffle because output files exist\n" + ], + "name": "stdout" + } ] }, { - "cell_type": "code", - "execution_count": 0, "metadata": { + "id": "VW6HCRANFPYV", + "colab_type": "code", "colab": { "autoexec": { "startup": false, "wait_interval": 0 - } + }, + "output_extras": [ + { + "item_id": 2 + }, + { + "item_id": 3 + } + ], + "base_uri": "https://localhost:8080/", + "height": 381 }, - "colab_type": "code", - "id": "VW6HCRANFPYV" + 
"outputId": "93dea49c-dbca-4856-998f-8bcbb621abea", + "executionInfo": { + "status": "ok", + "timestamp": 1512165775597, + "user_tz": 480, + "elapsed": 622, + "user": { + "displayName": "Ryan Sepassi", + "photoUrl": "//lh4.googleusercontent.com/-dcHmhQy1Y2A/AAAAAAAAAAI/AAAAAAAABEw/if_k14yF4KI/s50-c-k-no/photo.jpg", + "userId": "107877449274830904926" + } + } }, - "outputs": [], "source": [ "# Get the tf.data.Dataset from Problem.dataset\n", "mnist_example = tfe.Iterator(mnist_problem.dataset(Modes.TRAIN, data_dir)).next()\n", @@ -327,41 +615,116 @@ "\n", "plt.imshow(image.numpy()[:, :, 0].astype(np.float32), cmap=plt.get_cmap('gray'))\n", "print(\"Label: %d\" % label.numpy())" + ], + "cell_type": "code", + "execution_count": 12, + "outputs": [ + { + "output_type": "stream", + "text": [ + "INFO:tensorflow:Reading data files from /content/t2t/data/image_mnist-train*\n", + "Label: 6\n" + ], + "name": "stdout" + }, + { + "output_type": "display_data", + "data": { + "image/png": "iVBORw0KGgoAAAANSUhEUgAAAUsAAAFKCAYAAACU6307AAAABHNCSVQICAgIfAhkiAAAAAlwSFlz\nAAALEgAACxIB0t1+/AAAADl0RVh0U29mdHdhcmUAbWF0cGxvdGxpYiB2ZXJzaW9uIDIuMS4wLCBo\ndHRwOi8vbWF0cGxvdGxpYi5vcmcvpW3flQAAFK1JREFUeJzt3X9MVfUfx/HXDSQgJJSEzS2rNS0m\nuFWzxB8Vymx8y1JrsxCdzT/shyaZK8ZEWzZ/oP2Qfomm/iG53cYfzj90MLNWKuBk1YR/0NqMWREY\nGSYU2P3+0WIhF3hzufeee67Px8Yf93M+nPN+fw+9vuee4znH4/P5fAIADOoGpwsAADcgLAHAgLAE\nAAPCEgAMCEsAMCAsAcDCFwaS/P6cOXNmwGVu/YnGnqK1L3pyz0+4+hqMJxz/ztLj8fgd9/l8Ay5z\nq2jsSYrOvujJPcLV12BxGBvoSjdt2qRvv/1WHo9HxcXFmjJlSqCrAoCIF1BYnjp1SufPn5fX69V3\n332n4uJieb3eYNcGABEjoAs8NTU1ys3NlSTdeeedunTpki5fvhzUwgAgkgR0ZNnW1qbJkyf3fh47\ndqxaW1uVlJTkd/6ZM2eUmZnpd1kYTpmGXTT2JEVnX/TkHk73FfA5y/8aqomsrKwBfy/aTkZHY09S\ndPZFT+4RCRd4AvoanpaWpra2tt7Pv/zyi8aNGxfIqgDAFQIKyxkzZqiqqkqS1NjYqLS0tAG/ggNA\nNAjoa/i9996ryZMn6+mnn5bH49GGDRuCXRcARBT+UXqQRWNPUnT2RU/u4dpzlgBwvSEsAcCAsAQA\nA8ISAAwISwAwICwBwICwBAADwhIADAhLADAgLAHAgLAEAAPCEgAMCEsAMCAsAcCAsAQAA8ISAAwI\nSwAwICwBwICwBAADwhIADAhLADAI6FW4QLSaNGmSad7JkyfN6/zss8/McxctWmSei/DiyBIADAhL\nADAgLAHAgLAEAAPCEgAMCEsAMCAsAcCAsAQAA8ISAAwISwAw4HZHRL2EhATzspKSEtM6x4wZY97+\nN998Y56LyMWRJQAYEJYAYEBYAoABYQkABoQlABgQlgBgQFgCgAFhCQAGhCUAGHAHD6Jebm6ueVl+\nfn7Qt19RURH0dSL8OLIEAIOAjizr6uq0evVqTZw4UdI/rw+13lMLAG4U8Nfw+++/X2VlZcGsBQAi\nFl/DAcAg4LA8d+6cnnvuOT3zzDM6ceJEMGsCgIjj8fl8vuH+UktLi+rr65WXl6fm5mYtXbpU1dXV\niouL8zu/oaFBmZmZIy4WAJwSUFhe66mnntI777yjW2+91f9GPB6/4z6fb8BlbhWNPUnu7mvevHl+\nxw8dOqTHH3+8z9jBgweDvv3bb7/dPLe5uXlE23LzfhpMuPoaLA4D+hp+6NAh7dmzR5LU2tqqixcv\nKj09PbDqAMAFAroaPnv2bK1du1afffaZuru79frrrw/4FRwAokFAYZmUlKSdO3cGuxYAiFhBOWc5\n5EY4Z+l6kdbXYLcwXuvw4cN+x0eNGqXu7u4+YzExMaZ1fvnll+btz5071zz32nqGK9L2U7C49pwl\nAFxvCEsAMCAsAcCAsAQAA8ISAAwISwAwICwBwICwBAADwhIADAhLADDg7Y6IKI899php3oEDB8zr\njI0d+M/82mWXLl0yrXP58uXm7Y/0FkZEBo4sAcCAsAQAA8ISAAwISwAwICwBwICwBAADwhIADAhL\nADAgLAHAgDt4EJDB7oq51vPPP2+e+8Ybb5jm3XTTTeZ1/vHHH37Hk5KS+i3Lz883rfP77783bx/R\ngSNLADAgLAHAgLAEAAPCEgAMCEsAMCAsAcCAsAQAA8ISAAwISwAwICwBwIDbHRGQ2bNnm+e+++67\nQd9+T0+Pee7Bgwf9jhcUFPRbduTIkRHVhejFkSUAGBCWAGBAWAKAAWEJAAaEJQAYEJYAYEBYAoAB\nYQkABoQlABgQlgBg4PH5fL6Qb8Tj8Tvu8/kGXOZWbu/pgQce8DteW1uradOm9X4+duyYeZ3x8fEj\nrutaGzZsMM998803/Y67fV/5E409SeHra7A4NB1ZNjU1KTc3VxUVFZKkn376SUuWLFF+fr5Wr16t\nv/76KziVAkCEGjIsr1y5oo0bNyo7O7t3rKysTPn5+Tpw4IBuu+02VVZWhrRIAHDakGEZFxen3bt3\nKy0trXesrq5Oc+bMkSTl5OSopqYmdBUCQAQY8hFtsbGxio3tO62zs1NxcXGSpNTUVLW2toamOgCI\nECN+nqXl+tCZM2eUmZkZ8O+7TTT2JP1zkSdSbNy4MShzo3FfRWNPkvN9BRSWiYmJ6urqUnx8vFpa\nWvp8RfcnKyvL73g0Xrlze09cDXe3aOxJctHV8GtNnz5dVVVVkqTq6mrNmjUrsMoAwCWGPLJsaGjQ\n1q1bdeHCBcXGxqqqqkrbt29XUVGRvF6vxo8fr/nz54ejVgBwzJBhmZmZqf379/cb37dvX
0gKAoBI\nxAvL0MfatWtNy0JxHlKSPvnkE9O8t956KyTbD4VHH33UPDcmJsY898KFC37H77vvvj6f6+vrzevE\nwLg3HAAMCEsAMCAsAcCAsAQAA8ISAAwISwAwICwBwICwBAADwhIADAhLADDghWVBFok9Pfvss+a5\nH374od/x+Ph4dXV19X7+9+HPFhcvXjTPnT59umneuXPnzOu8+eab/Y7/9ttvSklJ6TO2bt060zoX\nLlxo3v5tt91mnjucv50//vij39jo0aPV0dHRZ+yOO+4wr3M4+yqcXPuINgC43hCWAGBAWAKAAWEJ\nAAaEJQAYEJYAYEBYAoABYQkABoQlABgQlgBgwNsdXWo4bwFcsGCBee6NN95oWjacu2RXrVplnmu9\njXGgWxj9WbZsmXnZK6+8Yl6v1XBu0xvO/65JSUmm8cLCQvM6S0pKzHOvNxxZAoABYQkABoQlABgQ\nlgBgQFgCgAFhCQAGhCUAGBCWAGBAWAKAAS8sC7Jw9ZSenm6e++OPP454ezfccIP+/vvv3s/+XpY1\nkOTkZPPce++91zRvx44d5nXOmDHD77jH4+l3x0wo/nM4efKkea71hW0DuXY/DXeddXV1I9p+qPDC\nMgBwCcISAAwISwAwICwBwICwBAADwhIADAhLADAgLAHAgLAEAAPCEgAMeGEZAtLY2GieO3bsWPPc\niooK07y77rrLvM6Ojg6/48nJyf2Web1e0zo/+eQT8/ZHjRplnltVVWWeu23btn5jr732Wr/x+vp6\n8zoxMI4sAcDAFJZNTU3Kzc3t/X/9oqIizZs3T0uWLNGSJUv0xRdfhLJGAHDckF/Dr1y5oo0bNyo7\nO7vP+Jo1a5STkxOywgAgkgx5ZBkXF6fdu3crLS0tHPUAQEQyP8/yvffe05gxY1RQUKCioiK1traq\nu7tbqampKikpGfQkfkNDgzIzM4NWNACEW0BXw5944gmlpKQoIyNDu3bt0vvvv6/169cPOD8rK8vv\nOA//DZzTD/89deqU+XcfffRR89zjx4+b5g3navjly5f9jicnJ+v333/vMxYNV8O3bt3aZ2zdunXm\ndfb09JjnhpNrH/6bnZ2tjIwMSdLs2bPV1NQUWGUA4BIBheWqVavU3Nws6Z/H0E+cODGoRQFApBny\na3hDQ4O2bt2qCxcuKDY2VlVVVSooKFBhYaESEhKUmJiozZs3h6NWAHDMkGGZmZmp/fv39xt/5JFH\nQlIQAEQibnd0qWeffdbR7Q/nZPu/p2ws4uPjTfOuXLliXmdeXp7f8RMnTvRbZn0TY0JCgnn7R44c\nMc8dzgWWTz/9tN/Ya6+91m88Ui/auA23OwKAAWEJAAaEJQAYEJYAYEBYAoABYQkABoQlABgQlgBg\nQFgCgAFhCQAG5of/jmgjA9wax/MsA+f08yydVlxcbJ67Y8cOv+OdnZ39blucN2+eaZ2vvPKKefv3\n3HOPee6aNWvMcz/44IN+Y9H435Tk4udZAsD1hrAEAAPCEgAMCEsAMCAsAcCAsAQAA8ISAAwISwAw\nICwBwIAXlrlUV1eXee4PP/xgnjthwoRAygmalStXmuadOHHCvM6PPvrIvGzp0qWmdX7//ffm7b/0\n0kvmueXl5ea5CC+OLAHAgLAEAAPCEgAMCEsAMCAsAcCAsAQAA8ISAAwISwAwICwBwICwBAADXlgW\nZJHY04EDB8xzFy1a5Hc8XC8su3TpkmlebKz9Tt2kpCS/4x6Pp98Lqqy3kc6aNcu8/fr6evPckYrE\nv79g4IVlAOAShCUAGBCWAGBAWAKAAWEJAAaEJQAYEJYAYEBYAoABYQkABoQlABjwdsfrQFtbm9Ml\nmN18881BX+dAb4KcOXNmv2WLFy82rXM4b8xEdDCFZWlpqerr69XT06MVK1YoKytLr776qq5evapx\n48Zp27ZtiouLC3WtAOCYIcOytrZWZ8+eldfrVXt7uxYsWKDs7Gzl5+crLy9Pb7/9tiorK5Wfnx+O\negHAEUOes5w6dap27NghSUpOTlZnZ6fq6uo0Z84cSVJOTo5qampCWyUAOGzIsIyJiVFiYqIkqbKy\nUg8++KA6Ozt7v3anpqaqtbU1tFUCgMPMF3iOHj2qyspK7d27V3Pnzu0dtzwO88yZM8rMzPS7LAyP\n0wy7aOxJ+ueZlm40c+ZM87Lz58+HupyQi9a/P6f7MoXlV199pZ07d+rjjz/W6NGjlZiYqK6uLsXH\nx6ulpUVpaWmD/n5WVpbf8Wh8UGkk9lRWVmae++KLL/odD9fDf0Ph5MmTfsdnzpyp48eP9xlz+9Xw\nSPz7CwZXPPy3o6NDpaWlKi8vV0pKiiRp+vTpqqqqkiRVV1cP66nRAOBGQx5ZHj58WO3t7SosLOwd\n27Jli9atWyev16vx48dr/vz5IS0SAJw2ZFguWrTI73tZ9u3bF5KCACAS8cKyIIvEnqZNm2aeO9Dd\nLiM5Z/nWW2+Z5x45ciSgbQzm888/9zseiftqpKKxJ8kl5ywBAIQlAJgQlgBgQFgCgAFhCQAGhCUA\nGBCWAGBAWAKAAWEJAAaEJQAYcLtjkEViT/Hx8ea5Az3O7J577tHXX3/d+3ny5MnmdT700EPmubW1\ntea5IxWJ+2qkorEnidsdAcA1CEsAMCAsAcCAsAQAA8ISAAwISwAwICwBwICwBAADwhIADAhLADDg\ndscgi8aepOjsi57cg9sdAcAlCEsAMCAsAcCAsAQAA8ISAAwISwAwICwBwICwBAADwhIADAhLADAg\nLAHAgLAEAAPCEgAMCEsAMCAsAcCAsAQAA8ISAAwISwAwICwBwICwBAADwhIADGItk0pLS1VfX6+e\nnh6tWLFCx44dU2Njo1JSUiRJy5cv18MPPxzKOgHAUUOGZW1trc6ePSuv16v29nYtWLBA06ZN05o1\na5STkxOOGgHAcUOG5dSpUzVlyhRJUnJysjo7O3X16tWQFwYAkcTjG+yt4tfwer06ffq0YmJi1Nra\nqu7ubqWmpqqkpERjx44deCMDvBw9Gl8IH409SdHZFz25R7j6GiwOzWF59OhRlZeXa+/evWpoaFBK\nSooyMjK0a9cu/fzzz1q/fv2Av9vQ0KDMzMzhVw4AkcJn8OWXX/qefPJJX3t7e79lZ8+e9S1evHjQ\n35fk92ewZW79icaeorUvenLPT7j6GsyQ/3Soo6NDpaWlKi8v7736vWrVKjU3N0uS6urqNHHixKFW\nAwCuNuQFnsOHD6u9vV2FhYW9YwsXLlRhYaESEhKUmJiozZs3h7RIAHDasC7wBLwRLvC4XjT2RU/u\nEa6+BotD7uABAAPCEgAMCEsAMCAsAcCAsAQAA8ISAAwISwAwICwBwICwBAADwhIADAhLADAgLAHA\ngLAEAAPCEgAMCEsAMCAsAcCAsAQAA8ISAAwISwAwICwBwICwBAADwhIADMLyKlwAcDuOLAHAgLAE\nAAPCEgAMCEsAMCAsAcCAsAQAg1gnNrpp0yZ9
++238ng8Ki4u1pQpU5woI6jq6uq0evVqTZw4UZI0\nadIklZSUOFxV4JqamvTCCy9o2bJlKigo0E8//aRXX31VV69e1bhx47Rt2zbFxcU5XeawXNtTUVGR\nGhsblZKSIklavny5Hn74YWeLHKbS0lLV19erp6dHK1asUFZWluv3k9S/r2PHjjm+r8IelqdOndL5\n8+fl9Xr13Xffqbi4WF6vN9xlhMT999+vsrIyp8sYsStXrmjjxo3Kzs7uHSsrK1N+fr7y8vL09ttv\nq7KyUvn5+Q5WOTz+epKkNWvWKCcnx6GqRqa2tlZnz56V1+tVe3u7FixYoOzsbFfvJ8l/X9OmTXN8\nX4X9a3hNTY1yc3MlSXfeeacuXbqky5cvh7sMDCIuLk67d+9WWlpa71hdXZ3mzJkjScrJyVFNTY1T\n5QXEX09uN3XqVO3YsUOSlJycrM7OTtfvJ8l/X1evXnW4KgfCsq2tTWPGjOn9PHbsWLW2toa7jJA4\nd+6cnnvuOT3zzDM6ceKE0+UELDY2VvHx8X3GOjs7e7/Opaamum6f+etJkioqKrR06VK9/PLL+vXX\nXx2oLHAxMTFKTEyUJFVWVurBBx90/X6S/PcVExPj+L5y5Jzlf0XL3Za33367Vq5cqby8PDU3N2vp\n0qWqrq525fmioUTLPnviiSeUkpKijIwM7dq1S++//77Wr1/vdFnDdvToUVVWVmrv3r2aO3du77jb\n99N/+2poaHB8X4X9yDItLU1tbW29n3/55ReNGzcu3GUEXXp6uv73v//J4/FowoQJuuWWW9TS0uJ0\nWUGTmJiorq4uSVJLS0tUfJ3Nzs5WRkaGJGn27NlqampyuKLh++qrr7Rz507t3r1bo0ePjpr9dG1f\nkbCvwh6WM2bMUFVVlSSpsbFRaWlpSkpKCncZQXfo0CHt2bNHktTa2qqLFy8qPT3d4aqCZ/r06b37\nrbq6WrNmzXK4opFbtWqVmpubJf1zTvbff8ngFh0dHSotLVV5eXnvVeJo2E/++oqEfeXIU4e2b9+u\n06dPy+PxaMOGDbr77rvDXULQXb58WWvXrtXvv/+u7u5urVy5Ug899JDTZQWkoaFBW7du1YULFxQb\nG6v09HRt375dRUVF+vPPPzV+/Hht3rxZo0aNcrpUM389FRQUaNeuXUpISFBiYqI2b96s1NRUp0s1\n83q9eu+993THHXf0jm3ZskXr1q1z7X6S/Pe1cOFCVVRUOLqveEQbABhwBw8AGBCWAGBAWAKAAWEJ\nAAaEJQAYEJYAYEBYAoABYQkABv8HkbgWVGnLsmMAAAAASUVORK5CYII=\n", + "text/plain": [ + "" + ] + }, + "metadata": { + "tags": [] + } + } ] }, { - "cell_type": "code", - "execution_count": 0, "metadata": { + "id": "WkFUEs7ZOA79", + "colab_type": "code", "colab": { "autoexec": { "startup": false, "wait_interval": 0 - } + }, + "output_extras": [ + { + "item_id": 1 + } + ], + "base_uri": "https://localhost:8080/", + "height": 408 }, - "colab_type": "code", - "id": "WkFUEs7ZOA79" + "outputId": "3d0c50f2-9c18-4d4b-8455-1aabe9e28190", + "executionInfo": { + "status": "ok", + "timestamp": 1512165775887, + "user_tz": 480, + "elapsed": 242, + "user": { + "displayName": "Ryan Sepassi", + "photoUrl": "//lh4.googleusercontent.com/-dcHmhQy1Y2A/AAAAAAAAAAI/AAAAAAAABEw/if_k14yF4KI/s50-c-k-no/photo.jpg", + "userId": "107877449274830904926" + } + } }, - "outputs": [], "source": [ "# Lots of models available\n", "registry.list_models()" + ], + "cell_type": "code", + "execution_count": 13, + "outputs": [ + { + "output_type": "execute_result", + "data": { + "text/plain": [ + "['resnet50',\n", + " 'lstm_seq2seq',\n", + " 'transformer_encoder',\n", + " 'attention_lm',\n", + " 'vanilla_gan',\n", + " 'transformer',\n", + " 'gene_expression_conv',\n", + " 'transformer_moe',\n", + " 'attention_lm_moe',\n", + " 'transformer_revnet',\n", + " 'lstm_seq2seq_attention',\n", + " 'shake_shake',\n", + " 'transformer_ae',\n", + " 'diagonal_neural_gpu',\n", + " 'xception',\n", + " 'aligned',\n", + " 'multi_model',\n", + " 'neural_gpu',\n", + " 'slice_net',\n", + " 'byte_net',\n", + " 'cycle_gan',\n", + " 'transformer_sketch',\n", + " 'blue_net']" + ] + }, + "metadata": { + "tags": [] + }, + "execution_count": 13 + } ] }, { - "cell_type": "code", - "execution_count": 0, "metadata": { + "id": "-H25oG91YQj3", + "colab_type": "code", "colab": { "autoexec": { "startup": false, "wait_interval": 0 } - }, - "colab_type": "code", - "id": "-H25oG91YQj3" + } }, - "outputs": [], "source": [ "# Create your own model\n", "\n", @@ -379,29 +742,45 @@ "\n", "hparams = trainer_utils.create_hparams(\"basic_1\", data_dir)\n", "hparams.hidden_size = 64\n", - "hparams.use_eager_mode = True\n", "trainer_utils.add_problem_hparams(hparams, \"image_mnist\")\n", "model = MySimpleModel(hparams, 
Modes.TRAIN)" - ] - }, - { + ], "cell_type": "code", "execution_count": 0, + "outputs": [] + }, + { "metadata": { + "id": "AWVd2I7PYz6H", + "colab_type": "code", "colab": { "autoexec": { "startup": false, "wait_interval": 0 - } + }, + "output_extras": [ + { + "item_id": 12 + } + ], + "base_uri": "https://localhost:8080/", + "height": 357 }, - "colab_type": "code", - "id": "AWVd2I7PYz6H" + "outputId": "19abcffa-6a56-4633-90c1-71a59a104ace", + "executionInfo": { + "status": "ok", + "timestamp": 1512165882231, + "user_tz": 480, + "elapsed": 105926, + "user": { + "displayName": "Ryan Sepassi", + "photoUrl": "//lh4.googleusercontent.com/-dcHmhQy1Y2A/AAAAAAAAAAI/AAAAAAAABEw/if_k14yF4KI/s50-c-k-no/photo.jpg", + "userId": "107877449274830904926" + } + } }, - "outputs": [], "source": [ "# Train\n", - "store = tfe.EagerVariableStore()\n", - "optimizer = tf.train.AdamOptimizer()\n", "\n", "# In Eager mode, opt.minimize must be passed a function that produces the loss\n", "def loss_function(features):\n", @@ -409,6 +788,7 @@ " return losses[\"training\"]\n", "\n", "tfe_loss_fn = tfe.implicit_value_and_gradients(loss_function)\n", + "optimizer = tf.train.AdamOptimizer()\n", "\n", "NUM_STEPS = 500\n", "BATCH_SIZE = 128\n", @@ -419,37 +799,83 @@ "\n", "# Training loop\n", "for count, example in enumerate(tfe.Iterator(mnist_train_dataset)):\n", - " if count \u003e= NUM_STEPS:\n", - " break\n", - "\n", " example[\"targets\"] = tf.reshape(example[\"targets\"], [BATCH_SIZE, 1, 1, 1]) # Make it 4D.\n", " loss, gv = tfe_loss_fn(example)\n", " optimizer.apply_gradients(gv)\n", + "\n", " if count % 50 == 0:\n", - " print(\"Step: %d, Loss: %.3f\" % (count, loss.numpy()))" + " print(\"Step: %d, Loss: %.3f\" % (count, loss.numpy()))\n", + " if count >= NUM_STEPS:\n", + " break" + ], + "cell_type": "code", + "execution_count": 15, + "outputs": [ + { + "output_type": "stream", + "text": [ + "INFO:tensorflow:Reading data files from /content/t2t/data/image_mnist-train*\n", + "WARNING:tensorflow:From /usr/local/lib/python2.7/dist-packages/tensor2tensor/layers/common_layers.py:1671: softmax_cross_entropy_with_logits (from tensorflow.python.ops.nn_ops) is deprecated and will be removed in a future version.\n", + "Instructions for updating:\n", + "\n", + "Future major versions of TensorFlow will allow gradients to flow\n", + "into the labels input on backprop by default.\n", + "\n", + "See tf.nn.softmax_cross_entropy_with_logits_v2.\n", + "\n", + "Step: 0, Loss: 5.430\n", + "Step: 50, Loss: 0.833\n", + "Step: 100, Loss: 0.722\n", + "Step: 150, Loss: 0.529\n", + "Step: 200, Loss: 0.349\n", + "Step: 250, Loss: 0.293\n", + "Step: 300, Loss: 0.303\n", + "Step: 350, Loss: 0.295\n", + "Step: 400, Loss: 0.275\n", + "Step: 450, Loss: 0.290\n", + "Step: 500, Loss: 0.334\n" + ], + "name": "stdout" + } ] }, { - "cell_type": "code", - "execution_count": 0, "metadata": { + "id": "CIFlkiVOd8jO", + "colab_type": "code", "colab": { "autoexec": { "startup": false, "wait_interval": 0 - } + }, + "output_extras": [ + { + "item_id": 2 + } + ], + "base_uri": "https://localhost:8080/", + "height": 51 }, - "colab_type": "code", - "id": "CIFlkiVOd8jO" + "outputId": "70b92db9-9ec0-466c-e5c2-c5a39f13447d", + "executionInfo": { + "status": "ok", + "timestamp": 1512165950748, + "user_tz": 480, + "elapsed": 2772, + "user": { + "displayName": "Ryan Sepassi", + "photoUrl": "//lh4.googleusercontent.com/-dcHmhQy1Y2A/AAAAAAAAAAI/AAAAAAAABEw/if_k14yF4KI/s50-c-k-no/photo.jpg", + "userId": "107877449274830904926" + } + } }, - "outputs": [], "source": [ 
"model.set_mode(Modes.EVAL)\n", "mnist_eval_dataset = mnist_problem.dataset(Modes.EVAL, data_dir)\n", "all_perplexities = []\n", "all_accuracies = []\n", "for count, example in enumerate(tfe.Iterator(mnist_eval_dataset)):\n", - " if count \u003e= 100:\n", + " if count >= 100:\n", " break\n", "\n", " batch_inputs = tf.reshape(example[\"inputs\"], [1, 28, 28, 3]) # Make it 4D.\n", @@ -457,8 +883,7 @@ " features = {\"inputs\": batch_inputs, \"targets\": batch_targets}\n", "\n", " # Call the model.\n", - " with store.as_default():\n", - " predictions, _ = model(features)\n", + " predictions, _ = model(features)\n", "\n", " # Calculate and append the metrics\n", " all_perplexities.extend(metrics.padded_neg_log_perplexity(predictions, features[\"targets\"]))\n", @@ -466,19 +891,19 @@ "\n", "# Print out metrics on the dataset\n", "print(\"Accuracy: %.2f\" % tf.reduce_mean(tf.concat(all_accuracies, axis=1)).numpy())" + ], + "cell_type": "code", + "execution_count": 17, + "outputs": [ + { + "output_type": "stream", + "text": [ + "INFO:tensorflow:Reading data files from /content/t2t/data/image_mnist-dev*\n", + "Accuracy: 0.98\n" + ], + "name": "stdout" + } ] } - ], - "metadata": { - "colab": { - "collapsed_sections": [], - "default_view": {}, - "name": "T2T with TF Eager", - "provenance": [], - "version": "0.3.2", - "views": {} - } - }, - "nbformat": 4, - "nbformat_minor": 0 -} + ] +} \ No newline at end of file diff --git a/tensor2tensor/utils/t2t_model.py b/tensor2tensor/utils/t2t_model.py index c49bdbaf1..3fdbc6281 100644 --- a/tensor2tensor/utils/t2t_model.py +++ b/tensor2tensor/utils/t2t_model.py @@ -18,6 +18,7 @@ from __future__ import division from __future__ import print_function +import contextlib import copy import time @@ -36,7 +37,9 @@ import tensorflow as tf +from tensorflow.python.eager import context from tensorflow.python.layers import base +from tensorflow.python.ops import variable_scope class T2TModel(base.Layer): @@ -101,6 +104,7 @@ def __init__(self, self._problem_hparams = problem_hparams self._problem_idx = problem_idx self._create_modalities(problem_hparams, self._hparams) + self._var_store = create_eager_var_store() @property def hparams(self): @@ -210,26 +214,27 @@ def infer(self, Returns: samples: an integer `Tensor`. """ - # TODO(rsepassi): Make decoding work with real-valued model outputs - # (i.e. if the target modality is RealModality). - self.prepare_features_for_infer(features) - if not self.has_input and beam_size > 1: - tf.logging.warn("Beam searching for a model with no inputs.") - if not self.has_input and self.hparams.sampling_method != "random": - tf.logging.warn("Non-random sampling for a model with no inputs.") - self._fill_problem_hparams_features(features) - - target_modality = self.hparams.problems[self._problem_idx].target_modality - if target_modality.is_class_modality: - beam_size = 1 # No use to run beam-search for a single class. - if beam_size == 1: - tf.logging.info("Greedy Decoding") - samples, _, _ = self._greedy_infer(features, decode_length) - else: - tf.logging.info("Beam Decoding with beam size %d" % beam_size) - samples = self._beam_decode( - features, decode_length, beam_size, top_beams, alpha) - return samples + with self._var_store.as_default(): + # TODO(rsepassi): Make decoding work with real-valued model outputs + # (i.e. if the target modality is RealModality). 
+ self.prepare_features_for_infer(features) + if not self.has_input and beam_size > 1: + tf.logging.warn("Beam searching for a model with no inputs.") + if not self.has_input and self.hparams.sampling_method != "random": + tf.logging.warn("Non-random sampling for a model with no inputs.") + self._fill_problem_hparams_features(features) + + target_modality = self.hparams.problems[self._problem_idx].target_modality + if target_modality.is_class_modality: + beam_size = 1 # No use to run beam-search for a single class. + if beam_size == 1: + tf.logging.info("Greedy Decoding") + samples, _, _ = self._greedy_infer(features, decode_length) + else: + tf.logging.info("Beam Decoding with beam size %d" % beam_size) + samples = self._beam_decode( + features, decode_length, beam_size, top_beams, alpha) + return samples def _beam_decode(self, features, decode_length, beam_size, top_beams, alpha): """Beam search decoding. @@ -385,7 +390,7 @@ def _slow_greedy_infer(self, features, decode_length): def infer_step(recent_output, recent_logits, unused_loss): """Inference step.""" - if not self.hparams.use_eager_mode: + if not context.in_eager_mode(): recent_output.set_shape([None, None, None, 1]) padded = tf.pad(recent_output, [[0, 0], [0, 1], [0, 0], [0, 0]]) features["targets"] = padded @@ -401,7 +406,7 @@ def infer_step(recent_output, recent_logits, unused_loss): common_layers.shape_list(recent_output)[1], :, :] cur_sample = tf.to_int64(tf.expand_dims(cur_sample, axis=1)) samples = tf.concat([recent_output, cur_sample], axis=1) - if not self.hparams.use_eager_mode: + if not context.in_eager_mode(): samples.set_shape([None, None, None, 1]) # Assuming we have one shard for logits. @@ -433,7 +438,7 @@ def infer_step(recent_output, recent_logits, unused_loss): result = initial_output # tensor of shape [batch_size, time, 1, 1, vocab_size] logits = tf.zeros((batch_size, 0, 1, 1, target_modality.top_dimensionality)) - if not self.hparams.use_eager_mode: + if not context.in_eager_mode(): logits.set_shape([None, None, None, None, None]) loss = 0.0 @@ -680,16 +685,17 @@ def sampled_results(): tf.less(tf.random_uniform([]), prob), sampled_results, lambda: (sharded_logits, losses)) - if not self.hparams.use_eager_mode: + if not context.in_eager_mode(): tf.logging.info("This model_fn took %.3f sec." % (time.time() - start_time)) return sharded_logits, losses def call(self, inputs_dict, skip=False, force_full_predict=False): - self._fill_problem_hparams_features(inputs_dict) - sharded_logits, losses = self._model_fn( - inputs_dict, skip=skip, force_full_predict=force_full_predict) - return tf.concat(sharded_logits, 0), losses + with self._var_store.as_default(): + self._fill_problem_hparams_features(inputs_dict) + sharded_logits, losses = self._model_fn( + inputs_dict, skip=skip, force_full_predict=force_full_predict) + return tf.concat(sharded_logits, 0), losses def model_fn_body_sharded(self, sharded_features): """Mixture-of-experts models will override this function. 
@@ -715,7 +721,7 @@ def model_fn_body_sharded(self, sharded_features): _with_timing( self.model_fn_body, "model_fn_body", - silent=self.hparams.use_eager_mode), datashard_to_features) + silent=context.in_eager_mode()), datashard_to_features) if isinstance(output, tuple): losses_sharded = output[1] if isinstance(losses_sharded[0], dict): @@ -1052,3 +1058,17 @@ def _del_dict_nones(d): for k in list(d.keys()): if d[k] is None: del d[k] + + +class DummyVariableStore(object): + + @contextlib.contextmanager + def as_default(self): + yield + + +def create_eager_var_store(): + if context.in_eager_mode(): + return variable_scope.EagerVariableStore() + else: + return DummyVariableStore()
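
The `DummyVariableStore` / `create_eager_var_store()` pair added at the bottom of
t2t_model.py is what lets `infer()` and `call()` wrap their bodies in
`with self._var_store.as_default():` unconditionally: in eager mode the store records
every variable created via `tf.get_variable`, while in graph mode the context manager
is a no-op. Below is a minimal, self-contained sketch of that pattern outside of
T2TModel, using the same TF 1.x internal imports this patch relies on; the `TinyLayer`
class and the variable name `w` are illustrative only and not part of this change.

```
# Sketch of the eager variable-store pattern introduced in this patch.
# Assumes a TF 1.x build where tensorflow.python.eager.context and
# variable_scope.EagerVariableStore exist, as imported by t2t_model.py above.
import contextlib

import tensorflow as tf
from tensorflow.python.eager import context
from tensorflow.python.ops import variable_scope


class DummyVariableStore(object):
  """No-op stand-in so graph-mode code can reuse the same `with` block."""

  @contextlib.contextmanager
  def as_default(self):
    yield


def create_eager_var_store():
  # Only eager mode needs an explicit store to keep track of variables
  # created lazily inside the layer body.
  if context.in_eager_mode():
    return variable_scope.EagerVariableStore()
  return DummyVariableStore()


class TinyLayer(object):
  """Toy layer that, like T2TModel, creates its variable on first call.

  (A real Layer would also manage variable_scope reuse in graph mode;
  this sketch only illustrates the eager-mode store.)
  """

  def __init__(self):
    self._var_store = create_eager_var_store()

  def __call__(self, x):
    with self._var_store.as_default():
      w = tf.get_variable("w", shape=[], initializer=tf.ones_initializer())
      return x * w
```

Because the model now owns its store, the notebook can simply wrap
`translate_model.infer(...)` in `tfe.restore_variables_on_create(ckpt_path)` (as in the
updated `translate()` helper) instead of creating and entering an `EagerVariableStore`
by hand before inference.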