
Commit

Merge pull request tensorflow#924 from h4ck3rm1k3/master
untie
nealwu authored Mar 15, 2017
2 parents 7e465e4 + 8b66790 commit 5d758ef
Showing 16 changed files with 17 additions and 17 deletions.
2 changes: 1 addition & 1 deletion differential_privacy/dp_sgd/dp_optimizer/dp_pca.py
@@ -27,7 +27,7 @@ def ComputeDPPrincipalProjection(data, projection_dims,
Args:
data: the input data, each row is a data vector.
projection_dims: the projection dimension.
-  sanitizer: the sanitizer used for acheiving privacy.
+  sanitizer: the sanitizer used for achieving privacy.
eps_delta: (eps, delta) pair.
sigma: if not None, use noise sigma; otherwise compute it using
eps_delta pair.
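For context, a minimal sketch of the differentially private PCA idea behind ComputeDPPrincipalProjection, assuming the sanitizer's job is to add calibrated Gaussian noise to the covariance before the eigendecomposition (an assumption for illustration, not verified against this file):

```python
# Hedged sketch only: the real function delegates noise addition to the
# sanitizer; here plain Gaussian noise with scale `sigma` stands in for it.
import numpy as np

def dp_principal_projection_sketch(data, projection_dims, sigma, seed=None):
    """Differentially private top principal directions of `data` (rows)."""
    rng = np.random.default_rng(seed)
    cov = data.T.dot(data)                        # d x d unnormalized covariance
    noise = rng.normal(scale=sigma, size=cov.shape)
    noise = np.triu(noise) + np.triu(noise, 1).T  # symmetrize the noise
    _, eigvecs = np.linalg.eigh(cov + noise)      # eigenvalues ascending
    return eigvecs[:, -projection_dims:]          # top `projection_dims` vectors
```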
2 changes: 1 addition & 1 deletion differential_privacy/multiple_teachers/analysis.py
@@ -287,7 +287,7 @@ def main(unused_argv):
if min(eps_list_nm) == eps_list_nm[-1]:
print "Warning: May not have used enough values of l"

-  # Data indpendent bound, as mechanism is
+  # Data independent bound, as mechanism is
# 2*noise_eps DP.
data_ind_log_mgf = np.array([0.0 for _ in l_list])
data_ind_log_mgf += num_examples * np.array(
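The hunk is cut off mid-statement by the diff view. For reference, a hedged illustration of the generic data-independent moment bound for an eps-DP mechanism; the repo's actual bound may be tighter, and the names below are illustrative:

```python
# For an eps0-DP mechanism the privacy loss X satisfies |X| <= eps0, so
# log E[exp(l * X)] <= l * eps0 for each moment order l.
import numpy as np

noise_eps = 0.1
l_list = np.arange(1, 33)                     # moment orders l
per_query_log_mgf = l_list * (2 * noise_eps)  # mechanism here is 2*noise_eps DP

# Log-moments compose additively across queries:
num_queries = 1000
data_ind_log_mgf_sketch = num_queries * per_query_log_mgf
```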
4 changes: 2 additions & 2 deletions differential_privacy/multiple_teachers/deep_cnn.py
@@ -84,7 +84,7 @@ def inference(images, dropout=False):
"""Build the CNN model.
Args:
images: Images returned from distorted_inputs() or inputs().
-  dropout: Boolean controling whether to use dropout or not
+  dropout: Boolean controlling whether to use dropout or not
Returns:
Logits
"""
@@ -194,7 +194,7 @@ def inference_deeper(images, dropout=False):
"""Build a deeper CNN model.
Args:
images: Images returned from distorted_inputs() or inputs().
-  dropout: Boolean controling whether to use dropout or not
+  dropout: Boolean controlling whether to use dropout or not
Returns:
Logits
"""
2 changes: 1 addition & 1 deletion differential_privacy/privacy_accountant/tf/accountant.py
@@ -152,7 +152,7 @@ class MomentsAccountant(object):
We further assume that at each step, the mechanism operates on a random
sample with sampling probability q = batch_size / total_examples. Then
E[exp(L X)] = E[(Pr[M(D)==x] / Pr[M(D')==x])^L]
- By distinguishign two cases of wether D < D' or D' < D, we have
+ By distinguishing two cases of whether D < D' or D' < D, we have
that
E[exp(L X)] <= max (I1, I2)
where
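The derivation is truncated here by the diff view. For context, a hedged sketch of the standard moments-accountant step that turns summed log-moment bounds into an (eps, delta) guarantee (textbook Abadi et al. 2016 math; the function name is illustrative):

```python
# Markov's inequality on exp(l * X) gives Pr[X > eps] <= exp(alpha(l) - l*eps),
# so for a target delta the best epsilon is minimized over moment orders l.
import math

def eps_from_log_moments(log_moments, delta):
    """log_moments: iterable of (l, alpha_total(l)) pairs summed over steps."""
    return min((alpha - math.log(delta)) / l for l, alpha in log_moments)

# Example: eps_from_log_moments([(1, 0.5), (2, 1.2), (4, 3.0)], delta=1e-5)
```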
2 changes: 1 addition & 1 deletion im2txt/im2txt/data/build_mscoco_data.py
@@ -424,7 +424,7 @@ def _load_and_process_metadata(captions_file, image_dir):
(len(id_to_filename), captions_file))

# Process the captions and combine the data into a list of ImageMetadata.
print("Proccessing captions.")
print("Processing captions.")
image_metadata = []
num_captions = 0
for image_id, base_filename in id_to_filename:
2 changes: 1 addition & 1 deletion inception/inception/inception_distributed_train.py
@@ -89,7 +89,7 @@

def train(target, dataset, cluster_spec):
"""Train Inception on a dataset for a number of steps."""
- # Number of workers and parameter servers are infered from the workers and ps
+ # Number of workers and parameter servers are inferred from the workers and ps
# hosts string.
num_workers = len(cluster_spec.as_dict()['worker'])
num_parameter_servers = len(cluster_spec.as_dict()['ps'])
2 changes: 1 addition & 1 deletion inception/inception/inception_eval.py
@@ -77,7 +77,7 @@ def _eval_once(saver, summary_writer, top_1_op, top_5_op, summary_op):
# /my-favorite-path/imagenet_train/model.ckpt-0,
# extract global_step from it.
global_step = ckpt.model_checkpoint_path.split('/')[-1].split('-')[-1]
- print('Succesfully loaded model from %s at step=%s.' %
+ print('Successfully loaded model from %s at step=%s.' %
(ckpt.model_checkpoint_path, global_step))
else:
print('No checkpoint file found')
2 changes: 1 addition & 1 deletion inception/inception/inception_train.py
@@ -290,7 +290,7 @@ def train(dataset):
variable_averages = tf.train.ExponentialMovingAverage(
inception.MOVING_AVERAGE_DECAY, global_step)

- # Another possiblility is to use tf.slim.get_variables().
+ # Another possibility is to use tf.slim.get_variables().
variables_to_average = (tf.trainable_variables() +
tf.moving_average_variables())
variables_averages_op = variable_averages.apply(variables_to_average)
2 changes: 1 addition & 1 deletion inception/inception/slim/ops.py
@@ -15,7 +15,7 @@
"""Contains convenience wrappers for typical Neural Network TensorFlow layers.
Additionally it maintains a collection with update_ops that need to be
- updated after the ops have been computed, for exmaple to update moving means
+ updated after the ops have been computed, for example to update moving means
and moving variances of batch_norm.
Ops that have different behavior during training or eval have an is_training
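As a hedged illustration of the update_ops pattern this docstring describes (this slim variant may use its own collection name; the sketch below uses the stock tf.GraphKeys.UPDATE_OPS and TF1-era APIs):

```python
import tensorflow as tf

x = tf.placeholder(tf.float32, [None, 8])
net = tf.layers.batch_normalization(x, training=True)  # registers update ops
loss = tf.reduce_mean(tf.square(net))

# Run the moving-mean/variance updates together with each training step.
update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
with tf.control_dependencies(update_ops):
    train_op = tf.train.GradientDescentOptimizer(0.1).minimize(loss)
```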
2 changes: 1 addition & 1 deletion namignizer/data_utils.py
@@ -58,7 +58,7 @@ def _letter_to_number(letter):
def namignizer_iterator(names, counts, batch_size, num_steps, epoch_size):
"""Takes a list of names and counts like those output from read_names, and
makes an iterator yielding a batch_size by num_steps array of random names
- separated by an end of name token. The names are choosen randomly according
+ separated by an end of name token. The names are chosen randomly according
to their counts. The batch may end mid-name
Args:
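A hedged sketch of the iterator behavior this docstring describes, with illustrative helper logic (the repo's _letter_to_number encoding and exact batching may differ):

```python
import numpy as np

def name_batches_sketch(names, counts, batch_size, num_steps, epoch_size,
                        end_token=0, seed=None):
    """Yield batch_size x num_steps arrays of encoded random names."""
    rng = np.random.default_rng(seed)
    probs = np.asarray(counts, dtype=float) / np.sum(counts)
    for _ in range(epoch_size):
        batch = np.empty((batch_size, num_steps), dtype=np.int32)
        for row in range(batch_size):
            stream = []
            while len(stream) < num_steps:         # a batch may end mid-name
                name = rng.choice(names, p=probs)  # weighted by counts
                stream.extend(ord(c) - ord('a') + 1 for c in name.lower())
                stream.append(end_token)           # end-of-name token
            batch[row] = stream[:num_steps]
        yield batch
```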
2 changes: 1 addition & 1 deletion namignizer/names.py
@@ -14,7 +14,7 @@
"""A library showing off sequence recognition and generation with the simple
example of names.
- We use recurrent neural nets to learn complex functions able to recogize and
+ We use recurrent neural nets to learn complex functions able to recognize and
generate sequences of a given form. This can be used for natural language
syntax recognition, dynamically generating maps or puzzles and of course
baby name generation.
2 changes: 1 addition & 1 deletion neural_programmer/data_utils.py
@@ -223,7 +223,7 @@ def list_join(a):


def group_by_max(table, number):
- #computes the most frequently occuring entry in a column
+ #computes the most frequently occurring entry in a column
answer = []
for i in range(len(table)):
temp = []
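Based only on the comment above (the function body is truncated here), a hedged guess at the intent, with collections.Counter standing in for the repo's manual counting:

```python
from collections import Counter

def most_frequent_per_column(columns):
    """columns: a list of columns, each a list of entries (an assumption
    about the `table` layout; the real group_by_max may differ)."""
    return [Counter(col).most_common(1)[0][0] for col in columns]

# most_frequent_per_column([[1, 2, 2], ['a', 'a', 'b']]) -> [2, 'a']
```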
2 changes: 1 addition & 1 deletion neural_programmer/model.py
@@ -135,7 +135,7 @@ def question_number_softmax(self, hidden_vectors):
#Attention on question to decide the question number to be passed to comparison ops
def compute_ans(op_embedding, comparison):
op_embedding = tf.expand_dims(op_embedding, 0)
- #dot product of operation embedding with hidden state to the left of the number occurence
+ #dot product of operation embedding with hidden state to the left of the number occurrence
first = tf.transpose(
tf.matmul(op_embedding,
tf.transpose(
2 changes: 1 addition & 1 deletion slim/deployment/model_deploy.py
@@ -304,7 +304,7 @@ def optimize_clones(clones, optimizer,
regularization_losses = None
# Compute the total_loss summing all the clones_losses.
total_loss = tf.add_n(clones_losses, name='total_loss')
- # Sum the gradients accross clones.
+ # Sum the gradients across clones.
grads_and_vars = _sum_clones_gradients(grads_and_vars)
return total_loss, grads_and_vars
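A hedged sketch of the "sum the gradients across clones" step; the real _sum_clones_gradients in model_deploy.py may handle IndexedSlices and device placement differently:

```python
import tensorflow as tf

def sum_clones_gradients_sketch(clone_grads):
    """clone_grads: one [(grad, var), ...] list per clone, variables aligned."""
    summed = []
    for grads_and_vars in zip(*clone_grads):       # group by variable
        grads = [g for g, _ in grads_and_vars if g is not None]
        var = grads_and_vars[0][1]
        if grads:
            summed.append((tf.add_n(grads), var))  # sum across clones
    return summed
```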

2 changes: 1 addition & 1 deletion slim/nets/inception_resnet_v2.py
@@ -191,7 +191,7 @@ def inception_resnet_v2(inputs, num_classes=1001, is_training=True,
end_points['Mixed_6a'] = net
net = slim.repeat(net, 20, block17, scale=0.10)

- # Auxillary tower
+ # Auxiliary tower
with tf.variable_scope('AuxLogits'):
aux = slim.avg_pool2d(net, 5, stride=3, padding='VALID',
scope='Conv2d_1a_3x3')
2 changes: 1 addition & 1 deletion slim/nets/inception_v4.py
@@ -269,7 +269,7 @@ def inception_v4(inputs, num_classes=1001, is_training=True,
reuse: whether or not the network and its variables should be reused. To be
able to reuse 'scope' must be given.
scope: Optional variable_scope.
- create_aux_logits: Whether to include the auxilliary logits.
+ create_aux_logits: Whether to include the auxiliary logits.
Returns:
logits: the logits outputs of the model.