Dataset columns: desc (string, lengths 3 to 26.7k), decl (string, lengths 11 to 7.89k), bodies (string, lengths 8 to 553k).
'Theoretical bound of the coded length given a probability distribution. Args: c: The binary codes. Belong to {0, 1}. p: The probability of: P(code==+1) Returns: The average code length. Note: the average code length can be greater than 1 bit (e.g. when encoding the least likely symbol).'
def _Apply(self, c, p):
  entropy = (((1.0 - c) * tf.log(1.0 - p)) + (c * tf.log(p))) / (-math.log(2))
  entropy = tf.reduce_mean(entropy)
  return entropy
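As a quick sanity check of the formula above, here is a minimal NumPy sketch of the same computation (the example codes and probability are made up):

import numpy as np

def average_code_length(codes, p):
  # -[(1 - c) * log(1 - p) + c * log(p)] / log(2), averaged over the codes.
  bits = -((1.0 - codes) * np.log(1.0 - p) + codes * np.log(p)) / np.log(2.0)
  return bits.mean()

# With P(code == 1) = 0.9, the unlikely symbol 0 costs log2(10) ~ 3.3 bits,
# so the average over [1, 1, 0] is about 1.2 bits.
print(average_code_length(np.array([1.0, 1.0, 0.0]), 0.9))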
'Creates an initializer. Args: dims: Dimension(s) index to compute standard deviation: 1.0 / sqrt(product(shape[dims])) **kwargs: Extra keyword arguments to pass to tf.truncated_normal.'
def __init__(self, dims=(0,), **kwargs):
  if isinstance(dims, (int, long)):
    self._dims = [dims]
  else:
    self._dims = dims
  self._kwargs = kwargs
'Creates an initializer. Args: dims: Dimension(s) index to compute standard deviation: sqrt(scale / product(shape[dims])) scale: A constant scaling for the initialization used as sqrt(scale / product(shape[dims])). **kwargs: Extra keyword arguments to pass to tf.truncated_normal.'
def __init__(self, dims=(0,), scale=2.0, **kwargs):
  if isinstance(dims, (int, long)):
    self._dims = [dims]
  else:
    self._dims = dims
  self._kwargs = kwargs
  self._scale = scale
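For reference, a small sketch of the standard deviations these two initializers imply, assuming a hypothetical conv kernel shape [height, width, in_depth, out_depth]:

import numpy as np

shape = [3, 3, 64, 128]   # example kernel shape, not taken from the code above
dims = (0, 1, 2)          # fan-in dimensions
scale = 2.0

fan_in = np.prod([shape[d] for d in dims])   # 3 * 3 * 64 = 576
stddev_plain = 1.0 / np.sqrt(fan_in)         # RsqrtInitializer: 1 / sqrt(product(shape[dims]))
stddev_scaled = np.sqrt(scale / fan_in)      # scaled variant: sqrt(scale / product(shape[dims]))
print(stddev_plain, stddev_scaled)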
'Always returns True.'
@property def initialized(self):
return True
'Initializes Bias block. The |initializer| parameter has two special cases. 1. If initializer is None, then this block works as a PassThrough. 2. If initializer is a Bias class object, then tf.constant_initializer is used with the stored value. Args: initializer: An initializer for the bias variable. name: Name of this block.'
def __init__(self, initializer=Bias(0), name=None):
  super(BiasAdd, self).__init__(name)
  with self._BlockScope():
    if isinstance(initializer, Bias):
      self._initializer = tf.constant_initializer(value=initializer.value)
    else:
      self._initializer = initializer
    self._bias = None
'Initializes NN block. Args: depth: The depth of the output. bias: An initializer for the bias, or a Bias class object. If None, there will be no bias term for this NN block. See BiasAdd block. act: Optional activation function. If None, no activation is applied. initializer: The initialization method for the matrix weights. linear_block_factory: A function used to create a linear block. name: The name of this block.'
def __init__(self, depth, bias=Bias(0), act=None, initializer=block_util.RsqrtInitializer(), linear_block_factory=(lambda d, i: Linear(d, initializer=i)), name=None):
  super(NN, self).__init__(name)
  with self._BlockScope():
    self._linear_block_factory = linear_block_factory
    self._depth = depth
    self._initializer = initializer
    self._matrices = None
    self._bias = BiasAdd(bias) if bias else PassThrough()
    self._act = act if act else PassThrough()
'Initializes a Conv2DBase block. Arguments: depth: The output depth of the block (i.e. #filters); if negative, the output depth will be set to be the same as the input depth. filter_size: The size of the 2D filter. If it\'s specified as an integer, it\'s going to create a square filter. Otherwise, this is a tuple specifying the height x width of the filter. strides: A tuple specifying the y and x stride. padding: One of the valid padding modes allowed by tf.nn.conv2d, or \'REFLECT\'/\'SYMMETRIC\' for mirror padding. bias: An initializer for the bias, or a Bias class object. If None, there will be no bias in this block. See BiasAdd block. act: Optional activation function applied to the output. atrous_rate: optional input rate for ATrous convolution. If not None, this will be used and the strides will be ignored. conv: The convolution function to use (e.g. tf.nn.conv2d). name: The name for this conv2d op.'
def __init__(self, depth, filter_size, strides, padding, bias=None, act=None, atrous_rate=None, conv=tf.nn.conv2d, name=None):
  super(Conv2DBase, self).__init__(name)
  with self._BlockScope():
    self._act = act if act else PassThrough()
    self._bias = BiasAdd(bias) if bias else PassThrough()
    self._kernel_shape = np.zeros((4,), dtype=np.int32)
    self._kernel_shape[:2] = filter_size
    self._kernel_shape[3] = depth
    self._strides = np.ones((4,), dtype=np.int32)
    self._strides[1:3] = strides
    self._strides = list(self._strides)
    self._padding = padding
    self._kernel = None
    self._conv = conv
    self._atrous_rate = atrous_rate
'Apply the self._conv op. Arguments: x: input tensor. It needs to be a 4D tensor of the form [batch, height, width, channels]. Returns: The output of the convolution of x with the current convolutional kernel. Raises: ValueError: if number of channels is not defined at graph construction.'
def _Apply(self, x):
  input_shape = x.get_shape().with_rank(4)
  input_shape[3:].assert_is_fully_defined()
  if self._kernel is None:
    assert self._kernel_shape[2] == 0, self._kernel_shape
    self._kernel_shape[2] = input_shape[3].value
    if self._kernel_shape[3] < 0:
      self._kernel_shape[3] = self._kernel_shape[2]
    self._kernel = self._CreateKernel(self._kernel_shape, x.dtype)
  x, padding = HandleConvPaddingModes(x, self._padding, self._kernel_shape, self._strides)
  if self._atrous_rate is None:
    x = self._conv(x, self._kernel, strides=self._strides, padding=padding)
  else:
    x = self._conv(x, self._kernel, rate=self._atrous_rate, padding=padding)
  if self._padding != 'VALID':
    height = (1 + (input_shape[1].value - 1) // self._strides[1]) if input_shape[1].value else None
    width = (1 + (input_shape[2].value - 1) // self._strides[2]) if input_shape[2].value else None
    shape = x.get_shape()
    x.set_shape([shape[0], height, width, shape[3]])
  return self._act(self._bias(x))
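The output-shape arithmetic used above for any non-VALID padding reduces to 1 + (size - 1) // stride; a tiny check:

def same_output_size(size, stride):
  return 1 + (size - 1) // stride

print(same_output_size(224, 2))  # 112
print(same_output_size(225, 2))  # 113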
'Initializes a Conv2D block. Arguments: depth: The output depth of the block (i.e., #filters) filter_size: The size of the 2D filter. If it\'s specified as an integer, it\'s going to create a square filter. Otherwise, this is a tuple specifying the height x width of the filter. strides: A tuple specifying the y and x stride. padding: One of the valid padding modes allowed by tf.nn.conv2d, or \'REFLECT\'/\'SYMMETRIC\' for mirror padding. bias: An initializer for the bias, or a Bias class object. If None, there will be no bias in this block. See BiasAdd block. act: Optional activation function applied to the output. initializer: Optional initializer for weights. name: The name for this conv2d op.'
def __init__(self, depth, filter_size, strides, padding, bias=None, act=None, initializer=None, name=None):
  super(Conv2D, self).__init__(depth, filter_size, strides, padding, bias, act, conv=tf.nn.conv2d, name=name)
  with self._BlockScope():
    if initializer is None:
      initializer = block_util.RsqrtInitializer(dims=(0, 1, 2))
    self._initializer = initializer
'Initializes LSTMBase class object. Args: output_shape: List representing the LSTM output shape. This argument does not include batch dimension. For example, if the LSTM output has shape [batch, depth], then pass [depth]. name: Name of this block.'
def __init__(self, output_shape, name):
  super(LSTMBase, self).__init__(name)
  with self._BlockScope():
    self._output_shape = [None] + list(output_shape)
    self._hidden = None
    self._cell = None
'Returns the hidden units of this LSTM.'
@property def hidden(self):
return self._hidden
'Assigns to the hidden units of this LSTM. Args: value: The new value for the hidden units. If None, the hidden units are considered to be filled with zeros.'
@hidden.setter def hidden(self, value):
  if value is not None:
    value.get_shape().assert_is_compatible_with(self._output_shape)
  self._hidden = value
'Returns the cell units of this LSTM.'
@property def cell(self):
return self._cell
'Assigns to the cell units of this LSTM. Args: value: The new value for the cell units. If None, the cell units are considered to be filled with zeros.'
@cell.setter def cell(self, value):
  if value is not None:
    value.get_shape().assert_is_compatible_with(self._output_shape)
  self._cell = value
'Transforms the input units to (4 * depth) units. The forget-gate, input-gate, output-gate, and cell update are computed as f, i, j, o = T(h) + R(x) where h is hidden units, x is input units, and T, R are transforms of h, x, respectively. This method implements R. Note that T is strictly linear, so if the LSTM is going to use a bias, this method must include the bias in the transformation. Subclasses must implement this method. See _Apply() for more details.'
def _TransformInputs(self, _):
raise NotImplementedError()
'Transforms the hidden units to (4 * depth) units. The forget-gate, input-gate, output-gate, and cell update are computed as f, i, j, o = T(h) + R(x) where h is hidden units, x is input units, and T, R are transforms of h, x, respectively. This method implements T in the equation. The method must implement a strictly linear transformation. For example, it may use MatMul or Conv2D, but must not add a bias. This is because when the hidden units are zeros, the LSTM implementation skips calling this method instead of passing zeros to it. Subclasses must implement this method. See _Apply() for more details.'
def _TransformHidden(self, _):
raise NotImplementedError()
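The two transforms above feed a standard LSTM update. The sketch below shows one plausible recombination of the (4 * depth) pre-activations into gates; the actual _Apply() may use a different gate order or bias placement, so treat this only as an illustration of the f, i, j, o = T(h) + R(x) equation:

import tensorflow as tf

def lstm_step(x_transform, h_transform, cell):
  # x_transform = R(x) and h_transform = T(h), each with 4 * depth units.
  pre = x_transform + h_transform
  f, i, j, o = tf.split(pre, 4, axis=-1)
  new_cell = tf.sigmoid(f) * cell + tf.sigmoid(i) * tf.tanh(j)
  new_hidden = tf.sigmoid(o) * tf.tanh(new_cell)
  return new_hidden, new_cell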
'Initialization of the composition operator. Args: block_list: List of blocks.BlockBase that are chained to create a new blocks.BlockBase. name: Name of this block.'
def __init__(self, block_list, name=None):
  super(CompositionOperator, self).__init__(name)
  self._blocks = block_list
'Applies all the blocks successively to the given input tensor.'
def _Apply(self, x):
  h = x
  for layer in self._blocks:
    h = layer(h)
  return h
'Initialization of the parallel exec + concat (Tower). Args: block_list: List of blocks.BlockBase that are chained to create a new blocks.BlockBase. dim: the dimension on which to concat. name: Name of this block.'
def __init__(self, block_list, dim=3, name=None):
  super(TowerOperator, self).__init__(name)
  self._blocks = block_list
  self._concat_dim = dim
'Applies all the blocks to the given input tensor in parallel and concatenates their outputs.'
def _Apply(self, x):
  outputs = [layer(x) for layer in self._blocks]
  return tf.concat(outputs, self._concat_dim)
'Computes the loss used for PTN paper (projection + volume loss).'
def get_loss(self, inputs, outputs):
  g_loss = tf.zeros(dtype=tf.float32, shape=[])
  if self._params.proj_weight:
    g_loss += losses.add_volume_proj_loss(inputs, outputs, self._params.step_size, self._params.proj_weight)
  if self._params.volume_weight:
    g_loss += losses.add_volume_loss(inputs, outputs, 1, self._params.volume_weight)
  slim.summaries.add_scalar_summary(g_loss, 'im2vox_loss', prefix='losses')
  return g_loss
'Aggregate the metrics for voxel generation model. Args: inputs: Input dictionary of the voxel generation model. outputs: Output dictionary returned by the voxel generation model. Returns: names_to_values: metrics->values (dict). names_to_updates: metrics->ops (dict).'
def get_metrics(self, inputs, outputs):
  names_to_values = dict()
  names_to_updates = dict()
  tmp_values, tmp_updates = metrics.add_volume_iou_metrics(inputs, outputs)
  names_to_values.update(tmp_values)
  names_to_updates.update(tmp_updates)
  for name, value in names_to_values.iteritems():
    slim.summaries.add_scalar_summary(value, name, prefix='eval', print_summary=True)
  return names_to_values, names_to_updates
'Function called by TF to save the prediction periodically.'
def write_disk_grid(self, global_step, log_dir, input_images, gt_projs, pred_projs, input_voxels=None, output_voxels=None):
  summary_freq = self._params.save_every

  def write_grid(input_images, gt_projs, pred_projs, global_step, input_voxels, output_voxels):
    'Native python function to call for writing images to files.'
    grid = _build_image_grid(input_images, gt_projs, pred_projs, input_voxels=input_voxels, output_voxels=output_voxels)
    if global_step % summary_freq == 0:
      img_path = os.path.join(log_dir, '%s.jpg' % str(global_step))
      utils.save_image(grid, img_path)
    return grid

  save_op = tf.py_func(write_grid, [input_images, gt_projs, pred_projs, global_step, input_voxels, output_voxels], [tf.uint8], 'write_grid')[0]
  slim.summaries.add_image_summary(tf.expand_dims(save_op, axis=0), name='grid_vis')
  return save_op
'Get the 4x4 Perspective Transformation matrix used for PTN.'
def get_transform_matrix(self, view_out):
  num_views = self._params.num_views
  focal_length = self._params.focal_length
  focal_range = self._params.focal_range
  phi = 30
  theta_interval = 360.0 / num_views
  theta = theta_interval * view_out
  camera_matrix = np.zeros((4, 4), dtype=np.float32)
  intrinsic_matrix = np.eye(4, dtype=np.float32)
  extrinsic_matrix = np.eye(4, dtype=np.float32)
  sin_phi = np.sin(float(phi) / 180.0 * np.pi)
  cos_phi = np.cos(float(phi) / 180.0 * np.pi)
  sin_theta = np.sin(float(-theta) / 180.0 * np.pi)
  cos_theta = np.cos(float(-theta) / 180.0 * np.pi)
  rotation_azimuth = np.zeros((3, 3), dtype=np.float32)
  rotation_azimuth[0, 0] = cos_theta
  rotation_azimuth[2, 2] = cos_theta
  rotation_azimuth[0, 2] = -sin_theta
  rotation_azimuth[2, 0] = sin_theta
  rotation_azimuth[1, 1] = 1.0
  rotation_elevation = np.zeros((3, 3), dtype=np.float32)
  rotation_elevation[0, 0] = cos_phi
  rotation_elevation[0, 1] = sin_phi
  rotation_elevation[1, 0] = -sin_phi
  rotation_elevation[1, 1] = cos_phi
  rotation_elevation[2, 2] = 1.0
  rotation_matrix = np.matmul(rotation_azimuth, rotation_elevation)
  displacement = np.zeros((3, 1), dtype=np.float32)
  displacement[0, 0] = float(focal_length) + float(focal_range) / 2.0
  displacement = np.matmul(rotation_matrix, displacement)
  extrinsic_matrix[0:3, 0:3] = rotation_matrix
  extrinsic_matrix[0:3, 3:4] = -displacement
  intrinsic_matrix[2, 2] = 1.0 / float(focal_length)
  intrinsic_matrix[1, 1] = 1.0 / float(focal_length)
  camera_matrix = np.matmul(extrinsic_matrix, intrinsic_matrix)
  return camera_matrix
'Gets dictionaries from metrics to value `Tensors` & update `Tensors`.'
@abc.abstractmethod def get_metrics(self, inputs, outputs):
pass
'Loads data for a specified dataset and split.'
def get_inputs(self, dataset_dir, dataset_name, split_name, batch_size, image_size, vox_size, is_training=True):
  del image_size, vox_size
  with tf.variable_scope('data_loading_%s/%s' % (dataset_name, split_name)):
    common_queue_min = 64
    common_queue_capacity = 256
    num_readers = 4
    inputs = input_generator.get(dataset_dir, dataset_name, split_name, shuffle=is_training, num_readers=num_readers, common_queue_min=common_queue_min, common_queue_capacity=common_queue_capacity)
    images, voxels = tf.train.batch([inputs['image'], inputs['voxel']], batch_size=batch_size, num_threads=8, capacity=8 * batch_size, name='batching_queues/%s/%s' % (dataset_name, split_name))
    outputs = dict()
    outputs['images'] = images
    outputs['voxels'] = voxels
    outputs['num_samples'] = inputs['num_samples']
  return outputs
'Selects the subset of viewpoints to train on.'
def preprocess(self, raw_inputs, step_size):
  quantity, num_views = raw_inputs['images'].get_shape().as_list()[:2]
  inputs = dict()
  inputs['voxels'] = raw_inputs['voxels']
  for k in xrange(step_size):
    inputs['images_%d' % (k + 1)] = []
    inputs['matrix_%d' % (k + 1)] = []
  for n in xrange(quantity):
    selected_views = np.random.choice(num_views, step_size, replace=False)
    for k in xrange(step_size):
      view_selected = selected_views[k]
      inputs['images_%d' % (k + 1)].append(raw_inputs['images'][n, view_selected, :, :, :])
      tf_matrix = self.get_transform_matrix(view_selected)
      inputs['matrix_%d' % (k + 1)].append(tf_matrix)
  for k in xrange(step_size):
    inputs['images_%d' % (k + 1)] = tf.stack(inputs['images_%d' % (k + 1)])
    inputs['matrix_%d' % (k + 1)] = tf.stack(inputs['matrix_%d' % (k + 1)])
  return inputs
'Initialization assignment operator function used while training.'
def get_init_fn(self, scopes):
  if not self._params.init_model:
    return None
  is_trainable = lambda x: x in tf.trainable_variables()
  var_list = []
  for scope in scopes:
    var_list.extend(filter(is_trainable, tf.contrib.framework.get_model_variables(scope)))
  init_assign_op, init_feed_dict = slim.assign_from_checkpoint(self._params.init_model, var_list)

  def init_assign_function(sess):
    sess.run(init_assign_op, init_feed_dict)

  return init_assign_function
'Train operation function for the given scope, used while training.'
def get_train_op_for_scope(self, loss, optimizer, scopes):
  is_trainable = lambda x: x in tf.trainable_variables()
  var_list = []
  update_ops = []
  for scope in scopes:
    var_list.extend(filter(is_trainable, tf.contrib.framework.get_model_variables(scope)))
    update_ops.extend(tf.get_collection(tf.GraphKeys.UPDATE_OPS, scope))
  return slim.learning.create_train_op(loss, optimizer, update_ops=update_ops, variables_to_train=var_list, clip_gradient_norm=self._params.clip_gradient_norm)
'Function called by TF to save the prediction periodically.'
def write_disk_grid(self, global_step, log_dir, input_images, gt_projs, pred_projs, pred_voxels=None):
  summary_freq = self._params.save_every

  def write_grid(input_images, gt_projs, pred_projs, pred_voxels, global_step):
    'Native python function to call for writing images to files.'
    grid = _build_image_grid(input_images, gt_projs, pred_projs, pred_voxels)
    if global_step % summary_freq == 0:
      img_path = os.path.join(log_dir, '%s.jpg' % str(global_step))
      utils.save_image(grid, img_path)
      with open(os.path.join(log_dir, 'pred_voxels_%s' % str(global_step)), 'w') as fout:
        np.save(fout, pred_voxels)
      with open(os.path.join(log_dir, 'input_images_%s' % str(global_step)), 'w') as fout:
        np.save(fout, input_images)
    return grid

  py_func_args = [input_images, gt_projs, pred_projs, pred_voxels, global_step]
  save_grid_op = tf.py_func(write_grid, py_func_args, [tf.uint8], 'write_grid')[0]
  slim.summaries.add_image_summary(tf.expand_dims(save_grid_op, axis=0), name='grid_vis')
  return save_grid_op
'Round robin the gpu device. (Reserve last gpu for expensive op).'
def _next_device(self):
  if self._num_gpus == 0:
    return ''
  dev = '/gpu:%d' % self._cur_gpu
  if self._num_gpus > 1:
    self._cur_gpu = (self._cur_gpu + 1) % (self._num_gpus - 1)
  return dev
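A toy trace of the round-robin policy above (hypothetical num_gpus=4): devices 0..2 are cycled and the last GPU is left free for the expensive op:

num_gpus, cur = 4, 0
for _ in range(5):
  print('/gpu:%d' % cur)          # gpu:0, gpu:1, gpu:2, gpu:0, gpu:1
  cur = (cur + 1) % (num_gpus - 1)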
'Inputs to be fed to the graph.'
def _add_placeholders(self):
  hps = self._hps
  self._articles = tf.placeholder(tf.int32, [hps.batch_size, hps.enc_timesteps], name='articles')
  self._abstracts = tf.placeholder(tf.int32, [hps.batch_size, hps.dec_timesteps], name='abstracts')
  self._targets = tf.placeholder(tf.int32, [hps.batch_size, hps.dec_timesteps], name='targets')
  self._article_lens = tf.placeholder(tf.int32, [hps.batch_size], name='article_lens')
  self._abstract_lens = tf.placeholder(tf.int32, [hps.batch_size], name='abstract_lens')
  self._loss_weights = tf.placeholder(tf.float32, [hps.batch_size, hps.dec_timesteps], name='loss_weights')
'Sets self._train_op, op to run for training.'
def _add_train_op(self):
  hps = self._hps
  self._lr_rate = tf.maximum(hps.min_lr, tf.train.exponential_decay(hps.lr, self.global_step, 30000, 0.98))
  tvars = tf.trainable_variables()
  with tf.device(self._get_gpu(self._num_gpus - 1)):
    grads, global_norm = tf.clip_by_global_norm(tf.gradients(self._loss, tvars), hps.max_grad_norm)
  tf.summary.scalar('global_norm', global_norm)
  optimizer = tf.train.GradientDescentOptimizer(self._lr_rate)
  tf.summary.scalar('learning rate', self._lr_rate)
  self._train_op = optimizer.apply_gradients(zip(grads, tvars), global_step=self.global_step, name='train_step')
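The learning-rate schedule above evaluates to lr * 0.98 ** (global_step / 30000.0), floored at min_lr; a small sketch with made-up hyperparameter values:

def decayed_lr(lr, global_step, min_lr):
  return max(min_lr, lr * 0.98 ** (global_step / 30000.0))

print(decayed_lr(0.15, 0, 0.01))       # 0.15
print(decayed_lr(0.15, 300000, 0.01))  # 0.15 * 0.98**10, roughly 0.123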
'Return the top states from encoder for decoder. Args: sess: tensorflow session. enc_inputs: encoder inputs of shape [batch_size, enc_timesteps]. enc_len: encoder input length of shape [batch_size] Returns: enc_top_states: The top level encoder states. dec_in_state: The decoder layer initial state.'
def encode_top_state(self, sess, enc_inputs, enc_len):
  results = sess.run([self._enc_top_states, self._dec_in_state], feed_dict={self._articles: enc_inputs, self._article_lens: enc_len})
  return results[0], results[1][0]
'Return the topK results and new decoder states.'
def decode_topk(self, sess, latest_tokens, enc_top_states, dec_init_states):
  feed = {
      self._enc_top_states: enc_top_states,
      self._dec_in_state: np.squeeze(np.array(dec_init_states)),
      self._abstracts: np.transpose(np.array([latest_tokens])),
      self._abstract_lens: np.ones([len(dec_init_states)], np.int32)}
  results = sess.run([self._topk_ids, self._topk_log_probs, self._dec_out_state], feed_dict=feed)
  ids, probs, states = results[0], results[1], results[2]
  new_states = [s for s in states]
  return ids, probs, new_states
'Hypothesis constructor. Args: tokens: start tokens for decoding. log_prob: log prob of the start tokens, usually 1. state: decoder initial states.'
def __init__(self, tokens, log_prob, state):
  self.tokens = tokens
  self.log_prob = log_prob
  self.state = state
'Extend the hypothesis with result from latest step. Args: token: latest token from decoding. log_prob: log prob of the latest decoded tokens. new_state: decoder output state. Fed to the decoder for next step. Returns: New Hypothesis with the results from latest step.'
def Extend(self, token, log_prob, new_state):
return Hypothesis((self.tokens + [token]), (self.log_prob + log_prob), new_state)
'Creates BeamSearch object. Args: model: Seq2SeqAttentionModel. beam_size: int. start_token: int, id of the token to start decoding with end_token: int, id of the token that completes an hypothesis max_steps: int, upper limit on the size of the hypothesis'
def __init__(self, model, beam_size, start_token, end_token, max_steps):
  self._model = model
  self._beam_size = beam_size
  self._start_token = start_token
  self._end_token = end_token
  self._max_steps = max_steps
'Performs beam search for decoding. Args: sess: tf.Session, session enc_inputs: ndarray of shape (enc_length, 1), the document ids to encode enc_seqlen: ndarray of shape (1), the length of the sequence Returns: hyps: list of Hypothesis, the best hypotheses found by beam search, ordered by score'
def BeamSearch(self, sess, enc_inputs, enc_seqlen):
  enc_top_states, dec_in_state = self._model.encode_top_state(sess, enc_inputs, enc_seqlen)
  hyps = [Hypothesis([self._start_token], 0.0, dec_in_state)] * self._beam_size
  results = []
  steps = 0
  while steps < self._max_steps and len(results) < self._beam_size:
    latest_tokens = [h.latest_token for h in hyps]
    states = [h.state for h in hyps]
    topk_ids, topk_log_probs, new_states = self._model.decode_topk(sess, latest_tokens, enc_top_states, states)
    all_hyps = []
    num_beam_source = 1 if steps == 0 else len(hyps)
    for i in xrange(num_beam_source):
      h, ns = hyps[i], new_states[i]
      for j in xrange(self._beam_size * 2):
        all_hyps.append(h.Extend(topk_ids[i, j], topk_log_probs[i, j], ns))
    hyps = []
    for h in self._BestHyps(all_hyps):
      if h.latest_token == self._end_token:
        results.append(h)
      else:
        hyps.append(h)
      if len(hyps) == self._beam_size or len(results) == self._beam_size:
        break
    steps += 1
  if steps == self._max_steps:
    results.extend(hyps)
  return self._BestHyps(results)
'Sort the hyps based on log probs and length. Args: hyps: A list of hypothesis. Returns: hyps: A list of sorted hypothesis in reverse log_prob order.'
def _BestHyps(self, hyps):
  if FLAGS.normalize_by_length:
    return sorted(hyps, key=lambda h: h.log_prob / len(h.tokens), reverse=True)
  else:
    return sorted(hyps, key=lambda h: h.log_prob, reverse=True)
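A tiny illustration of why the length-normalized ordering differs from the raw one (hypothetical hypotheses):

hyps = [{'log_prob': -4.0, 'tokens': [1, 2, 3, 4]},   # average -1.0 per token
        {'log_prob': -3.0, 'tokens': [1, 2]}]          # average -1.5 per token
by_raw = sorted(hyps, key=lambda h: h['log_prob'], reverse=True)
by_norm = sorted(hyps, key=lambda h: h['log_prob'] / len(h['tokens']), reverse=True)
print(by_raw[0]['tokens'], by_norm[0]['tokens'])  # [1, 2] ranks first raw; [1, 2, 3, 4] ranks first normalized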
'Batcher constructor. Args: data_path: tf.Example filepattern. vocab: Vocabulary. hps: Seq2SeqAttention model hyperparameters. article_key: article feature key in tf.Example. abstract_key: abstract feature key in tf.Example. max_article_sentences: Max number of sentences used from article. max_abstract_sentences: Max number of sentences used from abstract. bucketing: Whether to bucket articles of similar length into the same batch. truncate_input: Whether to truncate input that is too long. Alternative is to discard such examples.'
def __init__(self, data_path, vocab, hps, article_key, abstract_key, max_article_sentences, max_abstract_sentences, bucketing=True, truncate_input=False):
  self._data_path = data_path
  self._vocab = vocab
  self._hps = hps
  self._article_key = article_key
  self._abstract_key = abstract_key
  self._max_article_sentences = max_article_sentences
  self._max_abstract_sentences = max_abstract_sentences
  self._bucketing = bucketing
  self._truncate_input = truncate_input
  self._input_queue = Queue.Queue(QUEUE_NUM_BATCH * self._hps.batch_size)
  self._bucket_input_queue = Queue.Queue(QUEUE_NUM_BATCH)
  self._input_threads = []
  for _ in xrange(16):
    self._input_threads.append(Thread(target=self._FillInputQueue))
    self._input_threads[-1].daemon = True
    self._input_threads[-1].start()
  self._bucketing_threads = []
  for _ in xrange(4):
    self._bucketing_threads.append(Thread(target=self._FillBucketInputQueue))
    self._bucketing_threads[-1].daemon = True
    self._bucketing_threads[-1].start()
  self._watch_thread = Thread(target=self._WatchThreads)
  self._watch_thread.daemon = True
  self._watch_thread.start()
'Returns a batch of inputs for seq2seq attention model. Returns: enc_batch: A batch of encoder inputs [batch_size, hps.enc_timesteps]. dec_batch: A batch of decoder inputs [batch_size, hps.dec_timesteps]. target_batch: A batch of targets [batch_size, hps.dec_timesteps]. enc_input_len: encoder input lengths of the batch. dec_input_len: decoder input lengths of the batch. loss_weights: weights for loss function, 1 if not padded, 0 if padded. origin_articles: original article words. origin_abstracts: original abstract words.'
def NextBatch(self):
  enc_batch = np.zeros((self._hps.batch_size, self._hps.enc_timesteps), dtype=np.int32)
  enc_input_lens = np.zeros(self._hps.batch_size, dtype=np.int32)
  dec_batch = np.zeros((self._hps.batch_size, self._hps.dec_timesteps), dtype=np.int32)
  dec_output_lens = np.zeros(self._hps.batch_size, dtype=np.int32)
  target_batch = np.zeros((self._hps.batch_size, self._hps.dec_timesteps), dtype=np.int32)
  loss_weights = np.zeros((self._hps.batch_size, self._hps.dec_timesteps), dtype=np.float32)
  origin_articles = ['None'] * self._hps.batch_size
  origin_abstracts = ['None'] * self._hps.batch_size
  buckets = self._bucket_input_queue.get()
  for i in xrange(self._hps.batch_size):
    (enc_inputs, dec_inputs, targets, enc_input_len, dec_output_len, article, abstract) = buckets[i]
    origin_articles[i] = article
    origin_abstracts[i] = abstract
    enc_input_lens[i] = enc_input_len
    dec_output_lens[i] = dec_output_len
    enc_batch[i, :] = enc_inputs[:]
    dec_batch[i, :] = dec_inputs[:]
    target_batch[i, :] = targets[:]
    for j in xrange(dec_output_len):
      loss_weights[i][j] = 1
  return (enc_batch, dec_batch, target_batch, enc_input_lens, dec_output_lens, loss_weights, origin_articles, origin_abstracts)
'Fill input queue with ModelInput.'
def _FillInputQueue(self):
  start_id = self._vocab.WordToId(data.SENTENCE_START)
  end_id = self._vocab.WordToId(data.SENTENCE_END)
  pad_id = self._vocab.WordToId(data.PAD_TOKEN)
  input_gen = self._TextGenerator(data.ExampleGen(self._data_path))
  while True:
    (article, abstract) = six.next(input_gen)
    article_sentences = [sent.strip() for sent in data.ToSentences(article, include_token=False)]
    abstract_sentences = [sent.strip() for sent in data.ToSentences(abstract, include_token=False)]
    enc_inputs = []
    dec_inputs = [start_id]
    for i in xrange(min(self._max_article_sentences, len(article_sentences))):
      enc_inputs += data.GetWordIds(article_sentences[i], self._vocab)
    for i in xrange(min(self._max_abstract_sentences, len(abstract_sentences))):
      dec_inputs += data.GetWordIds(abstract_sentences[i], self._vocab)
    if (len(enc_inputs) < self._hps.min_input_len or len(dec_inputs) < self._hps.min_input_len):
      tf.logging.warning('Drop an example - too short.\nenc:%d\ndec:%d', len(enc_inputs), len(dec_inputs))
      continue
    if not self._truncate_input:
      if (len(enc_inputs) > self._hps.enc_timesteps or len(dec_inputs) > self._hps.dec_timesteps):
        tf.logging.warning('Drop an example - too long.\nenc:%d\ndec:%d', len(enc_inputs), len(dec_inputs))
        continue
    else:
      if len(enc_inputs) > self._hps.enc_timesteps:
        enc_inputs = enc_inputs[:self._hps.enc_timesteps]
      if len(dec_inputs) > self._hps.dec_timesteps:
        dec_inputs = dec_inputs[:self._hps.dec_timesteps]
    targets = dec_inputs[1:]
    targets.append(end_id)
    enc_input_len = len(enc_inputs)
    dec_output_len = len(targets)
    while len(enc_inputs) < self._hps.enc_timesteps:
      enc_inputs.append(pad_id)
    while len(dec_inputs) < self._hps.dec_timesteps:
      dec_inputs.append(end_id)
    while len(targets) < self._hps.dec_timesteps:
      targets.append(end_id)
    element = ModelInput(enc_inputs, dec_inputs, targets, enc_input_len, dec_output_len, ' '.join(article_sentences), ' '.join(abstract_sentences))
    self._input_queue.put(element)
'Fill bucketed batches into the bucket_input_queue.'
def _FillBucketInputQueue(self):
  while True:
    inputs = []
    for _ in xrange(self._hps.batch_size * BUCKET_CACHE_BATCH):
      inputs.append(self._input_queue.get())
    if self._bucketing:
      inputs = sorted(inputs, key=lambda inp: inp.enc_len)
    batches = []
    for i in xrange(0, len(inputs), self._hps.batch_size):
      batches.append(inputs[i:i + self._hps.batch_size])
    shuffle(batches)
    for b in batches:
      self._bucket_input_queue.put(b)
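A toy illustration of the effect of bucketing (made-up encoder lengths, batch size 2): sorting by length before slicing into batches means each batch pads to a much smaller maximum:

lengths = [120, 35, 118, 30, 40, 119]
batch_size = 2
unsorted_batches = [lengths[i:i + batch_size] for i in range(0, len(lengths), batch_size)]
sorted_batches = [sorted(lengths)[i:i + batch_size] for i in range(0, len(lengths), batch_size)]
print([max(b) for b in unsorted_batches])  # [120, 118, 119]: every batch pads to ~120
print([max(b) for b in sorted_batches])    # [35, 118, 120]: the short batch pads far less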
'Watch the daemon input threads and restart if dead.'
def _WatchThreads(self):
  while True:
    time.sleep(60)
    input_threads = []
    for t in self._input_threads:
      if t.is_alive():
        input_threads.append(t)
      else:
        tf.logging.error('Found input thread dead.')
        new_t = Thread(target=self._FillInputQueue)
        input_threads.append(new_t)
        input_threads[-1].daemon = True
        input_threads[-1].start()
    self._input_threads = input_threads
    bucketing_threads = []
    for t in self._bucketing_threads:
      if t.is_alive():
        bucketing_threads.append(t)
      else:
        tf.logging.error('Found bucketing thread dead.')
        new_t = Thread(target=self._FillBucketInputQueue)
        bucketing_threads.append(new_t)
        bucketing_threads[-1].daemon = True
        bucketing_threads[-1].start()
    self._bucketing_threads = bucketing_threads
'Generates article and abstract text from tf.Example.'
def _TextGenerator(self, example_gen):
  while True:
    e = six.next(example_gen)
    try:
      article_text = self._GetExFeatureText(e, self._article_key)
      abstract_text = self._GetExFeatureText(e, self._abstract_key)
    except ValueError:
      tf.logging.error('Failed to get article or abstract from example')
      continue
    yield (article_text, abstract_text)
'Extract text for a feature from tf.Example. Args: ex: tf.Example. key: key of the feature to be extracted. Returns: feature: a feature text extracted.'
def _GetExFeatureText(self, ex, key):
return ex.features.feature[key].bytes_list.value[0]
'Writes the reference and decoded outputs to RKV files. Args: reference: The human (correct) result. decode: The machine-generated result'
def Write(self, reference, decode):
  self._ref_file.write('output=%s\n' % reference)
  self._decode_file.write('output=%s\n' % decode)
  self._cnt += 1
  if self._cnt % DECODE_IO_FLUSH_INTERVAL == 0:
    self._ref_file.flush()
    self._decode_file.flush()
'Resets the output files. Must be called once before Write().'
def ResetFiles(self):
  if self._ref_file:
    self._ref_file.close()
  if self._decode_file:
    self._decode_file.close()
  timestamp = int(time.time())
  self._ref_file = open(os.path.join(self._outdir, 'ref%d' % timestamp), 'w')
  self._decode_file = open(os.path.join(self._outdir, 'decode%d' % timestamp), 'w')
'Beam search decoding. Args: model: The seq2seq attentional model. batch_reader: The batch data reader. hps: Hyperparameters. vocab: Vocabulary'
def __init__(self, model, batch_reader, hps, vocab):
  self._model = model
  self._model.build_graph()
  self._batch_reader = batch_reader
  self._hps = hps
  self._vocab = vocab
  self._saver = tf.train.Saver()
  self._decode_io = DecodeIO(FLAGS.decode_dir)
'Decoding loop for long running process.'
def DecodeLoop(self):
  sess = tf.Session(config=tf.ConfigProto(allow_soft_placement=True))
  step = 0
  while step < FLAGS.max_decode_steps:
    time.sleep(DECODE_LOOP_DELAY_SECS)
    if not self._Decode(self._saver, sess):
      continue
    step += 1
'Restore a checkpoint and decode it. Args: saver: Tensorflow checkpoint saver. sess: Tensorflow session. Returns: True on success, False otherwise.'
def _Decode(self, saver, sess):
  ckpt_state = tf.train.get_checkpoint_state(FLAGS.log_root)
  if not (ckpt_state and ckpt_state.model_checkpoint_path):
    tf.logging.info('No model to decode yet at %s', FLAGS.log_root)
    return False
  tf.logging.info('checkpoint path %s', ckpt_state.model_checkpoint_path)
  ckpt_path = os.path.join(FLAGS.log_root, os.path.basename(ckpt_state.model_checkpoint_path))
  tf.logging.info('renamed checkpoint path %s', ckpt_path)
  saver.restore(sess, ckpt_path)
  self._decode_io.ResetFiles()
  for _ in xrange(FLAGS.decode_batches_per_ckpt):
    (article_batch, _, _, article_lens, _, _, origin_articles, origin_abstracts) = self._batch_reader.NextBatch()
    for i in xrange(self._hps.batch_size):
      bs = beam_search.BeamSearch(self._model, self._hps.batch_size, self._vocab.WordToId(data.SENTENCE_START), self._vocab.WordToId(data.SENTENCE_END), self._hps.dec_timesteps)
      article_batch_cp = article_batch.copy()
      article_batch_cp[:] = article_batch[i:i + 1]
      article_lens_cp = article_lens.copy()
      article_lens_cp[:] = article_lens[i:i + 1]
      best_beam = bs.BeamSearch(sess, article_batch_cp, article_lens_cp)[0]
      decode_output = [int(t) for t in best_beam.tokens[1:]]
      self._DecodeBatch(origin_articles[i], origin_abstracts[i], decode_output)
  return True
'Convert id to words and writing results. Args: article: The original article string. abstract: The human (correct) abstract string. output_ids: The abstract word ids output by machine.'
def _DecodeBatch(self, article, abstract, output_ids):
  decoded_output = ' '.join(data.Ids2Words(output_ids, self._vocab))
  end_p = decoded_output.find(data.SENTENCE_END, 0)
  if end_p != -1:
    decoded_output = decoded_output[:end_p]
  tf.logging.info('article: %s', article)
  tf.logging.info('abstract: %s', abstract)
  tf.logging.info('decoded: %s', decoded_output)
  self._decode_io.Write(abstract, decoded_output.strip())
'Initialize vocabulary. Args: filename: Vocabulary file name.'
def __init__(self, filename):
  self._id_to_word = []
  self._word_to_id = {}
  self._unk = -1
  self._bos = -1
  self._eos = -1
  with tf.gfile.Open(filename) as f:
    idx = 0
    for line in f:
      word_name = line.strip()
      if word_name == '<S>':
        self._bos = idx
      elif word_name == '</S>':
        self._eos = idx
      elif word_name == '<UNK>':
        self._unk = idx
      if word_name == '!!!MAXTERMID':
        continue
      self._id_to_word.append(word_name)
      self._word_to_id[word_name] = idx
      idx += 1
'Convert a list of ids to a sentence, with space inserted.'
def decode(self, cur_ids):
return ' '.join([self.id_to_word(cur_id) for cur_id in cur_ids])
'Convert a sentence to a list of ids, with special tokens added.'
def encode(self, sentence):
  word_ids = [self.word_to_id(cur_word) for cur_word in sentence.split()]
  return np.array([self.bos] + word_ids + [self.eos], dtype=np.int32)
'Initialize LM1BDataset reader. Args: filepattern: Dataset file pattern. vocab: Vocabulary.'
def __init__(self, filepattern, vocab):
  self._vocab = vocab
  self._all_shards = tf.gfile.Glob(filepattern)
  tf.logging.info('Found %d shards at %s', len(self._all_shards), filepattern)
'Randomly select a file and read it.'
def _load_random_shard(self):
return self._load_shard(random.choice(self._all_shards))
'Read one file and convert to ids. Args: shard_name: file path. Returns: list of (id, char_id, global_word_id) tuples.'
def _load_shard(self, shard_name):
  tf.logging.info('Loading data from: %s', shard_name)
  with tf.gfile.Open(shard_name) as f:
    sentences = f.readlines()
  chars_ids = [self.vocab.encode_chars(sentence) for sentence in sentences]
  ids = [self.vocab.encode(sentence) for sentence in sentences]
  global_word_ids = []
  current_idx = 0
  for word_ids in ids:
    current_size = len(word_ids) - 1
    cur_ids = np.arange(current_idx, current_idx + current_size)
    global_word_ids.append(cur_ids)
    current_idx += current_size
  tf.logging.info('Loaded %d words.', current_idx)
  tf.logging.info('Finished loading')
  return zip(ids, chars_ids, global_word_ids)
'Writes a Markdown-formatted version of this document to file `f`. Args: f: The output file.'
def write_markdown_to_file(self, f):
raise NotImplementedError('Document.WriteToFile')
'Creates a new Index. Args: module_to_name: Dictionary mapping modules to short names. members: Dictionary mapping member name to (fullname, member). filename_to_library_map: A list of (filename, Library) pairs. The order corresponds to the order in which the libraries appear in the index. path_prefix: Prefix to add to links in the index.'
def __init__(self, module_to_name, members, filename_to_library_map, path_prefix):
  self._module_to_name = module_to_name
  self._members = members
  self._filename_to_library_map = filename_to_library_map
  self._path_prefix = path_prefix
'Writes this index to file `f`. The output is formatted as an unordered list. Each list element contains the title of the library, followed by a list of symbols in that library hyperlinked to the corresponding anchor in that library. Args: f: The output file.'
def write_markdown_to_file(self, f):
  print('---', file=f)
  print('---', file=f)
  print('<!-- This file is machine generated: DO NOT EDIT! -->', file=f)
  print('', file=f)
  print('# TensorFlow Python reference documentation', file=f)
  print('', file=f)
  fullname_f = lambda name: self._members[name][0]
  anchor_f = lambda name: _get_anchor(self._module_to_name, fullname_f(name))
  for filename, library in self._filename_to_library_map:
    sorted_names = sorted(library.mentioned, key=lambda x: (str.lower(x), x))
    member_names = [n for n in sorted_names if n in self._members]
    full_filename = self._path_prefix + filename
    links = ['[`%s`](%s#%s)' % (name, full_filename[:-3], anchor_f(name)) for name in member_names]
    if links:
      print('* **[%s](%s)**:' % (library.title, full_filename[:-3]), file=f)
      for link in links:
        print(' * %s' % link, file=f)
      print('', file=f)
'Creates a new Library. Args: title: A human-readable title for the library. module: Module to pull high level docstring from (for table of contents, list of Ops to document, etc.). module_to_name: Dictionary mapping modules to short names. members: Dictionary mapping member name to (fullname, member). documented: Set of documented names to update. exclude_symbols: A list of specific symbols to exclude. prefix: A string to include at the beginning of the page.'
def __init__(self, title, module, module_to_name, members, documented, exclude_symbols=(), prefix=None):
  self._title = title
  self._module = module
  self._module_to_name = module_to_name
  self._members = dict(members)
  self._exclude_symbols = frozenset(exclude_symbols)
  documented.update(exclude_symbols)
  self._documented = documented
  self._mentioned = set()
  self._prefix = prefix or ''
'The human-readable title for this library.'
@property def title(self):
return self._title
'Set of names mentioned in this library.'
@property def mentioned(self):
return self._mentioned
'Set of excluded symbols.'
@property def exclude_symbols(self):
return self._exclude_symbols
'Returns True if this member should be included in the document.'
def _should_include_member(self, name, member):
  if _always_drop_symbol_re.match(name):
    return False
  if name in self._exclude_symbols:
    return False
  return True
'Returns the list of modules imported from `module`.'
def get_imported_modules(self, module):
  for name, member in inspect.getmembers(module):
    if inspect.ismodule(member):
      yield (name, member)
'Returns the list of class members to document in `cls`. This function filters the class member to ONLY return those defined by the class. It drops the inherited ones. Args: cls_name: Qualified name of `cls`. cls: An inspect object of type \'class\'. Yields: name, member tuples.'
def get_class_members(self, cls_name, cls):
  for name, member in inspect.getmembers(cls):
    is_method = inspect.ismethod(member) or inspect.isfunction(member)
    if not (is_method or isinstance(member, property)):
      continue
    if (is_method and member.__name__ == '__init__') or self._should_include_member(name, member):
      yield (name, ('%s.%s' % (cls_name, name), member))
'Given a function, returns a string representing its args.'
def _generate_signature_for_function(self, func):
  args_list = []
  argspec = inspect.getargspec(func)
  first_arg_with_default = len(argspec.args or []) - len(argspec.defaults or [])
  for arg in argspec.args[:first_arg_with_default]:
    if arg == 'self':
      continue
    args_list.append(arg)
  if argspec.varargs == 'args' and argspec.keywords == 'kwds':
    original_func = func.__closure__[0].cell_contents
    return self._generate_signature_for_function(original_func)
  if argspec.defaults:
    for arg, default in zip(argspec.args[first_arg_with_default:], argspec.defaults):
      if callable(default):
        args_list.append('%s=%s' % (arg, default.__name__))
      else:
        args_list.append('%s=%r' % (arg, default))
  if argspec.varargs:
    args_list.append('*' + argspec.varargs)
  if argspec.keywords:
    args_list.append('**' + argspec.keywords)
  return '(' + ', '.join(args_list) + ')'
'Remove indenting. We follow Python\'s convention and remove the minimum indent of the lines after the first, see: https://www.python.org/dev/peps/pep-0257/#handling-docstring-indentation preserving relative indentation. Args: docstring: A docstring. Returns: A list of strings, one per line, with the minimum indent stripped.'
def _remove_docstring_indent(self, docstring):
  docstring = docstring or ''
  lines = docstring.strip().split('\n')
  min_indent = len(docstring)
  for l in lines[1:]:
    l = l.rstrip()
    if l:
      i = 0
      while i < len(l) and l[i] == ' ':
        i += 1
      if i < min_indent:
        min_indent = i
  for i in range(1, len(lines)):
    l = lines[i].rstrip()
    if len(l) >= min_indent:
      l = l[min_indent:]
    lines[i] = l
  return lines
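The standard-library inspect.cleandoc implements the same PEP 257 convention and can be used to sanity-check the helper above on a small example (cleandoc additionally strips leading and trailing blank lines):

import inspect

def demo():
  """First line.

      Indented body line.
        More indented line.
  """

print(inspect.cleandoc(demo.__doc__))
# First line.
#
# Indented body line.
#   More indented line.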
'Formats the given `docstring` as Markdown and prints it to `f`.'
def _print_formatted_docstring(self, docstring, f):
  lines = self._remove_docstring_indent(docstring)
  i = 0

  def _at_start_of_section():
    'Returns the header if lines[i] is at start of a docstring section.'
    l = lines[i]
    match = _section_re.match(l)
    if match and i + 1 < len(lines) and lines[i + 1].startswith(' '):
      return match.group(1)
    else:
      return None

  while i < len(lines):
    l = lines[i]
    section_header = _at_start_of_section()
    if section_header:
      if i == 0 or lines[i - 1]:
        print('', file=f)
      print('##### ' + section_header + ':', file=f)
      print('', file=f)
      i += 1
      outputting_list = False
      while i < len(lines):
        l = lines[i]
        if _at_start_of_section():
          break
        match = _arg_re.match(l)
        if match:
          if not outputting_list:
            print('', file=f)
          outputting_list = True
          suffix = l[len(match.group()):].lstrip()
          print('* <b>`' + match.group(1) + '`</b>: ' + suffix, file=f)
        else:
          outputting_list &= l.startswith(' ')
          print(l, file=f)
        i += 1
    else:
      print(l, file=f)
      i += 1
'Prints the given function to `f`.'
def _print_function(self, f, prefix, fullname, func):
  heading = prefix + ' `' + fullname
  if not isinstance(func, property):
    heading += self._generate_signature_for_function(func)
  heading += '` {#%s}' % _get_anchor(self._module_to_name, fullname)
  print(heading, file=f)
  print('', file=f)
  self._print_formatted_docstring(inspect.getdoc(func), f)
  print('', file=f)
'Print `member` to `f`.'
def _write_member_markdown_to_file(self, f, prefix, name, member):
  if (inspect.isfunction(member) or inspect.ismethod(member) or isinstance(member, property)):
    print('- - -', file=f)
    print('', file=f)
    self._print_function(f, prefix, name, member)
    print('', file=f)
  elif inspect.isclass(member):
    print('- - -', file=f)
    print('', file=f)
    print('%s `class %s` {#%s}' % (prefix, name, _get_anchor(self._module_to_name, name)), file=f)
    print('', file=f)
    self._write_class_markdown_to_file(f, name, member)
    print('', file=f)
  else:
    raise RuntimeError('Member %s has unknown type %s' % (name, type(member)))
'Write the class doc to `f`. Args: f: File to write to. name: name to use. cls: class object.'
def _write_class_markdown_to_file(self, f, name, cls):
  methods = dict(self.get_class_members(name, cls))
  num_methods = len(methods)
  try:
    self._write_docstring_markdown_to_file(f, '####', inspect.getdoc(cls), methods, {})
  except ValueError as e:
    raise ValueError(str(e) + ' in class `%s`' % cls.__name__)
  any_method_called_out = len(methods) != num_methods
  if any_method_called_out:
    other_methods = {n: m for n, m in methods.items() if n in cls.__dict__}
    if other_methods:
      print('\n#### Other Methods', file=f)
  else:
    other_methods = methods
  for name in sorted(other_methods):
    self._write_member_markdown_to_file(f, '####', *other_methods[name])
'Prints this library to file `f`. Args: f: File to write to. Returns: Dictionary of documented members.'
def write_markdown_to_file(self, f):
  print('---', file=f)
  print('---', file=f)
  print('<!-- This file is machine generated: DO NOT EDIT! -->', file=f)
  print('', file=f)
  print('#', self._title, file=f)
  if self._prefix:
    print(self._prefix, file=f)
  print('[TOC]', file=f)
  print('', file=f)
  if self._module is not None:
    self._write_module_markdown_to_file(f, self._module)
'Writes the leftover members to `f`. Args: f: File to write to. catch_all: If true, document all missing symbols from any module. Otherwise, document missing symbols from just this module.'
def write_other_members(self, f, catch_all=False):
  if catch_all:
    names = self._members.items()
  else:
    names = inspect.getmembers(self._module)
  leftovers = []
  for name, _ in names:
    if name in self._members and name not in self._documented:
      leftovers.append(name)
  if leftovers:
    print('%s: undocumented members: %d' % (self._title, len(leftovers)))
    print('\n## Other Functions and Classes', file=f)
    for name in sorted(leftovers):
      print(' %s' % name)
      self._documented.add(name)
      self._mentioned.add(name)
      self._write_member_markdown_to_file(f, '###', *self._members[name])
'Generate an error if there are leftover members.'
def assert_no_leftovers(self):
  leftovers = []
  for name in self._members.keys():
    if name in self._members and name not in self._documented:
      leftovers.append(name)
  if leftovers:
    raise RuntimeError('%s: undocumented members: %s' % (self._title, ', '.join(leftovers)))
'Return a tensor that constructs adversarial examples for the given input. Generate uses tf.py_func in order to operate over tensors. :param x: (required) A tensor with the inputs. :param y: (optional) A tensor with the true labels for an untargeted attack. If None (and y_target is None) then use the original labels the classifier assigns. :param y_target: (optional) A tensor with the target labels for a targeted attack. :param nb_classes: The number of classes the model has. :param confidence: Confidence of adversarial examples: higher produces examples with larger l2 distortion, but more strongly classified as adversarial. :param batch_size: Number of attacks to run simultaneously. :param learning_rate: The learning rate for the attack algorithm. Smaller values produce better results but are slower to converge. :param binary_search_steps: The number of times we perform binary search to find the optimal tradeoff constant between the norm of the perturbation and the confidence of the classification. :param max_iterations: The maximum number of iterations. Setting this to a larger value will produce lower distortion results. Using only a few iterations requires a larger learning rate, and will produce larger distortion results. :param abort_early: If true, allows early aborts if gradient descent is unable to make progress (i.e., gets stuck in a local minimum). :param initial_const: The initial tradeoff constant to use to tune the relative importance of the size of the perturbation and the confidence of classification. If binary_search_steps is large, the initial constant is not important. A smaller value of this constant gives lower distortion results. :param clip_min: (optional float) Minimum input component value :param clip_max: (optional float) Maximum input component value'
def __init__(self, sess, model, batch_size, confidence, targeted, learning_rate, binary_search_steps, max_iterations, abort_early, initial_const, clip_min, clip_max, num_labels, shape):
  self.sess = sess
  self.TARGETED = targeted
  self.LEARNING_RATE = learning_rate
  self.MAX_ITERATIONS = max_iterations
  self.BINARY_SEARCH_STEPS = binary_search_steps
  self.ABORT_EARLY = abort_early
  self.CONFIDENCE = confidence
  self.initial_const = initial_const
  self.batch_size = batch_size
  self.clip_min = clip_min
  self.clip_max = clip_max
  self.model = model
  self.repeat = binary_search_steps >= 10
  self.shape = shape = tuple([batch_size] + list(shape))
  modifier = tf.Variable(np.zeros(shape, dtype=np.float32))
  self.timg = tf.Variable(np.zeros(shape), dtype=tf.float32, name=u'timg')
  self.tlab = tf.Variable(np.zeros((batch_size, num_labels)), dtype=tf.float32, name=u'tlab')
  self.const = tf.Variable(np.zeros(batch_size), dtype=tf.float32, name=u'const')
  self.assign_timg = tf.placeholder(tf.float32, shape, name=u'assign_timg')
  self.assign_tlab = tf.placeholder(tf.float32, (batch_size, num_labels), name=u'assign_tlab')
  self.assign_const = tf.placeholder(tf.float32, [batch_size], name=u'assign_const')
  self.newimg = (tf.tanh(modifier + self.timg) + 1) / 2
  self.newimg = self.newimg * (clip_max - clip_min) + clip_min
  self.output = model.get_logits(self.newimg)
  self.other = (tf.tanh(self.timg) + 1) / 2 * (clip_max - clip_min) + clip_min
  self.l2dist = tf.reduce_sum(tf.square(self.newimg - self.other), list(range(1, len(shape))))
  real = tf.reduce_sum(self.tlab * self.output, 1)
  other = tf.reduce_max((1 - self.tlab) * self.output - self.tlab * 10000, 1)
  if self.TARGETED:
    loss1 = tf.maximum(0.0, other - real + self.CONFIDENCE)
  else:
    loss1 = tf.maximum(0.0, real - other + self.CONFIDENCE)
  self.loss2 = tf.reduce_sum(self.l2dist)
  self.loss1 = tf.reduce_sum(self.const * loss1)
  self.loss = self.loss1 + self.loss2
  start_vars = set(x.name for x in tf.global_variables())
  optimizer = tf.train.AdamOptimizer(self.LEARNING_RATE)
  self.train = optimizer.minimize(self.loss, var_list=[modifier])
  end_vars = tf.global_variables()
  new_vars = [x for x in end_vars if x.name not in start_vars]
  self.setup = []
  self.setup.append(self.timg.assign(self.assign_timg))
  self.setup.append(self.tlab.assign(self.assign_tlab))
  self.setup.append(self.const.assign(self.assign_const))
  self.init = tf.variables_initializer(var_list=[modifier] + new_vars)
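The tanh change of variables built above keeps every candidate image inside the [clip_min, clip_max] box without explicit clipping; a minimal NumPy sketch of that mapping (the values are arbitrary):

import numpy as np

def to_image_space(w, timg, clip_min=0.0, clip_max=1.0):
  x = (np.tanh(w + timg) + 1) / 2               # always in (0, 1)
  return x * (clip_max - clip_min) + clip_min   # rescaled into [clip_min, clip_max]

w = np.array([-10.0, 0.0, 10.0])
print(to_image_space(w, np.zeros(3)))  # ~[0.0, 0.5, 1.0], never outside the box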
'Perform the L_2 attack on the given images for the given targets. If self.targeted is true, then the targets represent the target labels. If self.targeted is false, then the targets are the original class labels.'
def attack(self, imgs, targets):
  r = []
  for i in range(0, len(imgs), self.batch_size):
    r.extend(self.attack_batch(imgs[i:i + self.batch_size], targets[i:i + self.batch_size]))
  return np.array(r)
'Run the attack on a batch of images and labels.'
def attack_batch(self, imgs, labs):
  def compare(x, y):
    if not isinstance(x, (float, int, np.int64)):
      x = np.copy(x)
      if self.TARGETED:
        x[y] -= self.CONFIDENCE
      else:
        x[y] += self.CONFIDENCE
      x = np.argmax(x)
    if self.TARGETED:
      return x == y
    else:
      return x != y

  batch_size = self.batch_size
  oimgs = np.clip(imgs, self.clip_min, self.clip_max)
  imgs = (imgs - self.clip_min) / (self.clip_max - self.clip_min)
  imgs = np.clip(imgs, 0, 1)
  imgs = (imgs * 2) - 1
  imgs = np.arctanh(imgs * 0.999999)
  lower_bound = np.zeros(batch_size)
  CONST = np.ones(batch_size) * self.initial_const
  upper_bound = np.ones(batch_size) * 10000000000.0
  o_bestl2 = [10000000000.0] * batch_size
  o_bestscore = [-1] * batch_size
  o_bestattack = np.copy(oimgs)
  for outer_step in range(self.BINARY_SEARCH_STEPS):
    self.sess.run(self.init)
    batch = imgs[:batch_size]
    batchlab = labs[:batch_size]
    bestl2 = [10000000000.0] * batch_size
    bestscore = [-1] * batch_size
    if self.repeat and outer_step == self.BINARY_SEARCH_STEPS - 1:
      CONST = upper_bound
    self.sess.run(self.setup, {self.assign_timg: batch, self.assign_tlab: batchlab, self.assign_const: CONST})
    prev = 1000000.0
    for iteration in range(self.MAX_ITERATIONS):
      (_, l, l2s, scores, nimg) = self.sess.run([self.train, self.loss, self.l2dist, self.output, self.newimg])
      if self.ABORT_EARLY and iteration % (self.MAX_ITERATIONS // 10) == 0:
        if l > prev * 0.9999:
          break
        prev = l
      for e, (l2, sc, ii) in enumerate(zip(l2s, scores, nimg)):
        lab = np.argmax(batchlab[e])
        if l2 < bestl2[e] and compare(sc, lab):
          bestl2[e] = l2
          bestscore[e] = np.argmax(sc)
        if l2 < o_bestl2[e] and compare(sc, lab):
          o_bestl2[e] = l2
          o_bestscore[e] = np.argmax(sc)
          o_bestattack[e] = ii
    for e in range(batch_size):
      if compare(bestscore[e], np.argmax(batchlab[e])) and bestscore[e] != -1:
        upper_bound[e] = min(upper_bound[e], CONST[e])
        if upper_bound[e] < 1000000000.0:
          CONST[e] = (lower_bound[e] + upper_bound[e]) / 2
      else:
        lower_bound[e] = max(lower_bound[e], CONST[e])
        if upper_bound[e] < 1000000000.0:
          CONST[e] = (lower_bound[e] + upper_bound[e]) / 2
        else:
          CONST[e] *= 10
  o_bestl2 = np.array(o_bestl2)
  return o_bestattack
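The outer loop above is a per-example binary search over the tradeoff constant: success lowers the upper bound, failure raises the lower bound (or multiplies the constant by 10 while no upper bound is known). A toy trace with made-up outcomes:

lo, hi, c = 0.0, 1e10, 1e-2
for success in [False, False, True, True, False]:
  if success:
    hi = min(hi, c)
    c = (lo + hi) / 2
  else:
    lo = max(lo, c)
    c = (lo + hi) / 2 if hi < 1e9 else c * 10
  print(lo, hi, c)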
':param model: An instance of the Model class. :param back: The backend to use. Either \'tf\' (default) or \'th\'. :param sess: The tf session to run graphs in (use None for Theano)'
def __init__(self, model, back='tf', sess=None):
  if not (back == 'tf' or back == 'th'):
    raise ValueError("Backend argument must either be 'tf' or 'th'.")
  if back == 'th' and sess is not None:
    raise Exception('A session should not be provided when using th.')
  if not isinstance(model, Model):
    if hasattr(model, '__call__'):
      warnings.warn('CleverHans support for supplying a callable instead of an instance of the Model class is deprecated and will be dropped on 2018-01-11.')
    else:
      raise ValueError('The model argument should be an instance of the Model class.')
  if back == 'th':
    warnings.warn('CleverHans support for Theano is deprecated and will be dropped on 2017-11-08.')
  self.model = model
  self.back = back
  self.sess = sess
  self.graphs = {}
  self.feedable_kwargs = {}
  self.structural_kwargs = []
'Generate the attack\'s symbolic graph for adversarial examples. This method should be overridden in any child class that implements an attack that is expressible symbolically. Otherwise, it will wrap the numerical implementation as a symbolic operator. :param x: The model\'s symbolic inputs. :param **kwargs: optional parameters used by child classes. :return: A symbolic representation of the adversarial examples.'
def generate(self, x, **kwargs):
  if self.back == 'th':
    raise NotImplementedError('Theano version not implemented.')
  error = 'Sub-classes must implement generate.'
  raise NotImplementedError(error)
'Generate adversarial examples and return them as a NumPy array. Sub-classes *should not* implement this method unless they must perform special handling of arguments. :param x_val: A NumPy array with the original inputs. :param **kwargs: optional parameters used by child classes. :return: A NumPy array holding the adversarial examples.'
def generate_np(self, x_val, **kwargs):
  if self.back == 'th':
    raise NotImplementedError('Theano version not implemented.')
  if self.sess is None:
    raise ValueError('Cannot use `generate_np` when no `sess` was provided')
  fixed = dict((k, v) for (k, v) in kwargs.items() if k in self.structural_kwargs)
  feedable = dict((k, v) for (k, v) in kwargs.items() if k in self.feedable_kwargs)
  if len(fixed) + len(feedable) < len(kwargs):
    warnings.warn('Supplied extra keyword arguments that are not used in the graph computation. They have been ignored.')
  if not all(isinstance(value, collections.Hashable) for value in fixed.values()):
    hash_key = None
  else:
    hash_key = tuple(sorted(fixed.items()))
  if hash_key not in self.graphs:
    self.construct_graph(fixed, feedable, x_val, hash_key)
  x, new_kwargs, x_adv = self.graphs[hash_key]
  feed_dict = {x: x_val}
  for name in feedable:
    feed_dict[new_kwargs[name]] = feedable[name]
  return self.sess.run(x_adv, feed_dict)
'Take in a dictionary of parameters and applies attack-specific checks before saving them as attributes. :param params: a dictionary of attack-specific parameters :return: True when parsing was successful'
def parse_params(self, params=None):
return True
'Create a FastGradientMethod instance.'
def __init__(self, model, back='tf', sess=None):
  super(FastGradientMethod, self).__init__(model, back, sess)
  self.feedable_kwargs = {'eps': np.float32, 'y': np.float32, 'y_target': np.float32, 'clip_min': np.float32, 'clip_max': np.float32}
  self.structural_kwargs = ['ord']
  if not isinstance(self.model, Model):
    self.model = CallableModelWrapper(self.model, 'probs')
'Generate symbolic graph for adversarial examples and return. :param x: The model\'s symbolic inputs. :param eps: (optional float) attack step size (input variation) :param ord: (optional) Order of the norm (mimics NumPy). Possible values: np.inf, 1 or 2. :param y: (optional) A tensor with the model labels. Only provide this parameter if you\'d like to use true labels when crafting adversarial samples. Otherwise, model predictions are used as labels to avoid the "label leaking" effect (explained in this paper: https://arxiv.org/abs/1611.01236). Default is None. Labels should be one-hot-encoded. :param y_target: (optional) A tensor with the labels to target. Leave y_target=None if y is also set. Labels should be one-hot-encoded. :param clip_min: (optional float) Minimum input component value :param clip_max: (optional float) Maximum input component value'
def generate(self, x, **kwargs):
assert self.parse_params(**kwargs) if (self.back == 'tf'): from .attacks_tf import fgm else: from .attacks_th import fgm if (self.y is not None): y = self.y else: y = self.y_target return fgm(x, self.model.get_probs(x), y=y, eps=self.eps, ord=self.ord, clip_min=self.clip_min, clip_max=self.clip_max, targeted=(self.y_target is not None))
'Takes in a dictionary of parameters and applies attack-specific checks before saving them as attributes. Attack-specific parameters: :param eps: (optional float) attack step size (input variation) :param ord: (optional) Order of the norm (mimics NumPy). Possible values: np.inf, 1 or 2. :param y: (optional) A tensor with the model labels. Only provide this parameter if you\'d like to use true labels when crafting adversarial samples. Otherwise, model predictions are used as labels to avoid the "label leaking" effect (explained in this paper: https://arxiv.org/abs/1611.01236). Default is None. Labels should be one-hot-encoded. :param y_target: (optional) A tensor with the labels to target. Leave y_target=None if y is also set. Labels should be one-hot-encoded. :param clip_min: (optional float) Minimum input component value :param clip_max: (optional float) Maximum input component value'
def parse_params(self, eps=0.3, ord=np.inf, y=None, y_target=None, clip_min=None, clip_max=None, **kwargs):
self.eps = eps self.ord = ord self.y = y self.y_target = y_target self.clip_min = clip_min self.clip_max = clip_max if ((self.y is not None) and (self.y_target is not None)): raise ValueError('Must not set both y and y_target') if (self.ord not in [np.inf, int(1), int(2)]): raise ValueError('Norm order must be either np.inf, 1, or 2.') if ((self.back == 'th') and (self.ord != np.inf)): raise NotImplementedError('The only FastGradientMethod norm implemented for Theano is np.inf.') return True
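A minimal end-to-end usage sketch of the class above, assuming the CleverHans 2.x API and a TF1-style session; simple_probs is a hypothetical stand-in model (a fixed linear layer plus softmax), not a trained classifier.

# Hedged usage sketch: FastGradientMethod with a toy model.
import numpy as np
import tensorflow as tf
from cleverhans.model import CallableModelWrapper
from cleverhans.attacks import FastGradientMethod

def simple_probs(x):
    # Toy fixed "model": each input dimension votes for one of 10 classes.
    w = tf.one_hot(tf.range(784) % 10, 10)
    return tf.nn.softmax(tf.matmul(x, w))

sess = tf.Session()
fgsm = FastGradientMethod(CallableModelWrapper(simple_probs, 'probs'), sess=sess)
x_val = np.random.rand(4, 784).astype(np.float32)
x_adv = fgsm.generate_np(x_val, eps=0.3, clip_min=0.0, clip_max=1.0)

Since eps, clip_min and clip_max are feedable kwargs, later calls with different values reuse the cached graph; changing the structural ord kwarg would build a new one.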
'Create a BasicIterativeMethod instance.'
def __init__(self, model, back='tf', sess=None):
super(BasicIterativeMethod, self).__init__(model, back, sess) self.feedable_kwargs = {'eps': np.float32, 'eps_iter': np.float32, 'y': np.float32, 'y_target': np.float32, 'clip_min': np.float32, 'clip_max': np.float32} self.structural_kwargs = ['ord', 'nb_iter'] if (not isinstance(self.model, Model)): self.model = CallableModelWrapper(self.model, 'probs')
'Generate symbolic graph for adversarial examples and return. :param x: The model\'s symbolic inputs. :param eps: (required float) maximum distortion of adversarial example compared to original input :param eps_iter: (required float) step size for each attack iteration :param nb_iter: (required int) Number of attack iterations. :param y: (optional) A tensor with the model labels. :param y_target: (optional) A tensor with the labels to target. Leave y_target=None if y is also set. Labels should be one-hot-encoded. :param ord: (optional) Order of the norm (mimics Numpy). Possible values: np.inf, 1 or 2. :param clip_min: (optional float) Minimum input component value :param clip_max: (optional float) Maximum input component value'
def generate(self, x, **kwargs):
import tensorflow as tf assert self.parse_params(**kwargs) eta = 0 model_preds = self.model.get_probs(x) preds_max = tf.reduce_max(model_preds, 1, keep_dims=True) if (self.y_target is not None): y = self.y_target targeted = True elif (self.y is not None): y = self.y targeted = False else: y = tf.to_float(tf.equal(model_preds, preds_max)) targeted = False y_kwarg = ('y_target' if targeted else 'y') fgm_params = {'eps': self.eps_iter, y_kwarg: y, 'ord': self.ord} for i in range(self.nb_iter): FGM = FastGradientMethod(self.model, back=self.back, sess=self.sess) eta = (FGM.generate((x + eta), **fgm_params) - x) if (self.ord == np.inf): eta = tf.clip_by_value(eta, (- self.eps), self.eps) elif (self.ord in [1, 2]): reduc_ind = list(xrange(1, len(eta.get_shape()))) if (self.ord == 1): norm = tf.reduce_sum(tf.abs(eta), reduction_indices=reduc_ind, keep_dims=True) elif (self.ord == 2): norm = tf.sqrt(tf.reduce_sum(tf.square(eta), reduction_indices=reduc_ind, keep_dims=True)) eta = ((eta * self.eps) / norm) adv_x = (x + eta) if ((self.clip_min is not None) and (self.clip_max is not None)): adv_x = tf.clip_by_value(adv_x, self.clip_min, self.clip_max) return adv_x
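The loop above re-projects the accumulated perturbation eta after every FGM step: elementwise clipping for the infinity norm, rescaling by the L1 or L2 norm otherwise (the rescaling is applied unconditionally, mirroring the TF code). A standalone NumPy sketch of that step, not the attack code itself:

# NumPy illustration of the eta projection used in the loop above.
import numpy as np

def project(eta, eps, ord):
    if ord == np.inf:
        return np.clip(eta, -eps, eps)
    axes = tuple(range(1, eta.ndim))
    if ord == 1:
        norm = np.sum(np.abs(eta), axis=axes, keepdims=True)
    else:  # ord == 2
        norm = np.sqrt(np.sum(np.square(eta), axis=axes, keepdims=True))
    return eta * eps / norm  # rescales even if the norm is already below eps

eta = np.array([[0.5, -2.0], [0.1, 0.2]])
print(project(eta, eps=0.3, ord=np.inf))  # clip each entry to [-0.3, 0.3]
print(project(eta, eps=0.3, ord=2))       # rescale each row to L2 norm 0.3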
'Takes in a dictionary of parameters and applies attack-specific checks before saving them as attributes. Attack-specific parameters: :param eps: (required float) maximum distortion of adversarial example compared to original input :param eps_iter: (required float) step size for each attack iteration :param nb_iter: (required int) Number of attack iterations. :param y: (optional) A tensor with the model labels. :param y_target: (optional) A tensor with the labels to target. Leave y_target=None if y is also set. Labels should be one-hot-encoded. :param ord: (optional) Order of the norm (mimics Numpy). Possible values: np.inf, 1 or 2. :param clip_min: (optional float) Minimum input component value :param clip_max: (optional float) Maximum input component value'
def parse_params(self, eps=0.3, eps_iter=0.05, nb_iter=10, y=None, ord=np.inf, clip_min=None, clip_max=None, y_target=None, **kwargs):
self.eps = eps self.eps_iter = eps_iter self.nb_iter = nb_iter self.y = y self.y_target = y_target self.ord = ord self.clip_min = clip_min self.clip_max = clip_max if ((self.y is not None) and (self.y_target is not None)): raise ValueError('Must not set both y and y_target') if (self.ord not in [np.inf, 1, 2]): raise ValueError('Norm order must be either np.inf, 1, or 2.') if (self.back == 'th'): error_string = 'BasicIterativeMethod is not implemented in Theano' raise NotImplementedError(error_string) return True
'Create a SaliencyMapMethod instance.'
def __init__(self, model, back='tf', sess=None):
super(SaliencyMapMethod, self).__init__(model, back, sess) if (not isinstance(self.model, Model)): self.model = CallableModelWrapper(self.model, 'probs') if (self.back == 'th'): error = 'Theano version of SaliencyMapMethod not implemented.' raise NotImplementedError(error) import tensorflow as tf self.feedable_kwargs = {'y_target': tf.float32} self.structural_kwargs = ['theta', 'gamma', 'nb_classes', 'clip_max', 'clip_min']
'Generate symbolic graph for adversarial examples and return. :param x: The model\'s symbolic inputs. :param theta: (optional float) Perturbation introduced to modified components (can be positive or negative) :param gamma: (optional float) Maximum percentage of perturbed features :param nb_classes: (optional int) Number of model output classes :param clip_min: (optional float) Minimum component value for clipping :param clip_max: (optional float) Maximum component value for clipping :param y_target: (optional) Target tensor if the attack is targeted'
def generate(self, x, **kwargs):
import tensorflow as tf from .attacks_tf import jacobian_graph, jsma_batch assert self.parse_params(**kwargs) preds = self.model.get_probs(x) grads = jacobian_graph(preds, x, self.nb_classes) if (self.y_target is not None): def jsma_wrap(x_val, y_target): return jsma_batch(self.sess, x, preds, grads, x_val, self.theta, self.gamma, self.clip_min, self.clip_max, self.nb_classes, y_target=y_target) wrap = tf.py_func(jsma_wrap, [x, self.y_target], tf.float32) else: def jsma_wrap(x_val): return jsma_batch(self.sess, x, preds, grads, x_val, self.theta, self.gamma, self.clip_min, self.clip_max, self.nb_classes, y_target=None) wrap = tf.py_func(jsma_wrap, [x], tf.float32) return wrap
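Because jsma_batch operates on NumPy arrays, the body above wraps it with tf.py_func so the attack can still be consumed as a symbolic tensor. A toy sketch of that wrapping pattern, where numpy_attack is a stand-in for jsma_batch rather than the real attack:

# Toy illustration of the tf.py_func wrapping pattern used above.
import numpy as np
import tensorflow as tf

def numpy_attack(x_val):
    # Stand-in for jsma_batch: add fixed-magnitude noise at the NumPy level.
    return (x_val + 0.1 * np.sign(np.random.randn(*x_val.shape))).astype(np.float32)

x = tf.placeholder(tf.float32, shape=(None, 4))
adv = tf.py_func(numpy_attack, [x], tf.float32)

with tf.Session() as sess:
    print(sess.run(adv, feed_dict={x: np.zeros((2, 4), dtype=np.float32)}))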
'Takes in a dictionary of parameters and applies attack-specific checks before saving them as attributes. Attack-specific parameters: :param theta: (optional float) Perturbation introduced to modified components (can be positive or negative) :param gamma: (optional float) Maximum percentage of perturbed features :param nb_classes: (optional int) Number of model output classes :param clip_min: (optional float) Minimum component value for clipping :param clip_max: (optional float) Maximum component value for clipping :param y_target: (optional) Target tensor if the attack is targeted'
def parse_params(self, theta=1.0, gamma=np.inf, nb_classes=10, clip_min=0.0, clip_max=1.0, y_target=None, **kwargs):
self.theta = theta self.gamma = gamma self.nb_classes = nb_classes self.clip_min = clip_min self.clip_max = clip_max self.y_target = y_target return True
'Generate symbolic graph for adversarial examples and return. :param x: The model\'s symbolic inputs. :param eps: (optional float) the epsilon (input variation parameter) :param num_iterations: (optional) the number of iterations :param xi: (optional float) the finite difference parameter :param clip_min: (optional float) Minimum input component value :param clip_max: (optional float) Maximum input component value'
def generate(self, x, **kwargs):
assert self.parse_params(**kwargs) return vatm(self.model, x, self.model.get_logits(x), eps=self.eps, num_iterations=self.num_iterations, xi=self.xi, clip_min=self.clip_min, clip_max=self.clip_max)
'Takes in a dictionary of parameters and applies attack-specific checks before saving them as attributes. Attack-specific parameters: :param eps: (optional float) the epsilon (input variation parameter) :param num_iterations: (optional) the number of iterations :param xi: (optional float) the finite difference parameter :param clip_min: (optional float) Minimum input component value :param clip_max: (optional float) Maximum input component value'
def parse_params(self, eps=2.0, num_iterations=1, xi=1e-06, clip_min=None, clip_max=None, **kwargs):
self.eps = eps self.num_iterations = num_iterations self.xi = xi self.clip_min = clip_min self.clip_max = clip_max return True
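A hedged usage sketch of the two records above; it assumes the enclosing class is CleverHans's VirtualAdversarialMethod (the vatm call suggests so) and uses a hypothetical logits-producing stand-in model.

# Hedged usage sketch: build the symbolic virtual adversarial examples.
import numpy as np
import tensorflow as tf
from cleverhans.model import CallableModelWrapper
from cleverhans.attacks import VirtualAdversarialMethod

def simple_logits(x):
    # Toy fixed "model": each input dimension votes for one of 10 classes.
    return tf.matmul(x, tf.one_hot(tf.range(784) % 10, 10))

sess = tf.Session()
x = tf.placeholder(tf.float32, shape=(None, 784))
vat = VirtualAdversarialMethod(CallableModelWrapper(simple_logits, 'logits'), sess=sess)
adv = vat.generate(x, eps=2.0, num_iterations=1, xi=1e-6, clip_min=0.0, clip_max=1.0)
x_adv = sess.run(adv, feed_dict={x: np.random.rand(4, 784).astype(np.float32)})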
'Return a tensor that constructs adversarial examples for the given input. Generate uses tf.py_func in order to operate over tensors. :param x: (required) A tensor with the inputs. :param y: (optional) A tensor with the true labels for an untargeted attack. If None (and y_target is None) then use the original labels the classifier assigns. :param y_target: (optional) A tensor with the target labels for a targeted attack. :param nb_classes: The number of classes the model has. :param confidence: Confidence of adversarial examples: higher produces examples with larger l2 distortion, but more strongly classified as adversarial. :param batch_size: Number of attacks to run simultaneously. :param learning_rate: The learning rate for the attack algorithm. Smaller values produce better results but are slower to converge. :param binary_search_steps: The number of times we perform binary search to find the optimal tradeoff constant between the norm of the perturbation and the confidence of the classification. :param max_iterations: The maximum number of iterations. Setting this to a larger value will produce lower distortion results. Using only a few iterations requires a larger learning rate, and will produce larger distortion results. :param abort_early: If true, allows early aborts if gradient descent is unable to make progress (i.e., gets stuck in a local minimum). :param initial_const: The initial tradeoff constant to use to tune the relative importance of the size of the perturbation and the confidence of classification. If binary_search_steps is large, the initial constant is not important. A smaller value of this constant gives lower distortion results. :param clip_min: (optional float) Minimum input component value :param clip_max: (optional float) Maximum input component value'
def generate(self, x, **kwargs):
import tensorflow as tf from .attacks_tf import CarliniWagnerL2 as CWL2 self.parse_params(**kwargs) attack = CWL2(self.sess, self.model, self.batch_size, self.confidence, ('y_target' in kwargs), self.learning_rate, self.binary_search_steps, self.max_iterations, self.abort_early, self.initial_const, self.clip_min, self.clip_max, self.nb_classes, x.get_shape().as_list()[1:]) if (('y' in kwargs) and ('y_target' in kwargs)): raise ValueError("Can not set both 'y' and 'y_target'.") elif ('y' in kwargs): labels = kwargs['y'] elif ('y_target' in kwargs): labels = kwargs['y_target'] else: preds = self.model.get_probs(x) preds_max = tf.reduce_max(preds, 1, keep_dims=True) original_predictions = tf.to_float(tf.equal(preds, preds_max)) labels = original_predictions def cw_wrap(x_val, y_val): return np.array(attack.attack(x_val, y_val), dtype=np.float32) wrap = tf.py_func(cw_wrap, [x, labels], tf.float32) return wrap
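A hedged usage sketch of the wrapper above, assuming the CleverHans 2.x CarliniWagnerL2 class and a toy probs-producing stand-in model; the parameter values are purely illustrative.

# Hedged usage sketch: Carlini-Wagner L2 through the generate() wrapper above.
import numpy as np
import tensorflow as tf
from cleverhans.model import CallableModelWrapper
from cleverhans.attacks import CarliniWagnerL2

def simple_probs(x):
    # Toy fixed "model": each input dimension votes for one of 10 classes.
    w = tf.one_hot(tf.range(784) % 10, 10)
    return tf.nn.softmax(tf.matmul(x, w))

sess = tf.Session()
x = tf.placeholder(tf.float32, shape=(None, 784))
cw = CarliniWagnerL2(CallableModelWrapper(simple_probs, 'probs'), sess=sess)
adv = cw.generate(x, nb_classes=10, batch_size=2, max_iterations=100,
                  binary_search_steps=3, initial_const=10.0,
                  clip_min=0.0, clip_max=1.0)
x_adv = sess.run(adv, feed_dict={x: np.random.rand(2, 784).astype(np.float32)})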
'Parameters data : str String with lines separated by \'\n\'.'
def __init__(self, data):
if isinstance(data, list): self._str = data else: self._str = data.split('\n') self.reset()
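A small behavioural note on the constructor above (the enclosing class is the numpydoc-style docstring reader the following records belong to): a string is split on newlines, while a list of lines is stored unchanged, so both forms below end up as the same internal list.

# Illustration of the two accepted input forms.
text = "Parameters\n----------\nx : int"
lines = text.split('\n')   # ['Parameters', '----------', 'x : int']
# Passing either `text` or `lines` to the constructor stores this same list.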
'func_name : Descriptive text continued text another_func_name : Descriptive text func_name1, func_name2, func_name3'
def _parse_see_also(self, content):
functions = [] current_func = None rest = [] for line in content: if (not line.strip()): continue if (':' in line): if current_func: functions.append((current_func, rest)) r = line.split(':', 1) current_func = r[0].strip() r[1] = r[1].strip() if r[1]: rest = [r[1]] else: rest = [] elif (not line.startswith(' ')): if current_func: functions.append((current_func, rest)) current_func = None rest = [] if (',' in line): for func in line.split(','): func = func.strip() if func: functions.append((func, [])) elif line.strip(): current_func = line.strip() elif (current_func is not None): rest.append(line.strip()) if current_func: functions.append((current_func, rest)) return functions
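A worked illustration of what the parser above produces; the input is hypothetical and the call is shown as a comment because _parse_see_also is a method of the enclosing docstring class.

# Hypothetical See Also block, as a list of raw lines.
content = [
    "numpy.mean : Compute the arithmetic mean.",
    "    Ignores NaNs only in the nan-variant.",
    "numpy.nanmean, numpy.median",
]
# self._parse_see_also(content) would return roughly:
# [('numpy.mean', ['Compute the arithmetic mean.',
#                  'Ignores NaNs only in the nan-variant.']),
#  ('numpy.nanmean', []),
#  ('numpy.median', [])]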
'.. index: default :refguide: something, else, and more'
def _parse_index(self, section, content):
def strip_each_in(lst): return [s.strip() for s in lst] out = {} section = section.split('::') if (len(section) > 1): out['default'] = strip_each_in(section[1].split(','))[0] for line in content: line = line.split(':') if (len(line) > 2): out[line[1]] = strip_each_in(line[2].split(',')) return out
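Likewise, a worked illustration of the index parser above with a hypothetical section header and content line (note the split on '::' for the default entry).

# Hypothetical index directive.
section = ".. index:: default"
content = ["   :refguide: something, else, and more"]
# self._parse_index(section, content) would return:
# {'default': 'default', 'refguide': ['something', 'else', 'and more']}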