# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# pylint: disable=protected-access
"""Contains the base Layer class, from which all layers inherit."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import functools
import itertools
import threading
import numpy as np
import six
from six.moves import zip # pylint: disable=redefined-builtin
from tensorflow.python.autograph.core import ag_ctx
from tensorflow.python.autograph.impl import api as autograph
from tensorflow.python.distribute import distribution_strategy_context as ds_context
from tensorflow.python.eager import context
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import func_graph
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import tensor_spec
from tensorflow.python.framework import tensor_util
from tensorflow.python.keras import backend
from tensorflow.python.keras import constraints
from tensorflow.python.keras import initializers
from tensorflow.python.keras import regularizers
from tensorflow.python.keras.engine import base_layer
from tensorflow.python.keras.engine import base_layer_utils
from tensorflow.python.keras.engine import input_spec
from tensorflow.python.keras.mixed_precision.experimental import autocast_variable
from tensorflow.python.keras.mixed_precision.experimental import loss_scale_optimizer
from tensorflow.python.keras.mixed_precision.experimental import policy
from tensorflow.python.keras.saving.saved_model import layer_serialization
from tensorflow.python.keras.utils import generic_utils
from tensorflow.python.keras.utils import layer_utils
from tensorflow.python.keras.utils import tf_utils
# Modules that only depend on `keras.layers` import these from here.
from tensorflow.python.keras.utils.generic_utils import to_snake_case # pylint: disable=unused-import
from tensorflow.python.keras.utils.tf_utils import is_tensor_or_tensor_list # pylint: disable=unused-import
from tensorflow.python.module import module
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import variables as tf_variables
from tensorflow.python.ops.ragged import ragged_tensor
from tensorflow.python.platform import tf_logging
from tensorflow.python.training.tracking import base as trackable
from tensorflow.python.training.tracking import data_structures
from tensorflow.python.training.tracking import layer_utils as trackable_layer_utils
from tensorflow.python.training.tracking import tracking
from tensorflow.python.util import deprecation
from tensorflow.python.util import nest
from tensorflow.python.util import object_identity
from tensorflow.python.util import tf_inspect
from tensorflow.tools.docs import doc_controls
# pylint: disable=g-classes-have-attributes
class Layer(base_layer.Layer):
"""Base layer class.
This is the class from which all layers inherit.
A layer is a class implementing common neural network operations, such
as convolution, batch norm, etc. These operations require managing weights,
losses, updates, and inter-layer connectivity.
Users will just instantiate a layer and then treat it as a callable.
We recommend that descendants of `Layer` implement the following methods:
* `__init__()`: Save configuration in member variables
* `build()`: Called once from `__call__`, when we know the shapes of inputs
and `dtype`. Should have the calls to `add_weight()`, and then
call the super's `build()` (which sets `self.built = True`, which is
nice in case the user wants to call `build()` manually before the
first `__call__`).
* `call()`: Called in `__call__` after making sure `build()` has been called
once. Should actually perform the logic of applying the layer to the
input tensors (which should be passed in as the first argument).
Arguments:
trainable: Boolean, whether the layer's variables should be trainable.
name: String name of the layer.
dtype: The dtype of the layer's computations and weights (default of
`None` means use `tf.keras.backend.floatx` in TensorFlow 2, or the type
of the first input in TensorFlow 1).
dynamic: Set this to `True` if your layer should only be run eagerly, and
should not be used to generate a static computation graph.
This would be the case for a Tree-RNN or a recursive network,
for example, or generally for any layer that manipulates tensors
using Python control flow. If `False`, we assume that the layer can
safely be used to generate a static computation graph.
Attributes:
name: The name of the layer (string).
dtype: The dtype of the layer's computations and weights. If mixed
precision is used with a `tf.keras.mixed_precision.experimental.Policy`,
this is instead just the dtype of the layer's weights, as the computations
are done in a different dtype.
updates: List of update ops of this layer.
losses: List of losses added by this layer.
trainable_weights: List of variables to be included in backprop.
non_trainable_weights: List of variables that should not be
included in backprop.
weights: The concatenation of the lists trainable_weights and
non_trainable_weights (in this order).
trainable: Whether the layer should be trained (boolean).
input_spec: Optional (list of) `InputSpec` object(s) specifying the
constraints on inputs that can be accepted by the layer.
Each layer has a dtype, which is typically the dtype of the layer's
computations and variables. A layer's dtype can be queried via the
`Layer.dtype` property. The dtype is specified with the `dtype` constructor
argument. In TensorFlow 2, the dtype defaults to `tf.keras.backend.floatx()`
if no dtype is passed. `floatx()` itself defaults to "float32". Additionally,
layers will cast their inputs to the layer's dtype in TensorFlow 2. When mixed
precision is used, layers may have different computation and variable dtypes.
See `tf.keras.mixed_precision.experimental.Policy` for details on layer
dtypes.
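Example (an illustrative sketch of the contract above; the layer name
`SimpleDense` and its `units` argument are not part of this module):
```python
import tensorflow as tf

class SimpleDense(tf.keras.layers.Layer):

  def __init__(self, units=32, **kwargs):
    super(SimpleDense, self).__init__(**kwargs)
    self.units = units

  def build(self, input_shape):
    # Weights are created once the input shape is known.
    self.kernel = self.add_weight(
        'kernel', shape=(int(input_shape[-1]), self.units),
        initializer='glorot_uniform')
    self.bias = self.add_weight(
        'bias', shape=(self.units,), initializer='zeros')
    super(SimpleDense, self).build(input_shape)

  def call(self, inputs):
    return tf.matmul(inputs, self.kernel) + self.bias

layer = SimpleDense(4)
outputs = layer(tf.ones((2, 3)))  # Builds the layer, then runs `call`.
```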
"""
# See tf.Module for the usage of this property.
# The key for _obj_reference_counts_dict is a Trackable, which could be a
# variable or layer etc. tf.Module._flatten will fail to flatten the key
# since it is trying to convert Trackable to a string. This attribute can be
# ignored even after the fix of nest lib, since the trackable object should
# already be available as individual attributes. _obj_reference_counts_dict
# just contains a copy of them.
_TF_MODULE_IGNORED_PROPERTIES = frozenset(itertools.chain(
('_obj_reference_counts_dict',),
module.Module._TF_MODULE_IGNORED_PROPERTIES
))
@trackable.no_automatic_dependency_tracking
def __init__(self, trainable=True, name=None, dtype=None, dynamic=False,
**kwargs):
# These properties should be set by the user via keyword arguments.
# note that 'dtype', 'input_shape' and 'batch_input_shape'
# are only applicable to input layers: do not pass these keywords
# to non-input layers.
allowed_kwargs = {
'input_shape',
'batch_input_shape',
'batch_size',
'weights',
'activity_regularizer',
'autocast'
}
# Validate optional keyword arguments.
generic_utils.validate_kwargs(kwargs, allowed_kwargs)
# Mutable properties
# Indicates whether the layer's weights are updated during training
# and whether the layer's updates are run during training.
self._trainable = trainable
# A stateful layer is a layer whose updates are run during inference too,
# for instance stateful RNNs.
self._stateful = False
# Indicates whether `build` needs to be called upon layer call, to create
# the layer's weights.
self.built = False
self._build_input_shape = None
# Provides information about which inputs are compatible with the layer.
self._input_spec = None
self.supports_masking = False
self._init_set_name(name)
self._activity_regularizer = kwargs.pop('activity_regularizer', None)
self._maybe_create_attribute('_trainable_weights', [])
self._maybe_create_attribute('_non_trainable_weights', [])
self._updates = []
# Object to store all thread local layer properties.
self._thread_local = threading.local()
# A list of zero-argument lambdas which return Tensors, used for variable
# regularizers.
self._callable_losses = []
# A list of symbolic Tensors containing activity regularizers and losses
# manually added through `add_loss` in graph-building mode.
self._losses = []
# A list of metric instances corresponding to the symbolic metric tensors
# added using the `add_metric` API.
self._metrics = []
# Both graph and subclassed networks have a dtype policy. For graph
# networks, the policy's compute and variable dtypes are ignored, but other
# fields, like the loss scale, are used by Models. For subclassed networks,
# the compute and variable dtypes are used as in any ordinary layer.
self._set_dtype_policy(dtype)
# Boolean indicating whether the layer automatically casts its inputs to the
# layer's compute_dtype.
self._autocast = kwargs.get('autocast',
base_layer_utils.v2_dtype_behavior_enabled())
# Dependencies tracked via attribute assignment.
# All layers in order of horizontal graph traversal.
# Entries are unique. For models includes input and output layers.
self._maybe_create_attribute('_layers', [])
# These lists will be filled via successive calls
# to self._add_inbound_node().
# Used in symbolic mode only, only in conjunction with graph-networks
self._inbound_nodes = []
self._outbound_nodes = []
self._init_call_fn_args()
# Whether the `call` method can be used to build a TF graph without issues.
# This attribute has no effect if the model is created using the Functional
# API. Instead, `model.dynamic` is determined based on the internal layers.
self._dynamic = dynamic
# Manage input shape information if passed.
if 'input_shape' in kwargs or 'batch_input_shape' in kwargs:
# In this case we will later create an input layer
# to insert before the current layer
if 'batch_input_shape' in kwargs:
batch_input_shape = tuple(kwargs['batch_input_shape'])
elif 'input_shape' in kwargs:
if 'batch_size' in kwargs:
batch_size = kwargs['batch_size']
else:
batch_size = None
batch_input_shape = (batch_size,) + tuple(kwargs['input_shape'])
self._batch_input_shape = batch_input_shape
# Manage initial weight values if passed.
self._initial_weights = kwargs.get('weights', None)
# Whether the layer will track any layers that are set as attributes on itself
# as sub-layers; the weights from those sub-layers will be included in the
# parent layer's variables() as well.
# Defaults to True, which means auto tracking is turned on. Certain subclasses
# might want to turn it off, like the Sequential model.
self._auto_track_sub_layers = True
@trackable.no_automatic_dependency_tracking
@generic_utils.default
def build(self, input_shape):
"""Creates the variables of the layer (optional, for subclass implementers).
This is a method that implementers of subclasses of `Layer` or `Model`
can override if they need a state-creation step in-between
layer instantiation and layer call.
This is typically used to create the weights of `Layer` subclasses.
Arguments:
input_shape: Instance of `TensorShape`, or list of instances of
`TensorShape` if the layer expects a list of inputs
(one instance per input).
"""
if not hasattr(self.build, '_is_default'):
self._build_input_shape = input_shape
self.built = True
@doc_controls.for_subclass_implementers
def call(self, inputs, **kwargs): # pylint: disable=unused-argument
"""This is where the layer's logic lives.
Arguments:
inputs: Input tensor, or list/tuple of input tensors.
**kwargs: Additional keyword arguments.
Returns:
A tensor or list/tuple of tensors.
"""
return inputs
@doc_controls.for_subclass_implementers
def _add_trackable(self, trackable_object, trainable):
"""Adds a Trackable object to this layer's state.
Arguments:
trackable_object: The tf.tracking.Trackable object to add.
trainable: Boolean, whether the variable should be part of the layer's
"trainable_variables" (e.g. variables, biases) or
"non_trainable_variables" (e.g. BatchNorm mean and variance).
Returns:
The TrackableWeightHandler used to track this object.
"""
handler = base_layer_utils.TrackableWeightHandler(trackable_object)
if trainable:
self._trainable_weights.append(handler)
else:
self._non_trainable_weights.append(handler)
return handler
@doc_controls.for_subclass_implementers
def add_weight(self,
name=None,
shape=None,
dtype=None,
initializer=None,
regularizer=None,
trainable=None,
constraint=None,
partitioner=None,
use_resource=None,
synchronization=tf_variables.VariableSynchronization.AUTO,
aggregation=tf_variables.VariableAggregation.NONE,
**kwargs):
"""Adds a new variable to the layer.
Arguments:
name: Variable name.
shape: Variable shape. Defaults to scalar if unspecified.
dtype: The type of the variable. Defaults to `self.dtype` or `float32`.
initializer: Initializer instance (callable).
regularizer: Regularizer instance (callable).
trainable: Boolean, whether the variable should be part of the layer's
"trainable_variables" (e.g. variables, biases)
or "non_trainable_variables" (e.g. BatchNorm mean and variance).
Note that `trainable` cannot be `True` if `synchronization`
is set to `ON_READ`.
constraint: Constraint instance (callable).
partitioner: Partitioner to be passed to the `Trackable` API.
use_resource: Whether to use `ResourceVariable`.
synchronization: Indicates when a distributed variable will be
aggregated. Accepted values are constants defined in the class
`tf.VariableSynchronization`. By default the synchronization is set to
`AUTO` and the current `DistributionStrategy` chooses
when to synchronize. If `synchronization` is set to `ON_READ`,
`trainable` must not be set to `True`.
aggregation: Indicates how a distributed variable will be aggregated.
Accepted values are constants defined in the class
`tf.VariableAggregation`.
**kwargs: Additional keyword arguments. Accepted values are `getter`,
`collections`, `experimental_autocast` and `caching_device`.
Returns:
The created variable. Usually either a `Variable` or `ResourceVariable`
instance. If `partitioner` is not `None`, a `PartitionedVariable`
instance is returned.
Raises:
RuntimeError: If called with partitioned variable regularization and
eager execution is enabled.
ValueError: When giving unsupported dtype and no initializer or when
trainable has been set to True with synchronization set as `ON_READ`.
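Example (a minimal sketch of `add_weight` inside `build`; the layer name
`MyLayer` and the sizes used are illustrative assumptions):
```python
import tensorflow as tf

class MyLayer(tf.keras.layers.Layer):

  def build(self, input_shape):
    # Trainable kernel; the L2 penalty is collected into `self.losses`.
    self.kernel = self.add_weight(
        name='kernel',
        shape=(int(input_shape[-1]), 16),
        initializer='glorot_uniform',
        regularizer=tf.keras.regularizers.l2(1e-4))
    # Non-trainable scalar state, excluded from `trainable_weights`.
    self.counter = self.add_weight(
        name='counter', shape=(), dtype=tf.int64,
        initializer='zeros', trainable=False)
    super(MyLayer, self).build(input_shape)

  def call(self, inputs):
    return tf.matmul(inputs, self.kernel)
```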
"""
if shape is None:
shape = ()
# Validate optional keyword arguments.
for kwarg in kwargs:
if kwarg not in ['getter', 'collections', 'experimental_autocast',
'caching_device']:
raise TypeError('Unknown keyword argument:', kwarg)
getter = kwargs.pop('getter', base_layer_utils.make_variable)
collections_arg = kwargs.pop('collections', None)
# 'experimental_autocast' can be set to False by the caller to indicate an
# AutoCastVariable should never be created.
autocast = kwargs.pop('experimental_autocast', True)
# See the docstring for tf.Variable about the details for caching_device.
caching_device = kwargs.pop('caching_device', None)
if dtype is None:
dtype = self.dtype or backend.floatx()
dtype = dtypes.as_dtype(dtype)
if self._dtype_policy.variable_dtype is None:
# The policy is "_infer", so we infer the policy from the variable dtype.
self._dtype_policy = policy.Policy(dtype.base_dtype.name)
initializer = initializers.get(initializer)
regularizer = regularizers.get(regularizer)
constraint = constraints.get(constraint)
if synchronization == tf_variables.VariableSynchronization.ON_READ:
if trainable:
raise ValueError(
'Synchronization value can be set to '
'VariableSynchronization.ON_READ only for non-trainable variables. '
'You have specified trainable=True and '
'synchronization=VariableSynchronization.ON_READ.')
else:
# Set trainable to be false when variable is to be synced on read.
trainable = False
elif trainable is None:
trainable = True
# Initialize variable when no initializer provided
if initializer is None:
# If dtype is DT_FLOAT, provide a uniform unit scaling initializer
if dtype.is_floating:
initializer = initializers.get('glorot_uniform')
# If dtype is DT_INT/DT_UINT, provide a default value `zero`
# If dtype is DT_BOOL, provide a default value `FALSE`
elif dtype.is_integer or dtype.is_unsigned or dtype.is_bool:
initializer = initializers.zeros()
# NOTE: Do we need to support handling DT_STRING and DT_COMPLEX here?
else:
raise ValueError('An initializer for variable %s of type %s is required'
' for layer %s' % (name, dtype.base_dtype, self.name))
if (autocast and self._dtype_policy.should_cast_variables and
dtype.is_floating):
# Wrap 'getter' with a version that returns an AutoCastVariable.
old_getter = getter
def getter(*args, **kwargs): # pylint: disable=function-redefined
variable = old_getter(*args, **kwargs)
return autocast_variable.create_autocast_variable(variable)
# Also the caching_device does not work with the mixed precision API,
# disable it if it is specified.
# TODO(b/142020079): Reenable it once the bug is fixed.
if caching_device is not None:
tf_logging.warn('`caching_device` does not work with mixed precision '
'API. Ignoring user specified `caching_device`.')
caching_device = None
variable = self._add_variable_with_custom_getter(
name=name,
shape=shape,
# TODO(allenl): a `make_variable` equivalent should be added as a
# `Trackable` method.
getter=getter,
# Manage errors in Layer rather than Trackable.
overwrite=True,
initializer=initializer,
dtype=dtype,
constraint=constraint,
trainable=trainable,
partitioner=partitioner,
use_resource=use_resource,
collections=collections_arg,
synchronization=synchronization,
aggregation=aggregation,
caching_device=caching_device)
if regularizer is not None:
# TODO(fchollet): in the future, this should be handled at the
# level of variable creation, and weight regularization losses
# should be variable attributes.
name_in_scope = variable.name[:variable.name.find(':')]
self._handle_weight_regularization(name_in_scope,
variable,
regularizer)
if isinstance(variable, tf_variables.PartitionedVariable):
for v in variable:
backend.track_variable(v)
if trainable:
self._trainable_weights.append(v)
else:
self._non_trainable_weights.append(v)
else:
backend.track_variable(variable)
if trainable:
self._trainable_weights.append(variable)
else:
self._non_trainable_weights.append(variable)
return variable
@generic_utils.default
def get_config(self):
"""Returns the config of the layer.
A layer config is a Python dictionary (serializable)
containing the configuration of a layer.
The same layer can be reinstantiated later
(without its trained weights) from this configuration.
The config of a layer does not include connectivity
information, nor the layer class name. These are handled
by `Network` (one layer of abstraction above).
Returns:
Python dictionary.
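Example (an illustrative override; `MyLayer` and `units` are hypothetical):
```python
import tensorflow as tf

class MyLayer(tf.keras.layers.Layer):

  def __init__(self, units=32, **kwargs):
    super(MyLayer, self).__init__(**kwargs)
    self.units = units

  def get_config(self):
    # Start from the base config and add this layer's constructor args.
    config = super(MyLayer, self).get_config()
    config.update({'units': self.units})
    return config

layer = MyLayer(units=64, name='my_layer')
# Round trip: an equivalent (untrained) layer rebuilt from the config.
clone = MyLayer.from_config(layer.get_config())
```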
"""
all_args = tf_inspect.getfullargspec(self.__init__).args
config = {'name': self.name, 'trainable': self.trainable}
if hasattr(self, '_batch_input_shape'):
config['batch_input_shape'] = self._batch_input_shape
config['dtype'] = policy.serialize(self._dtype_policy)
if hasattr(self, 'dynamic'):
# Only include `dynamic` in the `config` if it is `True`
if self.dynamic:
config['dynamic'] = self.dynamic
elif 'dynamic' in all_args:
all_args.remove('dynamic')
expected_args = config.keys()
# Finds all arguments in the `__init__` that are not in the config:
extra_args = [arg for arg in all_args if arg not in expected_args]
# Check that either the only argument in the `__init__` is `self`,
# or that `get_config` has been overridden:
if len(extra_args) > 1 and hasattr(self.get_config, '_is_default'):
raise NotImplementedError('Layers with arguments in `__init__` must '
'override `get_config`.')
return config
@classmethod
def from_config(cls, config):
"""Creates a layer from its config.
This method is the reverse of `get_config`,
capable of instantiating the same layer from the config
dictionary. It does not handle layer connectivity
(handled by Network), nor weights (handled by `set_weights`).
Arguments:
config: A Python dictionary, typically the
output of get_config.
Returns:
A layer instance.
"""
return cls(**config)
def compute_output_shape(self, input_shape):
"""Computes the output shape of the layer.
If the layer has not been built, this method will call `build` on the
layer. This assumes that the layer will later be used with inputs that
match the input shape provided here.
Arguments:
input_shape: Shape tuple (tuple of integers)
or list of shape tuples (one per output tensor of the layer).
Shape tuples can include None for free dimensions,
instead of an integer.
Returns:
An output shape tuple.
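Example (an illustrative override for a flattening layer; `MyFlatten` is
a hypothetical name):
```python
import tensorflow as tf

class MyFlatten(tf.keras.layers.Layer):

  def call(self, inputs):
    return tf.reshape(inputs, (tf.shape(inputs)[0], -1))

  def compute_output_shape(self, input_shape):
    input_shape = tf.TensorShape(input_shape).as_list()
    # Collapse all non-batch dimensions into one.
    flattened = None
    if None not in input_shape[1:]:
      flattened = 1
      for dim in input_shape[1:]:
        flattened *= dim
    return tf.TensorShape([input_shape[0], flattened])

MyFlatten().compute_output_shape((None, 4, 5))  # TensorShape([None, 20])
```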
"""
if context.executing_eagerly():
# In this case we build the model first in order to do shape inference.
# This is acceptable because the framework only calls
# `compute_output_shape` on shape values that the layer would later be
# built for. It would however cause issues in case a user attempts to
# use `compute_output_shape` manually with shapes that are incompatible
# with the shape the Layer will be called on (these users will have to
# implement `compute_output_shape` themselves).
self._maybe_build(input_shape)
with ops.get_default_graph().as_default():
graph = func_graph.FuncGraph('graph')
with graph.as_default():
input_shape = tf_utils.convert_shapes(input_shape, to_tuples=False)
inputs = nest.map_structure(
base_layer_utils.generate_placeholders_from_shape, input_shape)
try:
outputs = self(inputs, training=False)
except TypeError as e:
six.raise_from(
NotImplementedError(
'We could not automatically infer the static shape of the '
'layer\'s output. Please implement the '
'`compute_output_shape` method on your layer (%s).' %
self.__class__.__name__), e)
return nest.map_structure(lambda t: t.shape, outputs)
raise NotImplementedError
@doc_controls.for_subclass_implementers
def compute_output_signature(self, input_signature):
"""Compute the output tensor signature of the layer based on the inputs.
Unlike a TensorShape object, a TensorSpec object contains both shape
and dtype information for a tensor. This method allows layers to provide
output dtype information if it is different from the input dtype.
For any layer that doesn't implement this function,
the framework will fall back to use `compute_output_shape`, and will
assume that the output dtype matches the input dtype.
Args:
input_signature: Single TensorSpec or nested structure of TensorSpec
objects, describing a candidate input for the layer.
Returns:
Single TensorSpec or nested structure of TensorSpec objects, describing
how the layer would transform the provided input.
Raises:
TypeError: If input_signature contains a non-TensorSpec object.
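Example (illustrative usage; `Dense` here is just a stand-in for any layer
that implements `compute_output_shape`):
```python
import tensorflow as tf

layer = tf.keras.layers.Dense(2, dtype='float64')
spec = layer.compute_output_signature(
    tf.TensorSpec(shape=(None, 3), dtype=tf.float64))
# `spec` describes a float64 tensor of shape (None, 2).
```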
"""
def check_type_return_shape(s):
if not isinstance(s, tensor_spec.TensorSpec):
raise TypeError(
'Only TensorSpec signature types are supported, '
'but saw signature entry: {}.'.format(s))
return s.shape
input_shape = nest.map_structure(check_type_return_shape, input_signature)
output_shape = self.compute_output_shape(input_shape)
dtype = self._compute_dtype
if dtype is None:
input_dtypes = [s.dtype for s in nest.flatten(input_signature)]
# Default behavior when self.dtype is None, is to use the first input's
# dtype.
dtype = input_dtypes[0]
return nest.map_structure(
lambda s: tensor_spec.TensorSpec(dtype=dtype, shape=s),
output_shape)
@generic_utils.default
def compute_mask(self, inputs, mask=None): # pylint: disable=unused-argument
"""Computes an output mask tensor.
Arguments:
inputs: Tensor or list of tensors.
mask: Tensor or list of tensors.
Returns:
None or a tensor (or list of tensors,
one per output tensor of the layer).
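Example (a sketch of a layer that opts into mask propagation; the name
`TimesTwo` is illustrative):
```python
import tensorflow as tf

class TimesTwo(tf.keras.layers.Layer):

  def __init__(self, **kwargs):
    super(TimesTwo, self).__init__(**kwargs)
    # With mask support declared, the default `compute_mask` forwards the
    # incoming mask instead of raising a TypeError.
    self.supports_masking = True

  def call(self, inputs):
    return inputs * 2.0
```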
"""
if not self.supports_masking:
if any(m is not None for m in nest.flatten(mask)):
raise TypeError('Layer ' + self.name + ' does not support masking, '
'but was passed an input_mask: ' + str(mask))
# masking not explicitly supported: return None as mask.
return None
# if masking is explicitly supported, by default
# carry over the input mask
return mask
def __call__(self, *args, **kwargs):
"""Wraps `call`, applying pre- and post-processing steps.
Arguments:
*args: Positional arguments to be passed to `self.call`.
**kwargs: Keyword arguments to be passed to `self.call`.
Returns:
Output tensor(s).
Note:
- The following optional keyword arguments are reserved for specific uses:
* `training`: Boolean scalar tensor of Python boolean indicating
whether the `call` is meant for training or inference.
* `mask`: Boolean input mask.
- If the layer's `call` method takes a `mask` argument (as some Keras
layers do), its default value will be set to the mask generated
for `inputs` by the previous layer (if `input` did come from
a layer that generated a corresponding mask, i.e. if it came from
a Keras layer with masking support).
Raises:
ValueError: if the layer's `call` method returns None (an invalid value).
RuntimeError: if `super().__init__()` was not called in the constructor.
"""
if not hasattr(self, '_thread_local'):
raise RuntimeError(
'You must call `super().__init__()` in the layer constructor.')
# Grab the first positional or keyword argument.
if args:
inputs = args[0]
args = args[1:]
elif self._call_fn_args[0] in kwargs:
inputs = kwargs.pop(self._call_fn_args[0])
else:
raise ValueError(
'The first argument to `Layer.call` must always be passed.')
call_context = base_layer_utils.call_context()
input_list = nest.flatten(inputs)
# We will attempt to build a TF graph if & only if all inputs are symbolic.
# This is always the case in graph mode. It can also be the case in eager
# mode when all inputs can be traced back to `keras.Input()` (when building
# models using the functional API).
build_graph = tf_utils.are_all_symbolic_tensors(input_list)
# Accept NumPy and scalar inputs by converting to Tensors.
if any(isinstance(x, (np.ndarray, float, int)) for x in input_list):
def _convert_non_tensor(x):
# Don't call `ops.convert_to_tensor_v2` on all `inputs` because
# `SparseTensors` can't be converted to `Tensor`.
if isinstance(x, (np.ndarray, float, int)):
return ops.convert_to_tensor_v2(x)
return x
inputs = nest.map_structure(_convert_non_tensor, inputs)
input_list = nest.flatten(inputs)
# Handle `mask` propagation from previous layer to current layer. Masks can
# be propagated explicitly via the `mask` argument, or implicitly via
# setting the `_keras_mask` attribute on the inputs to a Layer. Masks passed
# explicitly take priority.
mask_arg_passed_by_framework = False
input_masks = self._collect_input_masks(inputs, args, kwargs)
if (self._expects_mask_arg and input_masks is not None and
not self._call_arg_was_passed('mask', args, kwargs)):
mask_arg_passed_by_framework = True
kwargs['mask'] = input_masks
# If `training` argument is None or not explicitly passed,
# propagate `training` value from this layer's calling layer.
training_value = None
training_arg_passed_by_framework = False
# Priority 1: `training` was explicitly passed.
if self._call_arg_was_passed('training', args, kwargs):
training_value = self._get_call_arg_value('training', args, kwargs)
if not self._expects_training_arg:
kwargs.pop('training')
if training_value is None:
# Priority 2: `training` was passed to a parent layer.
if call_context.training is not None:
training_value = call_context.training
# Priority 3a: `learning_phase()` has been set.
elif backend.global_learning_phase_is_set():
training_value = backend.learning_phase()
# Priority 3b: Pass the `learning_phase()` if in the Keras FuncGraph.
elif build_graph:
with backend.get_graph().as_default():
if base_layer_utils.is_in_keras_graph():
training_value = backend.learning_phase()
if self._expects_training_arg and training_value is not None:
# Force the training_value to be bool type, which matches the contract
# for layer/model call args.
if tensor_util.is_tensor(training_value):
training_value = math_ops.cast(training_value, dtypes.bool)
else:
training_value = bool(training_value)
args, kwargs = self._set_call_arg_value(
'training', training_value, args, kwargs)
training_arg_passed_by_framework = True
# Only create Keras history if at least one tensor originates from a
# `keras.Input`. Otherwise this Layer may be being used outside the Keras
# framework.
if build_graph and base_layer_utils.needs_keras_history(inputs):
base_layer_utils.create_keras_history(inputs)
with call_context.enter(self, inputs, build_graph, training_value):
# Check input assumptions set after layer building, e.g. input shape.
if build_graph:
# Symbolic execution on symbolic tensors. We will attempt to build
# the corresponding TF subgraph inside `backend.get_graph()`
# TODO(reedwm): We should assert input compatibility after the inputs
# are casted, not before.
input_spec.assert_input_compatibility(self.input_spec, inputs,
self.name)
graph = backend.get_graph()
with graph.as_default(), backend.name_scope(self._name_scope()):
# Build layer if applicable (if the `build` method has been
# overridden).
self._maybe_build(inputs)
cast_inputs = self._maybe_cast_inputs(inputs)
# Wrapping `call` function in autograph to allow for dynamic control
# flow and control dependencies in call. We are limiting this to
# subclassed layers as autograph is strictly needed only for
# subclassed layers and models.
# tf_convert will respect the value of autograph setting in the
# enclosing tf.function, if any.
if (base_layer_utils.is_subclassed(self) and
not base_layer_utils.from_saved_model(self)):
call_fn = autograph.tf_convert(
self.call, ag_ctx.control_status_ctx())
else:
call_fn = self.call
if not self.dynamic:
try:
with base_layer_utils.autocast_context_manager(
self._compute_dtype):
outputs = call_fn(cast_inputs, *args, **kwargs)
except errors.OperatorNotAllowedInGraphError as e:
raise TypeError('You are attempting to use Python control '
'flow in a layer that was not declared to be '
'dynamic. Pass `dynamic=True` to the class '
'constructor.\nEncountered error:\n"""\n' +
str(e) + '\n"""')
else:
# We will use static shape inference to return symbolic tensors
# matching the specifications of the layer outputs.
# Since `self.dynamic` is True, we will never attempt to
# run the underlying TF graph (which is disconnected).
# TODO(fchollet): consider py_func as an alternative, which
# would enable us to run the underlying graph if needed.
outputs = self._symbolic_call(inputs)
if outputs is None:
raise ValueError('A layer\'s `call` method should return a '
'Tensor or a list of Tensors, not None '
'(layer: ' + self.name + ').')
if base_layer_utils.have_all_keras_metadata(inputs):
if training_arg_passed_by_framework:
args, kwargs = self._set_call_arg_value(
'training', None, args, kwargs, pop_kwarg_if_none=True)
if mask_arg_passed_by_framework:
kwargs.pop('mask')
outputs = self._set_connectivity_metadata((inputs,) + args, kwargs,
outputs)
self._handle_activity_regularization(inputs, outputs)
self._set_mask_metadata(inputs, outputs, input_masks)
if hasattr(self, '_set_inputs') and not self.inputs:
# Subclassed network: explicitly set metadata normally set by
# a call to self._set_inputs().
# TODO(b/120997007): This should be done in Eager as well, but
# causes garbage collection issues because of the placeholders
# created on the default Keras graph.
self._set_inputs(inputs, outputs)
else:
# Eager execution on data tensors.
with backend.name_scope(self._name_scope()):
self._maybe_build(inputs)
cast_inputs = self._maybe_cast_inputs(inputs)
with base_layer_utils.autocast_context_manager(
self._compute_dtype):
outputs = self.call(cast_inputs, *args, **kwargs)
self._handle_activity_regularization(inputs, outputs)
self._set_mask_metadata(inputs, outputs, input_masks)
return outputs
@property
def dtype(self):
return self._dtype_policy.variable_dtype
@property
def name(self):
return self._name
@property
@trackable_layer_utils.cache_recursive_attribute('dynamic')
def dynamic(self):
# NOTE(taylorrobie): Currently self._dynamic is read-only. If that changes
# then this cache logic must be updated.
return self._dynamic or any(layer.dynamic
for layer in self._unique_sublayers())
@property
@doc_controls.do_not_generate_docs
@trackable_layer_utils.cache_recursive_attribute('stateful')
def stateful(self):
return self._stateful or any(
getattr(layer, 'stateful', False) for layer in self._unique_sublayers())
@stateful.setter
@trackable_layer_utils.invalidate_recursive_cache('stateful')
def stateful(self, value):
self._stateful = value
@property
def trainable(self):
return self._trainable
@trainable.setter
def trainable(self, value):
self._trainable = value
for layer in getattr(self, '_layers', []):
layer.trainable = value
@property
def activity_regularizer(self):
"""Optional regularizer function for the output of this layer."""
return self._activity_regularizer
@activity_regularizer.setter
def activity_regularizer(self, regularizer):
"""Optional regularizer function for the output of this layer."""
self._activity_regularizer = regularizer
@property
def input_spec(self):
return self._input_spec
@input_spec.setter
# Must be decorated to prevent tracking, since the input_spec can be nested
# InputSpec objects.
@trackable.no_automatic_dependency_tracking
def input_spec(self, value):
for v in nest.flatten(value):
if v is not None and not isinstance(v, base_layer.InputSpec):
raise TypeError('Layer input_spec must be an instance of InputSpec. '
'Got: {}'.format(v))
self._input_spec = value
@property
def trainable_weights(self):
if self.trainable:
children_weights = self._gather_children_attribute('trainable_weights')
return self._dedup_weights(self._trainable_weights + children_weights)
else:
return []
@property
def non_trainable_weights(self):
if self.trainable:
children_weights = self._gather_children_attribute(
'non_trainable_weights')
non_trainable_weights = self._non_trainable_weights + children_weights
else:
children_weights = self._gather_children_attribute('weights')
non_trainable_weights = (
self._trainable_weights + self._non_trainable_weights +
children_weights)
return self._dedup_weights(non_trainable_weights)
@property
def weights(self):
"""Returns the list of all layer variables/weights.
Returns:
A list of variables.
"""
return self.trainable_weights + self.non_trainable_weights
@property
def updates(self):
collected_updates = []
all_layers = self._gather_unique_layers()
with backend.get_graph().as_default():
for layer in all_layers:
if not layer.trainable and not layer.stateful:
continue
for u in layer._updates:
if callable(u):
try:
u = u()
except errors.InaccessibleTensorError:
base_layer_utils.check_graph_consistency(
method='add_update', force_raise=True)
raise # check_graph_consistency may not always raise.
base_layer_utils.check_graph_consistency(u, method='add_update')
collected_updates.append(u)
return collected_updates
@property
def losses(self):
"""Losses which are associated with this `Layer`.
Variable regularization tensors are created when this property is accessed,
so it is eager safe: accessing `losses` under a `tf.GradientTape` will
propagate gradients back to the corresponding variables.
Returns:
A list of tensors.
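Example (a sketch of combining regularization losses with a task loss in a
custom training step; the layer and values used are arbitrary):
```python
import tensorflow as tf

layer = tf.keras.layers.Dense(
    2, kernel_regularizer=tf.keras.regularizers.l2(1e-3))
inputs = tf.ones((1, 4))
with tf.GradientTape() as tape:
  outputs = layer(inputs)
  # The regularization tensors are created here, inside the tape.
  loss = tf.reduce_sum(outputs) + tf.add_n(layer.losses)
grads = tape.gradient(loss, layer.trainable_weights)
```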
"""
collected_losses = []
all_layers = self._gather_unique_layers()
for layer in all_layers:
# If any eager losses are present, we assume the model to be part of an
# eager training loop (either a custom one or the one used when
# `run_eagerly=True`) and so we always return just the eager losses.
collected_losses.extend(layer._losses)
for regularizer in layer._callable_losses:
loss_tensor = regularizer()
if loss_tensor is not None:
collected_losses.append(loss_tensor)
return collected_losses
@doc_controls.for_subclass_implementers
def add_loss(self, losses, inputs=None):
"""Add loss tensor(s), potentially dependent on layer inputs.
Some losses (for instance, activity regularization losses) may be dependent
on the inputs passed when calling a layer. Hence, when reusing the same
layer on different inputs `a` and `b`, some entries in `layer.losses` may
be dependent on `a` and some on `b`. This method automatically keeps track
of dependencies.
This method can be used inside a subclassed layer or model's `call`
function, in which case `losses` should be a Tensor or list of Tensors.
Example:
```python
class MyLayer(tf.keras.layers.Layer):
def call(self, inputs):
self.add_loss(tf.abs(tf.reduce_mean(inputs)), inputs=True)
return inputs
```
This method can also be called directly on a Functional Model during
construction. In this case, any loss Tensors passed to this Model must
be symbolic and be able to be traced back to the model's `Input`s. These
losses become part of the model's topology and are tracked in `get_config`.
Example:
```python
inputs = tf.keras.Input(shape=(10,))
x = tf.keras.layers.Dense(10)(inputs)
outputs = tf.keras.layers.Dense(1)(x)
model = tf.keras.Model(inputs, outputs)
# Activity regularization.
model.add_loss(tf.abs(tf.reduce_mean(x)))
```
If this is not the case for your loss (if, for example, your loss references
a `Variable` of one of the model's layers), you can wrap your loss in a
zero-argument lambda. These losses are not tracked as part of the model's
topology since they can't be serialized.
Example:
```python
inputs = tf.keras.Input(shape=(10,))
x = tf.keras.layers.Dense(10)(inputs)
outputs = tf.keras.layers.Dense(1)(x)
model = tf.keras.Model(inputs, outputs)
# Weight regularization.
model.add_loss(lambda: tf.reduce_mean(x.kernel))
```
The `get_losses_for` method allows you to retrieve the losses relevant to a
specific set of inputs.
Arguments:
losses: Loss tensor, or list/tuple of tensors. Rather than tensors, losses
may also be zero-argument callables which create a loss tensor.
inputs: Ignored when executing eagerly. If anything other than None is
passed, it signals the losses are conditional on some of the layer's
inputs, and thus they should only be run where these inputs are
available. This is the case for activity regularization losses, for
instance. If `None` is passed, the losses are assumed
to be unconditional, and will apply across all dataflows of the layer
(e.g. weight regularization losses).
"""
def _tag_unconditional(loss):
"""Process the loss and tag it by setting loss._unconditional_loss."""
if callable(loss):
# We run the loss without autocasting, as regularizers are often
# numerically unstable in float16.
with base_layer_utils.autocast_context_manager(None):
loss = loss()
if loss is None:
return None # Will be filtered out when computing the .losses property
if not tensor_util.is_tensor(loss):
loss = ops.convert_to_tensor_v2(loss, dtype=backend.floatx())
loss._unconditional_loss = (inputs is None) # pylint: disable=protected-access
return loss
losses = nest.flatten(losses)
callable_losses = []
symbolic_losses = []
for loss in losses:
if callable(loss):
callable_losses.append(functools.partial(_tag_unconditional, loss))
continue
if loss is None:
continue
if not tensor_util.is_tensor(loss):
loss = ops.convert_to_tensor_v2(loss, dtype=backend.floatx())
# TF Functions should take the eager path.
if (tf_utils.is_symbolic_tensor(loss) and
not base_layer_utils.is_in_tf_function()):
symbolic_losses.append(_tag_unconditional(loss))
base_layer_utils.check_graph_consistency(loss, method='add_loss')
self._callable_losses.extend(callable_losses)
in_call_context = base_layer_utils.call_context().in_call
if in_call_context:
for symbolic_loss in symbolic_losses:
self._losses.append(symbolic_loss)
else:
for symbolic_loss in symbolic_losses:
if getattr(self, '_is_graph_network', False):
self._graph_network_add_loss(symbolic_loss)
else:
# Possibly a loss was added in a Layer's `build`.
self._losses.append(symbolic_loss)
@property
def metrics(self):
collected_metrics = []
all_layers = self._gather_unique_layers()
for layer in all_layers:
collected_metrics.extend(layer._metrics)
return collected_metrics
@doc_controls.for_subclass_implementers
def add_metric(self, value, aggregation=None, name=None):
"""Adds metric tensor to the layer.
Args:
value: Metric tensor.
aggregation: Sample-wise metric reduction function. If `aggregation=None`,
it indicates that the metric tensor provided has been aggregated
already, e.g. `bin_acc = BinaryAccuracy(name='acc')` followed by
`model.add_metric(bin_acc(y_true, y_pred))`. If `aggregation='mean'`, the
given metric tensor will be sample-wise reduced using the `mean` function,
e.g. `model.add_metric(tf.reduce_sum(outputs), name='output_mean',
aggregation='mean')`.
name: String metric name.
Raises:
ValueError: If `aggregation` is anything other than None or `mean`.
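Example (a sketch of `add_metric` inside a subclassed layer's `call`; the
layer and metric names are illustrative):
```python
import tensorflow as tf

class MyLayer(tf.keras.layers.Layer):

  def call(self, inputs):
    # Track the sample-wise mean of the activation sum.
    self.add_metric(tf.reduce_sum(inputs), name='activation_sum',
                    aggregation='mean')
    return inputs
```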
"""
if aggregation is not None and aggregation != 'mean':
raise ValueError(
'We currently support only `mean` sample-wise metric aggregation. '
'You provided aggregation=`%s`' % aggregation)
from_metric_obj = hasattr(value, '_metric_obj')
is_symbolic = tf_utils.is_symbolic_tensor(value)
in_call_context = base_layer_utils.call_context().in_call
if name is None and not from_metric_obj:
# Eg. `self.add_metric(math_ops.reduce_sum(x), aggregation='mean')`
# In eager mode, we use metric name to lookup a metric. Without a name,
# a new Mean metric wrapper will be created on every model/layer call.
# So, we raise an error when no name is provided.
# We will do the same for symbolic mode for consistency although a name
# will be generated if no name is provided.
# We will not raise this error in the following use case for the sake of
# consistency, as a name is provided in the metric constructor.
# mean = metrics.Mean(name='my_metric')
# model.add_metric(mean(outputs))
raise ValueError('Please provide a name for your metric like '
'`self.add_metric(tf.reduce_sum(inputs), '
'name=\'mean_activation\', aggregation=\'mean\')`')
elif from_metric_obj:
name = value._metric_obj.name
if in_call_context:
# TF Function path should take the eager path.
self._symbolic_add_metric(value, aggregation, name)
else:
if not is_symbolic:
raise ValueError('Expected a symbolic Tensor for the metric value, '
'received: ' + str(value))
# Possibly a metric was added in a Layer's `build`.
if not getattr(self, '_is_graph_network', False):
with backend.get_graph().as_default():
self._symbolic_add_metric(value, aggregation, name)
return
if from_metric_obj:
raise ValueError('Using the result of calling a `Metric` object '
'when calling `add_metric` on a Functional '
'Model is not supported. Please pass the '
'Tensor to monitor directly.')
# Insert layers into the Keras Graph Network.
self._graph_network_add_metric(value, aggregation, name)
@deprecation.deprecated_args(None, '`inputs` is now automatically inferred',
'inputs')
@doc_controls.for_subclass_implementers
def add_update(self, updates, inputs=None):
"""Add update op(s), potentially dependent on layer inputs.
Weight updates (for instance, the updates of the moving mean and variance
in a BatchNormalization layer) may be dependent on the inputs passed
when calling a layer. Hence, when reusing the same layer on
different inputs `a` and `b`, some entries in `layer.updates` may be
dependent on `a` and some on `b`. This method automatically keeps track
of dependencies.
The `get_updates_for` method allows you to retrieve the updates relevant to a
specific set of inputs.
This call is ignored when eager execution is enabled (in that case, variable
updates are run on the fly and thus do not need to be tracked for later
execution).
Arguments:
updates: Update op, or list/tuple of update ops, or zero-arg callable
that returns an update op. A zero-arg callable should be passed in
order to disable running the updates by setting `trainable=False`
on this Layer, when executing in Eager mode.
inputs: Deprecated, will be automatically inferred.
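Example (a sketch of a moving-average update registered from `call`; the
layer name and decay rate are illustrative assumptions):
```python
import tensorflow as tf

class MovingAverage(tf.keras.layers.Layer):

  def build(self, input_shape):
    self.average = self.add_weight(
        'average', shape=input_shape[1:], initializer='zeros',
        trainable=False)
    super(MovingAverage, self).build(input_shape)

  def call(self, inputs):
    new_average = 0.9 * self.average + 0.1 * tf.reduce_mean(inputs, axis=0)
    # A zero-arg callable lets the framework skip the update when the
    # layer is frozen (`trainable = False`).
    self.add_update(lambda: self.average.assign(new_average))
    return inputs
```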
"""
call_context = base_layer_utils.call_context()
if (ds_context.has_strategy() and
ds_context.in_cross_replica_context() and
# When saving the model, the distribution strategy context should be
# ignored, following the default path for adding updates.
not call_context.saving):
# Updates don't need to be run in a cross-replica context.
return
updates = generic_utils.to_list(updates)
if call_context.in_call:
relevant_inputs = call_context.inputs
else:
inbound_nodes = getattr(self, '_inbound_nodes', [])
relevant_inputs = [node.input_tensors for node in inbound_nodes]
def process_update(x):
"""Standardize update ops.
Arguments:
x: Tensor, op, or callable.
Returns:
An update op.
"""
if callable(x):
update = lambda: process_update(x())
return update()
elif isinstance(x, ops.Operation):
update = x
elif hasattr(x, 'op'):
update = x.op
else:
update = ops.convert_to_tensor_v2(x)
reachable = tf_utils.get_reachable_from_inputs(relevant_inputs, [update])
update._unconditional_update = update not in reachable
return update
updates = [process_update(x) for x in updates]
self._updates.extend(updates)
def set_weights(self, weights):
"""Sets the weights of the layer, from Numpy arrays.
The weights of a layer represent the state of the layer. This function
sets the weight values from numpy arrays. The weight values should be
passed in the order they are created by the layer. Note that the layer's
weights must be instantiated before calling this function by calling
the layer.
For example, a Dense layer returns a list of two values: per-output
weights and the bias value. These can be used to set the weights of another
Dense layer:
>>> a = tf.keras.layers.Dense(1,
... kernel_initializer=tf.constant_initializer(1.))
>>> a_out = a(tf.convert_to_tensor([[1., 2., 3.]]))
>>> a.get_weights()
[array([[1.],
[1.],
[1.]], dtype=float32), array([0.], dtype=float32)]
>>> b = tf.keras.layers.Dense(1,
... kernel_initializer=tf.constant_initializer(2.))
>>> b_out = b(tf.convert_to_tensor([[10., 20., 30.]]))
>>> b.get_weights()
[array([[2.],
[2.],
[2.]], dtype=float32), array([0.], dtype=float32)]
>>> b.set_weights(a.get_weights())
>>> b.get_weights()
[array([[1.],
[1.],
[1.]], dtype=float32), array([0.], dtype=float32)]
Arguments:
weights: A list of Numpy arrays. The number
of arrays and their shapes must match
the number and shapes of the weights
of the layer (i.e. it should match the
output of `get_weights`).
Raises:
ValueError: If the provided weights list does not match the
layer's specifications.
"""
params = self.weights
expected_num_weights = 0
for param in params:
if isinstance(param, base_layer_utils.TrackableWeightHandler):
expected_num_weights += param.num_tensors
else:
expected_num_weights += 1
if expected_num_weights != len(weights):
raise ValueError(
'You called `set_weights(weights)` on layer "%s" '
'with a weight list of length %s, but the layer was '
'expecting %s weights. Provided weights: %s...' %
(self.name, len(weights), expected_num_weights, str(weights)[:50]))
weight_index = 0
weight_value_tuples = []
for param in params:
if isinstance(param, base_layer_utils.TrackableWeightHandler):
num_tensors = param.num_tensors
tensors = weights[weight_index:weight_index + num_tensors]
param.set_weights(tensors)
weight_index += num_tensors
else:
weight = weights[weight_index]
ref_shape = param.shape
if not ref_shape.is_compatible_with(weight.shape):
raise ValueError(
'Layer weight shape %s not compatible with provided weight '
'shape %s' % (ref_shape, weight.shape))
weight_value_tuples.append((param, weight))
weight_index += 1
backend.batch_set_value(weight_value_tuples)
def get_weights(self):
"""Returns the current weights of the layer.
The weights of a layer represent the state of the layer. This function
returns both trainable and non-trainable weight values associated with this
layer as a list of Numpy arrays, which can in turn be used to load state
into similarly parameterized layers.
For example, a Dense layer returns a list of two values: per-output
weights and the bias value. These can be used to set the weights of another
Dense layer:
>>> a = tf.keras.layers.Dense(1,
... kernel_initializer=tf.constant_initializer(1.))
>>> a_out = a(tf.convert_to_tensor([[1., 2., 3.]]))
>>> a.get_weights()
[array([[1.],
[1.],
[1.]], dtype=float32), array([0.], dtype=float32)]
>>> b = tf.keras.layers.Dense(1,
... kernel_initializer=tf.constant_initializer(2.))
>>> b_out = b(tf.convert_to_tensor([[10., 20., 30.]]))
>>> b.get_weights()
[array([[2.],
[2.],
[2.]], dtype=float32), array([0.], dtype=float32)]
>>> b.set_weights(a.get_weights())
>>> b.get_weights()
[array([[1.],
[1.],
[1.]], dtype=float32), array([0.], dtype=float32)]
Returns:
Weights values as a list of numpy arrays.
"""
weights = self.weights
output_weights = []
for weight in weights:
if isinstance(weight, base_layer_utils.TrackableWeightHandler):
output_weights.extend(weight.get_tensors())
else:
output_weights.append(weight)
return backend.batch_get_value(output_weights)
def get_updates_for(self, inputs):
"""Retrieves updates relevant to a specific set of inputs.
Arguments:
inputs: Input tensor or list/tuple of input tensors.
Returns:
List of update ops of the layer that depend on `inputs`.
"""
if inputs is None:
# Requesting unconditional updates.
return [u for u in self.updates if u._unconditional_update]
# Requesting input-conditional updates.
updates = [u for u in self.updates if not u._unconditional_update]
inputs = nest.flatten(inputs)
reachable = tf_utils.get_reachable_from_inputs(inputs, updates)
return [u for u in updates if u in reachable]
def get_losses_for(self, inputs):
"""Retrieves losses relevant to a specific set of inputs.
Arguments:
inputs: Input tensor or list/tuple of input tensors.
Returns:
List of loss tensors of the layer that depend on `inputs`.
"""
if inputs is None:
# Requesting unconditional losses.
return [l for l in self.losses if l._unconditional_loss]
# Requesting input-conditional losses.
losses = [l for l in self.losses if not l._unconditional_loss]
inputs = nest.flatten(inputs)
reachable = tf_utils.get_reachable_from_inputs(inputs, losses)
return [l for l in losses if l in reachable]
def get_input_mask_at(self, node_index):
"""Retrieves the input mask tensor(s) of a layer at a given node.
Arguments:
node_index: Integer, index of the node
from which to retrieve the attribute.
E.g. `node_index=0` will correspond to the
first time the layer was called.
Returns:
A mask tensor
(or list of tensors if the layer has multiple inputs).
"""
inputs = self.get_input_at(node_index)
if isinstance(inputs, list):
return [getattr(x, '_keras_mask', None) for x in inputs]
else:
return getattr(inputs, '_keras_mask', None)
def get_output_mask_at(self, node_index):
"""Retrieves the output mask tensor(s) of a layer at a given node.
Arguments:
node_index: Integer, index of the node
from which to retrieve the attribute.
E.g. `node_index=0` will correspond to the
first time the layer was called.
Returns:
A mask tensor
(or list of tensors if the layer has multiple outputs).
"""
output = self.get_output_at(node_index)
if isinstance(output, list):
return [getattr(x, '_keras_mask', None) for x in output]
else:
return getattr(output, '_keras_mask', None)
@property
def input_mask(self):
"""Retrieves the input mask tensor(s) of a layer.
Only applicable if the layer has exactly one inbound node,
i.e. if it is connected to one incoming layer.
Returns:
Input mask tensor (potentially None) or list of input
mask tensors.
Raises:
AttributeError: if the layer is connected to
more than one incoming layer.
"""
inputs = self.input
if isinstance(inputs, list):
return [getattr(x, '_keras_mask', None) for x in inputs]
else:
return getattr(inputs, '_keras_mask', None)
@property
def output_mask(self):
"""Retrieves the output mask tensor(s) of a layer.
Only applicable if the layer has exactly one inbound node,
i.e. if it is connected to one incoming layer.
Returns:
Output mask tensor (potentially None) or list of output
mask tensors.
Raises:
AttributeError: if the layer is connected to
more than one incoming layer.
"""
output = self.output
if isinstance(output, list):
return [getattr(x, '_keras_mask', None) for x in output]
else:
return getattr(output, '_keras_mask', None)
def get_input_shape_at(self, node_index):
"""Retrieves the input shape(s) of a layer at a given node.
Arguments:
node_index: Integer, index of the node
from which to retrieve the attribute.
E.g. `node_index=0` will correspond to the
first time the layer was called.
Returns:
A shape tuple
(or list of shape tuples if the layer has multiple inputs).
Raises:
RuntimeError: If called in Eager mode.
"""
return self._get_node_attribute_at_index(node_index, 'input_shapes',
'input shape')
def get_output_shape_at(self, node_index):
"""Retrieves the output shape(s) of a layer at a given node.
Arguments:
node_index: Integer, index of the node
from which to retrieve the attribute.
E.g. `node_index=0` will correspond to the
first time the layer was called.
Returns:
A shape tuple
(or list of shape tuples if the layer has multiple outputs).
Raises:
RuntimeError: If called in Eager mode.
"""
return self._get_node_attribute_at_index(node_index, 'output_shapes',
'output shape')
def get_input_at(self, node_index):
"""Retrieves the input tensor(s) of a layer at a given node.
Arguments:
node_index: Integer, index of the node
from which to retrieve the attribute.
E.g. `node_index=0` will correspond to the
first time the layer was called.
Returns:
A tensor (or list of tensors if the layer has multiple inputs).
Raises:
RuntimeError: If called in Eager mode.
"""
return self._get_node_attribute_at_index(node_index, 'input_tensors',
'input')
def get_output_at(self, node_index):
"""Retrieves the output tensor(s) of a layer at a given node.
Arguments:
node_index: Integer, index of the node
from which to retrieve the attribute.
E.g. `node_index=0` will correspond to the
first time the layer was called.
Returns:
A tensor (or list of tensors if the layer has multiple outputs).
Raises:
RuntimeError: If called in Eager mode.
"""
return self._get_node_attribute_at_index(node_index, 'output_tensors',
'output')
@property
def input(self):
"""Retrieves the input tensor(s) of a layer.
Only applicable if the layer has exactly one input,
i.e. if it is connected to one incoming layer.
Returns:
Input tensor or list of input tensors.
Raises:
RuntimeError: If called in Eager mode.
AttributeError: If no inbound nodes are found.
"""
if not self._inbound_nodes:
raise AttributeError('Layer ' + self.name +
' is not connected, no input to return.')
return self._get_node_attribute_at_index(0, 'input_tensors', 'input')
@property
def output(self):
"""Retrieves the output tensor(s) of a layer.
Only applicable if the layer has exactly one output,
i.e. if it is connected to one incoming layer.
Returns:
Output tensor or list of output tensors.
Raises:
AttributeError: if the layer is connected to more than one incoming
        layer.
RuntimeError: if called in Eager mode.
"""
if not self._inbound_nodes:
raise AttributeError('Layer ' + self.name + ' has no inbound nodes.')
return self._get_node_attribute_at_index(0, 'output_tensors', 'output')
@property
def input_shape(self):
"""Retrieves the input shape(s) of a layer.
Only applicable if the layer has exactly one input,
i.e. if it is connected to one incoming layer, or if all inputs
have the same shape.
Returns:
Input shape, as an integer shape tuple
(or list of shape tuples, one tuple per input tensor).
Raises:
AttributeError: if the layer has no defined input_shape.
RuntimeError: if called in Eager mode.
"""
if not self._inbound_nodes:
raise AttributeError('The layer has never been called '
'and thus has no defined input shape.')
all_input_shapes = set(
[str(node.input_shapes) for node in self._inbound_nodes])
if len(all_input_shapes) == 1:
return self._inbound_nodes[0].input_shapes
else:
raise AttributeError('The layer "' + str(self.name) +
                           '" has multiple inbound nodes, '
'with different input shapes. Hence '
'the notion of "input shape" is '
'ill-defined for the layer. '
'Use `get_input_shape_at(node_index)` '
'instead.')
def count_params(self):
"""Count the total number of scalars composing the weights.
Returns:
An integer count.
Raises:
ValueError: if the layer isn't yet built
(in which case its weights aren't yet defined).
"""
if not self.built:
if getattr(self, '_is_graph_network', False):
with tf_utils.maybe_init_scope(self):
self._maybe_build(self.inputs)
else:
raise ValueError('You tried to call `count_params` on ' + self.name +
', but the layer isn\'t built. '
'You can build it manually via: `' + self.name +
'.build(batch_input_shape)`.')
return layer_utils.count_params(self.weights)
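  # Illustrative sketch (not part of the original file): for a Dense(10) layer
  # built on an input of shape (None, 5), `count_params()` would return
  # 5 * 10 kernel weights + 10 biases = 60 scalars.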
@property
def output_shape(self):
"""Retrieves the output shape(s) of a layer.
Only applicable if the layer has one output,
or if all outputs have the same shape.
Returns:
Output shape, as an integer shape tuple
(or list of shape tuples, one tuple per output tensor).
Raises:
AttributeError: if the layer has no defined output shape.
RuntimeError: if called in Eager mode.
"""
if not self._inbound_nodes:
raise AttributeError('The layer has never been called '
'and thus has no defined output shape.')
all_output_shapes = set(
[str(node.output_shapes) for node in self._inbound_nodes])
if len(all_output_shapes) == 1:
return self._inbound_nodes[0].output_shapes
else:
raise AttributeError('The layer "%s"'
' has multiple inbound nodes, '
'with different output shapes. Hence '
'the notion of "output shape" is '
'ill-defined for the layer. '
'Use `get_output_shape_at(node_index)` '
'instead.' % self.name)
@property
@doc_controls.do_not_doc_inheritable
def inbound_nodes(self):
"""Deprecated, do NOT use! Only for compatibility with external Keras."""
return self._inbound_nodes
@property
@doc_controls.do_not_doc_inheritable
def outbound_nodes(self):
"""Deprecated, do NOT use! Only for compatibility with external Keras."""
return self._outbound_nodes
##############################################################################
# Methods & attributes below are public aliases of other methods. #
##############################################################################
@deprecation.deprecated(
date=None, instructions='Please use `layer.__call__` method instead.')
@doc_controls.do_not_doc_inheritable
def apply(self, inputs, *args, **kwargs):
"""Deprecated, do NOT use!
This is an alias of `self.__call__`.
Arguments:
inputs: Input tensor(s).
*args: additional positional arguments to be passed to `self.call`.
**kwargs: additional keyword arguments to be passed to `self.call`.
Returns:
Output tensor(s).
"""
return self.__call__(inputs, *args, **kwargs)
@deprecation.deprecated(
date=None, instructions='Please use `layer.add_weight` method instead.')
@doc_controls.do_not_doc_inheritable
def add_variable(self, *args, **kwargs):
"""Deprecated, do NOT use! Alias for `add_weight`."""
return self.add_weight(*args, **kwargs)
@property
def variables(self):
"""Returns the list of all layer variables/weights.
Alias of `self.weights`.
Returns:
A list of variables.
"""
return self.weights
@property
def trainable_variables(self):
return self.trainable_weights
@property
def non_trainable_variables(self):
return self.non_trainable_weights
##############################################################################
# Methods & attributes below are all private and only used by the framework. #
##############################################################################
def _set_dtype_policy(self, dtype):
"""Sets self._dtype_policy."""
if isinstance(dtype, policy.Policy):
self._dtype_policy = dtype
elif isinstance(dtype, dict):
self._dtype_policy = policy.deserialize(dtype)
elif dtype:
self._dtype_policy = policy.Policy(dtypes.as_dtype(dtype).name)
else:
self._dtype_policy = policy.global_policy()
if (self._dtype_policy.name == 'mixed_float16' and
not loss_scale_optimizer.strategy_supports_loss_scaling()):
      # Although it is only loss scaling that does not support certain
      # strategies, we disallow the 'mixed_float16' policy with unsupported
      # strategies altogether to avoid confusion, because 'mixed_float16'
      # requires loss scaling for numeric stability.
strategy = ds_context.get_strategy()
raise ValueError('Mixed precision is not supported with the '
'tf.distribute.Strategy: %s. Either stop using mixed '
'precision by removing the use of the "%s" policy or '
'use a different Strategy, e.g. a MirroredStrategy.' %
(strategy.__class__.__name__, self._dtype_policy.name))
# This has no impact on the layer behavior, and is only used for printing
# warnings.
self._dtype_defaulted_to_floatx = (not dtype and
policy.policy_defaults_to_floatx())
# TODO(reedwm): Expose this property?
@property
def _compute_dtype(self):
"""The layer's compute dtype.
Unless mixed-precision is used, this is the same as `Layer.dtype`.
    If self._autocast is True, the layer will cast floating-point inputs to this.
Returns:
The layer's compute dtype.
"""
return self._dtype_policy.compute_dtype
def _maybe_cast_inputs(self, inputs):
"""Maybe casts the inputs to the compute dtype.
    If self._compute_dtype is floating-point, and self._autocast is True,
    floating-point inputs are cast to self._compute_dtype.
Args:
inputs: Input tensor, or structure of input tensors.
Returns:
`inputs`, but tensors may have been casted to self._compute_dtype
"""
compute_dtype = self._compute_dtype
if (self._autocast and compute_dtype and
dtypes.as_dtype(compute_dtype).is_floating):
def f(x):
"""Cast a single Tensor or TensorSpec to the compute dtype."""
cast_types = (ops.Tensor, sparse_tensor.SparseTensor,
ragged_tensor.RaggedTensor)
if (isinstance(x, cast_types) and x.dtype.is_floating and
x.dtype.base_dtype.name != compute_dtype):
if self._dtype_defaulted_to_floatx:
self._warn_about_input_casting(x.dtype.base_dtype)
return math_ops.cast(x, compute_dtype)
elif isinstance(x, tensor_spec.TensorSpec) and x.dtype.is_floating:
# Inputs may be TensorSpecs when this function is called from
# model._set_inputs.
return tensor_spec.TensorSpec(x.shape, compute_dtype, x.name)
else:
return x
return nest.map_structure(f, inputs)
else:
return inputs
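  # Illustrative sketch (not part of the original file): under a
  # 'mixed_float16' dtype policy (set globally via the mixed-precision API of
  # this TF generation; exact API path varies by version), a float32 input
  # passed to an autocasting layer would be cast here to float16 before
  # `call`, while the layer's variables remain float32. For example:
  #
  #   # assuming the policy is 'mixed_float16' and self._autocast is True
  #   y = layer(tf.ones((2, 2), dtype='float32'))   # computed in float16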
def _warn_about_input_casting(self, input_dtype):
# self._already_warned_about_input_casting is only retrieved or set in this
# function.
already_warned = getattr(self, '_already_warned_about_input_casting', False)
if not already_warned:
tf_logging.warn(
"Layer {self.name} is casting an input tensor from dtype "
"{input_dtype} to the layer's dtype of {layer_dtype}, which is new "
"behavior in TensorFlow 2. The layer has dtype {layer_dtype} "
'because its dtype defaults to floatx.\n\n'
""
"If you intended to run this layer in {layer_dtype}, you can safely "
"ignore this warning. If in doubt, this warning is likely only an "
"issue if you are porting a TensorFlow 1.X model to TensorFlow 2.\n\n"
""
"To change all layers to have dtype {input_dtype} by default, call "
"`tf.keras.backend.set_floatx('{input_dtype}')`. To change just this "
"layer, pass dtype='{input_dtype}' to the layer constructor. If you "
"are the author of this layer, you can disable autocasting by "
"passing autocast=False to the base Layer constructor.\n".format(
self=self,
input_dtype=input_dtype.name,
layer_dtype=self._compute_dtype))
self._already_warned_about_input_casting = True
# _dtype used to be an attribute set in the constructor. We still expose it
# because some clients still use it.
# TODO(reedwm): Deprecate, then remove the _dtype property.
@property
def _dtype(self):
# This is equivalent to returning self.dtype . We do not return self.dtype
# as it would cause infinite recursion in a few subclasses, which override
# "dtype" to return self._dtype.
return self._dtype_policy.variable_dtype
@_dtype.setter
def _dtype(self, value):
value = dtypes.as_dtype(value).name
self._dtype_policy = policy.Policy(value)
def _name_scope(self):
return self.name
def _init_set_name(self, name, zero_based=True):
if not name:
self._name = backend.unique_object_name(
generic_utils.to_snake_case(self.__class__.__name__),
zero_based=zero_based)
else:
self._name = name
def _get_existing_metric(self, name=None):
match = [m for m in self._metrics if m.name == name]
if not match:
return
if len(match) > 1:
raise ValueError(
'Please provide different names for the metrics you have added. '
'We found {} metrics with the name: "{}"'.format(len(match), name))
return match[0]
def _symbolic_add_metric(self, value, aggregation=None, name=None):
base_layer_utils.check_graph_consistency(value, method='add_metric')
match = self._get_existing_metric(name)
if aggregation is None:
# Iterate over the metrics and check if the given metric exists already.
# This can happen when a metric instance is created in subclassed model
# layer `__init__` and we have tracked that instance already in
# model.__setattr__.
if match:
result_tensor = value
metric_obj = match
elif hasattr(value, '_metric_obj'):
# We track the instance using the metadata on the result tensor.
result_tensor = value
metric_obj = result_tensor._metric_obj
self._metrics.append(metric_obj)
else:
raise ValueError(
'We do not support adding an aggregated metric result tensor that '
'is not the output of a `tf.keras.metrics.Metric` metric instance. '
'Without having access to the metric instance we cannot reset the '
'state of a metric after every epoch during training. You can '
'create a `tf.keras.metrics.Metric` instance and pass the result '
'here or pass an un-aggregated result with `aggregation` parameter '
'set as `mean`. For example: `self.add_metric(tf.reduce_sum(inputs)'
', name=\'mean_activation\', aggregation=\'mean\')`')
else:
# If a non-aggregated tensor is given as input (ie. `aggregation` is
# explicitly set to `mean`), we wrap the tensor in `Mean` metric.
if match:
result_tensor = match(value)
metric_obj = match
else:
metric_obj, result_tensor = base_layer_utils.create_mean_metric(
value, name)
self._metrics.append(metric_obj)
def _handle_weight_regularization(self, name, variable, regularizer):
"""Create lambdas which compute regularization losses."""
def _loss_for_variable(v):
"""Creates a regularization loss `Tensor` for variable `v`."""
with backend.name_scope(name + '/Regularizer'):
regularization = regularizer(v)
return regularization
if isinstance(variable, tf_variables.PartitionedVariable):
for v in variable:
self.add_loss(functools.partial(_loss_for_variable, v))
else:
self.add_loss(functools.partial(_loss_for_variable, variable))
def _handle_activity_regularization(self, inputs, outputs):
# Apply activity regularization.
# Note that it should be applied every time the layer creates a new
# output, since it is output-specific.
if self._activity_regularizer:
output_list = nest.flatten(outputs)
with backend.name_scope('ActivityRegularizer'):
for output in output_list:
activity_loss = self._activity_regularizer(output)
batch_size = math_ops.cast(
array_ops.shape(output)[0], activity_loss.dtype)
# Make activity regularization strength batch-agnostic.
mean_activity_loss = activity_loss / batch_size
base_layer_utils.check_graph_consistency(
mean_activity_loss, method='activity_regularizer')
self.add_loss(mean_activity_loss, inputs=inputs)
def _set_mask_metadata(self, inputs, outputs, previous_mask):
flat_outputs = nest.flatten(outputs)
mask_already_computed = (
getattr(self, '_compute_output_and_mask_jointly', False) or
all(getattr(x, '_keras_mask', None) is not None for x in flat_outputs))
# Only compute the mask if the Layer explicitly supports masking or has
# overridden `compute_mask`.
should_compute_mask = (
hasattr(self, 'compute_mask') and
(self.supports_masking or
not getattr(self.compute_mask, '_is_default', False)))
if mask_already_computed:
flat_masks = [getattr(x, '_keras_mask', None) for x in flat_outputs]
elif not should_compute_mask:
flat_masks = [None for _ in flat_outputs]
else:
output_masks = self.compute_mask(inputs, previous_mask)
# `compute_mask` can return a single `None` even when a Layer
# has multiple outputs.
if output_masks is None:
flat_masks = [None for _ in flat_outputs]
else:
flat_masks = nest.flatten(output_masks)
for output, mask in zip(flat_outputs, flat_masks):
try:
output._keras_mask = mask
except AttributeError:
# C Type such as np.ndarray.
pass
if tf_utils.are_all_symbolic_tensors(flat_outputs):
for output in flat_outputs:
if getattr(output, '_keras_mask', None) is not None:
# Do not track masks for `TensorFlowOpLayer` construction.
output._keras_mask._keras_history_checked = True
def _collect_input_masks(self, inputs, args, kwargs):
"""Checks if `mask` argument was passed, else gathers mask from inputs."""
if self._call_arg_was_passed('mask', args, kwargs):
return self._get_call_arg_value('mask', args, kwargs)
if not self._should_compute_mask:
return None
input_masks = nest.map_structure(lambda t: getattr(t, '_keras_mask', None),
inputs)
if generic_utils.is_all_none(input_masks):
return None
return input_masks
def _call_arg_was_passed(self, arg_name, args, kwargs, inputs_in_args=False):
if arg_name in kwargs:
return True
call_fn_args = self._call_fn_args
if not inputs_in_args:
# Ignore `inputs` arg.
call_fn_args = call_fn_args[1:]
if arg_name in dict(zip(call_fn_args, args)):
return True
return False
def _get_call_arg_value(self, arg_name, args, kwargs, inputs_in_args=False):
if arg_name in kwargs:
return kwargs[arg_name]
call_fn_args = self._call_fn_args
if not inputs_in_args:
# Ignore `inputs` arg.
call_fn_args = call_fn_args[1:]
args_dict = dict(zip(call_fn_args, args))
return args_dict[arg_name]
def _set_call_arg_value(
self, arg_name, new_value, args,
kwargs, inputs_in_args=False, pop_kwarg_if_none=False):
arg_pos = self._call_fn_arg_positions.get(arg_name, None)
if arg_pos is not None:
if not inputs_in_args:
# Ignore `inputs` arg.
arg_pos = arg_pos - 1
if len(args) > arg_pos:
args = list(args)
args[arg_pos] = new_value
return args, kwargs
if new_value is None and pop_kwarg_if_none:
kwargs.pop(arg_name, None)
else:
kwargs[arg_name] = new_value
return args, kwargs
def _get_node_attribute_at_index(self, node_index, attr, attr_name):
"""Private utility to retrieves an attribute (e.g. inputs) from a node.
This is used to implement the methods:
- get_input_shape_at
- get_output_shape_at
- get_input_at
etc...
Arguments:
node_index: Integer index of the node from which
to retrieve the attribute.
attr: Exact node attribute name.
attr_name: Human-readable attribute name, for error messages.
Returns:
The layer's attribute `attr` at the node of index `node_index`.
Raises:
RuntimeError: If the layer has no inbound nodes, or if called in Eager
mode.
ValueError: If the index provided does not match any node.
"""
if not self._inbound_nodes:
raise RuntimeError('The layer has never been called '
'and thus has no defined ' + attr_name + '.')
if not len(self._inbound_nodes) > node_index:
raise ValueError('Asked to get ' + attr_name + ' at node ' +
str(node_index) + ', but the layer has only ' +
str(len(self._inbound_nodes)) + ' inbound nodes.')
values = getattr(self._inbound_nodes[node_index], attr)
if isinstance(values, list) and len(values) == 1:
return values[0]
else:
return values
def _maybe_build(self, inputs):
# Check input assumptions set before layer building, e.g. input rank.
if not self.built:
input_spec.assert_input_compatibility(
self.input_spec, inputs, self.name)
input_list = nest.flatten(inputs)
if input_list and self._dtype_policy.compute_dtype is None:
try:
dtype = input_list[0].dtype.base_dtype.name
except AttributeError:
pass
else:
self._dtype_policy = policy.Policy(dtype)
input_shapes = None
if all(hasattr(x, 'shape') for x in input_list):
input_shapes = nest.map_structure(lambda x: x.shape, inputs)
# Only call `build` if the user has manually overridden the build method.
if not hasattr(self.build, '_is_default'):
# Any setup work performed only once should happen in an `init_scope`
# to avoid creating symbolic Tensors that will later pollute any eager
# operations.
with tf_utils.maybe_init_scope(self):
self.build(input_shapes)
# We must set self.built since user defined build functions are not
# constrained to set self.built.
self.built = True
# Optionally load weight values specified at layer instantiation.
if self._initial_weights is not None:
self.set_weights(self._initial_weights)
self._initial_weights = None
def _symbolic_call(self, inputs):
input_shapes = nest.map_structure(lambda x: x.shape, inputs)
output_shapes = self.compute_output_shape(input_shapes)
def _make_placeholder_like(shape):
ph = backend.placeholder(shape=shape, dtype=self.dtype)
ph._keras_mask = None
return ph
return nest.map_structure(_make_placeholder_like, output_shapes)
def _get_trainable_state(self):
"""Get the `trainable` state of each sublayer.
Returns:
A dict mapping all sublayers to their `trainable` value.
"""
layers = trackable_layer_utils.filter_empty_layer_containers(self._layers)
# Keep track of each top-level layers' `trainable` as well as the
# state of all of its sublayers.
trainable_state = {self: self.trainable}
for layer in layers:
trainable_state.update(layer._get_trainable_state())
return trainable_state
def _set_trainable_state(self, trainable_state):
"""Set `trainable` state for each sublayer."""
layers = trackable_layer_utils.filter_empty_layer_containers(self._layers)
if self in trainable_state:
self.trainable = trainable_state[self]
for layer in layers:
layer._set_trainable_state(trainable_state)
@property
def _obj_reference_counts(self):
"""A dictionary counting the number of attributes referencing an object."""
self._maybe_create_attribute('_obj_reference_counts_dict',
object_identity.ObjectIdentityDictionary())
return self._obj_reference_counts_dict
@trackable.no_automatic_dependency_tracking
def _maybe_create_attribute(self, name, default_value):
"""Create the attribute with the default value if it hasn't been created.
    This is useful for fields that are used for tracking purposes, such as
    _trainable_weights or _layers. Note that a user could create a layer
    subclass and assign an internal field before invoking Layer.__init__();
    in that case __setattr__() needs to create the tracking fields and
    __init__() must not override them.
Args:
name: String, the name of the attribute.
default_value: Object, the default value of the attribute.
"""
if not hasattr(self, name):
super(Layer, self).__setattr__(name, default_value)
def __delattr__(self, name):
    # For any super.__delattr__() call, we will directly use the implementation
    # in Trackable and skip the behavior in AutoTrackable. The Layer originally
    # used Trackable as its base class; the switch to Module as the base class
    # forced us to have AutoTrackable in the class hierarchy. Skipping the
    # __delattr__ and __setattr__ in AutoTrackable keeps the status quo.
existing_value = getattr(self, name, None)
# If this value is replacing an existing object assigned to an attribute, we
# should clean it out to avoid leaking memory. First we check if there are
# other attributes referencing it.
reference_counts = self._obj_reference_counts
if existing_value not in reference_counts:
super(tracking.AutoTrackable, self).__delattr__(name)
return
reference_count = reference_counts[existing_value]
if reference_count > 1:
# There are other remaining references. We can't remove this object from
# _layers etc.
reference_counts[existing_value] = reference_count - 1
super(tracking.AutoTrackable, self).__delattr__(name)
return
else:
# This is the last remaining reference.
del reference_counts[existing_value]
super(tracking.AutoTrackable, self).__delattr__(name)
if (isinstance(existing_value, Layer)
or trackable_layer_utils.has_weights(existing_value)):
super(tracking.AutoTrackable, self).__setattr__(
'_layers',
[l for l in self._layers if l is not existing_value])
self._attribute_sentinel.invalidate_all()
if isinstance(existing_value, tf_variables.Variable):
super(tracking.AutoTrackable, self).__setattr__(
'_trainable_weights',
[w for w in self._trainable_weights if w is not existing_value])
super(tracking.AutoTrackable, self).__setattr__(
'_non_trainable_weights',
[w for w in self._non_trainable_weights if w is not existing_value])
# Any time we change `_layers` (either by deleting the attribute or by
# reassigning it which will call __delattr__ from __setattr__) the topology
# of the subgraph of Layers may change. In that case we will need to
# recompute any attribute which depends on that subgraph.
if name == '_layers':
self._attribute_sentinel.invalidate_all()
def __setattr__(self, name, value):
if (name == '_self_setattr_tracking' or
not getattr(self, '_self_setattr_tracking', True) or
# Exclude @property.setters from tracking
hasattr(self.__class__, name)):
try:
super(tracking.AutoTrackable, self).__setattr__(name, value)
except AttributeError:
raise AttributeError(
('Can\'t set the attribute "{}", likely because it conflicts with '
'an existing read-only @property of the object. Please choose a '
'different name.').format(name))
return
# Keep track of trackable objects, for the needs of `Network.save_weights`.
value = data_structures.sticky_attribute_assignment(
trackable=self, value=value, name=name)
reference_counts = self._obj_reference_counts
reference_counts[value] = reference_counts.get(value, 0) + 1
# Clean out the old attribute, which clears _layers and _trainable_weights
# if necessary.
try:
self.__delattr__(name)
except AttributeError:
pass
# Keep track of metric instance created in subclassed layer.
from tensorflow.python.keras import metrics as metrics_module # pylint: disable=g-import-not-at-top
for val in nest.flatten(value):
if isinstance(val, metrics_module.Metric):
self._metrics.append(val)
# TODO(scottzhu): Need to track Module object as well for weight tracking.
# Be careful about metric if it becomes a Module in future.
# Append value to self._layers if relevant
if (getattr(self, '_auto_track_sub_layers', True) and
(isinstance(value, Layer) or trackable_layer_utils.has_weights(value))):
self._maybe_create_attribute('_layers', [])
# We need to check object identity to avoid de-duplicating empty
# container types which compare equal.
if not any((layer is value for layer in self._layers)):
self._layers.append(value)
if hasattr(value, '_attribute_sentinel'):
value._attribute_sentinel.add_parent(self._attribute_sentinel)
if hasattr(value, '_use_resource_variables'):
# Legacy layers (V1 tf.layers) must always use
# resource variables.
value._use_resource_variables = True
# Append value to list of trainable / non-trainable weights if relevant
# TODO(b/125122625): This won't pick up on any variables added to a
# list/dict after creation.
for val in nest.flatten(value):
# TODO(b/126450014): Remove `_UnreadVariable` check here when assign ops
# no longer return True for isinstance Variable checks.
if not isinstance(val, tf_variables.Variable):
continue
if isinstance(val, resource_variable_ops._UnreadVariable): # pylint: disable=protected-access
continue
# Users may add extra weights/variables
# simply by assigning them to attributes (invalid for graph networks)
self._maybe_create_attribute('_trainable_weights', [])
self._maybe_create_attribute('_non_trainable_weights', [])
if val.trainable:
if any(val is w for w in self._trainable_weights):
continue
self._trainable_weights.append(val)
else:
if any(val is w for w in self._non_trainable_weights):
continue
self._non_trainable_weights.append(val)
backend.track_variable(val)
# Skip the auto trackable from tf.Module to keep status quo. See the comment
# at __delattr__.
super(tracking.AutoTrackable, self).__setattr__(name, value)
def _gather_children_attribute(self, attribute):
assert attribute in {
'weights', 'trainable_weights', 'non_trainable_weights'
}
if hasattr(self, '_layers'):
nested_layers = trackable_layer_utils.filter_empty_layer_containers(
self._layers)
return list(
itertools.chain.from_iterable(
getattr(layer, attribute) for layer in nested_layers))
return []
def _gather_unique_layers(self):
"""Returns the current layer and all its children depth first deduped.
We are deduping after getting the layers to maintain the order.
"""
all_layers = self._gather_layers()
unique_layers, seen_layers = [], object_identity.ObjectIdentitySet()
for layer in all_layers:
if layer not in seen_layers:
unique_layers.append(layer)
# Track the Variable's identity to avoid __eq__ issues.
seen_layers.add(layer)
return unique_layers
def _gather_layers(self):
"""Returns the current layer and all its children depth first."""
all_layers = [self]
if hasattr(self, '_layers'):
child_layers = trackable_layer_utils.filter_empty_layer_containers(
self._layers)
for child_layer in child_layers:
all_layers.extend(child_layer._gather_layers())
return all_layers
@property
@tracking.cached_per_instance
def _attribute_sentinel(self):
return trackable_layer_utils.AttributeSentinel()
# This is a hack so that the is_layer (within
# training/trackable/layer_utils.py) check doesn't get the weights attr.
# TODO(b/110718070): Remove when fixed.
def _is_layer(self):
return True
def _init_call_fn_args(self):
# Clear cached call function arguments.
self.__class__._call_full_argspec.fget.cache.pop(self, None)
self.__class__._call_fn_args.fget.cache.pop(self, None)
self.__class__._call_accepts_kwargs.fget.cache.pop(self, None)
call_fn_args = self._call_fn_args
self._expects_training_arg = ('training' in call_fn_args or
self._call_accepts_kwargs)
self._expects_mask_arg = ('mask' in call_fn_args or
self._call_accepts_kwargs)
@property
@tracking.cached_per_instance
def _call_full_argspec(self):
# Argspec inspection is expensive and the call spec is used often, so it
# makes sense to cache the result.
return tf_inspect.getfullargspec(self.call)
@property
@tracking.cached_per_instance
def _call_fn_args(self):
all_args = self._call_full_argspec.args
# Scrub `self` that appears if a decorator was applied.
if all_args and all_args[0] == 'self':
return all_args[1:]
return all_args
@property
@tracking.cached_per_instance
def _call_fn_arg_positions(self):
call_fn_arg_positions = dict()
for pos, arg in enumerate(self._call_fn_args):
call_fn_arg_positions[arg] = pos
return call_fn_arg_positions
@property
@tracking.cached_per_instance
def _call_accepts_kwargs(self):
return self._call_full_argspec.varkw is not None
@property
@tracking.cached_per_instance
def _should_compute_mask(self):
return ('mask' in self._call_fn_args or
getattr(self, 'compute_mask', None) is not None)
def _dedup_weights(self, weights):
"""Dedupe weights while maintaining order as much as possible."""
output, seen_weights = [], object_identity.ObjectIdentitySet()
for w in weights:
if w not in seen_weights:
output.append(w)
# Track the Variable's identity to avoid __eq__ issues.
seen_weights.add(w)
return output
# SavedModel properties. Please see keras/saving/saved_model for details.
@property
def _trackable_saved_model_saver(self):
return layer_serialization.LayerSavedModelSaver(self)
@property
def _object_identifier(self):
return self._trackable_saved_model_saver.object_identifier
@property
def _tracking_metadata(self):
return self._trackable_saved_model_saver.tracking_metadata
def _list_extra_dependencies_for_serialization(self, serialization_cache):
return (self._trackable_saved_model_saver
.list_extra_dependencies_for_serialization(serialization_cache))
def _list_functions_for_serialization(self, serialization_cache):
return (self._trackable_saved_model_saver
.list_functions_for_serialization(serialization_cache))
def __getstate__(self):
# Override to support `copy.deepcopy` and pickling.
# Thread-local objects cannot be copied in Python 3, so pop these.
# Thread-local objects are used to cache losses in MirroredStrategy, and
# so shouldn't be copied.
state = self.__dict__.copy()
state.pop('_thread_local', None)
return state
def __setstate__(self, state):
state['_thread_local'] = threading.local()
# Bypass Trackable logic as `__dict__` already contains this info.
object.__setattr__(self, '__dict__', state)
class KerasHistory(
collections.namedtuple('KerasHistory',
['layer', 'node_index', 'tensor_index'])):
"""Tracks the Layer call that created a Tensor, for Keras Graph Networks.
During construction of Keras Graph Networks, this metadata is added to
each Tensor produced as the output of a Layer, starting with an
`InputLayer`. This allows Keras to track how each Tensor was produced, and
this information is later retraced by the `keras.engine.Network` class to
reconstruct the Keras Graph Network.
Attributes:
layer: The Layer that produced the Tensor.
node_index: The specific call to the Layer that produced this Tensor. Layers
can be called multiple times in order to share weights. A new node is
      created every time a Layer is called.
tensor_index: The output index for this Tensor. Always zero if the Layer
that produced this Tensor only has one output. Nested structures of
Tensors are deterministically assigned an index via `nest.flatten`.
"""
# Added to maintain memory and performance characteristics of `namedtuple`
# while subclassing.
__slots__ = ()
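# Illustrative sketch (not part of the original file): for a functional graph
# such as (names are assumptions)
#
#   x = tf.keras.Input((4,))
#   y = tf.keras.layers.Dense(2)(x)
#
# the output tensor carries `y._keras_history == KerasHistory(layer=<Dense>,
# node_index=0, tensor_index=0)`, which is what lets the Network class retrace
# how `y` was produced.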
# Avoid breaking users who directly import this symbol from this file.
# TODO(fchollet): remove this.
InputSpec = input_spec.InputSpec # pylint:disable=invalid-name
| 40.395016 | 108 | 0.680403 |
4a277f6613335521717bae9abcd3cfc03552a14b | 1,996 | py | Python | minions/views.py | net592/OneOps | 540898675385c982f4e196e2920e7bf7b251217c | ["Apache-2.0"] | 92 | 2016-04-29T05:22:02.000Z | 2022-03-07T15:16:19.000Z | minions/views.py | net592/OneOps | 540898675385c982f4e196e2920e7bf7b251217c | ["Apache-2.0"] | 2 | 2016-04-25T02:59:00.000Z | 2017-06-16T09:45:10.000Z | minions/views.py | net592/OneOps | 540898675385c982f4e196e2920e7bf7b251217c | ["Apache-2.0"] | 66 | 2016-04-29T05:22:06.000Z | 2021-12-15T03:33:22.000Z |
#coding=utf-8
from django.shortcuts import render
from django.contrib.auth.decorators import login_required
from shaker.shaker_core import *
from shaker.nodegroups import *
from minions.models import Minions_status
from returner.models import Salt_grains
# View for the minion (client) status page
@login_required(login_url="/account/login/")
def minions_status(request):
status = Minions_status.objects.all()
return render(request, 'minions/minions_status.html', {'status': status})
# View for minion key management (accept / delete / reject)
@login_required(login_url="/account/login/")
def minions_keys(request):
sapi = SaltAPI()
if request.POST:
        hostname = request.POST.get("accept")
        if hostname:
            sapi.accept_key(hostname)
        hostname = request.POST.get("delete")
        if hostname:
            sapi.delete_key(hostname)
            # filter().delete() avoids DoesNotExist when the record is missing
            Minions_status.objects.filter(minion_id=hostname).delete()
            Salt_grains.objects.filter(minion_id=hostname).delete()
        hostname = request.POST.get("reject")
        if hostname:
            sapi.reject_key(hostname)
keys_all = sapi.list_all_key()
return render(request, 'minions/minions_keys.html', {'key': keys_all})
# View that gathers minion hardware information
@login_required(login_url="/account/login/")
def minions_hardware_info(request):
sapi = SaltAPI()
up_host = sapi.runner_status('status')['up']
jid = []
disk_all = {}
for hostname in up_host:
info_all = sapi.remote_noarg_execution(hostname, 'grains.items')
disk_use = sapi.remote_noarg_execution(hostname, 'disk.usage')
for key in disk_use:
if disk_use[key]['capacity'] is None:
continue
disk_info = {key: int(disk_use[key]['capacity'][:-1])}
disk_all.update(disk_info)
disk_dic = {'disk': disk_all}
info_all.update(disk_dic)
disk_all = {}
jid += [info_all]
return render(request, 'minions/minions_hardware_info.html', {'jyp': jid})
@login_required(login_url="/account/login/")
def minions_servers_status(request):
return render(request, 'minions/minions_servers_status.html',)
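# Illustrative sketch (not part of the original file): these views would
# typically be wired up in a urls.py along these lines (URL patterns and the
# import style are assumptions, not taken from this project):
#
#   from django.conf.urls import url
#   from minions import views
#
#   urlpatterns = [
#       url(r'^minions/status/$', views.minions_status),
#       url(r'^minions/keys/$', views.minions_keys),
#       url(r'^minions/hardware/$', views.minions_hardware_info),
#   ]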
| 36.290909 | 78 | 0.690381 |
4a27802cc692acec12f1b597617f0671b36ad039 | 1,616 | py | Python | python/threeD.py | atsmith3/ifde | dcb7b412e2172a87f6928bdabdd21abc65d17fdf | ["MIT"] | null | null | null | python/threeD.py | atsmith3/ifde | dcb7b412e2172a87f6928bdabdd21abc65d17fdf | ["MIT"] | null | null | null | python/threeD.py | atsmith3/ifde | dcb7b412e2172a87f6928bdabdd21abc65d17fdf | ["MIT"] | null | null | null |
import open3d as o3d
import sys
import numpy as np
import pandas as pd
import csv
#check how many arguments
if(len(sys.argv) == 5):
print('Correct number of arguments')
'''
r = int(34)
g = int(200)
b = int(78)
rgb = (r << 16) | (g << 8) | b
print(str(rgb))
r_back = (rgb >> 16) & 0x0000ff;
g_back = (rgb >> 8) & 0x0000ff;
b_back = (rgb) & 0x0000ff;
print('\n')
print('\n')
print(str(r_back))
print('\n')
print(str(g_back))
print('\n')
print(str(b_back))
print('\n')
'''
#Get inputs
pathToCsv = sys.argv[1]
height = int(sys.argv[2])
width = int(sys.argv[3])
depths = int(sys.argv[4])
#Create a pcd file
pcdFilename = 'heyhey.pcd'
pcdFile = open(pcdFilename,'w')
pcdFile.write('VERSION .7\nFIELDS x y z rgb\nSIZE 4 4 4 4\nTYPE I I I F\nCOUNT 1 1 1 1\n')
pcdFile.write('WIDTH ' + str(width) + '\n')
pcdFile.write('HEIGHT ' + str(height) + '\n')
pcdFile.write('VIEWPOINT 0 0 0 1 0 0 0\n')
pcdFile.write('POINTS ' + str(width*height) + '\n')
pcdFile.write('DATA ascii\n')
#print('hello')
with open(pathToCsv) as csvfile:
readCSV = csv.reader(csvfile, delimiter=',')
for row in readCSV:
#read from csv
x = int(row[0]);
y = int(row[1]);
z = int(row[2]);
r = int(row[3]);
g = int(row[4]);
b = int(row[5]);
'''
x = 53
y= 42
z = 245
r = 82
g = 3
b = 0
'''
rgb = (r << 16) | (g << 8) | b
#write to pcd file
pcdFile.write(str(x) + ' ' + str(y) + ' ' + str(z) + ' ' + str(rgb) + '\n')
#Close pcd file
pcdFile.close()
#visualize
pcd = o3d.io.read_point_cloud(pcdFilename)  # read back the file written above
#print(pcd)
#print(np.asarray(pcd.points))
o3d.visualization.draw_geometries([pcd])
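# Illustrative usage sketch (not part of the original file), assuming a CSV of
# "x,y,z,r,g,b" rows and example dimensions:
#
#   python threeD.py points.csv 480 640 256
#
# which writes the generated .pcd file and then opens the Open3D viewer.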
| 17.758242 | 91 | 0.598391 |
4a2780fe78ce68b3e8cef71e44e48a5f43d5192f | 99,341 | py | Python | mmdet/datasets/pipelines/transforms.py | opencv/mmdetection | 6a7dfa5b954d6bbad7f8d33db8268b0fafc7d555 | ["Apache-2.0"] | 24 | 2020-04-15T14:54:44.000Z | 2020-08-12T12:45:57.000Z | mmdet/datasets/pipelines/transforms.py | opencv/mmdetection | 6a7dfa5b954d6bbad7f8d33db8268b0fafc7d555 | ["Apache-2.0"] | 46 | 2020-04-10T12:01:59.000Z | 2020-09-04T06:25:56.000Z | mmdet/datasets/pipelines/transforms.py | ovextra/mmdetection | 230fb739a50d8495e826013c7bbf64b7e0aff939 | ["Apache-2.0"] | 11 | 2020-04-16T17:55:29.000Z | 2020-08-25T11:13:58.000Z |
# Copyright (C) 2018-2021 OpenMMLab
# SPDX-License-Identifier: Apache-2.0
#
# Copyright (C) 2020-2021 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
#
import math
import copy
import cv2
import inspect
import mmcv
import numpy as np
from numpy import random
from mmdet.core import PolygonMasks
from mmdet.core.evaluation.bbox_overlaps import bbox_overlaps
from ..builder import PIPELINES
try:
from imagecorruptions import corrupt
except ImportError:
corrupt = None
try:
import albumentations
from albumentations import Compose
from .albumentations_extra import ALBUMENTATIONS_EXTRA
except ImportError:
albumentations = None
Compose = None
@PIPELINES.register_module()
class Resize(object):
"""Resize images & bbox & mask.
This transform resizes the input image to some scale. Bboxes and masks are
then resized with the same scale factor. If the input dict contains the key
"scale", then the scale in the input dict is used, otherwise the specified
scale in the init method is used. If the input dict contains the key
"scale_factor" (if MultiScaleFlipAug does not give img_scale but
scale_factor), the actual scale will be computed by image shape and
scale_factor.
`img_scale` can either be a tuple (single-scale) or a list of tuple
(multi-scale). There are 3 multiscale modes:
- ``ratio_range is not None``: randomly sample a ratio from the ratio \
range and multiply it with the image scale.
- ``ratio_range is None`` and ``multiscale_mode == "range"``: randomly \
sample a scale from the multiscale range.
- ``ratio_range is None`` and ``multiscale_mode == "value"``: randomly \
sample a scale from multiple scales.
Args:
img_scale (tuple or list[tuple]): Images scales for resizing.
multiscale_mode (str): Either "range" or "value".
ratio_range (tuple[float]): (min_ratio, max_ratio)
keep_ratio (bool): Whether to keep the aspect ratio when resizing the
image.
bbox_clip_border (bool, optional): Whether clip the objects outside
the border of the image. Defaults to True.
backend (str): Image resize backend, choices are 'cv2' and 'pillow'.
            These two backends generate slightly different results. Defaults
to 'cv2'.
override (bool, optional): Whether to override `scale` and
`scale_factor` so as to call resize twice. Default False. If True,
after the first resizing, the existed `scale` and `scale_factor`
will be ignored so the second resizing can be allowed.
This option is a work-around for multiple times of resize in DETR.
Defaults to False.
"""
def __init__(self,
img_scale=None,
multiscale_mode='range',
ratio_range=None,
keep_ratio=True,
bbox_clip_border=True,
backend='cv2',
override=False):
if img_scale is None:
self.img_scale = None
else:
if isinstance(img_scale, list):
self.img_scale = img_scale
else:
self.img_scale = [img_scale]
assert mmcv.is_list_of(self.img_scale, tuple)
if ratio_range is not None:
# mode 1: given a scale and a range of image ratio
assert len(self.img_scale) == 1
else:
# mode 2: given multiple scales or a range of scales
assert multiscale_mode in ['value', 'range']
self.backend = backend
self.multiscale_mode = multiscale_mode
self.ratio_range = ratio_range
self.keep_ratio = keep_ratio
# TODO: refactor the override option in Resize
self.override = override
self.bbox_clip_border = bbox_clip_border
@staticmethod
def random_select(img_scales):
"""Randomly select an img_scale from given candidates.
Args:
img_scales (list[tuple]): Images scales for selection.
Returns:
(tuple, int): Returns a tuple ``(img_scale, scale_dix)``, \
where ``img_scale`` is the selected image scale and \
``scale_idx`` is the selected index in the given candidates.
"""
assert mmcv.is_list_of(img_scales, tuple)
scale_idx = np.random.randint(len(img_scales))
img_scale = img_scales[scale_idx]
return img_scale, scale_idx
@staticmethod
def random_sample(img_scales):
"""Randomly sample an img_scale when ``multiscale_mode=='range'``.
Args:
img_scales (list[tuple]): Images scale range for sampling.
There must be two tuples in img_scales, which specify the lower
and upper bound of image scales.
Returns:
(tuple, None): Returns a tuple ``(img_scale, None)``, where \
``img_scale`` is sampled scale and None is just a placeholder \
to be consistent with :func:`random_select`.
"""
assert mmcv.is_list_of(img_scales, tuple) and len(img_scales) == 2
img_scale_long = [max(s) for s in img_scales]
img_scale_short = [min(s) for s in img_scales]
long_edge = np.random.randint(
min(img_scale_long),
max(img_scale_long) + 1)
short_edge = np.random.randint(
min(img_scale_short),
max(img_scale_short) + 1)
img_scale = (long_edge, short_edge)
return img_scale, None
@staticmethod
def random_sample_ratio(img_scale, ratio_range):
"""Randomly sample an img_scale when ``ratio_range`` is specified.
A ratio will be randomly sampled from the range specified by
``ratio_range``. Then it would be multiplied with ``img_scale`` to
generate sampled scale.
Args:
img_scale (tuple): Images scale base to multiply with ratio.
ratio_range (tuple[float]): The minimum and maximum ratio to scale
the ``img_scale``.
Returns:
(tuple, None): Returns a tuple ``(scale, None)``, where \
``scale`` is sampled ratio multiplied with ``img_scale`` and \
None is just a placeholder to be consistent with \
:func:`random_select`.
"""
assert isinstance(img_scale, tuple) and len(img_scale) == 2
min_ratio, max_ratio = ratio_range
assert min_ratio <= max_ratio
ratio = np.random.random_sample() * (max_ratio - min_ratio) + min_ratio
scale = int(img_scale[0] * ratio), int(img_scale[1] * ratio)
return scale, None
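    # Illustrative sketch (not part of the original file): with
    # img_scale=(640, 640) and ratio_range=(0.5, 1.5), a sampled ratio of 1.2
    # would yield scale == (768, 768).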
def _random_scale(self, results):
"""Randomly sample an img_scale according to ``ratio_range`` and
``multiscale_mode``.
If ``ratio_range`` is specified, a ratio will be sampled and be
multiplied with ``img_scale``.
If multiple scales are specified by ``img_scale``, a scale will be
sampled according to ``multiscale_mode``.
Otherwise, single scale will be used.
Args:
results (dict): Result dict from :obj:`dataset`.
Returns:
dict: Two new keys 'scale` and 'scale_idx` are added into \
``results``, which would be used by subsequent pipelines.
"""
if self.ratio_range is not None:
scale, scale_idx = self.random_sample_ratio(
self.img_scale[0], self.ratio_range)
elif len(self.img_scale) == 1:
scale, scale_idx = self.img_scale[0], 0
elif self.multiscale_mode == 'range':
scale, scale_idx = self.random_sample(self.img_scale)
elif self.multiscale_mode == 'value':
scale, scale_idx = self.random_select(self.img_scale)
else:
raise NotImplementedError
results['scale'] = scale
results['scale_idx'] = scale_idx
def _resize_img(self, results):
"""Resize images with ``results['scale']``."""
for key in results.get('img_fields', ['img']):
if self.keep_ratio:
img, scale_factor = mmcv.imrescale(
results[key],
results['scale'],
return_scale=True,
backend=self.backend)
                # w_scale and h_scale may have a minor difference;
                # a real fix should be done in mmcv.imrescale in the future
new_h, new_w = img.shape[:2]
h, w = results[key].shape[:2]
w_scale = new_w / w
h_scale = new_h / h
else:
img, w_scale, h_scale = mmcv.imresize(
results[key],
results['scale'],
return_scale=True,
backend=self.backend)
results[key] = img
scale_factor = np.array([w_scale, h_scale, w_scale, h_scale],
dtype=np.float32)
results['img_shape'] = img.shape
# in case that there is no padding
results['pad_shape'] = img.shape
results['scale_factor'] = scale_factor
results['keep_ratio'] = self.keep_ratio
def _resize_bboxes(self, results):
"""Resize bounding boxes with ``results['scale_factor']``."""
for key in results.get('bbox_fields', []):
bboxes = results[key] * results['scale_factor']
if self.bbox_clip_border:
img_shape = results['img_shape']
bboxes[:, 0::2] = np.clip(bboxes[:, 0::2], 0, img_shape[1])
bboxes[:, 1::2] = np.clip(bboxes[:, 1::2], 0, img_shape[0])
results[key] = bboxes
def _resize_masks(self, results):
"""Resize masks with ``results['scale']``"""
for key in results.get('mask_fields', []):
if results[key] is None:
continue
if self.keep_ratio:
results[key] = results[key].rescale(results['scale'])
else:
results[key] = results[key].resize(results['img_shape'][:2])
def _resize_seg(self, results):
"""Resize semantic segmentation map with ``results['scale']``."""
for key in results.get('seg_fields', []):
if self.keep_ratio:
gt_seg = mmcv.imrescale(
results[key],
results['scale'],
interpolation='nearest',
backend=self.backend)
else:
gt_seg = mmcv.imresize(
results[key],
results['scale'],
interpolation='nearest',
backend=self.backend)
results['gt_semantic_seg'] = gt_seg
def __call__(self, results):
"""Call function to resize images, bounding boxes, masks, semantic
segmentation map.
Args:
results (dict): Result dict from loading pipeline.
Returns:
dict: Resized results, 'img_shape', 'pad_shape', 'scale_factor', \
'keep_ratio' keys are added into result dict.
"""
if 'scale' not in results:
if 'scale_factor' in results:
img_shape = results['img'].shape[:2]
scale_factor = results['scale_factor']
assert isinstance(scale_factor, float)
results['scale'] = tuple(
[int(x * scale_factor) for x in img_shape][::-1])
else:
self._random_scale(results)
else:
if not self.override:
assert 'scale_factor' not in results, (
'scale and scale_factor cannot be both set.')
else:
results.pop('scale')
if 'scale_factor' in results:
results.pop('scale_factor')
self._random_scale(results)
self._resize_img(results)
self._resize_bboxes(results)
self._resize_masks(results)
self._resize_seg(results)
if 'copy_paste' in results:
self.__call__(results['copy_paste'])
return results
def __repr__(self):
repr_str = self.__class__.__name__
repr_str += f'(img_scale={self.img_scale}, '
repr_str += f'multiscale_mode={self.multiscale_mode}, '
repr_str += f'ratio_range={self.ratio_range}, '
repr_str += f'keep_ratio={self.keep_ratio}, '
repr_str += f'bbox_clip_border={self.bbox_clip_border})'
return repr_str
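# Illustrative sketch (not part of the original file): in a dataset pipeline
# config this transform is typically declared as a dict, e.g.
#
#   dict(type='Resize', img_scale=(1333, 800), keep_ratio=True)
#
# or, for multi-scale training over explicit scales,
#
#   dict(type='Resize', img_scale=[(1333, 640), (1333, 800)],
#        multiscale_mode='value', keep_ratio=True)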
@PIPELINES.register_module()
class RandomFlip(object):
"""Flip the image & bbox & mask.
If the input dict contains the key "flip", then the flag will be used,
otherwise it will be randomly decided by a ratio specified in the init
method.
When random flip is enabled, ``flip_ratio``/``direction`` can either be a
float/string or tuple of float/string. There are 3 flip modes:
- ``flip_ratio`` is float, ``direction`` is string: the image will be
``direction``ly flipped with probability of ``flip_ratio`` .
E.g., ``flip_ratio=0.5``, ``direction='horizontal'``,
then image will be horizontally flipped with probability of 0.5.
    - ``flip_ratio`` is float, ``direction`` is list of string: the image will
be ``direction[i]``ly flipped with probability of
``flip_ratio/len(direction)``.
E.g., ``flip_ratio=0.5``, ``direction=['horizontal', 'vertical']``,
then image will be horizontally flipped with probability of 0.25,
vertically with probability of 0.25.
- ``flip_ratio`` is list of float, ``direction`` is list of string:
      given ``len(flip_ratio) == len(direction)``, the image will
be ``direction[i]``ly flipped with probability of ``flip_ratio[i]``.
E.g., ``flip_ratio=[0.3, 0.5]``, ``direction=['horizontal',
'vertical']``, then image will be horizontally flipped with probability
of 0.3, vertically with probability of 0.5
Args:
flip_ratio (float | list[float], optional): The flipping probability.
Default: None.
direction(str | list[str], optional): The flipping direction. Options
are 'horizontal', 'vertical', 'diagonal'. Default: 'horizontal'.
If input is a list, the length must equal ``flip_ratio``. Each
element in ``flip_ratio`` indicates the flip probability of
corresponding direction.
"""
def __init__(self, flip_ratio=None, direction='horizontal'):
if isinstance(flip_ratio, list):
assert mmcv.is_list_of(flip_ratio, float)
assert 0 <= sum(flip_ratio) <= 1
elif isinstance(flip_ratio, float):
assert 0 <= flip_ratio <= 1
elif flip_ratio is None:
pass
else:
raise ValueError('flip_ratios must be None, float, '
'or list of float')
self.flip_ratio = flip_ratio
valid_directions = ['horizontal', 'vertical', 'diagonal']
if isinstance(direction, str):
assert direction in valid_directions
elif isinstance(direction, list):
assert mmcv.is_list_of(direction, str)
assert set(direction).issubset(set(valid_directions))
else:
raise ValueError('direction must be either str or list of str')
self.direction = direction
if isinstance(flip_ratio, list):
assert len(self.flip_ratio) == len(self.direction)
def bbox_flip(self, bboxes, img_shape, direction):
"""Flip bboxes horizontally.
Args:
bboxes (numpy.ndarray): Bounding boxes, shape (..., 4*k)
img_shape (tuple[int]): Image shape (height, width)
direction (str): Flip direction. Options are 'horizontal',
'vertical'.
Returns:
numpy.ndarray: Flipped bounding boxes.
"""
assert bboxes.shape[-1] % 4 == 0
flipped = bboxes.copy()
if direction == 'horizontal':
w = img_shape[1]
flipped[..., 0::4] = w - bboxes[..., 2::4]
flipped[..., 2::4] = w - bboxes[..., 0::4]
elif direction == 'vertical':
h = img_shape[0]
flipped[..., 1::4] = h - bboxes[..., 3::4]
flipped[..., 3::4] = h - bboxes[..., 1::4]
elif direction == 'diagonal':
w = img_shape[1]
h = img_shape[0]
flipped[..., 0::4] = w - bboxes[..., 2::4]
flipped[..., 1::4] = h - bboxes[..., 3::4]
flipped[..., 2::4] = w - bboxes[..., 0::4]
flipped[..., 3::4] = h - bboxes[..., 1::4]
else:
raise ValueError(f"Invalid flipping direction '{direction}'")
return flipped
def __call__(self, results):
"""Call function to flip bounding boxes, masks, semantic segmentation
maps.
Args:
results (dict): Result dict from loading pipeline.
Returns:
dict: Flipped results, 'flip', 'flip_direction' keys are added \
into result dict.
"""
if 'flip' not in results:
if isinstance(self.direction, list):
# None means non-flip
direction_list = self.direction + [None]
else:
# None means non-flip
direction_list = [self.direction, None]
if isinstance(self.flip_ratio, list):
non_flip_ratio = 1 - sum(self.flip_ratio)
flip_ratio_list = self.flip_ratio + [non_flip_ratio]
else:
non_flip_ratio = 1 - self.flip_ratio
# exclude non-flip
single_ratio = self.flip_ratio / (len(direction_list) - 1)
flip_ratio_list = [single_ratio] * (len(direction_list) -
1) + [non_flip_ratio]
cur_dir = np.random.choice(direction_list, p=flip_ratio_list)
results['flip'] = cur_dir is not None
if 'flip_direction' not in results:
results['flip_direction'] = cur_dir
if results['flip']:
# flip image
for key in results.get('img_fields', ['img']):
results[key] = mmcv.imflip(
results[key], direction=results['flip_direction'])
# flip bboxes
for key in results.get('bbox_fields', []):
results[key] = self.bbox_flip(results[key],
results['img_shape'],
results['flip_direction'])
# flip masks
for key in results.get('mask_fields', []):
results[key] = results[key].flip(results['flip_direction'])
# flip segs
for key in results.get('seg_fields', []):
results[key] = mmcv.imflip(
results[key], direction=results['flip_direction'])
return results
def __repr__(self):
return self.__class__.__name__ + f'(flip_ratio={self.flip_ratio})'
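# Illustrative sketch (not part of the original file): typical pipeline
# entries, e.g. a plain horizontal flip with probability 0.5, or mixed
# directions:
#
#   dict(type='RandomFlip', flip_ratio=0.5)
#   dict(type='RandomFlip', flip_ratio=[0.3, 0.2],
#        direction=['horizontal', 'vertical'])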
@PIPELINES.register_module()
class Pad(object):
"""Pad the image & mask.
There are two padding modes: (1) pad to a fixed size and (2) pad to the
minimum size that is divisible by some number.
Added keys are "pad_shape", "pad_fixed_size", "pad_size_divisor",
Args:
size (tuple, optional): Fixed padding size.
size_divisor (int, optional): The divisor of padded size.
pad_to_square (bool): Whether to pad the image into a square.
Currently only used for YOLOX. Default: False.
pad_val (float, optional): Padding value, 0 by default.
"""
def __init__(self,
size=None,
size_divisor=None,
pad_to_square=False,
pad_val=0):
self.size = size
self.size_divisor = size_divisor
self.pad_val = pad_val
self.pad_to_square = pad_to_square
if pad_to_square:
assert size is None and size_divisor is None, \
'The size and size_divisor must be None ' \
'when pad2square is True'
else:
assert size is not None or size_divisor is not None, \
'only one of size and size_divisor should be valid'
assert size is None or size_divisor is None
def _pad_img(self, results):
"""Pad images according to ``self.size``."""
for key in results.get('img_fields', ['img']):
if self.pad_to_square:
max_size = max(results[key].shape[:2])
self.size = (max_size, max_size)
if self.size is not None:
padded_img = mmcv.impad(
results[key], shape=self.size, pad_val=self.pad_val)
elif self.size_divisor is not None:
padded_img = mmcv.impad_to_multiple(
results[key], self.size_divisor, pad_val=self.pad_val)
results[key] = padded_img
results['pad_shape'] = padded_img.shape
results['pad_fixed_size'] = self.size
results['pad_size_divisor'] = self.size_divisor
def _pad_masks(self, results):
"""Pad masks according to ``results['pad_shape']``."""
pad_shape = results['pad_shape'][:2]
for key in results.get('mask_fields', []):
results[key] = results[key].pad(pad_shape, pad_val=self.pad_val)
def _pad_seg(self, results):
"""Pad semantic segmentation map according to
``results['pad_shape']``."""
for key in results.get('seg_fields', []):
results[key] = mmcv.impad(
results[key], shape=results['pad_shape'][:2])
def __call__(self, results):
"""Call function to pad images, masks, semantic segmentation maps.
Args:
results (dict): Result dict from loading pipeline.
Returns:
dict: Updated result dict.
"""
self._pad_img(results)
self._pad_masks(results)
self._pad_seg(results)
return results
def __repr__(self):
repr_str = self.__class__.__name__
repr_str += f'(size={self.size}, '
repr_str += f'size_divisor={self.size_divisor}, '
repr_str += f'pad_to_square={self.pad_to_square}, '
repr_str += f'pad_val={self.pad_val})'
return repr_str
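# Illustrative sketch (not part of the original file): common pipeline
# entries, padding to a multiple of 32 or (as in YOLOX-style configs) to a
# square canvas:
#
#   dict(type='Pad', size_divisor=32)
#   dict(type='Pad', pad_to_square=True, pad_val=114.0)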
@PIPELINES.register_module()
class Normalize(object):
"""Normalize the image.
Added key is "img_norm_cfg".
Args:
mean (sequence): Mean values of 3 channels.
std (sequence): Std values of 3 channels.
to_rgb (bool): Whether to convert the image from BGR to RGB,
default is true.
"""
def __init__(self, mean, std, to_rgb=True):
self.mean = np.array(mean, dtype=np.float32)
self.std = np.array(std, dtype=np.float32)
self.to_rgb = to_rgb
def __call__(self, results):
"""Call function to normalize images.
Args:
results (dict): Result dict from loading pipeline.
Returns:
dict: Normalized results, 'img_norm_cfg' key is added into
result dict.
"""
for key in results.get('img_fields', ['img']):
results[key] = mmcv.imnormalize(results[key], self.mean, self.std,
self.to_rgb)
results['img_norm_cfg'] = dict(
mean=self.mean, std=self.std, to_rgb=self.to_rgb)
return results
def __repr__(self):
repr_str = self.__class__.__name__
repr_str += f'(mean={self.mean}, std={self.std}, to_rgb={self.to_rgb})'
return repr_str
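# Illustrative sketch (not part of the original file): a typical normalization
# entry; the mean/std values below are the commonly used ImageNet statistics,
# not something mandated by this class:
#
#   img_norm_cfg = dict(mean=[123.675, 116.28, 103.53],
#                       std=[58.395, 57.12, 57.375], to_rgb=True)
#   dict(type='Normalize', **img_norm_cfg)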
@PIPELINES.register_module()
class RandomCrop(object):
"""Random crop the image & bboxes & masks.
The absolute `crop_size` is sampled based on `crop_type` and `image_size`,
then the cropped results are generated.
Args:
crop_size (tuple): The relative ratio or absolute pixels of
height and width.
crop_type (str, optional): one of "relative_range", "relative",
"absolute", "absolute_range". "relative" randomly crops
(h * crop_size[0], w * crop_size[1]) part from an input of size
(h, w). "relative_range" uniformly samples relative crop size from
range [crop_size[0], 1] and [crop_size[1], 1] for height and width
respectively. "absolute" crops from an input with absolute size
(crop_size[0], crop_size[1]). "absolute_range" uniformly samples
crop_h in range [crop_size[0], min(h, crop_size[1])] and crop_w
in range [crop_size[0], min(w, crop_size[1])]. Default "absolute".
allow_negative_crop (bool, optional): Whether to allow a crop that does
not contain any bbox area. Default False.
bbox_clip_border (bool, optional): Whether clip the objects outside
the border of the image. Defaults to True.
Note:
- If the image is smaller than the absolute crop size, return the
original image.
- The keys for bboxes, labels and masks must be aligned. That is,
`gt_bboxes` corresponds to `gt_labels` and `gt_masks`, and
`gt_bboxes_ignore` corresponds to `gt_labels_ignore` and
`gt_masks_ignore`.
- If the crop does not contain any gt-bbox region and
`allow_negative_crop` is set to False, skip this image.
"""
def __init__(self,
crop_size,
crop_type='absolute',
allow_negative_crop=False,
bbox_clip_border=True):
if crop_type not in [
'relative_range', 'relative', 'absolute', 'absolute_range'
]:
raise ValueError(f'Invalid crop_type {crop_type}.')
if crop_type in ['absolute', 'absolute_range']:
assert crop_size[0] > 0 and crop_size[1] > 0
assert isinstance(crop_size[0], int) and isinstance(
crop_size[1], int)
else:
assert 0 < crop_size[0] <= 1 and 0 < crop_size[1] <= 1
self.crop_size = crop_size
self.crop_type = crop_type
self.allow_negative_crop = allow_negative_crop
self.bbox_clip_border = bbox_clip_border
# The key correspondence from bboxes to labels and masks.
self.bbox2label = {
'gt_bboxes': 'gt_labels',
'gt_bboxes_ignore': 'gt_labels_ignore'
}
self.bbox2mask = {
'gt_bboxes': 'gt_masks',
'gt_bboxes_ignore': 'gt_masks_ignore'
}
def _crop_data(self, results, crop_size, allow_negative_crop):
"""Function to randomly crop images, bounding boxes, masks, semantic
segmentation maps.
Args:
results (dict): Result dict from loading pipeline.
crop_size (tuple): Expected absolute size after cropping, (h, w).
allow_negative_crop (bool): Whether to allow a crop that does not
contain any bbox area. Default to False.
Returns:
dict: Randomly cropped results, 'img_shape' key in result dict is
updated according to crop size.
"""
assert crop_size[0] > 0 and crop_size[1] > 0
for key in results.get('img_fields', ['img']):
img = results[key]
margin_h = max(img.shape[0] - crop_size[0], 0)
margin_w = max(img.shape[1] - crop_size[1], 0)
offset_h = np.random.randint(0, margin_h + 1)
offset_w = np.random.randint(0, margin_w + 1)
crop_y1, crop_y2 = offset_h, offset_h + crop_size[0]
crop_x1, crop_x2 = offset_w, offset_w + crop_size[1]
# crop the image
img = img[crop_y1:crop_y2, crop_x1:crop_x2, ...]
img_shape = img.shape
results[key] = img
results['img_shape'] = img_shape
# crop bboxes accordingly and clip to the image boundary
for key in results.get('bbox_fields', []):
# e.g. gt_bboxes and gt_bboxes_ignore
bbox_offset = np.array([offset_w, offset_h, offset_w, offset_h],
dtype=np.float32)
bboxes = results[key] - bbox_offset
if self.bbox_clip_border:
bboxes[:, 0::2] = np.clip(bboxes[:, 0::2], 0, img_shape[1])
bboxes[:, 1::2] = np.clip(bboxes[:, 1::2], 0, img_shape[0])
valid_inds = (bboxes[:, 2] > bboxes[:, 0]) & (
bboxes[:, 3] > bboxes[:, 1])
# If the crop does not contain any gt-bbox area and
# allow_negative_crop is False, skip this image.
if (key == 'gt_bboxes' and not valid_inds.any()
and not allow_negative_crop):
return None
results[key] = bboxes[valid_inds, :]
# label fields. e.g. gt_labels and gt_labels_ignore
label_key = self.bbox2label.get(key)
if label_key in results:
results[label_key] = results[label_key][valid_inds]
# mask fields, e.g. gt_masks and gt_masks_ignore
mask_key = self.bbox2mask.get(key)
if mask_key in results:
results[mask_key] = results[mask_key][
valid_inds.nonzero()[0]].crop(
np.asarray([crop_x1, crop_y1, crop_x2, crop_y2]))
# crop semantic seg
for key in results.get('seg_fields', []):
results[key] = results[key][crop_y1:crop_y2, crop_x1:crop_x2]
return results
def _get_crop_size(self, image_size):
"""Randomly generates the absolute crop size based on `crop_type` and
`image_size`.
Args:
image_size (tuple): (h, w).
Returns:
crop_size (tuple): (crop_h, crop_w) in absolute pixels.
"""
h, w = image_size
if self.crop_type == 'absolute':
return (min(self.crop_size[0], h), min(self.crop_size[1], w))
elif self.crop_type == 'absolute_range':
assert self.crop_size[0] <= self.crop_size[1]
crop_h = np.random.randint(
min(h, self.crop_size[0]),
min(h, self.crop_size[1]) + 1)
crop_w = np.random.randint(
min(w, self.crop_size[0]),
min(w, self.crop_size[1]) + 1)
return crop_h, crop_w
elif self.crop_type == 'relative':
crop_h, crop_w = self.crop_size
return int(h * crop_h + 0.5), int(w * crop_w + 0.5)
elif self.crop_type == 'relative_range':
crop_size = np.asarray(self.crop_size, dtype=np.float32)
crop_h, crop_w = crop_size + np.random.rand(2) * (1 - crop_size)
return int(h * crop_h + 0.5), int(w * crop_w + 0.5)
def __call__(self, results):
"""Call function to randomly crop images, bounding boxes, masks,
semantic segmentation maps.
Args:
results (dict): Result dict from loading pipeline.
Returns:
dict: Randomly cropped results, 'img_shape' key in result dict is
updated according to crop size.
"""
image_size = results['img'].shape[:2]
crop_size = self._get_crop_size(image_size)
results = self._crop_data(results, crop_size, self.allow_negative_crop)
return results
def __repr__(self):
repr_str = self.__class__.__name__
repr_str += f'(crop_size={self.crop_size}, '
repr_str += f'crop_type={self.crop_type}, '
repr_str += f'allow_negative_crop={self.allow_negative_crop}, '
repr_str += f'bbox_clip_border={self.bbox_clip_border})'
return repr_str
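# Illustrative sketch (not part of the original module): one way `RandomCrop`
# above may be configured in a training pipeline; the numbers are arbitrary
# examples, not recommended defaults.
_EXAMPLE_RANDOM_CROP = dict(
    type='RandomCrop',
    crop_size=(384, 600),
    crop_type='absolute_range',
    allow_negative_crop=False,
    bbox_clip_border=True)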
@PIPELINES.register_module()
class SegRescale(object):
"""Rescale semantic segmentation maps.
Args:
scale_factor (float): The scale factor of the final output.
backend (str): Image rescale backend, choices are 'cv2' and 'pillow'.
These two backends generate slightly different results. Defaults
to 'cv2'.
"""
def __init__(self, scale_factor=1, backend='cv2'):
self.scale_factor = scale_factor
self.backend = backend
def __call__(self, results):
"""Call function to scale the semantic segmentation map.
Args:
results (dict): Result dict from loading pipeline.
Returns:
dict: Result dict with semantic segmentation map scaled.
"""
for key in results.get('seg_fields', []):
if self.scale_factor != 1:
results[key] = mmcv.imrescale(
results[key],
self.scale_factor,
interpolation='nearest',
backend=self.backend)
return results
def __repr__(self):
return self.__class__.__name__ + f'(scale_factor={self.scale_factor})'
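# Illustrative sketch (not part of the original module): `SegRescale` is
# typically used to downsample `gt_semantic_seg` for a semantic head; the 1/8
# factor below is only an example value.
_EXAMPLE_SEG_RESCALE = dict(type='SegRescale', scale_factor=1 / 8)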
@PIPELINES.register_module()
class PhotoMetricDistortion(object):
"""Apply photometric distortion to image sequentially, every transformation
is applied with a probability of 0.5. The position of random contrast is in
second or second to last.
1. random brightness
2. random contrast (mode 0)
3. convert color from BGR to HSV
4. random saturation
5. random hue
6. convert color from HSV to BGR
7. random contrast (mode 1)
8. randomly swap channels
Args:
brightness_delta (int): delta of brightness.
contrast_range (tuple): range of contrast.
saturation_range (tuple): range of saturation.
hue_delta (int): delta of hue.
"""
def __init__(self,
brightness_delta=32,
contrast_range=(0.5, 1.5),
saturation_range=(0.5, 1.5),
hue_delta=18):
self.brightness_delta = brightness_delta
self.contrast_lower, self.contrast_upper = contrast_range
self.saturation_lower, self.saturation_upper = saturation_range
self.hue_delta = hue_delta
def __call__(self, results):
"""Call function to perform photometric distortion on images.
Args:
results (dict): Result dict from loading pipeline.
Returns:
dict: Result dict with images distorted.
"""
if 'img_fields' in results:
assert results['img_fields'] == ['img'], \
'Only single img_fields is allowed'
img = results['img']
assert img.dtype == np.float32, \
'PhotoMetricDistortion needs the input image of dtype ' \
'np.float32, please set "to_float32=True" in ' \
'"LoadImageFromFile" pipeline'
# random brightness
if random.randint(2):
delta = random.uniform(-self.brightness_delta,
self.brightness_delta)
img += delta
# mode == 0 --> do random contrast first
# mode == 1 --> do random contrast last
mode = random.randint(2)
if mode == 1:
if random.randint(2):
alpha = random.uniform(self.contrast_lower,
self.contrast_upper)
img *= alpha
# convert color from BGR to HSV
img = mmcv.bgr2hsv(img)
# random saturation
if random.randint(2):
img[..., 1] *= random.uniform(self.saturation_lower,
self.saturation_upper)
# random hue
if random.randint(2):
img[..., 0] += random.uniform(-self.hue_delta, self.hue_delta)
img[..., 0][img[..., 0] > 360] -= 360
img[..., 0][img[..., 0] < 0] += 360
# convert color from HSV to BGR
img = mmcv.hsv2bgr(img)
# random contrast
if mode == 0:
if random.randint(2):
alpha = random.uniform(self.contrast_lower,
self.contrast_upper)
img *= alpha
# randomly swap channels
if random.randint(2):
img = img[..., random.permutation(3)]
results['img'] = img
return results
def __repr__(self):
repr_str = self.__class__.__name__
repr_str += f'(\nbrightness_delta={self.brightness_delta},\n'
repr_str += 'contrast_range='
repr_str += f'{(self.contrast_lower, self.contrast_upper)},\n'
repr_str += 'saturation_range='
repr_str += f'{(self.saturation_lower, self.saturation_upper)},\n'
repr_str += f'hue_delta={self.hue_delta})'
return repr_str
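# Illustrative sketch (not part of the original module): `PhotoMetricDistortion`
# expects float32 images, so it is normally preceded by a loading step with
# `to_float32=True`; the values below simply repeat the class defaults.
_EXAMPLE_PHOTO_METRIC_DISTORTION = dict(
    type='PhotoMetricDistortion',
    brightness_delta=32,
    contrast_range=(0.5, 1.5),
    saturation_range=(0.5, 1.5),
    hue_delta=18)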
@PIPELINES.register_module()
class Expand(object):
"""Random expand the image & bboxes.
Randomly place the original image on a canvas of 'ratio' x original image
size filled with mean values. The ratio is in the range of ratio_range.
Args:
mean (tuple): mean value of dataset.
to_rgb (bool): if need to convert the order of mean to align with RGB.
ratio_range (tuple): range of expand ratio.
seg_ignore_label (int): the fill value used for the expanded area of
segmentation maps. Default: None.
prob (float): probability of applying this transformation. Default: 0.5.
"""
def __init__(self,
mean=(0, 0, 0),
to_rgb=True,
ratio_range=(1, 4),
seg_ignore_label=None,
prob=0.5):
self.to_rgb = to_rgb
self.ratio_range = ratio_range
if to_rgb:
self.mean = mean[::-1]
else:
self.mean = mean
self.min_ratio, self.max_ratio = ratio_range
self.seg_ignore_label = seg_ignore_label
self.prob = prob
def __call__(self, results):
"""Call function to expand images, bounding boxes.
Args:
results (dict): Result dict from loading pipeline.
Returns:
dict: Result dict with images, bounding boxes expanded
"""
if random.uniform(0, 1) > self.prob:
return results
if 'img_fields' in results:
assert results['img_fields'] == ['img'], \
'Only single img_fields is allowed'
img = results['img']
h, w, c = img.shape
ratio = random.uniform(self.min_ratio, self.max_ratio)
# speedup expand when meets large image
if np.all(self.mean == self.mean[0]):
expand_img = np.empty((int(h * ratio), int(w * ratio), c),
img.dtype)
expand_img.fill(self.mean[0])
else:
expand_img = np.full((int(h * ratio), int(w * ratio), c),
self.mean,
dtype=img.dtype)
left = int(random.uniform(0, w * ratio - w))
top = int(random.uniform(0, h * ratio - h))
expand_img[top:top + h, left:left + w] = img
results['img'] = expand_img
# expand bboxes
for key in results.get('bbox_fields', []):
results[key] = results[key] + np.tile(
(left, top), 2).astype(results[key].dtype)
# expand masks
for key in results.get('mask_fields', []):
results[key] = results[key].expand(
int(h * ratio), int(w * ratio), top, left)
# expand segs
for key in results.get('seg_fields', []):
gt_seg = results[key]
expand_gt_seg = np.full((int(h * ratio), int(w * ratio)),
self.seg_ignore_label,
dtype=gt_seg.dtype)
expand_gt_seg[top:top + h, left:left + w] = gt_seg
results[key] = expand_gt_seg
return results
def __repr__(self):
repr_str = self.__class__.__name__
repr_str += f'(mean={self.mean}, to_rgb={self.to_rgb}, '
repr_str += f'ratio_range={self.ratio_range}, '
repr_str += f'seg_ignore_label={self.seg_ignore_label})'
return repr_str
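# Illustrative sketch (not part of the original module): `Expand` is usually
# given the same mean as the pipeline's normalization step so the padded canvas
# blends in; the numbers below are assumptions, not prescribed values.
_EXAMPLE_EXPAND = dict(
    type='Expand',
    mean=[123.675, 116.28, 103.53],
    to_rgb=True,
    ratio_range=(1, 4),
    prob=0.5)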
@PIPELINES.register_module()
class MinIoURandomCrop(object):
"""Random crop the image & bboxes, the cropped patches have minimum IoU
requirement with original image & bboxes, the IoU threshold is randomly
selected from min_ious.
Args:
min_ious (tuple): minimum IoU threshold for all intersections with
bounding boxes
min_crop_size (float): minimum crop's size (i.e. h,w := a*h, a*w,
where a >= min_crop_size).
bbox_clip_border (bool, optional): Whether to clip the objects outside
the border of the image. Defaults to True.
Note:
The keys for bboxes, labels and masks should be paired. That is, \
`gt_bboxes` corresponds to `gt_labels` and `gt_masks`, and \
`gt_bboxes_ignore` to `gt_labels_ignore` and `gt_masks_ignore`.
"""
def __init__(self,
min_ious=(0.1, 0.3, 0.5, 0.7, 0.9),
min_crop_size=0.3,
bbox_clip_border=True):
# 1: return ori img
self.min_ious = min_ious
self.sample_mode = (1, *min_ious, 0)
self.min_crop_size = min_crop_size
self.bbox_clip_border = bbox_clip_border
self.bbox2label = {
'gt_bboxes': 'gt_labels',
'gt_bboxes_ignore': 'gt_labels_ignore'
}
self.bbox2mask = {
'gt_bboxes': 'gt_masks',
'gt_bboxes_ignore': 'gt_masks_ignore'
}
def __call__(self, results):
"""Call function to crop images and bounding boxes with minimum IoU
constraint.
Args:
results (dict): Result dict from loading pipeline.
Returns:
dict: Result dict with images and bounding boxes cropped, \
'img_shape' key is updated.
"""
if 'img_fields' in results:
assert results['img_fields'] == ['img'], \
'Only single img_fields is allowed'
img = results['img']
assert 'bbox_fields' in results
boxes = [results[key] for key in results['bbox_fields']]
boxes = np.concatenate(boxes, 0)
h, w, c = img.shape
while True:
mode = random.choice(self.sample_mode)
self.mode = mode
if mode == 1:
return results
min_iou = mode
for i in range(50):
new_w = random.uniform(self.min_crop_size * w, w)
new_h = random.uniform(self.min_crop_size * h, h)
# h / w in [0.5, 2]
if new_h / new_w < 0.5 or new_h / new_w > 2:
continue
left = random.uniform(w - new_w)
top = random.uniform(h - new_h)
patch = np.array(
(int(left), int(top), int(left + new_w), int(top + new_h)))
# Line or point crop is not allowed
if patch[2] == patch[0] or patch[3] == patch[1]:
continue
overlaps = bbox_overlaps(
patch.reshape(-1, 4), boxes.reshape(-1, 4)).reshape(-1)
if len(overlaps) > 0 and overlaps.min() < min_iou:
continue
# center of boxes should inside the crop img
# only adjust boxes and instance masks when the gt is not empty
if len(overlaps) > 0:
# adjust boxes
def is_center_of_bboxes_in_patch(boxes, patch):
center = (boxes[:, :2] + boxes[:, 2:]) / 2
mask = ((center[:, 0] > patch[0]) *
(center[:, 1] > patch[1]) *
(center[:, 0] < patch[2]) *
(center[:, 1] < patch[3]))
return mask
mask = is_center_of_bboxes_in_patch(boxes, patch)
if not mask.any():
continue
for key in results.get('bbox_fields', []):
boxes = results[key].copy()
mask = is_center_of_bboxes_in_patch(boxes, patch)
boxes = boxes[mask]
if self.bbox_clip_border:
boxes[:, 2:] = boxes[:, 2:].clip(max=patch[2:])
boxes[:, :2] = boxes[:, :2].clip(min=patch[:2])
boxes -= np.tile(patch[:2], 2)
results[key] = boxes
# labels
label_key = self.bbox2label.get(key)
if label_key in results:
results[label_key] = results[label_key][mask]
# mask fields
mask_key = self.bbox2mask.get(key)
if mask_key in results:
results[mask_key] = results[mask_key][
mask.nonzero()[0]].crop(patch)
# adjust the img no matter whether the gt is empty before crop
img = img[patch[1]:patch[3], patch[0]:patch[2]]
results['img'] = img
results['img_shape'] = img.shape
# seg fields
for key in results.get('seg_fields', []):
results[key] = results[key][patch[1]:patch[3],
patch[0]:patch[2]]
return results
def __repr__(self):
repr_str = self.__class__.__name__
repr_str += f'(min_ious={self.min_ious}, '
repr_str += f'min_crop_size={self.min_crop_size}, '
repr_str += f'bbox_clip_border={self.bbox_clip_border})'
return repr_str
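# Illustrative sketch (not part of the original module): SSD-style crop settings
# that mirror the class defaults, shown here only as an example entry.
_EXAMPLE_MIN_IOU_RANDOM_CROP = dict(
    type='MinIoURandomCrop',
    min_ious=(0.1, 0.3, 0.5, 0.7, 0.9),
    min_crop_size=0.3)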
@PIPELINES.register_module()
class Corrupt(object):
"""Corruption augmentation.
The code from the following github repo was used as an example:
`imagecorruptions <https://github.com/bethgelab/imagecorruptions>`_.
Args:
corruption (str): Corruption name.
severity (int, optional): The severity of corruption. Default: 1.
"""
def __init__(self, corruption, severity=1):
self.corruption = corruption
self.severity = severity
def __call__(self, results):
"""Call function to corrupt image.
Args:
results (dict): Result dict from loading pipeline.
Returns:
dict: Result dict with images corrupted.
"""
if corrupt is None:
raise RuntimeError('imagecorruptions is not installed')
if 'img_fields' in results:
assert results['img_fields'] == ['img'], \
'Only single img_fields is allowed'
results['img'] = corrupt(
results['img'].astype(np.uint8),
corruption_name=self.corruption,
severity=self.severity)
return results
def __repr__(self):
repr_str = self.__class__.__name__
repr_str += f'(corruption={self.corruption}, '
repr_str += f'severity={self.severity})'
return repr_str
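# Illustrative sketch (not part of the original module): `Corrupt` relies on the
# optional `imagecorruptions` package; 'gaussian_noise' is one of the corruption
# names that package provides, and the severity value is arbitrary.
_EXAMPLE_CORRUPT = dict(type='Corrupt', corruption='gaussian_noise', severity=3)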
@PIPELINES.register_module()
class Albu(object):
"""Albumentation augmentation.
Adds custom transformations from the Albumentations library.
Please visit `https://albumentations.readthedocs.io`
for more information.
An example of ``transforms`` is as follows:
.. code-block::
[
dict(
type='ShiftScaleRotate',
shift_limit=0.0625,
scale_limit=0.0,
rotate_limit=0,
interpolation=1,
p=0.5),
dict(
type='RandomBrightnessContrast',
brightness_limit=[0.1, 0.3],
contrast_limit=[0.1, 0.3],
p=0.2),
dict(type='ChannelShuffle', p=0.1),
dict(
type='OneOf',
transforms=[
dict(type='Blur', blur_limit=3, p=1.0),
dict(type='MedianBlur', blur_limit=3, p=1.0)
],
p=0.1),
]
Args:
transforms (list[dict]): A list of albu transformations.
bbox_params (dict): Bbox_params for albumentation `Compose`.
keymap (dict): Contains {'input key': 'albumentation-style key'}.
update_pad_shape (bool): Whether to update 'pad_shape' with the shape of
the augmented image.
skip_img_without_anno (bool): Whether to skip the image if no annotations
are left after augmentation.
"""
def __init__(self,
transforms,
bbox_params=None,
keymap=None,
update_pad_shape=False,
skip_img_without_anno=False):
if Compose is None:
raise RuntimeError('albumentations is not installed')
self.transforms = transforms
self.filter_lost_elements = False
self.update_pad_shape = update_pad_shape
self.skip_img_without_anno = skip_img_without_anno
# A simple workaround to remove masks without boxes
if (isinstance(bbox_params, dict) and 'label_fields' in bbox_params
and 'filter_lost_elements' in bbox_params):
self.filter_lost_elements = True
self.origin_label_fields = bbox_params['label_fields']
bbox_params['label_fields'] = ['idx_mapper']
del bbox_params['filter_lost_elements']
self.bbox_params = (
self.albu_builder(bbox_params) if bbox_params else None)
self.aug = Compose([self.albu_builder(t) for t in self.transforms],
bbox_params=self.bbox_params)
if not keymap:
self.keymap_to_albu = {
'img': 'image',
'gt_masks': 'masks',
'gt_bboxes': 'bboxes'
}
else:
self.keymap_to_albu = keymap
self.keymap_back = {v: k for k, v in self.keymap_to_albu.items()}
def albu_builder(self, cfg):
"""Import a module from albumentations.
It inherits some of :func:`build_from_cfg` logic.
Args:
cfg (dict): Config dict. It should at least contain the key "type".
Returns:
obj: The constructed object.
"""
assert isinstance(cfg, dict) and 'type' in cfg
args = cfg.copy()
obj_type = args.pop('type')
if mmcv.is_str(obj_type):
if albumentations is None:
raise RuntimeError('albumentations is not installed')
try:
obj_cls = getattr(albumentations, obj_type)
except AttributeError:
obj_cls = ALBUMENTATIONS_EXTRA[obj_type]
elif inspect.isclass(obj_type):
obj_cls = obj_type
else:
raise TypeError(
f'type must be a str or valid type, but got {type(obj_type)}')
if 'transforms' in args:
args['transforms'] = [
self.albu_builder(transform)
for transform in args['transforms']
]
return obj_cls(**args)
@staticmethod
def mapper(d, keymap):
"""Dictionary mapper. Renames keys according to keymap provided.
Args:
d (dict): old dict
keymap (dict): {'old_key':'new_key'}
Returns:
dict: new dict.
"""
updated_dict = {}
for k, v in d.items():
new_k = keymap.get(k, k)
updated_dict[new_k] = v
return updated_dict
def __call__(self, results):
# dict to albumentations format
results = self.mapper(results, self.keymap_to_albu)
# TODO: add bbox_fields
if 'bboxes' in results:
# to list of boxes
if isinstance(results['bboxes'], np.ndarray):
results['bboxes'] = [x for x in results['bboxes']]
# add pseudo-field for filtration
if self.filter_lost_elements:
results['idx_mapper'] = np.arange(len(results['bboxes']))
# TODO: Support mask structure in albu
if 'masks' in results:
if isinstance(results['masks'], PolygonMasks):
raise NotImplementedError(
'Albu only supports BitMap masks now')
ori_masks = results['masks']
if albumentations.__version__ < '0.5':
results['masks'] = results['masks'].masks
else:
results['masks'] = [mask for mask in results['masks'].masks]
results = self.aug(**results)
if 'bboxes' in results:
if isinstance(results['bboxes'], list):
results['bboxes'] = np.array(
results['bboxes'], dtype=np.float32)
results['bboxes'] = results['bboxes'].reshape(-1, 4)
# filter label_fields
if self.filter_lost_elements:
for label in self.origin_label_fields:
results[label] = np.array(
[results[label][i] for i in results['idx_mapper']])
if 'masks' in results:
results['masks'] = np.array(
[results['masks'][i] for i in results['idx_mapper']])
results['masks'] = ori_masks.__class__(
results['masks'], results['image'].shape[0],
results['image'].shape[1])
if 'texts' in results:
results['texts'] = np.array(
[results['texts'][i] for i in results['idx_mapper']])
assert len(results['bboxes']) == len(results['texts'])
assert len(results['masks']) == len(results['texts'])
if (not len(results['idx_mapper'])
and self.skip_img_without_anno):
return None
if 'gt_labels' in results:
if isinstance(results['gt_labels'], list):
results['gt_labels'] = np.array(results['gt_labels'])
results['gt_labels'] = results['gt_labels'].astype(np.int64)
# back to the original format
results = self.mapper(results, self.keymap_back)
# update final shape
if self.update_pad_shape:
results['pad_shape'] = results['img'].shape
return results
def __repr__(self):
repr_str = self.__class__.__name__ + f'(transforms={self.transforms})'
return repr_str
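# Illustrative sketch (not part of the original module): a minimal `Albu` entry,
# assuming the `albumentations` package is installed. The `BboxParams` settings
# and probabilities are example values; `keymap` repeats the class defaults.
_EXAMPLE_ALBU = dict(
    type='Albu',
    transforms=[dict(type='RandomBrightnessContrast', p=0.2)],
    bbox_params=dict(
        type='BboxParams',
        format='pascal_voc',
        label_fields=['gt_labels'],
        filter_lost_elements=True,
        min_visibility=0.1),
    keymap=dict(img='image', gt_masks='masks', gt_bboxes='bboxes'),
    skip_img_without_anno=True)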
@PIPELINES.register_module()
class RandomCenterCropPad(object):
"""Random center crop and random around padding for CornerNet.
This operation generates randomly cropped image from the original image and
pads it simultaneously. Different from :class:`RandomCrop`, the output
shape may not equal to ``crop_size`` strictly. We choose a random value
from ``ratios`` and the output shape could be larger or smaller than
``crop_size``. The padding operation is also different from :class:`Pad`,
here we use around padding instead of right-bottom padding.
The relation between output image (padding image) and original image:
.. code:: text
output image
+----------------------------+
| padded area |
+------|----------------------------|----------+
| | cropped area | |
| | +---------------+ | |
| | | . center | | | original image
| | | range | | |
| | +---------------+ | |
+------|----------------------------|----------+
| padded area |
+----------------------------+
There are 5 main areas in the figure:
- output image: output image of this operation, also called padding
image in following instruction.
- original image: input image of this operation.
- padded area: non-intersect area of output image and original image.
- cropped area: the overlap of output image and original image.
- center range: a smaller area from which the random center is chosen.
center range is computed from ``border`` and the original image's shape
to avoid the random center being too close to the original image's border.
Also, this operation acts differently in train and test mode; the summary
pipelines are listed below.
Train pipeline:
1. Choose a ``random_ratio`` from ``ratios``, the shape of padding image
will be ``random_ratio * crop_size``.
2. Choose a ``random_center`` in center range.
3. Generate padding image with center matches the ``random_center``.
4. Initialize the padding image with pixel value equals to ``mean``.
5. Copy the cropped area to padding image.
6. Refine annotations.
Test pipeline:
1. Compute output shape according to ``test_pad_mode``.
2. Generate padding image with center matches the original image
center.
3. Initialize the padding image with pixel value equals to ``mean``.
4. Copy the ``cropped area`` to padding image.
Args:
crop_size (tuple | None): expected size after crop, final size will
be computed according to ratio. Requires (h, w) in train mode, and
None in test mode.
ratios (tuple): random select a ratio from tuple and crop image to
(crop_size[0] * ratio) * (crop_size[1] * ratio).
Only available in train mode.
border (int): max distance from center select area to image border.
Only available in train mode.
mean (sequence): Mean values of 3 channels.
std (sequence): Std values of 3 channels.
to_rgb (bool): Whether to convert the image from BGR to RGB.
test_mode (bool): whether involve random variables in transform.
In train mode, crop_size is fixed, center coords and ratio is
random selected from predefined lists. In test mode, crop_size
is image's original shape, center coords and ratio is fixed.
test_pad_mode (tuple): padding method and padding shape value, only
available in test mode. Default is using 'logical_or' with
127 as padding shape value.
- 'logical_or': final_shape = input_shape | padding_shape_value
- 'size_divisor': final_shape = int(
ceil(input_shape / padding_shape_value) * padding_shape_value)
bbox_clip_border (bool, optional): Whether to clip the objects outside
the border of the image. Defaults to True.
"""
def __init__(self,
crop_size=None,
ratios=(0.9, 1.0, 1.1),
border=128,
mean=None,
std=None,
to_rgb=None,
test_mode=False,
test_pad_mode=('logical_or', 127),
bbox_clip_border=True):
if test_mode:
assert crop_size is None, 'crop_size must be None in test mode'
assert ratios is None, 'ratios must be None in test mode'
assert border is None, 'border must be None in test mode'
assert isinstance(test_pad_mode, (list, tuple))
assert test_pad_mode[0] in ['logical_or', 'size_divisor']
else:
assert isinstance(crop_size, (list, tuple))
assert crop_size[0] > 0 and crop_size[1] > 0, (
'crop_size must > 0 in train mode')
assert isinstance(ratios, (list, tuple))
assert test_pad_mode is None, (
'test_pad_mode must be None in train mode')
self.crop_size = crop_size
self.ratios = ratios
self.border = border
# We do not set default value to mean, std and to_rgb because these
# hyper-parameters are easy to forget but could affect the performance.
# Please use the same setting as Normalize for performance assurance.
assert mean is not None and std is not None and to_rgb is not None
self.to_rgb = to_rgb
self.input_mean = mean
self.input_std = std
if to_rgb:
self.mean = mean[::-1]
self.std = std[::-1]
else:
self.mean = mean
self.std = std
self.test_mode = test_mode
self.test_pad_mode = test_pad_mode
self.bbox_clip_border = bbox_clip_border
def _get_border(self, border, size):
"""Get final border for the target size.
This function generates a ``final_border`` according to image's shape.
The area between ``final_border`` and ``size - final_border`` is the
``center range``. We randomly choose the center from the ``center range``
to avoid the random center being too close to the original image's border.
The ``center range`` should also be larger than 0.
Args:
border (int): The initial border, default is 128.
size (int): The width or height of original image.
Returns:
int: The final border.
"""
k = 2 * border / size
i = pow(2, np.ceil(np.log2(np.ceil(k))) + (k == int(k)))
return border // i
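# Worked example (added for clarity, not in the original code): with border=128
# and size=700, k = 256 / 700 ≈ 0.37, so i == 1 and the border stays 128; with
# size=200, k = 1.28, so i == 2 and the border shrinks to 64, which keeps the
# ``center range`` between ``final_border`` and ``size - final_border`` non-empty.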
def _filter_boxes(self, patch, boxes):
"""Check whether the center of each box is in the patch.
Args:
patch (list[int]): The cropped area, [left, top, right, bottom].
boxes (numpy array, (N x 4)): Ground truth boxes.
Returns:
mask (numpy array, (N,)): Each box is inside or outside the patch.
"""
center = (boxes[:, :2] + boxes[:, 2:]) / 2
mask = (center[:, 0] > patch[0]) * (center[:, 1] > patch[1]) * (
center[:, 0] < patch[2]) * (
center[:, 1] < patch[3])
return mask
def _crop_image_and_paste(self, image, center, size):
"""Crop image with a given center and size, then paste the cropped
image to a blank image with the two centers aligned.
This function is equivalent to generating a blank image with ``size``
as its shape. Then cover it on the original image with two centers (
the center of blank image and the random center of original image)
aligned. The overlap area is pasted from the original image and the
outside area is filled with ``mean pixel``.
Args:
image (np array, H x W x C): Original image.
center (list[int]): Target crop center coord.
size (list[int]): Target crop size. [target_h, target_w]
Returns:
cropped_img (np array, target_h x target_w x C): Cropped image.
border (np array, 4): The distance of four border of
``cropped_img`` to the original image area, [top, bottom,
left, right]
patch (list[int]): The cropped area, [left, top, right, bottom].
"""
center_y, center_x = center
target_h, target_w = size
img_h, img_w, img_c = image.shape
x0 = max(0, center_x - target_w // 2)
x1 = min(center_x + target_w // 2, img_w)
y0 = max(0, center_y - target_h // 2)
y1 = min(center_y + target_h // 2, img_h)
patch = np.array((int(x0), int(y0), int(x1), int(y1)))
left, right = center_x - x0, x1 - center_x
top, bottom = center_y - y0, y1 - center_y
cropped_center_y, cropped_center_x = target_h // 2, target_w // 2
cropped_img = np.zeros((target_h, target_w, img_c), dtype=image.dtype)
for i in range(img_c):
cropped_img[:, :, i] += self.mean[i]
y_slice = slice(cropped_center_y - top, cropped_center_y + bottom)
x_slice = slice(cropped_center_x - left, cropped_center_x + right)
cropped_img[y_slice, x_slice, :] = image[y0:y1, x0:x1, :]
border = np.array([
cropped_center_y - top, cropped_center_y + bottom,
cropped_center_x - left, cropped_center_x + right
],
dtype=np.float32)
return cropped_img, border, patch
def _train_aug(self, results):
"""Random crop and around padding the original image.
Args:
results (dict): Image information in the augmentation pipeline.
Returns:
results (dict): The updated dict.
"""
img = results['img']
h, w, c = img.shape
boxes = results['gt_bboxes']
while True:
scale = random.choice(self.ratios)
new_h = int(self.crop_size[0] * scale)
new_w = int(self.crop_size[1] * scale)
h_border = self._get_border(self.border, h)
w_border = self._get_border(self.border, w)
for i in range(50):
center_x = random.randint(low=w_border, high=w - w_border)
center_y = random.randint(low=h_border, high=h - h_border)
cropped_img, border, patch = self._crop_image_and_paste(
img, [center_y, center_x], [new_h, new_w])
mask = self._filter_boxes(patch, boxes)
# if the image does not have a valid bbox, any crop patch is valid.
if not mask.any() and len(boxes) > 0:
continue
results['img'] = cropped_img
results['img_shape'] = cropped_img.shape
results['pad_shape'] = cropped_img.shape
x0, y0, x1, y1 = patch
left_w, top_h = center_x - x0, center_y - y0
cropped_center_x, cropped_center_y = new_w // 2, new_h // 2
# crop bboxes accordingly and clip to the image boundary
for key in results.get('bbox_fields', []):
mask = self._filter_boxes(patch, results[key])
bboxes = results[key][mask]
bboxes[:, 0:4:2] += cropped_center_x - left_w - x0
bboxes[:, 1:4:2] += cropped_center_y - top_h - y0
if self.bbox_clip_border:
bboxes[:, 0:4:2] = np.clip(bboxes[:, 0:4:2], 0, new_w)
bboxes[:, 1:4:2] = np.clip(bboxes[:, 1:4:2], 0, new_h)
keep = (bboxes[:, 2] > bboxes[:, 0]) & (
bboxes[:, 3] > bboxes[:, 1])
bboxes = bboxes[keep]
results[key] = bboxes
if key in ['gt_bboxes']:
if 'gt_labels' in results:
labels = results['gt_labels'][mask]
labels = labels[keep]
results['gt_labels'] = labels
if 'gt_masks' in results:
raise NotImplementedError(
'RandomCenterCropPad only supports bbox.')
# crop semantic seg
for key in results.get('seg_fields', []):
raise NotImplementedError(
'RandomCenterCropPad only supports bbox.')
return results
def _test_aug(self, results):
"""Around padding the original image without cropping.
The padding mode and value are from ``test_pad_mode``.
Args:
results (dict): Image information in the augmentation pipeline.
Returns:
results (dict): The updated dict.
"""
img = results['img']
h, w, c = img.shape
results['img_shape'] = img.shape
if self.test_pad_mode[0] in ['logical_or']:
target_h = h | self.test_pad_mode[1]
target_w = w | self.test_pad_mode[1]
elif self.test_pad_mode[0] in ['size_divisor']:
divisor = self.test_pad_mode[1]
target_h = int(np.ceil(h / divisor)) * divisor
target_w = int(np.ceil(w / divisor)) * divisor
else:
raise NotImplementedError(
'RandomCenterCropPad only support two testing pad mode:'
'logical-or and size_divisor.')
cropped_img, border, _ = self._crop_image_and_paste(
img, [h // 2, w // 2], [target_h, target_w])
results['img'] = cropped_img
results['pad_shape'] = cropped_img.shape
results['border'] = border
return results
def __call__(self, results):
img = results['img']
assert img.dtype == np.float32, (
'RandomCenterCropPad needs the input image of dtype np.float32,'
' please set "to_float32=True" in "LoadImageFromFile" pipeline')
h, w, c = img.shape
assert c == len(self.mean)
if self.test_mode:
return self._test_aug(results)
else:
return self._train_aug(results)
def __repr__(self):
repr_str = self.__class__.__name__
repr_str += f'(crop_size={self.crop_size}, '
repr_str += f'ratios={self.ratios}, '
repr_str += f'border={self.border}, '
repr_str += f'mean={self.input_mean}, '
repr_str += f'std={self.input_std}, '
repr_str += f'to_rgb={self.to_rgb}, '
repr_str += f'test_mode={self.test_mode}, '
repr_str += f'test_pad_mode={self.test_pad_mode}, '
repr_str += f'bbox_clip_border={self.bbox_clip_border})'
return repr_str
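# Illustrative sketch (not part of the original module): CornerNet-style
# train-time settings. The mean/std values are assumptions and must match the
# pipeline's Normalize step; test_pad_mode has to be None in train mode.
_EXAMPLE_RANDOM_CENTER_CROP_PAD = dict(
    type='RandomCenterCropPad',
    crop_size=(511, 511),
    ratios=(0.6, 0.7, 0.8, 0.9, 1.0, 1.1, 1.2, 1.3),
    mean=[0.0, 0.0, 0.0],
    std=[1.0, 1.0, 1.0],
    to_rgb=True,
    test_mode=False,
    test_pad_mode=None)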
@PIPELINES.register_module()
class CutOut(object):
"""CutOut operation.
Randomly drop some regions of image used in
`Cutout <https://arxiv.org/abs/1708.04552>`_.
Args:
n_holes (int | tuple[int, int]): Number of regions to be dropped.
If it is given as a list, number of holes will be randomly
selected from the closed interval [`n_holes[0]`, `n_holes[1]`].
cutout_shape (tuple[int, int] | list[tuple[int, int]]): The candidate
shape of dropped regions. It can be `tuple[int, int]` to use a
fixed cutout shape, or `list[tuple[int, int]]` to randomly choose
shape from the list.
cutout_ratio (tuple[float, float] | list[tuple[float, float]]): The
candidate ratio of dropped regions. It can be `tuple[float, float]`
to use a fixed ratio or `list[tuple[float, float]]` to randomly
choose ratio from the list. Please note that `cutout_shape`
and `cutout_ratio` cannot be both given at the same time.
fill_in (tuple[float, float, float] | tuple[int, int, int]): The value
of pixel to fill in the dropped regions. Default: (0, 0, 0).
"""
def __init__(self,
n_holes,
cutout_shape=None,
cutout_ratio=None,
fill_in=(0, 0, 0)):
assert (cutout_shape is None) ^ (cutout_ratio is None), \
'Either cutout_shape or cutout_ratio should be specified.'
assert (isinstance(cutout_shape, (list, tuple))
or isinstance(cutout_ratio, (list, tuple)))
if isinstance(n_holes, tuple):
assert len(n_holes) == 2 and 0 <= n_holes[0] < n_holes[1]
else:
n_holes = (n_holes, n_holes)
self.n_holes = n_holes
self.fill_in = fill_in
self.with_ratio = cutout_ratio is not None
self.candidates = cutout_ratio if self.with_ratio else cutout_shape
if not isinstance(self.candidates, list):
self.candidates = [self.candidates]
def __call__(self, results):
"""Call function to drop some regions of image."""
h, w, c = results['img'].shape
n_holes = np.random.randint(self.n_holes[0], self.n_holes[1] + 1)
for _ in range(n_holes):
x1 = np.random.randint(0, w)
y1 = np.random.randint(0, h)
index = np.random.randint(0, len(self.candidates))
if not self.with_ratio:
cutout_w, cutout_h = self.candidates[index]
else:
cutout_w = int(self.candidates[index][0] * w)
cutout_h = int(self.candidates[index][1] * h)
x2 = np.clip(x1 + cutout_w, 0, w)
y2 = np.clip(y1 + cutout_h, 0, h)
results['img'][y1:y2, x1:x2, :] = self.fill_in
return results
def __repr__(self):
repr_str = self.__class__.__name__
repr_str += f'(n_holes={self.n_holes}, '
repr_str += (f'cutout_ratio={self.candidates}, ' if self.with_ratio
else f'cutout_shape={self.candidates}, ')
repr_str += f'fill_in={self.fill_in})'
return repr_str
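# Illustrative sketch (not part of the original module): drop between 1 and 3
# rectangles whose shape is sampled from the listed candidates; every number
# below is an arbitrary example.
_EXAMPLE_CUTOUT = dict(
    type='CutOut',
    n_holes=(1, 3),
    cutout_shape=[(4, 4), (8, 8), (16, 16)])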
@PIPELINES.register_module()
class Mosaic:
"""Mosaic augmentation.
Given 4 images, mosaic transform combines them into
one output image. The output image is composed of the parts from each sub-
image.
.. code:: text
mosaic transform
center_x
+------------------------------+
| pad | pad |
| +-----------+ |
| | | |
| | image1 |--------+ |
| | | | |
| | | image2 | |
center_y |----+-------------+-----------|
| | cropped | |
|pad | image3 | image4 |
| | | |
+----|-------------+-----------+
| |
+-------------+
The mosaic transform steps are as follows:
1. Choose the mosaic center as the intersection of the 4 images.
2. Get the top-left image according to the index, and randomly
sample another 3 images from the custom dataset.
3. Sub-images will be cropped if they are larger than the mosaic patch.
Args:
img_scale (Sequence[int]): Image size after mosaic pipeline of single
image. Default to (640, 640).
center_ratio_range (Sequence[float]): Center ratio range of mosaic
output. Default to (0.5, 1.5).
pad_val (int): Pad value. Default to 114.
"""
def __init__(self,
img_scale=(640, 640),
center_ratio_range=(0.5, 1.5),
pad_val=114):
assert isinstance(img_scale, tuple)
self.img_scale = img_scale
self.center_ratio_range = center_ratio_range
self.pad_val = pad_val
def __call__(self, results):
"""Call function to make a mosaic of image.
Args:
results (dict): Result dict.
Returns:
dict: Result dict with mosaic transformed.
"""
results = self._mosaic_transform(results)
return results
def get_indexes(self, dataset):
"""Call function to collect indexes.
Args:
dataset (:obj:`MultiImageMixDataset`): The dataset.
Returns:
list: indexes.
"""
indexes = [random.randint(0, len(dataset)) for _ in range(3)]
return indexes
def _mosaic_transform(self, results):
"""Mosaic transform function.
Args:
results (dict): Result dict.
Returns:
dict: Updated result dict.
"""
assert 'mix_results' in results
mosaic_labels = []
mosaic_bboxes = []
if len(results['img'].shape) == 3:
mosaic_img = np.full(
(int(self.img_scale[0] * 2), int(self.img_scale[1] * 2), 3),
self.pad_val,
dtype=results['img'].dtype)
else:
mosaic_img = np.full(
(int(self.img_scale[0] * 2), int(self.img_scale[1] * 2)),
self.pad_val,
dtype=results['img'].dtype)
# mosaic center x, y
center_x = int(
random.uniform(*self.center_ratio_range) * self.img_scale[1])
center_y = int(
random.uniform(*self.center_ratio_range) * self.img_scale[0])
center_position = (center_x, center_y)
loc_strs = ('top_left', 'top_right', 'bottom_left', 'bottom_right')
for i, loc in enumerate(loc_strs):
if loc == 'top_left':
results_patch = copy.deepcopy(results)
else:
results_patch = copy.deepcopy(results['mix_results'][i - 1])
img_i = results_patch['img']
h_i, w_i = img_i.shape[:2]
# keep_ratio resize
scale_ratio_i = min(self.img_scale[0] / h_i,
self.img_scale[1] / w_i)
img_i = mmcv.imresize(
img_i, (int(w_i * scale_ratio_i), int(h_i * scale_ratio_i)))
# compute the combine parameters
paste_coord, crop_coord = self._mosaic_combine(
loc, center_position, img_i.shape[:2][::-1])
x1_p, y1_p, x2_p, y2_p = paste_coord
x1_c, y1_c, x2_c, y2_c = crop_coord
# crop and paste image
mosaic_img[y1_p:y2_p, x1_p:x2_p] = img_i[y1_c:y2_c, x1_c:x2_c]
# adjust coordinate
gt_bboxes_i = results_patch['gt_bboxes']
gt_labels_i = results_patch['gt_labels']
if gt_bboxes_i.shape[0] > 0:
padw = x1_p - x1_c
padh = y1_p - y1_c
gt_bboxes_i[:, 0::2] = \
scale_ratio_i * gt_bboxes_i[:, 0::2] + padw
gt_bboxes_i[:, 1::2] = \
scale_ratio_i * gt_bboxes_i[:, 1::2] + padh
mosaic_bboxes.append(gt_bboxes_i)
mosaic_labels.append(gt_labels_i)
if len(mosaic_labels) > 0:
mosaic_bboxes = np.concatenate(mosaic_bboxes, 0)
mosaic_bboxes[:, 0::2] = np.clip(mosaic_bboxes[:, 0::2], 0,
2 * self.img_scale[1])
mosaic_bboxes[:, 1::2] = np.clip(mosaic_bboxes[:, 1::2], 0,
2 * self.img_scale[0])
mosaic_labels = np.concatenate(mosaic_labels, 0)
results['img'] = mosaic_img
results['img_shape'] = mosaic_img.shape
results['ori_shape'] = mosaic_img.shape
results['gt_bboxes'] = mosaic_bboxes
results['gt_labels'] = mosaic_labels
return results
def _mosaic_combine(self, loc, center_position_xy, img_shape_wh):
"""Calculate global coordinate of mosaic image and local coordinate of
cropped sub-image.
Args:
loc (str): Index for the sub-image, loc in ('top_left',
'top_right', 'bottom_left', 'bottom_right').
center_position_xy (Sequence[float]): Mixing center for 4 images,
(x, y).
img_shape_wh (Sequence[int]): Width and height of sub-image
Returns:
tuple[tuple[float]]: Corresponding coordinate of pasting and
cropping
- paste_coord (tuple): paste corner coordinate in mosaic image.
- crop_coord (tuple): crop corner coordinate in mosaic image.
"""
assert loc in ('top_left', 'top_right', 'bottom_left', 'bottom_right')
if loc == 'top_left':
# index0 to top left part of image
x1, y1, x2, y2 = max(center_position_xy[0] - img_shape_wh[0], 0), \
max(center_position_xy[1] - img_shape_wh[1], 0), \
center_position_xy[0], \
center_position_xy[1]
crop_coord = img_shape_wh[0] - (x2 - x1), img_shape_wh[1] - (
y2 - y1), img_shape_wh[0], img_shape_wh[1]
elif loc == 'top_right':
# index1 to top right part of image
x1, y1, x2, y2 = center_position_xy[0], \
max(center_position_xy[1] - img_shape_wh[1], 0), \
min(center_position_xy[0] + img_shape_wh[0],
self.img_scale[1] * 2), \
center_position_xy[1]
crop_coord = 0, img_shape_wh[1] - (y2 - y1), min(
img_shape_wh[0], x2 - x1), img_shape_wh[1]
elif loc == 'bottom_left':
# index2 to bottom left part of image
x1, y1, x2, y2 = max(center_position_xy[0] - img_shape_wh[0], 0), \
center_position_xy[1], \
center_position_xy[0], \
min(self.img_scale[1] * 2, center_position_xy[1] +
img_shape_wh[1])
crop_coord = img_shape_wh[0] - (x2 - x1), 0, img_shape_wh[0], min(
y2 - y1, img_shape_wh[1])
else:
# index3 to bottom right part of image
x1, y1, x2, y2 = center_position_xy[0], \
center_position_xy[1], \
min(center_position_xy[0] + img_shape_wh[0],
self.img_scale[1] * 2), \
min(self.img_scale[0] * 2, center_position_xy[1] +
img_shape_wh[1])
crop_coord = 0, 0, min(img_shape_wh[0],
x2 - x1), min(y2 - y1, img_shape_wh[1])
paste_coord = x1, y1, x2, y2
return paste_coord, crop_coord
def __repr__(self):
repr_str = self.__class__.__name__
repr_str += f'(img_scale={self.img_scale}, '
repr_str += f'center_ratio_range={self.center_ratio_range}, '
repr_str += f'pad_val={self.pad_val})'
return repr_str
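# Illustrative sketch (not part of the original module): `Mosaic` reads the
# extra images from `results['mix_results']`, which is assumed to be supplied by
# a dataset wrapper such as `MultiImageMixDataset`; the values repeat the
# class defaults.
_EXAMPLE_MOSAIC = dict(
    type='Mosaic',
    img_scale=(640, 640),
    center_ratio_range=(0.5, 1.5),
    pad_val=114)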
@PIPELINES.register_module()
class MixUp:
"""MixUp data augmentation.
.. code:: text
mixup transform
+------------------------------+
| mixup image | |
| +--------|--------+ |
| | | | |
|---------------+ | |
| | | |
| | image | |
| | | |
| | | |
| |-----------------+ |
| pad |
+------------------------------+
The mixup transform steps are as follows::
1. Another random image is picked from the dataset and embedded in
the top-left patch (after padding and resizing).
2. The target of the mixup transform is the weighted average of the mixup
image and the original image.
Args:
img_scale (Sequence[int]): Image output size after mixup pipeline.
Default: (640, 640).
ratio_range (Sequence[float]): Scale ratio of mixup image.
Default: (0.5, 1.5).
flip_ratio (float): Horizontal flip ratio of mixup image.
Default: 0.5.
pad_val (int): Pad value. Default: 114.
max_iters (int): The maximum number of iterations. If the number of
iterations is greater than `max_iters`, but gt_bbox is still
empty, then the iteration is terminated. Default: 15.
min_bbox_size (float): Width and height threshold to filter bboxes.
If the height or width of a box is smaller than this value, it
will be removed. Default: 5.
min_area_ratio (float): Threshold of area ratio between
original bboxes and wrapped bboxes. If smaller than this value,
the box will be removed. Default: 0.2.
max_aspect_ratio (float): Aspect ratio of width and height
threshold to filter bboxes. If max(h/w, w/h) larger than this
value, the box will be removed. Default: 20.
"""
def __init__(self,
img_scale=(640, 640),
ratio_range=(0.5, 1.5),
flip_ratio=0.5,
pad_val=114,
max_iters=15,
min_bbox_size=5,
min_area_ratio=0.2,
max_aspect_ratio=20):
assert isinstance(img_scale, tuple)
self.dynamic_scale = img_scale
self.ratio_range = ratio_range
self.flip_ratio = flip_ratio
self.pad_val = pad_val
self.max_iters = max_iters
self.min_bbox_size = min_bbox_size
self.min_area_ratio = min_area_ratio
self.max_aspect_ratio = max_aspect_ratio
def __call__(self, results):
"""Call function to make a mixup of image.
Args:
results (dict): Result dict.
Returns:
dict: Result dict with mixup transformed.
"""
results = self._mixup_transform(results)
return results
def get_indexes(self, dataset):
"""Call function to collect indexes.
Args:
dataset (:obj:`MultiImageMixDataset`): The dataset.
Returns:
list: indexes.
"""
for i in range(self.max_iters):
index = random.randint(0, len(dataset))
gt_bboxes_i = dataset.get_ann_info(index)['bboxes']
if len(gt_bboxes_i) != 0:
break
return index
def _mixup_transform(self, results):
"""MixUp transform function.
Args:
results (dict): Result dict.
Returns:
dict: Updated result dict.
"""
assert 'mix_results' in results
assert len(
results['mix_results']) == 1, 'MixUp only supports 2 images now!'
if results['mix_results'][0]['gt_bboxes'].shape[0] == 0:
# empty bbox
return results
if 'scale' in results:
self.dynamic_scale = results['scale']
retrieve_results = results['mix_results'][0]
retrieve_img = retrieve_results['img']
jit_factor = random.uniform(*self.ratio_range)
is_flip = random.uniform(0, 1) > self.flip_ratio
if len(retrieve_img.shape) == 3:
out_img = np.ones(
(self.dynamic_scale[0], self.dynamic_scale[1], 3),
dtype=retrieve_img.dtype) * self.pad_val
else:
out_img = np.ones(
self.dynamic_scale, dtype=retrieve_img.dtype) * self.pad_val
# 1. keep_ratio resize
scale_ratio = min(self.dynamic_scale[0] / retrieve_img.shape[0],
self.dynamic_scale[1] / retrieve_img.shape[1])
retrieve_img = mmcv.imresize(
retrieve_img, (int(retrieve_img.shape[1] * scale_ratio),
int(retrieve_img.shape[0] * scale_ratio)))
# 2. paste
out_img[:retrieve_img.shape[0], :retrieve_img.shape[1]] = retrieve_img
# 3. scale jit
scale_ratio *= jit_factor
out_img = mmcv.imresize(out_img, (int(out_img.shape[1] * jit_factor),
int(out_img.shape[0] * jit_factor)))
# 4. flip
if is_flip:
out_img = out_img[:, ::-1, :]
# 5. random crop
ori_img = results['img']
origin_h, origin_w = out_img.shape[:2]
target_h, target_w = ori_img.shape[:2]
padded_img = np.zeros(
(max(origin_h, target_h), max(origin_w,
target_w), 3)).astype(np.uint8)
padded_img[:origin_h, :origin_w] = out_img
x_offset, y_offset = 0, 0
if padded_img.shape[0] > target_h:
y_offset = random.randint(0, padded_img.shape[0] - target_h)
if padded_img.shape[1] > target_w:
x_offset = random.randint(0, padded_img.shape[1] - target_w)
padded_cropped_img = padded_img[y_offset:y_offset + target_h,
x_offset:x_offset + target_w]
# 6. adjust bbox
retrieve_gt_bboxes = retrieve_results['gt_bboxes']
retrieve_gt_bboxes[:, 0::2] = np.clip(
retrieve_gt_bboxes[:, 0::2] * scale_ratio, 0, origin_w)
retrieve_gt_bboxes[:, 1::2] = np.clip(
retrieve_gt_bboxes[:, 1::2] * scale_ratio, 0, origin_h)
if is_flip:
retrieve_gt_bboxes[:, 0::2] = (
origin_w - retrieve_gt_bboxes[:, 0::2][:, ::-1])
# 7. filter
cp_retrieve_gt_bboxes = retrieve_gt_bboxes.copy()
cp_retrieve_gt_bboxes[:, 0::2] = np.clip(
cp_retrieve_gt_bboxes[:, 0::2] - x_offset, 0, target_w)
cp_retrieve_gt_bboxes[:, 1::2] = np.clip(
cp_retrieve_gt_bboxes[:, 1::2] - y_offset, 0, target_h)
keep_list = self._filter_box_candidates(retrieve_gt_bboxes.T,
cp_retrieve_gt_bboxes.T)
# 8. mix up
if keep_list.sum() >= 1.0:
ori_img = ori_img.astype(np.float32)
mixup_img = 0.5 * ori_img + 0.5 * padded_cropped_img.astype(
np.float32)
retrieve_gt_labels = retrieve_results['gt_labels'][keep_list]
retrieve_gt_bboxes = cp_retrieve_gt_bboxes[keep_list]
mixup_gt_bboxes = np.concatenate(
(results['gt_bboxes'], retrieve_gt_bboxes), axis=0)
mixup_gt_labels = np.concatenate(
(results['gt_labels'], retrieve_gt_labels), axis=0)
results['img'] = mixup_img
results['img_shape'] = mixup_img.shape
results['gt_bboxes'] = mixup_gt_bboxes
results['gt_labels'] = mixup_gt_labels
return results
def _filter_box_candidates(self, bbox1, bbox2):
"""Compute candidate boxes which include following 5 things:
bbox1 before augment, bbox2 after augment, min_bbox_size (pixels),
min_area_ratio, max_aspect_ratio.
"""
w1, h1 = bbox1[2] - bbox1[0], bbox1[3] - bbox1[1]
w2, h2 = bbox2[2] - bbox2[0], bbox2[3] - bbox2[1]
ar = np.maximum(w2 / (h2 + 1e-16), h2 / (w2 + 1e-16))
return ((w2 > self.min_bbox_size)
& (h2 > self.min_bbox_size)
& (w2 * h2 / (w1 * h1 + 1e-16) > self.min_area_ratio)
& (ar < self.max_aspect_ratio))
def __repr__(self):
repr_str = self.__class__.__name__
repr_str += f'(dynamic_scale={self.dynamic_scale}, '
repr_str += f'ratio_range={self.ratio_range}, '
repr_str += f'flip_ratio={self.flip_ratio}, '
repr_str += f'pad_val={self.pad_val}, '
repr_str += f'max_iters={self.max_iters}, '
repr_str += f'min_bbox_size={self.min_bbox_size}, '
repr_str += f'min_area_ratio={self.min_area_ratio}, '
repr_str += f'max_aspect_ratio={self.max_aspect_ratio})'
return repr_str
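# Illustrative sketch (not part of the original module): like `Mosaic`, `MixUp`
# assumes `results['mix_results']` is populated by a dataset wrapper such as
# `MultiImageMixDataset`; the values below repeat the class defaults.
_EXAMPLE_MIXUP = dict(
    type='MixUp',
    img_scale=(640, 640),
    ratio_range=(0.5, 1.5),
    flip_ratio=0.5,
    pad_val=114)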
@PIPELINES.register_module()
class RandomAffine:
"""Random affine transform data augmentation.
This operation randomly generates an affine transform matrix including
rotation, translation, shear and scaling transforms.
Args:
max_rotate_degree (float): Maximum degrees of rotation transform.
Default: 10.
max_translate_ratio (float): Maximum ratio of translation.
Default: 0.1.
scaling_ratio_range (tuple[float]): Min and max ratio of
scaling transform. Default: (0.5, 1.5).
max_shear_degree (float): Maximum degrees of shear
transform. Default: 2.
border (tuple[int]): Distance from height and width sides of input
image to adjust output shape. Only used in mosaic dataset.
Default: (0, 0).
border_val (tuple[int]): Border padding values of 3 channels.
Default: (114, 114, 114).
min_bbox_size (float): Width and height threshold to filter bboxes.
If the height or width of a box is smaller than this value, it
will be removed. Default: 2.
min_area_ratio (float): Threshold of area ratio between
original bboxes and wrapped bboxes. If smaller than this value,
the box will be removed. Default: 0.2.
max_aspect_ratio (float): Aspect ratio of width and height
threshold to filter bboxes. If max(h/w, w/h) is larger than this
value, the box will be removed. Default: 20.
"""
def __init__(self,
max_rotate_degree=10.0,
max_translate_ratio=0.1,
scaling_ratio_range=(0.5, 1.5),
max_shear_degree=2.0,
border=(0, 0),
border_val=(114, 114, 114),
min_bbox_size=2,
min_area_ratio=0.2,
max_aspect_ratio=20):
assert 0 <= max_translate_ratio <= 1
assert scaling_ratio_range[0] <= scaling_ratio_range[1]
assert scaling_ratio_range[0] > 0
self.max_rotate_degree = max_rotate_degree
self.max_translate_ratio = max_translate_ratio
self.scaling_ratio_range = scaling_ratio_range
self.max_shear_degree = max_shear_degree
self.border = border
self.border_val = border_val
self.min_bbox_size = min_bbox_size
self.min_area_ratio = min_area_ratio
self.max_aspect_ratio = max_aspect_ratio
def __call__(self, results):
img = results['img']
height = img.shape[0] + self.border[0] * 2
width = img.shape[1] + self.border[1] * 2
# Center
center_matrix = np.eye(3)
center_matrix[0, 2] = -img.shape[1] / 2 # x translation (pixels)
center_matrix[1, 2] = -img.shape[0] / 2 # y translation (pixels)
# Rotation
rotation_degree = random.uniform(-self.max_rotate_degree,
self.max_rotate_degree)
rotation_matrix = self._get_rotation_matrix(rotation_degree)
# Scaling
scaling_ratio = random.uniform(self.scaling_ratio_range[0],
self.scaling_ratio_range[1])
scaling_matrix = self._get_scaling_matrix(scaling_ratio)
# Shear
x_degree = random.uniform(-self.max_shear_degree,
self.max_shear_degree)
y_degree = random.uniform(-self.max_shear_degree,
self.max_shear_degree)
shear_matrix = self._get_shear_matrix(x_degree, y_degree)
# Translation
trans_x = random.uniform(0.5 - self.max_translate_ratio,
0.5 + self.max_translate_ratio) * width
trans_y = random.uniform(0.5 - self.max_translate_ratio,
0.5 + self.max_translate_ratio) * height
translate_matrix = self._get_translation_matrix(trans_x, trans_y)
warp_matrix = (
translate_matrix @ shear_matrix @ rotation_matrix @ scaling_matrix
@ center_matrix)
img = cv2.warpPerspective(
img,
warp_matrix,
dsize=(width, height),
borderValue=self.border_val)
results['img'] = img
results['img_shape'] = img.shape
for key in results.get('bbox_fields', []):
bboxes = results[key]
num_bboxes = len(bboxes)
if num_bboxes:
# homogeneous coordinates
xs = bboxes[:, [0, 2, 2, 0]].reshape(num_bboxes * 4)
ys = bboxes[:, [1, 3, 3, 1]].reshape(num_bboxes * 4)
ones = np.ones_like(xs)
points = np.vstack([xs, ys, ones])
warp_points = warp_matrix @ points
warp_points = warp_points[:2] / warp_points[2]
xs = warp_points[0].reshape(num_bboxes, 4)
ys = warp_points[1].reshape(num_bboxes, 4)
warp_bboxes = np.vstack(
(xs.min(1), ys.min(1), xs.max(1), ys.max(1))).T
warp_bboxes[:, [0, 2]] = warp_bboxes[:, [0, 2]].clip(0, width)
warp_bboxes[:, [1, 3]] = warp_bboxes[:, [1, 3]].clip(0, height)
# filter bboxes
valid_index = self.filter_gt_bboxes(bboxes * scaling_ratio,
warp_bboxes)
results[key] = warp_bboxes[valid_index]
if key in ['gt_bboxes']:
if 'gt_labels' in results:
results['gt_labels'] = results['gt_labels'][
valid_index]
if 'gt_masks' in results:
raise NotImplementedError(
'RandomAffine only supports bbox.')
return results
def filter_gt_bboxes(self, origin_bboxes, wrapped_bboxes):
origin_w = origin_bboxes[:, 2] - origin_bboxes[:, 0]
origin_h = origin_bboxes[:, 3] - origin_bboxes[:, 1]
wrapped_w = wrapped_bboxes[:, 2] - wrapped_bboxes[:, 0]
wrapped_h = wrapped_bboxes[:, 3] - wrapped_bboxes[:, 1]
aspect_ratio = np.maximum(wrapped_w / (wrapped_h + 1e-16),
wrapped_h / (wrapped_w + 1e-16))
wh_valid_idx = (wrapped_w > self.min_bbox_size) & \
(wrapped_h > self.min_bbox_size)
area_valid_idx = wrapped_w * wrapped_h / (origin_w * origin_h +
1e-16) > self.min_area_ratio
aspect_ratio_valid_idx = aspect_ratio < self.max_aspect_ratio
return wh_valid_idx & area_valid_idx & aspect_ratio_valid_idx
def __repr__(self):
repr_str = self.__class__.__name__
repr_str += f'(max_rotate_degree={self.max_rotate_degree}, '
repr_str += f'max_translate_ratio={self.max_translate_ratio}, '
repr_str += f'scaling_ratio={self.scaling_ratio_range}, '
repr_str += f'max_shear_degree={self.max_shear_degree}, '
repr_str += f'border={self.border}, '
repr_str += f'border_val={self.border_val}, '
repr_str += f'min_bbox_size={self.min_bbox_size}, '
repr_str += f'min_area_ratio={self.min_area_ratio}, '
repr_str += f'max_aspect_ratio={self.max_aspect_ratio})'
return repr_str
@staticmethod
def _get_rotation_matrix(rotate_degrees):
radian = math.radians(rotate_degrees)
rotation_matrix = np.array([[np.cos(radian), -np.sin(radian), 0.],
[np.sin(radian),
np.cos(radian), 0.], [0., 0., 1.]])
return rotation_matrix
@staticmethod
def _get_scaling_matrix(scale_ratio):
scaling_matrix = np.array([[scale_ratio, 0., 0.],
[0., scale_ratio, 0.], [0., 0., 1.]])
return scaling_matrix
@staticmethod
def _get_share_matrix(scale_ratio):
scaling_matrix = np.array([[scale_ratio, 0., 0.],
[0., scale_ratio, 0.], [0., 0., 1.]])
return scaling_matrix
@staticmethod
def _get_shear_matrix(x_shear_degrees, y_shear_degrees):
x_radian = math.radians(x_shear_degrees)
y_radian = math.radians(y_shear_degrees)
shear_matrix = np.array([[1, np.tan(x_radian), 0.],
[np.tan(y_radian), 1, 0.], [0., 0., 1.]])
return shear_matrix
@staticmethod
def _get_translation_matrix(x, y):
translation_matrix = np.array([[1, 0., x], [0., 1, y], [0., 0., 1.]])
return translation_matrix
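# Illustrative sketch (not part of the original module): a YOLOX-style affine
# step. The negative `border` assumes the transform follows a (640, 640)
# `Mosaic` whose output is twice the target size; all numbers are examples.
_EXAMPLE_RANDOM_AFFINE = dict(
    type='RandomAffine',
    scaling_ratio_range=(0.1, 2.0),
    border=(-320, -320))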
| 39.515115 | 79 | 0.556477 |
4a278216b55374afd4e6759f4952a08f1fd10570 | 2,772 | py | Python | users/forms.py | Kingjmk/mlfaati | 12c0dcbe0389c2c1da0bde80509fb3374955e293 | [
"MIT"
] | 1 | 2021-01-04T07:34:34.000Z | 2021-01-04T07:34:34.000Z | users/forms.py | Kingjmk/mlfaati | 12c0dcbe0389c2c1da0bde80509fb3374955e293 | [
"MIT"
] | null | null | null | users/forms.py | Kingjmk/mlfaati | 12c0dcbe0389c2c1da0bde80509fb3374955e293 | [
"MIT"
] | null | null | null |
from crispy_forms.bootstrap import PrependedText
from django import forms
from django.utils.safestring import mark_safe
from django.utils.translation import gettext_lazy as _
from crispy_forms.helper import FormHelper
from crispy_forms.layout import Layout, Div, Field, ButtonHolder, Submit
from django.contrib.auth import forms as auth_forms
from common.crispy import Anchor
from . import models
class LoginForm(auth_forms.AuthenticationForm):
remember_me = forms.BooleanField(label=_('Remember me'), initial=False, required=False)
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.helper = FormHelper()
self.helper.form_tag = True
self.helper.form_class = 'form-vertical remove-asterisk'
self.helper.layout = Layout(
Div(
Field(
PrependedText(
'username', mark_safe('<i class="ft-mail"></i>'),
placeholder=_('[email protected]'), wrapper_class='col-12')
),
Field(
PrependedText(
'password', mark_safe('<i class="ft-unlock"></i>'),
placeholder=_('Password'), wrapper_class='pt-4 col-12'
)
),
Div(
Field(
'remember_me', placeholder=_('Remember me'),
css_class='form-check-input'
),
Anchor(
url='#', # TODO: make forget password page
text=_('Lost password?'),
css_class='small text-right',
),
css_class='d-flex justify-content-between align-items-top pt-4 col-12'
),
ButtonHolder(
Submit('submit', _('Sign in'), css_class='w-100'),
css_class='pt-4 col-12'
),
css_class='row'
)
)
class SettingsForm(forms.ModelForm):
class Meta:
model = models.User
fields = ['first_name', 'last_name']
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.helper = FormHelper()
self.helper.form_tag = True
self.helper.form_class = 'form-vertical remove-asterisk'
self.helper.layout = Layout(
Div(
Field('first_name', wrapper_class='col-6'),
Field('last_name', wrapper_class='col-6'),
ButtonHolder(
Submit('submit', _('Save')),
css_class='pt-4 col-12'
),
css_class='row'
)
)
| 35.538462 | 91 | 0.51443 |
4a27824b1120a6f40d19fd7f7e8a07081392a285 | 1,490 | py | Python | lambda_functions/process/count_feature/lambda_function.py | hotosm/MapCampaigner | 32f6b6af2ce81f84fffe33ab864c4ead23bb55b2 | [
"BSD-3-Clause"
] | 24 | 2018-10-05T06:39:11.000Z | 2022-02-22T08:54:37.000Z | lambda_functions/process/count_feature/lambda_function.py | hotosm/field-campaigner | 32f6b6af2ce81f84fffe33ab864c4ead23bb55b2 | [
"BSD-3-Clause"
] | 384 | 2017-05-17T07:50:02.000Z | 2018-09-20T08:18:56.000Z | lambda_functions/process/count_feature/lambda_function.py | hotosm/field-campaigner | 32f6b6af2ce81f84fffe33ab864c4ead23bb55b2 | [
"BSD-3-Clause"
] | 16 | 2017-05-11T08:52:19.000Z | 2018-06-08T06:55:43.000Z |
import sys
sys.path.insert(0, "dependencies")
import logging
import json
import xml.sax
from utilities import (
fetch_campaign,
campaign_path,
to_piechart,
download_overpass_file,
save_data
)
from parser import CountFeatureParser
from aws import S3Data
logger = logging.getLogger()
logger.setLevel(logging.INFO)
def lambda_handler(event, context):
try:
main(event, context)
except Exception as e:
S3Data().create(
key=f'campaigns/{event["campaign_uuid"]}/failure.json',
body=json.dumps({'function': 'process_count_feature',
'failure': str(e)}))
def main(event, context):
logger.info('got event{}'.format(event))
uuid = event['campaign_uuid']
type_name = event['type']
type_id = type_name.replace(' ', '_')
campaign = fetch_campaign(
campaign_path=campaign_path(uuid))
for type_key in campaign['types']:
if campaign['types'][type_key]['type'] == type_name:
typee = campaign['types'][type_key]
download_overpass_file(uuid, type_id)
xml_file = open('/tmp/{type_id}.xml'.format(type_id=type_id), 'r')
parser = CountFeatureParser(typee['feature'])
try:
xml.sax.parse(xml_file, parser)
except xml.sax.SAXParseException:
print('FAIL')
output = {
'type_id': type_id,
'type_name': type_name,
'piechart': to_piechart(parser.count)
}
save_data(uuid, type_id, output)
| 24.833333 | 70 | 0.642953 |
4a27845ff9feff9e5f98d5829d46cc4238f1149c | 2,849 | py | Python | tests/integration/test_storage_s3/s3_mocks/unstable_server.py | tianyiYoung/ClickHouse | 41012b5ba49df807af52fc17ab475a21fadda9b3 | [
"Apache-2.0"
] | 5 | 2021-05-14T02:46:44.000Z | 2021-11-23T04:58:20.000Z | tests/integration/test_storage_s3/s3_mocks/unstable_server.py | tianyiYoung/ClickHouse | 41012b5ba49df807af52fc17ab475a21fadda9b3 | [
"Apache-2.0"
] | 5 | 2021-05-21T06:26:01.000Z | 2021-08-04T04:57:36.000Z | tests/integration/test_storage_s3/s3_mocks/unstable_server.py | tianyiYoung/ClickHouse | 41012b5ba49df807af52fc17ab475a21fadda9b3 | [
"Apache-2.0"
] | 8 | 2021-05-12T01:38:18.000Z | 2022-02-10T06:08:41.000Z | import http.server
import random
import re
import socket
import struct
import sys
def gen_n_digit_number(n):
assert 0 < n < 19
return random.randint(10**(n-1), 10**n-1)
def gen_line():
columns = 4
row = []
def add_number():
digits = random.randint(1, 18)
row.append(gen_n_digit_number(digits))
for i in range(columns // 2):
add_number()
row.append(1)
for i in range(columns - 1 - columns // 2):
add_number()
line = ",".join(map(str, row)) + "\n"
return line.encode()
random.seed("Unstable server/1.0")
lines = b"".join((gen_line() for _ in range(500000)))
class RequestHandler(http.server.BaseHTTPRequestHandler):
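    # Serves the generated CSV at /root/test.csv with Range support and deliberately
    # drops the connection partway through GET responses to emulate an unstable S3 endpoint.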
def do_HEAD(self):
if self.path == "/root/test.csv":
self.from_bytes = 0
self.end_bytes = len(lines)
self.size = self.end_bytes
self.send_block_size = 256
self.stop_at = random.randint(900000, 1200000) // self.send_block_size # Block size is 1024**2.
if "Range" in self.headers:
cr = self.headers["Range"]
parts = re.split("[ -/=]+", cr)
assert parts[0] == "bytes"
self.from_bytes = int(parts[1])
if parts[2]:
self.end_bytes = int(parts[2])+1
self.send_response(206)
self.send_header("Content-Range", f"bytes {self.from_bytes}-{self.end_bytes-1}/{self.size}")
else:
self.send_response(200)
self.send_header("Accept-Ranges", "bytes")
self.send_header("Content-Type", "text/plain")
self.send_header("Content-Length", f"{self.end_bytes-self.from_bytes}")
self.end_headers()
elif self.path == "/":
self.send_response(200)
self.send_header("Content-Type", "text/plain")
self.end_headers()
else:
self.send_response(404)
self.send_header("Content-Type", "text/plain")
self.end_headers()
def do_GET(self):
self.do_HEAD()
if self.path == "/root/test.csv":
for c, i in enumerate(range(self.from_bytes, self.end_bytes, self.send_block_size)):
self.wfile.write(lines[i:min(i+self.send_block_size, self.end_bytes)])
if (c + 1) % self.stop_at == 0:
#self.wfile._sock.setsockopt(socket.SOL_SOCKET, socket.SO_LINGER, struct.pack("ii", 0, 0))
#self.wfile._sock.shutdown(socket.SHUT_RDWR)
#self.wfile._sock.close()
print('Dropping connection')
break
elif self.path == "/":
self.wfile.write(b"OK")
httpd = http.server.HTTPServer(("0.0.0.0", int(sys.argv[1])), RequestHandler)
httpd.serve_forever()
| 31.307692 | 110 | 0.559495 |
4a27863f37c70f60219074aa9f6c15bdb6a49bc8 | 3,153 | py | Python | stdplugins/new.py | aashiq075/PepeBot | 5f40f4316c84ec3875bcbcd476e10448f9214f31 | [
"Apache-2.0"
] | 1 | 2020-08-09T11:43:20.000Z | 2020-08-09T11:43:20.000Z | stdplugins/new.py | aashiq075/PepeBot | 5f40f4316c84ec3875bcbcd476e10448f9214f31 | [
"Apache-2.0"
] | null | null | null | stdplugins/new.py | aashiq075/PepeBot | 5f40f4316c84ec3875bcbcd476e10448f9214f31 | [
"Apache-2.0"
] | null | null | null | # This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
import string
from telethon import events
from telethon.utils import add_surrogate
from telethon.tl.types import MessageEntityPre
from telethon.tl.tlobject import TLObject
import datetime
PRINTABLE_SET = set(bytes(string.printable, 'ascii'))
STR_LEN_MAX = 256
BYTE_LEN_MAX = 64
def parse_pre(text):
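    # Wrap the whole message in a single "pre" entity so Telegram renders it as monospaced text.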
text = text.strip()
return (
text, [
MessageEntityPre(
offset=0, length=len(
add_surrogate(text)), language='potato')])
def yaml_format(obj, indent=0):
"""
Pretty formats the given object as a YAML string which is returned.
(based on TLObject.pretty_format)
"""
result = []
if isinstance(obj, TLObject):
obj = obj.to_dict()
if isinstance(obj, dict):
result.append(obj.get('_', 'dict') + ':')
if obj:
items = obj.items()
has_multiple_items = len(items) > 2
if has_multiple_items:
result.append('\n')
indent += 2
for k, v in items:
if k == '_' or v is None:
continue
formatted = yaml_format(v, indent)
if not formatted.strip():
continue
result.append(' ' * (indent if has_multiple_items else 1))
result.append(f'{k}: {formatted}')
result.append('\n')
result.pop()
indent -= 2
result.append(' ' * indent)
elif isinstance(obj, str):
        # truncate long strings and display an ellipsis
result.append(repr(obj[:STR_LEN_MAX]))
if len(obj) > STR_LEN_MAX:
result.append('…')
elif isinstance(obj, bytes):
# repr() bytes if it's printable, hex like "FF EE BB" otherwise
if all(c in PRINTABLE_SET for c in obj):
result.append(repr(obj))
else:
if len(obj) > BYTE_LEN_MAX:
result.append('<…>')
else:
result.append(' '.join(f'{b:02X}' for b in obj))
elif isinstance(obj, datetime.datetime):
# ISO-8601 without timezone offset (telethon dates are always UTC)
result.append(obj.strftime('%Y-%m-%d %H:%M:%S'))
elif hasattr(obj, '__iter__'):
# display iterables one after another at the base indentation level
result.append('\n')
indent += 2
for x in obj:
result.append(' ' * indent)
result.append(yaml_format(x, indent))
result.append('\n')
result.pop()
indent -= 2
result.append(' ' * indent)
else:
result.append(repr(obj))
return ''.join(result)
@borg.on(events.NewMessage(pattern=r"\.new", outgoing=True))
async def _(event):
if not event.message.is_reply:
return
msg = await event.message.get_reply_message()
yaml_text = yaml_format(msg)
await event.edit(
yaml_text,
parse_mode=parse_pre
)
| 31.53 | 75 | 0.572788 |
4a27890dd82db00e7f4a2aa463f4dc0a4a230f4f | 3,850 | py | Python | tests/ut/python/parallel/test_model_with_loss.py | PowerOlive/mindspore | bda20724a94113cedd12c3ed9083141012da1f15 | [
"Apache-2.0"
] | 3,200 | 2020-02-17T12:45:41.000Z | 2022-03-31T20:21:16.000Z | tests/ut/python/parallel/test_model_with_loss.py | zimo-geek/mindspore | 665ec683d4af85c71b2a1f0d6829356f2bc0e1ff | [
"Apache-2.0"
] | 176 | 2020-02-12T02:52:11.000Z | 2022-03-28T22:15:55.000Z | tests/ut/python/parallel/test_model_with_loss.py | zimo-geek/mindspore | 665ec683d4af85c71b2a1f0d6829356f2bc0e1ff | [
"Apache-2.0"
] | 621 | 2020-03-09T01:31:41.000Z | 2022-03-30T03:43:19.000Z | # Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
import numpy as np
import mindspore as ms
from mindspore import context, Tensor, Parameter
from mindspore.nn import Cell, Momentum
from mindspore.nn.loss import SoftmaxCrossEntropyWithLogits
from mindspore.ops import operations as P
from mindspore.train import Model
from tests.dataset_mock import MindData
class Dataset(MindData):
def __init__(self, predict, label, length=3):
super(Dataset, self).__init__(size=length)
self.predict = predict
self.label = label
self.index = 0
self.length = length
def __iter__(self):
return self
def __next__(self):
if self.index >= self.length:
raise StopIteration
self.index += 1
return self.predict, self.label
def reset(self):
self.index = 0
class Net(Cell):
def __init__(self, mul_weight, strategy1=None, strategy2=None):
super().__init__()
self.mul = P.Mul().shard(strategy1)
self.neg = P.Neg().shard(strategy2)
self.mul_weight = Parameter(mul_weight, "w1")
def construct(self, x):
out = self.mul(x, self.mul_weight)
out = self.neg(out)
return out
_x = Tensor(np.ones([32, 128]), dtype=ms.float32)
_b = Tensor(np.ones([32]), dtype=ms.int32)
_w1 = Tensor(np.ones([512, 128]), dtype=ms.float32)
def compile_net(net):
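    # Wrap the net in a Model with a loss and optimizer and run a short training loop
    # so the configured parallel strategies are compiled.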
learning_rate = 0.1
momentum = 0.9
epoch_size = 2
dataset = Dataset(_x, _b)
loss = SoftmaxCrossEntropyWithLogits(sparse=True, reduction='mean')
opt = Momentum(net.trainable_params(), learning_rate, momentum)
model = Model(net, loss, optimizer=opt)
model.train(epoch_size, dataset, dataset_sink_mode=False)
context.reset_auto_parallel_context()
def test_neg_data_parallel():
context.set_auto_parallel_context(parallel_mode="semi_auto_parallel", device_num=16, global_rank=0)
strategy1 = ((16, 1), (16, 1))
strategy2 = ((16, 1),)
net = Net(_w1, strategy1, strategy2)
compile_net(net)
def test_neg_model_parallel():
context.set_auto_parallel_context(parallel_mode="semi_auto_parallel", device_num=16, global_rank=0)
strategy1 = ((1, 16), (1, 16))
strategy2 = ((1, 16),)
net = Net(_w1, strategy1, strategy2)
compile_net(net)
def test_neg_hybrid_parallel():
context.set_auto_parallel_context(parallel_mode="semi_auto_parallel", device_num=16, global_rank=0)
strategy1 = ((4, 4), (4, 4))
strategy2 = ((4, 4),)
net = Net(_w1, strategy1, strategy2)
compile_net(net)
def test_neg_auto_parallel():
context.set_auto_parallel_context(parallel_mode="auto_parallel", device_num=16, global_rank=0)
net = Net(_w1)
compile_net(net)
def test_neg_repeat_calc():
context.set_auto_parallel_context(parallel_mode="semi_auto_parallel", device_num=16, global_rank=0)
strategy1 = ((4, 4), (4, 4))
strategy2 = ((2, 2),)
net = Net(_w1, strategy1, strategy2)
compile_net(net)
def test_neg_repeat_calc2():
context.set_auto_parallel_context(parallel_mode="semi_auto_parallel", device_num=16, global_rank=0)
strategy1 = ((4, 2), (4, 2))
strategy2 = ((4, 4),)
net = Net(_w1, strategy1, strategy2)
compile_net(net)
| 31.818182 | 103 | 0.682597 |
4a2789b790c62abcf801c955621571d9c3735eba | 497 | py | Python | scrapy_demo/items.py | lyabs243/scrapy-demo | 9e232a0b46ccc2309348d2c430ee6f20c7caa2f7 | [
"MIT"
] | null | null | null | scrapy_demo/items.py | lyabs243/scrapy-demo | 9e232a0b46ccc2309348d2c430ee6f20c7caa2f7 | [
"MIT"
] | null | null | null | scrapy_demo/items.py | lyabs243/scrapy-demo | 9e232a0b46ccc2309348d2c430ee6f20c7caa2f7 | [
"MIT"
] | null | null | null | # Define here the models for your scraped items
#
# See documentation in:
# https://docs.scrapy.org/en/latest/topics/items.html
import scrapy
class ScrapyDemoItem(scrapy.Item):
# define the fields for your item here like:
# name = scrapy.Field()
pass
class PlayerItem(scrapy.Item):
image = scrapy.Field()
details = scrapy.Field()
number = scrapy.Field()
name = scrapy.Field()
clubs = scrapy.Field()
palmares = scrapy.Field()
statistics = scrapy.Field()
| 22.590909 | 53 | 0.68008 |
4a2789d0acc9765ef8bbbb155ec4a4eb4458a1e7 | 1,050 | py | Python | server/src/prefect_server/graphql/logs.py | louisditzel/prefect | b1a02fee623b965e756a38aa09059db780ab67eb | [
"ECL-2.0",
"Apache-2.0"
] | 1 | 2020-05-10T14:32:32.000Z | 2020-05-10T14:32:32.000Z | server/src/prefect_server/graphql/logs.py | louisditzel/prefect | b1a02fee623b965e756a38aa09059db780ab67eb | [
"ECL-2.0",
"Apache-2.0"
] | 3 | 2022-02-14T11:25:57.000Z | 2022-02-27T16:25:14.000Z | server/src/prefect_server/graphql/logs.py | louisditzel/prefect | b1a02fee623b965e756a38aa09059db780ab67eb | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | # Licensed under the Prefect Community License, available at
# https://www.prefect.io/legal/prefect-community-license
import asyncio
import warnings
from typing import Any
from graphql import GraphQLResolveInfo
import prefect
from prefect_server import api
from prefect_server.utilities.graphql import mutation
from prefect_server.utilities import context
@mutation.field("write_run_logs")
async def resolve_write_run_logs(
obj: Any, info: GraphQLResolveInfo, input: dict
) -> dict:
logs = []
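    # Accept both snake_case and camelCase run-id keys from the GraphQL input
    # before handing the logs to the API layer.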
for log in input["logs"]:
logs.append(
dict(
flow_run_id=log.get("flow_run_id", None) or log.get("flowRunId", None),
task_run_id=log.get("task_run_id", None) or log.get("taskRunId", None),
timestamp=log.get("timestamp"),
message=log.get("message"),
name=log.get("name"),
level=log.get("level"),
info=log.get("info"),
)
)
await api.logs.create_logs(logs)
return {"success": True}
| 29.166667 | 87 | 0.642857 |
4a2789fef841ce5653677077ea7fcbd8c4c3f652 | 1,441 | py | Python | setup.py | basecrm/basecrm-python | d6c26aca8850ba3fa0b9dff48d816b721d181ef7 | [
"Apache-2.0"
] | 19 | 2015-04-27T07:50:42.000Z | 2019-12-16T03:51:21.000Z | setup.py | basecrm/basecrm-python | d6c26aca8850ba3fa0b9dff48d816b721d181ef7 | [
"Apache-2.0"
] | 17 | 2015-05-05T23:08:25.000Z | 2018-03-27T09:44:00.000Z | setup.py | basecrm/basecrm-python | d6c26aca8850ba3fa0b9dff48d816b721d181ef7 | [
"Apache-2.0"
] | 19 | 2015-05-05T22:39:46.000Z | 2020-03-20T14:11:22.000Z | import os
from setuptools import setup
README = open(os.path.join(os.path.dirname(__file__), 'README.rst')).read()
# allow setup.py to be run from any path
os.chdir(os.path.normpath(os.path.join(os.path.abspath(__file__), os.pardir)))
setup(
name='basecrm',
version='1.2.10',
description='BaseCRM Official API V2 library client for Python',
long_description=README,
long_description_content_type="text/markdown",
author='Zendesk',
author_email='[email protected]',
url='https://github.com/basecrm/basecrm-python',
license='Apache License 2.0',
packages=['basecrm', 'basecrm.test'],
test_suite='basecrm.test.all',
install_requires=['requests', 'munch'],
tests_require=['mock'],
use_2to3=True,
classifiers=[
"Development Status :: 5 - Production/Stable",
"Intended Audience :: Developers",
"License :: OSI Approved :: Apache Software License",
"Operating System :: OS Independent",
"Programming Language :: Python",
"Programming Language :: Python :: 2",
"Programming Language :: Python :: 2.6",
"Programming Language :: Python :: 2.7",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.2",
"Programming Language :: Python :: 3.3",
"Programming Language :: Python :: 3.4",
"Topic :: Software Development :: Libraries :: Python Modules",
])
| 36.025 | 78 | 0.643997 |
4a278a2e837b6a3fad7735f8cec04227573327d0 | 12,142 | py | Python | botogram/frozenbot.py | Haloghen/botogram | 0161a549dbfae8a4114275300b84935f906190f2 | [
"MIT"
] | null | null | null | botogram/frozenbot.py | Haloghen/botogram | 0161a549dbfae8a4114275300b84935f906190f2 | [
"MIT"
] | null | null | null | botogram/frozenbot.py | Haloghen/botogram | 0161a549dbfae8a4114275300b84935f906190f2 | [
"MIT"
] | null | null | null | # Copyright (c) 2015-2019 The Botogram Authors (see AUTHORS)
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
import logbook
from . import utils
from . import objects
from . import api as api_module
class FrozenBotError(Exception):
pass
class FrozenBot:
"""A frozen version of botogram.Bot"""
def __init__(self, api, about, owner, hide_commands, before_help,
after_help, link_preview_in_help,
validate_callback_signatures, process_backlog, lang, itself,
commands_re, commands, chains, scheduler, main_component_id,
bot_id, shared_memory, update_processors, override_i18n):
# This attribute should be added with the default setattr, because is
# needed by the custom setattr
object.__setattr__(self, "_frozen", False)
# Restore original content
self.api = api
self.about = about
self.owner = owner
self._hide_commands = hide_commands
self.before_help = before_help
self.after_help = after_help
self.link_preview_in_help = link_preview_in_help
self.validate_callback_signatures = validate_callback_signatures
self.process_backlog = process_backlog
self.lang = lang
self._commands_re = commands_re
self._main_component_id = main_component_id
self._bot_id = bot_id
self._shared_memory = shared_memory
self._scheduler = scheduler
self._chains = chains
self._update_processors = update_processors
self._commands = {name: command.for_bot(self)
for name, command in commands.items()}
self.override_i18n = override_i18n
# Setup the logger
self.logger = logbook.Logger('botogram bot')
# Get a fresh Gettext instance
self._lang_inst = utils.get_language(lang)
# Prepare the bot representation
self.itself = itself
self.itself.set_api(api)
# No more changes allowed!
self._frozen = True
def __reduce__(self):
args = (
self.api, self.about, self.owner, self._hide_commands,
self.before_help, self.after_help, self.link_preview_in_help,
self.validate_callback_signatures, self.process_backlog, self.lang,
self.itself, self._commands_re, self._commands, self._chains,
self._scheduler, self._main_component_id, self._bot_id,
self._shared_memory, self._update_processors, self.override_i18n,
)
return restore, args
def __setattr__(self, name, value):
# _frozen marks if the object is frozen or not
# This is useful because the init method needs to alter the object, but
# after that no one should
if self._frozen:
raise FrozenBotError("Can't alter a frozen bot")
return object.__setattr__(self, name, value)
def __eq__(self, other):
return self._bot_id == other._bot_id
# All those methods do nothing, since you aren't allowed to change the
# hooks a bot has in a frozen instance
# All of those will be overridden in the Bot class
def before_processing(self, func):
"""Register a before processing hook"""
raise FrozenBotError("Can't add hooks to a bot at runtime")
def process_message(self, func):
"""Register a message processor hook"""
raise FrozenBotError("Can't add hooks to a bot at runtime")
def message_equals(self, string, ignore_case=True):
"""Add a message equals hook"""
raise FrozenBotError("Can't add hooks to a bot at runtime")
def message_contains(self, string, ignore_case=True, multiple=False):
"""Add a message contains hook"""
raise FrozenBotError("Can't add hooks to a bot at runtime")
def message_matches(self, regex, flags=0, multiple=False):
"""Add a message matches hook"""
raise FrozenBotError("Can't add hooks to a bot at runtime")
def command(self, name, hidden=False):
"""Register a new command"""
raise FrozenBotError("Can't add commands to a bot at runtime")
def callback(self, name, hidden=False):
"""Register a new callback"""
raise FrozenBotError("Can't add callbacks to a bot at runtime")
def timer(self, interval):
"""Register a new timer"""
raise FrozenBotError("Can't add timers to a bot at runtime")
def prepare_memory(self, func):
"""Add a shared memory preparer"""
raise FrozenBotError("Can't register a shared memory preparer to a "
"bot at runtime")
@utils.deprecated("@bot.init_shared_memory", "1.0", "Rename the decorator "
"to @bot.prepare_memory")
def init_shared_memory(self, func):
"""This decorator is deprecated, and it calls @prepare_memory"""
return self.prepare_memory(func)
# This class also contains methods to send messages to users
# They're defined dynamically out of the class body, see below
# Get a chat from its ID
def chat(self, id):
"""Get an instance of botogram.Chat based on its ID"""
return self.api.call("getChat", {"chat_id": id}, expect=objects.Chat)
# Edit messages already sent
def _edit_create_fake_message_object(self, chat, message):
"""Helper method for edit_message and edit_caption"""
# Also accept objects
if hasattr(message, "message_id"):
message = message.id
if hasattr(chat, "id"):
chat = chat.id
return objects.Message({
"message_id": message,
"from": {
"id": self.itself.id,
"first_name": "",
},
"date": 0,
"chat": {
"id": chat,
"type": "",
},
}, self.api)
def edit_message(self, chat, message, text, syntax=None, preview=True,
extra=None, attach=None):
"""Edit a message already sent to the user"""
msg = self._edit_create_fake_message_object(chat, message)
msg.edit(text, syntax, preview, extra, attach)
def edit_caption(self, chat, message, caption, extra=None, attach=None):
"""Edit the caption of a media already sent to the user"""
msg = self._edit_create_fake_message_object(chat, message)
msg.edit_caption(caption, extra, attach)
# Let's process the messages
def process(self, update):
"""Process an update object"""
if not isinstance(update, objects.Update):
raise ValueError("Only Update objects are allowed")
update.set_api(self.api) # Be sure to use the correct API object
try:
for kind, processor in self._update_processors.items():
# Call the processor of the right kind
if getattr(update, kind) is None:
continue
processor(self, self._chains, update)
break
except api_module.ChatUnavailableError as e:
# Do some sane logging
self.logger.warning("Chat %s is not available to your bot:" %
e.chat_id)
self.logger.warning(str(e))
self.logger.warning("Update #%s processing aborted!" %
update.update_id)
for hook in self._chains["chat_unavalable_hooks"]:
self.logger.debug("Executing %s for chat %s..." % (hook.name,
e.chat_id))
hook.call(self, e.chat_id, e.reason)
def scheduled_tasks(self, current_time=None, wrap=True):
"""Return a list of tasks scheduled for now"""
# This provides a better API for the users of the method
def wrapper(task):
def process():
return task.process(self)
return process
# All the tasks returned are wrapped if wrap is True
tasks = self._scheduler.now(current=current_time)
if wrap:
return [wrapper(job) for job in tasks]
return list(tasks)
def register_update_processor(self, kind, processor):
"""Register a new update processor"""
raise FrozenBotError("Can't register new update processors at runtime")
# This helper manages the translation
def _(self, message, **args):
"""Translate a string"""
# Check if the message has been overridden
if message in self.override_i18n:
return self.override_i18n[message] % args
# Otherwise try to return the original message
else:
return self._lang_inst.gettext(message) % args
# And some internal methods used by botogram
def available_commands(self, all=False):
"""Get a list of the commands this bot implements"""
s = sorted(self._commands.values(), key=lambda command: command.name)
c = sorted(s, key=lambda command: command.order)
for command in c:
# Remove `or command.name in self.hide_commands` in botogram 1.0
is_hidden = command.hidden or command.name in self._hide_commands
if all or not is_hidden:
yield command
def _call(self, func, component=None, **available):
"""Wrapper for calling user-provided functions"""
# Set some default available arguments
available.setdefault("bot", self)
# Add the `shared` argument only if a component was provided
if component is not None:
# It's lazily loaded so it won't make an IPC call on the runner
def lazy_shared():
return self._shared_memory.of(self._bot_id, component)
available.setdefault("shared", utils.CallLazyArgument(lazy_shared))
return utils.call(func, **available)
# This function allows to use the old, deprecated bot.hide_commands
@utils.deprecated("bot.hide_commands", "1.0", "Use @bot.command(\"name\", "
"hidden=True) instead")
@property
def hide_commands(self):
return self._hide_commands
# Those are shortcuts to send messages directly to someone
# Create dynamic methods for each of the send methods. They're *really*
# repetitive, so generating them with a for loop is not such a bad idea
_proxied_sends = [
objects.Chat.send,
objects.Chat.send_photo,
objects.Chat.send_audio,
objects.Chat.send_voice,
objects.Chat.send_video,
objects.Chat.send_file,
objects.Chat.send_location,
objects.Chat.send_sticker,
]
for _proxy in _proxied_sends:
@utils.wraps(_proxy)
@utils.deprecated("Bot.%s()" % _proxy.__name__, "1.0",
"Use Bot.chat(id).%s() instead." % _proxy.__name__)
def _wrapper(self, chat, *args, __proxy=_proxy, **kwargs):
obj = self.chat(chat)
return __proxy(obj, *args, **kwargs)
setattr(FrozenBot, _proxy.__name__, _wrapper)
def restore(*args):
"""Restore a FrozenBot instance from pickle"""
return FrozenBot(*args)
| 38.18239 | 79 | 0.640092 |
4a278a9c997fd4d2ef452e87321e56c7682d6932 | 1,864 | py | Python | vsts/vsts/notification/v4_0/models/subscription_evaluation_result.py | kenkuo/azure-devops-python-api | 9e920bd25e938fa89ff7f60153e5b9e113ca839d | [
"MIT"
] | null | null | null | vsts/vsts/notification/v4_0/models/subscription_evaluation_result.py | kenkuo/azure-devops-python-api | 9e920bd25e938fa89ff7f60153e5b9e113ca839d | [
"MIT"
] | null | null | null | vsts/vsts/notification/v4_0/models/subscription_evaluation_result.py | kenkuo/azure-devops-python-api | 9e920bd25e938fa89ff7f60153e5b9e113ca839d | [
"MIT"
] | null | null | null | # --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
# Generated file, DO NOT EDIT
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------------------------
from msrest.serialization import Model
class SubscriptionEvaluationResult(Model):
"""SubscriptionEvaluationResult.
:param evaluation_job_status: Subscription evaluation job status
:type evaluation_job_status: object
:param events: Subscription evaluation events results.
:type events: :class:`EventsEvaluationResult <notification.v4_0.models.EventsEvaluationResult>`
:param id: The requestId which is the subscription evaluation jobId
:type id: str
:param notifications: Subscription evaluation notification results.
:type notifications: :class:`NotificationsEvaluationResult <notification.v4_0.models.NotificationsEvaluationResult>`
"""
_attribute_map = {
'evaluation_job_status': {'key': 'evaluationJobStatus', 'type': 'object'},
'events': {'key': 'events', 'type': 'EventsEvaluationResult'},
'id': {'key': 'id', 'type': 'str'},
'notifications': {'key': 'notifications', 'type': 'NotificationsEvaluationResult'}
}
def __init__(self, evaluation_job_status=None, events=None, id=None, notifications=None):
super(SubscriptionEvaluationResult, self).__init__()
self.evaluation_job_status = evaluation_job_status
self.events = events
self.id = id
self.notifications = notifications
| 49.052632 | 120 | 0.619635 |
4a278ad482933f250583406078382aba1aae19e4 | 595 | py | Python | src/masonite/cli.py | holic-cl/masonite | c5eab7db5f87e389fe83a1f0f20a005035ada9d9 | [
"MIT"
] | null | null | null | src/masonite/cli.py | holic-cl/masonite | c5eab7db5f87e389fe83a1f0f20a005035ada9d9 | [
"MIT"
] | null | null | null | src/masonite/cli.py | holic-cl/masonite | c5eab7db5f87e389fe83a1f0f20a005035ada9d9 | [
"MIT"
] | null | null | null | import os
import sys
from pydoc import ErrorDuringImport
from cleo import Application
from .commands import NewCommand, InstallCommand
from . import __version__
sys.path.append(os.getcwd())
application = Application('Masonite Version:', __version__)
application.add(NewCommand())
application.add(InstallCommand())
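# When run inside a Masonite project, also register any Command objects bound in the application container.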
try:
from wsgi import container
from cleo import Command
for key, value in container.collect(Command).items():
application.add(value)
except ErrorDuringImport as e:
print(e)
except ImportError:
pass
if __name__ == '__main__':
application.run()
| 22.037037 | 59 | 0.756303 |
4a278b8a49517d3728123188fbf49ee412b51e1b | 1,649 | py | Python | corpus/spectrumBruteForce/spectrumBruteForce.py | NiyeT/Lexi---Personal-Assistant | ed50765bbdd37255520863224bf42a1c88cd7865 | [
"MIT"
] | null | null | null | corpus/spectrumBruteForce/spectrumBruteForce.py | NiyeT/Lexi---Personal-Assistant | ed50765bbdd37255520863224bf42a1c88cd7865 | [
"MIT"
] | 5 | 2021-03-01T20:49:54.000Z | 2022-02-26T01:44:23.000Z | corpus/spectrumBruteForce/spectrumBruteForce.py | NiyeT/Lexi-Personal-Assistant | ed50765bbdd37255520863224bf42a1c88cd7865 | [
"MIT"
] | null | null | null | import requests
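# Replays a captured logIn JSON request against the Sagemcom gateway API at 192.168.1.1
# using the session cookie, headers and payload below.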
cookies = {
'lang': 'en',
'session': '%7B%22req_id%22%3A1%2C%22sess_id%22%3A%220%22%2C%22basic%22%3Afalse%2C%22user%22%3A%22username%22%2C%22dataModel%22%3A%7B%22name%22%3A%22Internal%22%2C%22nss%22%3A%5B%7B%22name%22%3A%22gtw%22%2C%22uri%22%3A%22http%3A%2F%2Fsagemcom.com%2Fgateway-data%22%7D%5D%7D%2C%22ha1%22%3A%22d46187bbbf5f4dcc3b5aa765d61d8327deb882cf99a835a392db4d6657b74f2e%22%2C%22nonce%22%3A%22%22%7D',
}
headers = {
'Origin': 'http://192.168.1.1',
'Accept-Encoding': 'gzip, deflate',
'Accept-Language': 'en-US,en;q=0.9',
'User-Agent': 'Mozilla/5.0 (Linux; Android 6.0; Nexus 5 Build/MRA58N) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/74.0.3729.131 Mobile Safari/537.36',
'Content-Type': 'application/x-www-form-urlencoded; charset=UTF-8',
'Accept': 'application/json, text/javascript, */*; q=0.01',
'Referer': 'http://192.168.1.1/0.1/gui/',
'X-Requested-With': 'XMLHttpRequest',
'Connection': 'keep-alive',
}
data = {
'req': '{"request":{"id":0,"session-id":"0","priority":true,"actions":[{"id":0,"method":"logIn","parameters":{"user":"username","persistent":"true","session-options":{"nss":[{"name":"gtw","uri":"http://sagemcom.com/gateway-data"}],"language":"ident","context-flags":{"get-content-name":true,"local-time":true},"capability-depth":2,"capability-flags":{"name":true,"default-value":false,"restriction":true,"description":false},"time-format":"ISO_8601"}}}],"cnonce":3224652020,"auth-key":"f5e1f2c616cf4ae1d13f299bd79e980e"}}'
}
response = requests.post('http://192.168.1.1/cgi/json-req', headers=headers, cookies=cookies, data=data)
print(response.text)
| 63.423077 | 524 | 0.693147 |
4a278c0c9cbda61d3898b9b0b4921f6bfd2a1598 | 2,344 | py | Python | Udacity Data Structure and Algorithms Nanodegree/Introduction/sudoko_check.py | NirmalSilwal/Problem-Solving | 3af3867db4972bd6fc49025d6795341bbe77cdd5 | [
"MIT"
] | 16 | 2020-04-05T08:29:32.000Z | 2022-01-07T12:31:56.000Z | Udacity Data Structure and Algorithms Nanodegree/Introduction/sudoko_check.py | annu12340/Problem-Solving | 9a8b9845ff158501b981e4c5521f7fd798a4e81d | [
"MIT"
] | 2 | 2020-03-04T08:16:54.000Z | 2020-03-04T11:27:59.000Z | Udacity Data Structure and Algorithms Nanodegree/Introduction/sudoko_check.py | NirmalSilwal/Problem-Solving | 3af3867db4972bd6fc49025d6795341bbe77cdd5 | [
"MIT"
] | 4 | 2020-09-20T06:04:03.000Z | 2022-01-13T04:29:24.000Z | correct = [[1,2,3],
[2,3,1],
[3,1,2]]
incorrect = [[1,2,3,4],
[2,3,1,3],
[3,1,2,3],
[4,4,4,4]]
incorrect2 = [[1,2,3,4],
[2,3,1,4],
[4,1,2,3],
[3,4,1,2]]
incorrect3 = [[1,2,3,4,5],
[2,3,1,5,6],
[4,5,2,1,3],
[3,4,5,2,1],
[5,6,4,3,2]]
incorrect4 = [['a','b','c'],
['b','c','a'],
['c','a','b']]
incorrect5 = [ [1, 1.5],
[1.5, 1]]
# Define a function check_sudoku() here:
def check_sudoku(lst):
n = len(lst)
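    # Row check: each row must contain every whole number from 1 to n exactly once.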
for row in lst:
elements = list(range(1,n+1))
for value in row:
if value not in elements:
return False
elements.remove(value)
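    # Column check: each column must contain every whole number from 1 to n exactly once.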
for colItr in range(n):
elements = list(range(1,n+1))
for row in lst:
if row[colItr] not in elements:
return False
elements.remove(row[colItr])
return True
print(check_sudoku(incorrect))
#>>> False
print(check_sudoku(correct))
#>>> True
print(check_sudoku(incorrect2))
#>>> False
print(check_sudoku(incorrect3))
#>>> False
print(check_sudoku(incorrect4))
#>>> False
print(check_sudoku(incorrect5))
#>>> False
# ===========================================================================================================
# QUESTION:
# Exercise 2
# In the next exercise, you will write a function that checks sudoku squares for correctness.
# Sudoku is a logic puzzle in which a game is defined by a partially filled 9 x 9 square of digits, where each square contains one of the digits 1, 2, 3, 4, 5, 6, 7, 8, 9. For this question we will generalize and simplify the game.
# Define a procedure, check_sudoku, that takes as input a square list of lists representing an n x n sudoku puzzle solution and returns the boolean True if the input is a valid sudoku square and returns the boolean False otherwise.
# A valid sudoku square satisfies these two properties:
# Each column of the square contains each of the whole numbers from 1 to n exactly once.
# Each row of the square contains each of the whole numbers from 1 to n exactly once.
# You may assume that the input is square and contains at least one row and column. | 27.576471 | 231 | 0.547355 |
4a278e5b5a556847d187e9ddc964b7f41f71f617 | 45,978 | py | Python | neutron/db/l3_dvr_db.py | ISCAS-VDI/neutron-base | 687f03d7131839ae8bc324d5823194d1245bb050 | [
"Apache-2.0"
] | null | null | null | neutron/db/l3_dvr_db.py | ISCAS-VDI/neutron-base | 687f03d7131839ae8bc324d5823194d1245bb050 | [
"Apache-2.0"
] | null | null | null | neutron/db/l3_dvr_db.py | ISCAS-VDI/neutron-base | 687f03d7131839ae8bc324d5823194d1245bb050 | [
"Apache-2.0"
] | null | null | null | # Copyright (c) 2014 OpenStack Foundation. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import collections
from neutron_lib.api import validators
from neutron_lib import constants as const
from neutron_lib import exceptions as n_exc
from oslo_config import cfg
from oslo_log import helpers as log_helper
from oslo_log import log as logging
from oslo_utils import excutils
import six
from neutron._i18n import _, _LI, _LW
from neutron.callbacks import events
from neutron.callbacks import exceptions
from neutron.callbacks import registry
from neutron.callbacks import resources
from neutron.common import constants as l3_const
from neutron.common import utils as n_utils
from neutron.db import l3_agentschedulers_db as l3_sched_db
from neutron.db import l3_attrs_db
from neutron.db import l3_db
from neutron.extensions import l3
from neutron.extensions import portbindings
from neutron.ipam import utils as ipam_utils
from neutron import manager
from neutron.plugins.common import constants
from neutron.plugins.common import utils as p_utils
LOG = logging.getLogger(__name__)
router_distributed_opts = [
cfg.BoolOpt('router_distributed',
default=False,
help=_("System-wide flag to determine the type of router "
"that tenants can create. Only admin can override.")),
]
cfg.CONF.register_opts(router_distributed_opts)
class L3_NAT_with_dvr_db_mixin(l3_db.L3_NAT_db_mixin,
l3_attrs_db.ExtraAttributesMixin):
"""Mixin class to enable DVR support."""
router_device_owners = (
l3_db.L3_NAT_db_mixin.router_device_owners +
(const.DEVICE_OWNER_DVR_INTERFACE,
const.DEVICE_OWNER_ROUTER_SNAT,
const.DEVICE_OWNER_AGENT_GW))
extra_attributes = (
l3_attrs_db.ExtraAttributesMixin.extra_attributes + [{
'name': "distributed",
'default': cfg.CONF.router_distributed
}])
def _create_router_db(self, context, router, tenant_id):
"""Create a router db object with dvr additions."""
router['distributed'] = is_distributed_router(router)
with context.session.begin(subtransactions=True):
router_db = super(
L3_NAT_with_dvr_db_mixin, self)._create_router_db(
context, router, tenant_id)
self._process_extra_attr_router_create(context, router_db, router)
return router_db
def _validate_router_migration(self, context, router_db, router_res):
"""Allow centralized -> distributed state transition only."""
if (router_db.extra_attributes.distributed and
router_res.get('distributed') is False):
LOG.info(_LI("Centralizing distributed router %s "
"is not supported"), router_db['id'])
raise n_exc.BadRequest(
resource='router',
msg=_("Migration from distributed router to centralized is "
"not supported"))
elif (not router_db.extra_attributes.distributed and
router_res.get('distributed')):
            # the router must be disabled (admin_state_up=False) before upgrading
if router_db.admin_state_up:
msg = _('Cannot upgrade active router to distributed. Please '
'set router admin_state_up to False prior to upgrade.')
raise n_exc.BadRequest(resource='router', msg=msg)
# Notify advanced services of the imminent state transition
# for the router.
try:
kwargs = {'context': context, 'router': router_db}
registry.notify(
resources.ROUTER, events.BEFORE_UPDATE, self, **kwargs)
except exceptions.CallbackFailure as e:
with excutils.save_and_reraise_exception():
# NOTE(armax): preserve old check's behavior
if len(e.errors) == 1:
raise e.errors[0].error
raise l3.RouterInUse(router_id=router_db['id'],
reason=e)
def _update_distributed_attr(
self, context, router_id, router_db, data):
"""Update the model to support the dvr case of a router."""
if data.get('distributed'):
old_owner = const.DEVICE_OWNER_ROUTER_INTF
new_owner = const.DEVICE_OWNER_DVR_INTERFACE
for rp in router_db.attached_ports.filter_by(port_type=old_owner):
rp.port_type = new_owner
rp.port.device_owner = new_owner
def _update_router_db(self, context, router_id, data):
with context.session.begin(subtransactions=True):
router_db = super(
L3_NAT_with_dvr_db_mixin, self)._update_router_db(
context, router_id, data)
migrating_to_distributed = (
not router_db.extra_attributes.distributed and
data.get('distributed') is True)
self._validate_router_migration(context, router_db, data)
router_db.extra_attributes.update(data)
self._update_distributed_attr(
context, router_id, router_db, data)
if migrating_to_distributed:
if router_db['gw_port_id']:
# If the Legacy router is getting migrated to a DVR
# router, make sure to create corresponding
# snat interface ports that are to be consumed by
# the Service Node.
if not self._create_snat_intf_ports_if_not_exists(
context.elevated(), router_db):
LOG.debug("SNAT interface ports not created: %s",
router_db['id'])
cur_agents = self.list_l3_agents_hosting_router(
context, router_db['id'])['agents']
for agent in cur_agents:
self._unbind_router(context, router_db['id'],
agent['id'])
return router_db
def _delete_current_gw_port(self, context, router_id, router, new_network):
"""
        Overridden here to handle deletion of dvr internal ports.
If there is a valid router update with gateway port to be deleted,
then go ahead and delete the csnat ports and the floatingip
agent gateway port associated with the dvr router.
"""
gw_ext_net_id = (
router.gw_port['network_id'] if router.gw_port else None)
super(L3_NAT_with_dvr_db_mixin,
self)._delete_current_gw_port(context, router_id,
router, new_network)
if (is_distributed_router(router) and
gw_ext_net_id != new_network and gw_ext_net_id is not None):
self.delete_csnat_router_interface_ports(
context.elevated(), router)
# NOTE(Swami): Delete the Floatingip agent gateway port
# on all hosts when it is the last gateway port in the
# given external network.
filters = {'network_id': [gw_ext_net_id],
'device_owner': [const.DEVICE_OWNER_ROUTER_GW]}
ext_net_gw_ports = self._core_plugin.get_ports(
context.elevated(), filters)
if not ext_net_gw_ports:
self.delete_floatingip_agent_gateway_port(
context.elevated(), None, gw_ext_net_id)
# Send the information to all the L3 Agent hosts
# to clean up the fip namespace as it is no longer required.
self.l3_rpc_notifier.delete_fipnamespace_for_ext_net(
context, gw_ext_net_id)
def _create_gw_port(self, context, router_id, router, new_network,
ext_ips):
super(L3_NAT_with_dvr_db_mixin,
self)._create_gw_port(context, router_id, router, new_network,
ext_ips)
# Make sure that the gateway port exists before creating the
# snat interface ports for distributed router.
if router.extra_attributes.distributed and router.gw_port:
snat_p_list = self._create_snat_intf_ports_if_not_exists(
context.elevated(), router)
if not snat_p_list:
LOG.debug("SNAT interface ports not created: %s", snat_p_list)
def _get_device_owner(self, context, router=None):
"""Get device_owner for the specified router."""
router_is_uuid = isinstance(router, six.string_types)
if router_is_uuid:
router = self._get_router(context, router)
if is_distributed_router(router):
return const.DEVICE_OWNER_DVR_INTERFACE
return super(L3_NAT_with_dvr_db_mixin,
self)._get_device_owner(context, router)
def _update_fip_assoc(self, context, fip, floatingip_db, external_port):
"""Override to create floating agent gw port for DVR.
Floating IP Agent gateway port will be created when a
floatingIP association happens.
"""
fip_port = fip.get('port_id')
super(L3_NAT_with_dvr_db_mixin, self)._update_fip_assoc(
context, fip, floatingip_db, external_port)
associate_fip = fip_port and floatingip_db['id']
if associate_fip and floatingip_db.get('router_id'):
admin_ctx = context.elevated()
router_dict = self.get_router(
admin_ctx, floatingip_db['router_id'])
# Check if distributed router and then create the
# FloatingIP agent gateway port
if router_dict.get('distributed'):
hostid = self._get_dvr_service_port_hostid(
context, fip_port)
if hostid:
# FIXME (Swami): This FIP Agent Gateway port should be
# created only once and there should not be a duplicate
# for the same host. Until we find a good solution for
# augmenting multiple server requests we should use the
# existing flow.
fip_agent_port = (
self.create_fip_agent_gw_port_if_not_exists(
admin_ctx, external_port['network_id'],
hostid))
LOG.debug("FIP Agent gateway port: %s", fip_agent_port)
def _get_floatingip_on_port(self, context, port_id=None):
"""Helper function to retrieve the fip associated with port."""
fip_qry = context.session.query(l3_db.FloatingIP)
floating_ip = fip_qry.filter_by(fixed_port_id=port_id)
return floating_ip.first()
def add_router_interface(self, context, router_id, interface_info):
add_by_port, add_by_sub = self._validate_interface_info(interface_info)
router = self._get_router(context, router_id)
device_owner = self._get_device_owner(context, router)
# This should be True unless adding an IPv6 prefix to an existing port
new_port = True
if add_by_port:
port, subnets = self._add_interface_by_port(
context, router, interface_info['port_id'], device_owner)
elif add_by_sub:
port, subnets, new_port = self._add_interface_by_subnet(
context, router, interface_info['subnet_id'], device_owner)
subnet = subnets[0]
if new_port:
if router.extra_attributes.distributed and router.gw_port:
try:
admin_context = context.elevated()
self._add_csnat_router_interface_port(
admin_context, router, port['network_id'],
port['fixed_ips'][-1]['subnet_id'])
except Exception:
with excutils.save_and_reraise_exception():
# we need to preserve the original state prior
# the request by rolling back the port creation
# that led to new_port=True
self._core_plugin.delete_port(
admin_context, port['id'], l3_port_check=False)
with context.session.begin(subtransactions=True):
router_port = l3_db.RouterPort(
port_id=port['id'],
router_id=router.id,
port_type=device_owner
)
context.session.add(router_port)
# NOTE: For IPv6 additional subnets added to the same
# network we need to update the CSNAT port with respective
# IPv6 subnet
elif subnet and port:
fixed_ip = {'subnet_id': subnet['id']}
if subnet['ip_version'] == 6:
# Add new prefix to an existing ipv6 csnat port with the
# same network id if one exists
cs_port = (
self._find_v6_router_port_by_network_and_device_owner(
router, subnet['network_id'],
const.DEVICE_OWNER_ROUTER_SNAT))
if cs_port:
fixed_ips = list(cs_port['port']['fixed_ips'])
fixed_ips.append(fixed_ip)
updated_port = self._core_plugin.update_port(
context.elevated(),
cs_port['port_id'], {'port': {'fixed_ips': fixed_ips}})
LOG.debug("CSNAT port updated for IPv6 subnet: "
"%s", updated_port)
router_interface_info = self._make_router_interface_info(
router_id, port['tenant_id'], port['id'], port['network_id'],
subnet['id'], [subnet['id']])
self.notify_router_interface_action(
context, router_interface_info, 'add')
if router.gw_port:
gw_network_id = router.gw_port.network_id
gw_ips = [x['ip_address'] for x in router.gw_port.fixed_ips]
registry.notify(resources.ROUTER_INTERFACE,
events.AFTER_CREATE,
self,
context=context,
network_id=gw_network_id,
gateway_ips=gw_ips,
cidrs=[x['cidr'] for x in subnets],
port_id=port['id'],
router_id=router_id,
port=port,
interface_info=interface_info)
return router_interface_info
def _port_has_ipv6_address(self, port, csnat_port_check=True):
"""Overridden to return False if DVR SNAT port."""
if csnat_port_check:
if port['device_owner'] == const.DEVICE_OWNER_ROUTER_SNAT:
return False
return super(L3_NAT_with_dvr_db_mixin,
self)._port_has_ipv6_address(port)
def _find_v6_router_port_by_network_and_device_owner(
self, router, net_id, device_owner):
for port in router.attached_ports:
p = port['port']
if (p['network_id'] == net_id and
p['device_owner'] == device_owner and
self._port_has_ipv6_address(p, csnat_port_check=False)):
return port
def _check_for_multiprefix_csnat_port_and_update(
self, context, router, network_id, subnet_id):
"""Checks if the csnat port contains multiple ipv6 prefixes.
If the csnat port contains multiple ipv6 prefixes for the given
network when a router interface is deleted, make sure we don't
delete the port when a single subnet is deleted and just update
it with the right fixed_ip.
This function returns true if it is a multiprefix port.
"""
if router.gw_port:
# If router has a gateway port, check if it has IPV6 subnet
cs_port = (
self._find_v6_router_port_by_network_and_device_owner(
router, network_id, const.DEVICE_OWNER_ROUTER_SNAT))
if cs_port:
fixed_ips = (
[fixedip for fixedip in
cs_port['port']['fixed_ips']
if fixedip['subnet_id'] != subnet_id])
if len(fixed_ips) == len(cs_port['port']['fixed_ips']):
# The subnet being detached from router is not part of
# ipv6 router port. No need to update the multiprefix.
return False
if fixed_ips:
# multiple prefix port - delete prefix from port
self._core_plugin.update_port(
context.elevated(),
cs_port['port_id'], {'port': {'fixed_ips': fixed_ips}})
return True
return False
def remove_router_interface(self, context, router_id, interface_info):
router = self._get_router(context, router_id)
if not router.extra_attributes.distributed:
return super(
L3_NAT_with_dvr_db_mixin, self).remove_router_interface(
context, router_id, interface_info)
plugin = manager.NeutronManager.get_service_plugins().get(
constants.L3_ROUTER_NAT)
router_hosts_before = plugin._get_dvr_hosts_for_router(
context, router_id)
interface_info = super(
L3_NAT_with_dvr_db_mixin, self).remove_router_interface(
context, router_id, interface_info)
router_hosts_after = plugin._get_dvr_hosts_for_router(
context, router_id)
removed_hosts = set(router_hosts_before) - set(router_hosts_after)
if removed_hosts:
agents = plugin.get_l3_agents(context,
filters={'host': removed_hosts})
binding_table = l3_sched_db.RouterL3AgentBinding
snat_binding = context.session.query(binding_table).filter_by(
router_id=router_id).first()
for agent in agents:
is_this_snat_agent = (
snat_binding and snat_binding.l3_agent_id == agent['id'])
if not is_this_snat_agent:
self.l3_rpc_notifier.router_removed_from_agent(
context, router_id, agent['host'])
is_multiple_prefix_csport = (
self._check_for_multiprefix_csnat_port_and_update(
context, router, interface_info['network_id'],
interface_info['subnet_id']))
if not is_multiple_prefix_csport:
# Single prefix port - go ahead and delete the port
self.delete_csnat_router_interface_ports(
context.elevated(), router,
subnet_id=interface_info['subnet_id'])
return interface_info
def _get_snat_sync_interfaces(self, context, router_ids):
"""Query router interfaces that relate to list of router_ids."""
if not router_ids:
return []
qry = context.session.query(l3_db.RouterPort)
qry = qry.filter(
l3_db.RouterPort.router_id.in_(router_ids),
l3_db.RouterPort.port_type == const.DEVICE_OWNER_ROUTER_SNAT
)
interfaces = collections.defaultdict(list)
for rp in qry:
interfaces[rp.router_id].append(
self._core_plugin._make_port_dict(rp.port, None))
LOG.debug("Return the SNAT ports: %s", interfaces)
return interfaces
def _build_routers_list(self, context, routers, gw_ports):
# Perform a single query up front for all routers
if not routers:
return []
router_ids = [r['id'] for r in routers]
snat_binding = l3_sched_db.RouterL3AgentBinding
query = (context.session.query(snat_binding).
filter(snat_binding.router_id.in_(router_ids))).all()
bindings = dict((b.router_id, b) for b in query)
for rtr in routers:
gw_port_id = rtr['gw_port_id']
# Collect gw ports only if available
if gw_port_id and gw_ports.get(gw_port_id):
rtr['gw_port'] = gw_ports[gw_port_id]
if 'enable_snat' in rtr[l3.EXTERNAL_GW_INFO]:
rtr['enable_snat'] = (
rtr[l3.EXTERNAL_GW_INFO]['enable_snat'])
binding = bindings.get(rtr['id'])
if not binding:
rtr['gw_port_host'] = None
LOG.debug('No snat is bound to router %s', rtr['id'])
continue
rtr['gw_port_host'] = binding.l3_agent.host
return routers
def _process_routers(self, context, routers):
routers_dict = {}
snat_intfs_by_router_id = self._get_snat_sync_interfaces(
context, [r['id'] for r in routers])
for router in routers:
routers_dict[router['id']] = router
if router['gw_port_id']:
snat_router_intfs = snat_intfs_by_router_id[router['id']]
LOG.debug("SNAT ports returned: %s ", snat_router_intfs)
router[l3_const.SNAT_ROUTER_INTF_KEY] = snat_router_intfs
return routers_dict
def _process_floating_ips_dvr(self, context, routers_dict,
floating_ips, host, agent):
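        # Attach to each router only the floating IPs hosted on (or migrating to) this
        # agent's host, along with the agent's FIP gateway ports.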
fip_sync_interfaces = None
LOG.debug("FIP Agent : %s ", agent.id)
for floating_ip in floating_ips:
router = routers_dict.get(floating_ip['router_id'])
if router:
router_floatingips = router.get(const.FLOATINGIP_KEY, [])
if router['distributed']:
if (floating_ip.get('host', None) != host and
floating_ip.get('dest_host') is None):
continue
LOG.debug("Floating IP host: %s", floating_ip['host'])
router_floatingips.append(floating_ip)
router[const.FLOATINGIP_KEY] = router_floatingips
if not fip_sync_interfaces:
fip_sync_interfaces = self._get_fip_sync_interfaces(
context, agent.id)
LOG.debug("FIP Agent ports: %s", fip_sync_interfaces)
router[l3_const.FLOATINGIP_AGENT_INTF_KEY] = (
fip_sync_interfaces)
def _get_fip_sync_interfaces(self, context, fip_agent_id):
"""Query router interfaces that relate to list of router_ids."""
if not fip_agent_id:
return []
filters = {'device_id': [fip_agent_id],
'device_owner': [const.DEVICE_OWNER_AGENT_GW]}
interfaces = self._core_plugin.get_ports(context.elevated(), filters)
LOG.debug("Return the FIP ports: %s ", interfaces)
return interfaces
@log_helper.log_method_call
def _get_dvr_sync_data(self, context, host, agent, router_ids=None,
active=None):
routers, interfaces, floating_ips = self._get_router_info_list(
context, router_ids=router_ids, active=active,
device_owners=const.ROUTER_INTERFACE_OWNERS)
dvr_router_ids = set(router['id'] for router in routers
if is_distributed_router(router))
floating_ip_port_ids = [fip['port_id'] for fip in floating_ips
if fip['router_id'] in dvr_router_ids]
if floating_ip_port_ids:
port_filter = {'id': floating_ip_port_ids}
ports = self._core_plugin.get_ports(context, port_filter)
port_dict = {}
for port in ports:
# Make sure that we check for cases were the port
# might be in a pre-live migration state or also
# check for the portbinding profile 'migrating_to'
# key for the host.
port_profile = port.get(portbindings.PROFILE)
port_in_migration = (
port_profile and
port_profile.get('migrating_to') == host)
if (port[portbindings.HOST_ID] == host or port_in_migration):
port_dict.update({port['id']: port})
# Add the port binding host to the floatingip dictionary
for fip in floating_ips:
vm_port = port_dict.get(fip['port_id'], None)
if vm_port:
fip['host'] = self._get_dvr_service_port_hostid(
context, fip['port_id'], port=vm_port)
fip['dest_host'] = (
self._get_dvr_migrating_service_port_hostid(
context, fip['port_id'], port=vm_port))
routers_dict = self._process_routers(context, routers)
self._process_floating_ips_dvr(context, routers_dict,
floating_ips, host, agent)
ports_to_populate = []
for router in routers_dict.values():
if router.get('gw_port'):
ports_to_populate.append(router['gw_port'])
if router.get(l3_const.FLOATINGIP_AGENT_INTF_KEY):
ports_to_populate += router[l3_const.FLOATINGIP_AGENT_INTF_KEY]
if router.get(l3_const.SNAT_ROUTER_INTF_KEY):
ports_to_populate += router[l3_const.SNAT_ROUTER_INTF_KEY]
ports_to_populate += interfaces
self._populate_mtu_and_subnets_for_ports(context, ports_to_populate)
self._process_interfaces(routers_dict, interfaces)
return list(routers_dict.values())
def _get_dvr_service_port_hostid(self, context, port_id, port=None):
"""Returns the portbinding host_id for dvr service port."""
port_db = port or self._core_plugin.get_port(context, port_id)
device_owner = port_db['device_owner'] if port_db else ""
if n_utils.is_dvr_serviced(device_owner):
return port_db[portbindings.HOST_ID]
def _get_dvr_migrating_service_port_hostid(
self, context, port_id, port=None):
"""Returns the migrating host_id from the migrating profile."""
port_db = port or self._core_plugin.get_port(context, port_id)
port_profile = port_db.get(portbindings.PROFILE)
port_dest_host = None
if port_profile:
port_dest_host = port_profile.get('migrating_to')
device_owner = port_db['device_owner'] if port_db else ""
if n_utils.is_dvr_serviced(device_owner):
return port_dest_host
def _get_agent_gw_ports_exist_for_network(
self, context, network_id, host, agent_id):
"""Return agent gw port if exist, or None otherwise."""
if not network_id:
LOG.debug("Network not specified")
return
filters = {
'network_id': [network_id],
'device_id': [agent_id],
'device_owner': [const.DEVICE_OWNER_AGENT_GW]
}
ports = self._core_plugin.get_ports(context, filters)
if ports:
return ports[0]
def delete_floatingip_agent_gateway_port(
self, context, host_id, ext_net_id):
"""Function to delete FIP gateway port with given ext_net_id."""
# delete any fip agent gw port
device_filter = {'device_owner': [const.DEVICE_OWNER_AGENT_GW],
'network_id': [ext_net_id]}
ports = self._core_plugin.get_ports(context,
filters=device_filter)
for p in ports:
if not host_id or p[portbindings.HOST_ID] == host_id:
self._core_plugin.ipam.delete_port(context, p['id'])
if host_id:
return
def check_for_fip_and_create_agent_gw_port_on_host_if_not_exists(
self, context, port, host):
"""Create fip agent_gw_port on host if not exists"""
fip = self._get_floatingip_on_port(context, port_id=port['id'])
if not fip:
return
network_id = fip.get('floating_network_id')
agent_gw_port = self.create_fip_agent_gw_port_if_not_exists(
context.elevated(), network_id, host)
LOG.debug("Port-in-Migration: Floatingip Agent Gateway port "
"%(gw)s created for the future host: %(dest_host)s",
{'gw': agent_gw_port,
'dest_host': host})
def create_fip_agent_gw_port_if_not_exists(
self, context, network_id, host):
"""Function to return the FIP Agent GW port.
This function will create a FIP Agent GW port
if required. If the port already exists, it
will return the existing port and will not
create a new one.
"""
l3_agent_db = self._get_agent_by_type_and_host(
context, const.AGENT_TYPE_L3, host)
if l3_agent_db:
LOG.debug("Agent ID exists: %s", l3_agent_db['id'])
f_port = self._get_agent_gw_ports_exist_for_network(
context, network_id, host, l3_agent_db['id'])
if not f_port:
LOG.info(_LI('Agent Gateway port does not exist,'
' so create one: %s'), f_port)
port_data = {'tenant_id': '',
'network_id': network_id,
'device_id': l3_agent_db['id'],
'device_owner': const.DEVICE_OWNER_AGENT_GW,
portbindings.HOST_ID: host,
'admin_state_up': True,
'name': ''}
agent_port = p_utils.create_port(self._core_plugin, context,
{'port': port_data})
if agent_port:
self._populate_mtu_and_subnets_for_ports(context,
[agent_port])
return agent_port
msg = _("Unable to create the Agent Gateway Port")
raise n_exc.BadRequest(resource='router', msg=msg)
else:
self._populate_mtu_and_subnets_for_ports(context, [f_port])
return f_port
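    # Illustrative sketch (not part of the original module): a caller that needs
    # the floating-IP agent gateway port for a host would typically invoke
    #
    #     agent_gw_port = self.create_fip_agent_gw_port_if_not_exists(
    #         context.elevated(), ext_net_id, host)
    #
    # where ext_net_id and host are hypothetical values; the method returns the
    # existing port or creates a new one bound to that host.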
def _get_snat_interface_ports_for_router(self, context, router_id):
"""Return all existing snat_router_interface ports."""
qry = context.session.query(l3_db.RouterPort)
qry = qry.filter_by(
router_id=router_id,
port_type=const.DEVICE_OWNER_ROUTER_SNAT
)
ports = [self._core_plugin._make_port_dict(rp.port, None)
for rp in qry]
return ports
def _add_csnat_router_interface_port(
self, context, router, network_id, subnet_id, do_pop=True):
"""Add SNAT interface to the specified router and subnet."""
port_data = {'tenant_id': '',
'network_id': network_id,
'fixed_ips': [{'subnet_id': subnet_id}],
'device_id': router.id,
'device_owner': const.DEVICE_OWNER_ROUTER_SNAT,
'admin_state_up': True,
'name': ''}
snat_port = p_utils.create_port(self._core_plugin, context,
{'port': port_data})
if not snat_port:
msg = _("Unable to create the SNAT Interface Port")
raise n_exc.BadRequest(resource='router', msg=msg)
with context.session.begin(subtransactions=True):
router_port = l3_db.RouterPort(
port_id=snat_port['id'],
router_id=router.id,
port_type=const.DEVICE_OWNER_ROUTER_SNAT
)
context.session.add(router_port)
if do_pop:
return self._populate_mtu_and_subnets_for_ports(context,
[snat_port])
return snat_port
def _create_snat_intf_ports_if_not_exists(self, context, router):
"""Function to return the snat interface port list.
This function will return the snat interface port list
if it exists. If the port does not exist it will create
new ports and then return the list.
"""
port_list = self._get_snat_interface_ports_for_router(
context, router.id)
if port_list:
self._populate_mtu_and_subnets_for_ports(context, port_list)
return port_list
port_list = []
int_ports = (
rp.port for rp in
router.attached_ports.filter_by(
port_type=const.DEVICE_OWNER_DVR_INTERFACE
)
)
LOG.info(_LI('SNAT interface port list does not exist,'
' so create one: %s'), port_list)
for intf in int_ports:
if intf.fixed_ips:
# Passing the subnet for the port to make sure the IP's
# are assigned on the right subnet if multiple subnet
# exists
snat_port = self._add_csnat_router_interface_port(
context, router, intf['network_id'],
intf['fixed_ips'][0]['subnet_id'], do_pop=False)
port_list.append(snat_port)
if port_list:
self._populate_mtu_and_subnets_for_ports(context, port_list)
return port_list
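    # Illustrative sketch (not part of the original module): when a distributed
    # router gains an external gateway, a caller might ensure the centralized
    # SNAT ports exist with
    #
    #     snat_ports = self._create_snat_intf_ports_if_not_exists(
    #         context.elevated(), router)
    #
    # which returns the existing list or creates one csnat port per DVR interface.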
def _generate_arp_table_and_notify_agent(
self, context, fixed_ip, mac_address, notifier):
"""Generates the arp table entry and notifies the l3 agent."""
ip_address = fixed_ip['ip_address']
subnet = fixed_ip['subnet_id']
filters = {'fixed_ips': {'subnet_id': [subnet]},
'device_owner': [const.DEVICE_OWNER_DVR_INTERFACE]}
ports = self._core_plugin.get_ports(context, filters=filters)
router_id = next((port['device_id'] for port in ports), None)
if not router_id:
return
arp_table = {'ip_address': ip_address,
'mac_address': mac_address,
'subnet_id': subnet}
notifier(context, router_id, arp_table)
def _should_update_arp_entry_for_dvr_service_port(self, port_dict):
# Check this is a valid VM or service port
return (n_utils.is_dvr_serviced(port_dict['device_owner']) and
port_dict['fixed_ips'])
def _get_subnet_id_for_given_fixed_ip(
self, context, fixed_ip, port_dict):
"""Returns the subnet_id that matches the fixedip on a network."""
filters = {'network_id': [port_dict['network_id']]}
subnets = self._core_plugin.get_subnets(context, filters)
for subnet in subnets:
if ipam_utils.check_subnet_ip(subnet['cidr'], fixed_ip):
return subnet['id']
def _get_allowed_address_pair_fixed_ips(self, context, port_dict):
"""Returns all fixed_ips associated with the allowed_address_pair."""
aa_pair_fixed_ips = []
if port_dict.get('allowed_address_pairs'):
for address_pair in port_dict['allowed_address_pairs']:
aap_ip_cidr = address_pair['ip_address'].split("/")
if len(aap_ip_cidr) == 1 or int(aap_ip_cidr[1]) == 32:
subnet_id = self._get_subnet_id_for_given_fixed_ip(
context, aap_ip_cidr[0], port_dict)
if subnet_id is not None:
fixed_ip = {'subnet_id': subnet_id,
'ip_address': aap_ip_cidr[0]}
aa_pair_fixed_ips.append(fixed_ip)
else:
LOG.debug("Subnet does not match for the given "
"fixed_ip %s for arp update", aap_ip_cidr[0])
return aa_pair_fixed_ips
def update_arp_entry_for_dvr_service_port(self, context, port_dict):
"""Notify L3 agents of ARP table entry for dvr service port.
When a dvr service port goes up, look for the DVR router on
the port's subnet, and send the ARP details to all
L3 agents hosting the router to add it.
If there are any allowed_address_pairs associated with the port
those fixed_ips should also be updated in the ARP table.
"""
if not self._should_update_arp_entry_for_dvr_service_port(port_dict):
return
fixed_ips = port_dict['fixed_ips']
allowed_address_pair_fixed_ips = (
self._get_allowed_address_pair_fixed_ips(context, port_dict))
changed_fixed_ips = fixed_ips + allowed_address_pair_fixed_ips
for fixed_ip in changed_fixed_ips:
self._generate_arp_table_and_notify_agent(
context, fixed_ip, port_dict['mac_address'],
self.l3_rpc_notifier.add_arp_entry)
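    # Illustrative sketch (not part of the original module): a plugin reacting to
    # a DVR-serviced port going active might refresh agent ARP tables with
    #
    #     self.update_arp_entry_for_dvr_service_port(context, port_dict)
    #
    # which notifies every L3 agent hosting the router on the port's subnet.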
def delete_arp_entry_for_dvr_service_port(
self, context, port_dict, fixed_ips_to_delete=None):
"""Notify L3 agents of ARP table entry for dvr service port.
When a dvr service port goes down, look for the DVR
router on the port's subnet, and send the ARP details to all
L3 agents hosting the router to delete it.
If there are any allowed_address_pairs associated with the
port, those fixed_ips should be removed from the ARP table.
"""
if not self._should_update_arp_entry_for_dvr_service_port(port_dict):
return
if not fixed_ips_to_delete:
fixed_ips = port_dict['fixed_ips']
allowed_address_pair_fixed_ips = (
self._get_allowed_address_pair_fixed_ips(context, port_dict))
fixed_ips_to_delete = fixed_ips + allowed_address_pair_fixed_ips
for fixed_ip in fixed_ips_to_delete:
self._generate_arp_table_and_notify_agent(
context, fixed_ip, port_dict['mac_address'],
self.l3_rpc_notifier.del_arp_entry)
def delete_csnat_router_interface_ports(self, context,
router, subnet_id=None):
# Each csnat router interface port is associated
# with a subnet, so we need to pass the subnet id to
# delete the right ports.
# TODO(markmcclain): This is suboptimal but was left to reduce
# changeset size since it is late in cycle
ports = [
rp.port.id for rp in
router.attached_ports.filter_by(
port_type=const.DEVICE_OWNER_ROUTER_SNAT)
if rp.port
]
c_snat_ports = self._core_plugin.get_ports(
context,
filters={'id': ports}
)
for p in c_snat_ports:
if subnet_id is None:
self._core_plugin.delete_port(context,
p['id'],
l3_port_check=False)
else:
if p['fixed_ips'][0]['subnet_id'] == subnet_id:
LOG.debug("Subnet matches: %s", subnet_id)
self._core_plugin.delete_port(context,
p['id'],
l3_port_check=False)
def create_floatingip(self, context, floatingip,
initial_status=const.FLOATINGIP_STATUS_ACTIVE):
floating_ip = self._create_floatingip(
context, floatingip, initial_status)
self._notify_floating_ip_change(context, floating_ip)
return floating_ip
def _notify_floating_ip_change(self, context, floating_ip):
router_id = floating_ip['router_id']
fixed_port_id = floating_ip['port_id']
# we need to notify agents only in case Floating IP is associated
if not router_id or not fixed_port_id:
return
try:
# using admin context as router may belong to admin tenant
router = self._get_router(context.elevated(), router_id)
except l3.RouterNotFound:
LOG.warning(_LW("Router %s was not found. "
"Skipping agent notification."),
router_id)
return
if is_distributed_router(router):
host = self._get_dvr_service_port_hostid(context, fixed_port_id)
dest_host = self._get_dvr_migrating_service_port_hostid(
context, fixed_port_id)
self.l3_rpc_notifier.routers_updated_on_host(
context, [router_id], host)
if dest_host and dest_host != host:
self.l3_rpc_notifier.routers_updated_on_host(
context, [router_id], dest_host)
else:
self.notify_router_updated(context, router_id)
def update_floatingip(self, context, id, floatingip):
old_floatingip, floatingip = self._update_floatingip(
context, id, floatingip)
self._notify_floating_ip_change(context, old_floatingip)
if (floatingip['router_id'] != old_floatingip['router_id'] or
floatingip['port_id'] != old_floatingip['port_id']):
self._notify_floating_ip_change(context, floatingip)
return floatingip
def delete_floatingip(self, context, id):
floating_ip = self._delete_floatingip(context, id)
self._notify_floating_ip_change(context, floating_ip)
def _get_address_pair_active_port_with_fip(
self, context, port_dict, port_addr_pair_ip):
port_valid_state = (port_dict['admin_state_up'] or
(port_dict['status'] == const.PORT_STATUS_ACTIVE))
if not port_valid_state:
return
query = context.session.query(l3_db.FloatingIP).filter(
l3_db.FloatingIP.fixed_ip_address == port_addr_pair_ip)
fip = query.first()
return self._core_plugin.get_port(
context, fip.fixed_port_id) if fip else None
def update_unbound_allowed_address_pair_port_binding(
self, context, service_port_dict, port_address_pairs):
"""Update allowed address pair port with host and device_owner
This function sets the host and device_owner to the port
associated with the port_addr_pair_ip with the port_dict's
host and device_owner.
"""
port_addr_pair_ip = port_address_pairs['ip_address']
address_pair_port = self._get_address_pair_active_port_with_fip(
context, service_port_dict, port_addr_pair_ip)
if address_pair_port:
host = service_port_dict[portbindings.HOST_ID]
dev_owner = service_port_dict['device_owner']
address_pair_dev_owner = address_pair_port.get('device_owner')
# If the allowed_address_pair port already has an associated
# device owner, and if the device_owner is a dvr serviceable
# port, then don't update the device_owner.
port_profile = address_pair_port.get(portbindings.PROFILE, {})
if n_utils.is_dvr_serviced(address_pair_dev_owner):
port_profile['original_owner'] = address_pair_dev_owner
port_data = {portbindings.HOST_ID: host,
portbindings.PROFILE: port_profile}
else:
port_data = {portbindings.HOST_ID: host,
'device_owner': dev_owner}
update_port = self._core_plugin.update_port(
context, address_pair_port['id'], {'port': port_data})
return update_port
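    # Illustrative sketch (not part of the original module): for an unbound
    # allowed-address-pair port whose IP carries a floating IP, a caller might
    # sync the binding from the active service port with
    #
    #     self.update_unbound_allowed_address_pair_port_binding(
    #         context, service_port_dict, address_pair)
    #
    # and later revert it via remove_unbound_allowed_address_pair_port_binding().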
def remove_unbound_allowed_address_pair_port_binding(
self, context, service_port_dict, port_address_pairs):
"""Remove allowed address pair port binding and device_owner
This function clears the host and device_owner associated with
the port_addr_pair_ip.
"""
port_addr_pair_ip = port_address_pairs['ip_address']
address_pair_port = self._get_address_pair_active_port_with_fip(
context, service_port_dict, port_addr_pair_ip)
if address_pair_port:
# Before reverting the changes, fetch the original
# device owner saved in profile and update the port
port_profile = address_pair_port.get(portbindings.PROFILE)
orig_device_owner = ""
if port_profile:
orig_device_owner = port_profile.get('original_owner')
del port_profile['original_owner']
port_data = {portbindings.HOST_ID: "",
'device_owner': orig_device_owner,
portbindings.PROFILE: port_profile}
update_port = self._core_plugin.update_port(
context, address_pair_port['id'], {'port': port_data})
return update_port
def is_distributed_router(router):
"""Return True if router to be handled is distributed."""
try:
# See if router is a DB object first
requested_router_type = router.extra_attributes.distributed
except AttributeError:
# if not, try to see if it is a request body
requested_router_type = router.get('distributed')
if validators.is_attr_set(requested_router_type):
return requested_router_type
return cfg.CONF.router_distributed
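# Illustrative sketch (not part of the original module): is_distributed_router()
# accepts either a router DB object or a request body; the payload below is
# hypothetical:
#
#     is_distributed_router({'name': 'r1', 'distributed': True})   # -> True
#
# When the attribute is unset, it falls back to cfg.CONF.router_distributed.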
| 46.773143 | 79 | 0.603528 |
4a2790f0322bed65b6bcb58996f43a03219a39ce | 1,894 | py | Python | chainer_chemistry/dataset/preprocessors/relgat_preprocessor.py | pfnet/chainerchem | efe323aa21f63a815130d673781e7cca1ccb72d2 | ["MIT"] | 184 | 2019-11-27T12:59:01.000Z | 2022-03-29T19:18:54.000Z | chainer_chemistry/dataset/preprocessors/relgat_preprocessor.py | pfnet/chainerchem | efe323aa21f63a815130d673781e7cca1ccb72d2 | ["MIT"] | 21 | 2019-12-08T01:53:33.000Z | 2020-10-23T01:19:56.000Z | chainer_chemistry/dataset/preprocessors/relgat_preprocessor.py | pfnet/chainerchem | efe323aa21f63a815130d673781e7cca1ccb72d2 | ["MIT"] | 45 | 2019-11-28T09:59:54.000Z | 2022-02-07T02:42:46.000Z |
from chainer_chemistry.dataset.preprocessors.common import construct_atomic_number_array # NOQA
from chainer_chemistry.dataset.preprocessors.common import construct_discrete_edge_matrix # NOQA
from chainer_chemistry.dataset.preprocessors.common import MolFeatureExtractionError # NOQA
from chainer_chemistry.dataset.preprocessors.common import type_check_num_atoms
from chainer_chemistry.dataset.preprocessors.mol_preprocessor import MolPreprocessor # NOQA
class RelGATPreprocessor(MolPreprocessor):
"""RelGAT Preprocessor
Args:
max_atoms (int): Max number of atoms for each molecule, if the
number of atoms is more than this value, this data is simply
ignored.
Setting negative value indicates no limit for max atoms.
out_size (int): It specifies the size of array returned by
`get_input_features`.
If the number of atoms in the molecule is less than this value,
the returned arrays is padded to have fixed size.
Setting negative value indicates do not pad returned array.
"""
def __init__(self, max_atoms=-1, out_size=-1, add_Hs=False):
super(RelGATPreprocessor, self).__init__(add_Hs=add_Hs)
if max_atoms >= 0 and out_size >= 0 and max_atoms > out_size:
raise ValueError('max_atoms {} must be less or equal to '
'out_size {}'.format(max_atoms, out_size))
self.max_atoms = max_atoms
self.out_size = out_size
def get_input_features(self, mol):
"""get input features
Args:
mol (Mol):
Returns:
"""
type_check_num_atoms(mol, self.max_atoms)
atom_array = construct_atomic_number_array(mol, out_size=self.out_size)
adj_array = construct_discrete_edge_matrix(mol, out_size=self.out_size)
return atom_array, adj_array
| 42.088889 | 97 | 0.697994 |
4a27911104f5be1d3b35007b5e01a6cc21236541 | 693 | py | Python | pmixvenv/Scripts/rstpep2html.py | michelcarvalho22/ecommerce_simples | a2ba147cac5dab054b373c9ebbb752d8f1eb484a | [
"CC0-1.0"
] | 1 | 2020-07-23T15:40:58.000Z | 2020-07-23T15:40:58.000Z | pmixvenv/Scripts/rstpep2html.py | michelcarvalho22/ecommerce_simples | a2ba147cac5dab054b373c9ebbb752d8f1eb484a | [
"CC0-1.0"
] | null | null | null | pmixvenv/Scripts/rstpep2html.py | michelcarvalho22/ecommerce_simples | a2ba147cac5dab054b373c9ebbb752d8f1eb484a | [
"CC0-1.0"
] | null | null | null | #!d:\pointmix\pmixvenv\scripts\python.exe
# $Id: rstpep2html.py 4564 2006-05-21 20:44:42Z wiemann $
# Author: David Goodger <[email protected]>
# Copyright: This module has been placed in the public domain.
"""
A minimal front end to the Docutils Publisher, producing HTML from PEP
(Python Enhancement Proposal) documents.
"""
try:
import locale
locale.setlocale(locale.LC_ALL, '')
except:
pass
from docutils.core import publish_cmdline, default_description
description = ('Generates (X)HTML from reStructuredText-format PEP files. '
+ default_description)
publish_cmdline(reader_name='pep', writer_name='pep_html',
description=description)
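# Illustrative invocation sketch (not part of the original script); file names
# are hypothetical and the available options depend on the installed docutils:
#
#     python rstpep2html.py pep-9999.txt pep-9999.html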
| 26.653846 | 76 | 0.728716 |
4a27916aaed4e9d577d8960eb3a0d3ceb20db678 | 18,424 | py | Python | sample_mods/distl/dist_col.py | kuanhanl/cappresse | 31cd7d03414a930f7e2c21e1a3eb5e7dd25cc500 | ["MIT"] | 2 | 2019-09-26T20:56:06.000Z | 2019-11-18T21:03:27.000Z | sample_mods/distl/dist_col.py | kuanhanl/cappresse | 31cd7d03414a930f7e2c21e1a3eb5e7dd25cc500 | ["MIT"] | 6 | 2018-03-19T20:36:49.000Z | 2018-04-13T15:27:29.000Z | sample_mods/distl/dist_col.py | kuanhanl/cappresse | 31cd7d03414a930f7e2c21e1a3eb5e7dd25cc500 | ["MIT"] | 5 | 2018-10-04T18:51:02.000Z | 2020-07-02T15:31:35.000Z |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import division
from __future__ import print_function
from pyomo.core.base import ConcreteModel, Set, Constraint, Var,\
Param, Objective, minimize, sqrt, exp, Suffix, Expression, value
from nmpc_mhe.aux.cpoinsc import collptsgen
from nmpc_mhe.aux.lagrange_f import lgr, lgry, lgrdot, lgrydot
from dist_col_mod import *
from six import itervalues, iterkeys, iteritems
from pyomo.opt import ProblemFormat, SolverFactory
import re, os
"""
Version 03.
Need a reference model that can initialize the reference steady-state model.
"""
__author__ = 'David M Thierry @dthierry'
class DistDiehlNegrete(ConcreteModel):
def __init__(self, nfe_t, ncp_t, **kwargs):
ConcreteModel.__init__(self)
steady = kwargs.pop('steady', False)
_t = kwargs.pop('_t', 1.0)
Ntray = kwargs.pop('Ntray', 42)
# --------------------------------------------------------------------------------------------------------------
# Orthogonal Collocation Parameters section
# Radau
self._alp_gauB_t = 1
self._bet_gauB_t = 0
if steady:
print("[I] " + str(self.__class__.__name__) + " NFE and NCP Overriden - Steady state mode")
self.nfe_t = 1
self.ncp_t = 1
else:
self.nfe_t = nfe_t
self.ncp_t = ncp_t
self.tau_t = collptsgen(self.ncp_t, self._alp_gauB_t, self._bet_gauB_t)
# start at zero
self.tau_i_t = {0: 0.}
# create a list
for ii in range(1, self.ncp_t + 1):
self.tau_i_t[ii] = self.tau_t[ii - 1]
# ======= SETS ======= #
# For finite element = 1 .. NFE
# This has to be > 0
self.fe_t = Set(initialize=[ii for ii in range(1, self.nfe_t + 1)])
# collocation points
# collocation points for differential variables
self.cp_t = Set(initialize=[ii for ii in range(0, self.ncp_t + 1)])
# collocation points for algebraic variables
self.cp_ta = Set(within=self.cp_t, initialize=[ii for ii in range(1, self.ncp_t + 1)])
# create collocation param
self.taucp_t = Param(self.cp_t, initialize=self.tau_i_t)
self.ldot_t = Param(self.cp_t, self.cp_t, initialize=
(lambda m, j, k: lgrdot(k, m.taucp_t[j], self.ncp_t, self._alp_gauB_t, self._bet_gauB_t))) #: watch out for this!
self.l1_t = Param(self.cp_t, initialize=
(lambda m, j: lgr(j, 1, self.ncp_t, self._alp_gauB_t, self._bet_gauB_t)))
# --------------------------------------------------------------------------------------------------------------
# Model parameters
self.Ntray = Ntray
self.tray = Set(initialize=[i for i in range(1, Ntray + 1)])
self.feed = Param(self.tray,
initialize=lambda m, t: 57.5294 if t == 21 else 0.0,
mutable=True)
self.xf = Param(initialize=0.32, mutable=True) # feed mole fraction
self.hf = Param(initialize=9081.3) # feed enthalpy
self.hlm0 = Param(initialize=2.6786e-04)
self.hlma = Param(initialize=-0.14779)
self.hlmb = Param(initialize=97.4289)
self.hlmc = Param(initialize=-2.1045e04)
self.hln0 = Param(initialize=4.0449e-04)
self.hlna = Param(initialize=-0.1435)
self.hlnb = Param(initialize=121.7981)
self.hlnc = Param(initialize=-3.0718e04)
self.r = Param(initialize=8.3147)
self.a = Param(initialize=6.09648)
self.b = Param(initialize=1.28862)
self.c1 = Param(initialize=1.016)
self.d = Param(initialize=15.6875)
self.l = Param(initialize=13.4721)
self.f = Param(initialize=2.615)
self.gm = Param(initialize=0.557)
self.Tkm = Param(initialize=512.6)
self.Pkm = Param(initialize=8.096e06)
self.gn = Param(initialize=0.612)
self.Tkn = Param(initialize=536.7)
self.Pkn = Param(initialize=5.166e06)
self.CapAm = Param(initialize=23.48)
self.CapBm = Param(initialize=3626.6)
self.CapCm = Param(initialize=-34.29)
self.CapAn = Param(initialize=22.437)
self.CapBn = Param(initialize=3166.64)
self.CapCn = Param(initialize=-80.15)
self.pstrip = Param(initialize=250)
self.prect = Param(initialize=190)
def _p_init(m, t):
ptray = 9.39e04
if t <= 20:
return _p_init(m, 21) + m.pstrip * (21 - t)
elif 20 < t < m.Ntray:
return ptray + m.prect * (m.Ntray - t)
elif t == m.Ntray:
return 9.39e04
self.p = Param(self.tray, initialize=_p_init)
self.T29_des = Param(initialize=343.15)
self.T15_des = Param(initialize=361.15)
self.Dset = Param(initialize=1.83728)
self.Qcset = Param(initialize=1.618890)
self.Qrset = Param(initialize=1.786050)
# self.Recset = Param()
self.alpha_T29 = Param(initialize=1)
self.alpha_T15 = Param(initialize=1)
self.alpha_D = Param(initialize=1)
self.alpha_Qc = Param(initialize=1)
self.alpha_Qr = Param(initialize=1)
self.alpha_Rec = Param(initialize=1)
def _alpha_init(m, i):
if i <= 21:
return 0.62
else:
return 0.35
self.alpha = Param(self.tray,
initialize=lambda m, t: 0.62 if t <= 21 else 0.35)
# --------------------------------------------------------------------------------------------------------------
#: First define differential state variables (state: x, ic-Param: x_ic, derivative-Var:dx_dt
#: States (differential) section
zero_tray = dict.fromkeys(self.tray)
zero3 = dict.fromkeys(self.fe_t * self.cp_t * self.tray)
for key in zero3.keys():
zero3[key] = 0.0
def __m_init(m, i, j, t):
if t < m.Ntray:
return 4000.
elif t == 1:
return 104340.
elif t == m.Ntray:
return 5000.
#: Liquid hold-up
self.M = Var(self.fe_t, self.cp_t, self.tray,
initialize=__m_init)
#: Mole-fraction
self.x = Var(self.fe_t, self.cp_t, self.tray, initialize=lambda m, i, j, t: 0.999 * t / m.Ntray)
#: Initial state-Param
self.M_ic = zero_tray if steady else Param(self.tray, initialize=0.0, mutable=True)
self.x_ic = zero_tray if steady else Param(self.tray, initialize=0.0, mutable=True)
#: Derivative-var
self.dM_dt = zero3 if steady else Var(self.fe_t, self.cp_t, self.tray, initialize=0.0)
self.dx_dt = zero3 if steady else Var(self.fe_t, self.cp_t, self.tray, initialize=0.0)
# --------------------------------------------------------------------------------------------------------------
# States (algebraic) section
# Tray temperature
self.T = Var(self.fe_t, self.cp_ta, self.tray,
initialize=lambda m, i, j, t: ((370.781 - 335.753) / m.Ntray) * t + 370.781)
self.Tdot = Var(self.fe_t, self.cp_ta, self.tray, initialize=1e-05) #: Not really a der_var
# saturation pressures
self.pm = Var(self.fe_t, self.cp_ta, self.tray, initialize=1e4)
self.pn = Var(self.fe_t, self.cp_ta, self.tray, initialize=1e4)
# Vapor mole flowrate
self.V = Var(self.fe_t, self.cp_ta, self.tray, initialize=44.0)
def _l_init(m, i, j, t):
if 2 <= t <= 21:
return 83.
elif 22 <= t <= 42:
return 23
elif t == 1:
return 40
# Liquid mole flowrate
self.L = Var(self.fe_t, self.cp_ta, self.tray, initialize=_l_init)
# Vapor mole frac & diff var
self.y = Var(self.fe_t, self.cp_ta, self.tray,
initialize=lambda m, i, j, t: ((0.99 - 0.005) / m.Ntray) * t + 0.005)
# Liquid enthalpy # enthalpy
self.hl = Var(self.fe_t, self.cp_ta, self.tray, initialize=10000.)
# Liquid enthalpy # enthalpy
self.hv = Var(self.fe_t, self.cp_ta, self.tray, initialize=5e+04)
# Re-boiler & condenser heat
self.Qc = Var(self.fe_t, self.cp_ta, initialize=1.6e06)
self.D = Var(self.fe_t, self.cp_ta, initialize=18.33)
# vol holdups
self.Vm = Var(self.fe_t, self.cp_ta, self.tray, initialize=6e-05)
self.Mv = Var(self.fe_t, self.cp_ta, self.tray,
initialize=lambda m, i, j, t: 0.23 if 1 < t < m.Ntray else 0.0)
self.Mv1 = Var(self.fe_t, self.cp_ta, initialize=8.57)
self.Mvn = Var(self.fe_t, self.cp_ta, initialize=0.203)
hi_t = dict.fromkeys(self.fe_t)
for key in hi_t.keys():
hi_t[key] = 1.0 if steady else _t/self.nfe_t
self.hi_t = hi_t if steady else Param(self.fe_t, initialize=hi_t)
# --------------------------------------------------------------------------------------------------------------
#: Controls
self.u1 = Param(self.fe_t, initialize=7.72700925775773761472464684629813E-01, mutable=True) #: Dummy
self.u2 = Param(self.fe_t, initialize=1.78604740940007800236344337463379E+06, mutable=True) #: Dummy
self.Rec = Var(self.fe_t, initialize=7.72700925775773761472464684629813E-01)
self.Qr = Var(self.fe_t, initialize=1.78604740940007800236344337463379E+06)
# --------------------------------------------------------------------------------------------------------------
#: Constraints for the differential states
#: Then the ode-Con:de_x, collocation-Con:dvar_t_x, noisy-Expr: noisy_x, cp-Constraint: cp_x, initial-Con: x_icc
#: Differential equations
self.de_M = Constraint(self.fe_t, self.cp_ta, self.tray, rule=m_ode)
self.de_x = Constraint(self.fe_t, self.cp_ta, self.tray, rule=x_ode)
#: Collocation equations
self.dvar_t_M = None if steady else Constraint(self.fe_t, self.cp_ta, self.tray, rule=M_COLL)
self.dvar_t_x = None if steady else Constraint(self.fe_t, self.cp_ta, self.tray, rule=x_coll)
#: Continuation equations (redundancy here)
if self.nfe_t > 1:
#: Noisy expressions
self.noisy_M = None if steady else Expression(self.fe_t, self.tray, rule=M_CONT)
self.noisy_x = None if steady else Expression(self.fe_t, self.tray, rule=x_cont)
#: Continuation equations
self.cp_M = None if steady else \
Constraint(self.fe_t, self.tray,
rule=lambda m, i, t: self.noisy_M[i, t] == 0.0 if i < self.nfe_t else Constraint.Skip)
self.cp_x = None if steady else \
Constraint(self.fe_t, self.tray,
rule=lambda m, i, t: self.noisy_x[i, t] == 0.0 if i < self.nfe_t else Constraint.Skip)
#: Initial condition-Constraints
self.M_icc = None if steady else Constraint(self.tray, rule=acm)
self.x_icc = None if steady else Constraint(self.tray, rule=acx)
# --------------------------------------------------------------------------------------------------------------
#: Constraint section (algebraic equations)
self.hrc = Constraint(self.fe_t, self.cp_ta, rule=hrc)
self.gh = Constraint(self.fe_t, self.cp_ta, self.tray, rule=gh)
self.ghb = Constraint(self.fe_t, self.cp_ta, rule=ghb)
self.ghc = Constraint(self.fe_t, self.cp_ta, rule=ghc)
self.hkl = Constraint(self.fe_t, self.cp_ta, self.tray, rule=hkl)
self.hkv = Constraint(self.fe_t, self.cp_ta, self.tray, rule=hkv)
self.lpself = Constraint(self.fe_t, self.cp_ta, self.tray, rule=lpm)
self.lpn = Constraint(self.fe_t, self.cp_ta, self.tray, rule=lpn)
self.dp = Constraint(self.fe_t, self.cp_ta, self.tray, rule=dp)
self.lTdot = Constraint(self.fe_t, self.cp_ta, self.tray, rule=lTdot)
self.gy0 = Constraint(self.fe_t, self.cp_ta, rule=gy0)
self.gy = Constraint(self.fe_t, self.cp_ta, self.tray, rule=gy)
self.dMV = Constraint(self.fe_t, self.cp_ta, self.tray, rule=dMV)
self.dMv1 = Constraint(self.fe_t, self.cp_ta, rule=dMv1)
self.dMvn = Constraint(self.fe_t, self.cp_ta, rule=dMvn)
self.hyd = Constraint(self.fe_t, self.cp_ta, self.tray, rule=hyd)
self.hyd1 = Constraint(self.fe_t, self.cp_ta, rule=hyd1)
self.hydN = Constraint(self.fe_t, self.cp_ta, rule=hydN)
self.dvself = Constraint(self.fe_t, self.cp_ta, self.tray, rule=dvm)
# --------------------------------------------------------------------------------------------------------------
#: Control constraint
self.u1_e = Expression(self.fe_t, rule=lambda m, i: self.Rec[i])
self.u2_e = Expression(self.fe_t, rule=lambda m, i: self.Qr[i])
self.u1_c = Constraint(self.fe_t, rule=lambda m, i: self.u1[i] == self.u1_e[i])
self.u2_c = Constraint(self.fe_t, rule=lambda m, i: self.u2[i] == self.u2_e[i])
# --------------------------------------------------------------------------------------------------------------
#: Suffixes
self.dual = Suffix(direction=Suffix.IMPORT_EXPORT)
self.ipopt_zL_out = Suffix(direction=Suffix.IMPORT)
self.ipopt_zU_out = Suffix(direction=Suffix.IMPORT)
self.ipopt_zL_in = Suffix(direction=Suffix.EXPORT)
self.ipopt_zU_in = Suffix(direction=Suffix.EXPORT)
def write_nl(self):
"""Writes the nl file and the respective row & col"""
name = str(self.__class__.__name__) + ".nl"
self.write(filename=name,
format=ProblemFormat.nl,
io_options={"symbolic_solver_labels": True})
def create_bounds(self):
"""Creates bounds for the variables"""
for value in itervalues(self.M):
value.setlb(1.0)
for value in itervalues(self.T):
value.setlb(200)
for value in itervalues(self.pm):
value.setlb(1.0)
for value in itervalues(self.pn):
value.setlb(1.0)
for value in itervalues(self.L):
value.setlb(0.0)
for value in itervalues(self.V):
value.setlb(0.0)
for value in itervalues(self.x):
value.setlb(0.0)
for value in itervalues(self.y):
value.setlb(0.0)
for value in itervalues(self.hl):
value.setlb(1.0)
for value in itervalues(self.hv):
value.setlb(1.0)
for value in itervalues(self.Qc):
value.setlb(0.0)
for value in itervalues(self.D):
value.setlb(0.0)
for value in itervalues(self.Vm):
value.setlb(0.0)
for value in itervalues(self.Mv):
value.setlb(0.155 + 1e-06)
for value in itervalues(self.Mv1):
value.setlb(8.5 + 1e-06)
for value in itervalues(self.Mvn):
value.setlb(0.17 + 1e-06)
for value in itervalues(self.M):
value.setub(1e+07)
for value in itervalues(self.T):
value.setub(500)
for value in itervalues(self.pm):
value.setub(5e+07)
for value in itervalues(self.pn):
value.setub(5e+07)
for value in itervalues(self.L):
value.setub(1e+03)
for value in itervalues(self.V):
value.setub(1e+03)
for value in itervalues(self.x):
value.setub(1.0)
for value in itervalues(self.y):
value.setub(1.0)
for value in itervalues(self.hl):
value.setub(1e+07)
for value in itervalues(self.hv):
value.setub(1e+07)
for value in itervalues(self.Qc):
value.setub(1e+08)
for value in itervalues(self.D):
value.setub(1e+04)
for value in itervalues(self.Vm):
value.setub(1e+04)
for value in itervalues(self.Mv):
value.setub(1e+04)
for value in itervalues(self.Mv1):
value.setub(1e+04)
for value in itervalues(self.Mvn):
value.setub(1e+04)
@staticmethod
def parse_ig_ampl(file_i):
lines = file_i.readlines()
dict = {}
for line in lines:
kk = re.split('(?:let)|[:=\s\[\]]', line)
try:
var = kk[2]
# print(var)
key = kk[3]
key = re.split(',', key)
actual_key = []
for k in key:
actual_key.append(int(k))
actual_key.append(actual_key.pop(0))
actual_key = tuple(actual_key)
value = kk[8]
value = float(value)
dict[var, actual_key] = value
except IndexError:
continue
file_i.close()
return dict
def init_steady_ref(self):
"""If the model is steady, we try to initialize it with an initial guess from ampl"""
cur_dir = os.path.dirname(__file__)
ampl_ig = os.path.join(cur_dir, "iv_ss.txt")
file_tst = open(ampl_ig, "r")
if self.nfe_t == 1 and self.ncp_t == 1:
somedict = self.parse_ig_ampl(file_tst)
for var in self.component_objects(Var, active=True):
vx = getattr(self, str(var))
for v, k in var.iteritems():
try:
vx[v] = somedict[str(var), v]
except KeyError:
continue
solver = SolverFactory('ipopt')
someresults = solver.solve(self, tee=True)
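    # Illustrative construction sketch (not part of the original module); the
    # argument values are hypothetical:
    #
    #     mod = DistDiehlNegrete(nfe_t=10, ncp_t=3, Ntray=42, _t=1.0)
    #     mod.create_bounds()
    #
    # With steady=True the discretization collapses to one finite element and
    # collocation point, and init_steady_ref() can seed it from iv_ss.txt.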
def equalize_u(self, direction="u_to_r"):
"""set current controls to the values of their respective dummies"""
if direction == "u_to_r":
for i in iterkeys(self.Rec):
self.Rec[i].set_value(value(self.u1[i]))
for i in iterkeys(self.Rec):
self.Qr[i].set_value(value(self.u2[i]))
elif direction == "r_to_u":
for i in iterkeys(self.u1):
self.u1[i].value = value(self.Rec[i])
for i in iterkeys(self.u2):
                self.u2[i].value = value(self.Qr[i])
| 40.942222 | 122 | 0.547981 |
4a27919e251b052617b0eae9dd775fe958d9b29f | 407 | py | Python | baekjoon/1057.py | GihwanKim/Baekjoon | 52eb2bf80bb1243697858445e5b5e2d50d78be4e | ["MIT"] | null | null | null | baekjoon/1057.py | GihwanKim/Baekjoon | 52eb2bf80bb1243697858445e5b5e2d50d78be4e | ["MIT"] | null | null | null | baekjoon/1057.py | GihwanKim/Baekjoon | 52eb2bf80bb1243697858445e5b5e2d50d78be4e | ["MIT"] | null | null | null |
"""
1057 : Tournament (토너먼트)
URL : https://www.acmicpc.net/problem/1057
Input :
16 8 9
Output :
4
"""
import math
N, kim, lim = map(int, input().split(' '))
for r in range(1, int(math.ceil(math.log2(N))) + 1):
left = min(kim, lim)
right = max(kim, lim)
if (right % 2) == 0 and right == (left + 1):
break
kim = (kim + 1) // 2
lim = (lim + 1) // 2
print(r)
| 16.958333 | 52 | 0.486486 |
4a2792c27626e3c0af07204d99af06f5d8a5a9fb | 495 | py | Python | vulnman/settings/orig.py | vulnman/vulnman | d48ee022bc0e4368060a990a527b1c7a5e437504 | [
"MIT"
] | 3 | 2021-12-22T07:02:24.000Z | 2022-01-27T20:19:11.000Z | vulnman/settings/orig.py | vulnman/vulnman | d48ee022bc0e4368060a990a527b1c7a5e437504 | [
"MIT"
] | 44 | 2021-12-14T07:24:29.000Z | 2022-03-23T07:01:16.000Z | vulnman/settings/orig.py | vulnman/vulnman | d48ee022bc0e4368060a990a527b1c7a5e437504 | [
"MIT"
] | 1 | 2022-01-21T16:29:56.000Z | 2022-01-21T16:29:56.000Z | VULNMAN_CSS_THEME = "vulnman-dark"
# TODO: not used
HOST_OS_ICONS = {
"linux": {
"icon": "fa fa-linux", "matches": [
"Ubuntu", "Fedora", "Arch-Linux", "Debian", "Linux"]
}
}
# TODO: legacy stuff
CUSTOM_EXTERNAL_TOOLS = {}
CRISPY_ALLOWED_TEMPLATE_PACKS = "bootstrap5"
CRISPY_TEMPLATE_PACK = "bootstrap5"
VULNERABILITY_TEMPLATE_REPO = "https://github.com/vulnman/community-vulnerability-templates"
CHECKLIST_REPO = "https://github.com/vulnman/community-checklists"
| 26.052632 | 92 | 0.70101 |
4a2792f1d47c721966dbc2626ba072de5e6dacd3 | 7,130 | py | Python | evcouplings/visualize/taxa.py | mrunalimanj/EVcouplings | 41e88ce057bc361eb27a938f069476a25aef94c6 | [
"MIT"
] | null | null | null | evcouplings/visualize/taxa.py | mrunalimanj/EVcouplings | 41e88ce057bc361eb27a938f069476a25aef94c6 | [
"MIT"
] | null | null | null | evcouplings/visualize/taxa.py | mrunalimanj/EVcouplings | 41e88ce057bc361eb27a938f069476a25aef94c6 | [
"MIT"
] | null | null | null | """
Taxonomy diversity visualization
Authors:
Nicole Thadani (load_taxonomy_lineage)
Mrunali Manjrekar
"""
import pandas as pd
import plotly.express as px
from ete3 import NCBITaxa # another import
COLOR_DISCRETE_MAP = {'Bacteria':'#56B4E9', # blue
'Eukaryota':'#D53500', # red
'Archaea':'#E69F00', # orange
'Viruses': '#AB63FA', # purple
'Other': '#189e3c'} # green
SUNBURST_HIERARCHY = ['superkingdom', "phylum", "order"]
PATH_TO_NCBI_TAXA_DATABASE = "/n/groups/marks/databases/etetoolkit/taxa.sqlite" # TODO: add to O2
def load_taxonomy_lineage(tax_ids, ncbi):
"""
Using NCBITaxa, querying all the taxonomic information on the
species' proteins included in the alignment.
Parameters
----------
tax_ids : Python list
1D list of NCBI Taxonomy IDs.
ncbi : NCBITaxa() instance
        An instance that only gets created if get_taxa gets called; that is,
        when the user wants to query the taxonomic ranks for a set of sequences
to visualize species diversity or for other purposes.
Returns
-------
    taxs : pd.DataFrame
        DataFrame with one row per queried tax ID, containing a 'tax_ID' column
        plus one column per NCBI taxonomic rank found in the lineage:
        'superkingdom', 'phylum', 'genus', 'class', 'subphylum', 'family',
        'order' and 'species'.
"""
# TODO: update the docstring
ranks = ['superkingdom',
'phylum',
'genus',
'class',
'subphylum',
'family',
'order',
'species']
taxs = []
for tax_id in tax_ids:
try:
lineage = ncbi.get_lineage(int(tax_id))
name_dict = ncbi.get_taxid_translator(lineage)
# dict: key=lineageid, value=sequence value
rank_dict = ncbi.get_rank(lineage)
# flipping the keys and entries of the dictionary.
lineage_dict = dict((rank_dict[i], name_dict[i]) for i in lineage)
lineage_dict = {k: lineage_dict[k] for k in ranks if k in lineage_dict}
# add at end so that it doesn't get prematurely added.
lineage_dict['tax_ID'] = tax_id
taxs.append(lineage_dict)
except ValueError as e:
print('Warning: {0}'.format(str(e)))
# TODO: consider whether you should adjust this depending on database type.
# TODO: create test cases? hm
return pd.DataFrame.from_dict(taxs)
def get_taxa(annotation, aln_format, database_file=PATH_TO_NCBI_TAXA_DATABASE):
"""
Helper function for loading taxa from an DataFrame of annotations.
Parameters
----------
annotation : pd.DataFrame
annotation.csv from EVcouplings align stage.
aln_format : {"fasta", "stockholm", None}
Format of alignment, None if not detectable
if stockholm, annotation file should include taxids
if fasta: #TODO: what happens when it's fasta? is
file difference enough to figure out whether or not taxids will be included
database_file : string
TODO: # dbfile="/path/to/taxa.sqlite"
Returns
-------
annotation: pd.DataFrame
Original annotations alignment, modified to include taxanomic
information for each sequence of the alignment.
"""
if aln_format != "stockholm":
pass
# TODO: Fix the formatting
annotation['tax_ID'] = annotation['Tax'].str.split('=').str[-1]
# TODO: check whether these columns are the same for
# uniprot vs uniref vs metagenomics:
# do the Tax IDs always get represented in this format?
# "Extract Uniprot/Uniref sequence annotation from Stockholm file
#(as output by jackhmmer). This function may not work for other
#formats."
# TODO: try/except for error checks - are these all numbers, basically? etc
# TODO: have column name be pulled properly based on the type of pipeline being used
# pull in that name properly so as to be consistent with database used and
# other config settings, based on `extract_header_annotation` function.
ncbi = NCBITaxa(dbfile=database_file)
# TODO: figure out where this ends up getting downloaded, and
# make sure it downloads once! can make it similar to SIFTS.py
# TODO: should this be integrated w/ update_database.py?
# doesn't have to be called in there but can be stored there
tax_ids = annotation['tax_ID'].unique().tolist()
taxs = load_taxonomy_lineage(tax_ids, ncbi)
annotation = annotation.merge(taxs, on='tax_ID', how='left')
return annotation
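# Illustrative pipeline sketch (not part of the original module); paths and the
# title are hypothetical:
#
#     annotation = pd.read_csv("align/annotation.csv")
#     annotation = get_taxa(annotation, "stockholm")
#     fig = sunburst(annotation, title="Species diversity")
#     fig.write_html("sunburst.html")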
def sunburst(annotation, title, hier=SUNBURST_HIERARCHY, color_map=COLOR_DISCRETE_MAP):
# keyword argument for hier if confident?
# other category: very specific colors, you should allow them to pass in colors
# colormap=...
# e.g. for contact maps, at the top - global variables w/ color defaults, but can be modified
# set defaults as global variables/dictionaries
"""
Generates sunburst plot from annotation dataframe.
Parameters
----------
annotation : pd.DataFrame
Original annotations alignment but with taxanomic
information for each entry of the alignment.
title : Python string
Name for plot, to be passed to plotly.
hier : Python list
1D list of desired ranks to include in the plot.
These should be ordered from highest to lowest rank desired,
and must begin with `superkingdom`, so as to color the top rank
entries systematically for straightforward comparison between alignments.
The ordering of ranks provided by NCBI:
['superkingdom', "phylum", "genus", "class", "subphylum", "family", "order"]
color_map : dictionary
A mapping to colors for the top rank.
Returns
-------
fig : Plotly figure
Sunburst plot instance.
Options for visualization:
You can visualize in-notebook with fig.show() in a Jupyter notebook.
You can also save to HTML using fig.write_html(filepath) for an interactive plot
that can be opened in a browser of your choice.
For a static image of the plot, you can use fig.write_image(filepath)
with any preferred extension of your choice (JPG, PNG, JPEG, SVG, PDF, etc).
"""
# plotly will throw an error if any intermediate rank entries are empty, so
# we must fill in the empty intermediate ranks so as not to lose any hits.
annotation.fillna("Other", inplace = True) # for purposes of filling in empty intermediate ranks.
annotation["count"] = 1 # a helper column for providing "counts" to plotting function.
fig = px.sunburst(annotation, path=hier, values='count',
title = title, color=hier[0],
color_discrete_map=color_map)
return fig
| 34.444444 | 101 | 0.634081 |
4a279343a02799bae12d577b1ccdbd76d47906ba | 3,640 | py | Python | hard-gists/8b1b456ec95d95c77d42/snippet.py | jjhenkel/dockerizeme | eaa4fe5366f6b9adf74399eab01c712cacaeb279 | [
"Apache-2.0"
] | 21 | 2019-07-08T08:26:45.000Z | 2022-01-24T23:53:25.000Z | hard-gists/8b1b456ec95d95c77d42/snippet.py | jjhenkel/dockerizeme | eaa4fe5366f6b9adf74399eab01c712cacaeb279 | [
"Apache-2.0"
] | 5 | 2019-06-15T14:47:47.000Z | 2022-02-26T05:02:56.000Z | hard-gists/8b1b456ec95d95c77d42/snippet.py | jjhenkel/dockerizeme | eaa4fe5366f6b9adf74399eab01c712cacaeb279 | [
"Apache-2.0"
] | 17 | 2019-05-16T03:50:34.000Z | 2021-01-14T14:35:12.000Z | from PySide import QtGui
class QDictBox(QtGui.QDialog):
WIDGETS = {str: QtGui.QLineEdit,
unicode: QtGui.QLineEdit,
int: QtGui.QSpinBox,
float: QtGui.QDoubleSpinBox,
list: QtGui.QComboBox,
bool: QtGui.QCheckBox}
GETTERS = {QtGui.QLineEdit: "text",
QtGui.QSpinBox: "value",
QtGui.QDoubleSpinBox: "value",
QtGui.QComboBox: "currentText",
QtGui.QCheckBox: "isChecked"}
SETTERS = {QtGui.QLineEdit: "setText",
QtGui.QSpinBox: "setValue",
QtGui.QDoubleSpinBox: "setValue",
QtGui.QComboBox: "addItems",
QtGui.QCheckBox: "setChecked"}
VALIDATORS = {QtGui.QLineEdit: lambda x: bool(len(x)),
QtGui.QSpinBox: lambda x: True,
QtGui.QDoubleSpinBox: lambda x: True,
QtGui.QComboBox: lambda x: True,
QtGui.QCheckBox: lambda x: True}
@classmethod
def getValues(cls, parent, options, required=None, title=None):
d = cls(parent, options, required, title)
ok = d.exec_()
return d.values if ok else {}
def __init__(self, parent, options, required=None, title=None):
super(QDictBox, self).__init__(parent)
self.__widgets = dict()
self.__values = dict()
if title:
self.setWindowTitle(title)
self.required = required or list()
if len(options) == 1:
self.required.append(options.keys()[0])
_firstWidget = None
formLayout = QtGui.QFormLayout()
for k, v in options.iteritems():
if isinstance(v, (list, tuple)):
v = [str(x) for x in v]
label = QtGui.QLabel(beautifyText(k))
widget = self.WIDGETS[type(v)]()
getattr(widget, self.SETTERS[type(widget)])(v)
if k in self.required:
label.setStyleSheet("color: red;")
self.__widgets[k] = (label, widget)
formLayout.addRow(label, widget)
if _firstWidget is None:
_firstWidget = widget
scrollArea = QtGui.QScrollArea()
scrollArea.setWidgetResizable(True)
scrollArea.setFrameShape(QtGui.QFrame.Shape(0)) # no frame
w = QtGui.QWidget()
w.setLayout(formLayout)
scrollArea.setWidget(w)
buttons = QtGui.QDialogButtonBox(
QtGui.QDialogButtonBox.Ok | QtGui.QDialogButtonBox.Cancel)
buttons.accepted.connect(self.accept)
buttons.rejected.connect(self.reject)
layout = QtGui.QVBoxLayout()
layout.addWidget(scrollArea)
layout.addWidget(buttons)
self.setLayout(layout)
_firstWidget.setFocus()
def accept(self):
for k, (label, widget) in self.widgets.iteritems():
value = getattr(widget, self.GETTERS[type(widget)])()
self.__values[k] = value
for k in self.required:
value = self.values[k]
label, widget = self.widgets[k]
if not self.VALIDATORS[type(widget)](value) and widget.isVisible():
widget.setFocus()
return
return super(QDictBox, self).accept()
@property
def widgets(self):
return self.__widgets
@property
def values(self):
return self.__values
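# Illustrative usage sketch (not part of the original gist); the option names and
# defaults below are hypothetical:
#
#     values = QDictBox.getValues(parent, {"name": "", "count": 1, "enabled": True},
#                                 required=["name"], title="Settings")
#
# getValues() builds one widget per key based on the value's type and returns the
# entered values as a dict (empty if the dialog is cancelled).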
def beautifyText(camelCasedText):
rval = ""
for i, ch in enumerate(camelCasedText):
if i == 0:
ch = ch.upper()
elif ch.isupper():
ch = " " + ch
rval += ch
    return rval
| 31.652174 | 79 | 0.565659 |
4a27936db578f00e6287078303d58d6b9af140e7 | 123 | py | Python | tests/01_import_test.py | mika1337/ambiancer | 5386698ccc281c1e42c675f33be6e5c54adbcf0f | ["MIT"] | 1 | 2020-04-19T21:00:39.000Z | 2020-04-19T21:00:39.000Z | tests/01_import_test.py | mika1337/ambiancer | 5386698ccc281c1e42c675f33be6e5c54adbcf0f | ["MIT"] | null | null | null | tests/01_import_test.py | mika1337/ambiancer | 5386698ccc281c1e42c675f33be6e5c54adbcf0f | ["MIT"] | null | null | null |
def test_import():
"""
Import test
"""
from ambiancer import bh1750
from ambiancer import rpibacklight
| 17.571429 | 38 | 0.650407 |
4a279484b1a4ee6e876a97988c95ad222474b323 | 1,835 | py | Python | post.py | looprock/megaphone | 953c92d52a4f1bab436b4cb90b52d0c54926e0af | [
"0BSD"
] | 5 | 2016-07-11T20:37:09.000Z | 2017-02-04T13:28:07.000Z | post.py | looprock/Megaphone | 953c92d52a4f1bab436b4cb90b52d0c54926e0af | [
"0BSD"
] | null | null | null | post.py | looprock/Megaphone | 953c92d52a4f1bab436b4cb90b52d0c54926e0af | [
"0BSD"
] | null | null | null | #!/usr/bin/env python
import json
import sys
import requests
import os
import getopt
import socket
def usage():
print sys.argv[0] + """ [options] [json file]
options:
-h/--help - print usage summary
-o/--override - override the status
"""
statusoverride = False
try:
opts, remainder = getopt.gnu_getopt(sys.argv[1:], "ho:", ["help", "override="])
except getopt.GetoptError:
usage()
sys.exit(2)
for opt, arg in opts:
if opt in ("-h", "--help"):
usage()
sys.exit()
elif opt in ("-o", "--override"):
statusoverride = arg
if len(remainder) < 1:
usage()
sys.exit()
if statusoverride:
validstatus = ["OK", "Unknown", "Warning", "Critical"]
if statusoverride not in validstatus:
print "ERROR: invalid override. Valid values are:"
for i in validstatus:
print i
sys.exit(1)
datafile = remainder[0]
if os.path.isfile(datafile) == True:
with open(datafile) as data_file:
input = json.load(data_file)
# {"id": "adi", "url": {"addr": "http://fenode3.oak.vast.com:18003/status/adi", "statusoverride": "Critical"}}
if isinstance(input['url'], dict):
input['url']['statusoverride'] = statusoverride
t = input['url']['addr'].split(":")
input['url']['addr'] = "http://%s:%s" % (socket.gethostname(), t[2])
else:
t = input['url'].split(":")
addr = "http://%s:%s" % (socket.gethostname(), t[2])
input.pop("url", None)
input['url'] = {}
input['url']['addr'] = addr
if statusoverride:
input['url']['statusoverride'] = statusoverride
r = requests.post("http://localhost:18001/checks", data=json.dumps(input))
if r.status_code == 200:
print "SUCCESS! megaphone updated!"
else:
print "ERROR: request failed with status code %s" % str(r.status_code)
sys.exit(1)
| 25.486111 | 110 | 0.607084 |
4a2794d86c282e1cbc57989b3e699a6521642092 | 201 | py | Python | osenv.py | nitesh8860/python-random-snips | 1ab54032a0d49ff51e2163c27700a823e14da8de | [
"MIT"
] | null | null | null | osenv.py | nitesh8860/python-random-snips | 1ab54032a0d49ff51e2163c27700a823e14da8de | [
"MIT"
] | null | null | null | osenv.py | nitesh8860/python-random-snips | 1ab54032a0d49ff51e2163c27700a823e14da8de | [
"MIT"
] | null | null | null | import os
import sys
with open('controlscript.log', 'a') as fh:
fh.write('\nENV_VARS\n')
for k in os.environ.keys():
fh.write("%s: %s\n" % (k, os.environ[k]))
| 22.333333 | 58 | 0.512438 |
4a279656ba7b5c7aa98d4d3b0ba01f4c0eed8481 | 6,407 | py | Python | test/substatement_test.py | plusplusco/TinCanPython | bbc3f9dd5d8385e7b66c693e7f8262561392be74 | [
"Apache-2.0"
] | 38 | 2015-02-26T12:40:15.000Z | 2021-08-19T11:19:25.000Z | test/substatement_test.py | plusplusco/TinCanPython | bbc3f9dd5d8385e7b66c693e7f8262561392be74 | [
"Apache-2.0"
] | 21 | 2015-01-07T23:12:01.000Z | 2022-02-27T12:35:49.000Z | test/substatement_test.py | plusplusco/TinCanPython | bbc3f9dd5d8385e7b66c693e7f8262561392be74 | [
"Apache-2.0"
] | 27 | 2015-02-09T17:21:44.000Z | 2022-02-27T12:36:25.000Z | # Copyright 2014 Rustici Software
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import unittest
if __name__ == '__main__':
from test.main import setup_tincan_path
setup_tincan_path()
from tincan import (
Agent,
Group,
Verb,
StatementRef,
Activity,
SubStatement,
)
class SubStatementTest(unittest.TestCase):
def test_InitAnonAgentActor(self):
substatement = SubStatement(actor={'object_type': 'Agent', 'name': 'test'})
self.agentVerificationHelper(substatement.actor)
def test_InitAnonGroupActor(self):
substatement = SubStatement(actor={'object_type': 'Group', 'member': [{"name": "test"}]})
self.groupVerificationHelper(substatement.actor)
def test_InitAnonVerb(self):
substatement = SubStatement(verb={'id': 'test'})
self.verbVerificationHelper(substatement.verb)
def test_InitAnonObject(self):
substatement = SubStatement(object={'id': 'test'})
self.activityVerificationHelper(substatement.object)
def test_InitAnonAgentObject(self):
substatement = SubStatement(object={'object_type': 'Agent', 'name': 'test'})
self.agentVerificationHelper(substatement.object)
def test_InitDifferentNamingObject(self):
substatement = SubStatement(object={'objectType': 'Agent', 'name': 'test'})
self.agentVerificationHelper(substatement.object)
def test_InitObjectType(self):
substatement = SubStatement(object_type="SubStatement")
self.assertEqual(substatement.object_type, "SubStatement")
def test_InitAgentActor(self):
substatement = SubStatement(actor=Agent(name='test'))
self.agentVerificationHelper(substatement.actor)
def test_InitGroupActor(self):
substatement = SubStatement(actor=Group(member=[Agent(name='test')]))
self.groupVerificationHelper(substatement.actor)
def test_InitVerb(self):
substatement = SubStatement(verb=Verb(id='test'))
self.verbVerificationHelper(substatement.verb)
def test_InitAgentObject(self):
substatement = SubStatement(object=Agent(name='test'))
self.agentVerificationHelper(substatement.object)
def test_InitGroupObject(self):
substatement = SubStatement(object=Group(member=[Agent(name='test')]))
self.groupVerificationHelper(substatement.object)
def test_InitActivityObject(self):
substatement = SubStatement(object=Activity(id='test'))
self.activityVerificationHelper(substatement.object)
def test_InitUnpack(self):
obj = {'object_type': 'SubStatement', 'actor': {'name': 'test'}, 'verb': {'id': 'test'},
'object': {'id': 'test'}}
substatement = SubStatement(**obj)
self.assertEqual(substatement.object_type, 'SubStatement')
self.agentVerificationHelper(substatement.actor)
self.verbVerificationHelper(substatement.verb)
self.activityVerificationHelper(substatement.object)
def test_FromJSON(self):
json_str = '{"object_type":"SubStatement", "actor":{"name":"test"}, ' \
'"verb":{"id":"test"}, "object":{"id":"test"}}'
substatement = SubStatement.from_json(json_str)
self.assertEqual(substatement.object_type, 'SubStatement')
self.agentVerificationHelper(substatement.actor)
self.verbVerificationHelper(substatement.verb)
self.activityVerificationHelper(substatement.object)
def test_ToJSONEmpty(self):
substatement = SubStatement()
self.assertEqual(json.loads(substatement.to_json()), json.loads('{"objectType": "SubStatement"}'))
def test_ToJSON(self):
substatement = SubStatement(object_type='SubStatement', actor=Agent(name='test'), verb=Verb(id='test'),
object=Activity(id='test'))
self.assertEqual(json.loads(substatement.to_json()),
json.loads('{"verb": {"id": "test"}, "object": {"id": "test", "objectType": "Activity"}, '
'"actor": {"name": "test", "objectType": "Agent"}, "objectType": "SubStatement"}'))
def test_FromJSONToJSON(self):
json_str = '{"object_type":"SubStatement", "actor":{"name":"test"}, "verb":{"id":"test"}, "' \
'object":{"id":"test", "objectType": "Activity"}}'
substatement = SubStatement.from_json(json_str)
self.assertEqual(substatement.object_type, 'SubStatement')
self.agentVerificationHelper(substatement.actor)
self.verbVerificationHelper(substatement.verb)
self.activityVerificationHelper(substatement.object)
self.assertEqual(json.loads(substatement.to_json()),
json.loads('{"verb": {"id": "test"}, "object": {"id": "test", "objectType": "Activity"}, '
'"actor": {"name": "test", "objectType": "Agent"}, "objectType": "SubStatement"}'))
def agentVerificationHelper(self, value):
self.assertIsInstance(value, Agent)
self.assertEqual(value.name, 'test')
def groupVerificationHelper(self, value):
self.assertIsInstance(value, Group)
for k in value.member:
self.assertIsInstance(k, Agent)
self.assertEqual(k.name, 'test')
def verbVerificationHelper(self, value):
self.assertIsInstance(value, Verb)
self.assertEqual(value.id, 'test')
def statementrefVerificationHelper(self, value):
self.assertIsInstance(value, StatementRef)
self.assertEqual(value.object_type, 'StatementRef')
def activityVerificationHelper(self, value):
self.assertIsInstance(value, Activity)
self.assertEqual(value.id, 'test')
if __name__ == '__main__':
suite = unittest.TestLoader().loadTestsFromTestCase(SubStatementTest)
unittest.TextTestRunner(verbosity=2).run(suite)
| 42.430464 | 119 | 0.672077 |
4a2797143d0cfc1f3badea6800bb291b1ea680e6 | 3,431 | py | Python | tests/storage/psql_dos/migrations/django_branch/test_0032_remove_legacy_workflows.py | mkrack/aiida-core | bab1ad6cfc8e4ff041bce268f9270c613663cb35 | [
"MIT",
"BSD-3-Clause"
] | 153 | 2016-12-23T20:59:03.000Z | 2019-07-02T06:47:52.000Z | tests/storage/psql_dos/migrations/django_branch/test_0032_remove_legacy_workflows.py | mkrack/aiida-core | bab1ad6cfc8e4ff041bce268f9270c613663cb35 | [
"MIT",
"BSD-3-Clause"
] | 2,466 | 2016-12-24T01:03:52.000Z | 2019-07-04T13:41:08.000Z | tests/storage/psql_dos/migrations/django_branch/test_0032_remove_legacy_workflows.py | mkrack/aiida-core | bab1ad6cfc8e4ff041bce268f9270c613663cb35 | [
"MIT",
"BSD-3-Clause"
] | 88 | 2016-12-23T16:28:00.000Z | 2019-07-01T15:55:20.000Z | # -*- coding: utf-8 -*-
###########################################################################
# Copyright (c), The AiiDA team. All rights reserved. #
# This file is part of the AiiDA code. #
# #
# The code is hosted on GitHub at https://github.com/aiidateam/aiida-core #
# For further information on the license, see the LICENSE.txt file #
# For further information please visit http://www.aiida.net #
###########################################################################
"""Test removing legacy workflows."""
from aiida.common import timezone
from aiida.common.utils import get_new_uuid
from aiida.storage.psql_dos.migrator import PsqlDostoreMigrator
def test_node_repository(perform_migrations: PsqlDostoreMigrator):
"""Test removing legacy workflows."""
# starting revision
perform_migrations.migrate_up('django@django_0031')
# setup the database
user_model = perform_migrations.get_current_table('db_dbuser')
node_model = perform_migrations.get_current_table('db_dbnode')
wf_model = perform_migrations.get_current_table('db_dbworkflow')
wfdata_model = perform_migrations.get_current_table('db_dbworkflowdata')
wfstep_model = perform_migrations.get_current_table('db_dbworkflowstep')
with perform_migrations.session() as session:
user = user_model(
email='[email protected]',
first_name='John',
last_name='Doe',
institution='EPFL',
password='',
is_superuser=False,
is_staff=False,
is_active=True,
last_login=timezone.now(),
date_joined=timezone.now(),
)
session.add(user)
session.commit()
node_calc = node_model(
uuid=get_new_uuid(),
node_type='node.process.calculation.calcjob.CalcJobNode.',
user_id=user.id,
ctime=timezone.now(),
mtime=timezone.now(),
label='test',
description='',
nodeversion=1,
public=True,
)
session.add(node_calc)
session.commit()
workflow = wf_model(
label='Legacy WorkflowNode',
uuid=get_new_uuid(),
user_id=user.id,
ctime=timezone.now(),
mtime=timezone.now(),
module='',
module_class='',
lastsyncedversion=1,
nodeversion=1,
report='',
script_md5='',
script_path='',
state='',
description='',
)
session.add(workflow)
session.commit()
workflow_data = wfdata_model(
parent_id=workflow.id,
aiida_obj_id=node_calc.id,
time=timezone.now(),
name='',
data_type='dict',
value_type='dict',
json_value='{}',
)
session.add(workflow_data)
session.commit()
workflow_step = wfstep_model(
user_id=user.id,
parent_id=workflow.id,
time=timezone.now(),
name='',
nextcall='',
state='',
)
session.add(workflow_step)
session.commit()
# final revision
perform_migrations.migrate_up('django@django_0032')
| 35.371134 | 76 | 0.535995 |
4a2799f5eb055993941ed016c69a323cd0755738 | 741 | py | Python | networking_bgpvpn/neutron/db/head.py | cgoncalves/networking-bgpvpn | 3ec876c4ead840874e08d6dc876a36814d5f1f81 | [
"Apache-2.0"
] | 38 | 2015-06-23T08:06:16.000Z | 2022-01-25T16:03:10.000Z | networking_bgpvpn/neutron/db/head.py | cgoncalves/networking-bgpvpn | 3ec876c4ead840874e08d6dc876a36814d5f1f81 | [
"Apache-2.0"
] | null | null | null | networking_bgpvpn/neutron/db/head.py | cgoncalves/networking-bgpvpn | 3ec876c4ead840874e08d6dc876a36814d5f1f81 | [
"Apache-2.0"
] | 17 | 2015-11-28T00:45:22.000Z | 2021-07-22T09:22:30.000Z | # Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from neutron.db.migration.models import head
# pylint: disable=unused-import
import networking_bgpvpn.neutron.db.bgpvpn_db # noqa
def get_metadata():
return head.model_base.BASEV2.metadata
| 35.285714 | 74 | 0.773279 |
4a279a6e8e3e93cc5697567026dced970a42e042 | 7,516 | py | Python | scripts/analyze-file.py | madmongo1/cpp-compile-overhead | 163038784a02c21c0e837400cd2415702e153959 | [
"MIT"
] | null | null | null | scripts/analyze-file.py | madmongo1/cpp-compile-overhead | 163038784a02c21c0e837400cd2415702e153959 | [
"MIT"
] | null | null | null | scripts/analyze-file.py | madmongo1/cpp-compile-overhead | 163038784a02c21c0e837400cd2415702e153959 | [
"MIT"
] | null | null | null |
#!/usr/bin/env python3
import math
import re
import argparse
import os
import sys
import subprocess
import time
import json
parser = argparse.ArgumentParser(description="C++ compile-health analyzer")
parser.add_argument("file", metavar="F", type=str,
help="C++ source or header file to analyze")
parser.add_argument("-c", "--compiler", required=True,
type=str, help="compiler to use")
parser.add_argument("-d", "--dir", required=True, type=str,
help="temporary directory to use (e.g. /tmp)")
parser.add_argument(
"args", type=str, help="additional compile args (use -- to prevent clashes with other args)", nargs="*")
parser.add_argument("-v", "--verbose", help="increase output verbosity",
action="store_true")
args = parser.parse_args()
def debug_print(s):
if args.verbose:
print(s)
def debug_print_exec(a):
if args.verbose:
print("executing {}".format(a))
# ============================================================
# Parse args
compiler = args.compiler
debug_print("compiler: " + compiler)
assert os.path.isabs(
compiler), "compiler path must be absolute! (to prevent lookup dominating time measurement)"
assert os.path.exists(compiler), "cannot find compiler"
file = args.file
debug_print("file: " + file)
is_source = os.path.splitext(file)[-1].startswith(".c")
is_header = os.path.splitext(file)[-1].startswith(".h")
is_system = os.path.splitext(file)[-1] == ""
debug_print("is_source: " + str(is_source))
debug_print("is_header: " + str(is_header))
debug_print("is_system: " + str(is_system))
assert is_source or is_header or is_system, "unknown extension"
tmp_dir = args.dir
debug_print("tmp dir: " + tmp_dir)
assert os.path.exists(tmp_dir), "tmp dir does not exist"
tmp_dir = os.path.abspath(tmp_dir)
cargs = args.args
debug_print("{} additional arguments".format(len(cargs)))
for a in cargs:
debug_print(" {}".format(a))
# ============================================================
# Setup
file_main = os.path.join(tmp_dir, "main.cc")
baseline_main = os.path.join(tmp_dir, "baseline.cc")
output_main = os.path.join(tmp_dir, "main.o")
result = {}
preproc_args = [compiler] + cargs + ["-E", file_main, "-o", output_main]
preproc_args_ = [compiler] + cargs + ["-E", "main.cc", "-o", "main.o"]
compile_args = [compiler] + cargs + ["-c", file_main, "-o", output_main]
compile_args_ = [compiler] + cargs + ["-c", "main.cc", "-o", "main.o"]
preproc_baseline_args = [compiler] + cargs + \
["-E", baseline_main, "-o", output_main]
compile_baseline_args = [compiler] + cargs + \
["-c", baseline_main, "-o", output_main]
result["preproc_cmd"] = " ".join(preproc_args_)
result["compile_cmd"] = " ".join(compile_args_)
result["compiler_version"] = subprocess.check_output(
[compiler, "--version"]).decode("utf-8").splitlines()[0]
# ============================================================
# Create temporary files to compile
with open(file_main, "w") as f:
f.writelines([
"#include <" + file + ">\n",
"int main() { return 0; }\n"
])
with open(baseline_main, "w") as f:
f.writelines([
"int main() { return 0; }\n"
])
# ============================================================
# Check stats
# -E is preprocessor only (and strips comments)
debug_print_exec(preproc_args)
subprocess.run(preproc_args, check=True)
with open(output_main) as f:
line_cnt_raw = 0
line_cnt = 0
prog = re.compile(r'[a-zA-Z0-9_]')
for l in f.readlines():
line_cnt_raw += 1
if prog.search(l) is not None:
line_cnt += 1
result["line_count_raw"] = line_cnt_raw - 2 # int main() + #include
result["line_count"] = line_cnt - 1 # int main()
# -c compiles to object file
debug_print_exec(compile_args)
subprocess.run(compile_args, check=True)
result["object_size"] = os.path.getsize(output_main)
# check symbols
prog = re.compile(r'^[0-9a-zA-Z]* ([0-9a-zA-Z]*) *(\w) (.+)$')
undef_sym_cnt = 0
undef_sym_size = 0
data_sym_cnt = 0
data_sym_size = 0
code_sym_cnt = 0
code_sym_size = 0
weak_sym_cnt = 0
weak_sym_size = 0
debug_sym_cnt = 0
debug_sym_size = 0
sym_name_size = 0
debug_print_exec(["nm", output_main])
for l in subprocess.check_output(["nm", "-a", "-S", output_main]).decode("utf-8").splitlines():
m = prog.match(l)
assert m is not None, "could not parse line " + l
ss = m.group(1)
st = m.group(2)
sn = m.group(3)
ss = 0 if ss == "" else int(ss, base=16)
if sn == "main":
continue
# debug_print("symbol {}, {}, {}".format(ss,st,sn))
if st in ['U']:
undef_sym_cnt += 1
undef_sym_size += ss
sym_name_size += len(sn)
elif st in ['b', 'B', 'r', 'R', 'd', 'D', 'n', 'g', 'G']:
data_sym_cnt += 1
data_sym_size += ss
sym_name_size += len(sn)
elif st in ['t', 'T']:
code_sym_cnt += 1
code_sym_size += ss
sym_name_size += len(sn)
elif st in ['w', 'W', 'v', 'V', 'u']:
weak_sym_cnt += 1
weak_sym_size += ss
sym_name_size += len(sn)
elif st in ['N', 'a']:
debug_sym_cnt += 1
debug_sym_size += ss
sym_name_size += len(sn)
else:
assert False, "unknown symbol type " + st
result["undefined_symbol_count"] = undef_sym_cnt
result["undefined_symbol_size"] = undef_sym_size
result["data_symbol_count"] = data_sym_cnt
result["data_symbol_size"] = data_sym_size
result["code_symbol_count"] = code_sym_cnt
result["code_symbol_size"] = code_sym_size
result["weak_symbol_count"] = weak_sym_cnt
result["weak_symbol_size"] = weak_sym_size
result["debug_symbol_count"] = debug_sym_cnt
result["debug_symbol_size"] = debug_sym_size
result["symbol_name_size"] = sym_name_size
# strings (BEFORE baseline!)
string_cnt = 0
string_size = 0
for l in subprocess.check_output(["strings", output_main]).decode("utf-8").splitlines():
string_cnt += 1
string_size += len(l)
result["string_count"] = string_cnt
result["string_size"] = string_size
# section sizes (BEFORE baseline!)
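# For reference, Berkeley-format `size -B` output looks roughly like (made-up numbers):
#      text    data     bss     dec     hex filename
#      1234      56      78    1368     558 /tmp/main.o
# Only the line mentioning main.o is parsed below, taking the first three columns.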
for l in subprocess.check_output(["size", "-B", output_main]).decode("utf-8").splitlines():
if "main.o" in l:
parts = l.split()
result["text_size"] = int(parts[0])
result["data_size"] = int(parts[1])
result["bss_size"] = int(parts[2])
# baseline object size
debug_print_exec(compile_baseline_args)
subprocess.run(compile_baseline_args, check=True)
result["object_size_base"] = os.path.getsize(output_main)
# ============================================================
# Check parse and compile times
def measure_time(sargs):
ts = []
while True:
if len(ts) > 10:
break
if len(ts) >= 8:
if ts[3] / ts[0] < 1.01: # cheapest 4 deviate less than 1%
break
# long compilations do not need many repetitions
if len(ts) >= 3 and ts[0] > 0.5:
break
t0 = time.perf_counter()
subprocess.call(sargs)
t1 = time.perf_counter()
ts.append(t1 - t0)
ts.sort()
return ts[0]
result["preprocessing_time_base"] = measure_time(preproc_baseline_args)
result["compile_time_base"] = measure_time(compile_baseline_args)
result["preprocessing_time"] = measure_time(preproc_args)
result["compile_time"] = measure_time(compile_args)
# ============================================================
# Finalize
debug_print("")
debug_print("results:")
print(json.dumps(result, indent=4))
| 30.306452 | 108 | 0.617084 |
4a279a95a92b857977f4c83b29717ba78b292b5f | 2,325 | py | Python | api/staticdata/control_list_entries/migrations/0001_squashed_0005_auto_20200519_1617.py | uktrade/lite-ap | 4e1a57956bd921992b4a6e2b8fbacbba5720960d | [
"MIT"
] | 3 | 2019-05-15T09:30:39.000Z | 2020-04-22T16:14:23.000Z | api/staticdata/control_list_entries/migrations/0001_squashed_0005_auto_20200519_1617.py | uktrade/lite-ap | 4e1a57956bd921992b4a6e2b8fbacbba5720960d | [
"MIT"
] | 85 | 2019-04-24T10:39:35.000Z | 2022-03-21T14:52:12.000Z | api/staticdata/control_list_entries/migrations/0001_squashed_0005_auto_20200519_1617.py | uktrade/lite-ap | 4e1a57956bd921992b4a6e2b8fbacbba5720960d | [
"MIT"
] | 1 | 2021-01-17T11:12:19.000Z | 2021-01-17T11:12:19.000Z |
# Generated by Django 3.1.8 on 2021-04-26 07:59
from django.db import migrations, models
import django.db.models.deletion
import uuid
def forward_migration(apps, schema_editor):
ControlListEntry = apps.get_model("control_list_entries", "ControlListEntry")
ControlListEntry.objects.filter(is_decontrolled=True).delete()
def reverse_migration(apps, schema_editor):
# To fully reverse migrate the code above you would need to allow decontrolled to be seedable again,
# and rerun seeding.
pass
class Migration(migrations.Migration):
replaces = [('control_list_entries', '0001_initial'), ('control_list_entries', '0002_auto_20200419_1827'), ('control_list_entries', '0003_auto_20200519_1048'), ('control_list_entries', '0004_remove_decontrolled_entries'), ('control_list_entries', '0005_auto_20200519_1617')]
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='ControlListEntry',
fields=[
('id', models.UUIDField(default=uuid.uuid4, editable=False, primary_key=True, serialize=False)),
('rating', models.CharField(max_length=100, unique=True)),
('text', models.TextField()),
('is_decontrolled', models.BooleanField(default=False)),
('parent', models.ForeignKey(default=None, null=True, on_delete=django.db.models.deletion.CASCADE, to='control_list_entries.controllistentry')),
],
),
migrations.AlterModelTable(
name='controllistentry',
table='control_list_entry',
),
migrations.AlterField(
model_name='controllistentry',
name='parent',
field=models.ForeignKey(default=None, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='children', to='control_list_entries.controllistentry'),
),
migrations.RunPython(
code=forward_migration,
reverse_code=reverse_migration,
),
migrations.RemoveField(
model_name='controllistentry',
name='is_decontrolled',
),
migrations.AddField(
model_name='controllistentry',
name='category',
field=models.CharField(default='', max_length=100),
),
]
| 37.5 | 278 | 0.655484 |
4a279bb281d4878ea379a924610bf069f20b5cc7 | 16,567 | py | Python | src/opnsense/service/modules/template.py | johanneskastl/opnsense-core | 1c8731dd784cf5bd101f8d433f99d9f6a68a982c | [
"BSD-2-Clause"
] | 35 | 2018-05-05T00:27:51.000Z | 2020-10-28T18:56:16.000Z | src/opnsense/service/modules/template.py | johanneskastl/opnsense-core | 1c8731dd784cf5bd101f8d433f99d9f6a68a982c | [
"BSD-2-Clause"
] | 1 | 2018-06-02T07:42:51.000Z | 2018-06-02T08:53:28.000Z | src/opnsense/service/modules/template.py | johanneskastl/opnsense-core | 1c8731dd784cf5bd101f8d433f99d9f6a68a982c | [
"BSD-2-Clause"
] | 11 | 2018-05-09T16:31:29.000Z | 2022-02-11T23:35:09.000Z |
"""
Copyright (c) 2015 Ad Schellevis <[email protected]>
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES,
INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY
AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
--------------------------------------------------------------------------------------
package : configd
function: template handler, generate configuration files using templates
"""
import os
import os.path
import glob
import stat
import syslog
import collections
import traceback
import copy
import codecs
import jinja2
import addons.template_helpers
__author__ = 'Ad Schellevis'
class Template(object):
def __init__(self, target_root_directory="/"):
""" constructor
:return:
"""
# init config (config.xml) data
self._config = {}
# set target root
self._target_root_directory = target_root_directory
# setup jinja2 environment
self._template_dir = os.path.dirname(os.path.abspath(__file__)) + '/../templates/'
self._j2_env = jinja2.Environment(loader=jinja2.FileSystemLoader(self._template_dir), trim_blocks=True,
extensions=["jinja2.ext.do", "jinja2.ext.loopcontrols"])
# register additional filters
self._j2_env.filters['decode_idna'] = lambda x:x.decode('idna')
self._j2_env.filters['encode_idna'] = self._encode_idna
@staticmethod
def _encode_idna(x):
""" encode string to idna, preserve leading dots
"""
return ''.join(map(lambda x:'.', range(len(x) - len(x.lstrip('.'))))) + x.lstrip('.').encode('idna')
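    # Rough illustration of the filter behaviour (hypothetical inputs):
    # '.example.org' keeps its leading dot and the ASCII remainder passes
    # through unchanged, while a hostname containing non-ASCII characters is
    # converted to its 'xn--...' punycode form.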
def list_module(self, module_name):
""" list single module content
:param module_name: module name in dot notation ( company.module )
:return: dictionary with module data
"""
result = {'+TARGETS': dict(), '+CLEANUP_TARGETS': dict()}
file_path = '%s/%s' % (self._template_dir, module_name.replace('.', '/'))
target_sources = ['%s/+TARGETS' % file_path]
if os.path.exists('%s/+TARGETS.D' % file_path):
for filename in sorted(glob.glob('%s/+TARGETS.D/*.TARGET' % file_path)):
target_sources.append(filename)
for target_source in target_sources:
if os.path.exists(target_source):
for line in open(target_source, 'r').read().split('\n'):
parts = line.split(':')
if len(parts) > 1 and parts[0].strip()[0] != '#':
source_file = parts[0].strip()
target_name = parts[1].strip()
if target_name in result['+TARGETS'].values():
syslog.syslog(syslog.LOG_NOTICE, "template overlay %s with %s" % (
target_name, os.path.basename(target_source)
))
result['+TARGETS'][source_file] = target_name
if len(parts) == 2:
result['+CLEANUP_TARGETS'][source_file] = target_name
elif parts[2].strip() != "":
result['+CLEANUP_TARGETS'][source_file] = parts[2].strip()
return result
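    # Hypothetical +TARGETS entry, for illustration only (not shipped here):
    #   dhcpd.conf:/usr/local/etc/dhcpd.conf
    # The template file on the left is rendered to the path on the right; an
    # optional third ':'-separated field overrides the cleanup target, and
    # lines whose source starts with '#' are ignored.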
def list_modules(self):
""" traverse template directory and list all modules
the template directory is structured like Manufacturer/Module/config_files
:return: list (dict) of registered modules
"""
result = list()
for root, dirs, files in os.walk(self._template_dir):
if root.count('/') > self._template_dir.count('/'):
module_name = root.replace(self._template_dir, '')
result.append(module_name)
return result
def set_config(self, config_data):
""" set config data
:param config_data: config data as dictionary/list structure
:return: None
"""
if type(config_data) in (dict, collections.OrderedDict):
self._config = config_data
else:
# no data given, reset
self._config = {}
@staticmethod
def __find_string_tags(instr):
"""
:param instr: string with optional tags [field.$$]
:return:
"""
retval = []
for item in instr.split('['):
if item.find(']') > -1:
retval.append(item.split(']')[0])
return retval
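    # For illustration (hypothetical target name): '/tmp/[system.hostname].conf'
    # yields ['system.hostname'] -- every bracketed token is returned so that
    # __find_filters() can resolve it against the config afterwards.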
def __find_filters(self, tags):
""" match tags to config and construct a dictionary which we can use to construct the output filenames
:param tags: list of tags [xmlnode.xmlnode.%.xmlnode,xmlnode]
:return: dictionary containing key (tagname) value {existing node key, value}
"""
result = {}
for tag in tags:
result[tag] = {}
# first step, find wildcard to replace ( if any )
# ! we only support one wildcard per tag at the moment, should be enough for most situations
config_ptr = self._config
target_keys = []
for xmlNodeName in tag.split('.'):
if xmlNodeName in config_ptr:
config_ptr = config_ptr[xmlNodeName]
elif xmlNodeName == '%':
if type(config_ptr) in (collections.OrderedDict, dict):
target_keys = config_ptr.keys()
else:
target_keys = map(lambda x: str(x), range(len(config_ptr)))
else:
# config pointer is reused when the match is exact, so we need to reset it here
# if the tag was not found.
config_ptr = None
break
if len(target_keys) == 0:
# single node, only used for string replacement in output name.
result[tag] = {tag: config_ptr}
else:
                # multiple nodes, find all nodes
for target_node in target_keys:
config_ptr = self._config
str_wildcard_loc = len(tag.split('%')[0].split('.'))
filter_target = []
for xmlNodeName in tag.replace('%', target_node).split('.'):
if xmlNodeName in config_ptr:
if type(config_ptr[xmlNodeName]) in (collections.OrderedDict, dict):
if str_wildcard_loc >= len(filter_target):
filter_target.append(xmlNodeName)
if str_wildcard_loc == len(filter_target):
result[tag]['.'.join(filter_target)] = xmlNodeName
config_ptr = config_ptr[xmlNodeName]
elif type(config_ptr[xmlNodeName]) in (list, tuple):
if str_wildcard_loc >= len(filter_target):
filter_target.append(xmlNodeName)
filter_target.append(target_node)
config_ptr = config_ptr[xmlNodeName][int(target_node)]
else:
# fill in node value
result[tag]['.'.join(filter_target)] = config_ptr[xmlNodeName]
return result
@staticmethod
def _create_directory(filename):
""" create directory
:param filename: create path for filename ( if not existing )
:return: None
"""
fparts = []
for fpart in filename.strip().split('/')[:-1]:
fparts.append(fpart)
if len(fpart) > 1:
tmppart = '/'.join(fparts)
if os.path.isfile(tmppart):
os.remove(tmppart)
if not os.path.exists(tmppart):
os.mkdir(tmppart)
def _generate(self, module_name, create_directory=True):
""" generate configuration files for one section using bound config and template data
:param module_name: module name in dot notation ( company.module )
:param create_directory: automatically create directories to place template output in ( if not existing )
:return: list of generated output files
"""
result = []
module_data = self.list_module(module_name)
for src_template in module_data['+TARGETS'].keys():
target = module_data['+TARGETS'][src_template]
target_filename_tags = self.__find_string_tags(target)
target_filters = self.__find_filters(target_filename_tags)
result_filenames = {target: {}}
for target_filter in target_filters.keys():
for key in target_filters[target_filter].keys():
for filename in result_filenames.keys():
if target_filters[target_filter][key] is not None \
and filename.find('[%s]' % target_filter) > -1:
new_filename = filename.replace('[%s]' % target_filter, target_filters[target_filter][key])
new_filename = new_filename.replace('//', '/')
result_filenames[new_filename] = copy.deepcopy(result_filenames[filename])
result_filenames[new_filename][key] = target_filters[target_filter][key]
template_filename = '%s/%s' % (module_name.replace('.', '/'), src_template)
# parse template, make sure issues can be traced back to their origin
try:
j2_page = self._j2_env.get_template(template_filename)
except jinja2.exceptions.TemplateSyntaxError as templExc:
raise Exception("%s %s %s" % (module_name, template_filename, templExc))
for filename in result_filenames.keys():
if not (filename.find('[') != -1 and filename.find(']') != -1):
# copy config data
cnf_data = copy.deepcopy(self._config)
cnf_data['TARGET_FILTERS'] = result_filenames[filename]
# link template helpers
self._j2_env.globals['helpers'] = addons.template_helpers.Helpers(cnf_data)
# make sure we're only rendering output once
if filename not in result:
# render page and write to disc
try:
content = j2_page.render(cnf_data)
except Exception as render_exception:
# push exception with context if anything fails
raise Exception("%s %s %s" % (module_name, template_filename, render_exception))
# prefix filename with defined root directory
filename = ('%s/%s' % (self._target_root_directory, filename)).replace('//', '/')
if create_directory:
# make sure the target directory exists
self._create_directory(filename)
f_out = codecs.open(filename, 'wb', encoding="utf-8")
f_out.write(content)
# Check if the last character of our output contains an end-of-line, if not copy it in if
# it was in the original template.
# It looks like Jinja sometimes isn't consistent on placing this last end-of-line in.
if len(content) > 1 and content[-1] != '\n':
src_file = '%s%s' % (self._template_dir, template_filename)
src_file_handle = open(src_file, 'r')
src_file_handle.seek(-1, os.SEEK_END)
last_bytes_template = src_file_handle.read()
src_file_handle.close()
if last_bytes_template in ('\n', '\r'):
f_out.write('\n')
f_out.close()
# copy root permissions, without exec
root_perm = stat.S_IMODE(os.lstat(os.path.dirname(filename)).st_mode)
os.chmod(filename, root_perm & (~stat.S_IXGRP & ~stat.S_IXUSR & ~stat.S_IXOTH))
result.append(filename)
return result
def iter_modules(self, module_name):
"""
:param module_name: module name in dot notation ( company.module ), may use wildcards
        :return: templates matching patterns
"""
for template_name in sorted(self.list_modules()):
wildcard_pos = module_name.find('*')
do_generate = False
if wildcard_pos > -1 and module_name[:wildcard_pos] == template_name[:wildcard_pos]:
# wildcard match
do_generate = True
elif wildcard_pos == -1 and module_name == template_name:
# direct match
do_generate = True
elif wildcard_pos == -1 and len(module_name) < len(template_name) \
and '%s.' % module_name == template_name[0:len(module_name) + 1]:
# match child item
do_generate = True
if do_generate:
yield template_name
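    # Matching is purely name based; with hypothetical module names,
    # 'OPNsense.*' yields every template container under that vendor, while
    # 'OPNsense.Sample' also yields child containers such as 'OPNsense.Sample.Sub'.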
def generate(self, module_name, create_directory=True):
"""
:param module_name: module name in dot notation ( company.module ), may use wildcards
:param create_directory: automatically create directories to place template output in ( if not existing )
:return: list of generated output files or None if template not found
"""
result = None
for template_name in self.iter_modules(module_name):
wildcard_pos = module_name.find('*')
if result is None:
result = list()
syslog.syslog(syslog.LOG_NOTICE, "generate template container %s" % template_name)
try:
for filename in self._generate(template_name, create_directory):
result.append(filename)
except Exception as render_exception:
if wildcard_pos > -1:
# log failure, but proceed processing when doing a wildcard search
syslog.syslog(syslog.LOG_ERR, 'error generating template %s : %s' % (template_name,
traceback.format_exc()))
else:
raise render_exception
return result
def cleanup(self, module_name):
"""
:param module_name: module name in dot notation ( company.module ), may use wildcards
:return: list of removed files or None if template not found
"""
result = list()
for template_name in self.iter_modules(module_name):
syslog.syslog(syslog.LOG_NOTICE, "cleanup template container %s" % template_name)
module_data = self.list_module(module_name)
for src_template in module_data['+CLEANUP_TARGETS'].keys():
target = module_data['+CLEANUP_TARGETS'][src_template]
for filename in glob.glob(target):
os.remove(filename)
result.append(filename)
return result
| 47.19943 | 119 | 0.558822 |
4a279c8d2952283c09f2246db76c05f1d510719b | 37 | py | Python | py/api.py | hugodemenez/PortfolioWebsiteTracker | 98a396662bb8ccdb27a2853e8ce42404943ecde8 | [
"MIT"
] | null | null | null | py/api.py | hugodemenez/PortfolioWebsiteTracker | 98a396662bb8ccdb27a2853e8ce42404943ecde8 | [
"MIT"
] | null | null | null | py/api.py | hugodemenez/PortfolioWebsiteTracker | 98a396662bb8ccdb27a2853e8ce42404943ecde8 | [
"MIT"
] | null | null | null |
from Python-Brokers-API import ftx
| 9.25 | 34 | 0.783784 |
4a279ca8b73482550408dad8ff59f340b89f2856 | 5,890 | py | Python | brickschema/brickify/src/handlers/Handler/Handler.py | BrickSchema/py-brickschema | 42acd46eef73731db0428503eade325fac38c554 | [
"BSD-3-Clause"
] | 31 | 2020-01-23T18:39:12.000Z | 2022-02-10T04:46:30.000Z | brickschema/brickify/src/handlers/Handler/Handler.py | BrickSchema/py-brickschema | 42acd46eef73731db0428503eade325fac38c554 | [
"BSD-3-Clause"
] | 46 | 2020-01-27T22:09:16.000Z | 2022-03-11T12:58:21.000Z | brickschema/brickify/src/handlers/Handler/Handler.py | BrickSchema/py-brickschema | 42acd46eef73731db0428503eade325fac38c554 | [
"BSD-3-Clause"
] | 10 | 2020-02-18T20:21:43.000Z | 2022-03-31T13:47:49.000Z |
import re
from pathlib import Path
from typing import Optional, List
import importlib_resources
import rdflib
import typer
from typer import progressbar
from brickschema.brickify.util import bind_namespaces, load_config
class Handler:
def __init__(
self,
source: Optional[str] = "input.ttl",
input_format: Optional[str] = "turtle",
module_path: Optional[List[str]] = None,
config_file: Optional[Path] = None,
):
"""
        Handler class responsible for performing end-to-end conversion
        (including ingestion, translation, and clean-up).
:param source: A filepath/URL
:param input_format: Input format of the file
        :param module_path: Path to default template files in the package ([<dot-separated-module-path>, <template-filename>])
:param config_file: Custom conversion configuration file
"""
self.graph = rdflib.Graph()
self.source = source
self.input_format = input_format
if config_file:
with open(config_file, "r") as config:
self.config = load_config(config, config_file)
elif module_path:
config_file = module_path[-1]
with importlib_resources.path(*module_path) as data_file:
with open(data_file, "r") as config:
self.config = load_config(config, config_file)
else:
typer.echo(
typer.style(
"[ERROR] No configuration specified!",
fg=typer.colors.RED,
)
)
def update_namespaces(
self,
building_prefix=None,
building_namespace=None,
site_prefix=None,
site_namespace=None,
):
"""
Updates prefixes and namespaces from the templates for the site and the building before conversion.
:param building_prefix: Building prefix (default: bldg)
:param building_namespace: Building namespace (default: https://example.com/bldg#)
:param site_prefix: Site prefix (default: site)
:param site_namespace: Site namespace (default: https://example.com/site#)
"""
self.config["namespace_prefixes"][building_prefix] = building_namespace
self.config["namespace_prefixes"][site_prefix] = site_namespace
bind_namespaces(self.graph, self.config["namespace_prefixes"])
for operation in self.config["operations"]:
if "query" in operation:
operation["query"] = re.sub(
"bldg:", f"{building_prefix}:", operation["query"]
)
operation["query"] = re.sub(
"site:", f"{site_prefix}:", operation["query"]
)
if "data" in operation:
operation["data"] = re.sub(
"bldg:", f"{building_prefix}:", operation["data"]
)
operation["data"] = re.sub(
"site:", f"{site_prefix}:", operation["data"]
)
if "template" in operation:
operation["template"] = re.sub(
"bldg:", f"{building_prefix}:", operation["template"]
)
operation["template"] = re.sub(
"site:", f"{site_prefix}:", operation["template"]
)
def ingest_data(self):
"""
        Ingests the data from the source into memory. The default behaviour of
        the base handler is to parse the source graph (guessing its format from
        the file name) into self.graph.
"""
self.graph.parse(self.source, format=rdflib.util.guess_format(self.source))
def translate(self):
"""
Performs SPARQL based operations sequentially over the output graph.
"""
if not self.config["operations"]:
return
with progressbar(self.config["operations"]) as operations:
for operation in operations:
if "data" in operation:
query = f"INSERT DATA {{{{ {operation['data']} }}}}"
elif "query" in operation:
query = operation["query"]
if not query:
continue
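                # str.format_map({}) collapses doubled '{{'/'}}' braces (as
                # built above, and presumably as written in the configured
                # operations) into literal braces; an unescaped '{name}'
                # placeholder would raise a KeyError here.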
query = query.format_map({})
try:
self.graph.update(query)
except Exception as e:
print(e)
print(query)
def infer(self):
"""
        In the implementations of specific handlers, this method would be overridden
        to perform additional inference and append the resulting data to the output graph.
"""
pass
def clean_up(self):
"""
In the implementations of specific handlers, this method would be overridden to clean the graphs, remove intermediate data, etc.
"""
pass
def convert(self, building_prefix, building_namespace, site_prefix, site_namespace):
"""
        Performs the conversion based on the base conversion sequence. By default, it updates the namespaces,
        ingests the data, translates the data to Brick primarily using SPARQL-based operations, appends data
        derived from additional inference, and cleans up the output graph.
:param building_prefix: Building prefix (default: bldg)
:param building_namespace: Building namespace (default: https://example.com/bldg#)
:param site_prefix: Site prefix (default: site)
:param site_namespace: Site namespace (default: https://example.com/site#)
:returns: A Brick graph (rdflib.Graph)
"""
self.update_namespaces(
building_prefix, building_namespace, site_prefix, site_namespace
)
self.ingest_data()
self.translate()
self.infer()
self.clean_up()
return self.graph
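# Minimal usage sketch (illustrative only; the file names, config and namespaces
# below are assumptions, not part of this module):
#
#     handler = Handler(source="building.ttl", input_format="turtle",
#                       config_file=Path("custom_config.yml"))
#     graph = handler.convert("bldg", "https://example.com/bldg#",
#                             "site", "https://example.com/site#")
#     graph.serialize("output.ttl", format="turtle")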
| 38.496732 | 136 | 0.58472 |
4a279ceccbbfde8cf4e5935e6a9f366f1c8eeacb | 10,318 | py | Python | DiscordClasses/message_funcs.py | Shlol762/J.A.R.V.I.S | 076eb0910d6bbbe1737dcb45f5d8961768a1c3cf | [
"MIT"
] | null | null | null | DiscordClasses/message_funcs.py | Shlol762/J.A.R.V.I.S | 076eb0910d6bbbe1737dcb45f5d8961768a1c3cf | [
"MIT"
] | null | null | null | DiscordClasses/message_funcs.py | Shlol762/J.A.R.V.I.S | 076eb0910d6bbbe1737dcb45f5d8961768a1c3cf | [
"MIT"
] | null | null | null |
import asyncio
import datetime
import json
import re, random
from nextcord import Forbidden, Invite, Message, Member
from nextcord.ext.commands import Context, Bot
from typing import Optional, Union, Any
webhooks = [861660340617084968, 861660166193807430, 861660711037960243, 861660517746999356, 880318607643521075]
src_was_bot = "Message was by bot"
x_was_not_in_msg = "No {0} in message"
async def forbidden_word(ctx: Context) -> Union[Message, str]:
"""Checks for the forbidden word Shlol#2501 has set"""
bot: Bot = ctx.bot
author: Member = ctx.author
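    # The pattern below matches spaced-out spellings of "hai"/"bai" (e.g. "h a i",
    # "haii"); the word boundaries keep it from firing inside longer words.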
if re.search(r'\b([bh]( )*a)(( )*i)+\b', str(ctx.message.content).strip().lower()):
if author.name != bot.user.name:
invite_link: Invite = await ctx.channel.create_invite(max_uses=1)
message_hai_kick: Message = await author.send(invite_link)
try:
await author.kick(reason=f"{author.name} used the unholy words")
except Forbidden:
await message_hai_kick.delete()
await invite_link.delete()
return await ctx.reply(f"Sorry guys.. can't kick {author.mention}... No perms!")
else: return src_was_bot
else: return x_was_not_in_msg.format("forbidden word")
async def noswear(ctx: Context) -> Union[Message, str]:
"""Checks and alerts users if they are using foul language."""
bot: Bot = ctx.bot
author: Member = ctx.author
if re.search(
r'\b(asshole|whore|cunt)\b|\b(fuck|fk|fuk|bitch)',
ctx.message.content.strip().lower()):
if author.name != bot.user.name:
watch_ur_lang_gifs: str = random.choice([
'https://tenor.com/view/your-language-is-offensive-watch-your-mouth-zach-galifianakis-gif-13885320',
'https://tenor.com/view/funny-or-die-will-ferrell-watch-your-mouth-filthy-mouth-mouth-gif-4427315',
'https://tenor.com/view/avengers-language-captain-america-age-of-ultron-gif-5285201',
'https://tenor.com/view/watch-your-language-words-talk-dont-be-harsh-derek-luke-gif-15626011',
'https://tenor.com/view/iron-man-language-galactic-republic-gif-20457940'])
return await ctx.reply(watch_ur_lang_gifs)
else: return src_was_bot
else: return x_was_not_in_msg.format("swear words")
async def greetings(ctx: Context, random_ = True) -> Union[Message, str]:
"""Checks for greetings and responds randomly"""
author: Member = ctx.author
bot: Bot = ctx.bot
response = random.choice([True, False, True, False, False, True]) if random_ else True
if re.search(r'\b(h(i)+|he(y)+|(wh(a)+(s)*)*s(u)+(p)+|(he[nl]l(o)+)(w)*)\b',
ctx.message.content.strip().lower()):
if author.name != bot.user.name:
hi_response: str = random.choice(['Hello there {}!',
"How ya doin' {}?",
'Whazzup {}?',
"G'day {}!",
'Oh Hello {}!',
'Hi {}!',
'Konnichiva!',
'Namaste!',
'Bonjour!',
'Namaskaragalu!',
'Hola!',
'Ola!',
'Howdy {}!',
'Ciao!'
])
if response: return await ctx.reply(hi_response.format(author.mention))
        else: return src_was_bot
else: return x_was_not_in_msg.format("greetings")
async def farewells(ctx: Context) -> Union[Message, str]:
"""Checks for farewells and responds randomly"""
author: Member = ctx.author
bot: Bot = ctx.bot
response = random.choice([True, True, True, False, True, True])
if re.search(r"\b(by(e)+|i(')*m out)\b", ctx.message.content.strip().lower()):
if author.name != bot.user.name:
bye_response: str = random.choice(['See you later...',
'Sayonara!',
'C u latah!',
'Have a good day!... or night!',
'Have a good time!',
'Adios!',
'Au revoir!',
"You're going already?",
'Bye!',
'Ciao!'
])
            if response: return await ctx.reply(bye_response.format(author.name))
else: return src_was_bot
else: return x_was_not_in_msg.format("farewells")
async def nou(ctx: Context) -> Union[Message, str]:
"""Responds with 'No u' for certain keywords."""
author: Member = ctx.author
bot: Bot = ctx.bot
message_text: str = ctx.message.content.strip().lower()
if re.search(r'\b(kill urself)\b', message_text) or message_text == 'ok':
if author.name != bot.user.name:
return await ctx.reply('No u')
else: return src_was_bot
else: return x_was_not_in_msg.format("nou deserving content")
async def urnotgod(ctx: Context) -> Union[Message, str]:
"""Responds with a variety of messages that oppose a person who claims to be god."""
if re.search(r"\b(i( )?('| a)?( )?m g( )?o( )?d)\b",
ctx.message.content.strip().lower()):
am_i_g_response = random.choice(["🤮, No you're not.",
"I strongly disagree",
"*cough* The person who proclaims him/herself god, is no god at all."
])
return await ctx.reply(am_i_g_response)
else: return x_was_not_in_msg.format("conceitedness")
async def eastereggs(ctx: Context) -> Union[Message, str]:
message = ctx.message
response = random.choice([
'Ya rang?', "'Sup?", "Heyyy!",
f'At your service{" sir" if ctx.author.id == 613044385910620190 else ""}!',
'Ayoooo whassuppp?', 'You summoned me?', 'Hello there! (gen kenobi vibes amirite?)',
"https://tenor.com/view/%D0%BE%D0%B4%D0%B8%D0%BD%D0%B4%D0%BE%D0%B"
"C%D0%B0-kevin-mc-callister-home-alone-wave-hi-gif-15750897"
])
if (ctx.bot.user.mentioned_in(message) and not ctx.command and not re.search(r"(@everyone|@here)", message.content.lower())
and ctx.author != ctx.bot.user and message.webhook_id not in webhooks and not message.reference) or re.search(
r"\b(^((j\.?)+(a\.?)+((r\.?)+(v\.?)+((i\.?)+(s\.?)+)?|y))|"
r"((j\.?)+(a\.?)+((r\.?)+(v\.?)+((i\.?)+(s\.?)+)?|y))$)\b", message.content.lower()):
await ctx.reply(response)
try:
message = await ctx.bot.wait_for('message', timeout=5.0, check=lambda msg: msg.author == ctx.author)
final_response: Message = await greetings(await ctx.bot.get_context(message), False) if not re.search(
r"\b(y[eu][sap]?h?)\b", message.content) else await message.reply("Um I don't know how to converse any further :D")
except asyncio.TimeoutError: pass
return x_was_not_in_msg.format("eastereggs")
async def train(ctx: Context):
rstring = r'\b(whores?|cunts?|tits?|boobs?|ass(holes?)?|milfs?|dick(s|heads?)?|cocks?|anals?|homos?' \
r'|w?tf*|gays?|vaginas?|puss(y|ies))\b|\b((skull)?f(u)?(c+)?k|bitch|sex|cum|fuc+|porn)'
if ctx.author.id == ctx.bot.user.id or re.search(
rstring,
ctx.message.content.strip().lower()) or ctx.channel.name == 'nsfw':
return
f = open('C:/Users/Shlok/bot_stuff/dump.txt', 'a', encoding="utf-8")
with open('C:/Users/Shlok/bot_stuff/mkvdb.json', 'r') as mkvdb:
mkvdct = json.load(mkvdb)
counter = 0
m_type = 0
l_rex = r"(?i)\b((?:https?://|www\d{0,3}[.]|[a-z0-9.\-]+[.][a-z]{2,4}/)(?:[^\s()<>]+|\(([^\s()<>]+|(\([^\s()<>]+\)))*\))+(?:\(([^\s()<>]+|(\([^\s()<>]+\)))*\)|[^\s`!()\[\]{};:'\".,<>?«»“”‘’]))"
message = ctx.message
if message.content != '':
content = message.content.lower()
else:
m_type = 1
try:
content = 'err' + message.attachments[0].content_type
except IndexError:
content = 'sticker'
ch = content[:1]
if ch == '!' or ch == '.':
content = ''
m_type = 1
url = re.findall(l_rex, content)
if url:
content = 'link'
m_type = 1
content = re.sub("<.*?>", '', content)
# content = re.sub(":.*?:", '', content)
content = re.sub(r'[^\w\s]', '', content)
if not content:
m_type = 1
if not m_type:
strn = str(counter) + ') ' + content + '\n'
f.write(strn)
ct = content.split()
if len(ct) == 1 and not mkvdct.get(ct[0]):
mkvdct[ct[0]] = ['']
for i in range(len(ct) - 1):
if ct[i] in mkvdct.keys():
mkvdct[ct[i]].append(ct[i + 1])
else:
mkvdct[ct[i]] = [ct[i + 1]]
# print(strn)
# print('\n')
counter += 1
m_type = 0
with open('C:/Users/Shlok/bot_stuff/mkvdb.json', 'w', encoding="utf-8") as mkvdb:
json.dump(mkvdct, mkvdb, indent=3)
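# Shape of the markov dictionary built above (made-up sentence for illustration):
# after seeing "hello there friend" twice, mkvdct would contain
#   {'hello': ['there', 'there'], 'there': ['friend', 'friend']}
# i.e. each word maps to the list of words observed immediately after it.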
async def who_pinged(ctx: Context):
if ctx.message.mention_everyone:
ch_id = str(ctx.channel.id)
au_id = str(ctx.author.id)
fp = "C:/Users/Shlok/J.A.R.V.I.SV2021/json_files/pings.json"
with open(fp, "r") as f:
pings: dict[str, dict[str, str]] = json.load(f)
if not pings.get(ch_id):
pings[ch_id] = {}
if au_id in pings[ch_id].keys():
pings[ch_id].pop(au_id)
pings[ch_id][au_id] = datetime.datetime.now().strftime('%d %B %Y at %X:%f')
if len(pings[ch_id]) >= 10:
            pings[ch_id].pop(next(iter(pings[ch_id])))  # drop the oldest entry; dict views aren't indexable
with open(fp, 'w') as f:
json.dump(pings, f, indent=3)
| 45.453744 | 197 | 0.52074 |
4a279d4e11ba48eedb2135276e48655362a99e01 | 1,654 | py | Python | setup.py | BurntSushi/nflfan | ddb5496e99787da82e233a9dd03121bc2b222549 | [
"Unlicense"
] | 35 | 2015-01-17T13:58:48.000Z | 2021-01-09T18:56:14.000Z | setup.py | BurntSushi/nflfan | ddb5496e99787da82e233a9dd03121bc2b222549 | [
"Unlicense"
] | 1 | 2016-08-14T05:26:06.000Z | 2016-08-15T10:33:17.000Z | setup.py | BurntSushi/nflfan | ddb5496e99787da82e233a9dd03121bc2b222549 | [
"Unlicense"
] | 17 | 2015-08-29T11:34:40.000Z | 2019-09-18T19:25:57.000Z |
import codecs
from distutils.core import setup
from glob import glob
import os.path as path
cwd = path.dirname(__file__)
longdesc = codecs.open(path.join(cwd, 'longdesc.rst'), 'r', 'utf-8').read()
version = '0.0.0'
with codecs.open(path.join(cwd, 'nflfan/version.py'), 'r', 'utf-8') as f:
exec(f.read())
version = __version__
assert version != '0.0.0'
install_requires = ['nfldb', 'nflvid', 'bottle', 'toml', 'requests']
setup(
name='nflfan',
author='Andrew Gallant',
author_email='[email protected]',
version=version,
license='UNLICENSE',
description='A library to track your fantasy teams in one place.',
long_description=longdesc,
url='https://github.com/BurntSushi/nflfan',
classifiers=[
'License :: Public Domain',
'Development Status :: 3 - Alpha',
'Environment :: Console',
'Intended Audience :: Developers',
'Intended Audience :: End Users/Desktop',
'Intended Audience :: Other Audience',
'Operating System :: OS Independent',
'Programming Language :: Python :: 2.7',
'Topic :: Database',
],
platforms='ANY',
packages=['nflfan'],
package_data={
'nflfan': [
'web/css/*', 'web/fonts/*', 'web/tpl/*',
'web/js/*.js', 'web/js/*/*.js',
],
},
data_files=[('share/doc/nflfan', ['README.md', 'longdesc.rst',
'UNLICENSE']),
('share/doc/nflfan/doc', glob('doc/nflfan/*.html')),
('share/nflfan', ['config.sample.toml'])],
install_requires=install_requires,
scripts=['scripts/nflfan-update']
)
| 32.431373 | 75 | 0.587062 |
4a279e4befa2ee07f77ff43776600802e8e83e1d | 2,597 | py | Python | aliyun-python-sdk-r-kvstore/aliyunsdkr_kvstore/request/v20150101/DescribeAccountsRequest.py | yndu13/aliyun-openapi-python-sdk | 12ace4fb39fe2fb0e3927a4b1b43ee4872da43f5 | [
"Apache-2.0"
] | 1,001 | 2015-07-24T01:32:41.000Z | 2022-03-25T01:28:18.000Z | aliyun-python-sdk-r-kvstore/aliyunsdkr_kvstore/request/v20150101/DescribeAccountsRequest.py | yndu13/aliyun-openapi-python-sdk | 12ace4fb39fe2fb0e3927a4b1b43ee4872da43f5 | [
"Apache-2.0"
] | 363 | 2015-10-20T03:15:00.000Z | 2022-03-08T12:26:19.000Z | aliyun-python-sdk-r-kvstore/aliyunsdkr_kvstore/request/v20150101/DescribeAccountsRequest.py | yndu13/aliyun-openapi-python-sdk | 12ace4fb39fe2fb0e3927a4b1b43ee4872da43f5 | [
"Apache-2.0"
] | 682 | 2015-09-22T07:19:02.000Z | 2022-03-22T09:51:46.000Z |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
from aliyunsdkr_kvstore.endpoint import endpoint_data
class DescribeAccountsRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'R-kvstore', '2015-01-01', 'DescribeAccounts','redisa')
self.set_method('POST')
if hasattr(self, "endpoint_map"):
setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
if hasattr(self, "endpoint_regional"):
setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())
def get_ResourceOwnerId(self):
return self.get_query_params().get('ResourceOwnerId')
def set_ResourceOwnerId(self,ResourceOwnerId):
self.add_query_param('ResourceOwnerId',ResourceOwnerId)
def get_AccountName(self):
return self.get_query_params().get('AccountName')
def set_AccountName(self,AccountName):
self.add_query_param('AccountName',AccountName)
def get_SecurityToken(self):
return self.get_query_params().get('SecurityToken')
def set_SecurityToken(self,SecurityToken):
self.add_query_param('SecurityToken',SecurityToken)
def get_ResourceOwnerAccount(self):
return self.get_query_params().get('ResourceOwnerAccount')
def set_ResourceOwnerAccount(self,ResourceOwnerAccount):
self.add_query_param('ResourceOwnerAccount',ResourceOwnerAccount)
def get_OwnerAccount(self):
return self.get_query_params().get('OwnerAccount')
def set_OwnerAccount(self,OwnerAccount):
self.add_query_param('OwnerAccount',OwnerAccount)
def get_OwnerId(self):
return self.get_query_params().get('OwnerId')
def set_OwnerId(self,OwnerId):
self.add_query_param('OwnerId',OwnerId)
def get_InstanceId(self):
return self.get_query_params().get('InstanceId')
def set_InstanceId(self,InstanceId):
		self.add_query_param('InstanceId',InstanceId)
| 35.094595 | 84 | 0.77397 |
4a27a005b6968a971dcaeb5724a8a122e0b318a8 | 9,723 | py | Python | tests/test_connection.py | ywangd/peek | 25d196b614acaf9c2f9fe4b8fea36a06554950cd | [
"MIT"
] | 16 | 2020-08-31T02:06:23.000Z | 2022-01-31T23:56:44.000Z | tests/test_connection.py | ywangd/peek | 25d196b614acaf9c2f9fe4b8fea36a06554950cd | [
"MIT"
] | 97 | 2020-08-27T14:51:32.000Z | 2021-10-21T00:19:31.000Z | tests/test_connection.py | ywangd/peek | 25d196b614acaf9c2f9fe4b8fea36a06554950cd | [
"MIT"
] | 1 | 2021-02-07T13:10:38.000Z | 2021-02-07T13:10:38.000Z |
import os
from unittest.mock import MagicMock, patch, call
import pytest
from peek.connection import connect, EsClient, RefreshingEsClient, EsClientManager, DelegatingListener
from peek.errors import PeekError
def test_connect_default():
mock_app = MagicMock(name='PeekApp')
client = connect(mock_app)
mock_app.input.assert_not_called()
assert str(client) == 'http://localhost:9200'
def test_connect_will_prompt_password_when_no_password_is_found():
mock_app = MagicMock(name='PeekApp')
mock_app.config.as_bool = MagicMock(return_value=False)
client = connect(mock_app, **{
'username': 'foo',
'hosts': 'localhost:9201',
'use_ssl': True,
})
mock_app.input.assert_called()
assert str(client) == 'foo @ https://localhost:9201'
@patch.dict(os.environ, {'PEEK_PASSWORD': 'password'})
def test_connect_will_not_prompt_password_when_password_is_found_in_env():
mock_app = MagicMock(name='PeekApp')
mock_app.config.as_bool = MagicMock(return_value=False)
client = connect(mock_app, **{
'username': 'foo',
'name': 'my-connection'
})
mock_app.input.assert_not_called()
assert str(client) == 'my-connection'
def test_connect_will_prompt_password_when_forced():
mock_app = MagicMock(name='PeekApp')
mock_app.config.as_bool = MagicMock(return_value=False)
connect(mock_app, **{
'username': 'foo',
'password': 'password',
'force_prompt': True,
})
mock_app.input.assert_called()
def test_connect_will_fail_when_password_is_not_provided_and_prompt_is_not_allowed():
mock_app = MagicMock(name='PeekApp')
mock_app.config.as_bool = MagicMock(return_value=False)
with pytest.raises(PeekError) as e:
connect(mock_app, **{
'username': 'foo',
'no_prompt': True,
})
assert 'Password is not found and password prompt is disabled' in str(e)
def test_connect_will_use_key_ring_when_configured():
mock_keyring = MagicMock(return_value='password')
with patch('peek.connection._keyring', mock_keyring):
mock_app = MagicMock(name='PeekApp')
mock_app.config.as_bool = MagicMock(return_value=True)
connect(mock_app, **{
'username': 'foo',
})
mock_keyring.assert_has_calls(
[call('peek/localhost:9200/userpass', 'foo'),
call('peek/localhost:9200/userpass', 'foo', 'password')])
def test_connect_has_highest_priority_for_api_key():
mock_app = MagicMock(name='PeekApp')
mock_app.config.as_bool = MagicMock(return_value=False)
client = connect(mock_app, **{
'api_key': 'id:key',
'token': 'some-token',
'username': 'foo',
'password': 'password',
})
assert str(client) == 'K-id @ http://localhost:9200'
assert client.info()['auth'].startswith('ApiKey id')
def test_connect_has_second_priority_for_token():
mock_app = MagicMock(name='PeekApp')
mock_app.config.as_bool = MagicMock(return_value=False)
client = connect(mock_app, **{
'token': 'some-token',
'username': 'foo',
'password': 'password',
})
assert str(client) == 'T-some-token @ http://localhost:9200'
assert client.info()['auth'].startswith('Token some-token')
def test_connect_will_prefer_cloud_id():
mock_app = MagicMock(name='PeekApp')
mock_app.config.as_bool = MagicMock(return_value=False)
mock_es = MagicMock
MockEs = MagicMock(return_value=mock_es)
with patch('peek.connection.Elasticsearch', MockEs):
client = connect(mock_app, **{
'username': 'foo',
'password': 'password',
'cloud_id': 'my-cloud-id',
'hosts': 'example.com:9200',
})
assert str(client) == 'foo @ my-cloud-id'
assert client.hosts is None
def test_es_client_to_and_from_dict():
mock_app = MagicMock(name='PeekApp')
mock_app.config.as_bool = MagicMock(return_value=False)
client = connect(mock_app, **{
'username': 'foo',
'password': 'password',
'hosts': 'example.com:9200',
'use_ssl': True,
})
d = client.to_dict()
assert d['password'] is None
with patch.dict(os.environ, {'PEEK_PASSWORD': 'password'}):
assert client.to_dict() == EsClient.from_dict(mock_app, d).to_dict()
def test_refreshing_es_client_to_and_from_dict():
mock_app = MagicMock(name='PeekApp')
mock_app.config.as_bool = MagicMock(return_value=False)
parent = connect(mock_app, **{
'username': 'foo',
'password': 'password',
'hosts': 'example.com:9200',
'use_ssl': True,
})
client = RefreshingEsClient(
parent=parent,
username='[email protected]',
access_token='access_token',
refresh_token='refresh_token',
expires_in=42,
name='my-refreshing-client'
)
assert client.to_dict() == RefreshingEsClient.from_dict(client.to_dict()).to_dict()
@patch.dict(os.environ, {'PEEK_PASSWORD': 'password'})
def test_es_client_manager():
mock_app = MagicMock(name='PeekApp')
mock_app.config.as_bool = MagicMock(return_value=False)
on_add = MagicMock()
on_set = MagicMock()
on_remove = MagicMock()
listener_0 = DelegatingListener(on_add=on_add, on_set=on_set, on_remove=on_remove)
listener_1 = DelegatingListener(on_add=on_add, on_set=lambda m: False)
listener_2 = DelegatingListener(on_add=on_add, on_set=on_set, on_remove=on_remove)
es_client_manager = EsClientManager(listeners=[listener_0, listener_1, listener_2])
local_admin_0 = EsClient(name='local-admin', hosts='localhost:9200', username='admin', password='password')
local_foo_1 = EsClient(name='local-foo', hosts='localhost:9200', username='foo', password='password')
local_bar_saml_2 = RefreshingEsClient(parent=local_admin_0, username='[email protected]', access_token='access_token',
refresh_token='refresh_token', expires_in=42, name='local-bar-saml')
remote_admin_3 = EsClient(name='remote-admin', hosts='example.com:9200', username='elastic', password='password')
remote_oidc_4 = RefreshingEsClient(parent=EsClient(name='removed', hosts='example.com:9200'), username='dangling',
access_token='access_token', refresh_token='refresh_token', expires_in=42,
name='remote-dangling-oidc')
es_client_manager.add(local_admin_0)
on_add.assert_has_calls([call(es_client_manager), call(es_client_manager), call(es_client_manager)])
es_client_manager.add(local_foo_1)
es_client_manager.add(local_bar_saml_2)
es_client_manager.add(remote_admin_3)
es_client_manager.add(remote_oidc_4)
assert remote_oidc_4 == es_client_manager.current
es_client_manager.set_current(2)
on_set.assert_has_calls([call(es_client_manager)])
assert local_bar_saml_2 == es_client_manager.current
es_client_manager.set_current('remote-admin')
assert remote_admin_3 == es_client_manager.current
assert local_foo_1 == es_client_manager.get_client(1)
assert local_foo_1 == es_client_manager.get_client('local-foo')
assert remote_admin_3 == es_client_manager.get_client(None) # same as get current
d = es_client_manager.to_dict()
assert d == {'_index_current': 3, '_clients': [
{'name': 'local-admin', 'hosts': 'localhost:9200', 'cloud_id': None, 'username': 'admin', 'password': None,
'use_ssl': False, 'verify_certs': False, 'assert_hostname': False, 'ca_certs': None, 'client_cert': None,
'client_key': None, 'api_key': None, 'token': None, 'headers': None},
{'name': 'local-foo', 'hosts': 'localhost:9200', 'cloud_id': None, 'username': 'foo', 'password': None,
'use_ssl': False, 'verify_certs': False, 'assert_hostname': False, 'ca_certs': None, 'client_cert': None,
'client_key': None, 'api_key': None, 'token': None, 'headers': None},
{'name': 'local-bar-saml', 'username': '[email protected]', 'access_token': 'access_token',
'refresh_token': 'refresh_token', 'expires_in': 42, 'parent': 0},
{'name': 'remote-admin', 'hosts': 'example.com:9200', 'cloud_id': None, 'username': 'elastic', 'password': None,
'use_ssl': False, 'verify_certs': False, 'assert_hostname': False, 'ca_certs': None, 'client_cert': None,
'client_key': None, 'api_key': None, 'token': None, 'headers': None},
{'name': 'remote-dangling-oidc', 'username': 'dangling', 'access_token': 'access_token',
'refresh_token': 'refresh_token', 'expires_in': 42,
'parent': {'name': 'removed', 'hosts': 'example.com:9200', 'cloud_id': None, 'username': None,
'password': None, 'use_ssl': False, 'verify_certs': False, 'assert_hostname': False,
'ca_certs': None, 'client_cert': None, 'client_key': None, 'api_key': None, 'token': None,
'headers': None}}]}
new_manager = EsClientManager.from_dict(mock_app, d)
clients = new_manager.clients()
assert len(clients) == 5
assert clients.index(new_manager.current) == 3
assert d == new_manager.to_dict()
es_client_manager.move_current_to(1)
assert es_client_manager.current == remote_admin_3
assert es_client_manager.get_client(1) == remote_admin_3
es_client_manager.move_current_to(4)
assert es_client_manager.current == remote_admin_3
assert es_client_manager.get_client(4) == remote_admin_3
removed = es_client_manager.get_client(1)
es_client_manager.remove_client(1)
on_remove.assert_has_calls([call(es_client_manager, removed), call(es_client_manager, removed)])
| 38.892 | 120 | 0.672221 |
4a27a0479f05a970e5a8c3f80a9e83102f90bef5 | 16,818 | py | Python | tests/test_smart_list.py | wikimedia/operations-debs-python-mwparserfromhell | 8039469b268372a63389e2b43825fa3b3463608a | [
"MIT"
] | null | null | null | tests/test_smart_list.py | wikimedia/operations-debs-python-mwparserfromhell | 8039469b268372a63389e2b43825fa3b3463608a | [
"MIT"
] | null | null | null | tests/test_smart_list.py | wikimedia/operations-debs-python-mwparserfromhell | 8039469b268372a63389e2b43825fa3b3463608a | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
#
# Copyright (C) 2012-2015 Ben Kurtovic <[email protected]>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from __future__ import unicode_literals
try:
import unittest2 as unittest
except ImportError:
import unittest
from mwparserfromhell.compat import py3k, range
from mwparserfromhell.smart_list import SmartList, _ListProxy
class TestSmartList(unittest.TestCase):
"""Test cases for the SmartList class and its child, _ListProxy."""
def _test_get_set_del_item(self, builder):
"""Run tests on __get/set/delitem__ of a list built with *builder*."""
def assign(L, s1, s2, s3, val):
L[s1:s2:s3] = val
def delete(L, s1):
del L[s1]
list1 = builder([0, 1, 2, 3, "one", "two"])
list2 = builder(list(range(10)))
self.assertEqual(1, list1[1])
self.assertEqual("one", list1[-2])
self.assertEqual([2, 3], list1[2:4])
self.assertRaises(IndexError, lambda: list1[6])
self.assertRaises(IndexError, lambda: list1[-7])
self.assertEqual([0, 1, 2], list1[:3])
self.assertEqual([0, 1, 2, 3, "one", "two"], list1[:])
self.assertEqual([3, "one", "two"], list1[3:])
self.assertEqual([3, "one", "two"], list1[3:100])
self.assertEqual(["one", "two"], list1[-2:])
self.assertEqual([0, 1], list1[:-4])
self.assertEqual([], list1[6:])
self.assertEqual([], list1[4:2])
self.assertEqual([0, 2, "one"], list1[0:5:2])
self.assertEqual([0, 2], list1[0:-3:2])
self.assertEqual([0, 1, 2, 3, "one", "two"], list1[::])
self.assertEqual([2, 3, "one", "two"], list1[2::])
self.assertEqual([0, 1, 2, 3], list1[:4:])
self.assertEqual([2, 3], list1[2:4:])
self.assertEqual([0, 2, 4, 6, 8], list2[::2])
self.assertEqual([2, 5, 8], list2[2::3])
self.assertEqual([0, 3], list2[:6:3])
self.assertEqual([2, 5, 8], list2[-8:9:3])
self.assertEqual([], list2[100000:1000:-100])
list1[3] = 100
self.assertEqual(100, list1[3])
list1[-3] = 101
self.assertEqual([0, 1, 2, 101, "one", "two"], list1)
list1[5:] = [6, 7, 8]
self.assertEqual([6, 7, 8], list1[5:])
self.assertEqual([0, 1, 2, 101, "one", 6, 7, 8], list1)
list1[2:4] = [-1, -2, -3, -4, -5]
self.assertEqual([0, 1, -1, -2, -3, -4, -5, "one", 6, 7, 8], list1)
list1[0:-3] = [99]
self.assertEqual([99, 6, 7, 8], list1)
list2[0:6:2] = [100, 102, 104]
self.assertEqual([100, 1, 102, 3, 104, 5, 6, 7, 8, 9], list2)
list2[::3] = [200, 203, 206, 209]
self.assertEqual([200, 1, 102, 203, 104, 5, 206, 7, 8, 209], list2)
list2[::] = range(7)
self.assertEqual([0, 1, 2, 3, 4, 5, 6], list2)
self.assertRaises(ValueError, assign, list2, 0, 5, 2,
[100, 102, 104, 106])
with self.assertRaises(IndexError):
list2[7] = "foo"
with self.assertRaises(IndexError):
list2[-8] = "foo"
del list2[2]
self.assertEqual([0, 1, 3, 4, 5, 6], list2)
del list2[-3]
self.assertEqual([0, 1, 3, 5, 6], list2)
self.assertRaises(IndexError, delete, list2, 100)
self.assertRaises(IndexError, delete, list2, -6)
list2[:] = range(10)
del list2[3:6]
self.assertEqual([0, 1, 2, 6, 7, 8, 9], list2)
del list2[-2:]
self.assertEqual([0, 1, 2, 6, 7], list2)
del list2[:2]
self.assertEqual([2, 6, 7], list2)
list2[:] = range(10)
del list2[2:8:2]
self.assertEqual([0, 1, 3, 5, 7, 8, 9], list2)
def _test_add_radd_iadd(self, builder):
"""Run tests on __r/i/add__ of a list built with *builder*."""
list1 = builder(range(5))
list2 = builder(range(5, 10))
self.assertEqual([0, 1, 2, 3, 4, 5, 6], list1 + [5, 6])
self.assertEqual([0, 1, 2, 3, 4], list1)
self.assertEqual(list(range(10)), list1 + list2)
self.assertEqual([-2, -1, 0, 1, 2, 3, 4], [-2, -1] + list1)
self.assertEqual([0, 1, 2, 3, 4], list1)
list1 += ["foo", "bar", "baz"]
self.assertEqual([0, 1, 2, 3, 4, "foo", "bar", "baz"], list1)
def _test_other_magic_methods(self, builder):
"""Run tests on other magic methods of a list built with *builder*."""
list1 = builder([0, 1, 2, 3, "one", "two"])
list2 = builder([])
list3 = builder([0, 2, 3, 4])
list4 = builder([0, 1, 2])
if py3k:
self.assertEqual("[0, 1, 2, 3, 'one', 'two']", str(list1))
self.assertEqual(b"\x00\x01\x02", bytes(list4))
self.assertEqual("[0, 1, 2, 3, 'one', 'two']", repr(list1))
else:
self.assertEqual("[0, 1, 2, 3, u'one', u'two']", unicode(list1))
self.assertEqual(b"[0, 1, 2, 3, u'one', u'two']", str(list1))
self.assertEqual(b"[0, 1, 2, 3, u'one', u'two']", repr(list1))
self.assertTrue(list1 < list3)
self.assertTrue(list1 <= list3)
self.assertFalse(list1 == list3)
self.assertTrue(list1 != list3)
self.assertFalse(list1 > list3)
self.assertFalse(list1 >= list3)
other1 = [0, 2, 3, 4]
self.assertTrue(list1 < other1)
self.assertTrue(list1 <= other1)
self.assertFalse(list1 == other1)
self.assertTrue(list1 != other1)
self.assertFalse(list1 > other1)
self.assertFalse(list1 >= other1)
other2 = [0, 0, 1, 2]
self.assertFalse(list1 < other2)
self.assertFalse(list1 <= other2)
self.assertFalse(list1 == other2)
self.assertTrue(list1 != other2)
self.assertTrue(list1 > other2)
self.assertTrue(list1 >= other2)
other3 = [0, 1, 2, 3, "one", "two"]
self.assertFalse(list1 < other3)
self.assertTrue(list1 <= other3)
self.assertTrue(list1 == other3)
self.assertFalse(list1 != other3)
self.assertFalse(list1 > other3)
self.assertTrue(list1 >= other3)
self.assertTrue(bool(list1))
self.assertFalse(bool(list2))
self.assertEqual(6, len(list1))
self.assertEqual(0, len(list2))
out = []
for obj in list1:
out.append(obj)
self.assertEqual([0, 1, 2, 3, "one", "two"], out)
out = []
for ch in list2:
out.append(ch)
self.assertEqual([], out)
gen1 = iter(list1)
out = []
for i in range(len(list1)):
out.append(next(gen1))
self.assertRaises(StopIteration, next, gen1)
self.assertEqual([0, 1, 2, 3, "one", "two"], out)
gen2 = iter(list2)
self.assertRaises(StopIteration, next, gen2)
self.assertEqual(["two", "one", 3, 2, 1, 0], list(reversed(list1)))
self.assertEqual([], list(reversed(list2)))
self.assertTrue("one" in list1)
self.assertTrue(3 in list1)
self.assertFalse(10 in list1)
self.assertFalse(0 in list2)
self.assertEqual([], list2 * 5)
self.assertEqual([], 5 * list2)
self.assertEqual([0, 1, 2, 0, 1, 2, 0, 1, 2], list4 * 3)
self.assertEqual([0, 1, 2, 0, 1, 2, 0, 1, 2], 3 * list4)
list4 *= 2
self.assertEqual([0, 1, 2, 0, 1, 2], list4)
def _test_list_methods(self, builder):
"""Run tests on the public methods of a list built with *builder*."""
list1 = builder(range(5))
list2 = builder(["foo"])
list3 = builder([("a", 5), ("d", 2), ("b", 8), ("c", 3)])
list1.append(5)
list1.append(1)
list1.append(2)
self.assertEqual([0, 1, 2, 3, 4, 5, 1, 2], list1)
self.assertEqual(0, list1.count(6))
self.assertEqual(2, list1.count(1))
list1.extend(range(5, 8))
self.assertEqual([0, 1, 2, 3, 4, 5, 1, 2, 5, 6, 7], list1)
self.assertEqual(1, list1.index(1))
self.assertEqual(6, list1.index(1, 3))
self.assertEqual(6, list1.index(1, 3, 7))
self.assertRaises(ValueError, list1.index, 1, 3, 5)
list1.insert(0, -1)
self.assertEqual([-1, 0, 1, 2, 3, 4, 5, 1, 2, 5, 6, 7], list1)
list1.insert(-1, 6.5)
self.assertEqual([-1, 0, 1, 2, 3, 4, 5, 1, 2, 5, 6, 6.5, 7], list1)
list1.insert(13, 8)
self.assertEqual([-1, 0, 1, 2, 3, 4, 5, 1, 2, 5, 6, 6.5, 7, 8], list1)
self.assertEqual(8, list1.pop())
self.assertEqual(7, list1.pop())
self.assertEqual([-1, 0, 1, 2, 3, 4, 5, 1, 2, 5, 6, 6.5], list1)
self.assertEqual(-1, list1.pop(0))
self.assertEqual(5, list1.pop(5))
self.assertEqual(6.5, list1.pop(-1))
self.assertEqual([0, 1, 2, 3, 4, 1, 2, 5, 6], list1)
self.assertEqual("foo", list2.pop())
self.assertRaises(IndexError, list2.pop)
self.assertEqual([], list2)
list1.remove(6)
self.assertEqual([0, 1, 2, 3, 4, 1, 2, 5], list1)
list1.remove(1)
self.assertEqual([0, 2, 3, 4, 1, 2, 5], list1)
list1.remove(1)
self.assertEqual([0, 2, 3, 4, 2, 5], list1)
self.assertRaises(ValueError, list1.remove, 1)
list1.reverse()
self.assertEqual([5, 2, 4, 3, 2, 0], list1)
list1.sort()
self.assertEqual([0, 2, 2, 3, 4, 5], list1)
list1.sort(reverse=True)
self.assertEqual([5, 4, 3, 2, 2, 0], list1)
if not py3k:
func = lambda x, y: abs(3 - x) - abs(3 - y) # Distance from 3
list1.sort(cmp=func)
self.assertEqual([3, 4, 2, 2, 5, 0], list1)
list1.sort(cmp=func, reverse=True)
self.assertEqual([0, 5, 4, 2, 2, 3], list1)
list3.sort(key=lambda i: i[1])
self.assertEqual([("d", 2), ("c", 3), ("a", 5), ("b", 8)], list3)
list3.sort(key=lambda i: i[1], reverse=True)
self.assertEqual([("b", 8), ("a", 5), ("c", 3), ("d", 2)], list3)
def _dispatch_test_for_children(self, meth):
"""Run a test method on various different types of children."""
meth(lambda L: SmartList(list(L))[:])
meth(lambda L: SmartList([999] + list(L))[1:])
meth(lambda L: SmartList(list(L) + [999])[:-1])
meth(lambda L: SmartList([101, 102] + list(L) + [201, 202])[2:-2])
def test_docs(self):
"""make sure the methods of SmartList/_ListProxy have docstrings"""
methods = ["append", "count", "extend", "index", "insert", "pop",
"remove", "reverse", "sort"]
for meth in methods:
expected = getattr(list, meth).__doc__
smartlist_doc = getattr(SmartList, meth).__doc__
listproxy_doc = getattr(_ListProxy, meth).__doc__
self.assertEqual(expected, smartlist_doc)
self.assertEqual(expected, listproxy_doc)
def test_doctest(self):
"""make sure the test embedded in SmartList's docstring passes"""
parent = SmartList([0, 1, 2, 3])
self.assertEqual([0, 1, 2, 3], parent)
child = parent[2:]
self.assertEqual([2, 3], child)
child.append(4)
self.assertEqual([2, 3, 4], child)
self.assertEqual([0, 1, 2, 3, 4], parent)
def test_parent_get_set_del(self):
"""make sure SmartList's getitem/setitem/delitem work"""
self._test_get_set_del_item(SmartList)
def test_parent_add(self):
"""make sure SmartList's add/radd/iadd work"""
self._test_add_radd_iadd(SmartList)
def test_parent_other_magics(self):
"""make sure SmartList's other magically implemented features work"""
self._test_other_magic_methods(SmartList)
def test_parent_methods(self):
"""make sure SmartList's non-magic methods work, like append()"""
self._test_list_methods(SmartList)
def test_child_get_set_del(self):
"""make sure _ListProxy's getitem/setitem/delitem work"""
self._dispatch_test_for_children(self._test_get_set_del_item)
def test_child_add(self):
"""make sure _ListProxy's add/radd/iadd work"""
self._dispatch_test_for_children(self._test_add_radd_iadd)
def test_child_other_magics(self):
"""make sure _ListProxy's other magically implemented features work"""
self._dispatch_test_for_children(self._test_other_magic_methods)
def test_child_methods(self):
"""make sure _ListProxy's non-magic methods work, like append()"""
self._dispatch_test_for_children(self._test_list_methods)
def test_influence(self):
"""make sure changes are propagated from parents to children"""
parent = SmartList([0, 1, 2, 3, 4, 5])
child1 = parent[2:]
child2 = parent[2:5]
self.assertEqual([0, 1, 2, 3, 4, 5], parent)
self.assertEqual([2, 3, 4, 5], child1)
self.assertEqual([2, 3, 4], child2)
self.assertEqual(2, len(parent._children))
parent.append(6)
child1.append(7)
child2.append(4.5)
self.assertEqual([0, 1, 2, 3, 4, 4.5, 5, 6, 7], parent)
self.assertEqual([2, 3, 4, 4.5, 5, 6, 7], child1)
self.assertEqual([2, 3, 4, 4.5], child2)
parent.insert(0, -1)
parent.insert(4, 2.5)
parent.insert(10, 6.5)
self.assertEqual([-1, 0, 1, 2, 2.5, 3, 4, 4.5, 5, 6, 6.5, 7], parent)
self.assertEqual([2, 2.5, 3, 4, 4.5, 5, 6, 6.5, 7], child1)
self.assertEqual([2, 2.5, 3, 4, 4.5], child2)
self.assertEqual(7, parent.pop())
self.assertEqual(6.5, child1.pop())
self.assertEqual(4.5, child2.pop())
self.assertEqual([-1, 0, 1, 2, 2.5, 3, 4, 5, 6], parent)
self.assertEqual([2, 2.5, 3, 4, 5, 6], child1)
self.assertEqual([2, 2.5, 3, 4], child2)
parent.remove(-1)
child1.remove(2.5)
self.assertEqual([0, 1, 2, 3, 4, 5, 6], parent)
self.assertEqual([2, 3, 4, 5, 6], child1)
self.assertEqual([2, 3, 4], child2)
self.assertEqual(0, parent.pop(0))
self.assertEqual([1, 2, 3, 4, 5, 6], parent)
self.assertEqual([2, 3, 4, 5, 6], child1)
self.assertEqual([2, 3, 4], child2)
child2.reverse()
self.assertEqual([1, 4, 3, 2, 5, 6], parent)
self.assertEqual([4, 3, 2, 5, 6], child1)
self.assertEqual([4, 3, 2], child2)
parent.extend([7, 8])
child1.extend([8.1, 8.2])
child2.extend([1.9, 1.8])
self.assertEqual([1, 4, 3, 2, 1.9, 1.8, 5, 6, 7, 8, 8.1, 8.2], parent)
self.assertEqual([4, 3, 2, 1.9, 1.8, 5, 6, 7, 8, 8.1, 8.2], child1)
self.assertEqual([4, 3, 2, 1.9, 1.8], child2)
child3 = parent[9:]
self.assertEqual([8, 8.1, 8.2], child3)
del parent[8:]
self.assertEqual([1, 4, 3, 2, 1.9, 1.8, 5, 6], parent)
self.assertEqual([4, 3, 2, 1.9, 1.8, 5, 6], child1)
self.assertEqual([4, 3, 2, 1.9, 1.8], child2)
self.assertEqual([], child3)
del child1
self.assertEqual([1, 4, 3, 2, 1.9, 1.8, 5, 6], parent)
self.assertEqual([4, 3, 2, 1.9, 1.8], child2)
self.assertEqual([], child3)
self.assertEqual(2, len(parent._children))
del child3
self.assertEqual([1, 4, 3, 2, 1.9, 1.8, 5, 6], parent)
self.assertEqual([4, 3, 2, 1.9, 1.8], child2)
self.assertEqual(1, len(parent._children))
parent.remove(1.9)
parent.remove(1.8)
self.assertEqual([1, 4, 3, 2, 5, 6], parent)
self.assertEqual([4, 3, 2], child2)
parent.reverse()
self.assertEqual([6, 5, 2, 3, 4, 1], parent)
self.assertEqual([4, 3, 2], child2)
self.assertEqual(0, len(parent._children))
if __name__ == "__main__":
unittest.main(verbosity=2)
| 39.571765 | 79 | 0.565644 |
4a27a0d2f818f40967bc1566198c3282338deb33 | 2,467 | py | Python | disentanglement_lib/evaluation/metrics/unsupervised_metrics_test.py | erow/disentanglement_lib | c875207fdeadc44880277542447544941bc0bd0a | ["Apache-2.0"] | null | null | null | disentanglement_lib/evaluation/metrics/unsupervised_metrics_test.py | erow/disentanglement_lib | c875207fdeadc44880277542447544941bc0bd0a | ["Apache-2.0"] | null | null | null | disentanglement_lib/evaluation/metrics/unsupervised_metrics_test.py | erow/disentanglement_lib | c875207fdeadc44880277542447544941bc0bd0a | ["Apache-2.0"] | null | null | null |
# coding=utf-8
# Copyright 2018 The DisentanglementLib Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for unsupervised_metrics.py."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import absltest
from disentanglement_lib.evaluation.metrics import unsupervised_metrics
import numpy as np
import scipy
class UnsupervisedMetricsTest(absltest.TestCase):
def test_gaussian_total_correlation_zero(self):
score = unsupervised_metrics.gaussian_total_correlation(
np.diag(np.ones(5, dtype=np.float64)))
self.assertBetween(score, -0.01, 0.01)
def test_gaussian_total_correlation_same(self):
"""Check that the results of the both functions are the same."""
cov = np.array([[1, 0.9], [0.9, 1.0]], dtype=np.float32)
means = np.array([0.0, 0.0], dtype=np.float32)
cov_central = np.diag(np.diag(cov))
shouldbe = unsupervised_metrics.kl_gaussians_numerically_unstable(
means, cov, means, cov_central, 2)
score = unsupervised_metrics.gaussian_total_correlation(cov)
self.assertBetween(score, shouldbe - 0.01, shouldbe + 0.01)
def test_gaussian_wasserstein_correlation_zero(self):
score = unsupervised_metrics.gaussian_wasserstein_correlation(
np.diag(np.ones(5, dtype=np.float64)))
self.assertBetween(score, -0.01, 0.01)
def test_gaussian_wasserstein_correlation_same(self):
cov = np.array([[1, 0.9], [0.9, 1.0]], dtype=np.float32)
score = unsupervised_metrics.gaussian_wasserstein_correlation(cov)
cov_only_diagonal = np.diag(np.diag(cov))
sqrtm = scipy.linalg.sqrtm(np.matmul(cov, cov_only_diagonal))
shouldbe = np.trace(cov + cov_only_diagonal - 2 * sqrtm)
self.assertBetween(score, shouldbe - 0.01, shouldbe + 0.01)
if __name__ == "__main__":
absltest.main()
| 41.813559 | 74 | 0.72274 |
4a27a275d71ebe221d71d1aee3da2a74ab41bda2 | 5,023 | py | Python | mars/learn/semi_supervised/tests/test_label_propagation.py | wjsi/mars | a69fb19edfe748d4393b90ff2c4941a76c084596 | [
"Apache-2.0"
] | 1 | 2022-02-02T03:03:48.000Z | 2022-02-02T03:03:48.000Z | mars/learn/semi_supervised/tests/test_label_propagation.py | wjsi/mars | a69fb19edfe748d4393b90ff2c4941a76c084596 | [
"Apache-2.0"
] | null | null | null | mars/learn/semi_supervised/tests/test_label_propagation.py | wjsi/mars | a69fb19edfe748d4393b90ff2c4941a76c084596 | [
"Apache-2.0"
] | null | null | null | # Copyright 1999-2021 Alibaba Group Holding Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import pytest
from sklearn.datasets import make_classification
from sklearn.model_selection import train_test_split
from sklearn.exceptions import ConvergenceWarning
from sklearn.utils._testing import assert_no_warnings, assert_warns
from .... import tensor as mt
from ...metrics.pairwise import rbf_kernel
from ...neighbors import NearestNeighbors
from .. import LabelPropagation
estimators = [
(LabelPropagation, {"kernel": "rbf"}),
(LabelPropagation, {"kernel": "knn", "n_neighbors": 2}),
(LabelPropagation, {"kernel": lambda x, y: rbf_kernel(x, y, gamma=20)}),
]
@pytest.mark.parametrize("estimator, parameters", estimators)
def test_fit_transduction(setup, estimator, parameters):
samples = [[1.0, 0.0], [0.0, 2.0], [1.0, 3.0]]
labels = [0, 1, -1]
clf = estimator(**parameters).fit(samples, labels)
assert clf.transduction_[2].fetch() == 1
@pytest.mark.parametrize("estimator, parameters", estimators)
def test_distribution(setup, estimator, parameters):
samples = [[1.0, 0.0], [0.0, 1.0], [1.0, 1.0]]
labels = [0, 1, -1]
clf = estimator(**parameters).fit(samples, labels)
if parameters["kernel"] == "knn":
return # unstable test; changes in k-NN ordering break it
else:
np.testing.assert_array_almost_equal(
np.asarray(clf.label_distributions_[2]), np.array([0.5, 0.5]), 2
)
@pytest.mark.parametrize("estimator, parameters", estimators)
def test_predict(setup, estimator, parameters):
samples = [[1.0, 0.0], [0.0, 2.0], [1.0, 3.0]]
labels = [0, 1, -1]
clf = estimator(**parameters).fit(samples, labels)
np.testing.assert_array_equal(clf.predict([[0.5, 2.5]]).fetch(), np.array([1]))
@pytest.mark.parametrize("estimator, parameters", estimators)
def test_predict_proba(setup, estimator, parameters):
samples = [[1.0, 0.0], [0.0, 1.0], [1.0, 2.5]]
labels = [0, 1, -1]
clf = estimator(**parameters).fit(samples, labels)
np.testing.assert_almost_equal(
clf.predict_proba([[1.0, 1.0]]).fetch(), np.array([[0.5, 0.5]])
)
def test_label_propagation_closed_form(setup):
n_classes = 2
X, y = make_classification(n_classes=n_classes, n_samples=200, random_state=0)
y[::3] = -1
Y = np.zeros((len(y), n_classes + 1))
Y[np.arange(len(y)), y] = 1
unlabelled_idx = Y[:, (-1,)].nonzero()[0]
labelled_idx = (Y[:, (-1,)] == 0).nonzero()[0]
clf = LabelPropagation(max_iter=10000, gamma=0.1)
clf.fit(X, y)
# adopting notation from Zhu et al 2002
T_bar = clf._build_graph().to_numpy()
Tuu = T_bar[tuple(np.meshgrid(unlabelled_idx, unlabelled_idx, indexing="ij"))]
Tul = T_bar[tuple(np.meshgrid(unlabelled_idx, labelled_idx, indexing="ij"))]
Y = Y[:, :-1]
Y_l = Y[labelled_idx, :]
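    # The next line evaluates the closed-form solution from Zhu et al. (2002):
    #   Y_u = (I - T_uu)^{-1} T_ul Y_l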
Y_u = np.dot(np.dot(np.linalg.inv(np.eye(Tuu.shape[0]) - Tuu), Tul), Y_l)
expected = Y.copy()
expected[unlabelled_idx, :] = Y_u
expected /= expected.sum(axis=1)[:, np.newaxis]
np.testing.assert_array_almost_equal(expected, clf.label_distributions_.fetch(), 4)
def test_convergence_warning(setup):
# This is a non-regression test for #5774
X = np.array([[1.0, 0.0], [0.0, 1.0], [1.0, 2.5]])
y = np.array([0, 1, -1])
mdl = LabelPropagation(kernel="rbf", max_iter=1)
assert_warns(ConvergenceWarning, mdl.fit, X, y)
assert mdl.n_iter_ == mdl.max_iter
mdl = LabelPropagation(kernel="rbf", max_iter=500)
assert_no_warnings(mdl.fit, X, y)
def test_predict_sparse_callable_kernel(setup):
# This is a non-regression test for #15866
# Custom sparse kernel (top-K RBF)
def topk_rbf(X, Y=None, n_neighbors=10, gamma=1e-5):
nn = NearestNeighbors(n_neighbors=10, metric="euclidean", n_jobs=-1)
nn.fit(X)
W = -1 * mt.power(nn.kneighbors_graph(Y, mode="distance"), 2) * gamma
W = mt.exp(W)
assert W.issparse()
return W.T
n_classes = 4
n_samples = 500
n_test = 10
X, y = make_classification(
n_classes=n_classes,
n_samples=n_samples,
n_features=20,
n_informative=20,
n_redundant=0,
n_repeated=0,
random_state=0,
)
X_train, X_test, y_train, y_test = train_test_split(
X, y, test_size=n_test, random_state=0
)
model = LabelPropagation(kernel=topk_rbf)
model.fit(X_train, y_train)
assert model.score(X_test, y_test).fetch() >= 0.9
| 34.641379 | 87 | 0.664344 |
4a27a2ed310586b5415d034e487c860405b9c26e | 57,062 | py | Python | conda_build/skeletons/cran.py | grlee77/conda-build | ea99e2dc2fa473039dbeb73d92b3f5d5c59548fe | [
"BSD-3-Clause"
] | null | null | null | conda_build/skeletons/cran.py | grlee77/conda-build | ea99e2dc2fa473039dbeb73d92b3f5d5c59548fe | [
"BSD-3-Clause"
] | null | null | null | conda_build/skeletons/cran.py | grlee77/conda-build | ea99e2dc2fa473039dbeb73d92b3f5d5c59548fe | [
"BSD-3-Clause"
] | null | null | null | """
Tools for converting CRAN packages to conda recipes.
"""
from __future__ import absolute_import, division, print_function
import argparse
import copy
from itertools import chain
from os import makedirs, listdir, sep, environ
from os.path import (basename, commonprefix, exists, isabs, isdir,
isfile, join, normpath, realpath, relpath)
import re
import subprocess
import sys
import hashlib
import requests
import tarfile
import unicodedata
import yaml
# try to import C dumper
try:
from yaml import CSafeDumper as SafeDumper
except ImportError:
from yaml import SafeDumper
from conda_build import source, metadata
from conda_build.config import get_or_merge_config
from conda_build.conda_interface import text_type, iteritems, TemporaryDirectory, cc_conda_build
from conda_build.license_family import allowed_license_families, guess_license_family
from conda_build.utils import rm_rf, ensure_list
from conda_build.variants import get_package_variants, DEFAULT_VARIANTS
SOURCE_META = """\
{archive_keys}
{git_url_key} {git_url}
{git_tag_key} {git_tag}
{patches}
"""
BINARY_META = """\
url: {cranurl}{sel}
{hash_entry}{sel}
"""
VERSION_META = """\
{{% set version = '{cran_version}' %}}{sel}"""
CRAN_META = """\
{version_source}
{version_binary1}
{version_binary2}
{{% set posix = 'm2-' if win else '' %}}
{{% set native = 'm2w64-' if win else '' %}}
package:
name: {packagename}
version: {{{{ version|replace("-", "_") }}}}
source:
{source}
{binary1}
{binary2}
build:
merge_build_host: True{sel_src_and_win}
# If this is a new build for the same version, increment the build number.
number: {build_number}
{noarch_generic}
# This is required to make R link correctly on Linux.
rpaths:
- lib/R/lib/
- lib/
{script_env}
{suggests}
requirements:
build:{build_depends}
host:{host_depends}
run:{run_depends}
test:
commands:
# You can put additional test commands to be run here.
- $R -e "library('{cran_packagename}')" # [not win]
- "\\"%R%\\" -e \\"library('{cran_packagename}')\\"" # [win]
# You can also put a file called run_test.py, run_test.sh, or run_test.bat
# in the recipe that will be run at test time.
# requires:
# Put any additional test requirements here.
about:
{home_comment}home:{homeurl}
license: {license}
{summary_comment}summary:{summary}
license_family: {license_family}
{extra_recipe_maintainers}
# The original CRAN metadata for this package was:
{cran_metadata}
# See
# http://docs.continuum.io/conda/build.html for
# more information about meta.yaml
"""
CRAN_BUILD_SH_SOURCE = """\
#!/bin/bash
# 'Autobrew' is being used by more and more packages these days
# to grab static libraries from Homebrew bottles. These bottles
# are fetched via Homebrew's --force-bottle option which grabs
# a bottle for the build machine which may not be macOS 10.9.
# Also, we want to use conda packages (and shared libraries) for
# these 'system' dependencies. See:
# https://github.com/jeroen/autobrew/issues/3
export DISABLE_AUTOBREW=1
# R refuses to build packages that mark themselves as Priority: Recommended
mv DESCRIPTION DESCRIPTION.old
grep -va '^Priority: ' DESCRIPTION.old > DESCRIPTION
$R CMD INSTALL --build .
# Add more build steps here, if they are necessary.
# See
# http://docs.continuum.io/conda/build.html
# for a list of environment variables that are set during the build process.
"""
CRAN_BUILD_SH_MIXED = """\
#!/bin/bash
if {source_pf_bash}; then
export DISABLE_AUTOBREW=1
mv DESCRIPTION DESCRIPTION.old
grep -v '^Priority: ' DESCRIPTION.old > DESCRIPTION
$R CMD INSTALL --build .
else
mkdir -p $PREFIX/lib/R/library/{cran_packagename}
mv * $PREFIX/lib/R/library/{cran_packagename}
if [[ $target_platform == osx-64 ]]; then
pushd $PREFIX
for libdir in lib/R/lib lib/R/modules lib/R/library lib/R/bin/exec sysroot/usr/lib; do
pushd $libdir || exit 1
for SHARED_LIB in $(find . -type f -iname "*.dylib" -or -iname "*.so" -or -iname "R"); do
echo "fixing SHARED_LIB $SHARED_LIB"
install_name_tool -change /Library/Frameworks/R.framework/Versions/3.5.0-MRO/Resources/lib/libR.dylib "$PREFIX"/lib/R/lib/libR.dylib $SHARED_LIB || true
install_name_tool -change /Library/Frameworks/R.framework/Versions/3.5/Resources/lib/libR.dylib "$PREFIX"/lib/R/lib/libR.dylib $SHARED_LIB || true
install_name_tool -change /usr/local/clang4/lib/libomp.dylib "$PREFIX"/lib/libomp.dylib $SHARED_LIB || true
install_name_tool -change /usr/local/gfortran/lib/libgfortran.3.dylib "$PREFIX"/lib/libgfortran.3.dylib $SHARED_LIB || true
install_name_tool -change /Library/Frameworks/R.framework/Versions/3.5/Resources/lib/libquadmath.0.dylib "$PREFIX"/lib/libquadmath.0.dylib $SHARED_LIB || true
install_name_tool -change /usr/local/gfortran/lib/libquadmath.0.dylib "$PREFIX"/lib/libquadmath.0.dylib $SHARED_LIB || true
install_name_tool -change /Library/Frameworks/R.framework/Versions/3.5/Resources/lib/libgfortran.3.dylib "$PREFIX"/lib/libgfortran.3.dylib $SHARED_LIB || true
install_name_tool -change /usr/lib/libgcc_s.1.dylib "$PREFIX"/lib/libgcc_s.1.dylib $SHARED_LIB || true
install_name_tool -change /usr/lib/libiconv.2.dylib "$PREFIX"/sysroot/usr/lib/libiconv.2.dylib $SHARED_LIB || true
install_name_tool -change /usr/lib/libncurses.5.4.dylib "$PREFIX"/sysroot/usr/lib/libncurses.5.4.dylib $SHARED_LIB || true
install_name_tool -change /usr/lib/libicucore.A.dylib "$PREFIX"/sysroot/usr/lib/libicucore.A.dylib $SHARED_LIB || true
install_name_tool -change /usr/lib/libexpat.1.dylib "$PREFIX"/lib/libexpat.1.dylib $SHARED_LIB || true
install_name_tool -change /usr/lib/libcurl.4.dylib "$PREFIX"/lib/libcurl.4.dylib $SHARED_LIB || true
install_name_tool -change /usr/lib/libc++.1.dylib "$PREFIX"/lib/libc++.1.dylib $SHARED_LIB || true
install_name_tool -change /Library/Frameworks/R.framework/Versions/3.5/Resources/lib/libc++.1.dylib "$PREFIX"/lib/libc++.1.dylib $SHARED_LIB || true
done
popd
done
popd
fi
fi
"""
CRAN_BUILD_SH_BINARY = """\
#!/bin/bash
mkdir -p $PREFIX/lib/R/library/{cran_packagename}
mv * $PREFIX/lib/R/library/{cran_packagename}
"""
CRAN_BLD_BAT_SOURCE = """\
"%R%" CMD INSTALL --build .
IF %ERRORLEVEL% NEQ 0 exit 1
"""
# We hardcode the fact that CRAN does not provide win32 binaries here.
CRAN_BLD_BAT_MIXED = """\
if "%target_platform%" == "win-64" goto skip_source_build
"%R%" CMD INSTALL --build .
IF %ERRORLEVEL% NEQ 0 exit 1
exit 0
:skip_source_build
mkdir %PREFIX%\lib\R\library
robocopy /E . "%PREFIX%\lib\R\library\{cran_packagename}"
if %ERRORLEVEL% NEQ 1 exit 1
exit 0
"""
INDENT = '\n    - '
CRAN_KEYS = [
'Site',
'Archs',
'Depends',
'Enhances',
'Imports',
'License',
'License_is_FOSS',
'License_restricts_use',
'LinkingTo',
'MD5sum',
'NeedsCompilation',
'OS_type',
'Package',
'Path',
'Priority',
'Suggests',
'Version',
'Title',
'Author',
'Maintainer',
]
# The following base/recommended package names are derived from R's source
# tree (R-3.0.2/share/make/vars.mk). Hopefully they don't change too much
# between versions.
R_BASE_PACKAGE_NAMES = (
'base',
'compiler',
'datasets',
'graphics',
'grDevices',
'grid',
'methods',
'parallel',
'splines',
'stats',
'stats4',
'tcltk',
'tools',
'utils',
)
R_RECOMMENDED_PACKAGE_NAMES = (
'MASS',
'lattice',
'Matrix',
'nlme',
'survival',
'boot',
'cluster',
'codetools',
'foreign',
'KernSmooth',
'rpart',
'class',
'nnet',
'spatial',
'mgcv',
)
# Stolen then tweaked from debian.deb822.PkgRelation.__dep_RE.
VERSION_DEPENDENCY_REGEX = re.compile(
r'^\s*(?P<name>[a-zA-Z0-9.+\-]{1,})'
r'(\s*\(\s*(?P<relop>[>=<]+)\s*'
r'(?P<version>[0-9a-zA-Z:\-+~.]+)\s*\))'
r'?(\s*\[(?P<archs>[\s!\w\-]+)\])?\s*$'
)
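# Rough sketch of what this regex is expected to capture for a typical CRAN
# dependency string (illustrative input, not taken from any real DESCRIPTION):
#   VERSION_DEPENDENCY_REGEX.match('R (>= 2.15.0)').groupdict()
#   -> {'name': 'R', 'relop': '>=', 'version': '2.15.0', 'archs': None}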
target_platform_bash_test_by_sel = {'linux': '=~ linux.*',
'linux32': '== linux-32',
'linux64': '== linux-64',
'win32': '== win-32',
'win64': '== win-64',
'osx': '== osx-64'}
def package_exists(package_name):
# TODO: how can we get cran to spit out package presence?
# available.packages() is probably a start, but no channels are working on mac right now?
return True
# install_output = subprocess.check_output([join(sys.prefix, "r"), "-e",
# # ind=2 arbitrarily chooses some CRAN mirror to try.
# "chooseCRANmirror(ind=2);install.packages('{}')".format(package_name)])
def add_parser(repos):
# for loading default variant info
cran = repos.add_parser(
"cran",
help="""
Create recipe skeleton for packages hosted on the Comprehensive R Archive
Network (CRAN) (cran.r-project.org).
""",
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
)
cran.add_argument(
"packages",
nargs='+',
help="""CRAN packages to create recipe skeletons for.""",
)
cran.add_argument(
"--output-dir",
help="Directory to write recipes to (default: %(default)s).",
default=".",
)
cran.add_argument(
"--output-suffix",
help="Suffix to add to recipe dir, can contain other dirs (eg: -feedstock/recipe).",
default="",
)
cran.add_argument(
"--add-maintainer",
help="Add this github username as a maintainer if not already present.",
)
cran.add_argument(
"--version",
help="Version to use. Applies to all packages.",
)
cran.add_argument(
"--git-tag",
help="Git tag to use for GitHub recipes.",
)
cran.add_argument(
"--all-urls",
action="store_true",
help="""Look at all URLs, not just source URLs. Use this if it can't
find the right URL.""",
)
cran.add_argument(
"--cran-url",
help="URL to use for as source package repository",
)
cran.add_argument(
"--r-interp",
default='r-base',
help="Declare R interpreter package",
)
cran.add_argument(
"--use-binaries-ver",
help=("Repackage binaries from version provided by argument instead of building "
"from source."),
)
cran.add_argument(
"--use-when-no-binary",
choices=('src',
'old',
'src-old',
'old-src',
'error'),
default='src',
help="""Sometimes binaries are not available at the correct version for
a given platform (macOS). You can use this flag to specify what
fallback to take, either compiling from source or using an older
binary or trying one then the other."""
)
cran.add_argument(
"--use-noarch-generic",
action='store_true',
dest='use_noarch_generic',
help=("Mark packages that do not need compilation as `noarch: generic`"),
)
cran.add_argument(
"--use-rtools-win",
action='store_true',
help="Use Rtools when building from source on Windows",
)
cran.add_argument(
"--recursive",
action='store_true',
help='Create recipes for dependencies if they do not already exist.',
)
cran.add_argument(
"--no-recursive",
action='store_false',
dest='recursive',
help="Don't create recipes for dependencies if they do not already exist.",
)
cran.add_argument(
'--no-archive',
action='store_false',
dest='archive',
help="Don't include an Archive download url.",
)
cran.add_argument(
"--version-compare",
action='store_true',
help="""Compare the package version of the recipe with the one available
on CRAN. Exits 1 if a newer version is available and 0 otherwise."""
)
cran.add_argument(
"--update-policy",
action='store',
choices=('error',
'skip-up-to-date',
'skip-existing',
'overwrite',
'merge-keep-build-num',
'merge-incr-build-num'),
default='error',
help="""Dictates what to do when existing packages are encountered in the
output directory (set by --output-dir). In the present implementation, the
merge options avoid overwriting bld.bat and build.sh and only manage copying
across patches, and the `build/{number,script_env}` fields. When the version
changes, both merge options reset `build/number` to 0. When the version does
not change they either keep the old `build/number` or else increase it by one."""
)
cran.add_argument(
'-m', '--variant-config-files',
default=cc_conda_build.get('skeleton_config_yaml', None),
help="""Variant config file to add. These yaml files can contain
keys such as `cran_mirror`. Only one can be provided here."""
)
def dict_from_cran_lines(lines):
d = {}
for line in lines:
if not line:
continue
try:
if ': ' in line:
(k, v) = line.split(': ', 1)
else:
# Sometimes fields are included but left blank, e.g.:
# - Enhances in data.tree
# - Suggests in corpcor
(k, v) = line.split(':', 1)
except ValueError:
sys.exit("Error: Could not parse metadata (%s)" % line)
d[k] = v
# if k not in CRAN_KEYS:
# print("Warning: Unknown key %s" % k)
d['orig_lines'] = lines
return d
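# Illustrative sketch of the mapping this produces (hypothetical input):
#   dict_from_cran_lines(['Package: A3', 'Version: 0.9.2'])
#   -> {'Package': 'A3', 'Version': '0.9.2',
#       'orig_lines': ['Package: A3', 'Version: 0.9.2']}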
def remove_package_line_continuations(chunk):
"""
>>> chunk = [
'Package: A3',
'Version: 0.9.2',
'Depends: R (>= 2.15.0), xtable, pbapply',
'Suggests: randomForest, e1071',
'Imports: MASS, R.methodsS3 (>= 1.5.2), R.oo (>= 1.15.8), R.utils (>=',
' 1.27.1), matrixStats (>= 0.8.12), R.filesets (>= 2.3.0), ',
' sampleSelection, scatterplot3d, strucchange, systemfit',
'License: GPL (>= 2)',
'NeedsCompilation: no']
>>> remove_package_line_continuations(chunk)
['Package: A3',
'Version: 0.9.2',
'Depends: R (>= 2.15.0), xtable, pbapply',
'Suggests: randomForest, e1071',
     'Imports: MASS, R.methodsS3 (>= 1.5.2), R.oo (>= 1.15.8), R.utils (>= 1.27.1), matrixStats (>= 0.8.12), R.filesets (>= 2.3.0), sampleSelection, scatterplot3d, strucchange, systemfit',
'License: GPL (>= 2)',
'NeedsCompilation: no']
""" # NOQA
continuation = (' ', '\t')
continued_ix = None
continued_line = None
had_continuation = False
accumulating_continuations = False
chunk.append('')
for (i, line) in enumerate(chunk):
if line.startswith(continuation):
line = ' ' + line.lstrip()
if accumulating_continuations:
assert had_continuation
continued_line += line
chunk[i] = None
else:
accumulating_continuations = True
continued_ix = i - 1
continued_line = chunk[continued_ix] + line
had_continuation = True
chunk[i] = None
else:
if accumulating_continuations:
assert had_continuation
chunk[continued_ix] = continued_line
accumulating_continuations = False
continued_line = None
continued_ix = None
if had_continuation:
# Remove the None(s).
chunk = [c for c in chunk if c]
chunk.append('')
return chunk
def yaml_quote_string(string):
"""
Quote a string for use in YAML.
We can't just use yaml.dump because it adds ellipses to the end of the
string, and it in general doesn't handle being placed inside an existing
document very well.
Note that this function is NOT general.
"""
return yaml.dump(string, Dumper=SafeDumper).replace('\n...\n', '').replace('\n', '\n ')
# Due to how we render the metadata there can be significant areas of repeated newlines.
# This collapses them and also strips any trailing spaces.
def clear_whitespace(string):
lines = []
last_line = ''
for line in string.splitlines():
line = line.rstrip()
if not (line == '' and last_line == ''):
lines.append(line)
last_line = line
return '\n'.join(lines)
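# Illustrative example of the collapsing behaviour (hypothetical input):
#   clear_whitespace('a  \n\n\n\nb ')  ->  'a\n\nb'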
def get_package_metadata(cran_url, package, session):
url = cran_url + '/web/packages/' + package + '/DESCRIPTION'
r = session.get(url)
try:
r.raise_for_status()
except requests.exceptions.HTTPError as e:
if e.response.status_code == 404:
sys.exit("ERROR: %s (404 Not Found)" % url)
raise
DESCRIPTION = r.text
d = dict_from_cran_lines(remove_package_line_continuations(DESCRIPTION.splitlines()))
d['orig_description'] = DESCRIPTION
return d
def get_latest_git_tag(config):
# SO says to use taggerdate instead of committerdate, but that is invalid for lightweight tags.
p = subprocess.Popen(['git', 'for-each-ref',
'refs/tags',
'--sort=-committerdate',
'--format=%(refname:short)',
'--count=1'], stdout=subprocess.PIPE, stderr=subprocess.PIPE,
cwd=config.work_dir)
stdout, stderr = p.communicate()
stdout = stdout.decode('utf-8')
stderr = stderr.decode('utf-8')
if stderr or p.returncode:
sys.exit("Error: git tag failed (%s)" % stderr)
tags = stdout.strip().splitlines()
if not tags:
sys.exit("Error: no tags found")
print("Using tag %s" % tags[-1])
return tags[-1]
def _ssl_no_verify():
"""Gets whether the SSL_NO_VERIFY environment variable is set to 1 or True.
This provides a workaround for users in some corporate environments where
MITM style proxies make it difficult to fetch data over HTTPS.
"""
return environ.get('SSL_NO_VERIFY', '').strip().lower() in ('1', 'true')
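# For example, running with `SSL_NO_VERIFY=1` (or `SSL_NO_VERIFY=true`) in the
# environment makes this return True and disables certificate verification.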
def get_session(output_dir, verbose=True):
session = requests.Session()
session.verify = _ssl_no_verify()
try:
import cachecontrol
import cachecontrol.caches
except ImportError:
if verbose:
print("Tip: install CacheControl and lockfile (conda packages) to cache the "
"CRAN metadata")
else:
session = cachecontrol.CacheControl(session,
cache=cachecontrol.caches.FileCache(join(output_dir,
'.web_cache')))
return session
def get_cran_metadata(cran_url, output_dir, verbose=True):
session = get_session(output_dir, verbose=verbose)
if verbose:
print("Fetching metadata from %s" % cran_url)
r = session.get(cran_url + "/src/contrib/PACKAGES")
r.raise_for_status()
PACKAGES = r.text
package_list = [remove_package_line_continuations(i.splitlines())
for i in PACKAGES.split('\n\n')]
return {d['Package'].lower(): d for d in map(dict_from_cran_lines,
package_list)}
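# The returned mapping is keyed by the lowercased package name, roughly:
#   {'a3': {'Package': 'A3', 'Version': '0.9.2', ..., 'orig_lines': [...]}, ...}
# (illustrative shape only; each value carries every field of the PACKAGES entry)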
def make_array(m, key, allow_empty=False):
result = []
try:
old_vals = m.get_value(key, [])
except:
old_vals = []
if old_vals or allow_empty:
result.append(key.split('/')[-1] + ":")
for old_val in old_vals:
result.append("{indent}{old_val}".format(indent=INDENT, old_val=old_val))
return result
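# Rough shape of the output (illustrative values; `m` is an existing MetaData
# object whose recipe lists one patch under source/patches):
#   make_array(m, 'source/patches')
#   -> ['patches:', INDENT + '0001-fix.patch']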
def existing_recipe_dir(output_dir, output_suffix, package):
result = None
if exists(join(output_dir, package)):
result = normpath(join(output_dir, package))
elif exists(join(output_dir, package + output_suffix)):
result = normpath(join(output_dir, package + output_suffix))
elif exists(join(output_dir, 'r-' + package + output_suffix)):
result = normpath(join(output_dir, 'r-' + package + output_suffix))
return result
def strip_end(string, end):
if string.endswith(end):
return string[:-len(end)]
return string
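# e.g. strip_end('r-jsonlite-feedstock', '-feedstock') -> 'r-jsonlite'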
def package_to_inputs_dict(output_dir, output_suffix, git_tag, package):
"""
Converts `package` (*) into a tuple of:
pkg_name (without leading 'r-')
location (in a subdir of output_dir - may not exist - or at GitHub)
old_git_rev (from existing metadata, so corresponds to the *old* version)
metadata or None (if a recipe does *not* already exist)
(*) `package` could be:
1. A package name beginning (or not) with 'r-'
2. A GitHub URL
3. A file:// URL to a tarball
4. A relative path to a recipe from output_dir
5. An absolute path to a recipe (fatal unless in the output_dir hierarchy)
6. Any of the above ending (or not) in sep or '/'
So this function cleans all that up:
Some packages may be from GitHub but we'd like the user not to have to worry
about that on the command-line (for pre-existing recipes). Also, we may want
to get version information from them (or existing metadata to merge) so lets
load *all* existing recipes (later we will add or replace this metadata with
any that we create).
"""
if isfile(package):
return None
print("Parsing input package %s:" % package)
package = strip_end(package, '/')
package = strip_end(package, sep)
if 'github.com' in package:
package = strip_end(package, '.git')
pkg_name = basename(package).lower()
pkg_name = strip_end(pkg_name, '-feedstock')
if output_suffix:
pkg_name = strip_end(pkg_name, output_suffix)
if pkg_name.startswith('r-'):
pkg_name = pkg_name[2:]
if package.startswith('file://'):
location = package.replace('file://', '')
pkg_filename = basename(location)
pkg_name = re.match(r'(.*)_(.*)', pkg_filename).group(1).lower()
existing_location = existing_recipe_dir(output_dir, output_suffix, 'r-' + pkg_name)
elif isabs(package):
commp = commonprefix((package, output_dir))
if commp != output_dir:
raise RuntimeError("package %s specified with abs path outside of output-dir %s" % (
package, output_dir))
location = package
existing_location = existing_recipe_dir(output_dir, output_suffix, 'r-' + pkg_name)
elif 'github.com' in package:
location = package
existing_location = existing_recipe_dir(output_dir, output_suffix, 'r-' + pkg_name)
else:
location = existing_location = existing_recipe_dir(output_dir, output_suffix, package)
if existing_location:
try:
m = metadata.MetaData(existing_location)
except:
# Happens when the folder exists but contains no recipe.
m = None
else:
m = None
# It can still be the case that a package without 'github.com' in the location does really
# come from there, for that we need to inspect the existing metadata's source/git_url.
old_git_rev = git_tag
if location and m and 'github.com' not in location:
git_url = m.get_value('source/git_url', '')
if 'github.com' in git_url:
location = git_url
old_git_rev = m.get_value('source/git_rev', None)
new_location = join(output_dir, 'r-' + pkg_name + output_suffix)
print(".. name: %s location: %s new_location: %s" % (pkg_name, location, new_location))
return {'pkg-name': pkg_name,
'location': location,
'old-git-rev': old_git_rev,
'old-metadata': m,
'new-location': new_location}
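# For a plain CRAN name with no pre-existing recipe, the result is roughly
# (hypothetical values, POSIX paths assumed):
#   package_to_inputs_dict('.', '-feedstock/recipe', None, 'r-ggplot2')
#   -> {'pkg-name': 'ggplot2', 'location': None, 'old-git-rev': None,
#       'old-metadata': None, 'new-location': './r-ggplot2-feedstock/recipe'}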
def get_available_binaries(cran_url, details):
import requests
from bs4 import BeautifulSoup
def get_url_paths(url, ext='', params={}):
response = requests.get(url, params=params)
if response.ok:
response_text = response.text
else:
return response.raise_for_status()
soup = BeautifulSoup(response_text, 'html.parser')
parent = [url + node.get('href') for node in soup.find_all('a') if node.get('href').endswith(ext)]
return parent
url = cran_url + '/' + details['dir']
ext = details['ext']
result = get_url_paths(url, ext)
for p in result:
filename = basename(p)
pkg, _, ver = filename.rpartition('_')
ver, _, _ = ver.rpartition(ext)
        # Keep (version, url) pairs together so callers can index [-1][0] / [-1][1].
        if pkg.lower() in details['binaries']:
            details['binaries'][pkg.lower()].append((ver, p))
        else:
            details['binaries'][pkg.lower()] = [(ver, p)]
def skeletonize(in_packages, output_dir=".", output_suffix="", add_maintainer=None, version=None,
git_tag=None, cran_url=None, recursive=False, archive=True,
version_compare=False, update_policy='', r_interp='r-base', use_binaries_ver=None,
use_noarch_generic=False, use_when_no_binary='src', use_rtools_win=False, config=None,
variant_config_files=None):
if use_when_no_binary != 'error' and \
use_when_no_binary != 'src' and \
use_when_no_binary != 'old' and \
use_when_no_binary != 'old-src':
print("ERROR: --use_when_no_binary={} not yet implemented".format(use_when_no_binary))
sys.exit(1)
output_dir = realpath(output_dir)
config = get_or_merge_config(config, variant_config_files=variant_config_files)
if not cran_url:
with TemporaryDirectory() as t:
_variant = get_package_variants(t, config)[0]
cran_url = ensure_list(_variant.get('cran_mirror', DEFAULT_VARIANTS['cran_mirror']))[0]
if len(in_packages) > 1 and version_compare:
raise ValueError("--version-compare only works with one package at a time")
if update_policy == 'error' and not in_packages:
raise ValueError("At least one package must be supplied")
package_dicts = {}
package_list = []
cran_url = cran_url.rstrip('/')
cran_metadata = get_cran_metadata(cran_url, output_dir)
cran_layout_template = \
{'source': {'selector': '{others}',
'dir': 'src/contrib/',
'ext': '.tar.gz',
# If we had platform filters we would change this to:
# build_for_linux or is_github_url or is_tarfile
'use_this': True},
'win-64': {'selector': 'win64',
'dir': 'bin/windows/contrib/{}/'.format(use_binaries_ver),
'ext': '.zip',
'use_this': True if use_binaries_ver else False},
'osx-64': {'selector': 'osx',
'dir': 'bin/macosx/el-capitan/contrib/{}/'.format(
use_binaries_ver),
'ext': '.tgz',
'use_this': True if use_binaries_ver else False}}
# Figure out what binaries are available once:
for archive_type, archive_details in iteritems(cran_layout_template):
archive_details['binaries'] = dict()
if archive_type != 'source' and archive_details['use_this']:
get_available_binaries(cran_url, archive_details)
for package in in_packages:
inputs_dict = package_to_inputs_dict(output_dir, output_suffix, git_tag, package)
if inputs_dict:
package_dicts.update({inputs_dict['pkg-name']: {'inputs': inputs_dict}})
for package_name, package_dict in package_dicts.items():
package_list.append(package_name)
while package_list:
inputs = package_dicts[package_list.pop()]['inputs']
location = inputs['location']
pkg_name = inputs['pkg-name']
is_github_url = location and 'github.com' in location
is_tarfile = location and isfile(location) and tarfile.is_tarfile(location)
url = inputs['location']
dir_path = inputs['new-location']
print("Making/refreshing recipe for {}".format(pkg_name))
# Bodges GitHub packages into cran_metadata
if is_github_url or is_tarfile:
rm_rf(config.work_dir)
if is_github_url:
m = metadata.MetaData.fromdict({'source': {'git_url': location}}, config=config)
source.git_source(m.get_section('source'), m.config.git_cache, m.config.work_dir)
new_git_tag = git_tag if git_tag else get_latest_git_tag(config)
p = subprocess.Popen(['git', 'checkout', new_git_tag], stdout=subprocess.PIPE,
stderr=subprocess.PIPE, cwd=config.work_dir)
stdout, stderr = p.communicate()
stdout = stdout.decode('utf-8')
stderr = stderr.decode('utf-8')
if p.returncode:
sys.exit("Error: 'git checkout %s' failed (%s).\nInvalid tag?" %
(new_git_tag, stderr.strip()))
if stdout:
print(stdout, file=sys.stdout)
if stderr:
print(stderr, file=sys.stderr)
else:
m = metadata.MetaData.fromdict({'source': {'url': location}}, config=config)
source.unpack(m.get_section('source'), m.config.work_dir, m.config.src_cache,
output_dir, m.config.work_dir)
DESCRIPTION = join(config.work_dir, "DESCRIPTION")
if not isfile(DESCRIPTION):
sub_description_pkg = join(config.work_dir, 'pkg', "DESCRIPTION")
sub_description_name = join(config.work_dir, location.split('/')[-1], "DESCRIPTION")
if isfile(sub_description_pkg):
DESCRIPTION = sub_description_pkg
elif isfile(sub_description_name):
DESCRIPTION = sub_description_name
else:
sys.exit("%s does not appear to be a valid R package "
"(no DESCRIPTION file in %s, %s)"
% (location, sub_description_pkg, sub_description_name))
with open(DESCRIPTION) as f:
description_text = clear_whitespace(f.read())
d = dict_from_cran_lines(remove_package_line_continuations(
description_text.splitlines()))
d['orig_description'] = description_text
package = d['Package'].lower()
cran_metadata[package] = d
else:
package = pkg_name
if pkg_name not in cran_metadata:
sys.exit("Package %s not found" % pkg_name)
# Make sure package always uses the CRAN capitalization
package = cran_metadata[package.lower()]['Package']
if not is_github_url and not is_tarfile:
session = get_session(output_dir)
cran_metadata[package.lower()].update(get_package_metadata(cran_url,
package, session))
cran_package = cran_metadata[package.lower()]
package_dicts[package.lower()].update(
{
'cran_packagename': package,
'packagename': 'r-' + package.lower(),
'patches': '',
'build_number': 0,
'build_depends': '',
'host_depends': '',
'run_depends': '',
# CRAN doesn't seem to have this metadata :(
'home_comment': '#',
'homeurl': '',
'summary_comment': '#',
'summary': '',
})
d = package_dicts[package.lower()]
d['binary1'] = ''
d['binary2'] = ''
if version:
d['version'] = version
raise NotImplementedError("Package versions from CRAN are not yet implemented")
d['cran_version'] = cran_package['Version']
# Conda versions cannot have -. Conda (verlib) will treat _ as a .
d['conda_version'] = d['cran_version'].replace('-', '_')
if version_compare:
sys.exit(not version_compare(dir_path, d['conda_version']))
patches = []
script_env = []
extra_recipe_maintainers = []
build_number = 0
if update_policy.startswith('merge') and inputs['old-metadata']:
m = inputs['old-metadata']
patches = make_array(m, 'source/patches')
script_env = make_array(m, 'build/script_env')
extra_recipe_maintainers = make_array(m, 'extra/recipe-maintainers', add_maintainer)
if m.version() == d['conda_version']:
build_number = int(m.get_value('build/number', 0))
build_number += 1 if update_policy == 'merge-incr-build-num' else 0
if add_maintainer:
new_maintainer = "{indent}{add_maintainer}".format(indent=INDENT,
add_maintainer=add_maintainer)
if new_maintainer not in extra_recipe_maintainers:
if not len(extra_recipe_maintainers):
# We hit this case when there is no existing recipe.
extra_recipe_maintainers = make_array({}, 'extra/recipe-maintainers', True)
extra_recipe_maintainers.append(new_maintainer)
if len(extra_recipe_maintainers):
            # Sort the maintainer entries in place, keeping the header element first.
            extra_recipe_maintainers[1:] = sorted(extra_recipe_maintainers[1:])
extra_recipe_maintainers.insert(0, "extra:\n ")
d['extra_recipe_maintainers'] = ''.join(extra_recipe_maintainers)
d['patches'] = ''.join(patches)
d['script_env'] = ''.join(script_env)
d['build_number'] = build_number
cached_path = None
cran_layout = copy.deepcopy(cran_layout_template)
available = {}
for archive_type, archive_details in iteritems(cran_layout):
contrib_url = ''
archive_details['cran_version'] = d['cran_version']
archive_details['conda_version'] = d['conda_version']
            available_artefact = True if archive_type == 'source' else \
                package in archive_details['binaries'] and \
                d['cran_version'] in archive_details['binaries'][package][0]
            if not available_artefact:
if use_when_no_binary == 'error':
print("ERROR: --use-when-no-binary is error (and there is no binary)")
sys.exit(1)
elif use_when_no_binary.startswith('old'):
if package not in archive_details['binaries']:
if use_when_no_binary.endswith('src'):
                            available_artefact = False
archive_details['use_this'] = False
continue
else:
print("ERROR: No binary nor old binary found "
"(maybe pass --use-when-no-binary=old-src to fallback to source?)")
sys.exit(1)
# Version needs to be stored in archive_details.
archive_details['cranurl'] = archive_details['binaries'][package][-1][1]
archive_details['conda_version'] = archive_details['binaries'][package][-1][0]
archive_details['cran_version'] = archive_details['conda_version'].replace('_', '-')
                    available_artefact = True
# We may need to inspect the file later to determine which compilers are needed.
cached_path = None
sha256 = hashlib.sha256()
            if archive_details['use_this'] and available_artefact:
if is_tarfile:
filename = basename(location)
contrib_url = relpath(location, dir_path)
contrib_url_rendered = package_url = contrib_url
cached_path = location
elif not is_github_url or archive_type != 'source':
filename_rendered = '{}_{}{}'.format(
package, archive_details['cran_version'], archive_details['ext'])
filename = '{}_{{{{ version }}}}'.format(package) + archive_details['ext']
contrib_url = '{{{{ cran_mirror }}}}/{}'.format(archive_details['dir'])
contrib_url_rendered = cran_url + '/{}'.format(archive_details['dir'])
package_url = contrib_url_rendered + filename_rendered
print("Downloading {} from {}".format(archive_type, package_url))
try:
cached_path, _ = source.download_to_cache(
config.src_cache, '',
{'url': package_url, 'fn': archive_type + '-' + filename_rendered})
except:
print("logic error, file {} should exist, we found it in a dir listing earlier."
.format(package_url))
sys.exit(1)
available_details = {}
available_details['selector'] = archive_details['selector']
available_details['cran_version'] = archive_details['cran_version']
available_details['conda_version'] = archive_details['conda_version']
if cached_path:
sha256.update(open(cached_path, 'rb').read())
archive_details['cranurl'] = package_url
available_details['filename'] = filename
available_details['contrib_url'] = contrib_url
available_details['contrib_url_rendered'] = contrib_url_rendered
available_details['hash_entry'] = 'sha256: {}'.format(sha256.hexdigest())
available_details['cached_path'] = cached_path
# This is rubbish; d[] should be renamed global[] and should be
# merged into source and binaryN.
if archive_type == 'source':
if is_github_url:
available_details['url_key'] = ''
available_details['git_url_key'] = 'git_url:'
available_details['git_tag_key'] = 'git_tag:'
hash_msg = '# You can add a hash for the file here, (md5, sha1 or sha256)'
available_details['hash_entry'] = hash_msg
available_details['filename'] = ''
available_details['cranurl'] = ''
available_details['git_url'] = url
available_details['git_tag'] = new_git_tag
available_details['archive_keys'] = ''
else:
available_details['url_key'] = 'url:'
available_details['git_url_key'] = ''
available_details['git_tag_key'] = ''
available_details['cranurl'] = ' ' + contrib_url + filename
available_details['git_url'] = ''
available_details['git_tag'] = ''
else:
available_details['cranurl'] = archive_details['cranurl']
available_details['patches'] = d['patches']
available[archive_type] = available_details
# Figure out the selectors according to what is available.
_all = ['linux', 'win32', 'win64', 'osx']
from_source = _all[:]
binary_id = 1
for archive_type, archive_details in iteritems(available):
if archive_type == 'source':
for k, v in iteritems(archive_details):
d[k] = v
else:
sel = archive_details['selector']
# Does the file exist? If not we need to build from source.
from_source.remove(sel)
binary_id += 1
if from_source == _all:
sel_src = ""
sel_src_and_win = ' # [win]'
sel_src_not_win = ' # [not win]'
else:
sel_src = ' # [' + ' or '.join(from_source) + ']'
sel_src_and_win = ' # [' + ' or '.join(fs for fs in from_source if
fs.startswith('win')) + ']'
sel_src_not_win = ' # [' + ' or '.join(fs for fs in from_source if not
fs.startswith('win')) + ']'
d['sel_src'] = sel_src
d['sel_src_and_win'] = sel_src_and_win
d['sel_src_not_win'] = sel_src_not_win
d['from_source'] = from_source
if 'source' in available:
available_details = available['source']
available_details['sel'] = sel_src
filename = available_details['filename']
if 'contrib_url' in available_details:
contrib_url = available_details['contrib_url']
if archive:
if is_tarfile:
available_details['cranurl'] = (INDENT + contrib_url)
else:
available_details['cranurl'] = (INDENT + contrib_url +
filename + sel_src + INDENT + contrib_url +
'Archive/{}/'.format(package) + filename + sel_src)
else:
available_details['cranurl'] = ' ' + contrib_url + filename + sel_src
if not is_github_url:
available_details['archive_keys'] = '{url_key}{sel}' \
' {cranurl}\n' \
' {hash_entry}{sel}'.format(
**available_details)
d['cran_metadata'] = '\n'.join(['# %s' % l for l in
cran_package['orig_lines'] if l])
# Render the source and binaryN keys
binary_id = 1
d['version_binary1'] = d['version_binary2'] = ""
for archive_type, archive_details in iteritems(available):
if archive_type == 'source':
d['source'] = SOURCE_META.format(**archive_details)
d['version_source'] = VERSION_META.format(**archive_details)
else:
archive_details['sel'] = ' # [' + archive_details['selector'] + ']'
d['binary' + str(binary_id)] = BINARY_META.format(**archive_details)
d['version_binary' + str(binary_id)] = VERSION_META.format(**archive_details)
binary_id += 1
# XXX: We should maybe normalize these
d['license'] = cran_package.get("License", "None")
d['license_family'] = guess_license_family(d['license'], allowed_license_families)
if 'License_is_FOSS' in cran_package:
d['license'] += ' (FOSS)'
if cran_package.get('License_restricts_use') == 'yes':
d['license'] += ' (Restricts use)'
if "URL" in cran_package:
d['home_comment'] = ''
d['homeurl'] = ' ' + yaml_quote_string(cran_package['URL'])
else:
# use CRAN page as homepage if nothing has been specified
d['home_comment'] = ''
if is_github_url:
d['homeurl'] = ' {}'.format(location)
else:
d['homeurl'] = ' https://CRAN.R-project.org/package={}'.format(package)
if not use_noarch_generic or cran_package.get("NeedsCompilation", 'no') == 'yes':
d['noarch_generic'] = ''
else:
d['noarch_generic'] = 'noarch: generic'
if 'Description' in cran_package:
d['summary_comment'] = ''
d['summary'] = ' ' + yaml_quote_string(cran_package['Description'])
if "Suggests" in cran_package:
d['suggests'] = "# Suggests: %s" % cran_package['Suggests']
else:
d['suggests'] = ''
# Every package depends on at least R.
# I'm not sure what the difference between depends and imports is.
depends = [s.strip() for s in cran_package.get('Depends',
'').split(',') if s.strip()]
imports = [s.strip() for s in cran_package.get('Imports',
'').split(',') if s.strip()]
links = [s.strip() for s in cran_package.get("LinkingTo",
'').split(',') if s.strip()]
dep_dict = {}
seen = set()
for s in list(chain(imports, depends, links)):
match = VERSION_DEPENDENCY_REGEX.match(s)
if not match:
sys.exit("Could not parse version from dependency of %s: %s" %
(package, s))
name = match.group('name')
if name in seen:
continue
seen.add(name)
archs = match.group('archs')
relop = match.group('relop') or ''
ver = match.group('version') or ''
ver = ver.replace('-', '_')
# If there is a relop there should be a version
assert not relop or ver
if archs:
sys.exit("Don't know how to handle archs from dependency of "
"package %s: %s" % (package, s))
dep_dict[name] = '{relop}{version}'.format(relop=relop, version=ver)
if 'R' not in dep_dict:
dep_dict['R'] = ''
need_git = is_github_url
if cran_package.get("NeedsCompilation", 'no') == 'yes':
with tarfile.open(available['source']['cached_path']) as tf:
need_f = any([f.name.lower().endswith(('.f', '.f90', '.f77')) for f in tf])
# Fortran builds use CC to perform the link (they do not call the linker directly).
need_c = True if need_f else \
any([f.name.lower().endswith('.c') for f in tf])
need_cxx = any([f.name.lower().endswith(('.cxx', '.cpp', '.cc', '.c++'))
for f in tf])
need_autotools = any([f.name.lower().endswith('/configure') for f in tf])
need_make = True if any((need_autotools, need_f, need_cxx, need_c)) else \
any([f.name.lower().endswith(('/makefile', '/makevars'))
for f in tf])
else:
need_c = need_cxx = need_f = need_autotools = need_make = False
if 'Rcpp' in dep_dict or 'RcppArmadillo' in dep_dict:
need_cxx = True
if need_cxx:
need_c = True
for dep_type in ['build', 'host', 'run']:
deps = []
# Put non-R dependencies first.
if dep_type == 'build':
if need_c:
deps.append("{indent}{{{{ compiler('c') }}}} {sel}".format(
indent=INDENT, sel=sel_src_not_win))
if need_cxx:
deps.append("{indent}{{{{ compiler('cxx') }}}} {sel}".format(
indent=INDENT, sel=sel_src_not_win))
if need_f:
deps.append("{indent}{{{{ compiler('fortran') }}}}{sel}".format(
indent=INDENT, sel=sel_src_not_win))
if use_rtools_win:
need_c = need_cxx = need_f = need_autotools = need_make = False
deps.append("{indent}rtools {sel}".format(
indent=INDENT, sel=sel_src_and_win))
# extsoft is legacy. R packages will download rwinlib subprojects
# as necessary according to Jeroen Ooms. (may need to disable that
# for non-MRO builds or maybe switch to Jeroen's toolchain?)
# deps.append("{indent}{{{{native}}}}extsoft {sel}".format(
# indent=INDENT, sel=sel_src_and_win))
if need_c or need_cxx or need_f:
deps.append("{indent}{{{{native}}}}toolchain {sel}".format(
indent=INDENT, sel=sel_src_and_win))
if need_autotools or need_make or need_git:
deps.append("{indent}{{{{posix}}}}filesystem {sel}".format(
indent=INDENT, sel=sel_src_and_win))
if need_git:
deps.append("{indent}{{{{posix}}}}git".format(indent=INDENT))
if need_autotools:
deps.append("{indent}{{{{posix}}}}sed {sel}".format(
indent=INDENT, sel=sel_src_and_win))
deps.append("{indent}{{{{posix}}}}grep {sel}".format(
indent=INDENT, sel=sel_src_and_win))
deps.append("{indent}{{{{posix}}}}autoconf {sel}".format(
indent=INDENT, sel=sel_src))
deps.append("{indent}{{{{posix}}}}automake {sel}".format(
indent=INDENT, sel=sel_src_not_win))
deps.append("{indent}{{{{posix}}}}automake-wrapper{sel}".format(
indent=INDENT, sel=sel_src_and_win))
deps.append("{indent}{{{{posix}}}}pkg-config".format(indent=INDENT))
if need_make:
deps.append("{indent}{{{{posix}}}}make {sel}".format(
indent=INDENT, sel=sel_src))
if not need_autotools:
deps.append("{indent}{{{{posix}}}}sed {sel}".format(
indent=INDENT, sel=sel_src_and_win))
deps.append("{indent}{{{{posix}}}}coreutils {sel}".format(
indent=INDENT, sel=sel_src_and_win))
deps.append("{indent}{{{{posix}}}}zip {sel}".format(
indent=INDENT, sel=sel_src_and_win))
elif dep_type == 'run':
if need_c or need_cxx or need_f:
deps.append("{indent}{{{{native}}}}gcc-libs {sel}".format(
indent=INDENT, sel=sel_src_and_win))
if dep_type == 'host' or dep_type == 'run':
for name in sorted(dep_dict):
if name in R_BASE_PACKAGE_NAMES:
continue
if name == 'R':
# Put R first
# Regarless of build or run, and whether this is a
# recommended package or not, it can only depend on
# r_interp since anything else can and will cause
# cycles in the dependency graph. The cran metadata
# lists all dependencies anyway, even those packages
# that are in the recommended group.
# We don't include any R version restrictions because
# conda-build always pins r-base and mro-base version.
deps.insert(0, '{indent}{r_name}'.format(indent=INDENT, r_name=r_interp))
else:
conda_name = 'r-' + name.lower()
if dep_dict[name]:
deps.append('{indent}{name} {version}'.format(name=conda_name,
version=dep_dict[name], indent=INDENT))
else:
deps.append('{indent}{name}'.format(name=conda_name,
indent=INDENT))
if recursive:
lower_name = name.lower()
if lower_name not in package_dicts:
inputs_dict = package_to_inputs_dict(output_dir, output_suffix,
git_tag, lower_name)
assert lower_name == inputs_dict['pkg-name'], \
"name %s != inputs_dict['pkg-name'] %s" % (
name, inputs_dict['pkg-name'])
assert lower_name not in package_list
package_dicts.update({lower_name: {'inputs': inputs_dict}})
package_list.append(lower_name)
d['%s_depends' % dep_type] = ''.join(deps)
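# For reference, at this point d['host_depends'] / d['run_depends'] hold strings of
# indented list entries that get substituted into the CRAN_META template below.
# For a package depending on R and Rcpp (>= 1.0) they would render roughly as
# (illustrative only; the exact INDENT prefix is defined elsewhere in this module):
#   - r-base
#   - r-rcpp >=1.0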
for package in package_dicts:
d = package_dicts[package]
dir_path = d['inputs']['new-location']
if exists(dir_path) and not version_compare:
if update_policy == 'error':
raise RuntimeError("directory already exists "
"(and --update-policy is 'error'): %s" % dir_path)
elif update_policy == 'overwrite':
rm_rf(dir_path)
elif update_policy == 'skip-up-to-date' and up_to_date(cran_metadata,
d['inputs']['old-metadata']):
continue
elif update_policy == 'skip-existing' and d['inputs']['old-metadata']:
continue
from_sources = d['from_source']
# Normalize the metadata values
d = {k: unicodedata.normalize("NFKD", text_type(v)).encode('ascii', 'ignore')
.decode() for k, v in iteritems(d)}
try:
makedirs(join(dir_path))
except:
pass
print("Writing recipe for %s" % package.lower())
with open(join(dir_path, 'meta.yaml'), 'w') as f:
f.write(clear_whitespace(CRAN_META.format(**d)))
if not exists(join(dir_path, 'build.sh')) or update_policy == 'overwrite':
with open(join(dir_path, 'build.sh'), 'w') as f:
if from_sources == all:
f.write(CRAN_BUILD_SH_SOURCE.format(**d))
elif from_sources == []:
f.write(CRAN_BUILD_SH_BINARY.format(**d))
else:
tpbt = [target_platform_bash_test_by_sel[t] for t in from_sources]
d['source_pf_bash'] = ' || '.join(['[[ $target_platform ' + s + ' ]]'
for s in tpbt])
f.write(CRAN_BUILD_SH_MIXED.format(**d))
if not exists(join(dir_path, 'bld.bat')) or update_policy == 'overwrite':
with open(join(dir_path, 'bld.bat'), 'w') as f:
if len([fs for fs in from_sources if fs.startswith('win')]) == 2:
f.write(CRAN_BLD_BAT_SOURCE.format(**d))
else:
f.write(CRAN_BLD_BAT_MIXED.format(**d))
def version_compare(recipe_dir, newest_conda_version):
m = metadata.MetaData(recipe_dir)
local_version = m.version()
package = basename(recipe_dir)
print("Local recipe for %s has version %s." % (package, local_version))
print("The version on CRAN for %s is %s." % (package, newest_conda_version))
return local_version == newest_conda_version
def get_outdated(output_dir, cran_metadata, packages=()):
to_update = []
recipes = listdir(output_dir)
for recipe in recipes:
if not recipe.startswith('r-') or not isdir(recipe):
continue
recipe_name = recipe[2:]
if packages and not (recipe_name in packages or recipe in packages):
continue
if recipe_name not in cran_metadata:
print("Skipping %s, not found on CRAN" % recipe)
continue
version_compare(join(output_dir, recipe),
cran_metadata[recipe_name]['Version'].replace('-', '_'))
print("Updating %s" % recipe)
to_update.append(recipe_name)
return to_update
def get_existing(output_dir, cran_metadata, packages=()):
existing = []
recipes = listdir(output_dir)
for recipe in recipes:
if not recipe.startswith('r-') or not isdir(recipe):
continue
recipe_name = recipe[2:]
if packages and not (recipe_name in packages or recipe in packages):
continue
existing.append(recipe_name)
return existing
def up_to_date(cran_metadata, package):
r_pkg_name, location, old_git_rev, m = package
cran_pkg_name = r_pkg_name[2:]
# Does not exist, so is not up to date.
if not m:
return False
# For now. We can do better; need to collect *all* information upfront.
if 'github.com' in location:
return False
else:
if cran_pkg_name not in cran_metadata:
return False
if m.version() != cran_metadata[cran_pkg_name]['Version'].replace('-', '_'):
return False
return True
| 40.584637 | 193 | 0.569451 |
4a27a3281c4846825986bfafd73e85eb31770945 | 637 | py | Python | snake/rl/player.py | barryzhai/snake | c749e101a374b7e0a60151e8128cee80a686b6b1 | [
"MIT"
] | null | null | null | snake/rl/player.py | barryzhai/snake | c749e101a374b7e0a60151e8128cee80a686b6b1 | [
"MIT"
] | null | null | null | snake/rl/player.py | barryzhai/snake | c749e101a374b7e0a60151e8128cee80a686b6b1 | [
"MIT"
] | null | null | null | from snake.base.direc import Direc
from snake.rl.nnet_wrapper import NNetWrapper
from snake.game import PureGame
from snake.utils import dotdict
import numpy as np
class NNPlayer(NNetWrapper):
def __init__(self, game: PureGame, args: dotdict):
super().__init__(game, args)
self.load_checkpoint(args.checkpoint, 'temp.pth.tar')
def best_action(self) -> None:
current_state = self.game._get_state()
pi, v = self.predict(current_state)
best_a_index = int(np.argwhere(pi == np.max(pi)))
print('action %s' % Direc(best_a_index))
self.game.action(Direc(best_a_index))
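# Minimal usage sketch (assumes a configured PureGame and a dotdict of NNet args
# whose 'checkpoint' entry points at the directory holding 'temp.pth.tar'):
#
# game = PureGame(...)  # constructor arguments depend on the game setup
# args = dotdict({'checkpoint': './temp/'})
# player = NNPlayer(game, args)
# player.best_action()
#
# Note: int(np.argwhere(pi == np.max(pi))) raises a TypeError when the policy has
# tied maxima; int(np.argmax(pi)) is the equivalent, tie-tolerant form.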
| 33.526316 | 61 | 0.687598 |
4a27a39ff9b434429a34104410cffe5befb0316f | 6,646 | py | Python | spectral/tests/database.py | wwlswj/spectral | e886e4d9f8c34f512c8e81867f0de76e15550572 | [
"MIT"
] | 398 | 2015-01-16T14:55:20.000Z | 2022-03-29T04:13:00.000Z | spectral/tests/database.py | wwlswj/spectral | e886e4d9f8c34f512c8e81867f0de76e15550572 | [
"MIT"
] | 108 | 2015-01-20T15:39:17.000Z | 2022-02-23T09:59:55.000Z | spectral/tests/database.py | wwlswj/spectral | e886e4d9f8c34f512c8e81867f0de76e15550572 | [
"MIT"
] | 123 | 2015-03-25T10:15:54.000Z | 2022-03-06T14:24:21.000Z | '''
Runs unit tests of functions associated with spectral databases.
To run the unit tests, type the following from the system command line:
# python -m spectral.tests.database
Note that the ECOSTRESS database must be requested, so if the data files are
not located on the local file system, these tests will be skipped.
'''
from __future__ import absolute_import, division, print_function, unicode_literals
import numpy as np
import os
from numpy.testing import assert_almost_equal
import spectral as spy
from spectral.io.aviris import read_aviris_bands
from spectral.tests import testdir
from spectral.tests.spytest import SpyTest
ECOSTRESS_DATA_DIR = os.path.join(os.path.split(__file__)[0],
'data/ecostress')
ECOSTRESS_DB = os.path.join(testdir, 'ecostress.db')
USGS_DATA_DIR = os.path.join(os.path.split(__file__)[0],
'data/usgs/ASCIIdata')
USGS_DB = os.path.join(testdir, 'usgs.db')
AVIRIS_BAND_FILE = os.path.join(os.path.split(__file__)[0], 'data/92AV3C.spc')
class ECOSTRESSDatabaseCreationTest(SpyTest):
'''Tests ECOSTRESS database creation from text files.'''
def __init__(self):
pass
def setup(self):
if not os.path.isdir(testdir):
os.makedirs(testdir)
if os.path.exists(ECOSTRESS_DB):
os.remove(ECOSTRESS_DB)
def test_create_database(self):
'''Test creating new database from ECOSTRESS data files.'''
db = spy.EcostressDatabase.create(ECOSTRESS_DB,
ECOSTRESS_DATA_DIR)
assert(list(db.query('SELECT COUNT() FROM Spectra'))[0][0] == 3)
class ECOSTRESSDatabaseTest(SpyTest):
'''Tests that ECOSTRESS database works properly'''
def __init__(self):
pass
def setup(self):
self.db = spy.EcostressDatabase(ECOSTRESS_DB)
def test_read_signatures(self):
'''Can get spectra from the opened database.'''
assert(list(self.db.query('SELECT COUNT() FROM Spectra'))[0][0] == 3)
def test_create_envi_lib(self):
'''Can resample spectra and create an ENVI spectral library.'''
bands = read_aviris_bands(AVIRIS_BAND_FILE)
cursor = self.db.query('SELECT SpectrumID FROM Spectra')
ids = [r[0] for r in cursor]
bands.centers = [x / 1000. for x in bands.centers]
bands.bandwidths = [x / 1000. for x in bands.bandwidths]
slib = self.db.create_envi_spectral_library(ids, bands)
assert(slib.spectra.shape == (3, 220))
class USGSDatabaseCreationTest(SpyTest):
'''Tests USGS database creation from text files.'''
def __init__(self):
pass
def setup(self):
if not os.path.isdir(testdir):
os.makedirs(testdir)
if os.path.exists(USGS_DB):
os.remove(USGS_DB)
def test_create_database(self):
'''Test creating new database from USGS data files.'''
db = spy.USGSDatabase.create(USGS_DB, USGS_DATA_DIR)
assert(list(db.query('SELECT COUNT() FROM Samples'))[0][0] == 8)
assert(list(db.query('SELECT COUNT() FROM SpectrometerData'))
[0][0] == 13)
class USGSDatabaseTest(SpyTest):
'''Tests that USGS database works properly'''
def __init__(self):
pass
def setup(self):
self.db = spy.USGSDatabase(USGS_DB)
def test_read_signatures(self):
'''Can get spectra from the opened database.'''
assert(list(self.db.query('SELECT COUNT() FROM Samples'))[0][0] == 8)
assert(list(self.db.query('SELECT COUNT() FROM SpectrometerData'))
[0][0] == 13)
some_sample = list(self.db.query('''SELECT Chapter, FileName,
AssumedWLSpmeterDataID,
NumValues, MinValue, MaxValue
FROM Samples
WHERE LibName='liba' AND Record=1 AND
Description='Material a b0 0 ASDFRa AREF' AND
Spectrometer='ASDFR' AND Purity='a' AND MeasurementType='AREF'
'''))[0]
assert(some_sample[0] == 'ChapterB_b0')
assert(some_sample[1] == 'liba_Material_a_b0_0_ASDFRa_AREF.txt')
assert(some_sample[3] == 24)
assert_almost_equal(some_sample[4], 0.33387077)
assert_almost_equal(some_sample[5], 0.51682192)
some_spectrometer_data = list(self.db.query('''SELECT LibName, Record, MeasurementType, Unit,
Name, Description, FileName, NumValues, MinValue, MaxValue
FROM SpectrometerData
WHERE SpectrometerDataID=?
''', (some_sample[2],)))[0]
assert(some_spectrometer_data[0] == 'liba')
assert(some_spectrometer_data[1] == 13)
assert(some_spectrometer_data[2] == 'Wavelengths')
assert(some_spectrometer_data[3] == 'micrometer')
assert(some_spectrometer_data[4] == 'ASD')
assert(some_spectrometer_data[5] == 'Wavelengths ASD 0.35-2.5 um')
assert(some_spectrometer_data[6] ==
'liba_Wavelengths_ASD_0.35-2.5_um.txt')
assert(some_spectrometer_data[7] == 24)
assert_almost_equal(some_spectrometer_data[8], 0.35)
assert_almost_equal(some_spectrometer_data[9], 2.5)
def test_get_spectrum(self):
some_sample_id = list(self.db.query('''SELECT SampleID
FROM Samples
WHERE LibName='libc' AND Description='Material D 2 AVIRISb RTGC'
'''))[0][0]
(x, y) = self.db.get_spectrum(some_sample_id)
assert(len(x) == len(y))
assert(len(y) == 7)
assert_almost_equal(y[0], 0.010381651)
assert_almost_equal(x[-1], 2.2020326)
def test_create_envi_lib(self):
'''Can resample spectra and create an ENVI spectral library.'''
bands = read_aviris_bands(AVIRIS_BAND_FILE)
cursor = self.db.query('SELECT SampleID FROM Samples')
ids = [r[0] for r in cursor]
bands.centers = [x / 1000. for x in bands.centers]
bands.bandwidths = [x / 1000. for x in bands.bandwidths]
slib = self.db.create_envi_spectral_library(ids, bands)
assert(slib.spectra.shape == (8, 220))
def run():
print('\n' + '-' * 72)
print('Running database tests.')
print('-' * 72)
for T in [ECOSTRESSDatabaseCreationTest, ECOSTRESSDatabaseTest, USGSDatabaseCreationTest, USGSDatabaseTest]:
T().run()
if __name__ == '__main__':
from spectral.tests.run import parse_args, reset_stats, print_summary
import logging
logging.getLogger('spectral').setLevel(logging.ERROR)
parse_args()
reset_stats()
run()
print_summary()
| 37.128492 | 112 | 0.640686 |
4a27a42784d6e858d40cc688f150ff509e0f3d54 | 1,469 | py | Python | old/query.py | yxtj/VideoServing | 52d1c1c97021f11cc4d77c181ac1144fe3a789ce | [
"MIT"
] | null | null | null | old/query.py | yxtj/VideoServing | 52d1c1c97021f11cc4d77c181ac1144fe3a789ce | [
"MIT"
] | null | null | null | old/query.py | yxtj/VideoServing | 52d1c1c97021f11cc4d77c181ac1144fe3a789ce | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
import re
class Query():
def __init__(self, task_id, period=None, channel=0, other=None):
self.tid = task_id
if period is None:
self.time_start = 0
self.time_end = None
else:
l = period.split('-')
assert 1 <= len(l) <= 2
if len(l) == 2:
self.time_start_hms = Query.ParseTime(l[0])
self.time_end_hms = Query.ParseTime(l[1])
else:
self.time_start_hms = Query.ParseTime(l[0])
self.time_end_hms = self.time_start
self.time_start = Query.HMS2second(self.time_start_hms)
self.time_end = Query.HMS2second(self.time_end_hms)
self.channel = channel
self.other = other
# parse arguments
def __repr__(self):
return 'Query(task=%d, channel=%d, period=%d-%d)' % \
(self.tid, self.channel, self.time_start, self.time_end)
@staticmethod
def generate(string):
return None
@staticmethod
def ParseTime(period):
if period is None:
return None
pat = re.compile('''(?:(?:(\d{1,2}):)?(\d{1,2}):)?(\d{1,2})''')
m = pat.match(period)
if m is None:
return None
return tuple(int(i) if i is not None else 0 for i in m.groups())
@staticmethod
def HMS2second(hms):
return hms[0]*3600+hms[1]*60+hms[2]
| 31.934783 | 72 | 0.528931 |
4a27a5de1d2d663bb8fd1bca61766ea873160dc0 | 7,858 | py | Python | docs/conf.py | Lodewic/vantage-project | 3430e8205b1bc6d6c8c65e3f1dc93d25e5e43557 | [
"Apache-2.0"
] | null | null | null | docs/conf.py | Lodewic/vantage-project | 3430e8205b1bc6d6c8c65e3f1dc93d25e5e43557 | [
"Apache-2.0"
] | null | null | null | docs/conf.py | Lodewic/vantage-project | 3430e8205b1bc6d6c8c65e3f1dc93d25e5e43557 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
#
# vantage-project documentation build configuration file, created by
# sphinx-quickstart.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import os
import sys
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
# sys.path.insert(0, os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = []
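# For example, a project generating API docs might set (illustrative only):
# extensions = ['sphinx.ext.autodoc', 'sphinx.ext.viewcode', 'sphinx.ext.napoleon']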
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
# source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'vantage-project'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.1'
# The full version, including alpha/beta/rc tags.
release = '0.1'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
# language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
# today = ''
# Else, today_fmt is used as the format for a strftime call.
# today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
# html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
# html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
# html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
# html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
# html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
# html_additional_pages = {}
# If false, no module index is generated.
# html_domain_indices = True
# If false, no index is generated.
# html_use_index = True
# If true, the index is split into individual pages for each letter.
# html_split_index = False
# If true, links to the reST sources are added to the pages.
# html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
# html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
# html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'vantage-projectdoc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
# 'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index',
'vantage-project.tex',
u'vantage-project Documentation',
u"Lodewic van Twillert", 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
# latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
# latex_use_parts = False
# If true, show page references after internal links.
# latex_show_pagerefs = False
# If true, show URL addresses after external links.
# latex_show_urls = False
# Documents to append as an appendix to all manuals.
# latex_appendices = []
# If false, no module index is generated.
# latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'vantage-project', u'vantage-project Documentation',
[u"Lodewic van Twillert"], 1)
]
# If true, show URL addresses after external links.
# man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'vantage-project', u'vantage-project Documentation',
u"Lodewic van Twillert", 'vantage-project',
'A data science case for Vantage AI', 'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
# texinfo_appendices = []
# If false, no module index is generated.
# texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
# texinfo_show_urls = 'footnote'
| 32.073469 | 80 | 0.708195 |
4a27a7695a2173bf4558649fe11af9bb548cf31f | 817 | py | Python | scripts/utils.py | alexbotello/concert_finder | b2d0ac446dbd9b1b99afeda507698d8918e9b0dd | [
"MIT"
] | 1 | 2017-03-07T00:01:09.000Z | 2017-03-07T00:01:09.000Z | scripts/utils.py | alexbotello/concert_finder | b2d0ac446dbd9b1b99afeda507698d8918e9b0dd | [
"MIT"
] | null | null | null | scripts/utils.py | alexbotello/concert_finder | b2d0ac446dbd9b1b99afeda507698d8918e9b0dd | [
"MIT"
] | null | null | null | import requests
import settings
def post_to_discord(json_param):
""" Post a message to discord channel
JSON Params
------------
content : str
The message contents (up to 2000 characters)
embed : embed object
Example - { 'embed': {
'image': {
'url': "http://theimageurlhere"
}
}
}
"""
resp = requests.post(settings.BASE_URL + '/channels/' +
settings.CHANNEL + '/messages', json=json_param,
headers={'Authorization': settings.TOKEN})
if resp.status_code != 200:
raise ValueError('POST /content/ {}'.format(resp.status_code))
print("Message posted. ID: {}".format(resp.json()['id']))
| 29.178571 | 73 | 0.499388 |
4a27a77d28bd34e8ba433469c47416100a0dad7c | 836 | py | Python | proj01/proj01/proj01.py | Bossman64/savy-2017 | 3b30cd4d830493d70992cd8d992d26baf6567508 | [
"MIT"
] | null | null | null | proj01/proj01/proj01.py | Bossman64/savy-2017 | 3b30cd4d830493d70992cd8d992d26baf6567508 | [
"MIT"
] | null | null | null | proj01/proj01/proj01.py | Bossman64/savy-2017 | 3b30cd4d830493d70992cd8d992d26baf6567508 | [
"MIT"
] | null | null | null | name = raw_input('Enter your name please:')
Date = raw_input('What is today: ')
Age = raw_input('How old are you:')
Birthdaythisyear = raw_input('Have you had your birthday this year Yes or No please:')
print 'you will be 100 in the year'
if Birthdaythisyear == 'yes':
Yes = 2017-int(Age)
x = Yes + 100
print x
elif Birthdaythisyear == 'no':
No = int(Age) + 1
v = 2017 - No
y = v + 100
print y
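# worked example of the calculation above: a 25 year old who has not had their
# birthday yet was born in 2017 - (25 + 1) = 1991, so they turn 100 in 1991 + 100 = 2091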
if int(Age) <= 12:
print 'you may watch rated G movies or PG movies with an adults consent'
elif 13<=int(Age)<18:
print 'you may watch PG,G, Or... PG-13 or R with parental guidance'
elif int(Age) >= 18:
print'you may watch any movie without parental guidance'
# if a user who is older than 100 uses my app, it will tell them the year that they turned 100
# this is a test comment
| 19.904762 | 98 | 0.659091 |
4a27a8157aa42808f9a2102750fa12fad546e877 | 3,809 | py | Python | 18_fastx_sampler/solution2_gzip.py | LongNguyen1984/biofx_python | b8d45dc38d968674c6b641051b73f8ed1503b1e4 | [
"MIT"
] | 1 | 2021-04-21T07:15:27.000Z | 2021-04-21T07:15:27.000Z | 18_fastx_sampler/solution2_gzip.py | LongNguyen1984/biofx_python | b8d45dc38d968674c6b641051b73f8ed1503b1e4 | [
"MIT"
] | null | null | null | 18_fastx_sampler/solution2_gzip.py | LongNguyen1984/biofx_python | b8d45dc38d968674c6b641051b73f8ed1503b1e4 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
""" Probabilistically subset FASTA files """
import argparse
import os
import random
import gzip
from Bio import SeqIO
from typing import List, NamedTuple, Optional, TextIO
class Args(NamedTuple):
""" Command-line arguments """
files: List[TextIO]
file_format: str
percent: float
max_reads: int
seed: Optional[int]
outdir: str
# --------------------------------------------------
def get_args() -> Args:
""" Get command-line arguments """
parser = argparse.ArgumentParser(
description='Probabilistically subset FASTA files',
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('file',
metavar='FILE',
type=str,
nargs='+',
help='Input FASTA/Q file(s)')
parser.add_argument('-f',
'--format',
help='Input file format',
metavar='format',
type=str,
choices=['fasta', 'fastq'],
default='fasta')
parser.add_argument('-p',
'--percent',
help='Percent of reads',
metavar='reads',
type=float,
default=.1)
parser.add_argument('-m',
'--max',
help='Maximum number of reads',
metavar='max',
type=int,
default=0)
parser.add_argument('-s',
'--seed',
help='Random seed value',
metavar='seed',
type=int,
default=None)
parser.add_argument('-o',
'--outdir',
help='Output directory',
metavar='DIR',
type=str,
default='out')
args = parser.parse_args()
if not 0 < args.percent < 1:
parser.error(f'--percent "{args.percent}" must be between 0 and 1')
if not os.path.isdir(args.outdir):
os.makedirs(args.outdir)
if bad_files := [file for file in args.file if not os.path.isfile(file)]:
parser.error(f'Invalid file: {", ".join(bad_files)}')
return Args(files=args.file,
file_format=args.format,
percent=args.percent,
max_reads=args.max,
seed=args.seed,
outdir=args.outdir)
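# Example invocation (file names and values are illustrative only):
#   ./solution2_gzip.py -f fastq -p 0.25 -m 1000 -o sampled reads1.fastq.gz reads2.fastq
# keeps roughly 25% of the records from each input (capped at 1000 per file) and
# writes the subsets into the 'sampled' directory.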
# --------------------------------------------------
def main() -> None:
""" Make a jazz noise here """
args = get_args()
random.seed(args.seed)
total_num = 0
for i, file in enumerate(args.files, start=1):
basename = os.path.basename(file)
out_file = os.path.join(args.outdir, basename)
print(f'{i:3}: {basename}')
ext = os.path.splitext(basename)[1]
fh = gzip.open(file, 'rt') if ext == '.gz' else open(file, 'rt')
out_fh = open(out_file, 'wt')
num_taken = 0
for rec in SeqIO.parse(fh, args.file_format):
if random.random() <= args.percent:
num_taken += 1
SeqIO.write(rec, out_fh, 'fasta')
if args.max_reads and num_taken == args.max_reads:
break
out_fh.close()
total_num += num_taken
num_files = len(args.files)
print(f'Wrote {total_num:,} sequence{"" if total_num == 1 else "s"} '
f'from {num_files:,} file{"" if num_files == 1 else "s"} '
f'to directory "{args.outdir}".')
# --------------------------------------------------
if __name__ == '__main__':
main()
| 29.527132 | 77 | 0.470202 |
4a27a82a5810f39602881335afacd1b0f14786fa | 2,699 | py | Python | amftrack/pipeline/scripts/image_processing/hyphae_extraction.py | Cocopyth/MscThesis | 60162bc779a3a668e7447b60bb9a4b2a616b8093 | [
"MIT"
] | 1 | 2021-06-10T02:51:53.000Z | 2021-06-10T02:51:53.000Z | amftrack/pipeline/scripts/image_processing/hyphae_extraction.py | Cocopyth/MscThesis | 60162bc779a3a668e7447b60bb9a4b2a616b8093 | [
"MIT"
] | null | null | null | amftrack/pipeline/scripts/image_processing/hyphae_extraction.py | Cocopyth/MscThesis | 60162bc779a3a668e7447b60bb9a4b2a616b8093 | [
"MIT"
] | null | null | null | from path import path_code_dir
import sys
sys.path.insert(0, path_code_dir)
from amftrack.util import get_dates_datetime
import os
from amftrack.pipeline.functions.image_processing.experiment_class_surf import Experiment
# from experiment_class_surftest import Experiment, clean_exp_with_hyphaes
from amftrack.pipeline.functions.image_processing.hyphae_id_surf import (
get_mother,
save_hyphaes,
width_based_cleaning,
resolve_anastomosis_crossing_by_root,
)
import pandas as pd
from amftrack.pipeline.paths.directory import directory_scratch
import json
from time import time_ns
directory = str(sys.argv[1])
limit = int(sys.argv[2])
version = str(sys.argv[3])
i = int(sys.argv[-1])
op_id = int(sys.argv[-2])
run_info = pd.read_json(f'{directory_scratch}temp/{op_id}.json')
plates = list(set(run_info['Plate'].values))
plates.sort()
plate = plates[i]
print(plate)
select_folders = run_info.loc[run_info['Plate'] == plate]
corrupted_rotation = select_folders.loc[select_folders['/Analysis/transform_corrupt.mat']]['folder']
folder_list = list(select_folders['folder'])
folder_list.sort()
indexes = [folder_list.index(corrupt_folder) for corrupt_folder in corrupted_rotation]
indexes = [index for index in indexes if index<limit]
indexes.sort()
indexes += [limit]
start = 0
for index in indexes:
stop = index
select_folder_names = folder_list[start:stop]
plate = int(folder_list[0].split('_')[-1][5:])
#confusion between plate number and position in Prince
exp = Experiment(plate, directory)
select_folders = run_info.loc[run_info['folder'].isin(select_folder_names)]
exp.load(select_folders)
exp.dates.sort()
#when no width is included
# width_based_cleaning(exp)
resolve_anastomosis_crossing_by_root(exp)
# get_mother(exp.hyphaes)
# solve_two_ends = resolve_ambiguity_two_ends(exp_clean.hyphaes)
# solved = solve_degree4(exp_clean)
# clean_obvious_fake_tips(exp_clean)
dates = exp.dates
op_id = time_ns()
dirName = f"{directory}Analysis_{op_id}_{start}_{stop}_Version{version}"
try:
os.mkdir(dirName)
print("Directory ", dirName, " Created ")
except FileExistsError:
print("Directory ", dirName, " already exists")
# hyphs, gr_inf = save_hyphaes(
# exp, f"{directory}Analysis_Plate{plate}_{dates[0]}_{dates[-1]}/"
# )
# exp.save(f"{directory}Analysis_Plate{plate}_{dates[0]}_{dates[-1]}/")
exp.save_location = dirName
exp.pickle_save(f"{dirName}/")
with open(f"{dirName}/folder_info.json", 'w') as jsonf:
json.dump(folder_list[start:stop], jsonf, indent=4) | 35.986667 | 101 | 0.714339 |
4a27a8af34ae9961f8dc28c8095e863ebe10d466 | 2,631 | py | Python | examples/block_copolymer/runIm-BCP-dots_vs_lines.py | zhouzhouxpyf/CFN-softbio | 21e4f4845e7a49c97f4ed2b0aa78a7eb831f6bcc | [
"BSD-3-Clause"
] | 13 | 2018-04-17T06:35:20.000Z | 2021-09-22T08:49:47.000Z | examples/block_copolymer/runIm-BCP-dots_vs_lines.py | zhouzhouxpyf/CFN-softbio | 21e4f4845e7a49c97f4ed2b0aa78a7eb831f6bcc | [
"BSD-3-Clause"
] | 14 | 2018-04-18T01:05:57.000Z | 2020-11-05T21:57:09.000Z | examples/block_copolymer/runIm-BCP-dots_vs_lines.py | zhouzhouxpyf/CFN-softbio | 21e4f4845e7a49c97f4ed2b0aa78a7eb831f6bcc | [
"BSD-3-Clause"
] | 9 | 2017-05-26T14:47:38.000Z | 2021-03-24T02:44:59.000Z | #!/usr/bin/python3
# -*- coding: utf-8 -*-
import os
import sys # To get commandline arguments
import glob
#SciAnalysis_PATH='/home/kyager/current/code/SciAnalysis/main/'
SciAnalysis_PATH='/home/yager/current/code/SciAnalysis/main/'
SciAnalysis_PATH in sys.path or sys.path.append(SciAnalysis_PATH)
from SciAnalysis import tools
#from SciAnalysis.XSAnalysis.Data import *
from SciAnalysis.ImAnalysis.Data import *
from SciAnalysis.ImAnalysis import Protocols
#L0 = 27 # nm 'C48'
#L0 = 32 # nm 'C48'
#L0 = 44 # nm 'C67'
#L0 = 43.6 # nm 'C99'
#L0 = 29 # nm 'L36'
#L0 = 51 # nm 'L104'
#L0 = 38.5 # nm 'L75'
#L0 = 76 # nm 'C177'
#L0 = 79 # nm 'O184'
#L0 = 65 # nm 'L176'
#L0 = 128 # nm 'L570'
#L0 = 32 # nm 'SEO30'
#L0 = 30 # nm 'S2VP45'
# layering distance
L0 = 44 # nm 'C67'
q0 = 2*np.pi/(L0)
# cyl-cyl distance
d_cc = L0/(np.sqrt(3.0)/2.0)
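# With the current L0 = 44 nm these evaluate to roughly q0 = 2*pi/44 ~ 0.143 nm^-1
# and d_cc = 44/(sqrt(3)/2) ~ 50.8 nm (shown here for reference only).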
process = Protocols.ProcessorIm()
run_args = { 'verbosity' : 3,
'q0' : q0, # nm^-1
'dq' : q0*0.6, # nm^-1
'NN_cutoff_distance_nm' : L0*1.36,
'correlation_step_size_points' : 40, # Step for grain size analysis (speeds up code)
'correlation_edge_exclusion' : 50, # Ignore image edges for grain size analysis
'radius_min' : L0*0.08, # nm
'dot_size_cutoff_nm' : L0*0.3, # size cutoff for distinguishing a dot vs. line
}
load_args = { 'format' : 'custom',
'scale' : 500.0/403, # nm/pixel
'crop_edges' : [0, 0, 124, 0],
'load_param_file' : False,
}
protocols = [
#Protocols.fft(blur=0.6, **run_args),
#Protocols.thumbnails(resize=0.5, crop=0.5),
#Protocols.particles(threshold=190, invert=False, preprocess='highloweq', **run_args),
Protocols.dots_vs_lines(threshold=190, invert=False, **run_args),
Protocols.grain_size_hex(name='grain_size_hex_dots', threshold=190, invert=False, symmetry=6, mask='dots', **run_args),
Protocols.grain_size(name='grain_size_lines', symmetry=2, mask='lines', **run_args),
Protocols.fft(name='fft_dots', mask='dots', blur=0.6, **run_args),
Protocols.fft(name='fft_lines', mask='lines', blur=0.6, **run_args),
]
source_dir = '../'
output_dir = './'
pattern = '*'
infiles = glob.glob(source_dir + '/'+pattern+'.tif')
infiles.sort()
print('{} infiles'.format(len(infiles)))
process.run(infiles, protocols, output_dir=output_dir, force=False, load_args=load_args, run_args=run_args)
| 29.233333 | 135 | 0.595211 |
4a27aa9332db66c4786e58f0dc21187c9016e143 | 74 | py | Python | plugins/markdown/komand_markdown/actions/markdown_to_txt/__init__.py | lukaszlaszuk/insightconnect-plugins | 8c6ce323bfbb12c55f8b5a9c08975d25eb9f8892 | [
"MIT"
] | 46 | 2019-06-05T20:47:58.000Z | 2022-03-29T10:18:01.000Z | plugins/markdown/komand_markdown/actions/markdown_to_txt/__init__.py | lukaszlaszuk/insightconnect-plugins | 8c6ce323bfbb12c55f8b5a9c08975d25eb9f8892 | [
"MIT"
] | 386 | 2019-06-07T20:20:39.000Z | 2022-03-30T17:35:01.000Z | plugins/markdown/komand_markdown/actions/markdown_to_txt/__init__.py | lukaszlaszuk/insightconnect-plugins | 8c6ce323bfbb12c55f8b5a9c08975d25eb9f8892 | [
"MIT"
] | 43 | 2019-07-09T14:13:58.000Z | 2022-03-28T12:04:46.000Z | # GENERATED BY KOMAND SDK - DO NOT EDIT
from .action import MarkdownToTxt
| 24.666667 | 39 | 0.783784 |
4a27ac2123181d2bcfef94f5bb6c74f38ee01b90 | 9,426 | py | Python | tanh.py | janivanecky/Numpy-RNNs | 6ed9e4727dad520f169debcd914cdf6af48466f6 | [
"MIT"
] | 67 | 2016-08-05T13:22:06.000Z | 2021-05-21T08:11:41.000Z | tanh.py | janivanecky/Numpy-RNNs | 6ed9e4727dad520f169debcd914cdf6af48466f6 | [
"MIT"
] | null | null | null | tanh.py | janivanecky/Numpy-RNNs | 6ed9e4727dad520f169debcd914cdf6af48466f6 | [
"MIT"
] | 10 | 2016-08-06T16:05:06.000Z | 2019-12-15T02:42:22.000Z | '''
Simple Vanilla RNN model in python/numpy, written by Jan Ivanecky (@janivanecyk)
MIT license
'''
import numpy as np
import math
import util
from random import uniform
from graph import Grapher
# softmax layer
def softmax(bottom):
top = np.exp(bottom) / np.sum(np.exp(bottom))
return top
# note: this is not a real cross entropy loss implementation, it's a simplified version
# built with the assumption that the ground truth vector contains only one non-zero component with a value of 1
# gt_index is the index of that non-zero component
def cross_entropy(bottom, gt_index):
loss = -np.log(bottom[gt_index]) if bottom[gt_index] > 0 else 0
return loss
# note: once again, this function does not compute a general derivative of a softmax followed by a cross entropy loss,
# it computes a derivative for this special case
def cross_entropy_softmax_d(top, gt_index):
d = np.copy(top)
d[gt_index] -= 1
return d
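# Worked example of the special case above (illustrative numbers): for a softmax
# output top = [0.7, 0.2, 0.1] and gt_index = 0, the gradient w.r.t. the pre-softmax
# scores is top - onehot(0) = [-0.3, 0.2, 0.1], which is exactly what this returns.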
# relu activation
def relu(bottom):
top = bottom * (bottom > 0)
return top
# computes derivative of a relu activation with respect to its inputs
def relu_d(bottom):
d = bottom > 0
return d
# tanh activation
def tanh(bottom):
top = np.tanh(bottom)
return top
# computes derivative of a tanh activation with respect to its inputs
def tanh_d(top):
d = 1 - top * top
return d
# initialize input matrix with a Xavier method
def input_matrix_init(input_size, hidden_size):
stdev = np.sqrt(2.0 / (input_size + hidden_size))
Wxh = np.random.randn(input_size, hidden_size) * stdev
return Wxh
# initialize recurrent weights with a positive definite matrix with all except the highest eigenvalue < 1 (Talathi et al. http://arxiv.org/pdf/1511.03771v3.pdf)
def recurrent_matrix_init_NPRNN(hidden_size):
R = np.random.randn(hidden_size,hidden_size)
A = np.dot(R.T, R) / float(hidden_size)
I = np.identity(hidden_size)
e = (np.linalg.eigvals(A + I)).max()
Whh = (A + I) / e
return Whh
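# Sanity check of the construction above (for reference, not used anywhere):
# A = R.T R / n is positive semi-definite, so A + I has eigenvalues >= 1; dividing by
# its largest eigenvalue leaves Whh with eigenvalues in (0, 1], the largest equal to 1,
# e.g. np.linalg.eigvals(recurrent_matrix_init_NPRNN(4)).max() should be ~1.0.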
# initialize recurrent weights with an identity matrix (Le et al. http://arxiv.org/pdf/1504.00941v2.pdf)
def recurrent_matrix_init_IRNN(hidden_size):
Whh = np.identity(hidden_size)
return Whh
# initialize recurrent weights with a Xavier method
def recurrent_matrix_init_basic(hidden_size):
stdev = np.sqrt(2.0 / (hidden_size + hidden_size))
Whh = np.random.randn(hidden_size, hidden_size) * stdev
return Whh
# hyperparameters
HIDDEN_LAYER_SIZE = 256
DEPTH = 3
DROPOUT_RATE = 0.1
SEQ_SIZE = 100
BATCH_SIZE = 10
L_RATE = 0.01
MAX_ITERATIONS = 100000
EVAL_INTERVAL = 100
PRINT_SAMPLES = True
TEMPERATURE = 0.7
# get input
input, VOCABULARY_SIZE, char_to_index, index_to_char = util.get_input('shakespear_train.txt', -1)
validation_input, _, _, _, = util.get_input('shakespear_val.txt', 5000)
# model parameters
Wxh = [input_matrix_init(VOCABULARY_SIZE if d == 0 else HIDDEN_LAYER_SIZE, HIDDEN_LAYER_SIZE) for d in xrange(DEPTH)]
Whh = [recurrent_matrix_init_basic(HIDDEN_LAYER_SIZE) for d in xrange(DEPTH)]
bh = [np.zeros((1, HIDDEN_LAYER_SIZE)) for d in xrange(DEPTH)]
Why = np.random.randn(HIDDEN_LAYER_SIZE, VOCABULARY_SIZE) * np.sqrt(2.0 / (VOCABULARY_SIZE + HIDDEN_LAYER_SIZE))
by = np.zeros((1, VOCABULARY_SIZE))
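# Shapes of the parameters above, for reference (V = VOCABULARY_SIZE, defaults DEPTH=3,
# HIDDEN_LAYER_SIZE=256): Wxh[0] is [V, 256] and Wxh[1..DEPTH-1] are [256, 256];
# every Whh[d] is [256, 256] and bh[d] is [1, 256]; Why is [256, V] and by is [1, V].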
def forward_backward(inputs, targets, initial_states):
'''
Computes forward and backward pass through the recurrent net, for SEQ_SIZE time steps
-inputs is an array of shape [BATCH_SIZE, SEQ_SIZE, VOCABULARY_SIZE] and holds one hot encoded inputs to the model
-targets has a shape [BATCH_SIZE, SEQ_SIZE], holds just the indices of the target chars
-initial_states contains state of all the recurrent hidden units, shape [DEPTH, BATCH_SIZE, HIDDEN_LAYER_SIZE]
Returns loss, gradients and the last state of the hidden units
'''
loss = 0
dropout = [{} for i in xrange(DEPTH)]
x,h,z = [{} for i in xrange(DEPTH + 1)], [{} for i in xrange(DEPTH)], {}
# Initialize states
h = [{-1: initial_states[d]} for d in xrange(DEPTH)]
# Forward pass
for t in xrange(SEQ_SIZE):
x[0][t] = np.reshape(inputs[:,t,:], (BATCH_SIZE, VOCABULARY_SIZE))
for d in xrange(DEPTH):
dropout[d][t] = np.random.binomial(1, 1 - DROPOUT_RATE, (1, HIDDEN_LAYER_SIZE)) * 1.0 / (1 - DROPOUT_RATE)
h[d][t] = tanh(np.dot(x[d][t], Wxh[d]) + np.dot(h[d][t - 1], Whh[d]) + bh[d])
x[d + 1][t] = np.copy(h[d][t]) * dropout[d][t]
y = np.dot(x[DEPTH][t], Why) + by
y = np.clip(y, -100, 100) # clipping to prevent state explosions at the beginning of training
z[t] = np.array([softmax(y[b,:]) for b in xrange(BATCH_SIZE)])
# Backward pass
dWhy = np.zeros((HIDDEN_LAYER_SIZE, VOCABULARY_SIZE))
dby = np.zeros((1, VOCABULARY_SIZE))
dWxh = [np.zeros((VOCABULARY_SIZE if d == 0 else HIDDEN_LAYER_SIZE, HIDDEN_LAYER_SIZE)) for d in xrange(DEPTH)]
dWhh = [np.zeros((HIDDEN_LAYER_SIZE, HIDDEN_LAYER_SIZE)) for d in xrange(DEPTH)]
dbh = [np.zeros((1, HIDDEN_LAYER_SIZE)) for d in xrange(DEPTH)]
dhh = [np.zeros((BATCH_SIZE, HIDDEN_LAYER_SIZE)) for i in xrange(DEPTH)]
for t in reversed(xrange(SEQ_SIZE)):
gt = targets[:, t]
loss += np.array([cross_entropy(z[t][b, :], gt[b]) for b in xrange(BATCH_SIZE)]).sum() / (SEQ_SIZE * BATCH_SIZE)
dy = np.array([cross_entropy_softmax_d(z[t][b, :], gt[b]) for b in xrange(BATCH_SIZE)]) / (SEQ_SIZE * BATCH_SIZE)
dWhy += np.dot(x[DEPTH][t].T, dy)
dby += dy.sum(0)
dh = np.dot(dy, Why.T)
for d in reversed(xrange(DEPTH)):
dhinput = tanh_d(h[d][t]) * (dh * dropout[d][t] + dhh[d])
dWxh[d] += np.dot(x[d][t].T, dhinput)
dWhh[d] += np.dot(h[d][t-1].T, dhinput)
dbh[d] += dhinput.sum(0)
dhh[d] = np.dot(dhinput, Whh[d].T)
dh = np.dot(dhinput, Wxh[d].T)
h_prev = np.array([h[d][SEQ_SIZE - 1] for d in xrange(DEPTH)]) # get last states for the next train step
return loss, dWxh, dWhh, dbh, dWhy, dby, h_prev
def forward(input, state):
'''
Computes only the forward pass through one step of the time, note that the input to the softmax is divided by a hyperparameter TEMPERATURE
-input is an index of the char in vocabulary
-state, the same as for forward_backward, but the BATCH_SIZE is 1, so the final shape is [DEPTH, 1, HIDDEN_LAYER_SIZE]
Returns probabilities and the updated state of the hidden units
'''
ox = np.zeros((1, VOCABULARY_SIZE))
ox[0, input] = 1
for d in xrange(DEPTH):
state[d] = tanh(np.dot(ox, Wxh[d]) + np.dot(state[d], Whh[d]) + bh[d])
ox = state[d]
y = np.dot(ox, Why) + by
y = np.clip(y, -100, 100)
oz = softmax(y / TEMPERATURE)
return np.reshape(oz, (VOCABULARY_SIZE)), state
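# Effect of TEMPERATURE on sampling (worked example, illustrative numbers): for scores
# y = [2, 1, 0], softmax(y / 1.0) ~ [0.67, 0.24, 0.09] while softmax(y / 0.7) ~ [0.77, 0.18, 0.04],
# so temperatures below 1 sharpen the distribution and make samples more conservative.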
def evaluate_loss(input):
'''
Evaluates and returns loss on the input string (array of chars)
'''
oh = [np.zeros((1, HIDDEN_LAYER_SIZE)) for i in xrange(DEPTH)]
loss = 0
N = len(input) - 1
for i in xrange(N):
inpt = char_to_index[input[i]]
target = char_to_index[input[i + 1]]
prob, oh = forward(inpt, oh)
target_prob = -np.log(prob[target]) / N
loss += target_prob
return loss
def sample_model(N):
'''
Samples the model, returns the sample of length N as a string
'''
ix = np.random.randint(0, VOCABULARY_SIZE)
output = []
output.append(index_to_char[ix])
oh = [np.zeros((1, HIDDEN_LAYER_SIZE)) for i in xrange(DEPTH)]
for c in xrange(N):
oz, oh = forward(ix, oh)
result = np.random.choice(range(VOCABULARY_SIZE), p=oz.ravel())
output.append(index_to_char[result])
ix = result
return ''.join(output).rstrip()
# initial states
h_prev = np.array([np.zeros((BATCH_SIZE, HIDDEN_LAYER_SIZE)) for d in xrange(DEPTH)])
p = 0
# momentum for Adagrad
mWxh = [np.zeros((VOCABULARY_SIZE if d == 0 else HIDDEN_LAYER_SIZE, HIDDEN_LAYER_SIZE)) + 0.1 for d in xrange(DEPTH)]
mWhh = [np.zeros((HIDDEN_LAYER_SIZE, HIDDEN_LAYER_SIZE)) + 0.1 for d in xrange(DEPTH)]
mbh = [np.zeros((1, HIDDEN_LAYER_SIZE)) + 0.1 for d in xrange(DEPTH)]
mWhy = np.zeros((HIDDEN_LAYER_SIZE, VOCABULARY_SIZE)) + 0.1
mby = np.zeros((1, VOCABULARY_SIZE)) + 0.1
losses = {}
graph = Grapher('Train Loss')
# training loop
for iteration in xrange(MAX_ITERATIONS):
# get inputs for current iteration
if p == 0:
h_prev = np.array([np.zeros((BATCH_SIZE, HIDDEN_LAYER_SIZE)) for d in xrange(DEPTH)])
targets, _ = util.batch_input_seq(input, p + 1, SEQ_SIZE, BATCH_SIZE, char_to_index)
t, _ = util.batch_input_seq(input, p, SEQ_SIZE, BATCH_SIZE, char_to_index)
inputs, p = util.batch_input_one_hot_seq(input, p, SEQ_SIZE, BATCH_SIZE, VOCABULARY_SIZE, char_to_index)
loss, dWxh, dWhh, dbh, dWhy, dby, h_prev = forward_backward(inputs, targets, h_prev)
# update model parameters
all_d = dWxh + dWhh + dbh + [dWhy, dby]
all_m = mWxh + mWhh + mbh + [mWhy, mby]
all_w = Wxh + Whh + bh + [Why, by]
for d, m, w in zip(all_d, all_m, all_w):
np.clip(d, -1, 1, out=d)
# Adagrad
m += d * d
w -= L_RATE * d / np.sqrt(m + 1e-8)
# RMSProp
#m = 0.9 * m + 0.1 * d * d
#w -= L_RATE * d / np.sqrt(m + 1e-8)
# sample from the model and evaluate test loss
if(iteration % EVAL_INTERVAL == 0):
print("ITERATION " + str(iteration))
print 'loss: {}'.format(loss * 25)
# evaluate test loss
validation_loss = evaluate_loss(validation_input)
print('validation loss: {}'.format(validation_loss * 25))
# sample the model
if(PRINT_SAMPLES):
output = sample_model(200)
print(output)
losses[iteration] = loss
graph_keys = np.array(sorted(losses), dtype=np.uint32)
graph_data = np.array([losses[key] for key in graph_keys], dtype=np.float32)
graph.update(graph_keys, graph_data)
weights = [Wxh, Whh, bh, Why, by]
np.save('tanh_weights.npy', weights)
| 35.040892 | 157 | 0.701782 |
4a27ac4c6157f12b137ae57b1ae4fba5e549ffdb | 828 | py | Python | dns-checker.py | estaji/multi-nslookup | 068d6aaa302abf0a3e1b1050814a9e88aff275be | [
"MIT"
] | null | null | null | dns-checker.py | estaji/multi-nslookup | 068d6aaa302abf0a3e1b1050814a9e88aff275be | [
"MIT"
] | null | null | null | dns-checker.py | estaji/multi-nslookup | 068d6aaa302abf0a3e1b1050814a9e88aff275be | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# By Omid Estaji - [email protected] - 1400/07/16
# Compatible with Python 3
# Usage: #python3 dns-checker.py
import socket
import os
dns_server = input('What is your DNS server?\n e.g: ns1.parspack.net\n e.g: 8.8.8.8\n')
domain = input("What is your domain address?\n e.g: 0mid.net\n e.g: mail.0mid.net\n")
record_type = input("Which type of record are you looking for?\n e.g: a\n e.g: ptr\n")
addressInfo = socket.getaddrinfo(dns_server, 80, family=socket.AF_INET, proto=socket.IPPROTO_TCP)
ips = []
for index in addressInfo:
ip = index[4][0]
ips.append(index[4][0])
print("----- DNS server %s IPs -----\n %s" % (dns_server, ips))
for ip in ips:
print('----- Server %s nslookup result -----' % (ip))
cmd = "nslookup -type=%s %s %s" % (record_type, domain, ip)
os.system(cmd)
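# Example of the command built above for each resolved server IP (domain, record type
# and IP come from the prompts; values here are placeholders): nslookup -type=a 0mid.net 8.8.8.8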
| 30.666667 | 97 | 0.657005 |
4a27acaa482f096862bc703f00d89394bc604d07 | 589 | py | Python | alboPretorioJCityGov/pipelines.py | mfortini/alboPretorioJCityGov | feb8eef26e796b897985b6614db1859fb4053600 | [
"MIT"
] | null | null | null | alboPretorioJCityGov/pipelines.py | mfortini/alboPretorioJCityGov | feb8eef26e796b897985b6614db1859fb4053600 | [
"MIT"
] | null | null | null | alboPretorioJCityGov/pipelines.py | mfortini/alboPretorioJCityGov | feb8eef26e796b897985b6614db1859fb4053600 | [
"MIT"
] | null | null | null | from dbmanager import *
# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: http://doc.scrapy.org/en/latest/topics/item-pipeline.html
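# For reference, the corresponding settings.py entry would look roughly like this
# (module path inferred from this project's layout):
#
# ITEM_PIPELINES = {
#     'alboPretorioJCityGov.pipelines.AlbopretorioJCityGovPipeline': 300,
# }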
class AlbopretorioJCityGovPipeline(object):
def open_spider(self, spider):
self.dbInsertNuovi = dbInsertNuovi()
pass
def process_item(self, item, spider):
print item
self.dbInsertNuovi.add_item(item)
return item
def close_spider(self, spider):
self.dbElaboraNuovi = dbElaboraNuovi()
self.dbElaboraNuovi.elabora()
pass
| 26.772727 | 65 | 0.692699 |
4a27acf9ca7f610114873d76853b1108fef29114 | 41,090 | py | Python | lib/spack/spack/test/config.py | QianJianhua1/spack | 363536fd929d2aee280e07780ff6c98498d7be46 | [
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 11 | 2015-10-04T02:17:46.000Z | 2018-02-07T18:23:00.000Z | lib/spack/spack/test/config.py | QianJianhua1/spack | 363536fd929d2aee280e07780ff6c98498d7be46 | [
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 22 | 2017-08-01T22:45:10.000Z | 2022-03-10T07:46:31.000Z | lib/spack/spack/test/config.py | QianJianhua1/spack | 363536fd929d2aee280e07780ff6c98498d7be46 | [
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 4 | 2016-06-10T17:57:39.000Z | 2018-09-11T04:59:38.000Z | # Copyright 2013-2022 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
import collections
import getpass
import os
import sys
import tempfile
import pytest
from six import StringIO
from llnl.util.filesystem import getuid, mkdirp, touch
import spack.config
import spack.environment as ev
import spack.main
import spack.paths
import spack.schema.compilers
import spack.schema.config
import spack.schema.env
import spack.schema.mirrors
import spack.schema.packages
import spack.schema.repos
import spack.util.path as spack_path
import spack.util.spack_yaml as syaml
# sample config data
config_low = {
'config': {
'install_tree': {'root': 'install_tree_path'},
'build_stage': ['path1', 'path2', 'path3']}}
config_override_all = {
'config:': {
'install_tree:': {'root': 'override_all'}}}
config_override_key = {
'config': {
'install_tree:': {'root': 'override_key'}}}
config_merge_list = {
'config': {
'build_stage': ['patha', 'pathb']}}
config_override_list = {
'config': {
'build_stage:': ['pathd', 'pathe']}}
config_merge_dict = {
'config': {
'info': {
'a': 3,
'b': 4}}}
config_override_dict = {
'config': {
'info:': {
'a': 7,
'c': 9}}}
@pytest.fixture()
def write_config_file(tmpdir):
"""Returns a function that writes a config file."""
def _write(config, data, scope):
config_yaml = tmpdir.join(scope, config + '.yaml')
config_yaml.ensure()
with config_yaml.open('w') as f:
syaml.dump_config(data, f)
return _write
@pytest.fixture()
def env_yaml(tmpdir):
"""Return a sample env.yaml for test purposes"""
env_yaml = str(tmpdir.join("env.yaml"))
with open(env_yaml, 'w') as f:
f.write("""\
env:
config:
verify_ssl: False
dirty: False
packages:
libelf:
compiler: [ '[email protected]' ]
repos:
- /x/y/z
""")
return env_yaml
def cross_plat_join(*pths):
"""os.path.join does not prepend paths to other paths
beginning with a Windows drive label i.e. D:\\
"""
return os.sep.join([pth for pth in pths])
def check_compiler_config(comps, *compiler_names):
"""Check that named compilers in comps match Spack's config."""
config = spack.config.get('compilers')
compiler_list = ['cc', 'cxx', 'f77', 'fc']
flag_list = ['cflags', 'cxxflags', 'fflags', 'cppflags',
'ldflags', 'ldlibs']
param_list = ['modules', 'paths', 'spec', 'operating_system']
for compiler in config:
conf = compiler['compiler']
if conf['spec'] in compiler_names:
comp = next((c['compiler'] for c in comps if
c['compiler']['spec'] == conf['spec']), None)
if not comp:
raise ValueError('Bad config spec')
for p in param_list:
assert conf[p] == comp[p]
for f in flag_list:
expected = comp.get('flags', {}).get(f, None)
actual = conf.get('flags', {}).get(f, None)
assert expected == actual
for c in compiler_list:
expected = comp['paths'][c]
actual = conf['paths'][c]
assert expected == actual
#
# Some sample compiler config data and tests.
#
a_comps = {
'compilers': [
{'compiler': {
'paths': {
"cc": "/gcc473",
"cxx": "/g++473",
"f77": None,
"fc": None
},
'modules': None,
'spec': '[email protected]',
'operating_system': 'CNL10'
}},
{'compiler': {
'paths': {
"cc": "/gcc450",
"cxx": "/g++450",
"f77": 'gfortran',
"fc": 'gfortran'
},
'modules': None,
'spec': '[email protected]',
'operating_system': 'CNL10'
}},
{'compiler': {
'paths': {
"cc": "/gcc422",
"cxx": "/g++422",
"f77": 'gfortran',
"fc": 'gfortran'
},
'flags': {
"cppflags": "-O0 -fpic",
"fflags": "-f77",
},
'modules': None,
'spec': '[email protected]',
'operating_system': 'CNL10'
}},
{'compiler': {
'paths': {
"cc": "<overwritten>",
"cxx": "<overwritten>",
"f77": '<overwritten>',
"fc": '<overwritten>'},
'modules': None,
'spec': '[email protected]',
'operating_system': 'CNL10'
}}
]
}
b_comps = {
'compilers': [
{'compiler': {
'paths': {
"cc": "/icc100",
"cxx": "/icp100",
"f77": None,
"fc": None
},
'modules': None,
'spec': '[email protected]',
'operating_system': 'CNL10'
}},
{'compiler': {
'paths': {
"cc": "/icc111",
"cxx": "/icp111",
"f77": 'ifort',
"fc": 'ifort'
},
'modules': None,
'spec': '[email protected]',
'operating_system': 'CNL10'
}},
{'compiler': {
'paths': {
"cc": "/icc123",
"cxx": "/icp123",
"f77": 'ifort',
"fc": 'ifort'
},
'flags': {
"cppflags": "-O3",
"fflags": "-f77rtl",
},
'modules': None,
'spec': '[email protected]',
'operating_system': 'CNL10'
}},
{'compiler': {
'paths': {
"cc": "<overwritten>",
"cxx": "<overwritten>",
"f77": '<overwritten>',
"fc": '<overwritten>'},
'modules': None,
'spec': '[email protected]',
'operating_system': 'CNL10'
}}
]
}
@pytest.fixture()
def compiler_specs():
"""Returns a couple of compiler specs needed for the tests"""
a = [ac['compiler']['spec'] for ac in a_comps['compilers']]
b = [bc['compiler']['spec'] for bc in b_comps['compilers']]
CompilerSpecs = collections.namedtuple('CompilerSpecs', ['a', 'b'])
return CompilerSpecs(a=a, b=b)
def test_write_key_in_memory(mock_low_high_config, compiler_specs):
# Write b_comps "on top of" a_comps.
spack.config.set('compilers', a_comps['compilers'], scope='low')
spack.config.set('compilers', b_comps['compilers'], scope='high')
# Make sure the config looks how we expect.
check_compiler_config(a_comps['compilers'], *compiler_specs.a)
check_compiler_config(b_comps['compilers'], *compiler_specs.b)
def test_write_key_to_disk(mock_low_high_config, compiler_specs):
# Write b_comps "on top of" a_comps.
spack.config.set('compilers', a_comps['compilers'], scope='low')
spack.config.set('compilers', b_comps['compilers'], scope='high')
# Clear caches so we're forced to read from disk.
spack.config.config.clear_caches()
# Same check again, to ensure consistency.
check_compiler_config(a_comps['compilers'], *compiler_specs.a)
check_compiler_config(b_comps['compilers'], *compiler_specs.b)
def test_write_to_same_priority_file(mock_low_high_config, compiler_specs):
# Write b_comps in the same file as a_comps.
spack.config.set('compilers', a_comps['compilers'], scope='low')
spack.config.set('compilers', b_comps['compilers'], scope='low')
# Clear caches so we're forced to read from disk.
spack.config.config.clear_caches()
# Same check again, to ensure consistency.
check_compiler_config(a_comps['compilers'], *compiler_specs.a)
check_compiler_config(b_comps['compilers'], *compiler_specs.b)
#
# Sample repo data and tests
#
repos_low = {'repos': ["/some/path"]}
repos_high = {'repos': ["/some/other/path"]}
# Test setting config values via path in filename
def test_add_config_path(mutable_config):
# Try setting a new install tree root
path = "config:install_tree:root:/path/to/config.yaml"
spack.config.add(path)
set_value = spack.config.get('config')['install_tree']['root']
assert set_value == '/path/to/config.yaml'
# Now a package:all setting
path = "packages:all:compiler:[gcc]"
spack.config.add(path)
compilers = spack.config.get('packages')['all']['compiler']
assert "gcc" in compilers
@pytest.mark.regression('17543,23259')
def test_add_config_path_with_enumerated_type(mutable_config):
spack.config.add("config:concretizer:clingo")
assert spack.config.get('config')['concretizer'] == "clingo"
spack.config.add("config:concretizer:original")
assert spack.config.get('config')['concretizer'] == "original"
with pytest.raises(spack.config.ConfigError):
spack.config.add("config:concretizer:foo")
def test_add_config_filename(mock_low_high_config, tmpdir):
config_yaml = tmpdir.join('config-filename.yaml')
config_yaml.ensure()
with config_yaml.open('w') as f:
syaml.dump_config(config_low, f)
spack.config.add_from_file(str(config_yaml), scope="low")
assert "build_stage" in spack.config.get('config')
build_stages = spack.config.get('config')['build_stage']
for stage in config_low['config']['build_stage']:
assert stage in build_stages
# repos
def test_write_list_in_memory(mock_low_high_config):
spack.config.set('repos', repos_low['repos'], scope='low')
spack.config.set('repos', repos_high['repos'], scope='high')
config = spack.config.get('repos')
assert config == repos_high['repos'] + repos_low['repos']
class MockEnv(object):
def __init__(self, path):
self.path = path
def test_substitute_config_variables(mock_low_high_config, monkeypatch):
prefix = spack.paths.prefix.lstrip('/')
assert cross_plat_join(
os.sep + os.path.join('foo', 'bar', 'baz'), prefix
) == spack_path.canonicalize_path('/foo/bar/baz/$spack')
assert cross_plat_join(
spack.paths.prefix, os.path.join('foo', 'bar', 'baz')
) == spack_path.canonicalize_path('$spack/foo/bar/baz/')
assert cross_plat_join(
os.sep + os.path.join('foo', 'bar', 'baz'),
prefix, os.path.join('foo', 'bar', 'baz')
) == spack_path.canonicalize_path('/foo/bar/baz/$spack/foo/bar/baz/')
assert cross_plat_join(
os.sep + os.path.join('foo', 'bar', 'baz'), prefix
) == spack_path.canonicalize_path('/foo/bar/baz/${spack}')
assert cross_plat_join(
spack.paths.prefix, os.path.join('foo', 'bar', 'baz')
) == spack_path.canonicalize_path('${spack}/foo/bar/baz/')
assert cross_plat_join(
os.sep + os.path.join('foo', 'bar', 'baz'),
prefix, os.path.join('foo', 'bar', 'baz')
) == spack_path.canonicalize_path('/foo/bar/baz/${spack}/foo/bar/baz/')
assert cross_plat_join(
os.sep + os.path.join('foo', 'bar', 'baz'),
prefix, os.path.join('foo', 'bar', 'baz')
) != spack_path.canonicalize_path('/foo/bar/baz/${spack/foo/bar/baz/')
# $env replacement is a no-op when no environment is active
assert spack_path.canonicalize_path(
os.sep + os.path.join('foo', 'bar', 'baz', '$env')
) == os.sep + os.path.join('foo', 'bar', 'baz', '$env')
# Fake an active environment and $env is replaced properly
fake_env_path = os.sep + os.path.join('quux', 'quuux')
monkeypatch.setattr(ev, 'active_environment',
lambda: MockEnv(fake_env_path))
assert spack_path.canonicalize_path(
'$env/foo/bar/baz'
) == os.path.join(fake_env_path, os.path.join('foo', 'bar', 'baz'))
# relative paths without source information are relative to cwd
assert spack_path.canonicalize_path(
os.path.join('foo', 'bar', 'baz')
) == os.path.abspath(os.path.join('foo', 'bar', 'baz'))
# relative paths with source information are relative to the file
spack.config.set(
'modules:default',
{'roots': {'lmod': os.path.join('foo', 'bar', 'baz')}}, scope='low')
spack.config.config.clear_caches()
path = spack.config.get('modules:default:roots:lmod')
assert spack_path.canonicalize_path(path) == os.path.normpath(
os.path.join(mock_low_high_config.scopes['low'].path,
os.path.join('foo', 'bar', 'baz')))
packages_merge_low = {
'packages': {
'foo': {
'variants': ['+v1']
},
'bar': {
'variants': ['+v2']
}
}
}
packages_merge_high = {
'packages': {
'foo': {
'version': ['a']
},
'bar': {
'version': ['b'],
'variants': ['+v3']
},
'baz': {
'version': ['c']
}
}
}
@pytest.mark.regression('7924')
def test_merge_with_defaults(mock_low_high_config, write_config_file):
"""This ensures that specified preferences merge with defaults as
expected. Originally all defaults were initialized with the
exact same object, which led to aliasing problems. Therefore
the test configs used here leave 'version' blank for multiple
packages in 'packages_merge_low'.
"""
write_config_file('packages', packages_merge_low, 'low')
write_config_file('packages', packages_merge_high, 'high')
cfg = spack.config.get('packages')
assert cfg['foo']['version'] == ['a']
assert cfg['bar']['version'] == ['b']
assert cfg['baz']['version'] == ['c']
def test_substitute_user(mock_low_high_config):
user = getpass.getuser()
assert os.sep + os.path.join('foo', 'bar') + os.sep \
+ user + os.sep \
+ 'baz' == spack_path.canonicalize_path(
os.sep + os.path.join('foo', 'bar', '$user', 'baz')
)
def test_substitute_user_cache(mock_low_high_config):
user_cache_path = spack.paths.user_cache_path
assert user_cache_path + os.sep + 'baz' == spack_path.canonicalize_path(
os.path.join('$user_cache_path', 'baz')
)
def test_substitute_tempdir(mock_low_high_config):
tempdir = tempfile.gettempdir()
assert tempdir == spack_path.canonicalize_path('$tempdir')
assert tempdir + os.sep + \
os.path.join('foo', 'bar', 'baz') == spack_path.canonicalize_path(
os.path.join('$tempdir', 'foo', 'bar', 'baz')
)
PAD_STRING = spack.util.path.SPACK_PATH_PADDING_CHARS
MAX_PATH_LEN = spack.util.path.get_system_path_max()
MAX_PADDED_LEN = MAX_PATH_LEN - spack.util.path.SPACK_MAX_INSTALL_PATH_LENGTH
reps = [PAD_STRING for _ in range((MAX_PADDED_LEN // len(PAD_STRING) + 1) + 2)]
full_padded_string = os.path.join(
os.sep + 'path', os.sep.join(reps))[:MAX_PADDED_LEN]
@pytest.mark.parametrize('config_settings,expected', [
([], [None, None, None]),
([['config:install_tree:root', os.sep + 'path']], [os.sep + 'path', None, None]),
([['config:install_tree', os.sep + 'path']], [os.sep + 'path', None, None]),
([['config:install_tree:projections', {'all': '{name}'}]],
[None, None, {'all': '{name}'}]),
([['config:install_path_scheme', '{name}']],
[None, None, {'all': '{name}'}]),
])
def test_parse_install_tree(config_settings, expected, mutable_config):
expected_root = expected[0] or spack.store.default_install_tree_root
expected_unpadded_root = expected[1] or expected_root
expected_proj = expected[2] or spack.directory_layout.default_projections
# config settings is a list of 2-element lists, [path, value]
# where path is a config path and value is the value to set at that path
# these can be "splatted" in as the arguments to config.set
for config_setting in config_settings:
mutable_config.set(*config_setting)
config_dict = mutable_config.get('config')
root, unpadded_root, projections = spack.store.parse_install_tree(
config_dict)
assert root == expected_root
assert unpadded_root == expected_unpadded_root
assert projections == expected_proj
@pytest.mark.skipif(sys.platform == 'win32',
reason='Padding unsupported on Windows')
@pytest.mark.parametrize('config_settings,expected', [
([['config:install_tree:root', os.sep + 'path'],
['config:install_tree:padded_length', 11]],
[os.path.join(os.sep + 'path', PAD_STRING[:5]), os.sep + 'path', None]),
([['config:install_tree:root', '/path/$padding:11']],
[os.path.join(os.sep + 'path', PAD_STRING[:5]), os.sep + 'path', None]),
([['config:install_tree', '/path/${padding:11}']],
[os.path.join(os.sep + 'path', PAD_STRING[:5]), os.sep + 'path', None]),
([['config:install_tree:padded_length', False]], [None, None, None]),
([['config:install_tree:padded_length', True],
['config:install_tree:root', os.sep + 'path']],
[full_padded_string, os.sep + 'path', None]),
([['config:install_tree:', os.sep + 'path$padding']],
[full_padded_string, os.sep + 'path', None]),
([['config:install_tree:', os.sep + 'path' + os.sep + '${padding}']],
[full_padded_string, os.sep + 'path', None]),
])
def test_parse_install_tree_padded(config_settings, expected, mutable_config):
expected_root = expected[0] or spack.store.default_install_tree_root
expected_unpadded_root = expected[1] or expected_root
expected_proj = expected[2] or spack.directory_layout.default_projections
# config settings is a list of 2-element lists, [path, value]
# where path is a config path and value is the value to set at that path
# these can be "splatted" in as the arguments to config.set
for config_setting in config_settings:
mutable_config.set(*config_setting)
config_dict = mutable_config.get('config')
root, unpadded_root, projections = spack.store.parse_install_tree(
config_dict)
assert root == expected_root
assert unpadded_root == expected_unpadded_root
assert projections == expected_proj
def test_read_config(mock_low_high_config, write_config_file):
write_config_file('config', config_low, 'low')
assert spack.config.get('config') == config_low['config']
def test_read_config_override_all(mock_low_high_config, write_config_file):
write_config_file('config', config_low, 'low')
write_config_file('config', config_override_all, 'high')
assert spack.config.get('config') == {
'install_tree': {
'root': 'override_all'
}
}
def test_read_config_override_key(mock_low_high_config, write_config_file):
write_config_file('config', config_low, 'low')
write_config_file('config', config_override_key, 'high')
assert spack.config.get('config') == {
'install_tree': {
'root': 'override_key'
},
'build_stage': ['path1', 'path2', 'path3']
}
def test_read_config_merge_list(mock_low_high_config, write_config_file):
write_config_file('config', config_low, 'low')
write_config_file('config', config_merge_list, 'high')
assert spack.config.get('config') == {
'install_tree': {
'root': 'install_tree_path'
},
'build_stage': ['patha', 'pathb', 'path1', 'path2', 'path3']
}
def test_read_config_override_list(mock_low_high_config, write_config_file):
write_config_file('config', config_low, 'low')
write_config_file('config', config_override_list, 'high')
assert spack.config.get('config') == {
'install_tree': {
'root': 'install_tree_path'
},
'build_stage': config_override_list['config']['build_stage:']
}
def test_ordereddict_merge_order():
""""Test that source keys come before dest keys in merge_yaml results."""
source = syaml.syaml_dict([
("k1", "v1"),
("k2", "v2"),
("k3", "v3"),
])
dest = syaml.syaml_dict([
("k4", "v4"),
("k3", "WRONG"),
("k5", "v5"),
])
result = spack.config.merge_yaml(dest, source)
assert "WRONG" not in result.values()
expected_keys = ["k1", "k2", "k3", "k4", "k5"]
expected_items = [
("k1", "v1"), ("k2", "v2"), ("k3", "v3"), ("k4", "v4"), ("k5", "v5")
]
assert expected_keys == list(result.keys())
assert expected_items == list(result.items())
def test_list_merge_order():
""""Test that source lists are prepended to dest."""
source = ["a", "b", "c"]
dest = ["d", "e", "f"]
result = spack.config.merge_yaml(dest, source)
assert ["a", "b", "c", "d", "e", "f"] == result
def test_internal_config_update(mock_low_high_config, write_config_file):
write_config_file('config', config_low, 'low')
before = mock_low_high_config.get('config')
assert before['install_tree']['root'] == 'install_tree_path'
# add an internal configuration scope
scope = spack.config.InternalConfigScope('command_line')
assert 'InternalConfigScope' in repr(scope)
mock_low_high_config.push_scope(scope)
command_config = mock_low_high_config.get('config', scope='command_line')
command_config['install_tree'] = {'root': 'foo/bar'}
mock_low_high_config.set('config', command_config, scope='command_line')
after = mock_low_high_config.get('config')
assert after['install_tree']['root'] == 'foo/bar'
def test_internal_config_filename(mock_low_high_config, write_config_file):
write_config_file('config', config_low, 'low')
mock_low_high_config.push_scope(
spack.config.InternalConfigScope('command_line'))
with pytest.raises(NotImplementedError):
mock_low_high_config.get_config_filename('command_line', 'config')
def test_mark_internal():
data = {
'config': {
'bool': False,
'int': 6,
'numbers': [1, 2, 3],
'string': 'foo',
'dict': {
'more_numbers': [1, 2, 3],
'another_string': 'foo',
'another_int': 7,
}
}
}
marked = spack.config._mark_internal(data, 'x')
# marked version should be equal to the original
assert data == marked
def assert_marked(obj):
if type(obj) is bool:
return # can't subclass bool, so can't mark it
assert hasattr(obj, '_start_mark') and obj._start_mark.name == 'x'
assert hasattr(obj, '_end_mark') and obj._end_mark.name == 'x'
# everything in the marked version should have marks
checks = (marked.keys(), marked.values(),
marked['config'].keys(), marked['config'].values(),
marked['config']['numbers'],
marked['config']['dict'].keys(),
marked['config']['dict'].values(),
marked['config']['dict']['more_numbers'])
for seq in checks:
for obj in seq:
assert_marked(obj)
def test_internal_config_from_data():
config = spack.config.Configuration()
# add an internal config initialized from an inline dict
config.push_scope(spack.config.InternalConfigScope('_builtin', {
'config': {
'verify_ssl': False,
'build_jobs': 6,
}
}))
assert config.get('config:verify_ssl', scope='_builtin') is False
assert config.get('config:build_jobs', scope='_builtin') == 6
assert config.get('config:verify_ssl') is False
assert config.get('config:build_jobs') == 6
# push one on top and see what happens.
config.push_scope(spack.config.InternalConfigScope('higher', {
'config': {
'checksum': True,
'verify_ssl': True,
}
}))
assert config.get('config:verify_ssl', scope='_builtin') is False
assert config.get('config:build_jobs', scope='_builtin') == 6
assert config.get('config:verify_ssl', scope='higher') is True
assert config.get('config:build_jobs', scope='higher') is None
assert config.get('config:verify_ssl') is True
assert config.get('config:build_jobs') == 6
assert config.get('config:checksum') is True
assert config.get('config:checksum', scope='_builtin') is None
assert config.get('config:checksum', scope='higher') is True
def test_keys_are_ordered():
"""Test that keys in Spack YAML files retain their order from the file."""
expected_order = (
'bin',
'man',
'share/man',
'share/aclocal',
'lib',
'lib64',
'include',
'lib/pkgconfig',
'lib64/pkgconfig',
'share/pkgconfig',
''
)
config_scope = spack.config.ConfigScope(
'modules',
os.path.join(spack.paths.test_path, 'data', 'config')
)
data = config_scope.get_section('modules')
prefix_inspections = data['modules']['prefix_inspections']
for actual, expected in zip(prefix_inspections, expected_order):
assert actual == expected
def test_config_format_error(mutable_config):
"""This is raised when we try to write a bad configuration."""
with pytest.raises(spack.config.ConfigFormatError):
spack.config.set('compilers', {'bad': 'data'}, scope='site')
def get_config_error(filename, schema, yaml_string):
"""Parse a YAML string and return the resulting ConfigFormatError.
    Fail if there is no ConfigFormatError.
"""
with open(filename, 'w') as f:
f.write(yaml_string)
# parse and return error, or fail.
try:
spack.config.read_config_file(filename, schema)
except spack.config.ConfigFormatError as e:
return e
else:
pytest.fail('ConfigFormatError was not raised!')
def test_config_parse_dict_in_list(tmpdir):
with tmpdir.as_cwd():
e = get_config_error(
'repos.yaml', spack.schema.repos.schema, """\
repos:
- https://foobar.com/foo
- https://foobar.com/bar
- error:
- abcdef
- https://foobar.com/baz
""")
assert "repos.yaml:4" in str(e)
def test_config_parse_str_not_bool(tmpdir):
with tmpdir.as_cwd():
e = get_config_error(
'config.yaml', spack.schema.config.schema, """\
config:
verify_ssl: False
checksum: foobar
dirty: True
""")
assert "config.yaml:3" in str(e)
def test_config_parse_list_in_dict(tmpdir):
with tmpdir.as_cwd():
e = get_config_error(
'mirrors.yaml', spack.schema.mirrors.schema, """\
mirrors:
foo: http://foobar.com/baz
bar: http://barbaz.com/foo
baz: http://bazfoo.com/bar
travis: [1, 2, 3]
""")
assert "mirrors.yaml:5" in str(e)
def test_bad_config_section(mock_low_high_config):
"""Test that getting or setting a bad section gives an error."""
with pytest.raises(spack.config.ConfigSectionError):
spack.config.set('foobar', 'foobar')
with pytest.raises(spack.config.ConfigSectionError):
spack.config.get('foobar')
@pytest.mark.skipif(sys.platform == 'win32',
reason="Not supported on Windows (yet)")
@pytest.mark.skipif(getuid() == 0, reason='user is root')
def test_bad_command_line_scopes(tmpdir, mock_low_high_config):
cfg = spack.config.Configuration()
with tmpdir.as_cwd():
with pytest.raises(spack.config.ConfigError):
spack.config._add_command_line_scopes(cfg, ['bad_path'])
touch('unreadable_file')
with pytest.raises(spack.config.ConfigError):
spack.config._add_command_line_scopes(cfg, ['unreadable_file'])
mkdirp('unreadable_dir')
with pytest.raises(spack.config.ConfigError):
try:
os.chmod('unreadable_dir', 0)
spack.config._add_command_line_scopes(cfg, ['unreadable_dir'])
finally:
os.chmod('unreadable_dir', 0o700) # so tmpdir can be removed
def test_add_command_line_scopes(tmpdir, mutable_config):
config_yaml = str(tmpdir.join('config.yaml'))
with open(config_yaml, 'w') as f:
f.write("""\
config:
verify_ssl: False
dirty: False
""")
spack.config._add_command_line_scopes(mutable_config, [str(tmpdir)])
def test_nested_override():
"""Ensure proper scope naming of nested overrides."""
base_name = spack.config.overrides_base_name
def _check_scopes(num_expected, debug_values):
scope_names = [s.name for s in spack.config.config.scopes.values() if
s.name.startswith(base_name)]
for i in range(num_expected):
name = '{0}{1}'.format(base_name, i)
assert name in scope_names
data = spack.config.config.get_config('config', name)
assert data['debug'] == debug_values[i]
# Check results from single and nested override
with spack.config.override('config:debug', True):
with spack.config.override('config:debug', False):
_check_scopes(2, [True, False])
_check_scopes(1, [True])
def test_alternate_override(monkeypatch):
"""Ensure proper scope naming of override when conflict present."""
base_name = spack.config.overrides_base_name
def _matching_scopes(regexpr):
return [spack.config.InternalConfigScope('{0}1'.format(base_name))]
# Check that the alternate naming works
monkeypatch.setattr(spack.config.config, 'matching_scopes',
_matching_scopes)
with spack.config.override('config:debug', False):
name = '{0}2'.format(base_name)
scope_names = [s.name for s in spack.config.config.scopes.values() if
s.name.startswith(base_name)]
assert name in scope_names
data = spack.config.config.get_config('config', name)
assert data['debug'] is False
def test_immutable_scope(tmpdir):
config_yaml = str(tmpdir.join('config.yaml'))
with open(config_yaml, 'w') as f:
f.write("""\
config:
install_tree:
root: dummy_tree_value
""")
scope = spack.config.ImmutableConfigScope('test', str(tmpdir))
data = scope.get_section('config')
assert data['config']['install_tree'] == {'root': 'dummy_tree_value'}
with pytest.raises(spack.config.ConfigError):
scope._write_section('config')
def test_single_file_scope(config, env_yaml):
scope = spack.config.SingleFileScope(
'env', env_yaml, spack.schema.env.schema, ['env']
)
with spack.config.override(scope):
# from the single-file config
assert spack.config.get('config:verify_ssl') is False
assert spack.config.get('config:dirty') is False
assert spack.config.get('packages:libelf:compiler') == ['[email protected]']
# from the lower config scopes
assert spack.config.get('config:checksum') is True
assert spack.config.get('config:checksum') is True
assert spack.config.get('packages:externalmodule:buildable') is False
assert spack.config.get('repos') == [
'/x/y/z', '$spack/var/spack/repos/builtin']
def test_single_file_scope_section_override(tmpdir, config):
"""Check that individual config sections can be overridden in an
environment config. The config here primarily differs in that the
``packages`` section is intended to override all other scopes (using the
"::" syntax).
"""
env_yaml = str(tmpdir.join("env.yaml"))
with open(env_yaml, 'w') as f:
f.write("""\
env:
config:
verify_ssl: False
packages::
libelf:
compiler: [ '[email protected]' ]
repos:
- /x/y/z
""")
scope = spack.config.SingleFileScope(
'env', env_yaml, spack.schema.env.schema, ['env'])
with spack.config.override(scope):
# from the single-file config
assert spack.config.get('config:verify_ssl') is False
assert spack.config.get('packages:libelf:compiler') == ['[email protected]']
# from the lower config scopes
assert spack.config.get('config:checksum') is True
assert not spack.config.get('packages:externalmodule')
assert spack.config.get('repos') == [
'/x/y/z', '$spack/var/spack/repos/builtin']
def test_write_empty_single_file_scope(tmpdir):
env_schema = spack.schema.env.schema
scope = spack.config.SingleFileScope(
'test', str(tmpdir.ensure('config.yaml')), env_schema, ['spack'])
scope._write_section('config')
# confirm we can write empty config
assert not scope.get_section('config')
def check_schema(name, file_contents):
"""Check a Spack YAML schema against some data"""
f = StringIO(file_contents)
data = syaml.load_config(f)
spack.config.validate(data, name)
def test_good_env_yaml(tmpdir):
check_schema(spack.schema.env.schema, """\
spack:
config:
verify_ssl: False
dirty: False
repos:
- ~/my/repo/location
mirrors:
remote: /foo/bar/baz
compilers:
- compiler:
spec: [email protected]
operating_system: cnl
modules: []
paths:
cc: /path/to/cc
cxx: /path/to/cxx
fc: /path/to/fc
f77: /path/to/f77
""")
def test_bad_env_yaml(tmpdir):
with pytest.raises(spack.config.ConfigFormatError):
check_schema(spack.schema.env.schema, """\
env:
foobar:
verify_ssl: False
dirty: False
""")
def test_bad_config_yaml(tmpdir):
with pytest.raises(spack.config.ConfigFormatError):
check_schema(spack.schema.config.schema, """\
config:
verify_ssl: False
install_tree:
root:
extra_level: foo
""")
def test_bad_mirrors_yaml(tmpdir):
with pytest.raises(spack.config.ConfigFormatError):
check_schema(spack.schema.mirrors.schema, """\
mirrors:
local: True
""")
def test_bad_repos_yaml(tmpdir):
with pytest.raises(spack.config.ConfigFormatError):
check_schema(spack.schema.repos.schema, """\
repos:
True
""")
def test_bad_compilers_yaml(tmpdir):
with pytest.raises(spack.config.ConfigFormatError):
check_schema(spack.schema.compilers.schema, """\
compilers:
key_instead_of_list: 'value'
""")
with pytest.raises(spack.config.ConfigFormatError):
check_schema(spack.schema.compilers.schema, """\
compilers:
- shmompiler:
environment: /bad/value
""")
with pytest.raises(spack.config.ConfigFormatError):
check_schema(spack.schema.compilers.schema, """\
compilers:
- compiler:
fenfironfent: /bad/value
""")
def test_internal_config_section_override(mock_low_high_config,
write_config_file):
write_config_file('config', config_merge_list, 'low')
wanted_list = config_override_list['config']['build_stage:']
mock_low_high_config.push_scope(spack.config.InternalConfigScope
('high', {
'config:': {
'build_stage': wanted_list
}
}))
assert mock_low_high_config.get('config:build_stage') == wanted_list
def test_internal_config_dict_override(mock_low_high_config,
write_config_file):
write_config_file('config', config_merge_dict, 'low')
wanted_dict = config_override_dict['config']['info:']
mock_low_high_config.push_scope(spack.config.InternalConfigScope
('high', config_override_dict))
assert mock_low_high_config.get('config:info') == wanted_dict
def test_internal_config_list_override(mock_low_high_config,
write_config_file):
write_config_file('config', config_merge_list, 'low')
wanted_list = config_override_list['config']['build_stage:']
mock_low_high_config.push_scope(spack.config.InternalConfigScope
('high', config_override_list))
assert mock_low_high_config.get('config:build_stage') == wanted_list
def test_set_section_override(mock_low_high_config, write_config_file):
write_config_file('config', config_merge_list, 'low')
wanted_list = config_override_list['config']['build_stage:']
with spack.config.override('config::build_stage', wanted_list):
assert mock_low_high_config.get('config:build_stage') == wanted_list
assert config_merge_list['config']['build_stage'] == \
mock_low_high_config.get('config:build_stage')
def test_set_list_override(mock_low_high_config, write_config_file):
write_config_file('config', config_merge_list, 'low')
wanted_list = config_override_list['config']['build_stage:']
with spack.config.override('config:build_stage:', wanted_list):
assert wanted_list == mock_low_high_config.get('config:build_stage')
assert config_merge_list['config']['build_stage'] == \
mock_low_high_config.get('config:build_stage')
def test_set_dict_override(mock_low_high_config, write_config_file):
write_config_file('config', config_merge_dict, 'low')
wanted_dict = config_override_dict['config']['info:']
with spack.config.override('config:info:', wanted_dict):
assert wanted_dict == mock_low_high_config.get('config:info')
assert config_merge_dict['config']['info'] == \
mock_low_high_config.get('config:info')
def test_set_bad_path(config):
with pytest.raises(syaml.SpackYAMLError, match='Illegal leading'):
with spack.config.override(':bad:path', ''):
pass
def test_bad_path_double_override(config):
with pytest.raises(syaml.SpackYAMLError,
match='Meaningless second override'):
with spack.config.override('bad::double:override::directive', ''):
pass
@pytest.mark.regression('22547')
def test_single_file_scope_cache_clearing(env_yaml):
scope = spack.config.SingleFileScope(
'env', env_yaml, spack.schema.env.schema, ['env']
)
# Check that we can retrieve data from the single file scope
before = scope.get_section('config')
assert before
# Clear the cache of the Single file scope
scope.clear()
    # Check that the section can be retrieved again and it's
# the same as before
after = scope.get_section('config')
assert after
assert before == after
@pytest.mark.regression('22611')
def test_internal_config_scope_cache_clearing():
"""
An InternalConfigScope object is constructed from data that is already
in memory, therefore it doesn't have any cache to clear. Here we ensure
    that calling the clear method is consistent with that.
"""
data = {
'config': {
'build_jobs': 10
}
}
internal_scope = spack.config.InternalConfigScope('internal', data)
# Ensure that the initial object is properly set
assert internal_scope.sections['config'] == data
# Call the clear method
internal_scope.clear()
# Check that this didn't affect the scope object
assert internal_scope.sections['config'] == data
def test_system_config_path_is_overridable(working_env):
p = "/some/path"
os.environ['SPACK_SYSTEM_CONFIG_PATH'] = p
assert spack.paths._get_system_config_path() == p
def test_system_config_path_is_default_when_env_var_is_empty(working_env):
os.environ['SPACK_SYSTEM_CONFIG_PATH'] = ''
assert os.sep + os.path.join('etc', 'spack') == \
spack.paths._get_system_config_path()
def test_user_config_path_is_overridable(working_env):
p = "/some/path"
os.environ['SPACK_USER_CONFIG_PATH'] = p
assert p == spack.paths._get_user_config_path()
def test_user_config_path_is_default_when_env_var_is_empty(working_env):
os.environ['SPACK_USER_CONFIG_PATH'] = ''
assert os.path.expanduser("~%s.spack" % os.sep) == \
spack.paths._get_user_config_path()
def test_local_config_can_be_disabled(working_env):
os.environ['SPACK_DISABLE_LOCAL_CONFIG'] = 'true'
cfg = spack.config._config()
assert "defaults" in cfg.scopes
assert "system" not in cfg.scopes
assert "site" in cfg.scopes
assert "user" not in cfg.scopes
os.environ['SPACK_DISABLE_LOCAL_CONFIG'] = ''
cfg = spack.config._config()
assert "defaults" in cfg.scopes
assert "system" not in cfg.scopes
assert "site" in cfg.scopes
assert "user" not in cfg.scopes
del os.environ['SPACK_DISABLE_LOCAL_CONFIG']
cfg = spack.config._config()
assert "defaults" in cfg.scopes
assert "system" in cfg.scopes
assert "site" in cfg.scopes
assert "user" in cfg.scopes
def test_user_cache_path_is_overridable(working_env):
p = "/some/path"
os.environ['SPACK_USER_CACHE_PATH'] = p
assert spack.paths._get_user_cache_path() == p
def test_user_cache_path_is_default_when_env_var_is_empty(working_env):
os.environ['SPACK_USER_CACHE_PATH'] = ''
assert os.path.expanduser("~%s.spack" % os.sep) == \
spack.paths._get_user_cache_path()
| 32.741036 | 85 | 0.625627 |
4a27adb558b3f23d40e9b2fdd738f532713f34f9 | 2,437 | py | Python | ANNForFingerprint/NeuralNetwork/data.py | shams-sam/logic-lab | 559990b0c3d44bfe59d32dcb8038a0cab3efc26e | [
"MIT"
] | null | null | null | ANNForFingerprint/NeuralNetwork/data.py | shams-sam/logic-lab | 559990b0c3d44bfe59d32dcb8038a0cab3efc26e | [
"MIT"
] | null | null | null | ANNForFingerprint/NeuralNetwork/data.py | shams-sam/logic-lab | 559990b0c3d44bfe59d32dcb8038a0cab3efc26e | [
"MIT"
] | null | null | null | import numpy, random
from scipy import misc
from config import *
import random
#--------------------------------------------------------------------------------
# . .
# ,-| ,-. |- ,-. ,-. ,-. ,-. ,-. ,-. ,-. ,-. . ,-. ,-.
# | | ,-| | ,-| | | | | | | |-' `-. `-. | | | | |
# `-^ `-^ `' `-^ |-' ' `-' `-' `-' `-' `-' ' ' ' `-|
# | ,|
# ' `'
#--------------------------------------------------------------------------------
class Processor:
def import_bmp(self, path, flatten):
image_mat = misc.imread(path, flatten = flatten)
return image_mat
def folder_namer(self, num):
return str(num).zfill(3)
def file_namer(self, num, hand_label, finger_key, image_index):
folder_name = self.folder_namer(num)
return '_'.join([folder_name, hand_label + finger_key, image_index + '.bmp'])
def get_path(self, path, num, hand_label, finger_key, image_index):
return '/'.join([path, self.folder_namer(num), hand_label, self.file_namer(num, hand_label, finger_key, image_index)])
def data_prefetch(self):
offset = 0 if config['zero_indexed'] else 1
X = numpy.empty((nn_config[0], 1), int)
Y = numpy.empty((nn_config[2], 1), int)
for num in xrange(0 + offset, config['m'] + offset):
for hand_label in hand_labels:
for finger_key in finger_keys:
for image_index in image_indices:
finger_print = self.import_bmp(self.get_path(config['dataset_path'], num, hand_label, finger_key, image_index), 0)
finger_print = finger_print.reshape(nn_config[0], 1)
X = numpy.concatenate((X, finger_print), axis = 1)
Y_i = numpy.zeros(100)
Y_i[num] = 1
Y = numpy.concatenate((Y, Y_i.reshape(config['output_bit_encoding'], 1)), axis = 1)
X = numpy.delete(X, (0), axis = 1).T
X = X/255.0
Y = numpy.delete(Y, (0), axis = 1).T
return zip(X, Y)
def get_dataset(self):
data = []
print 'fetching dataset ... '
for elem in self.data_prefetch():
data.append([elem[0].reshape(nn_config[0], 1), elem[1].reshape(config['output_bit_encoding'], 1)])
random.shuffle(data)
return data
| 43.517857 | 138 | 0.479688 |
4a27adb5b0c058c42f0e2d67f54eeb94efd60499 | 2,274 | py | Python | _4_CPython_ctypes/tests.py | teyrana/py_opt_talk | 7be0e5946a59d2d0af3534e94395a46fcc5afbed | [
"BSD-2-Clause"
] | null | null | null | _4_CPython_ctypes/tests.py | teyrana/py_opt_talk | 7be0e5946a59d2d0af3534e94395a46fcc5afbed | [
"BSD-2-Clause"
] | null | null | null | _4_CPython_ctypes/tests.py | teyrana/py_opt_talk | 7be0e5946a59d2d0af3534e94395a46fcc5afbed | [
"BSD-2-Clause"
] | null | null | null | #!/usr/bin/env python3
import unittest
from bst import BinaryTreeNode as node
from bst import BinarySearchTree
class TestBinaryTree(unittest.TestCase):
def test_create_from_list(self):
"""This method creates a fully-populated binary-search-tree of depth 4, on the numbers: [0, 30]"""
#
# ___________________15_____________________
# / \
# ______7_______ __________23_________
# / \ / \
# __3__ ___11___ ____19___ ____27___
# / \ / \ / \ / \
# 1 5 9 _13 _17 _21 _25 _29
# / \ / \ / \ / \ / \ / \ / \ / \
# 0 2 4 6 8 10 12 14 16 18 20 22 24 26 28 30
#
# If we add the above values in the correct order, the tree is balanced, for free.
tree = BinarySearchTree([15,
7, 23,
3, 11, 19, 27,
1, 5, 9, 13, 17, 21, 25, 29,
0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30])
# DEBUG
# print(str(root))
# spot check -- *not full* check:
self.assertEqual(node.value(tree), 15)
self.assertEqual(node.value(node.right(tree)), 23)
self.assertEqual(node.value(node.left(node.right(tree))), 19)
self.assertEqual(node.value(node.left(node.left(node.right(tree)))), 17)
self.assertEqual(node.value(node.left(node.left(node.left(node.right(tree))))), 16)
self.assertEqual(node.value(node.right(node.left(node.left(node.right(tree))))), 18)
        # Again, spot checks -- verify that the tree can find values it contains
self.assertEqual(tree.search(tree, 8), 8)
self.assertEqual(tree.search(tree, 16), 16)
self.assertEqual(tree.search(tree, 18), 18)
self.assertEqual(tree.search(tree, 24), 24)
if __name__ == '__main__':
unittest.main()
| 42.90566 | 106 | 0.467898 |
4a27ae89766938b282fd54e925c68aa8126bcb65 | 1,657 | py | Python | cf_speedtest/options.py | 12932/cf-speedtest | 8f2139915c4d4eb8d017b755b6e2368fc77d9f97 | [
"MIT"
] | 9 | 2021-01-03T03:13:26.000Z | 2022-03-21T12:42:04.000Z | cf_speedtest/options.py | 12932/cf-speedtest | 8f2139915c4d4eb8d017b755b6e2368fc77d9f97 | [
"MIT"
] | null | null | null | cf_speedtest/options.py | 12932/cf-speedtest | 8f2139915c4d4eb8d017b755b6e2368fc77d9f97 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
import argparse
def str_to_bool(s: str) -> bool:
if isinstance(s, bool):
return s
if s.lower() in ('yes', 'true', 't', 'y', '1'):
return True
elif s.lower() in ('no', 'false', 'f', 'n', '0'):
return False
else:
raise argparse.ArgumentTypeError(f"Boolean value expected, received {s!r}")
def valid_percentile(s: str) -> int:
try:
value = int(s)
except ValueError:
raise argparse.ArgumentTypeError(f"Expected integer between 0 and 100, received {s!r}")
if not (0 <= value <= 100):
raise argparse.ArgumentTypeError(f"Expected integer between 0 and 100, received {s}")
return value
def add_run_options(parser: argparse.ArgumentParser) -> argparse.ArgumentParser:
parser.add_argument(
"--output", "-o",
type=str,
help="The file to output the csv data of measurements to"
)
parser.add_argument(
'--percentile', "-p",
default=90,
type=valid_percentile,
help=("The percentile of measurements to be considered download speed "
" where default is 90 https://en.wikipedia.org/wiki/Percentile")
)
parser.add_argument(
'--verifyssl', "-k",
default=True,
type=str_to_bool,
help=("Whether to verify that the server connection is secure by validating the server "
"certificate has the correct name and verifies successfully using this machines certificate store")
)
parser.add_argument(
'--proxy', "-x",
default=None,
type=str,
help=("Use the specified proxy. Supports HTTP/HTTPS/SOCKS5 with or without authentication")
)
parser.add_argument(
"--testpatience",
type=int,
default=20,
help="The longest time to wait for an individual test to run"
)
return parser | 26.725806 | 101 | 0.69825 |
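# A minimal sketch (not part of the original module) showing how the parser
# returned by add_run_options might be used; the program name "cf-speedtest"
# is an assumption made for illustration only.
if __name__ == "__main__":
    _example_parser = add_run_options(argparse.ArgumentParser(prog="cf-speedtest"))
    _example_args = _example_parser.parse_args(["--percentile", "95", "-k", "false"])
    # valid_percentile keeps the value in 0..100 and str_to_bool parses the flag
    print(_example_args.percentile, _example_args.verifyssl)  # -> 95 False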
4a27aedf3e7f2fb54e1df7c182a979fb7cb0c835 | 3,682 | py | Python | upstream/byol_a/expert.py | AmirHussein96/Self-Supervised-Speech-Pretraining-and-Representation-Learning | fa8dd981b90973d108687f18593ca131bb82e782 | [
"MIT"
] | 3 | 2021-08-07T19:12:56.000Z | 2022-03-29T15:16:31.000Z | upstream/byol_a/expert.py | AmirHussein96/Self-Supervised-Speech-Pretraining-and-Representation-Learning | fa8dd981b90973d108687f18593ca131bb82e782 | [
"MIT"
] | 2 | 2021-07-28T20:35:59.000Z | 2021-07-30T16:01:53.000Z | upstream/byol_a/expert.py | AmirHussein96/Self-Supervised-Speech-Pretraining-and-Representation-Learning | fa8dd981b90973d108687f18593ca131bb82e782 | [
"MIT"
] | 2 | 2021-07-21T11:05:26.000Z | 2021-07-22T09:46:38.000Z | # -*- coding: utf-8 -*- #
"""*********************************************************************************************"""
# FileName [ upstream/byol_a/expert.py ]
# Synopsis [ the BYOL-Audio wrapper ]
# Author [ S3PRL ]
# Copyright [ Copyleft(c), Speech Lab, NTU, Taiwan ]
"""*********************************************************************************************"""
###############
# IMPORTATION #
###############
import math
#-------------#
import torch
import torch.nn as nn
from torch.nn.utils.rnn import pad_sequence
#-------------#
import torchaudio
#-------------#
from .byol_a import load_yaml_config, PrecomputedNorm, AudioNTT2020
###################
# UPSTREAM EXPERT #
###################
class UpstreamExpert(nn.Module):
"""
The BYOL-A wrapper
"""
def __init__(self, ckpt, model_config=None, **kwargs):
super(UpstreamExpert, self).__init__()
if model_config is not None:
print('[UpstreamExpert] - Using upstream expert config file from:', model_config)
else:
model_config = './upstream/byol_a/config.yaml'
config = load_yaml_config(model_config)
# Preprocessor and normalizer.
self.to_melspec = torchaudio.transforms.MelSpectrogram(
sample_rate=config.sample_rate,
n_fft=config.n_fft,
win_length=config.win_length,
hop_length=config.hop_length,
n_mels=config.n_mels,
f_min=config.f_min,
f_max=config.f_max,
)
stats = [-5.4919195, 5.0389895] # provided by authors
self.normalizer = PrecomputedNorm(stats)
# Load pretrained weights.
self.model = AudioNTT2020(d=config.feature_d)
self.model.load_weight(ckpt, device='cpu')
# attributes
self.output_dim = config.feature_d
self.max_input_length = config.shape[-1]
# Interface
def get_output_dim(self):
return self.output_dim
# Interface
def get_downsample_rate(self):
return 15344.655344655344 # computed by: len(wavs[0]) / len(features[0]) * self.max_input_length
# forward in chunks
def forward_in_chunks(self, features):
outputs = []
for i in range(0, features.size(1), self.max_input_length):
subseq = features[:, i:i+self.max_input_length, :]
if subseq.size(1) < self.max_input_length: break # break if the chunk is too small for the model to forward
feats = self.model(subseq.permute(0, 2, 1).unsqueeze(1)) # features: (B, 1, F, T)
outputs.append(feats.unsqueeze(1)) # (B, 1, D)
outputs = torch.cat(outputs, dim=1) # (B, T, D)
return outputs
# Interface
def forward(self, wavs):
"""
Args:
wavs:
list of unpadded wavs [wav1, wav2, ...]
each wav is in torch.FloatTensor with sample rate 16000
and already put in the device assigned by command-line args
Return:
features:
list of unpadded features [feat1, feat2, ...]
each feat is in torch.FloatTensor and already
put in the device assigned by command-line args
"""
features = [self.normalizer((self.to_melspec(wav) + torch.finfo(torch.float).eps).log()).permute(1, 0) for wav in wavs] # features: (B, T, F)
features = pad_sequence(features, batch_first=True)
# forward the sequence in chunks then concat
features = self.forward_in_chunks(features)
return {
"last_hidden_state": features,
"hidden_states": [features],
}
| 35.066667 | 149 | 0.560022 |
4a27afffdee9892827a468365a4a03d4d0e7a262 | 141 | py | Python | mlutils/experiment/__init__.py | marcopodda/mldatautils | 57bf5d6ee2fb62d9dffd4b344d7d91eb8795457d | [
"MIT"
] | 2 | 2020-03-06T19:55:53.000Z | 2020-03-07T14:14:53.000Z | mlutils/experiment/__init__.py | marcopodda/mldatautils | 57bf5d6ee2fb62d9dffd4b344d7d91eb8795457d | [
"MIT"
] | null | null | null | mlutils/experiment/__init__.py | marcopodda/mldatautils | 57bf5d6ee2fb62d9dffd4b344d7d91eb8795457d | [
"MIT"
] | null | null | null | from .experiment import Experiment
from .model_selection.selector import ModelSelector
from .model_evaluation.evaluator import ModelEvaluator | 47 | 54 | 0.886525 |
4a27b1ad447dcfa1f8426d8f321052427c83afca | 2,589 | py | Python | autotabular/pipeline/components/classification/k_nearest_neighbors.py | jianzhnie/AutoTabular | fb407300adf97532a26d33f7442d2a606fa30512 | [
"Apache-2.0"
] | 48 | 2021-09-06T08:09:26.000Z | 2022-03-28T13:02:54.000Z | autotabular/pipeline/components/classification/k_nearest_neighbors.py | Fanxingye/Autotabular | d630c78290a52f8c73885afb16884e18135c34f6 | [
"Apache-2.0"
] | null | null | null | autotabular/pipeline/components/classification/k_nearest_neighbors.py | Fanxingye/Autotabular | d630c78290a52f8c73885afb16884e18135c34f6 | [
"Apache-2.0"
] | 7 | 2021-09-23T07:28:46.000Z | 2021-10-02T21:15:18.000Z | from autotabular.pipeline.components.base import AutotabularClassificationAlgorithm
from autotabular.pipeline.constants import DENSE, PREDICTIONS, SPARSE, UNSIGNED_DATA
from ConfigSpace.configuration_space import ConfigurationSpace
from ConfigSpace.hyperparameters import CategoricalHyperparameter, UniformIntegerHyperparameter
class KNearestNeighborsClassifier(AutotabularClassificationAlgorithm):
def __init__(self, n_neighbors, weights, p, random_state=None):
self.n_neighbors = n_neighbors
self.weights = weights
self.p = p
self.random_state = random_state
def fit(self, X, Y):
import sklearn.neighbors
import sklearn.multiclass
estimator = \
sklearn.neighbors.KNeighborsClassifier(n_neighbors=self.n_neighbors,
weights=self.weights,
p=self.p)
if len(Y.shape) == 2 and Y.shape[1] > 1:
self.estimator = sklearn.multiclass.OneVsRestClassifier(
estimator, n_jobs=1)
else:
self.estimator = estimator
self.estimator.fit(X, Y)
return self
def predict(self, X):
if self.estimator is None:
raise NotImplementedError()
return self.estimator.predict(X)
def predict_proba(self, X):
if self.estimator is None:
raise NotImplementedError()
return self.estimator.predict_proba(X)
@staticmethod
def get_properties(dataset_properties=None):
return {
'shortname': 'KNN',
'name': 'K-Nearest Neighbor Classification',
'handles_regression': False,
'handles_classification': True,
'handles_multiclass': True,
'handles_multilabel': True,
'handles_multioutput': False,
'is_deterministic': True,
'input': (DENSE, SPARSE, UNSIGNED_DATA),
'output': (PREDICTIONS, )
}
@staticmethod
def get_hyperparameter_search_space(dataset_properties=None):
cs = ConfigurationSpace()
n_neighbors = UniformIntegerHyperparameter(
name='n_neighbors', lower=1, upper=100, log=True, default_value=1)
weights = CategoricalHyperparameter(
name='weights',
choices=['uniform', 'distance'],
default_value='uniform')
p = CategoricalHyperparameter(
name='p', choices=[1, 2], default_value=2)
cs.add_hyperparameters([n_neighbors, weights, p])
return cs
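# A brief usage sketch (not part of the upstream file), assuming scikit-learn,
# numpy and ConfigSpace are available alongside autotabular:
#
#     import numpy as np
#     clf = KNearestNeighborsClassifier(n_neighbors=3, weights='uniform', p=2)
#     X, y = np.random.rand(20, 4), np.random.randint(0, 2, size=20)
#     clf.fit(X, y)
#     clf.predict_proba(X[:5])
#     # the default ConfigSpace used for tuning can be inspected with:
#     KNearestNeighborsClassifier.get_hyperparameter_search_space()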
| 35.465753 | 95 | 0.62611 |
4a27b240182cf72bc394138978da17724853b3ef | 1,156 | py | Python | var/spack/repos/builtin/packages/py-jpype1/package.py | xiki-tempula/spack | 9d66c05e93ab8a933fc59915040c0e0c86a4aac4 | [
"ECL-2.0",
"Apache-2.0",
"MIT"
] | 9 | 2018-04-18T07:51:40.000Z | 2021-09-10T03:56:57.000Z | var/spack/repos/builtin/packages/py-jpype1/package.py | xiki-tempula/spack | 9d66c05e93ab8a933fc59915040c0e0c86a4aac4 | [
"ECL-2.0",
"Apache-2.0",
"MIT"
] | 907 | 2018-04-18T11:17:57.000Z | 2022-03-31T13:20:25.000Z | var/spack/repos/builtin/packages/py-jpype1/package.py | xiki-tempula/spack | 9d66c05e93ab8a933fc59915040c0e0c86a4aac4 | [
"ECL-2.0",
"Apache-2.0",
"MIT"
] | 29 | 2018-11-05T16:14:23.000Z | 2022-02-03T16:07:09.000Z | # Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class PyJpype1(PythonPackage):
"""JPype is an effort to allow python programs full access to java class
libraries."""
homepage = "https://github.com/originell/jpype"
url = "https://pypi.io/packages/source/J/JPype1/JPype1-0.6.2.tar.gz"
version('0.6.3', sha256='6841523631874a731e1f94e1b1f130686ad3772030eaa3b6946256eeb1d10dd1')
version('0.6.2', sha256='99206412d80b9d5a81a7cc205267ca63554403eb57f13420302e2f39bfad7f25')
version('0.6.1', sha256='0d366228b7b37b0266184161cc7ea1ce58f60199f6ec9451985149ea873774be')
version('0.6.0', sha256='f5d783520cb4c30595c3bc509065e30fc292ec7cfb57045141eae77c518bcdb0')
variant('numpy', default=False, description='Build numpy extensions')
depends_on('[email protected]:')
depends_on('py-setuptools', type='build')
depends_on('java', type=('build', 'run'))
# extra requirements
depends_on('[email protected]:', type=('build', 'run'), when='+numpy')
| 39.862069 | 95 | 0.737889 |
4a27b2a90db4552de38bb2bc063ac6de5ea140d2 | 819 | py | Python | tests/test_api.py | Wandao-Corp/mdformat | 9169a0892050ca05d9553fb43a401eba40f8523f | [
"MIT"
] | null | null | null | tests/test_api.py | Wandao-Corp/mdformat | 9169a0892050ca05d9553fb43a401eba40f8523f | [
"MIT"
] | null | null | null | tests/test_api.py | Wandao-Corp/mdformat | 9169a0892050ca05d9553fb43a401eba40f8523f | [
"MIT"
] | null | null | null | import pytest
import mdformat
UNFORMATTED_MARKDOWN = "\n\n# A header\n\n"
FORMATTED_MARKDOWN = "# A header\n"
def test_fmt_file(tmp_path):
file_path = tmp_path / "test_markdown.md"
# Use string argument
file_path.write_text(UNFORMATTED_MARKDOWN)
mdformat.file(str(file_path))
assert file_path.read_text() == FORMATTED_MARKDOWN
# Use pathlib.Path argument
file_path.write_text(UNFORMATTED_MARKDOWN)
mdformat.file(file_path)
assert file_path.read_text() == FORMATTED_MARKDOWN
def test_fmt_file__invalid_filename():
with pytest.raises(ValueError) as exc_info:
mdformat.file("this is not a valid filepath?`=|><@{[]\\/,.%¤#'")
assert "not a file" in str(exc_info.value)
def test_fmt_string():
assert mdformat.text(UNFORMATTED_MARKDOWN) == FORMATTED_MARKDOWN
| 26.419355 | 72 | 0.727717 |
4a27b3ed6fd00fac9c4e817856e20b2433c483ed | 5,771 | py | Python | ITM/dataset.py | rungjoo/dstc10 | 4a554c88aaea997ecfeda5ee0a8d79e5df34a5f2 | [
"MIT"
] | 2 | 2022-01-02T13:04:00.000Z | 2022-03-06T05:33:07.000Z | ITM/dataset.py | rungjoo/dstc10 | 4a554c88aaea997ecfeda5ee0a8d79e5df34a5f2 | [
"MIT"
] | null | null | null | ITM/dataset.py | rungjoo/dstc10 | 4a554c88aaea997ecfeda5ee0a8d79e5df34a5f2 | [
"MIT"
] | null | null | null | from torch.utils.data import Dataset, DataLoader
import random, pdb
from tqdm import tqdm
import json
import glob, os
import pickle
from PIL import Image, ImageFile
ImageFile.LOAD_TRUNCATED_IMAGES = True
# import matplotlib.pyplot as plt
class post_loader(Dataset):
def __init__(self, data_path, image_obj_path, description_path, fashion_path, furniture_path):
with open(data_path, 'r') as f: # './simmc2/data/simmc2_dials_dstc10_train.json'
json_data = json.load(f)
dialogue_data = json_data['dialogue_data']
""" Image input """
try:
with open(image_obj_path, 'rb') as f: # "../res/image_obj.pickle"
image_visual = pickle.load(f)
except:
image_obj_path = os.path.splitext(image_obj_path)[0]+'_py37'+os.path.splitext(image_obj_path)[1]
with open(image_obj_path, 'rb') as f: # "../res/image_obj.pickle"
image_visual = pickle.load(f)
image_des_list = glob.glob(description_path) # "./simmc2/data/public/*scene*"
with open(fashion_path, 'r') as f: # './simmc2/data/fashion_prefab_metadata_all.json'
fashion_metadata = json.load(f)
with open(furniture_path, 'r') as f: # './simmc2/data/furniture_prefab_metadata_all.json'
furniture_metadata = json.load(f)
non_visual_meta_type = ['customerReview', 'brand' , 'price', 'size', 'materials']
visual_meta_type = ['assetType', 'color' , 'pattern', 'sleeveLength', 'type']
self.post_input = {}
self.dial2object = {}
self.dial2rel = {}
self.dial2bg = {}
cnt = 0
for dialog_cnt, one_dialogue in enumerate(dialogue_data):
dialogue_idx, domain, mentioned_object_ids, scene_ids = one_dialogue['dialogue_idx'], one_dialogue['domain'], \
one_dialogue['mentioned_object_ids'], one_dialogue['scene_ids']
if domain == 'fashion':
metadata = fashion_metadata
else:
metadata = furniture_metadata
""" Image description save """
self.dial2object[dialog_cnt] = {}
self.dial2rel[dialog_cnt] = {}
self.dial2bg[dialog_cnt] = {}
self.dial2object[dialog_cnt] = []
for k, image_name in scene_ids.items():
if image_name[:2] == 'm_':
image_find_name = image_name[2:]
else:
image_find_name = image_name
image = image_visual[image_find_name]
self.dial2bg[dialog_cnt][image_find_name] = image
for image_des_path in image_des_list:
if image_name in image_des_path: # find image name
with open(image_des_path, 'r') as f:
image_des_data = json.load(f)
if 'scenes' in image_des_data.keys():
scenes = image_des_data['scenes']
for scene in scenes:
objects, relationships = scene['objects'], scene['relationships']
self.dial2rel[dialog_cnt].update(relationships)
for object_data in objects:
prefab_path, unique_id, object_id, bbox, position = object_data['prefab_path'], object_data['unique_id'], object_data['index'], \
object_data['bbox'], object_data['position']
## object 2D & meta save
visual_metalist = []
non_visual_metalist = []
for k, v in metadata[prefab_path].items():
if k in visual_meta_type:
visual_metalist.append(v)
elif k in non_visual_meta_type:
non_visual_metalist.append(v)
visual_meta_flatten = ' '.join([str(x) for x in visual_metalist])
non_visual_meta_flatten = ' '.join([str(x) for x in non_visual_metalist])
left, top, height, width = bbox[0], bbox[1], bbox[2], bbox[3]
object_visual = image.crop((left, top, left+width, top+height))
self.post_input[cnt] = {}
self.post_input[cnt]['visual'] = object_visual
self.post_input[cnt]['visual_meta'] = visual_meta_flatten
self.post_input[cnt]['dialog_cnt'] = dialog_cnt
self.dial2object[dialog_cnt].append(visual_meta_flatten)
cnt += 1
for key, data in self.post_input.items():
dialog_cnt = data['dialog_cnt']
visual_meta = data['visual_meta']
dial_objects = self.dial2object[dialog_cnt]
self.post_input[key]['neg_visual_meta'] = []
for dial_object in dial_objects:
if dial_object != visual_meta:
self.post_input[key]['neg_visual_meta'].append(dial_object)
def __len__(self):
return len(self.post_input)
def __getitem__(self, idx):
return self.post_input[idx] | 49.324786 | 165 | 0.507711 |
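# A hedged usage sketch (not in the original file); the paths below mirror the
# SIMMC2 asset locations mentioned in the constructor comments above:
#
#     dataset = post_loader(
#         data_path='./simmc2/data/simmc2_dials_dstc10_train.json',
#         image_obj_path='../res/image_obj.pickle',
#         description_path='./simmc2/data/public/*scene*',
#         fashion_path='./simmc2/data/fashion_prefab_metadata_all.json',
#         furniture_path='./simmc2/data/furniture_prefab_metadata_all.json')
#     loader = DataLoader(dataset, batch_size=1, shuffle=True,
#                         collate_fn=lambda batch: batch)  # PIL crops need a no-op collate
#     sample = next(iter(loader))[0]   # keys: 'visual', 'visual_meta', 'neg_visual_meta', ...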
4a27b4ac16ea38f12f943af11c9a358f43277ef3 | 440 | py | Python | emell/computation/test_identity.py | jakevoytko/emell | b26c643342ce0011fd574cb0848d2ba6182d8129 | [
"MIT"
] | 2 | 2020-02-13T05:47:47.000Z | 2020-03-06T17:38:33.000Z | emell/computation/test_identity.py | jakevoytko/emell | b26c643342ce0011fd574cb0848d2ba6182d8129 | [
"MIT"
] | null | null | null | emell/computation/test_identity.py | jakevoytko/emell | b26c643342ce0011fd574cb0848d2ba6182d8129 | [
"MIT"
] | null | null | null | """Contains tests for identity.py"""
import unittest
from emell.computation import identity
class TestIdentity(unittest.TestCase):
"""Tests the identity function."""
def test_identity(self) -> None:
"""Runs simple tests for identity()."""
self.assertEqual("a", identity("a"))
self.assertEqual(5, identity(5))
self.assertEqual([3], identity([3]))
if __name__ == "__main__":
unittest.main()
| 22 | 47 | 0.65 |
4a27b5ce42526b571338afb0dd5be7aebccd9f83 | 16,329 | py | Python | differentiable_filters/utils/base_layer.py | akloss/differentiable_filters | 821889dec411927658c6ef7dd01c9028d2f28efd | [
"MIT"
] | 14 | 2021-01-10T10:44:31.000Z | 2022-03-28T07:46:49.000Z | differentiable_filters/utils/base_layer.py | brentyi/differentiable_filters | 7ae1f5022a9f5cf9485cb7748cadf0f0d65c01bd | [
"MIT"
] | null | null | null | differentiable_filters/utils/base_layer.py | brentyi/differentiable_filters | 7ae1f5022a9f5cf9485cb7748cadf0f0d65c01bd | [
"MIT"
] | 7 | 2021-01-13T12:38:36.000Z | 2022-03-06T16:49:43.000Z | """
A base class providing
- wrappers around some tensorflow layers to set default arguments
- a function to compute jacobians
- a spatial-softmax layer
"""
import tensorflow as tf
class BaseLayer(tf.keras.layers.Layer):
def __init__(self):
"""
A base class providing
- wrappers around some tensorflow layers to set default arguments
- a function to compute jacobians
- a spatial-softmax layer
The list "updatable" is used to register paramaeters like moving
averages that need to be updated during training (since this is not
automatically handeld in tensorflow 1)
"""
super(BaseLayer, self).__init__()
self.updateable = []
###########################################################################
# convenience functions
###########################################################################
def _conv_layer(self, name, kernel_size, output_channels, stride=[1, 1],
padding='VALID', activation=tf.nn.relu, std=None, mean=0,
bias=1e-3, wd_w=1e-3, wd_b=1e-3, add_bias=True,
trainable=True):
"""
Helper to create a 2d convolution layer
Parameters
----------
name : str
name of the layer
kernel_size : int
kernel size
output_channels : int
number of output channels
stride : int, optional
stride for the convolution. The default is [1, 1].
padding : int, optional
padding ('SAME' or 'VALID'). The default is 'VALID'.
activation : function, optional
activation function. The default is tf.nn.relu.
std : float, optional
standard deviation of a truncated Gaussian for initializing
the kernel weights. The default is None.
mean : float, optional
mean of the gaussian for initializing the kernel weights. The
default is 0.
bias : float, optional
constant value to which to initialize the bias. The default is
1e-3.
wd_w : float, optional
weight decay factor for the kernel weights. The default is 1e-3.
wd_b : float, optional
weight decay factor for the bias. The default is 1e-3.
add_bias : bool, optional
whether to add a bias. The default is True.
trainable : bool, optional
            make this layer's variables trainable? The default is True.
Returns
-------
lay : tf.keras.layers.Conv2D
            A keras Conv2D layer with the requested parameters
"""
if std is None:
init_w = tf.initializers.glorot_normal()
else:
init_w = tf.keras.initializers.truncated_normal(stddev=std,
mean=mean)
init_b = tf.constant_initializer(bias)
lay = tf.keras.layers.Conv2D(
filters=output_channels, kernel_size=kernel_size,
strides=stride, padding=padding, activation=activation,
use_bias=add_bias,
kernel_initializer=init_w, bias_initializer=init_b,
kernel_regularizer=tf.keras.regularizers.l2(l=wd_w),
bias_regularizer=tf.keras.regularizers.l2(l=wd_b),
name=name, trainable=trainable)
return lay
def _deconv_layer(self, name, kernel_size, output_channels, stride=[1, 1],
padding='VALID', activation=tf.nn.relu, std=None, mean=0,
bias=1e-3, wd_w=1e-3, wd_b=1e-3, add_bias=True,
trainable=True):
"""
Helper to create a 2d deconvolution layer
Parameters
----------
name : str
name of the layer
kernel_size : int
kernel size
output_channels : int
number of output channels
stride : int, optional
stride for the deconvolution. The default is [1, 1].
padding : int, optional
padding ('SAME' or 'VALID'). The default is 'VALID'.
activation : function, optional
activation function. The default is tf.nn.relu.
std : float, optional
standard deviation of a truncated Gaussian for initializing
the kernel weights. The default is None.
mean : float, optional
mean of the gaussian for initializing the kernel weights. The
default is 0.
bias : float, optional
constant value to which to initialize the bias. The default is
1e-3.
wd_w : float, optional
weight decay factor for the kernel weights. The default is 1e-3.
wd_b : float, optional
weight decay factor for the bias. The default is 1e-3.
add_bias : bool, optional
whether to add a bias. The default is True.
trainable : bool, optional
            make this layer's variables trainable? The default is True.
Returns
-------
lay : tf.keras.layers.Conv2DTranspose
A keras Conv2DTranspose layer with the requested parameters
"""
if std is None:
init_w = tf.initializers.glorot_normal()
else:
init_w = tf.keras.initializers.truncated_normal(stddev=std,
mean=mean)
init_b = tf.constant_initializer(bias)
lay = tf.keras.layers.Conv2DTranspose(
filters=output_channels, kernel_size=kernel_size,
strides=stride, padding=padding, activation=activation,
use_bias=add_bias,
kernel_initializer=init_w, bias_initializer=init_b,
kernel_regularizer=tf.keras.regularizers.l2(l=wd_w),
bias_regularizer=tf.keras.regularizers.l2(l=wd_b),
name=name, trainable=trainable)
return lay
def _fc_layer(self, name, output_channels, activation=tf.nn.relu, std=None,
mean=0, bias=1e-3, wd_w=1e-3, wd_b=1e-3, add_bias=True,
trainable=True):
"""
Helper to create a 2d fully connected layer
Parameters
----------
name : str
name of the layer
output_channels : int
number of output channels
activation : function, optional
activation function. The default is tf.nn.relu.
std : float, optional
standard deviation of a truncated Gaussian for initializing
the kernel weights. The default is None.
mean : float, optional
mean of the gaussian for initializing the kernel weights. The
default is 0.
bias : float, optional
constant value to which to initialize the bias. The default is
1e-3.
wd_w : float, optional
weight decay factor for the kernel weights. The default is 1e-3.
wd_b : float, optional
weight decay factor for the bias. The default is 1e-3.
add_bias : bool, optional
whether to add a bias. The default is True.
trainable : bool, optional
            make this layer's variables trainable? The default is True.
Returns
-------
lay : tf.keras.layers.Dense
            A keras Dense layer with the requested parameters
"""
if std is None:
init_w = tf.initializers.glorot_normal()
else:
init_w = tf.keras.initializers.truncated_normal(stddev=std,
mean=mean)
init_b = tf.constant_initializer(bias)
lay = tf.keras.layers.Dense(
units=output_channels, activation=activation,
use_bias=add_bias,
kernel_initializer=init_w, bias_initializer=init_b,
kernel_regularizer=tf.keras.regularizers.l2(l=wd_w),
bias_regularizer=tf.keras.regularizers.l2(l=wd_b),
name=name, trainable=trainable)
return lay
def _compute_jacobian(self, ys, xs, no_batch=False):
"""
Helper to compute the jacobian of ys wrt. xs
Parameters
----------
ys : tensor
tensor to be derived
xs : tensor
            tensor with respect to which to derive
        no_batch : bool, optional
Whether the tensors have a leading batch dimension or not. The
default is False.
Returns
-------
J : tensor
J = d ys / d xs
"""
if no_batch:
xs = tf.reshape(xs, [-1])
ys = tf.reshape(ys, [-1])
y_num = ys.get_shape()[0]
x_num = xs.get_shape()[0]
if y_num.value > 1:
tmp = []
yys = tf.unstack(ys)
for ind, y in enumerate(yys):
grad = tf.gradients(y, xs)
if grad == [None]:
grad = [tf.zeros([x_num])]
tmp += [grad]
tmp = tf.stack(tmp)
else:
tmp = tf.gradients(ys, xs)
if tmp == [None]:
tmp = tf.zeros([y_num, x_num])
tmp = tf.reshape(tmp, [y_num, x_num])
return tmp
ys = tf.reshape(ys, [self.batch_size, -1])
out = []
for b, batch in enumerate(tf.unstack(ys)):
tmp = []
for y in tf.unstack(batch):
tmp += [tf.gradients(y, xs)]
# each gradient tensor in tmp is batch_size x dim_x, but we
# only need the parts that correspond to the bth batch
tmp = [tf.slice(t[0], [b, 0], [1, -1]) for t in tmp]
tmp = tf.stack(tmp)
if len(tmp.get_shape()) > 2:
tmp = tf.reshape(tmp, [-1, xs.get_shape()[1].value])
out += [tmp]
J = tf.stack(out)
return J
def _normalize_2d(self, bottom, name='normalizer', method='normal',
summary=True):
"""
Helper to turn a 2d input into a 2d distribution
Parameters
----------
bottom : tensor [batch_size, height, width, channels]
            the input tensor
name : str, optional
            name of the operation. The default is 'normalizer'.
method : str, optional
            What method to use for the normalization, either "normal" (i.e.
            divide by the sum over the image) or "softmax". The default is
            'normal'.
summary : bool, optional
            write out summary information to tensorboard? The default is True.
Returns
-------
out : tensor
the normalized output tensor
"""
with tf.variable_scope(name + '/normalize') as scope:
sh = bottom.get_shape().as_list()
# the input should be of shape (batch, height, width, channels)
# or (height, width, channels)
image_dim = sh[-2] * sh[-3]
channel_dim = sh[-1]
# Assume features is of size [(N), H, W, C]
# Transpose it to [(N), C, H, W], reshape to [N * C, H * W]
if len(sh) == 4:
channel_dim *= sh[0]
features = tf.reshape(tf.transpose(bottom, [0, 3, 1, 2]),
[channel_dim, image_dim])
else:
                features = tf.reshape(tf.transpose(bottom, [2, 0, 1]),
[channel_dim, image_dim])
features = features - tf.reduce_min(features, reduction_indices=1,
keepdims=True)
if method == 'normal':
normalizer = tf.reduce_sum(features, reduction_indices=1,
keepdims=True)
# check each channel
fs = tf.unstack(features, axis=0)
tmp = []
for ind, chan in enumerate(tf.unstack(normalizer, axis=0)):
# if the normalizer is 0, we set each element to 1/#elem
tmp += [tf.cond(tf.squeeze(tf.equal(chan, 0.)),
lambda: tf.divide(tf.ones_like(fs[ind]),
tf.cast(tf.size(fs[ind]),
tf.float32)),
lambda: tf.divide(fs[ind], chan))]
normalized = tf.stack(tmp)
# create activation summary
if summary and not scope.reuse:
if scope.name not in name:
tf.summary.histogram(scope.name + '/' + name,
normalizer)
else:
tf.summary.histogram(name + '/normalizer', normalizer)
elif method == 'softmax':
normalized = tf.nn.softmax(features)
# Reshape and transpose back to original format.
if len(sh) == 4:
out = tf.transpose(tf.reshape(normalized,
[sh[0], sh[3], sh[1], sh[2]]),
[0, 2, 3, 1])
else:
out = tf.transpose(tf.reshape(normalized,
[sh[2], sh[0], sh[1]]),
[1, 2, 0])
return out
def _spatial_softmax(self, bottom, name, method='softmax', summary=False):
"""
Helper to find the pixel position of the mean of a 2d input. First
computes the softmax of the input and then returns the 2d position
of the mean.
Parameters
----------
bottom : tensor [batch_size, height, width, channels]
the input tensor
name : str
the name of the operation
method : str, optional
            What method to use for normalizing the input tensor, either
            "normal" (i.e. divide by the sum over the image) or "softmax".
The default is 'softmax'.
summary : bool, optional
            write out summary information to tensorboard? The default is False.
Returns
-------
out : tensor [batch_size, channels, 2]
the pixel coordinates of the mean
"""
sh = bottom.get_shape().as_list()
dist = self._normalize_2d(bottom, name, method, summary)
# image_coords is a tensor of size [H, W, 2] representing the
# image coordinates of each pixel.
x_vals = tf.expand_dims(tf.linspace(-sh[-2]/2, sh[-2]/2., sh[-2]),
1)
x_t = tf.matmul(tf.ones(shape=[sh[-3], 1]),
tf.transpose(x_vals, [1, 0]))
y_t = tf.matmul(tf.expand_dims(tf.linspace(-sh[-3]/2, sh[-3]/2.,
sh[-3]), 1),
tf.ones(shape=[1, sh[-2]]))
xs = tf.expand_dims(x_t, -1)
ys = tf.expand_dims(y_t, -1)
image_coords = tf.concat(axis=-1, values=[xs, ys])
# Convert the distribution to shape [N, H, W, C, 1]
dist_ex = tf.expand_dims(dist, -1)
# Convert image coords to shape [H, W, 1, 2]
image_coords = tf.expand_dims(image_coords, 2)
# Convert image coords to shape [1, H, W, 1, 2]
image_coords = tf.expand_dims(image_coords, 0)
# tile
image_coords = tf.tile(image_coords,
[sh[0], 1, 1, sh[-1], 1])
# Multiply (with broadcasting) and reduce over image dimensions to
# get the result of shape [N, C, 2]
mult = dist_ex * image_coords
out = tf.reduce_sum(mult, reduction_indices=[1, 2])
# create activation summary
scope = tf.get_variable_scope()
if summary:
if scope.name not in name:
tf.summary.histogram(scope.name + '/' + name, out)
tf.summary.image(scope.name + '/' + name, dist)
else:
tf.summary.histogram(name, out)
tf.summary.image(name, dist)
return out
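    # Added note (editor's sketch, not in the original source): _spatial_softmax is a
    # soft-argmax. With d_c(i, j) the normalized distribution of channel c and
    # (x_j, y_i) the pixel-coordinate grid built above, the output is the expectation
    #   out[n, c] = sum_{i, j} d_c(i, j) * (x_j, y_i)
    # i.e. the expected (x, y) position of each channel's activation mass, with
    # coordinates spanning roughly [-W/2, W/2] x [-H/2, H/2]. Minimal usage sketch:
    #   feat = tf.placeholder(tf.float32, [8, 32, 32, 16])
    #   keypoints = model._spatial_softmax(feat, name='keypoints')  # shape [8, 16, 2]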
| 38.878571 | 79 | 0.52269 |
4a27b70e25407f49fcb15959ec555775363e982b | 37,537 | py | Python | python_modules/dagster/dagster/cli/pipeline.py | abkfenris/dagster | 7f35164535200cf904a4fdb18af207ccad09ad68 | [
"Apache-2.0"
] | null | null | null | python_modules/dagster/dagster/cli/pipeline.py | abkfenris/dagster | 7f35164535200cf904a4fdb18af207ccad09ad68 | [
"Apache-2.0"
] | null | null | null | python_modules/dagster/dagster/cli/pipeline.py | abkfenris/dagster | 7f35164535200cf904a4fdb18af207ccad09ad68 | [
"Apache-2.0"
] | null | null | null | import os
import re
import sys
import textwrap
import click
import pendulum
import yaml
from tabulate import tabulate
from dagster import PipelineDefinition
from dagster import __version__ as dagster_version
from dagster import check, execute_pipeline
from dagster.cli.workspace.cli_target import (
WORKSPACE_TARGET_WARNING,
get_external_pipeline_or_job_from_external_repo,
get_external_pipeline_or_job_from_kwargs,
get_external_repository_from_kwargs,
get_external_repository_from_repo_location,
get_pipeline_or_job_python_origin_from_kwargs,
get_repository_location_from_workspace,
get_workspace_from_kwargs,
pipeline_target_argument,
python_pipeline_or_job_config_argument,
python_pipeline_target_argument,
repository_target_argument,
)
from dagster.core.definitions.pipeline_base import IPipeline
from dagster.core.errors import DagsterBackfillFailedError, DagsterInvariantViolationError
from dagster.core.execution.api import create_execution_plan
from dagster.core.execution.backfill import BulkActionStatus, PartitionBackfill, create_backfill_run
from dagster.core.host_representation import (
ExternalPipeline,
ExternalRepository,
RepositoryHandle,
RepositoryLocation,
)
from dagster.core.host_representation.external_data import ExternalPartitionSetExecutionParamData
from dagster.core.host_representation.selector import PipelineSelector
from dagster.core.instance import DagsterInstance
from dagster.core.snap import PipelineSnapshot, SolidInvocationSnap
from dagster.core.storage.tags import MEMOIZED_RUN_TAG
from dagster.core.telemetry import log_external_repo_stats, telemetry_wrapper
from dagster.core.utils import make_new_backfill_id
from dagster.seven import IS_WINDOWS, JSONDecodeError, json
from dagster.utils import DEFAULT_WORKSPACE_YAML_FILENAME, load_yaml_from_glob_list, merge_dicts
from dagster.utils.error import serializable_error_info_from_exc_info
from dagster.utils.hosted_user_process import recon_pipeline_from_origin
from dagster.utils.indenting_printer import IndentingPrinter
from dagster.utils.interrupts import capture_interrupts
from .config_scaffolder import scaffold_pipeline_config
from .utils import get_instance_for_service
@click.group(name="pipeline")
def pipeline_cli():
"""
Commands for working with Dagster pipelines/jobs.
"""
def apply_click_params(command, *click_params):
for click_param in click_params:
command = click_param(command)
return command
@pipeline_cli.command(
name="list",
help="List the pipelines/jobs in a repository. {warning}".format(
warning=WORKSPACE_TARGET_WARNING
),
)
@repository_target_argument
def pipeline_list_command(**kwargs):
return execute_list_command(kwargs, click.echo)
def execute_list_command(cli_args, print_fn, using_job_op_graph_apis=False):
with get_instance_for_service(
"``dagster job list``" if using_job_op_graph_apis else "``dagster pipeline list``"
) as instance:
with get_external_repository_from_kwargs(
instance, version=dagster_version, kwargs=cli_args
) as external_repository:
title = "Repository {name}".format(name=external_repository.name)
print_fn(title)
print_fn("*" * len(title))
first = True
for pipeline in (
external_repository.get_external_jobs()
if using_job_op_graph_apis
else external_repository.get_all_external_pipelines()
):
pipeline_title = "{pipeline_or_job}: {name}".format(
pipeline_or_job="Job" if using_job_op_graph_apis else "Pipeline",
name=pipeline.name,
)
if not first:
print_fn("*" * len(pipeline_title))
first = False
print_fn(pipeline_title)
if pipeline.description:
print_fn("Description:")
print_fn(format_description(pipeline.description, indent=" " * 4))
print_fn(
"{solid_or_op}: (Execution Order)".format(
solid_or_op="Ops" if using_job_op_graph_apis else "Solids"
)
)
for solid_name in pipeline.pipeline_snapshot.solid_names_in_topological_order:
print_fn(" " + solid_name)
def format_description(desc, indent):
check.str_param(desc, "desc")
check.str_param(indent, "indent")
desc = re.sub(r"\s+", " ", desc)
dedented = textwrap.dedent(desc)
wrapper = textwrap.TextWrapper(initial_indent="", subsequent_indent=indent)
filled = wrapper.fill(dedented)
return filled
def get_pipeline_in_same_python_env_instructions(command_name):
return (
"This commands targets a pipeline/job. The pipeline/job can be specified in a number of ways:"
"\n\n1. dagster pipeline {command_name} -f /path/to/file.py -a define_some_pipeline"
"\n\n2. dagster pipeline {command_name} -m a_module.submodule -a define_some_pipeline"
"\n\n3. dagster pipeline {command_name} -f /path/to/file.py -a define_some_repo -p <<pipeline_name>>"
"\n\n4. dagster pipeline {command_name} -m a_module.submodule -a define_some_repo -p <<pipeline_name>>"
).format(command_name=command_name)
def get_pipeline_instructions(command_name):
return (
"This commands targets a pipeline. The pipeline can be specified in a number of ways:"
"\n\n1. dagster pipeline {command_name} -p <<pipeline_name>> (works if .{default_filename} exists)"
"\n\n2. dagster pipeline {command_name} -p <<pipeline_name>> -w path/to/{default_filename}"
"\n\n3. dagster pipeline {command_name} -f /path/to/file.py -a define_some_pipeline"
"\n\n4. dagster pipeline {command_name} -m a_module.submodule -a define_some_pipeline"
"\n\n5. dagster pipeline {command_name} -f /path/to/file.py -a define_some_repo -p <<pipeline_name>>"
"\n\n6. dagster pipeline {command_name} -m a_module.submodule -a define_some_repo -p <<pipeline_name>>"
).format(command_name=command_name, default_filename=DEFAULT_WORKSPACE_YAML_FILENAME)
def get_partitioned_pipeline_instructions(command_name):
return (
"This commands targets a partitioned pipeline/job. The pipeline/job and partition set must be "
"defined in a repository, which can be specified in a number of ways:"
"\n\n1. dagster pipeline {command_name} -p <<pipeline_name>> (works if .{default_filename} exists)"
"\n\n2. dagster pipeline {command_name} -p <<pipeline_name>> -w path/to/{default_filename}"
"\n\n3. dagster pipeline {command_name} -f /path/to/file.py -a define_some_repo -p <<pipeline_name>>"
"\n\n4. dagster pipeline {command_name} -m a_module.submodule -a define_some_repo -p <<pipeline_name>>"
).format(command_name=command_name, default_filename=DEFAULT_WORKSPACE_YAML_FILENAME)
@pipeline_cli.command(
name="print",
help="Print a pipeline/job.\n\n{instructions}".format(
instructions=get_pipeline_instructions("print")
),
)
@click.option("--verbose", is_flag=True)
@pipeline_target_argument
def pipeline_print_command(verbose, **cli_args):
with DagsterInstance.get() as instance:
return execute_print_command(instance, verbose, cli_args, click.echo)
def execute_print_command(instance, verbose, cli_args, print_fn, using_job_op_graph_apis=False):
with get_external_pipeline_or_job_from_kwargs(
instance,
version=dagster_version,
kwargs=cli_args,
using_job_op_graph_apis=using_job_op_graph_apis,
) as external_pipeline:
pipeline_snapshot = external_pipeline.pipeline_snapshot
if verbose:
print_pipeline_or_job(
pipeline_snapshot,
print_fn=print_fn,
using_job_op_graph_apis=using_job_op_graph_apis,
)
else:
print_solids_or_ops(
pipeline_snapshot,
print_fn=print_fn,
using_job_op_graph_apis=using_job_op_graph_apis,
)
def print_solids_or_ops(pipeline_snapshot, print_fn, using_job_op_graph_apis=False):
check.inst_param(pipeline_snapshot, "pipeline", PipelineSnapshot)
check.callable_param(print_fn, "print_fn")
printer = IndentingPrinter(indent_level=2, printer=print_fn)
printer.line(f"{'Job' if using_job_op_graph_apis else 'Pipeline'}: {pipeline_snapshot.name}")
printer.line(f"{'Ops' if using_job_op_graph_apis else 'Solids'}")
for solid in pipeline_snapshot.dep_structure_snapshot.solid_invocation_snaps:
with printer.with_indent():
printer.line(f"{'Op' if using_job_op_graph_apis else 'Solid'}: {solid.solid_name}")
def print_pipeline_or_job(pipeline_snapshot, print_fn, using_job_op_graph_apis=False):
check.inst_param(pipeline_snapshot, "pipeline", PipelineSnapshot)
check.callable_param(print_fn, "print_fn")
printer = IndentingPrinter(indent_level=2, printer=print_fn)
printer.line(f"{'Job' if using_job_op_graph_apis else 'Pipeline'}: {pipeline_snapshot.name}")
print_description(printer, pipeline_snapshot.description)
printer.line(f"{'Ops' if using_job_op_graph_apis else 'Solids'}")
for solid in pipeline_snapshot.dep_structure_snapshot.solid_invocation_snaps:
with printer.with_indent():
print_solid_or_op(printer, pipeline_snapshot, solid, using_job_op_graph_apis)
def print_description(printer, desc):
with printer.with_indent():
if desc:
printer.line("Description:")
with printer.with_indent():
printer.line(format_description(desc, printer.current_indent_str))
def print_solid_or_op(printer, pipeline_snapshot, solid_invocation_snap, using_job_op_graph_apis):
check.inst_param(pipeline_snapshot, "pipeline_snapshot", PipelineSnapshot)
check.inst_param(solid_invocation_snap, "solid_invocation_snap", SolidInvocationSnap)
printer.line(
f"{'Op' if using_job_op_graph_apis else 'Solid'}: {solid_invocation_snap.solid_name}"
)
with printer.with_indent():
printer.line("Inputs:")
for input_dep_snap in solid_invocation_snap.input_dep_snaps:
with printer.with_indent():
printer.line("Input: {name}".format(name=input_dep_snap.input_name))
printer.line("Outputs:")
for output_def_snap in pipeline_snapshot.get_node_def_snap(
solid_invocation_snap.solid_def_name
).output_def_snaps:
printer.line(output_def_snap.name)
@pipeline_cli.command(
name="list_versions",
help="Display the freshness of memoized results for the given pipeline.\n\n{instructions}".format(
instructions=get_pipeline_in_same_python_env_instructions("list_versions")
),
)
@python_pipeline_target_argument
@python_pipeline_or_job_config_argument("list_versions")
@click.option(
"--preset",
type=click.STRING,
help="Specify a preset to use for this pipeline. Presets are defined on pipelines under "
"preset_defs.",
)
@click.option(
"--mode", type=click.STRING, help="The name of the mode in which to execute the pipeline."
)
def pipeline_list_versions_command(**kwargs):
with DagsterInstance.get() as instance:
execute_list_versions_command(instance, kwargs)
def execute_list_versions_command(instance, kwargs):
check.inst_param(instance, "instance", DagsterInstance)
config = list(check.opt_tuple_param(kwargs.get("config"), "config", default=(), of_type=str))
preset = kwargs.get("preset")
mode = kwargs.get("mode")
if preset and config:
raise click.UsageError("Can not use --preset with --config.")
pipeline_origin = get_pipeline_or_job_python_origin_from_kwargs(kwargs)
pipeline = recon_pipeline_from_origin(pipeline_origin)
run_config = get_run_config_from_file_list(config)
memoized_plan = create_execution_plan(
pipeline,
run_config=run_config,
mode=mode,
instance_ref=instance.get_ref(),
tags={MEMOIZED_RUN_TAG: "true"},
)
add_step_to_table(memoized_plan)
def add_step_to_table(memoized_plan):
# the step keys that we need to execute are those which do not have their inputs populated.
step_keys_not_stored = set(memoized_plan.step_keys_to_execute)
table = []
for step_output_handle, version in memoized_plan.step_output_versions.items():
table.append(
[
"{key}.{output}".format(
key=step_output_handle.step_key, output=step_output_handle.output_name
),
version,
"stored"
if step_output_handle.step_key not in step_keys_not_stored
else "to-be-recomputed",
]
)
table_str = tabulate(
table, headers=["Step Output", "Version", "Status of Output"], tablefmt="github"
)
click.echo(table_str)
@pipeline_cli.command(
name="execute",
help="Execute a pipeline.\n\n{instructions}".format(
instructions=get_pipeline_in_same_python_env_instructions("execute")
),
)
@python_pipeline_target_argument
@python_pipeline_or_job_config_argument("execute")
@click.option(
"--preset",
type=click.STRING,
help="Specify a preset to use for this pipeline. Presets are defined on pipelines under "
"preset_defs.",
)
@click.option(
"--mode", type=click.STRING, help="The name of the mode in which to execute the pipeline."
)
@click.option(
"--tags", type=click.STRING, help="JSON string of tags to use for this pipeline/job run"
)
@click.option(
"-s",
"--solid-selection",
type=click.STRING,
help=(
"Specify the solid subselection to execute. It can be multiple clauses separated by commas."
"Examples:"
'\n- "some_solid" will execute "some_solid" itself'
'\n- "*some_solid" will execute "some_solid" and all its ancestors (upstream dependencies)'
'\n- "*some_solid+++" will execute "some_solid", all its ancestors, and its descendants'
" (downstream dependencies) within 3 levels down"
'\n- "*some_solid,other_solid_a,other_solid_b+" will execute "some_solid" and all its'
' ancestors, "other_solid_a" itself, and "other_solid_b" and its direct child solids'
),
)
def pipeline_execute_command(**kwargs):
with capture_interrupts():
with get_instance_for_service("``dagster pipeline execute``") as instance:
execute_execute_command(instance, kwargs)
@telemetry_wrapper
def execute_execute_command(instance, kwargs, using_job_op_graph_apis=False):
check.inst_param(instance, "instance", DagsterInstance)
config = list(check.opt_tuple_param(kwargs.get("config"), "config", default=(), of_type=str))
preset = kwargs.get("preset")
mode = kwargs.get("mode")
if preset and config:
raise click.UsageError("Can not use --preset with --config.")
tags = get_tags_from_args(kwargs)
pipeline_origin = get_pipeline_or_job_python_origin_from_kwargs(kwargs, using_job_op_graph_apis)
pipeline = recon_pipeline_from_origin(pipeline_origin)
solid_selection = get_solid_selection_from_args(kwargs)
result = do_execute_command(pipeline, instance, config, mode, tags, solid_selection, preset)
if not result.success:
raise click.ClickException("Pipeline run {} resulted in failure.".format(result.run_id))
return result
def get_run_config_from_file_list(file_list):
check.opt_list_param(file_list, "file_list", of_type=str)
return load_yaml_from_glob_list(file_list) if file_list else {}
def _check_execute_external_pipeline_args(
external_pipeline, run_config, mode, preset, tags, solid_selection
):
check.inst_param(external_pipeline, "external_pipeline", ExternalPipeline)
run_config = check.opt_dict_param(run_config, "run_config")
check.opt_str_param(mode, "mode")
check.opt_str_param(preset, "preset")
check.invariant(
not (mode is not None and preset is not None),
"You may set only one of `mode` (got {mode}) or `preset` (got {preset}).".format(
mode=mode, preset=preset
),
)
tags = check.opt_dict_param(tags, "tags", key_type=str)
check.opt_list_param(solid_selection, "solid_selection", of_type=str)
if preset is not None:
pipeline_preset = external_pipeline.get_preset(preset)
if pipeline_preset.run_config is not None:
check.invariant(
(not run_config) or (pipeline_preset.run_config == run_config),
"The environment set in preset '{preset}' does not agree with the environment "
"passed in the `run_config` argument.".format(preset=preset),
)
run_config = pipeline_preset.run_config
# load solid_selection from preset
if pipeline_preset.solid_selection is not None:
check.invariant(
solid_selection is None or solid_selection == pipeline_preset.solid_selection,
"The solid_selection set in preset '{preset}', {preset_subset}, does not agree with "
"the `solid_selection` argument: {solid_selection}".format(
preset=preset,
preset_subset=pipeline_preset.solid_selection,
solid_selection=solid_selection,
),
)
solid_selection = pipeline_preset.solid_selection
check.invariant(
mode is None or mode == pipeline_preset.mode,
"Mode {mode} does not agree with the mode set in preset '{preset}': "
"('{preset_mode}')".format(preset=preset, preset_mode=pipeline_preset.mode, mode=mode),
)
mode = pipeline_preset.mode
tags = merge_dicts(pipeline_preset.tags, tags)
if mode is not None:
if not external_pipeline.has_mode(mode):
raise DagsterInvariantViolationError(
(
"You have attempted to execute pipeline {name} with mode {mode}. "
"Available modes: {modes}"
).format(
name=external_pipeline.name,
mode=mode,
modes=external_pipeline.available_modes,
)
)
else:
if len(external_pipeline.available_modes) > 1:
raise DagsterInvariantViolationError(
(
"Pipeline {name} has multiple modes (Available modes: {modes}) and you have "
"attempted to execute it without specifying a mode. Set "
"mode property on the PipelineRun object."
).format(name=external_pipeline.name, modes=external_pipeline.available_modes)
)
mode = external_pipeline.get_default_mode_name()
tags = merge_dicts(external_pipeline.tags, tags)
return (
run_config,
mode,
tags,
solid_selection,
)
def _create_external_pipeline_run(
instance,
repo_location,
external_repo,
external_pipeline,
run_config,
mode,
preset,
tags,
solid_selection,
run_id,
):
check.inst_param(instance, "instance", DagsterInstance)
check.inst_param(repo_location, "repo_location", RepositoryLocation)
check.inst_param(external_repo, "external_repo", ExternalRepository)
check.inst_param(external_pipeline, "external_pipeline", ExternalPipeline)
check.opt_dict_param(run_config, "run_config")
check.opt_str_param(mode, "mode")
check.opt_str_param(preset, "preset")
check.opt_dict_param(tags, "tags", key_type=str)
check.opt_list_param(solid_selection, "solid_selection", of_type=str)
check.opt_str_param(run_id, "run_id")
run_config, mode, tags, solid_selection = _check_execute_external_pipeline_args(
external_pipeline,
run_config,
mode,
preset,
tags,
solid_selection,
)
pipeline_name = external_pipeline.name
pipeline_selector = PipelineSelector(
location_name=repo_location.name,
repository_name=external_repo.name,
pipeline_name=pipeline_name,
solid_selection=solid_selection,
)
external_pipeline = repo_location.get_external_pipeline(pipeline_selector)
pipeline_mode = mode or external_pipeline.get_default_mode_name()
external_execution_plan = repo_location.get_external_execution_plan(
external_pipeline,
run_config,
pipeline_mode,
step_keys_to_execute=None,
known_state=None,
instance=instance,
)
execution_plan_snapshot = external_execution_plan.execution_plan_snapshot
return instance.create_run(
pipeline_name=pipeline_name,
run_id=run_id,
run_config=run_config,
mode=pipeline_mode,
solids_to_execute=external_pipeline.solids_to_execute,
step_keys_to_execute=execution_plan_snapshot.step_keys_to_execute,
solid_selection=solid_selection,
status=None,
root_run_id=None,
parent_run_id=None,
tags=tags,
pipeline_snapshot=external_pipeline.pipeline_snapshot,
execution_plan_snapshot=execution_plan_snapshot,
parent_pipeline_snapshot=external_pipeline.parent_pipeline_snapshot,
external_pipeline_origin=external_pipeline.get_external_origin(),
pipeline_code_origin=external_pipeline.get_python_origin(),
)
def do_execute_command(
pipeline,
instance,
config,
mode=None,
tags=None,
solid_selection=None,
preset=None,
):
check.inst_param(pipeline, "pipeline", IPipeline)
check.inst_param(instance, "instance", DagsterInstance)
check.opt_list_param(config, "config", of_type=str)
return execute_pipeline(
pipeline,
run_config=get_run_config_from_file_list(config),
mode=mode,
tags=tags,
instance=instance,
raise_on_error=False,
solid_selection=solid_selection,
preset=preset,
)
@pipeline_cli.command(
name="launch",
help="Launch a pipeline using the run launcher configured on the Dagster instance.\n\n{instructions}".format(
instructions=get_pipeline_instructions("launch")
),
)
@pipeline_target_argument
@python_pipeline_or_job_config_argument("launch")
@click.option(
"--config-json",
type=click.STRING,
help="JSON string of run config to use for this pipeline/job run. Cannot be used with -c / --config.",
)
@click.option(
"--preset",
type=click.STRING,
help="Specify a preset to use for this pipeline. Presets are defined on pipelines under "
"preset_defs.",
)
@click.option(
"--mode", type=click.STRING, help="The name of the mode in which to execute the pipeline."
)
@click.option("--tags", type=click.STRING, help="JSON string of tags to use for this pipeline run")
@click.option(
"-s",
"--solid-selection",
type=click.STRING,
help=(
"Specify the solid subselection to launch. It can be multiple clauses separated by commas."
"Examples:"
'\n- "some_solid" will launch "some_solid" itself'
'\n- "*some_solid" will launch "some_solid" and all its ancestors (upstream dependencies)'
'\n- "*some_solid+++" will launch "some_solid", all its ancestors, and its descendants'
" (downstream dependencies) within 3 levels down"
'\n- "*some_solid,other_solid_a,other_solid_b+" will launch "some_solid" and all its'
' ancestors, "other_solid_a" itself, and "other_solid_b" and its direct child solids'
),
)
@click.option("--run-id", type=click.STRING, help="The ID to give to the launched pipeline/job run")
def pipeline_launch_command(**kwargs):
with DagsterInstance.get() as instance:
return execute_launch_command(instance, kwargs)
@telemetry_wrapper
def execute_launch_command(instance, kwargs, using_job_op_graph_apis=False):
preset = kwargs.get("preset")
mode = kwargs.get("mode")
check.inst_param(instance, "instance", DagsterInstance)
config = get_config_from_args(kwargs)
with get_workspace_from_kwargs(instance, version=dagster_version, kwargs=kwargs) as workspace:
repo_location = get_repository_location_from_workspace(workspace, kwargs.get("location"))
external_repo = get_external_repository_from_repo_location(
repo_location, kwargs.get("repository")
)
external_pipeline = get_external_pipeline_or_job_from_external_repo(
external_repo, kwargs.get("pipeline_or_job"), using_job_op_graph_apis
)
log_external_repo_stats(
instance=instance,
external_pipeline=external_pipeline,
external_repo=external_repo,
source="pipeline_launch_command",
)
if preset and config:
raise click.UsageError("Can not use --preset with -c / --config / --config-json.")
run_tags = get_tags_from_args(kwargs)
solid_selection = get_solid_selection_from_args(kwargs)
pipeline_run = _create_external_pipeline_run(
instance=instance,
repo_location=repo_location,
external_repo=external_repo,
external_pipeline=external_pipeline,
run_config=config,
mode=mode,
preset=preset,
tags=run_tags,
solid_selection=solid_selection,
run_id=kwargs.get("run_id"),
)
return instance.submit_run(pipeline_run.run_id, workspace)
@pipeline_cli.command(
name="scaffold_config",
help="Scaffold the config for a pipeline.\n\n{instructions}".format(
instructions=get_pipeline_in_same_python_env_instructions("scaffold_config")
),
)
@python_pipeline_target_argument
@click.option("--print-only-required", default=False, is_flag=True)
def pipeline_scaffold_command(**kwargs):
execute_scaffold_command(kwargs, click.echo)
def execute_scaffold_command(cli_args, print_fn, using_job_op_graph_apis=False):
pipeline_origin = get_pipeline_or_job_python_origin_from_kwargs(
cli_args, using_job_op_graph_apis
)
pipeline = recon_pipeline_from_origin(pipeline_origin)
skip_non_required = cli_args["print_only_required"]
do_scaffold_command(pipeline.get_definition(), print_fn, skip_non_required)
def do_scaffold_command(pipeline_def, printer, skip_non_required):
check.inst_param(pipeline_def, "pipeline_def", PipelineDefinition)
check.callable_param(printer, "printer")
check.bool_param(skip_non_required, "skip_non_required")
config_dict = scaffold_pipeline_config(pipeline_def, skip_non_required=skip_non_required)
yaml_string = yaml.dump(config_dict, default_flow_style=False)
printer(yaml_string)
def gen_partition_names_from_args(partition_names, kwargs):
partition_selector_args = [
bool(kwargs.get("all")),
bool(kwargs.get("partitions")),
(bool(kwargs.get("from")) or bool(kwargs.get("to"))),
]
if sum(partition_selector_args) > 1:
raise click.UsageError(
"error, cannot use more than one of: `--all`, `--partitions`, `--from/--to`"
)
if kwargs.get("all"):
return partition_names
if kwargs.get("partitions"):
selected_args = [s.strip() for s in kwargs.get("partitions").split(",") if s.strip()]
selected_partitions = [
partition for partition in partition_names if partition in selected_args
]
if len(selected_partitions) < len(selected_args):
selected_names = [partition for partition in selected_partitions]
unknown = [selected for selected in selected_args if selected not in selected_names]
raise click.UsageError("Unknown partitions: {}".format(", ".join(unknown)))
return selected_partitions
start = validate_partition_slice(partition_names, "from", kwargs.get("from"))
end = validate_partition_slice(partition_names, "to", kwargs.get("to"))
return partition_names[start:end]
def get_config_from_args(kwargs):
config_files = kwargs.get("config")
config_json = kwargs.get("config_json")
if not config_files and not config_json:
return {}
if config_files and config_json:
raise click.UsageError("Cannot specify both -c / --config and --config-json")
if config_files:
config_file_list = list(
check.opt_tuple_param(kwargs.get("config"), "config", default=(), of_type=str)
)
return get_run_config_from_file_list(config_file_list)
if config_json:
try:
return json.loads(config_json)
except JSONDecodeError:
raise click.UsageError(
"Invalid JSON-string given for `--config-json`: {}\n\n{}".format(
config_json,
serializable_error_info_from_exc_info(sys.exc_info()).to_string(),
)
)
def get_tags_from_args(kwargs):
if kwargs.get("tags") is None:
return {}
try:
return json.loads(kwargs.get("tags"))
except JSONDecodeError:
raise click.UsageError(
"Invalid JSON-string given for `--tags`: {}\n\n{}".format(
kwargs.get("tags"),
serializable_error_info_from_exc_info(sys.exc_info()).to_string(),
)
)
def get_solid_selection_from_args(kwargs):
solid_selection_str = kwargs.get("solid_selection")
if not isinstance(solid_selection_str, str):
return None
return [ele.strip() for ele in solid_selection_str.split(",")] if solid_selection_str else None
def print_partition_format(partitions, indent_level):
if not IS_WINDOWS and sys.stdout.isatty():
_, tty_width = os.popen("stty size", "r").read().split()
screen_width = min(250, int(tty_width))
else:
screen_width = 250
max_str_len = max(len(x) for x in partitions)
spacing = 10
num_columns = min(10, int((screen_width - indent_level) / (max_str_len + spacing)))
column_width = int((screen_width - indent_level) / num_columns)
prefix = " " * max(0, indent_level - spacing)
lines = []
for chunk in list(split_chunk(partitions, num_columns)):
lines.append(prefix + "".join(partition.rjust(column_width) for partition in chunk))
return "\n" + "\n".join(lines)
def split_chunk(l, n):
for i in range(0, len(l), n):
yield l[i : i + n]
def validate_partition_slice(partition_names, name, value):
is_start = name == "from"
if value is None:
return 0 if is_start else len(partition_names)
if value not in partition_names:
raise click.UsageError("invalid value {} for {}".format(value, name))
index = partition_names.index(value)
return index if is_start else index + 1
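# Added example (editor's note, not part of the original module): `--from`/`--to`
# select an inclusive range of partition names. With
# partition_names = ["20191101", "20191102", "20191103"]:
#   validate_partition_slice(partition_names, "from", "20191102")  # -> 1
#   validate_partition_slice(partition_names, "to", "20191103")    # -> 3
# so partition_names[1:3] == ["20191102", "20191103"], which is why the end bound
# returns index + 1.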
@pipeline_cli.command(
name="backfill",
help="Backfill a partitioned pipeline/job.\n\n{instructions}".format(
instructions=get_partitioned_pipeline_instructions("backfill")
),
)
@pipeline_target_argument
@click.option(
"--partitions",
type=click.STRING,
help="Comma-separated list of partition names that we want to backfill",
)
@click.option(
"--partition-set",
type=click.STRING,
help="The name of the partition set over which we want to backfill.",
)
@click.option(
"--all",
type=click.STRING,
help="Specify to select all partitions to backfill.",
)
@click.option(
"--from",
type=click.STRING,
help=(
"Specify a start partition for this backfill job"
"\n\nExample: "
"dagster pipeline backfill log_daily_stats --from 20191101"
),
)
@click.option(
"--to",
type=click.STRING,
help=(
"Specify an end partition for this backfill job"
"\n\nExample: "
"dagster pipeline backfill log_daily_stats --to 20191201"
),
)
@click.option(
"--tags", type=click.STRING, help="JSON string of tags to use for this pipeline/job run"
)
@click.option("--noprompt", is_flag=True)
def pipeline_backfill_command(**kwargs):
with DagsterInstance.get() as instance:
execute_backfill_command(kwargs, click.echo, instance)
def execute_backfill_command(cli_args, print_fn, instance, using_graph_job_op_apis=False):
with get_workspace_from_kwargs(instance, version=dagster_version, kwargs=cli_args) as workspace:
repo_location = get_repository_location_from_workspace(workspace, cli_args.get("location"))
_execute_backfill_command_at_location(
cli_args, print_fn, instance, workspace, repo_location, using_graph_job_op_apis
)
def _execute_backfill_command_at_location(
cli_args, print_fn, instance, workspace, repo_location, using_graph_job_op_apis=False
):
external_repo = get_external_repository_from_repo_location(
repo_location, cli_args.get("repository")
)
external_pipeline = get_external_pipeline_or_job_from_external_repo(
external_repo, cli_args.get("pipeline_or_job")
)
noprompt = cli_args.get("noprompt")
pipeline_partition_set_names = {
external_partition_set.name: external_partition_set
for external_partition_set in external_repo.get_external_partition_sets()
if external_partition_set.pipeline_name == external_pipeline.name
}
if not pipeline_partition_set_names:
raise click.UsageError(
"No partition sets found for pipeline/job `{}`".format(external_pipeline.name)
)
partition_set_name = cli_args.get("partition_set")
if not partition_set_name:
if len(pipeline_partition_set_names) == 1:
partition_set_name = next(iter(pipeline_partition_set_names.keys()))
elif noprompt:
raise click.UsageError("No partition set specified (see option `--partition-set`)")
else:
partition_set_name = click.prompt(
"Select a partition set to use for backfill: {}".format(
", ".join(x for x in pipeline_partition_set_names.keys())
)
)
partition_set = pipeline_partition_set_names.get(partition_set_name)
if not partition_set:
raise click.UsageError("No partition set found named `{}`".format(partition_set_name))
run_tags = get_tags_from_args(cli_args)
repo_handle = RepositoryHandle(
repository_name=external_repo.name,
repository_location=repo_location,
)
try:
partition_names_or_error = repo_location.get_external_partition_names(
repo_handle,
partition_set_name,
)
except Exception:
error_info = serializable_error_info_from_exc_info(sys.exc_info())
raise DagsterBackfillFailedError(
"Failure fetching partition names: {error_message}".format(
error_message=error_info.message
),
serialized_error_info=error_info,
)
partition_names = gen_partition_names_from_args(
partition_names_or_error.partition_names, cli_args
)
# Print backfill info
print_fn("\n Pipeline/Job: {}".format(external_pipeline.name))
if not using_graph_job_op_apis:
print_fn("Partition set: {}".format(partition_set_name))
print_fn(" Partitions: {}\n".format(print_partition_format(partition_names, indent_level=15)))
# Confirm and launch
if noprompt or click.confirm(
"Do you want to proceed with the backfill ({} partitions)?".format(len(partition_names))
):
print_fn("Launching runs... ")
backfill_id = make_new_backfill_id()
backfill_job = PartitionBackfill(
backfill_id=backfill_id,
partition_set_origin=partition_set.get_external_origin(),
status=BulkActionStatus.REQUESTED,
partition_names=partition_names,
from_failure=False,
reexecution_steps=None,
tags=run_tags,
backfill_timestamp=pendulum.now("UTC").timestamp(),
)
try:
partition_execution_data = (
repo_location.get_external_partition_set_execution_param_data(
repository_handle=repo_handle,
partition_set_name=partition_set_name,
partition_names=partition_names,
)
)
except Exception:
error_info = serializable_error_info_from_exc_info(sys.exc_info())
instance.add_backfill(
backfill_job.with_status(BulkActionStatus.FAILED).with_error(error_info)
)
return print_fn("Backfill failed: {}".format(error_info))
assert isinstance(partition_execution_data, ExternalPartitionSetExecutionParamData)
for partition_data in partition_execution_data.partition_data:
pipeline_run = create_backfill_run(
instance,
repo_location,
external_pipeline,
partition_set,
backfill_job,
partition_data,
)
if pipeline_run:
instance.submit_run(pipeline_run.run_id, workspace)
instance.add_backfill(backfill_job.with_status(BulkActionStatus.COMPLETED))
print_fn("Launched backfill job `{}`".format(backfill_id))
else:
print_fn("Aborted!")
| 37.877901 | 113 | 0.690306 |
4a27b78ef409eccd5b1006f58a650ee51c7662e0 | 1,714 | py | Python | drl/dqn/nn_blocks.py | lych1233/compactDRL | 392eeff336c833b97b4c9b7f6b044144c242b1fa | [
"MIT"
] | 1 | 2021-08-10T03:00:34.000Z | 2021-08-10T03:00:34.000Z | drl/dqn/nn_blocks.py | lych1233/compactDRL | 392eeff336c833b97b4c9b7f6b044144c242b1fa | [
"MIT"
] | null | null | null | drl/dqn/nn_blocks.py | lych1233/compactDRL | 392eeff336c833b97b4c9b7f6b044144c242b1fa | [
"MIT"
] | null | null | null | import torch.nn as nn
class MLPFeature(nn.Sequential):
def __init__(self, input_dim, hidden_dim):
super(MLPFeature, self).__init__(
nn.Linear(input_dim, hidden_dim),
nn.ReLU(),
)
class CNNFeature(nn.Sequential):
def __init__(self, args, in_channels, width, height, channel_dim):
divider_list = args.channel_divider
kernel_list = args.kernel_size
stride_list = args.stride
layers = []
for divider, kernel, stride in zip(divider_list, kernel_list, stride_list):
layers.append(nn.Conv2d(in_channels, channel_dim // divider, kernel, stride))
layers.append(nn.ReLU())
in_channels = channel_dim // divider
width = (width - kernel) // stride + 1
height = (height - kernel) // stride + 1
layers.append(nn.Flatten())
self.conv_nodes = in_channels * width * height
super(CNNFeature, self).__init__(*layers)
class QNet(nn.Module):
def __init__(self, args, n_obs, n_act):
super(QNet, self).__init__()
self.hidden_dim = args.hidden_dim
if len(n_obs) == 1:
self.feature_extractor = MLPFeature(*n_obs, self.hidden_dim)
self.feature_dim = self.hidden_dim
else:
self.feature_extractor = CNNFeature(args, *n_obs, args.channel_dim)
self.feature_dim = self.feature_extractor.conv_nodes
self.Q_mlp = nn.Sequential(
nn.Linear(self.feature_dim, self.hidden_dim),
nn.ReLU(),
nn.Linear(self.hidden_dim, n_act),
)
def forward(self, obs):
feature = self.feature_extractor(obs)
return self.Q_mlp(feature)
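# Added usage sketch (editor's example, not part of the original repo). The fields on
# `args` mirror the attributes read by the classes above (hidden_dim, channel_dim,
# channel_divider, kernel_size, stride); the concrete values are illustrative only.
if __name__ == "__main__":
    from types import SimpleNamespace
    import torch
    args = SimpleNamespace(
        hidden_dim=256,
        channel_dim=64,
        channel_divider=[8, 4, 2],
        kernel_size=[8, 4, 3],
        stride=[4, 2, 1],
    )
    # 1-D observation -> MLP feature extractor
    vec_net = QNet(args, (10,), 4)
    print(vec_net(torch.zeros(2, 10)).shape)         # torch.Size([2, 4])
    # image observation (channels, height, width) -> CNN feature extractor
    img_net = QNet(args, (4, 84, 84), 4)
    print(img_net(torch.zeros(2, 4, 84, 84)).shape)  # torch.Size([2, 4])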
| 37.26087 | 89 | 0.617853 |
4a27b88e4626576f0424457af04307a171df6f3f | 2,191 | py | Python | mlapp/handlers/databases/database_interface.py | nbk905/mlapp | af650a8a302959674dd5a1bc6d15e30e90abf227 | [
"Apache-2.0"
] | null | null | null | mlapp/handlers/databases/database_interface.py | nbk905/mlapp | af650a8a302959674dd5a1bc6d15e30e90abf227 | [
"Apache-2.0"
] | null | null | null | mlapp/handlers/databases/database_interface.py | nbk905/mlapp | af650a8a302959674dd5a1bc6d15e30e90abf227 | [
"Apache-2.0"
] | null | null | null | from abc import ABCMeta, abstractmethod
class DatabaseInterface:
__metaclass__ = ABCMeta
@abstractmethod
def execute_query(self, query, params=None):
"""
Executes Query in the database.
:param query: str - query to be executed.
:param params: list - list of parameters to be used if necessary in query
:return: result of query
"""
raise NotImplementedError()
@abstractmethod
def insert_query(self, query, values):
"""
Executes an "INSERT" query in the database.
:param query: str - query to be executed.
:param values: list - list of values to be used in the query
:return: None
"""
raise NotImplementedError()
@abstractmethod
def insert_df(self, sql_table, df, batch_length=1000):
"""
Inserts a DataFrame into a table in the database.
:param sql_table: str - name of the table.
:param df: DataFrame (Pandas, PySpark or other) - Matrix type DataFrame containing all values to insert.
        :param batch_length: int - how many rows of the DataFrame to insert at a time
:return: None
"""
raise NotImplementedError()
@abstractmethod
def get_df(self, query, params=None):
"""
Executes a query in the database and returns it as a DataFrame.
:param query: str - query to be executed.
:param params: list - list of parameters to be used if necessary in query
:return: result of query as a DataFrame
"""
raise NotImplementedError()
@abstractmethod
def update_job_running(self, job_id):
"""
Updates row in the table of jobs by the job_id to status `Running`
Functionality of the MLCP (Machine Learning Control Panel)
:param job_id: str - id of the job
:return: None
"""
raise NotImplementedError()
@abstractmethod
def update_actuals(self, df):
"""
        Update the target table with the actual (y_true) values
        :param df: the dataframe representing the real data that was loaded
        :return: None
"""
raise NotImplementedError()
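# Added illustrative sketch (editor's example, not part of mlapp): a minimal concrete
# handler backed by sqlite3 + pandas showing one way the interface above could be
# implemented; the MLCP-specific methods are intentionally left as stubs.
class SQLiteDatabaseHandlerSketch(DatabaseInterface):
    def __init__(self, path=":memory:"):
        import sqlite3
        self._conn = sqlite3.connect(path)
    def execute_query(self, query, params=None):
        cursor = self._conn.execute(query, params or [])
        self._conn.commit()
        return cursor.fetchall()
    def insert_query(self, query, values):
        self._conn.execute(query, values)
        self._conn.commit()
    def insert_df(self, sql_table, df, batch_length=1000):
        # pandas handles the batching through `chunksize`
        df.to_sql(sql_table, self._conn, if_exists="append", index=False,
                  chunksize=batch_length)
    def get_df(self, query, params=None):
        import pandas as pd
        return pd.read_sql_query(query, self._conn, params=params)
    def update_job_running(self, job_id):
        raise NotImplementedError()
    def update_actuals(self, df):
        raise NotImplementedError()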
| 33.19697 | 112 | 0.628024 |
4a27b969775d0e952c66f381768ced81f685cc76 | 343 | py | Python | tests/apps/overridable/models.py | samuelmaudo/yepes | 1ef9a42d4eaa70d9b3e6e7fa519396c1e1174fcb | [
"BSD-3-Clause"
] | null | null | null | tests/apps/overridable/models.py | samuelmaudo/yepes | 1ef9a42d4eaa70d9b3e6e7fa519396c1e1174fcb | [
"BSD-3-Clause"
] | null | null | null | tests/apps/overridable/models.py | samuelmaudo/yepes | 1ef9a42d4eaa70d9b3e6e7fa519396c1e1174fcb | [
"BSD-3-Clause"
] | null | null | null | # -*- coding:utf-8 -*-
from __future__ import unicode_literals
from yepes.apps import apps
AbstractArticle = apps.get_class('overridable.abstract_models', 'AbstractArticle')
AbstractAuthor = apps.get_class('overridable.abstract_models', 'AbstractAuthor')
class Article(AbstractArticle):
pass
class Author(AbstractAuthor):
pass
| 19.055556 | 82 | 0.772595 |
4a27b9833e60f2ffb7094acc326ce7652ed5213d | 4,089 | py | Python | joystick.py | salendron/blobblast | e557a1c275c843fff3fde679c06cf5e022c4c27c | [
"MIT"
] | null | null | null | joystick.py | salendron/blobblast | e557a1c275c843fff3fde679c06cf5e022c4c27c | [
"MIT"
] | null | null | null | joystick.py | salendron/blobblast | e557a1c275c843fff3fde679c06cf5e022c4c27c | [
"MIT"
] | null | null | null | """
MIT License
Copyright (c) 2022 Bruno Hautzenberger
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
import sys
import pygame
from config import Config
class Joystick:
"""
    A helper for pygame.joystick that requires two connected game controllers.
    It will read all commands from the game pads and map them to defined events
    that can be used like pygame.events later on in the game.
To make this work with a specific game pad, use the debug method,
comment in and out what you want to debug, and then change the mapping in
get_events to fit your controllers.
"""
def __init__(self):
self.joysticks = [pygame.joystick.Joystick(i) for i in range(pygame.joystick.get_count())]
for joy in self.joysticks:
joy.init()
if len(self.joysticks) != 2:
print("Could not detect two controllers!")
sys.exit(0)
def get_events(self):
events = {}
        # joystick 1
j_1 = self.joysticks[0]
# up / down
j_1_axis_1 = round(j_1.get_axis(1))
events[Config.P1_UP] = True if j_1_axis_1 < 0 else False
events[Config.P1_DOWN] = True if j_1_axis_1 > 0 else False
# shoot, pause, exit
events[Config.P1_SHOOT] = True if j_1.get_button(0) == 1 else False
events[Config.P1_START] = True if j_1.get_button(6) == 1 else False
events[Config.P1_EXIT] = True if j_1.get_button(4) == 1 else False
        # joystick 2
j_2 = self.joysticks[1]
# up / down
j_2_axis_1 = round(j_2.get_axis(1))
events[Config.P2_UP] = True if j_2_axis_1 < 0 else False
events[Config.P2_DOWN] = True if j_2_axis_1 > 0 else False
# shoot, pause, exit
events[Config.P2_SHOOT] = True if j_2.get_button(0) == 1 else False
events[Config.P2_START] = True if j_2.get_button(6) == 1 else False
events[Config.P2_EXIT] = True if j_2.get_button(4) == 1 else False
#if len(events.keys()) > 0:
print(events)
return events
def debug(self):
# joystick buttons
for j in range(len(self.joysticks)):
joystick = pygame.joystick.Joystick(j)
#name = joystick.get_name()
#print("Joystick name: {}".format(name) )
#axes = joystick.get_numaxes()
#print("Number of axes: {}".format(axes) )
#for i in range( axes ):
# axis = joystick.get_axis( i )
# print("Axis {} value: {:>6.0f}".format(i, axis) )
buttons = joystick.get_numbuttons()
#print("Number of buttons: {}".format(buttons) )
for i in range( buttons ):
button = joystick.get_button( i )
print("Button {:>2} value: {}".format(i,button) )
#hats = joystick.get_numhats()
#print("Number of hats: {}".format(hats) )
#for i in range( hats ):
# hat = joystick.get_hat( i )
# print("Hat {} value: {}".format(i, str(hat)) )
| 37.172727 | 98 | 0.630717 |
4a27b9c057b4f29ab24b0ccc3787557b56f340ac | 2,962 | py | Python | supports/pyload/src/pyload/plugins/downloaders/RapiduNet.py | LuckyNicky/pycrawler | 4b3fe2f6e8e51f236d95a64a89a44199e4e97743 | [
"Apache-2.0"
] | 1 | 2020-04-02T17:03:39.000Z | 2020-04-02T17:03:39.000Z | supports/pyload/src/pyload/plugins/downloaders/RapiduNet.py | LuckyNicky/pycrawler | 4b3fe2f6e8e51f236d95a64a89a44199e4e97743 | [
"Apache-2.0"
] | null | null | null | supports/pyload/src/pyload/plugins/downloaders/RapiduNet.py | LuckyNicky/pycrawler | 4b3fe2f6e8e51f236d95a64a89a44199e4e97743 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
import json
import time
from datetime import timedelta
import pycurl
from ..anticaptchas.ReCaptcha import ReCaptcha
from ..base.simple_downloader import SimpleDownloader
class RapiduNet(SimpleDownloader):
__name__ = "RapiduNet"
__type__ = "downloader"
__version__ = "0.15"
__status__ = "testing"
__pyload_version__ = "0.5"
__pattern__ = r"https?://(?:www\.)?rapidu\.net/(?P<ID>\d{10})"
__config__ = [
("enabled", "bool", "Activated", True),
("use_premium", "bool", "Use premium account if available", True),
("fallback", "bool", "Fallback to free download if premium fails", True),
("chk_filesize", "bool", "Check file size", True),
("max_wait", "int", "Reconnect if waiting time is greater than minutes", 10),
]
__description__ = """Rapidu.net downloader plugin"""
__license__ = "GPLv3"
__authors__ = [("prOq", None)]
COOKIES = [("rapidu.net", "rapidu_lang", "en")]
INFO_PATTERN = (
r'<h1 title="(?P<N>.*)">.*</h1>\s*<small>(?P<S>\d+(\.\d+)?)\s(?P<U>\w+)</small>'
)
OFFLINE_PATTERN = r"<h1>404"
ERROR_PATTERN = r'<div class="error">'
RECAPTCHA_KEY = r"6Ld12ewSAAAAAHoE6WVP_pSfCdJcBQScVweQh8Io"
def setup(self):
self.resume_download = True
self.multi_dl = self.premium
def handle_free(self, pyfile):
self.req.http.last_url = pyfile.url
self.req.http.c.setopt(pycurl.HTTPHEADER, ["X-Requested-With: XMLHttpRequest"])
jsvars = self.get_json_response(
"https://rapidu.net/ajax.php",
get={"a": "getLoadTimeToDownload"},
post={"_go": ""},
)
if str(jsvars["timeToDownload"]) == "stop":
t = (
(timedelta(hours=24).seconds)
- (int(time.time()) % timedelta(hours=24).seconds)
+ time.altzone
)
self.log_info(self._("You've reach your daily download transfer"))
# NOTE: check t in case of not synchronised clock
self.retry(10, 10 if t < 1 else None, self._("Try tomorrow again"))
else:
self.wait(int(jsvars["timeToDownload"]) - int(time.time()))
self.captcha = ReCaptcha(pyfile)
response, challenge = self.captcha.challenge(self.RECAPTCHA_KEY)
jsvars = self.get_json_response(
"https://rapidu.net/ajax.php",
get={"a": "getCheckCaptcha"},
post={
"_go": "",
"captcha1": challenge,
"captcha2": response,
"fileId": self.info["pattern"]["ID"],
},
)
if jsvars["message"] == "success":
self.link = jsvars["url"]
def get_json_response(self, *args, **kwargs):
res = self.load(*args, **kwargs)
if not res.startswith("{"):
self.retry()
self.log_debug(res)
return json.loads(res)
| 29.919192 | 88 | 0.565159 |
4a27ba2ff98301a758075bbcd8c347345390ad59 | 10,453 | py | Python | mayan/apps/documents/tests/test_trashed_document_views.py | nattangwiwat/Mayan-EDMS-recitation | fcf16afb56eae812fb99144d65ae1ae6749de0b7 | [
"Apache-2.0"
] | 4 | 2021-09-02T00:16:30.000Z | 2021-09-09T22:25:15.000Z | mayan/apps/documents/tests/test_trashed_document_views.py | nattangwiwat/Mayan-EDMS-recitation | fcf16afb56eae812fb99144d65ae1ae6749de0b7 | [
"Apache-2.0"
] | 86 | 2021-09-01T23:53:02.000Z | 2021-09-20T02:25:10.000Z | mayan/apps/documents/tests/test_trashed_document_views.py | nattangwiwat/Mayan-EDMS-recitation | fcf16afb56eae812fb99144d65ae1ae6749de0b7 | [
"Apache-2.0"
] | 70 | 2021-09-01T12:54:51.000Z | 2022-02-16T00:53:18.000Z | from ..models.document_models import Document
from ..models.trashed_document_models import TrashedDocument
from ..permissions import (
permission_trashed_document_delete, permission_trashed_document_restore,
permission_document_trash, permission_document_view,
permission_trash_empty
)
from .base import GenericDocumentViewTestCase
from .mixins.trashed_document_mixins import TrashedDocumentViewTestMixin
class DocumentTrashViewTestCase(
TrashedDocumentViewTestMixin, GenericDocumentViewTestCase
):
def test_document_trash_get_view_no_permission(self):
document_count = Document.valid.count()
trashed_document_count = TrashedDocument.objects.count()
response = self._request_test_document_trash_get_view()
self.assertEqual(response.status_code, 404)
self.assertEqual(Document.valid.count(), document_count)
self.assertEqual(
TrashedDocument.objects.count(), trashed_document_count
)
def test_document_trash_get_view_with_access(self):
self.grant_access(
obj=self.test_document, permission=permission_document_trash
)
document_count = Document.valid.count()
trashed_document_count = TrashedDocument.objects.count()
response = self._request_test_document_trash_get_view()
self.assertEqual(response.status_code, 200)
self.assertEqual(Document.valid.count(), document_count)
self.assertEqual(
TrashedDocument.objects.count(), trashed_document_count
)
def test_trashed_document_trash_get_view_with_access(self):
self.grant_access(
obj=self.test_document, permission=permission_document_trash
)
self.test_document.delete()
document_count = Document.valid.count()
trashed_document_count = TrashedDocument.objects.count()
response = self._request_test_document_trash_get_view()
self.assertEqual(response.status_code, 404)
self.assertEqual(Document.valid.count(), document_count)
self.assertEqual(
TrashedDocument.objects.count(), trashed_document_count
)
def test_document_trash_post_view_no_permission(self):
document_count = Document.valid.count()
trashed_document_count = TrashedDocument.objects.count()
response = self._request_test_document_trash_post_view()
self.assertEqual(response.status_code, 404)
self.assertEqual(Document.valid.count(), document_count)
self.assertEqual(
TrashedDocument.objects.count(), trashed_document_count
)
def test_document_trash_post_view_with_access(self):
self.grant_access(
obj=self.test_document, permission=permission_document_trash
)
document_count = Document.valid.count()
trashed_document_count = TrashedDocument.objects.count()
response = self._request_test_document_trash_post_view()
self.assertEqual(response.status_code, 302)
self.assertEqual(Document.valid.count(), document_count - 1)
self.assertEqual(
TrashedDocument.objects.count(), trashed_document_count + 1
)
def test_trashed_document_trash_post_view_with_access(self):
self.grant_access(
obj=self.test_document, permission=permission_document_trash
)
self.test_document.delete()
document_count = Document.valid.count()
trashed_document_count = TrashedDocument.objects.count()
response = self._request_test_document_trash_post_view()
self.assertEqual(response.status_code, 404)
self.assertEqual(Document.valid.count(), document_count)
self.assertEqual(
TrashedDocument.objects.count(), trashed_document_count
)
class TrashedDocumentViewTestCase(
TrashedDocumentViewTestMixin, GenericDocumentViewTestCase
):
def test_trashed_document_delete_get_view_no_permission(self):
self.test_document.delete()
document_count = Document.valid.count()
trashed_document_count = TrashedDocument.objects.count()
response = self._request_test_trashed_document_delete_get_view()
self.assertEqual(response.status_code, 404)
self.assertEqual(
Document.valid.count(), document_count
)
self.assertEqual(
TrashedDocument.objects.count(), trashed_document_count
)
def test_trashed_document_delete_get_view_with_access(self):
self.test_document.delete()
document_count = Document.valid.count()
trashed_document_count = TrashedDocument.objects.count()
self.grant_access(
obj=self.test_document,
permission=permission_trashed_document_delete
)
response = self._request_test_trashed_document_delete_get_view()
self.assertEqual(response.status_code, 200)
self.assertEqual(
Document.valid.count(), document_count
)
self.assertEqual(
TrashedDocument.objects.count(), trashed_document_count
)
def test_trashed_document_delete_post_view_no_permission(self):
self.test_document.delete()
document_count = Document.valid.count()
trashed_document_count = TrashedDocument.objects.count()
response = self._request_test_trashed_document_delete_post_view()
self.assertEqual(response.status_code, 404)
self.assertEqual(Document.valid.count(), document_count)
self.assertEqual(
TrashedDocument.objects.count(), trashed_document_count
)
def test_trashed_document_delete_post_view_with_access(self):
self.test_document.delete()
document_count = Document.valid.count()
trashed_document_count = TrashedDocument.objects.count()
self.grant_access(
obj=self.test_document, permission=permission_trashed_document_delete
)
response = self._request_test_trashed_document_delete_post_view()
self.assertEqual(response.status_code, 302)
self.assertEqual(Document.valid.count(), document_count)
self.assertEqual(
TrashedDocument.objects.count(), trashed_document_count - 1
)
def test_trashed_document_list_view_no_permission(self):
self.test_document.delete()
response = self._request_test_trashed_document_list_view()
self.assertNotContains(
response=response, text=self.test_document.label, status_code=200
)
def test_trashed_document_list_view_with_access(self):
self.test_document.delete()
self.grant_access(
obj=self.test_document, permission=permission_document_view
)
response = self._request_test_trashed_document_list_view()
self.assertContains(
response=response, text=self.test_document.label, status_code=200
)
def test_trashed_document_restore_get_view_no_permission(self):
self.test_document.delete()
document_count = Document.valid.count()
trashed_document_count = TrashedDocument.objects.count()
response = self._request_test_trashed_document_restore_get_view()
self.assertEqual(response.status_code, 404)
self.assertEqual(Document.valid.count(), document_count)
self.assertEqual(
TrashedDocument.objects.count(), trashed_document_count
)
def test_trashed_document_restore_get_view_with_access(self):
self.test_document.delete()
document_count = Document.valid.count()
trashed_document_count = TrashedDocument.objects.count()
self.grant_access(
obj=self.test_document,
permission=permission_trashed_document_restore
)
document_count = Document.valid.count()
response = self._request_test_trashed_document_restore_get_view()
self.assertEqual(response.status_code, 200)
self.assertEqual(Document.valid.count(), document_count)
self.assertEqual(
TrashedDocument.objects.count(), trashed_document_count
)
def test_trashed_document_restore_post_view_no_permission(self):
self.test_document.delete()
document_count = Document.valid.count()
trashed_document_count = TrashedDocument.objects.count()
response = self._request_test_trashed_document_restore_post_view()
self.assertEqual(response.status_code, 404)
self.assertEqual(Document.valid.count(), document_count)
self.assertEqual(
TrashedDocument.objects.count(), trashed_document_count
)
def test_trashed_document_restore_post_view_with_access(self):
self.test_document.delete()
document_count = Document.valid.count()
trashed_document_count = TrashedDocument.objects.count()
self.grant_access(
obj=self.test_document,
permission=permission_trashed_document_restore
)
response = self._request_test_trashed_document_restore_post_view()
self.assertEqual(response.status_code, 302)
self.assertEqual(Document.valid.count(), document_count + 1)
self.assertEqual(
TrashedDocument.objects.count(), trashed_document_count - 1
)
class TrashCanViewTestCase(
TrashedDocumentViewTestMixin, GenericDocumentViewTestCase
):
def test_trash_can_empty_view_no_permission(self):
self.test_document.delete()
document_count = Document.valid.count()
trashed_document_count = TrashedDocument.objects.count()
response = self._request_empty_trash_view()
self.assertEqual(response.status_code, 403)
self.assertEqual(Document.valid.count(), document_count)
self.assertEqual(
TrashedDocument.objects.count(), trashed_document_count
)
def test_trash_can_empty_view_with_permission(self):
self.test_document.delete()
document_count = Document.valid.count()
trashed_document_count = TrashedDocument.objects.count()
self.grant_permission(permission=permission_trash_empty)
response = self._request_empty_trash_view()
self.assertEqual(response.status_code, 302)
self.assertEqual(Document.valid.count(), document_count)
self.assertEqual(
TrashedDocument.objects.count(), trashed_document_count - 1
)
| 35.433898 | 81 | 0.710705 |
4a27ba6c5ec0a330e5b977a78d274eaf6bd171f9 | 5,170 | py | Python | rl_agents/agents/deep_q_network/pytorch.py | saArbabi/rl-agents | 18ae779f015748eefb346e34b8406a3e4ff16208 | [
"MIT"
] | 1 | 2021-06-10T07:16:36.000Z | 2021-06-10T07:16:36.000Z | rl_agents/agents/deep_q_network/pytorch.py | saArbabi/rl-agents | 18ae779f015748eefb346e34b8406a3e4ff16208 | [
"MIT"
] | null | null | null | rl_agents/agents/deep_q_network/pytorch.py | saArbabi/rl-agents | 18ae779f015748eefb346e34b8406a3e4ff16208 | [
"MIT"
] | null | null | null | import logging
import torch
from rl_agents.agents.common.memory import Transition
from rl_agents.agents.common.models import model_factory, size_model_config, trainable_parameters
from rl_agents.agents.common.optimizers import loss_function_factory, optimizer_factory
from rl_agents.agents.common.utils import choose_device
from rl_agents.agents.deep_q_network.abstract import AbstractDQNAgent
logger = logging.getLogger(__name__)
class DQNAgent(AbstractDQNAgent):
def __init__(self, env, config=None):
super(DQNAgent, self).__init__(env, config)
size_model_config(self.env, self.config["model"])
self.value_net = model_factory(self.config["model"])
self.target_net = model_factory(self.config["model"])
self.target_net.load_state_dict(self.value_net.state_dict())
self.target_net.eval()
logger.debug("Number of trainable parameters: {}".format(trainable_parameters(self.value_net)))
self.device = choose_device(self.config["device"])
self.value_net.to(self.device)
self.target_net.to(self.device)
self.loss_function = loss_function_factory(self.config["loss_function"])
self.optimizer = optimizer_factory(self.config["optimizer"]["type"],
self.value_net.parameters(),
**self.config["optimizer"])
self.steps = 0
def step_optimizer(self, loss):
# Optimize the model
self.optimizer.zero_grad()
loss.backward()
for param in self.value_net.parameters():
param.grad.data.clamp_(-1, 1)
self.optimizer.step()
def compute_bellman_residual(self, batch, target_state_action_value=None):
        # Concatenate the batch elements into tensors
if not isinstance(batch.state, torch.Tensor):
# logger.info("Casting the batch to torch.tensor")
state = torch.cat(tuple(torch.tensor([batch.state], dtype=torch.float))).to(self.device)
action = torch.tensor(batch.action, dtype=torch.long).to(self.device)
reward = torch.tensor(batch.reward, dtype=torch.float).to(self.device)
next_state = torch.cat(tuple(torch.tensor([batch.next_state], dtype=torch.float))).to(self.device)
terminal = torch.tensor(batch.terminal, dtype=torch.bool).to(self.device)
batch = Transition(state, action, reward, next_state, terminal, batch.info)
# Compute Q(s_t, a) - the model computes Q(s_t), then we select the
# columns of actions taken
state_action_values = self.value_net(batch.state)
state_action_values = state_action_values.gather(1, batch.action.unsqueeze(1)).squeeze(1)
if target_state_action_value is None:
with torch.no_grad():
# Compute V(s_{t+1}) for all next states.
next_state_values = torch.zeros(batch.reward.shape).to(self.device)
if self.config["double"]:
# Double Q-learning: pick best actions from policy network
_, best_actions = self.value_net(batch.next_state).max(1)
# Double Q-learning: estimate action values from target network
best_values = self.target_net(batch.next_state).gather(1, best_actions.unsqueeze(1)).squeeze(1)
else:
best_values, _ = self.target_net(batch.next_state).max(1)
next_state_values[~batch.terminal] = best_values[~batch.terminal]
# Compute the expected Q values
target_state_action_value = batch.reward + self.config["gamma"] * next_state_values
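                # In formula form: y = r + gamma * max_a' Q_target(s', a') for non-terminal
                # transitions (with double DQN, a' = argmax_a Q_online(s', a)); terminal
                # transitions keep y = r because next_state_values stays zero there.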
# Compute loss
loss = self.loss_function(state_action_values, target_state_action_value)
return loss, target_state_action_value, batch
def get_batch_state_values(self, states):
values, actions = self.value_net(torch.tensor(states, dtype=torch.float).to(self.device)).max(1)
return values.data.cpu().numpy(), actions.data.cpu().numpy()
def get_batch_state_action_values(self, states):
return self.value_net(torch.tensor(states, dtype=torch.float).to(self.device)).data.cpu().numpy()
def save(self, filename):
state = {'state_dict': self.value_net.state_dict(),
'optimizer': self.optimizer.state_dict()}
torch.save(state, filename)
return filename
def load(self, filename):
checkpoint = torch.load(filename, map_location=self.device)
self.value_net.load_state_dict(checkpoint['state_dict'])
self.target_net.load_state_dict(checkpoint['state_dict'])
self.optimizer.load_state_dict(checkpoint['optimizer'])
return filename
def initialize_model(self):
self.value_net.reset()
def set_writer(self, writer):
super().set_writer(writer)
model_input = torch.zeros((1, *self.env.observation_space.shape), dtype=torch.float, device=self.device)
self.writer.add_graph(self.value_net, input_to_model=(model_input,)),
self.writer.add_scalar("agent/trainable_parameters", trainable_parameters(self.value_net), 0)
| 50.686275 | 115 | 0.671567 |
4a27ba6db1fbcf1085f67e1865f5ebebd2addce8 | 10,931 | py | Python | nasse/utils/xml.py | Animenosekai/nasse | ba83187cab84b62338a5fab00db12f7d80a9fb5b | [
"MIT"
] | null | null | null | nasse/utils/xml.py | Animenosekai/nasse | ba83187cab84b62338a5fab00db12f7d80a9fb5b | [
"MIT"
] | null | null | null | nasse/utils/xml.py | Animenosekai/nasse | ba83187cab84b62338a5fab00db12f7d80a9fb5b | [
"MIT"
] | null | null | null | """
XML Conversion
Source: dict2xml by delfick \n
GitHub: https://github.com/delfick/python-dict2xml \n
PyPI: https://pypi.org/project/dict2xml/ \n
Based on version 1.7.0 \n
Licensed under the MIT License
The MIT License (MIT)
Copyright (c) 2018 Stephen Moore
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
-------------------
Arranged for Nasse
It supports bytes, file-like objects, null (None), booleans, and fallback for non-supported types.
The API is also a little bit simpler.
"""
import base64
import collections
import collections.abc
import re
import typing
from nasse import logging, utils
start_ranges = "|".join(
"[{0}]".format(r)
for r in [
"\xC0-\xD6",
"\xD8-\xF6",
"\xF8-\u02FF",
"\u0370-\u037D",
"\u037F-\u1FFF",
"\u200C-\u200D",
"\u2070-\u218F",
"\u2C00-\u2FEF",
"\u3001-\uD7FF",
"\uF900-\uFDCF",
"\uFDF0-\uFFFD",
]
)
NameStartChar = re.compile(r"(:|[A-Z]|_|[a-z]|{0})".format(start_ranges))
NameChar = re.compile(r"(\-|\.|[0-9]|\xB7|[\u0300-\u036F]|[\u203F-\u2040])")
########################
# NODE
########################
class Node(object):
"""
Represents each tag in the tree
Each node has _either_ a single value or one or more children
If it has a value:
The serialized result is <%(tag)s>%(value)s</%(tag)s>
If it has children:
The serialized result is
<%(wrap)s>
%(children)s
</%(wrap)s>
Which one it is depends on the implementation of self.convert
"""
# A mapping of characters to treat as escapable entities and their replacements
entities = [("&", "&"), ("<", "<"), (">", ">")]
def __init__(self, wrap="", tag="", data=None, iterables_repeat_wrap=True):
self.tag = self.sanitize_element(tag)
self.wrap = self.sanitize_element(wrap)
self.data = data
self.type = self.determine_type()
self.iterables_repeat_wrap = iterables_repeat_wrap
if self.type == "flat" and isinstance(self.data, str):
# Make sure we deal with entities
for entity, replacement in self.entities:
self.data = self.data.replace(entity, replacement)
def serialize(self, indenter):
"""Returns the Node serialized as an xml string"""
# Determine the start and end of this node
wrap = self.wrap
end, start = "", ""
if wrap:
end = "</{0}>".format(wrap)
start = "<{0}>".format(wrap)
# Convert the data attached in this node into a value and children
value, children = self.convert()
# Determine the content of the node (essentially the children as a string value)
content = ""
if children:
if self.type != "iterable":
                # Non-iterable wraps all its children in the same tag
content = indenter((c.serialize(indenter)
for c in children), wrap)
else:
if self.iterables_repeat_wrap:
# Iterables repeat the wrap for each child
result = []
for c in children:
content = c.serialize(indenter)
if c.type == "flat":
# Child with value, it already is surrounded by the tag
result.append(content)
else:
                            # Child with children of its own, they need to be wrapped by start and end
content = indenter([content], True)
result.append("".join((start, content, end)))
# We already have what we want, return the indented result
return indenter(result, False)
else:
result = []
for c in children:
result.append(c.serialize(indenter))
return "".join([start, indenter(result, True), end])
# If here, either:
# * Have a value
# * Or this node is not an iterable
return "".join((start, value, content, end))
def determine_type(self):
"""
Return the type of the data on this node as an identifying string
* Iterable : Supports "for item in data"
* Mapping : Supports "for key in data: value = data[key]"
* flat : A string or something that isn't iterable or a mapping
"""
data = self.data
if isinstance(data, (int, float, str)):
return "flat"
elif isinstance(data, bool):
return "bool"
elif data is None:
return "null"
elif utils.annotations.is_unpackable(data):
return "mapping"
elif isinstance(data, bytes):
return "bytes"
elif hasattr(data, "read") and hasattr(data, "tell") and hasattr(data, "seek"):
return "file"
elif isinstance(data, typing.Iterable):
return "iterable"
else:
logging.log("Object of type <{type}> will be converted to str while encoding to XML".format(
type=data.__class__.__name__))
return "flat"
def convert(self):
"""
Convert data on this node into a (value, children) tuple depending on the type of the data
If the type is :
* flat : Use self.tag to surround the value. <tag>value</tag>
* mapping : Return a list of tags where the key for each child is the wrap for that node
* iterable : Return a list of Nodes where self.wrap is the tag for that node
"""
val = ""
typ = self.type
data = self.data
children = []
if typ == "mapping":
sorted_data = data
if not isinstance(data, collections.OrderedDict):
sorted_data = sorted(data)
for key in sorted_data:
item = data[key]
children.append(
Node(key, "", item,
iterables_repeat_wrap=self.iterables_repeat_wrap)
)
elif typ == "iterable":
for item in data:
children.append(
Node("", self.wrap, item,
iterables_repeat_wrap=self.iterables_repeat_wrap,)
)
elif typ == "bool":
val = "true" if data else "false"
if self.tag:
val = "<{0}>{1}</{2}>".format(self.tag, val, self.tag)
elif typ == "null":
val = "null"
if self.tag:
val = "<{0}>{1}</{2}>".format(self.tag, val, self.tag)
elif typ == "bytes":
val = base64.b64encode(data).decode("utf-8")
if self.tag:
val = "<{0}>{1}</{2}>".format(self.tag, val, self.tag)
elif typ == "file":
position = data.tell() # storing the current position
content = data.read() # read it (place the cursor at the end)
data.seek(position) # go back to the original position
if "b" in data.mode: # if binary mode
                val = base64.b64encode(content).decode("utf-8")
else:
val = str(content)
if self.tag:
val = "<{0}>{1}</{2}>".format(self.tag, val, self.tag)
else:
val = str(data)
if self.tag:
val = "<{0}>{1}</{2}>".format(self.tag, val, self.tag)
return val, children
@staticmethod
def sanitize_element(wrap):
"""
Convert `wrap` into a valid tag name applying the XML Naming Rules.
* Names can contain letters, numbers, and other characters
* Names cannot start with a number or punctuation character
* Names cannot start with the letters xml (or XML, or Xml, etc)
* Names cannot contain spaces
* Any name can be used, no words are reserved.
:ref: http://www.w3.org/TR/REC-xml/#NT-NameChar
"""
if wrap and isinstance(wrap, str):
if wrap.lower().startswith("xml"):
wrap = "_" + wrap
return "".join(
["_" if not NameStartChar.match(wrap) else ""]
+ ["_" if not (NameStartChar.match(c)
or NameChar.match(c)) else c for c in wrap]
)
else:
return wrap
def encode(data, minify: bool = False):
"""
Encodes Python data to XML
Parameters
----------
data: Any
The data to be converted
minify: bool
If the result should be minified
"""
if minify:
def ret(nodes, wrapped): return "".join(nodes)
else:
def ret(nodes, wrapped):
"""
Indent nodes depending on value of wrapped and indent
If not wrapped, then don't indent
Otherwise,
                Separate each child by a newline
and indent each line in the child by one indent unit
"""
def eachline(nodes):
"""Yield each line in each node"""
for node in nodes:
for line in node.split("\n"):
yield line
if wrapped:
seperator = "\n{0}".format(" ")
surrounding = "\n{0}{{0}}\n".format(" ")
else:
seperator = "\n"
surrounding = "{0}"
return surrounding.format(seperator.join(eachline(nodes)))
return Node( # wrap should be app.id
wrap="nasse", data=data, iterables_repeat_wrap=True
).serialize(ret)
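# Illustrative example (the root tag comes from the wrap above):
#   encode({"a": "x", "b": [1, 2]}, minify=True)
#   == '<nasse><a>x</a><b>1</b><b>2</b></nasse>'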
| 36.195364 | 104 | 0.541488 |
4a27bb14b1ce70e76658a3db9b156d4e8c7baabe | 616 | py | Python | atcoder/abc/abc149_d.py | knuu/competitive-programming | 16bc68fdaedd6f96ae24310d697585ca8836ab6e | [
"MIT"
] | 1 | 2018-11-12T15:18:55.000Z | 2018-11-12T15:18:55.000Z | atcoder/abc/abc149_d.py | knuu/competitive-programming | 16bc68fdaedd6f96ae24310d697585ca8836ab6e | [
"MIT"
] | null | null | null | atcoder/abc/abc149_d.py | knuu/competitive-programming | 16bc68fdaedd6f96ae24310d697585ca8836ab6e | [
"MIT"
] | null | null | null | import sys
sys.setrecursionlimit(10 ** 5 + 100)
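# Memoized DFS: dfs(i, t) is the best score obtainable from round i onward when
# hand t is played at round i (the hand used K rounds earlier may not be repeated,
# so rounds i, i+K, i+2K, ... form independent chains handled separately below).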
def dfs(i, type_):
if i >= N:
return 0
if dp[i][type_] != -1:
return dp[i][type_]
ret = 0
for t in range(3):
if t == type_:
continue
ret = max(ret, dfs(i + K, t))
dp[i][type_] = ret + ((type_ + 1) % 3 == T[i]) * score[type_]
return dp[i][type_]
N, K = map(int, input().split())
score = [int(x) for x in input().split()]
dic = {"r": 0, "s": 1, "p": 2}
T = [dic[x] for x in input()]
dp = [[-1] * 3 for _ in range(N)]
ans = 0
for i in range(K):
ans += max(dfs(i, t) for t in range(3))
print(ans)
| 22.814815 | 65 | 0.49513 |
4a27bbcbfb948e5b30f69959abdef66c06c20612 | 911 | py | Python | cocos/tests/test_numerics/test_statistics/test_rng/test_logistic_rng.py | michaelnowotny/cocos | 3c34940d7d9eb8592a97788a5df84b8d472f2928 | [
"MIT"
] | 101 | 2019-03-30T05:23:01.000Z | 2021-11-27T09:09:40.000Z | cocos/tests/test_numerics/test_statistics/test_rng/test_logistic_rng.py | michaelnowotny/cocos | 3c34940d7d9eb8592a97788a5df84b8d472f2928 | [
"MIT"
] | 3 | 2019-04-17T06:04:12.000Z | 2020-12-14T17:36:01.000Z | cocos/tests/test_numerics/test_statistics/test_rng/test_logistic_rng.py | michaelnowotny/cocos | 3c34940d7d9eb8592a97788a5df84b8d472f2928 | [
"MIT"
] | 5 | 2020-02-07T14:29:50.000Z | 2020-12-09T17:54:07.000Z | import numpy as np
import pytest
import cocos.numerics as cn
from cocos.tests.test_numerics.test_statistics.utilities import perform_ks_test
n_kolmogorov_smirnov = 1000000
test_data = [(5.0, np.sqrt(3.0) / np.pi, n_kolmogorov_smirnov),
(5.0, 2.0, n_kolmogorov_smirnov),
(9.0, 3.0, n_kolmogorov_smirnov),
(9.0, 4.0, n_kolmogorov_smirnov),
(6.0, 2.0, n_kolmogorov_smirnov),
(2.0, 1.0, n_kolmogorov_smirnov)]
@pytest.mark.parametrize("loc, scale, n_kolmogorov_smirnov", test_data)
def test_logistic_distribution(loc, scale, n_kolmogorov_smirnov):
u = cn.random.logistic(loc, scale, n_kolmogorov_smirnov)
reject = perform_ks_test(u,
alpha=0.01,
distribution='logistic',
args=(loc, scale),
verbose=True)
assert not reject
| 33.740741 | 79 | 0.614709 |
4a27bc4303f579b99c73833f6b3b8d05c5a62e9d | 957 | py | Python | agent/indy_catalyst_agent/messaging/responder.py | mikelodder7/indy-catalyst | 5e80d8d6764144303b7ef1851aee32291bcb2d98 | [
"Apache-2.0"
] | null | null | null | agent/indy_catalyst_agent/messaging/responder.py | mikelodder7/indy-catalyst | 5e80d8d6764144303b7ef1851aee32291bcb2d98 | [
"Apache-2.0"
] | null | null | null | agent/indy_catalyst_agent/messaging/responder.py | mikelodder7/indy-catalyst | 5e80d8d6764144303b7ef1851aee32291bcb2d98 | [
"Apache-2.0"
] | null | null | null | from abc import ABC
from .agent_message import AgentMessage
from ..models.connection_target import ConnectionTarget
from ..error import BaseError
class ResponderError(BaseError):
"""Responder error."""
pass
class BaseResponder(ABC):
"""Interface for message handlers to send responses"""
async def send_outbound(self, message: AgentMessage, target: ConnectionTarget):
"""
Send a message to a given connection target (endpoint). The
message may be queued for later delivery.
"""
async def send_reply(self, message: AgentMessage):
"""
Send a message back to the same agent. This relies
on the presence of an active connection. The message
may be multicast to multiple endpoints or queued for
later delivery.
"""
async def send_admin_message(self, message: AgentMessage):
"""
Send an admin message to active listeners.
"""
| 27.342857 | 83 | 0.673981 |
4a27bcf1e7a83ec31d775db561c7bf2125fa9257 | 4,987 | py | Python | library/f5bigip_ltm_profile_fix.py | GabrielFortin/ansible-module-f5bigip | 8d1323e912388e20eafd63a73ec015dd6d8a012c | [
"Apache-2.0"
] | 6 | 2017-01-11T01:28:00.000Z | 2019-02-19T16:11:09.000Z | library/f5bigip_ltm_profile_fix.py | GabrielFortin/ansible-module-f5bigip | 8d1323e912388e20eafd63a73ec015dd6d8a012c | [
"Apache-2.0"
] | 48 | 2017-05-29T17:50:59.000Z | 2020-02-09T15:24:27.000Z | library/f5bigip_ltm_profile_fix.py | GabrielFortin/ansible-module-f5bigip | 8d1323e912388e20eafd63a73ec015dd6d8a012c | [
"Apache-2.0"
] | 5 | 2017-05-05T18:30:51.000Z | 2017-12-19T23:13:05.000Z | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright 2016-2018, Eric Jacob <[email protected]>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
ANSIBLE_METADATA = {
'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'
}
DOCUMENTATION = '''
---
module: f5bigip_ltm_profile_fix
short_description: BIG-IP ltm profile fix module
description:
- Configures a Financial Information eXchange Protocol (FIX) profile.
version_added: "2.4"
author:
- "Gabriel Fortin (@GabrielFortin)"
options:
app_service:
description:
- Specifies the name of the application service to which this object belongs.
error_action:
description:
- Specifies the error handling method.
choices: ['drop_connection', 'dont_forward']
full_logon_parsing:
description:
- Enable or disable logon message is always fully parsed.
default: true
choices: ['true', 'false']
message_log_publisher:
description:
- Specifies the publisher for message logging.
partition:
description:
- Specifies the administrative partition within which the profile resides.
quick_parsing:
description:
- Enable or disable quick parsing which parses the basic standard fields and validates message length and
checksum.
default: false
choices: ['true', 'false']
report_log_publisher:
description:
- Specifies the publisher for error message and status report.
response_parsing:
description:
- Enable or disable response parsing which parses the messages from FIX server.
default: false
choices: ['true', 'false']
sender_tag_class:
description:
- Specifies the tag substitution map between sender id and tag substitution data group.
state:
description:
- Specifies the state of the component on the BIG-IP system.
default: present
choices: ['absent', 'present']
statistics_sample_interval:
description:
- Specifies the sample interval in seconds of the message rate.
requirements:
- BIG-IP >= 12.0
- ansible-common-f5
- f5-sdk
'''
EXAMPLES = '''
- name: Create LTM Profile FIX
f5bigip_ltm_profile_fix:
f5_hostname: 172.16.227.35
f5_username: admin
f5_password: admin
f5_port: 443
name: my_fix_profile
partition: Common
state: present
delegate_to: localhost
'''
RETURN = ''' # '''
from ansible.module_utils.basic import AnsibleModule
from ansible_common_f5.base import F5_NAMED_OBJ_ARGS
from ansible_common_f5.base import F5_PROVIDER_ARGS
from ansible_common_f5.bigip import F5BigIpNamedObject
class ModuleParams(object):
@property
def argument_spec(self):
argument_spec = dict(
app_service=dict(type='str'),
error_action=dict(type='str', choices=['drop_connection', 'dont_forward']),
full_logon_parsing=dict(type='str', choices=['true', 'false']),
message_log_publisher=dict(type='str'),
quick_parsing=dict(type='str', choices=['true', 'false']),
report_log_publisher=dict(type='str'),
response_parsing=dict(type='str', choices=['true', 'false']),
sender_tag_class=dict(type='dict'),
statistics_sample_interval=dict(type='int')
)
argument_spec.update(F5_PROVIDER_ARGS)
argument_spec.update(F5_NAMED_OBJ_ARGS)
return argument_spec
@property
def supports_check_mode(self):
return True
class F5BigIpLtmProfileFix(F5BigIpNamedObject):
def _set_crud_methods(self):
self._methods = {
'create': self._api.tm.ltm.profile.fixs.fix.create,
'read': self._api.tm.ltm.profile.fixs.fix.load,
'update': self._api.tm.ltm.profile.fixs.fix.update,
'delete': self._api.tm.ltm.profile.fixs.fix.delete,
'exists': self._api.tm.ltm.profile.fixs.fix.exists
}
def main():
params = ModuleParams()
module = AnsibleModule(argument_spec=params.argument_spec, supports_check_mode=params.supports_check_mode)
try:
obj = F5BigIpLtmProfileFix(check_mode=module.check_mode, **module.params)
result = obj.flush()
module.exit_json(**result)
except Exception as exc:
module.fail_json(msg=str(exc))
if __name__ == '__main__':
main()
| 32.809211 | 117 | 0.666934 |
4a27bd57670ccf1c31d1cd24395ce6ab830d14ee | 2,170 | py | Python | Aulas Python/Aula022.py | RenanRibeiroDaSilva/Meu-Aprendizado-Python | 280bf2ad132ae0d26255e70b894fa7dbb69a5d01 | [
"MIT"
] | 2 | 2021-05-21T23:17:44.000Z | 2021-05-22T04:34:37.000Z | Aulas Python/Aula022.py | RenanRibeiroDaSilva/Meu-Aprendizado-Python | 280bf2ad132ae0d26255e70b894fa7dbb69a5d01 | [
"MIT"
] | null | null | null | Aulas Python/Aula022.py | RenanRibeiroDaSilva/Meu-Aprendizado-Python | 280bf2ad132ae0d26255e70b894fa7dbb69a5d01 | [
"MIT"
] | null | null | null | """ Lesson - 022 - Modules and Packages
"""
'''
    Modularization:
        -> Emerged in the early 1960s.
        -> Systems were getting bigger and bigger.
        -> Goal: split a large program into parts.
        -> Goal: improve readability.
        -> Goal: make maintenance easier.
'''
# Theory:
'''
    Everything in a single script:
    def fatorial(n):
        f = 1
        for c in range(1, n+1):
            f *= c
        return f
    def dobro(n):
        return n * 2
    def triplo(n):
        return n * 3
    num = int(input("Digite um valor"))
    fat = fatorial(num)
    print(f"O fatorial de {num} é {fat}")
    Moving the functions into their own file creates a module:
    ------------------- uteis.py -----------------
    def fatorial(n):
        f = 1
        for c in range(1, n+1):
            f *= c
        return f
    def dobro(n):
        return n * 2
    def triplo(n):
        return n * 3
    -----------------------------------------------
    ^ Creating a module!
    With the uteis.py module:
    import uteis
    num = int(input("Digite um valor"))
    fat = uteis.fatorial(num)
    print(f"O fatorial de {num} é {fat}")
    print(f'O dobro de {num} é {uteis.dobro(num)}')
    print(f'O triplo de {num} é {uteis.triplo(num)}')
    Advantages of using modularization:
        -> Code organization
        -> Easier maintenance
        -> Hiding of detailed code
        -> Reuse in other projects
    >>> PACKAGES:
        A grouping of several modules, organized by subject!
'''
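# A minimal sketch of a package, as an illustration only (this layout is an
# assumption, not part of the original lesson):
#
#   uteis/                 <- a package is a folder containing __init__.py
#       __init__.py
#       numeros.py         <- a module holding fatorial(), dobro(), triplo()
#
# and in the main program:
#   from uteis import numeros
#   print(numeros.dobro(5))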
| 40.185185 | 97 | 0.337327 |
4a27bef1d52e7ebefa8fb178b0af4e049123f1dd | 33,206 | py | Python | tests/test_builder.py | thealphacod3r/tools-python | d197a3adf95e2f4fc78c6983f5477f9b962bdaab | [
"Apache-2.0"
] | null | null | null | tests/test_builder.py | thealphacod3r/tools-python | d197a3adf95e2f4fc78c6983f5477f9b962bdaab | [
"Apache-2.0"
] | null | null | null | tests/test_builder.py | thealphacod3r/tools-python | d197a3adf95e2f4fc78c6983f5477f9b962bdaab | [
"Apache-2.0"
] | 2 | 2021-06-09T12:03:20.000Z | 2021-07-01T07:42:17.000Z | # Copyright (c) 2014 Ahmed H. Ismail
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import print_function
from __future__ import unicode_literals
from unittest import TestCase
import tests.testing_utils as testing_utils
from spdx.document import Document, License
import spdx.parsers.tagvaluebuilders as builders
from spdx.version import Version
class TestDocumentBuilder(TestCase):
maxDiff = None
def setUp(self):
self.document = Document()
self.builder = builders.DocBuilder()
def test_correct_version(self):
version_str = "SPDX-2.1"
self.builder.set_doc_version(self.document, version_str)
assert self.document.version.major == 2 and self.document.version.minor == 1
@testing_utils.raises(builders.CardinalityError)
def test_version_cardinality(self):
version_str = "SPDX-2.1"
self.builder.set_doc_version(self.document, version_str)
self.builder.set_doc_version(self.document, version_str)
@testing_utils.raises(builders.SPDXValueError)
def test_version_value(self):
version_str = "2.1"
self.builder.set_doc_version(self.document, version_str)
def test_correct_data_lics(self):
lics_str = "CC0-1.0"
self.builder.set_doc_data_lics(self.document, lics_str)
assert self.document.data_license == License.from_identifier(lics_str)
@testing_utils.raises(builders.SPDXValueError)
def test_data_lics_value(self):
lics_str = "GPL"
self.builder.set_doc_data_lics(self.document, lics_str)
@testing_utils.raises(builders.CardinalityError)
def test_data_lics_cardinality(self):
lics_str = "CC0-1.0"
self.builder.set_doc_data_lics(self.document, lics_str)
self.builder.set_doc_data_lics(self.document, lics_str)
def test_correct_name(self):
name_str = "Sample_Document-V2.1"
self.builder.set_doc_name(self.document, name_str)
assert self.document.name == name_str
@testing_utils.raises(builders.CardinalityError)
def test_name_cardinality(self):
name_str = "Sample_Document-V2.1"
self.builder.set_doc_name(self.document, name_str)
self.builder.set_doc_name(self.document, name_str)
def test_correct_doc_namespace(self):
doc_namespace_str = "https://spdx.org/spdxdocs/spdx-example-444504E0-4F89-41D3-9A0C-0305E82C3301"
self.builder.set_doc_namespace(self.document, doc_namespace_str)
assert self.document.namespace == doc_namespace_str
@testing_utils.raises(builders.SPDXValueError)
def test_doc_namespace_value(self):
doc_namespace_str = "https://spdx.org/spdxdocs/spdx-example-444504E0-4F89-41D3-9A0C-0305E82C3301#SPDXRef-DOCUMENT"
self.builder.set_doc_data_lics(self.document, doc_namespace_str)
@testing_utils.raises(builders.CardinalityError)
def test_doc_namespace_cardinality(self):
doc_namespace_str = "https://spdx.org/spdxdocs/spdx-example-444504E0-4F89-41D3-9A0C-0305E82C3301"
self.builder.set_doc_namespace(self.document, doc_namespace_str)
self.builder.set_doc_namespace(self.document, doc_namespace_str)
def test_correct_data_comment(self):
comment_str = "This is a comment."
comment_text = "<text>" + comment_str + "</text>"
self.builder.set_doc_comment(self.document, comment_text)
assert self.document.comment == comment_str
@testing_utils.raises(builders.CardinalityError)
def test_comment_cardinality(self):
comment_str = "This is a comment."
comment_text = "<text>" + comment_str + "</text>"
self.builder.set_doc_comment(self.document, comment_text)
self.builder.set_doc_comment(self.document, comment_text)
@testing_utils.raises(builders.SPDXValueError)
def test_comment_value(self):
comment = "<text>slslss<text"
self.builder.set_doc_comment(self.document, comment)
class TestExternalDocumentRefBuilder(TestCase):
maxDiff = None
def setUp(self):
self.document = Document()
self.builder = builders.ExternalDocumentRefBuilder()
def test_external_doc_id(self):
ext_doc_id = "DocumentRef-spdx-tool-2.1"
self.builder.set_ext_doc_id(self.document, ext_doc_id)
assert (
self.document.ext_document_references[-1].external_document_id == ext_doc_id
)
def test_spdx_doc_uri(self):
spdx_doc_uri = "https://spdx.org/spdxdocs/spdx-tools-v2.1-3F2504E0-4F89-41D3-9A0C-0305E82C3301"
self.builder.set_ext_doc_id(self.document, "DocumentRef-spdx-tool-2.1")
self.builder.set_spdx_doc_uri(self.document, spdx_doc_uri)
assert (
self.document.ext_document_references[-1].spdx_document_uri == spdx_doc_uri
)
def test_checksum(self):
chksum = "SHA1: d6a770ba38583ed4bb4525bd96e50461655d2759"
chksum_val = "d6a770ba38583ed4bb4525bd96e50461655d2759"
self.builder.set_ext_doc_id(self.document, "DocumentRef-spdx-tool-2.1")
self.builder.set_chksum(self.document, chksum)
assert self.document.ext_document_references[-1].check_sum.value == chksum_val
def test_add_ext_doc_refs(self):
ext_doc_id_val = "DocumentRef-spdx-tool-2.1"
spdx_doc_uri = "http://spdx.org/spdxdocs/spdx-tools-v2.1-3F2504E0-4F89-41D3-9A0C-0305E82C3301"
chksum = "SHA1: d6a770ba38583ed4bb4525bd96e50461655d2759"
chksum_val = "d6a770ba38583ed4bb4525bd96e50461655d2759"
self.builder.add_ext_doc_refs(
self.document, ext_doc_id_val, spdx_doc_uri, chksum
)
assert (
self.document.ext_document_references[-1].external_document_id
== ext_doc_id_val
)
assert (
self.document.ext_document_references[-1].spdx_document_uri == spdx_doc_uri
)
assert self.document.ext_document_references[-1].check_sum.value == chksum_val
class TestEntityBuilder(TestCase):
maxDiff = None
def setUp(self):
self.builder = builders.EntityBuilder()
self.document = Document()
def test_tool(self):
tool_name = "autoanal-2.0"
tool_str = "Tool: " + tool_name
tool = self.builder.build_tool(self.document, tool_str)
assert tool.name == tool_name
@testing_utils.raises(builders.SPDXValueError)
def test_tool_value_error(self):
tool_str = "tool: ll"
self.builder.build_tool(self.document, tool_str)
def test_org_with_email(self):
org_name = "Example"
org_email = "[email protected]"
org_str = "Organization: {0} ( {1} )".format(org_name, org_email)
org = self.builder.build_org(self.document, org_str)
assert org.name == org_name
assert org.email == org_email
def test_org(self):
org_name = "Example"
org_str = "Organization: {0} ()".format(org_name)
org = self.builder.build_org(self.document, org_str)
assert org.name == org_name
assert org.email is None
@testing_utils.raises(builders.SPDXValueError)
def test_org_value_error(self):
org_name = "Example"
org_str = "Organization {0}".format(org_name)
self.builder.build_org(self.document, org_str)
def test_person_with_email(self):
per_name = "Bob"
per_email = "[email protected]"
per_str = "Person: {0} ( {1} )".format(per_name, per_email)
per = self.builder.build_person(self.document, per_str)
assert per.name == per_name
assert per.email == per_email
def test_per(self):
per_name = "Bob"
per_str = "Person: {0} ()".format(per_name)
per = self.builder.build_person(self.document, per_str)
assert per.name == per_name
assert per.email is None
@testing_utils.raises(builders.SPDXValueError)
def test_per_value_error(self):
per_name = "Bob"
per_str = "Person {0}".format(per_name)
self.builder.build_person(self.document, per_str)
class TestCreationInfoBuilder(TestCase):
maxDiff = None
def setUp(self):
self.document = Document()
self.builder = builders.CreationInfoBuilder()
self.entity_builder = builders.EntityBuilder()
def test_add_creator(self):
per_str = "Person: Bob ([email protected])"
per = self.entity_builder.build_person(self.document, per_str)
assert self.builder.add_creator(self.document, per)
assert len(self.document.creation_info.creators) == 1
assert self.document.creation_info.creators[0] == per
@testing_utils.raises(builders.SPDXValueError)
def test_invalid_creator_type(self):
self.builder.add_creator(self.document, "hello")
def test_created(self):
created_str = "2010-02-03T00:00:00Z"
assert self.builder.set_created_date(self.document, created_str)
@testing_utils.raises(builders.CardinalityError)
def test_more_than_one_created(self):
created_str = "2010-02-03T00:00:00Z"
self.builder.set_created_date(self.document, created_str)
self.builder.set_created_date(self.document, created_str)
@testing_utils.raises(builders.SPDXValueError)
def test_created_value(self):
created_str = "2010-02-03T00:00:00"
self.builder.set_created_date(self.document, created_str)
def test_license_list_vers(self):
vers_str = "1.2"
assert self.builder.set_lics_list_ver(self.document, vers_str)
assert self.document.creation_info.license_list_version == Version(1, 2)
@testing_utils.raises(builders.SPDXValueError)
def test_lics_list_ver_value(self):
self.builder.set_lics_list_ver(self.document, "1 2")
@testing_utils.raises(builders.CardinalityError)
def test_lics_list_ver_card(self):
self.builder.set_lics_list_ver(self.document, "1.2")
self.builder.set_lics_list_ver(self.document, "1.3")
class TestReviewBuilder(TestCase):
maxDiff = None
def setUp(self):
self.entity_builder = builders.EntityBuilder()
self.builder = builders.ReviewBuilder()
self.document = Document()
@testing_utils.raises(builders.OrderError)
def test_reviewed_without_reviewer(self):
date_str = "2010-02-03T00:00:00Z"
self.builder.add_review_date(self.document, date_str)
@testing_utils.raises(builders.OrderError)
def test_comment_without_reviewer(self):
comment = "<text>Comment</text>"
self.builder.add_review_comment(self.document, comment)
@testing_utils.raises(builders.CardinalityError)
def test_comment_cardinality(self):
comment = "<text>Comment</text>"
self.add_reviewer()
assert self.builder.add_review_comment(self.document, comment)
self.builder.add_review_comment(self.document, comment)
@testing_utils.raises(builders.CardinalityError)
def test_reviewed_cardinality(self):
date_str = "2010-02-03T00:00:00Z"
self.add_reviewer()
assert self.builder.add_review_date(self.document, date_str)
self.builder.add_review_date(self.document, date_str)
def test_comment_reset(self):
comment = "<text>Comment</text>"
self.add_reviewer()
assert self.builder.add_review_comment(self.document, comment)
self.add_reviewer()
assert self.builder.add_review_comment(self.document, comment)
def test_reviewed_reset(self):
date_str = "2010-02-03T00:00:00Z"
self.add_reviewer()
assert self.builder.add_review_date(self.document, date_str)
self.add_reviewer()
assert self.builder.add_review_date(self.document, date_str)
@testing_utils.raises(builders.SPDXValueError)
def test_date_value(self):
date_str = "2010-2-03T00:00:00Z"
self.add_reviewer()
self.builder.add_review_date(self.document, date_str)
@testing_utils.raises(builders.SPDXValueError)
def test_comment_value(self):
comment = "<text>Comment<text>"
self.add_reviewer()
self.builder.add_review_comment(self.document, comment)
def add_reviewer(self):
per_str = "Person: Bob ([email protected])"
per = self.entity_builder.build_person(self.document, per_str)
self.builder.add_reviewer(self.document, per)
class TestAnnotationBuilder(TestCase):
maxDiff = None
def setUp(self):
self.entity_builder = builders.EntityBuilder()
self.builder = builders.AnnotationBuilder()
self.document = Document()
@testing_utils.raises(builders.OrderError)
def test_annotation_without_annotator(self):
date_str = "2014-08-06T00:00:00Z"
self.builder.add_annotation_date(self.document, date_str)
@testing_utils.raises(builders.OrderError)
def test_comment_without_annotator(self):
comment = "<text>Comment without annotator</text>"
self.builder.add_annotation_comment(self.document, comment)
@testing_utils.raises(builders.OrderError)
def test_type_without_annotator(self):
annotation_type = "REVIEW"
self.builder.add_annotation_type(self.document, annotation_type)
@testing_utils.raises(builders.OrderError)
def test_spdx_id_without_annotator(self):
spdx_id = "SPDXRef-45"
self.builder.set_annotation_spdx_id(self.document, spdx_id)
@testing_utils.raises(builders.CardinalityError)
def test_annotation_comment_cardinality(self):
comment = "<text>Annotation Comment</text>"
self.add_annotator()
assert self.builder.add_annotation_comment(self.document, comment)
self.builder.add_annotation_comment(self.document, comment)
@testing_utils.raises(builders.CardinalityError)
def test_annotation_cardinality(self):
date_str = "2014-08-06T00:00:00Z"
self.add_annotator()
assert self.builder.add_annotation_date(self.document, date_str)
self.builder.add_annotation_date(self.document, date_str)
@testing_utils.raises(builders.CardinalityError)
def test_annotation_spdx_id_cardinality(self):
spdx_id = "SPDXRef-45"
self.add_annotator()
self.builder.set_annotation_spdx_id(self.document, spdx_id)
self.builder.set_annotation_spdx_id(self.document, spdx_id)
def test_annotation_comment_reset(self):
comment = "<text>Annotation Comment</text>"
self.add_annotator()
assert self.builder.add_annotation_comment(self.document, comment)
self.add_annotator()
assert self.builder.add_annotation_comment(self.document, comment)
def test_annotation_reset(self):
date_str = "2014-08-06T00:00:00Z"
self.add_annotator()
assert self.builder.add_annotation_date(self.document, date_str)
self.add_annotator()
assert self.builder.add_annotation_date(self.document, date_str)
@testing_utils.raises(builders.SPDXValueError)
def test_annotation_date_value(self):
date_str = "2014-8-06T00:00:00Z"
self.add_annotator()
self.builder.add_annotation_date(self.document, date_str)
@testing_utils.raises(builders.SPDXValueError)
def test_annotation_comment_value(self):
comment = "<text>Annotation Comment<text>"
self.add_annotator()
self.builder.add_annotation_comment(self.document, comment)
@testing_utils.raises(builders.SPDXValueError)
def test_incorrect_annotation_type_value(self):
annotation_type = "Some random value instead of REVIEW or OTHER"
self.add_annotator()
self.builder.add_annotation_type(self.document, annotation_type)
def test_correct_annotation_type(self):
annotation_type = "REVIEW"
self.add_annotator()
assert self.builder.add_annotation_type(self.document, annotation_type)
def test_correct_annotation_spdx_id(self):
spdx_id = "SPDXRef-45"
self.add_annotator()
self.builder.set_annotation_spdx_id(self.document, spdx_id)
@testing_utils.raises(builders.CardinalityError)
def test_annotation_type_cardinality(self):
annotation_type = "REVIEW"
self.add_annotator()
assert self.builder.add_annotation_type(self.document, annotation_type)
self.builder.add_annotation_type(self.document, annotation_type)
def add_annotator(self):
per_str = "Person: Jim ([email protected])"
per = self.entity_builder.build_person(self.document, per_str)
self.builder.add_annotator(self.document, per)
class TestRelationshipBuilder(TestCase):
maxDiff = None
def setUp(self):
self.entity_builder = builders.EntityBuilder()
self.builder = builders.RelationshipBuilder()
self.document = Document()
@testing_utils.raises(builders.OrderError)
def test_relationship_comment_without_relationship(self):
comment = "<text>Comment without relationship</text>"
self.builder.add_relationship_comment(self.document, comment)
def test_relationship_comment_reset(self):
comment = "<text>Relationship Comment</text>"
self.add_relationship()
assert self.builder.add_relationship_comment(self.document, comment)
self.add_relationship()
assert self.builder.add_relationship_comment(self.document, comment)
@testing_utils.raises(builders.SPDXValueError)
def test_relationship_comment_value(self):
comment = "<text>Relationship Comment<text>"
self.add_relationship()
self.builder.add_relationship_comment(self.document, comment)
def test_correct_relationship(self):
relationship = "SPDXRef-DOCUMENT DESCRIBES SPDXRef-File"
assert self.builder.add_relationship(self.document, relationship)
def add_relationship(self):
relate_str = "SPDXRef-DOCUMENT DESCRIBES SPDXRef-File"
self.builder.add_relationship(self.document, relate_str)
class TestPackageBuilder(TestCase):
maxDiff = None
def setUp(self):
self.builder = builders.PackageBuilder()
self.document = Document()
self.entity_builder = builders.EntityBuilder()
@testing_utils.raises(builders.CardinalityError)
def test_package_cardinality(self):
assert self.builder.create_package(self.document, "pkg1")
self.builder.create_package(self.document, "pkg2")
def make_package(self):
self.builder.create_package(self.document, "pkg")
def make_person(self):
per_str = "Person: Bob ([email protected])"
per = self.entity_builder.build_person(self.document, per_str)
return per
@testing_utils.raises(builders.OrderError)
def test_vers_order(self):
self.builder.set_pkg_vers(self.document, "1.1")
@testing_utils.raises(builders.OrderError)
def test_file_name_order(self):
self.builder.set_pkg_file_name(self.document, "test.jar")
@testing_utils.raises(builders.OrderError)
def test_pkg_supplier_order(self):
self.builder.set_pkg_supplier(self.document, self.make_person())
@testing_utils.raises(builders.OrderError)
def test_pkg_originator_order(self):
self.builder.set_pkg_originator(self.document, self.make_person())
@testing_utils.raises(builders.OrderError)
def test_pkg_down_loc_order(self):
self.builder.set_pkg_down_location(self.document, "http://example.com/pkg")
@testing_utils.raises(builders.OrderError)
def test_pkg_home_order(self):
self.builder.set_pkg_home(self.document, "http://example.com")
@testing_utils.raises(builders.OrderError)
def test_pkg_verif_order(self):
self.builder.set_pkg_verif_code(self.document, "some code")
@testing_utils.raises(builders.OrderError)
def test_pkg_chksum_order(self):
self.builder.set_pkg_chk_sum(self.document, "some code")
@testing_utils.raises(builders.OrderError)
def test_pkg_source_info_order(self):
self.builder.set_pkg_source_info(self.document, "hello")
@testing_utils.raises(builders.OrderError)
def test_pkg_licenses_concluded_order(self):
self.builder.set_pkg_licenses_concluded(self.document, "some license")
@testing_utils.raises(builders.OrderError)
def test_pkg_lics_from_file_order(self):
self.builder.set_pkg_license_from_file(self.document, "some license")
@testing_utils.raises(builders.OrderError)
def test_pkg_lics_decl_order(self):
self.builder.set_pkg_license_declared(self.document, "license")
@testing_utils.raises(builders.OrderError)
def test_pkg_lics_comment_order(self):
self.builder.set_pkg_license_comment(self.document, "<text>hello</text>")
@testing_utils.raises(builders.OrderError)
def test_pkg_attribution_text_order(self):
self.builder.set_pkg_attribution_text(self.document, "<text>hello</text>")
def test_correct_pkg_attribution_text(self):
self.builder.create_package(self.document, "pkg")
self.builder.set_pkg_attribution_text(self.document, "<text>something</text>")
@testing_utils.raises(builders.SPDXValueError)
def test_incorrect_pkg_attribution_text(self):
self.builder.create_package(self.document, "pkg")
self.builder.set_pkg_attribution_text(self.document, "not_free_form_text")
@testing_utils.raises(builders.OrderError)
def test_pkg_cr_text_order(self):
self.builder.set_pkg_cr_text(self.document, "<text>Something</text>")
@testing_utils.raises(builders.OrderError)
def test_pkg_summary_order(self):
self.builder.set_pkg_summary(self.document, "<text>Something</text>")
@testing_utils.raises(builders.OrderError)
def test_set_pkg_desc_order(self):
self.builder.set_pkg_desc(self.document, "<text>something</text>")
@testing_utils.raises(builders.OrderError)
def test_set_pkg_spdx_id_order(self):
self.builder.set_pkg_spdx_id(self.document, "SPDXRe-Package")
@testing_utils.raises(builders.OrderError)
def test_set_pkg_files_analyzed_order(self):
self.builder.set_pkg_files_analyzed(self.document, "True")
@testing_utils.raises(builders.OrderError)
def test_set_pkg_comment_order(self):
self.builder.set_pkg_comment(self.document, "<text>something</text>")
def test_correct_pkg_comment(self):
self.builder.create_package(self.document, "pkg")
self.builder.set_pkg_comment(self.document, "<text>something</text>")
@testing_utils.raises(builders.SPDXValueError)
def test_incorrect_pkg_comment(self):
self.builder.create_package(self.document, "pkg")
self.builder.set_pkg_comment(self.document, "not_free_form_text")
def test_correct_pkg_spdx_id(self):
self.builder.create_package(self.document, "pkg")
assert self.builder.set_pkg_spdx_id(self.document, "SPDXRef-Package")
assert self.document.package.spdx_id == "SPDXRef-Package"
@testing_utils.raises(builders.SPDXValueError)
def test_incorrect_pkg_spdx_id(self):
self.builder.create_package(self.document, "pkg")
assert self.builder.set_pkg_spdx_id(self.document, "SPDXRe-Package")
@testing_utils.raises(builders.SPDXValueError)
def test_incorrect_pkg_files_analyzed(self):
self.builder.create_package(self.document, "pkg")
assert self.builder.set_pkg_files_analyzed(self.document, "XYZ")
def test_correct_pkg_files_analyzed_1(self):
self.builder.create_package(self.document, "pkg")
assert self.builder.set_pkg_files_analyzed(self.document, "True")
def test_correct_pkg_files_analyzed_2(self):
self.builder.create_package(self.document, "pkg")
assert self.builder.set_pkg_files_analyzed(self.document, "true")
def test_correct_pkg_ext_ref_category(self):
category = "SECURITY"
self.builder.create_package(self.document, "pkg")
self.builder.set_pkg_ext_ref_category(self.document, category)
assert self.document.package.pkg_ext_refs[-1].category == category
@testing_utils.raises(builders.SPDXValueError)
def test_incorrect_pkg_ext_ref_category(self):
category = "some_other_value"
self.builder.create_package(self.document, "pkg")
self.builder.set_pkg_ext_ref_category(self.document, category)
def test_correct_pkg_ext_ref_type(self):
pkg_ext_ref_type = "cpe23Type"
self.builder.create_package(self.document, "pkg")
self.builder.set_pkg_ext_ref_type(self.document, pkg_ext_ref_type)
assert (
self.document.package.pkg_ext_refs[-1].pkg_ext_ref_type == pkg_ext_ref_type
)
@testing_utils.raises(builders.SPDXValueError)
def test_incorrect_pkg_ext_ref_type(self):
pkg_ext_ref_type = "cpe23Type_with_special_symbols&%"
self.builder.create_package(self.document, "pkg")
self.builder.set_pkg_ext_ref_type(self.document, pkg_ext_ref_type)
def test_correct_pkg_ext_ref_locator(self):
locator = "cpe:2.3:a:pivotal_software:spring_framework:4.1.0:*:*:*"
self.builder.create_package(self.document, "pkg")
self.builder.set_pkg_ext_ref_locator(self.document, locator)
assert self.document.package.pkg_ext_refs[-1].locator == locator
@testing_utils.raises(builders.OrderError)
def test_pkg_ext_ref_without_pkg(self):
locator = "cpe:2.3:a:pivotal_software:spring_framework:4.1.0:*:*:*"
self.builder.set_pkg_ext_ref_locator(self.document, locator)
def test_correct_pkg_ext_comment(self):
comment_str = "This is a comment."
comment_text = "<text>" + comment_str + "</text>"
self.builder.create_package(self.document, "pkg")
self.builder.set_pkg_ext_ref_category(self.document, "SECURITY")
self.builder.add_pkg_ext_ref_comment(self.document, comment_text)
assert self.document.package.pkg_ext_refs[-1].comment == comment_str
@testing_utils.raises(builders.OrderError)
def test_pkg_ext_comment_without_pkg_ext_ref(self):
comment_str = "This is a comment."
comment_text = "<text>" + comment_str + "</text>"
self.builder.create_package(self.document, "pkg")
self.builder.add_pkg_ext_ref_comment(self.document, comment_text)
class TestSnippetBuilder(TestCase):
maxDiff = None
def setUp(self):
self.entity_builder = builders.EntityBuilder()
self.builder = builders.SnippetBuilder()
self.document = Document()
def test_create_snippet(self):
assert self.builder.create_snippet(self.document, "SPDXRef-Snippet")
@testing_utils.raises(builders.SPDXValueError)
def test_incorrect_snippet_spdx_id(self):
self.builder.create_snippet(self.document, "Some_value_with_$%")
def test_snippet_name(self):
self.builder.create_snippet(self.document, "SPDXRef-Snippet")
self.builder.set_snippet_name(self.document, "Name_of_snippet")
@testing_utils.raises(builders.OrderError)
def test_snippet_name_order(self):
self.builder.set_snippet_name(self.document, "Name_of_snippet")
def test_snippet_comment(self):
self.builder.create_snippet(self.document, "SPDXRef-Snippet")
self.builder.set_snippet_comment(self.document, "<text>Comment</text>")
@testing_utils.raises(builders.OrderError)
def test_snippet_comment_order(self):
self.builder.set_snippet_comment(self.document, "<text>Comment</text>")
@testing_utils.raises(builders.SPDXValueError)
def test_snippet_comment_text_value(self):
self.builder.create_snippet(self.document, "SPDXRef-Snippet")
self.builder.set_snippet_comment(self.document, "Comment.")
@testing_utils.raises(builders.OrderError)
def test_snippet_attribution_text_order(self):
self.builder.set_snippet_attribution_text(self.document, "<text>hello</text>")
def test_correct_snippet_attribution_text(self):
self.builder.create_snippet(self.document, "SPDXRef-Snippet")
self.builder.set_snippet_attribution_text(
self.document, "<text>something</text>"
)
@testing_utils.raises(builders.SPDXValueError)
def test_incorrect_snippet_attribution_text(self):
self.builder.create_snippet(self.document, "SPDXRef-Package")
self.builder.set_snippet_attribution_text(self.document, "not_free_form_text")
def test_snippet_copyright(self):
self.builder.create_snippet(self.document, "SPDXRef-Snippet")
self.builder.set_snippet_copyright(
self.document, "<text>Copyright 2008-2010 John Smith</text>"
)
@testing_utils.raises(builders.SPDXValueError)
def test_snippet_copyright_text_value(self):
self.builder.create_snippet(self.document, "SPDXRef-Snippet")
self.builder.set_snippet_copyright(
self.document, "Copyright 2008-2010 John Smith"
)
@testing_utils.raises(builders.OrderError)
def test_snippet_copyright_order(self):
self.builder.set_snippet_copyright(
self.document, "<text>Copyright 2008-2010 John Smith</text>"
)
def test_snippet_lic_comment(self):
self.builder.create_snippet(self.document, "SPDXRef-Snippet")
self.builder.set_snippet_lic_comment(self.document, "<text>Lic comment</text>")
@testing_utils.raises(builders.SPDXValueError)
def test_snippet_lic_comment_text_value(self):
self.builder.create_snippet(self.document, "SPDXRef-Snippet")
self.builder.set_snippet_lic_comment(self.document, "Lic comment")
@testing_utils.raises(builders.OrderError)
def test_snippet_lic_comment_order(self):
self.builder.set_snippet_lic_comment(self.document, "<text>Lic comment</text>")
def test_snippet_from_file_spdxid(self):
self.builder.create_snippet(self.document, "SPDXRef-Snippet")
self.builder.set_snip_from_file_spdxid(self.document, "SPDXRef-DoapSource")
@testing_utils.raises(builders.SPDXValueError)
def test_snippet_from_file_spdxid_value(self):
self.builder.create_snippet(self.document, "SPDXRef-Snippet")
self.builder.set_snip_from_file_spdxid(self.document, "#_$random_chars")
@testing_utils.raises(builders.OrderError)
def test_snippet_from_file_spdxid_order(self):
self.builder.set_snip_from_file_spdxid(self.document, "SPDXRef-DoapSource")
@testing_utils.raises(builders.CardinalityError)
def test_snippet_from_file_spdxid_cardinality(self):
self.builder.create_snippet(self.document, "SPDXRef-Snippet")
self.builder.set_snip_from_file_spdxid(self.document, "SPDXRef-DoapSource")
self.builder.set_snip_from_file_spdxid(self.document, "SPDXRef-somevalue")
def test_snippet_conc_lics(self):
self.builder.create_snippet(self.document, "SPDXRef-Snippet")
self.builder.set_snip_concluded_license(
self.document, License.from_identifier("Apache-2.0")
)
@testing_utils.raises(builders.SPDXValueError)
def test_snippet_conc_lics_value(self):
self.builder.create_snippet(self.document, "SPDXRef-Snippet")
self.builder.set_snip_concluded_license(self.document, "Apache-2.0")
@testing_utils.raises(builders.OrderError)
def test_snippet_conc_lics_order(self):
self.builder.set_snip_concluded_license(
self.document, License.from_identifier("Apache-2.0")
)
@testing_utils.raises(builders.CardinalityError)
def test_snippet_conc_lics_cardinality(self):
self.builder.create_snippet(self.document, "SPDXRef-Snippet")
self.builder.set_snip_concluded_license(
self.document, License.from_identifier("Apache-2.0")
)
self.builder.set_snip_concluded_license(
self.document, License.from_identifier("Apache-2.0")
)
def test_snippet_lics_info(self):
self.builder.create_snippet(self.document, "SPDXRef-Snippet")
self.builder.set_snippet_lics_info(
self.document, License.from_identifier("Apache-2.0")
)
self.builder.set_snippet_lics_info(
self.document, License.from_identifier("GPL-2.0-or-later")
)
@testing_utils.raises(builders.SPDXValueError)
def test_snippet_lics_info_value(self):
self.builder.create_snippet(self.document, "SPDXRef-Snippet")
self.builder.set_snippet_lics_info(self.document, "Apache-2.0")
@testing_utils.raises(builders.OrderError)
def test_snippet_lics_info_order(self):
self.builder.set_snippet_lics_info(
self.document, License.from_identifier("Apache-2.0")
)
| 40.793612 | 122 | 0.720261 |
4a27bf944b4e7f88dcb3302302e71e4a39ccaed0 | 791 | py | Python | pe_tree/exceptions.py | lybtongji/pe_tree | 2be607fc55702293cd02cbc6cda5283452464aff | [
"Apache-2.0"
] | 1,271 | 2020-07-27T14:46:44.000Z | 2022-03-30T15:58:24.000Z | pe_tree/exceptions.py | lybtongji/pe_tree | 2be607fc55702293cd02cbc6cda5283452464aff | [
"Apache-2.0"
] | 9 | 2020-08-04T13:23:38.000Z | 2021-05-18T16:53:49.000Z | pe_tree/exceptions.py | lybtongji/pe_tree | 2be607fc55702293cd02cbc6cda5283452464aff | [
"Apache-2.0"
] | 168 | 2020-07-27T13:56:42.000Z | 2022-03-29T12:48:00.000Z | #
# Copyright (c) 2020 BlackBerry Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""PE Tree exceptions"""
class ThreadStopping(Exception):
"""Exception raised when the application is stopping, used to terminate threads gracefully"""
| 37.666667 | 97 | 0.737042 |
4a27bfb7ab31795f449448265d95bf90a39b93a0 | 513 | py | Python | pontos-turisticos/core/migrations/0007_auto_20181022_1748.py | LucasVarela42/PontosTuristicos | 96d8a20739dbd9f56ae26bda069ae1862b89e35d | [
"BSD-3-Clause"
] | null | null | null | pontos-turisticos/core/migrations/0007_auto_20181022_1748.py | LucasVarela42/PontosTuristicos | 96d8a20739dbd9f56ae26bda069ae1862b89e35d | [
"BSD-3-Clause"
] | null | null | null | pontos-turisticos/core/migrations/0007_auto_20181022_1748.py | LucasVarela42/PontosTuristicos | 96d8a20739dbd9f56ae26bda069ae1862b89e35d | [
"BSD-3-Clause"
] | null | null | null | # Generated by Django 2.1.2 on 2018-10-22 17:48
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('core', '0006_pontoturistico_endereco'),
]
operations = [
migrations.AlterField(
model_name='pontoturistico',
name='endereco',
field=models.OneToOneField(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='enderecos.Endereco'),
),
]
| 25.65 | 132 | 0.65692 |
4a27bfff170b2187bff2b54d2ffc00e7f28b95ce | 1,102 | py | Python | cea/interfaces/cli/excel_to_dbf.py | architecture-building-systems/cea-toolbox | bfec7ecb4b242449ab8796a1e8ce68c05c35f1d6 | [
"MIT"
] | 121 | 2017-08-15T20:10:22.000Z | 2022-03-24T01:25:42.000Z | cea/interfaces/cli/excel_to_dbf.py | architecture-building-systems/cea-toolbox | bfec7ecb4b242449ab8796a1e8ce68c05c35f1d6 | [
"MIT"
] | 2,121 | 2017-07-27T12:02:01.000Z | 2022-03-31T16:39:28.000Z | cea/interfaces/cli/excel_to_dbf.py | architecture-building-systems/cea-toolbox | bfec7ecb4b242449ab8796a1e8ce68c05c35f1d6 | [
"MIT"
] | 42 | 2017-09-19T09:59:56.000Z | 2022-02-19T20:19:56.000Z | """
Use the py:mod:`cea.utilities.dbf` module to convert an excel file to a dbf file.
"""
import os
import cea.config
import cea.inputlocator
import cea.utilities.dbf
def main(config):
"""
    Convert an Excel file (*.xls) to a DBF file (*.dbf). The configuration uses the section ``dbf-tools`` with
    the parameters ``input-file`` (path to the input) plus ``output-path`` and ``output-file-name`` (location and
    name of the output)
    :param config: uses ``config.dbf_tools.input_file``, ``config.dbf_tools.output_path`` and
        ``config.dbf_tools.output_file_name``
:type config: cea.config.Configuration
:return:
"""
input_file = config.dbf_tools.input_file
output_file_name = config.dbf_tools.output_file_name
output_path = config.dbf_tools.output_path
assert os.path.exists(input_file), 'Input file not found: %s' % input_file
# print out all configuration variables used by this script
print("Running excel-to-dbf with excel-file = %s" % input_file)
cea.utilities.dbf.xls_to_dbf(input_file=input_file, output_path=output_path, output_file_name=output_file_name)
if __name__ == '__main__':
main(cea.config.Configuration())
| 29.783784 | 115 | 0.720508 |
4a27c04340903e1ee121b8a6c6c8c9cffdca4e45 | 1,501 | py | Python | omniglot/FCDecomp.py | OpenBanboo/AgileNet | d75baa20b9f762cb56b249dca272150de9ae8def | [
"BSD-3-Clause"
] | null | null | null | omniglot/FCDecomp.py | OpenBanboo/AgileNet | d75baa20b9f762cb56b249dca272150de9ae8def | [
"BSD-3-Clause"
] | null | null | null | omniglot/FCDecomp.py | OpenBanboo/AgileNet | d75baa20b9f762cb56b249dca272150de9ae8def | [
"BSD-3-Clause"
] | null | null | null | from __future__ import print_function
import argparse
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Function
import torch.optim as optim
from torchvision import datasets, transforms
from torch.autograd import Variable
import create_dic_fuc
from LinearFunction import LinearFunction
class FCDecomp(nn.Module):
def __init__(self, coefs, dictionary, bias_val, input_features, output_features, bias=True, is_dic_grad=False, is_coef_grad=False, is_bias_grad=False):
super(FCDecomp, self).__init__()
self.is_dic_grad = is_dic_grad
self.is_coef_grad = is_coef_grad
self.is_bias_grad = is_bias_grad
print(self.is_dic_grad)
print(self.is_coef_grad)
print(self.is_bias_grad)
self.dictionary = nn.Parameter(dictionary, requires_grad=self.is_dic_grad)
self.coefs = nn.Parameter(coefs, requires_grad=self.is_coef_grad)
if bias:
self.bias = nn.Parameter(bias_val, requires_grad=self.is_bias_grad)
else:
self.register_parameter('bias', None)
# Not a very smart way to initialize weights
#self.weight.data.uniform_(-0.1, 0.1)
#if bias is not None:
#self.bias.data.uniform_(-0.1, 0.1)
def forward(self, input):
        # Reconstruct the full weight matrix from the dictionary and the coefficient
        # matrix, then apply the custom linear autograd function to the input.
self.weight = torch.mm(self.dictionary, self.coefs)
return LinearFunction.apply(input, self.weight, self.bias)
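# A minimal usage sketch (never called). Assumption: LinearFunction follows the standard
# PyTorch "extending autograd" linear example, so the reconstructed weight has shape
# (output_features, input_features); dictionary is then (output_features, rank) and
# coefs is (rank, input_features). Adjust the shapes if your LinearFunction differs.
def _example_fc_decomp():
    out_features, in_features, rank, batch = 10, 784, 32, 4
    layer = FCDecomp(
        coefs=torch.randn(rank, in_features),
        dictionary=torch.randn(out_features, rank),
        bias_val=torch.zeros(out_features),
        input_features=in_features,
        output_features=out_features,
        is_coef_grad=True)  # only the coefficients are trainable in this sketch
    x = torch.randn(batch, in_features)
    return layer(x)  # expected shape: (batch, out_features)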
| 39.5 | 155 | 0.713524 |
4a27c0acd68d779f4f147f66450156dbe9e1a335 | 63,163 | py | Python | leo/plugins/viewrendered.py | thhonigm/leo-editor | 34cc572bd80b2833984bcd34878a572ab51816de | [
"MIT"
] | null | null | null | leo/plugins/viewrendered.py | thhonigm/leo-editor | 34cc572bd80b2833984bcd34878a572ab51816de | [
"MIT"
] | 1 | 2020-06-19T02:28:25.000Z | 2020-06-19T02:28:25.000Z | leo/plugins/viewrendered.py | connorng/leo-editor | dd41fd3c854ceab286b1111fa4bbe30f65237dcb | [
"MIT"
] | null | null | null | #@+leo-ver=5-thin
#@+node:tbrown.20100318101414.5990: * @file viewrendered.py
#@+<< vr docstring >>
#@+node:tbrown.20100318101414.5991: ** << vr docstring >>
'''
Creates a window for *live* rendering of reSTructuredText, markdown text,
images, movies, sounds, rst, html, jupyter notebooks, etc.
Dependencies
============
This plugin uses docutils, http://docutils.sourceforge.net/, to render reStructuredText,
so installing docutils is highly recommended when using this plugin.
This plugin uses markdown, http://pypi.python.org/pypi/Markdown, to render Markdown,
so installing markdown is highly recommended when using this plugin.
Commands
========
viewrendered.py creates the following (``Alt-X``) commands:
``viewrendered (abbreviated vr)``
Opens a new rendering window.
By default, the rendering pane renders body text as reStructuredText,
with all Leo directives removed.
However, if the body text starts with ``<`` (after removing directives),
the body text is rendered as html.
**Important**: The default rendering just described does not apply to nodes
whose headlines begin with @image, @html, @movie, @networkx, @svg and @url.
See the section called **Special Renderings** below.
Rendering sets the process current directory (os.chdir()) to the path
to the node being rendered, to allow relative paths to work in ``.. image::`` directives.
.. ``viewrendered-big``
.. as above, but zoomed in, useful for presentations
.. ``viewrendered-html``
.. displays the html source generated from reStructuredText, useful for
.. debugging
``vr-hide``
Makes the rendering pane invisible, but does not destroy it.
``vr-lock`` and ``vr-unlock``
Locks and unlocks the rendering pane.
When unlocked (the initial state), the rendering pane renders the contents
of the presently selected node.
When locked, the rendering pane does not change when other nodes are selected.
This is useful for playing movies in the rendering pane.
``vr-pause-play-movie``
This command has effect only if the rendering pane is presently showing a movie.
It pauses the movie if playing, or resumes the movie if paused.
``vr-show``
Makes the rendering pane visible.
``vr-toggle``
Shows the rendering pane if invisible, otherwise hides it.
``vr-update``
Forces an update of the rendering pane.
This is especially useful for @graphics-script nodes:
such nodes are update automatically only when selected,
not when the body text changes.
Rendering reStructuredText
==========================
For example, both::
Heading
-------
`This` is **really** a line of text.
and::
<h1>Heading<h1>
<tt>This</tt> is <b>really</b> a line of text.
will look something like:
**Heading**
`This` is **really** a line of text.
**Important**: reStructuredText errors and warnings will appear in red in the rendering pane.
Rendering markdown
==================
Please see the markdown syntax document at http://daringfireball.net/projects/markdown/syntax
for more information on markdown.
Unless ``@string view-rendered-default-kind`` is set to ``md``, markdown rendering must be
specified by putting it in a ``@md`` node.
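For example, an ``@md`` node whose body is::
    # Heading
    `This` is **really** a line of text.
renders much like the reStructuredText example above.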
Special Renderings
===================
As stated above, the rendering pane renders body text as reStructuredText
by default, with all Leo directives removed. However, if the body text
starts with ``<`` (after removing directives), the body text is rendered as
html.
This plugin renders @md, @image, @jupyter, @html, @movie, @networkx and @svg nodes as follows:
**Note**: For @image, @movie and @svg nodes, either the headline or the first line of body text may
contain a filename. If relative, the filename is resolved relative to Leo's load directory.
- ``@md`` renders the body text as markdown, as described above.
- ``@graphics-script`` executes the script in the body text in a context containing
two predefined variables:
- gs is the QGraphicsScene for the rendering pane.
- gv is the QGraphicsView for the rendering pane.
  Using these variables, the script in the body text may add graphics to the rendering pane (see the example at the end of this list).
- ``@image`` renders the file as an image.
The headline should start with @image.
All other characters in the headline are ignored.
The first line of the body should be the full path to the image file.
All other lines are ignored.
- ``@html`` renders the body text as html.
- ``@jupyter`` renders the output from Jupyter Notebooks.
The contents of the @jupyter node can be either a url to the notebook or
the actual JSON notebook itself.
Use file:// urls for local files. Some examples:
Windows: file:///c:/Test/a_notebook.ipynb
Linux: file:///home/a_notebook.ipynb
- ``@movie`` plays the file as a movie. @movie also works for music files.
- ``@networkx`` is non-functional at present. It is intended to
render the body text as a networkx graph.
See http://networkx.lanl.gov/
- ``@svg`` renders the file as a (possibly animated!) svg (Scalable Vector Image).
See http://en.wikipedia.org/wiki/Scalable_Vector_Graphics
**Note**: if the first character of the body text is ``<`` after removing Leo directives,
the contents of the body pane are taken to be an svg image.
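For example, a minimal ``@graphics-script`` body might be::
    gs.addRect(10, 10, 120, 60)
    gs.addText('Hello from viewrendered')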
Relative file names
===================
vr.convert_to_html resolves relative paths using whatever @path directive
is in effect for a particular node. It also does `os.chdir(path)` for that
path.
Settings
========
- ``@color rendering-pane-background-color = white``
The background color the rendering pane when rendering text.
- ``@bool view-rendered-auto-create = False``
When True, show the rendering pane when Leo opens an outline.
- ``@bool view-rendered-auto-hide = False``
When True, hide the rendering pane for text-only renderings.
- ``@string view-rendered-default-kind = rst``
The default kind of rendering. One of (big,rst,md,html)
- ``@string view-rendered-md-extensions = extra``
A comma-delineated list of markdown extensions to use.
Suitable extensions can be seen here:
http://pythonhosted.org/Markdown/extensions/index.html
Acknowledgments
================
Terry Brown created this initial version of this plugin, and the
free_layout and NestedSplitter plugins used by viewrendered.
Edward K. Ream generalized this plugin and added communication and
coordination between the free_layout, NestedSplitter and viewrendered
plugins.
Jacob Peck added markdown support to this plugin.
'''
#@-<< vr docstring >>
#@+<< to do >>
#@+node:ekr.20140924060835.19485: ** << to do >> (vr)
#@+at
# To do:
#
# - Use the free_layout rotate-all command in Leo's toggle-split-direction command.
# - Add dict to allow customize must_update.
# - Lock movies automatically until they are finished?
# - Render @url nodes as html?
# - Support uA's that indicate the kind of rendering desired.
# - (Failed) Make viewrendered-big work.
#@-<< to do >>
#pylint: disable=no-member
trace = False
# This global trace is convenient.
#@+<< imports >>
#@+node:tbrown.20100318101414.5993: ** << imports >> (vr)
import leo.core.leoGlobals as g
try:
import leo.plugins.qt_text as qt_text
import leo.plugins.free_layout as free_layout
from leo.core.leoQt import isQt5, QtCore, QtGui, QtWidgets
from leo.core.leoQt import phonon, QtMultimedia, QtSvg, QtWebKitWidgets
except Exception:
QtWidgets = False
from distutils.spawn import find_executable
try:
import docutils
import docutils.core
except ImportError:
docutils = None
if docutils:
try:
from docutils.core import publish_string
from docutils.utils import SystemMessage
got_docutils = True
except ImportError:
got_docutils = False
g.es_exception()
except SyntaxError:
got_docutils = False
g.es_exception()
else:
got_docutils = False
# markdown support, non-vital
try:
from markdown import markdown
got_markdown = True
except ImportError:
got_markdown = False
import os
# nbformat (@jupyter) support, non-vital.
try:
import nbformat
from nbconvert import HTMLExporter
# from traitlets.config import Config
except ImportError:
nbformat = None
import json
from urllib.request import urlopen
#@-<< imports >>
asciidoctor_exec = find_executable('asciidoctor')
asciidoc3_exec = find_executable('asciidoc3')
pandoc_exec = find_executable('pandoc')
#@+<< set BaseTextWidget >>
#@+node:ekr.20190424081947.1: ** << set BaseTextWidget >> (vr)
if QtWidgets:
try:
BaseTextWidget = QtWebKitWidgets.QWebView
except Exception:
BaseTextWidget = QtWidgets.QTextBrowser
else:
BaseTextWidget = None
#@-<< set BaseTextWidget >>
#@+<< define html templates >>
#@+node:ekr.20170324090828.1: ** << define html templates >> (vr)
image_template = '''\
<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN"
"http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">
<html xmlns="http://www.w3.org/1999/xhtml" xml:lang="en" lang="en">
<head></head>
<body bgcolor="#fffbdc">
<img src="%s">
</body>
</html>
'''
# http://docs.mathjax.org/en/latest/start.html
latex_template = '''\
<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN"
"http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">
<html xmlns="http://www.w3.org/1999/xhtml" xml:lang="en" lang="en">
<head>
<script src='https://cdn.mathjax.org/mathjax/latest/MathJax.js?config=TeX-AMS-MML_HTMLorMML'>
</script>
</head>
<body bgcolor="#fffbdc">
%s
</body>
</html>
'''
#@-<< define html templates >>
controllers = {}
# Keys are c.hash(): values are PluginControllers (QWidget's).
layouts = {}
# Keys are c.hash(): values are tuples (layout_when_closed, layout_when_open)
#@+others
#@+node:ekr.20110320120020.14491: ** vr.Top-level
#@+node:tbrown.20100318101414.5994: *3* vr.decorate_window
def decorate_window(w):
# Do not override the style sheet!
# This interferes with themes
# w.setStyleSheet(stickynote_stylesheet)
g.app.gui.attachLeoIcon(w)
w.resize(600, 300)
#@+node:tbrown.20100318101414.5995: *3* vr.init
def init():
'''Return True if the plugin has loaded successfully.'''
global got_docutils
if g.app.gui.guiName() != 'qt':
return False
# #1248.
# if g.app.gui.guiName()
if not QtWidgets or not g.app.gui.guiName().startswith('qt'):
if (
not g.unitTesting and
not g.app.batchMode and
g.app.gui.guiName() not in ('browser', 'curses')
):
g.es_print('viewrendered requires Qt')
return False
if not got_docutils:
g.es_print('Warning: viewrendered.py running without docutils.')
# Always enable this plugin, even if imports fail.
g.plugin_signon(__name__)
g.registerHandler('after-create-leo-frame', onCreate)
g.registerHandler('close-frame', onClose)
g.registerHandler('scrolledMessage', show_scrolled_message)
return True
#@+node:ekr.20180825025924.1: *3* vr.isVisible
def isVisible():
'''Return True if the VR pane is visible.'''
#@+node:ekr.20110317024548.14376: *3* vr.onCreate
def onCreate(tag, keys):
c = keys.get('c')
if not c:
return
provider = ViewRenderedProvider(c)
free_layout.register_provider(c, provider)
if g.app.dock:
# Instantiate immediately.
viewrendered(event={'c': c})
#@+node:vitalije.20170712174157.1: *3* vr.onClose
def onClose(tag, keys):
c = keys.get('c')
h = c.hash()
vr = controllers.get(h)
if vr:
c.bodyWantsFocus()
del controllers[h]
vr.deactivate()
vr.deleteLater()
#@+node:tbrown.20110629132207.8984: *3* vr.show_scrolled_message
def show_scrolled_message(tag, kw):
if g.unitTesting:
return None # This just slows the unit tests.
c = kw.get('c')
flags = kw.get('flags') or 'rst'
vr = viewrendered(event=kw)
title = kw.get('short_title', '').strip()
vr.setWindowTitle(title)
s = '\n'.join([
title,
'=' * len(title),
'',
kw.get('msg')
])
vr.show_dock_or_pane() # #1332.
vr.update(
tag='show-scrolled-message',
keywords={'c': c, 'force': True, 's': s, 'flags': flags},
)
return True
#@+node:vitalije.20170713082256.1: *3* vr.split_last_sizes
def split_last_sizes(sizes):
result = [2 * x for x in sizes[:-1]]
result.append(sizes[-1])
result.append(sizes[-1])
return result
#@+node:ekr.20110320120020.14490: ** vr.Commands
#@+node:ekr.20131213163822.16471: *3* g.command('preview')
@g.command('preview')
def preview(event):
'''A synonym for the vr-toggle command.'''
toggle_rendering_pane(event)
#@+node:tbrown.20100318101414.5998: *3* g.command('vr')
@g.command('vr')
def viewrendered(event):
"""Open render view for commander"""
global controllers, layouts
if g.app.gui.guiName() != 'qt':
return None
c = event.get('c')
if not c:
return None
h = c.hash()
vr = controllers.get(h)
if not vr:
controllers[h] = vr = ViewRenderedController(c)
if g.app.dock:
dock = vr.leo_dock
if not c.mFileName:
# #1318 and #1332: Tricky init code for new windows.
g.app.restoreWindowState(c)
dock.hide()
dock.raise_()
return vr
#
# Legacy code: add the pane to the splitter.
layouts[h] = c.db.get('viewrendered_default_layouts', (None, None))
vr._ns_id = '_leo_viewrendered' # for free_layout load/save
vr.splitter = splitter = c.free_layout.get_top_splitter()
if splitter:
vr.store_layout('closed')
sizes = split_last_sizes(splitter.sizes())
ok = splitter.add_adjacent(vr, 'bodyFrame', 'right-of')
if not ok:
splitter.insert(0, vr)
elif splitter.orientation() == QtCore.Qt.Horizontal:
splitter.setSizes(sizes)
vr.adjust_layout('open')
c.bodyWantsFocusNow()
return vr
#@+node:ekr.20130413061407.10362: *3* g.command('vr-contract')
@g.command('vr-contract')
def contract_rendering_pane(event):
'''Contract the rendering pane.'''
if g.app.gui.guiName() != 'qt':
return
c = event.get('c')
if not c:
return
vr = controllers.get(c.hash())
if not vr:
vr = viewrendered(event)
if g.app.dock:
return
vr.contract()
#@+node:ekr.20130413061407.10361: *3* g.command('vr-expand')
@g.command('vr-expand')
def expand_rendering_pane(event):
'''Expand the rendering pane.'''
if g.app.gui.guiName() != 'qt':
return
c = event.get('c')
if not c:
return
vr = controllers.get(c.hash())
if not vr:
vr = viewrendered(event)
if g.app.dock:
return
vr.expand()
#@+node:ekr.20110917103917.3639: *3* g.command('vr-hide')
@g.command('vr-hide')
def hide_rendering_pane(event):
'''Close the rendering pane.'''
global controllers, layouts
if g.app.gui.guiName() != 'qt':
return
c = event.get('c')
if not c:
return
vr = controllers.get(c.hash())
if not vr:
vr = viewrendered(event)
if g.app.dock:
if vr.external_dock:
return # Can't hide a top-level dock.
dock = vr.leo_dock
if dock:
dock.hide()
return
#
# Legacy code.
if vr.pyplot_active:
g.es_print('can not close VR pane after using pyplot')
return
vr.store_layout('open')
vr.deactivate()
vr.deleteLater()
def at_idle(c=c, _vr=vr):
_vr.adjust_layout('closed')
c.bodyWantsFocusNow()
QtCore.QTimer.singleShot(0, at_idle)
h = c.hash()
c.bodyWantsFocus()
if vr == controllers.get(h):
del controllers[h]
else:
g.trace('Can not happen: no controller for %s' % (c))
# Compatibility
close_rendering_pane = hide_rendering_pane
#@+node:ekr.20110321072702.14507: *3* g.command('vr-lock')
@g.command('vr-lock')
def lock_rendering_pane(event):
'''Lock the rendering pane.'''
global controllers
if g.app.gui.guiName() != 'qt':
return
c = event.get('c')
if not c:
return
vr = controllers.get(c.hash())
if not vr:
vr = viewrendered(event)
if not vr.locked:
vr.lock()
#@+node:ekr.20110320233639.5777: *3* g.command('vr-pause-play')
@g.command('vr-pause-play-movie')
def pause_play_movie(event):
'''Pause or play a movie in the rendering pane.'''
global controllers
if g.app.gui.guiName() != 'qt':
return
c = event.get('c')
if not c:
return
vr = controllers.get(c.hash())
if not vr:
vr = viewrendered(event)
vp = vr.vp
if not vp:
return
f = vp.pause if vp.isPlaying() else vp.play
f()
#@+node:ekr.20110317080650.14386: *3* g.command('vr-show')
@g.command('vr-show')
def show_rendering_pane(event):
'''Show the rendering pane.'''
global controllers
if g.app.gui.guiName() != 'qt':
return
c = event.get('c')
if not c:
return
vr = controllers.get(c.hash())
if not vr:
vr = viewrendered(event)
vr.show_dock_or_pane()
#@+node:ekr.20131001100335.16606: *3* g.command('vr-toggle')
@g.command('vr-toggle')
def toggle_rendering_pane(event):
'''Toggle the rendering pane.'''
global controllers
if g.app.gui.guiName() != 'qt':
return
c = event.get('c')
if not c:
return
if g.app.gui.guiName() != 'qt':
return
vr = controllers.get(c.hash())
if not vr:
vr = viewrendered(event)
vr.hide() # So the toggle below will work.
if g.app.dock:
if vr.external_dock:
return # Can't hide a top-level dock.
dock = vr.leo_dock
if dock:
f = dock.show if dock.isHidden() else dock.hide
f()
elif vr.isHidden():
show_rendering_pane(event)
else:
hide_rendering_pane(event)
#@+node:ekr.20130412180825.10345: *3* g.command('vr-unlock')
@g.command('vr-unlock')
def unlock_rendering_pane(event):
'''Pause or play a movie in the rendering pane.'''
global controllers
if g.app.gui.guiName() != 'qt':
return
c = event.get('c')
if not c:
return
vr = controllers.get(c.hash())
if not vr:
vr = viewrendered(event)
if vr.locked:
vr.unlock()
#@+node:ekr.20110321151523.14464: *3* g.command('vr-update')
@g.command('vr-update')
def update_rendering_pane(event):
'''Update the rendering pane'''
global controllers
if g.app.gui.guiName() != 'qt':
return
c = event.get('c')
if not c:
return
vr = controllers.get(c.hash())
if not vr:
vr = viewrendered(event)
vr.update(tag='view', keywords={'c': c, 'force': True})
#@+node:vitalije.20170712195827.1: *3* @g.command('vr-zoom')
@g.command('vr-zoom')
def zoom_rendering_pane(event):
global controllers
if g.app.gui.guiName() != 'qt':
return
c = event.get('c')
if not c:
return
vr = controllers.get(c.hash())
if not vr:
vr = viewrendered(event)
if g.app.dock:
return
flc = c.free_layout
if vr.zoomed:
for ns in flc.get_top_splitter().top().self_and_descendants():
if hasattr(ns, '_unzoom'):
# this splitter could have been added since
ns.setSizes(ns._unzoom)
else:
parents = []
parent = vr
while parent:
parents.append(parent)
parent = parent.parent()
for ns in flc.get_top_splitter().top().self_and_descendants():
# FIXME - shouldn't be doing this across windows
ns._unzoom = ns.sizes()
for i in range(ns.count()):
w = ns.widget(i)
if w in parents:
sizes = [0] * len(ns._unzoom)
sizes[i] = sum(ns._unzoom)
ns.setSizes(sizes)
break
vr.zoomed = not vr.zoomed
#@+node:tbrown.20110629084915.35149: ** class ViewRenderedProvider (vr)
class ViewRenderedProvider:
#@+others
#@+node:tbrown.20110629084915.35154: *3* vr.__init__
def __init__(self, c):
self.c = c
# Careful: we may be unit testing.
if hasattr(c, 'free_layout'):
splitter = c.free_layout.get_top_splitter()
if splitter:
splitter.register_provider(self)
#@+node:tbrown.20110629084915.35150: *3* vr.ns_provides
def ns_provides(self):
return [('Viewrendered', '_leo_viewrendered')]
#@+node:tbrown.20110629084915.35151: *3* vr.ns_provide
def ns_provide(self, id_):
global controllers, layouts
if id_ == '_leo_viewrendered':
c = self.c
vr = controllers.get(c.hash()) or ViewRenderedController(c)
h = c.hash()
controllers[h] = vr
if not layouts.get(h):
layouts[h] = c.db.get('viewrendered_default_layouts', (None, None))
# return ViewRenderedController(self.c)
return vr
return None
#@-others
#@+node:ekr.20110317024548.14375: ** class ViewRenderedController (QWidget)
if QtWidgets: # NOQA
class ViewRenderedController(QtWidgets.QWidget):
'''A class to control rendering in a rendering pane.'''
#@+others
#@+node:ekr.20110317080650.14380: *3* vr.ctor & helpers
def __init__(self, c, parent=None):
'''Ctor for ViewRenderedController class.'''
self.c = c
# Create the widget.
super().__init__(parent)
self.create_pane(parent)
# Set the ivars.
self.active = False
self.badColors = []
self.delete_callback = None
self.gnx = None
self.graphics_class = QtWidgets.QGraphicsWidget
self.pyplot_canvas = None
self.pyplot_imported = False
self.gs = None # For @graphics-script: a QGraphicsScene
self.gv = None # For @graphics-script: a QGraphicsView
self.inited = False
self.length = 0 # The length of previous p.b.
self.locked = False
self.pyplot_active = False
self.scrollbar_pos_dict = {} # Keys are vnodes, values are positions.
self.sizes = [] # Saved splitter sizes.
self.splitter = None
self.splitter_index = None # The index of the rendering pane in the splitter.
self.title = None
self.vp = None # The present video player.
self.w = None # The present widget in the rendering pane.
# User settings.
self.reloadSettings()
self.node_changed = True
# Init.
self.create_dispatch_dict()
self.activate()
self.zoomed = False
#@+node:ekr.20110320120020.14478: *4* vr.create_dispatch_dict
def create_dispatch_dict(self):
pc = self
d = {
'asciidoc': pc.update_asciidoc,
'big': pc.update_rst,
'html': pc.update_html,
'graphics-script': pc.update_graphics_script,
'image': pc.update_image,
'jupyter': pc.update_jupyter,
'latex': pc.update_latex,
'markdown': pc.update_md,
'md': pc.update_md,
'movie': pc.update_movie,
'networkx': pc.update_networkx,
'pandoc': pc.update_pandoc,
'pyplot': pc.update_pyplot,
'rest': pc.update_rst,
'rst': pc.update_rst,
'svg': pc.update_svg,
# 'url': pc.update_url,
# 'xml': pc.update_xml,
}
pc.dispatch_dict = d
return d
#@+node:ekr.20171114150510.1: *4* vr.reloadSettings
def reloadSettings(self):
c = self.c
c.registerReloadSettings(self)
self.auto_create = c.config.getBool('view-rendered-auto-create', False)
self.background_color = c.config.getColor('rendering-pane-background-color') or 'white'
self.default_kind = c.config.getString('view-rendered-default-kind') or 'rst'
self.external_dock = c.config.getBool('use-vr-dock', default=False)
#@+node:ekr.20190614065659.1: *4* vr.create_pane
def create_pane(self, parent):
'''Create the VR pane or dock.'''
c = self.c
dw = c.frame.top
self.leo_dock = None # May be set below.
if g.app.unitTesting:
return
# Create the inner contents.
self.setObjectName('viewrendered_pane')
self.setLayout(QtWidgets.QVBoxLayout())
self.layout().setContentsMargins(0, 0, 0, 0)
if not g.app.dock:
return
# Allow the VR dock to move only in special circumstances.
central_body = g.app.get_central_widget(c) == 'body'
moveable = g.app.init_docks or central_body
self.leo_dock = dock = g.app.gui.create_dock_widget(
closeable=True, moveable=moveable, height=50, name='Render')
if central_body:
# Create a stand-alone dockable area.
dock.setWidget(self)
dw.addDockWidget(QtCore.Qt.RightDockWidgetArea, dock)
elif g.app.dock:
                # Split the body dock. Don't register the new dock as an editor dock.
### dw.leo_docks.append(dock)
dock.setWidget(self)
dw.splitDockWidget(dw.body_dock, dock, QtCore.Qt.Horizontal)
if g.app.init_docks:
dock.show()
#@+node:ekr.20110317080650.14381: *3* vr.activate
def activate(self):
'''Activate the vr-window.'''
pc = self
if pc.active: return
pc.inited = True
pc.active = True
g.registerHandler('select2', pc.update)
g.registerHandler('idle', pc.update)
#@+node:vitalije.20170712183051.1: *3* vr.adjust_layout (legacy only)
def adjust_layout(self, which):
global layouts
c = self.c
splitter = self.splitter
deflo = c.db.get('viewrendered_default_layouts', (None, None))
loc, loo = layouts.get(c.hash(), deflo)
if which == 'closed' and loc and splitter:
splitter.load_layout(loc)
elif which == 'open' and loo and splitter:
splitter.load_layout(loo)
#@+node:tbrown.20110621120042.22676: *3* vr.closeEvent
def closeEvent(self, event):
'''Close the vr window.'''
self.deactivate()
#@+node:ekr.20130413061407.10363: *3* vr.contract & expand
def contract(self):
self.change_size(-100)
def expand(self):
self.change_size(100)
def change_size(self, delta):
if hasattr(self.c, 'free_layout'):
splitter = self.parent()
i = splitter.indexOf(self)
assert i > -1
sizes = splitter.sizes()
n = len(sizes)
for j, size in enumerate(sizes):
if j == i:
sizes[j] = max(0, size + delta)
else:
sizes[j] = max(0, size - int(delta / (n - 1)))
splitter.setSizes(sizes)
#@+node:ekr.20110317080650.14382: *3* vr.deactivate
def deactivate(self):
'''Deactivate the vr window.'''
pc = self
# Never disable the idle-time hook: other plugins may need it.
g.unregisterHandler('select2', pc.update)
g.unregisterHandler('idle', pc.update)
pc.active = False
#@+node:ekr.20110321072702.14508: *3* vr.lock/unlock
def lock(self):
'''Lock the vr pane.'''
g.note('rendering pane locked')
self.locked = True
def unlock(self):
'''Unlock the vr pane.'''
g.note('rendering pane unlocked')
self.locked = False
#@+node:ekr.20200304133109.1: *3* vr.onContextMenuCallback
def onContextMenuCallback(self, point):
"""LeoQtTree: Callback for customContextMenuRequested events."""
# #1286.
c = self.c
w = self
g.app.gui.onContextMenu(c, w, point)
#@+node:ekr.20160921071239.1: *3* vr.set_html
def set_html(self, s, w):
'''Set text in w to s, preserving scroll position.'''
pc = self
p = pc.c.p
sb = w.verticalScrollBar()
if sb:
d = pc.scrollbar_pos_dict
if pc.node_changed:
# Set the scrollbar.
pos = d.get(p.v, sb.sliderPosition())
sb.setSliderPosition(pos)
else:
# Save the scrollbars
d[p.v] = pos = sb.sliderPosition()
# if trace: g.trace('\n'+s)
w.setHtml(s)
if sb:
# Restore the scrollbars
assert pos is not None
sb.setSliderPosition(pos)
#@+node:ekr.20190614133401.1: *3* vr.show_dock_or_pane
def show_dock_or_pane(self):
c, vr = self.c, self
if g.app.dock:
dock = vr.leo_dock
if dock:
dock.show()
dock.raise_()
# #1230.
else:
vr.activate()
vr.show()
vr.adjust_layout('open')
c.bodyWantsFocusNow()
#@+node:vitalije.20170712183618.1: *3* vr.store_layout
def store_layout(self, which):
global layouts
c = self.c; h = c.hash()
splitter = self.splitter
deflo = c.db.get('viewrendered_default_layouts', (None, None))
(loc, loo) = layouts.get(c.hash(), deflo)
if which == 'closed' and splitter:
loc = splitter.get_saveable_layout()
loc = json.loads(json.dumps(loc))
layouts[h] = loc, loo
elif which == 'open' and splitter:
loo = splitter.get_saveable_layout()
loo = json.loads(json.dumps(loo))
layouts[h] = loc, loo
c.db['viewrendered_default_layouts'] = layouts[h]
#@+node:ekr.20110319143920.14466: *3* vr.underline
def underline(self, s):
'''Generate rST underlining for s.'''
ch = '#'
n = max(4, len(g.toEncodedString(s, reportErrors=False)))
# return '%s\n%s\n%s\n\n' % (ch*n,s,ch*n)
return '%s\n%s\n\n' % (s, ch * n)
#@+node:ekr.20101112195628.5426: *3* vr.update & helpers
# Must have this signature: called by leoPlugins.callTagHandler.
def update(self, tag, keywords):
'''Update the vr pane. Called at idle time.'''
pc = self
p = pc.c.p
# #1256.
if self.locked:
return
if pc.must_update(keywords):
#
# Suppress updates until we change nodes.
pc.node_changed = pc.gnx != p.v.gnx
pc.gnx = p.v.gnx
pc.length = len(p.b) # not s
#
# Remove Leo directives.
s = keywords.get('s') if 's' in keywords else p.b
s = pc.remove_directives(s)
#
# Use plain text if we are hidden.
# This avoids annoying messages with rst.
dock = pc.leo_dock or pc
if dock.isHidden():
w = pc.ensure_text_widget()
w.setPlainText(s)
return
#
# Dispatch based on the computed kind.
kind = keywords.get('flags') if 'flags' in keywords else pc.get_kind(p)
if not kind:
# Do *not* try to render plain text.
w = pc.ensure_text_widget()
w.setPlainText(s)
pc.show() # Must be last.
return
f = pc.dispatch_dict.get(kind)
if not f:
g.trace('no handler for kind: %s' % kind)
f = pc.update_rst
f(s, keywords)
else:
# Save the scroll position.
w = pc.w
if w.__class__ == QtWidgets.QTextBrowser:
# 2011/07/30: The widget may no longer exist.
try:
sb = w.verticalScrollBar()
pc.scrollbar_pos_dict[p.v] = sb.sliderPosition()
except Exception:
g.es_exception()
pc.deactivate()
#@+node:ekr.20190424083049.1: *4* vr.create_base_text_widget
def create_base_text_widget(self):
'''Create a QWebView or a QTextBrowser.'''
c = self.c
w = BaseTextWidget()
n = c.config.getInt('qweb-view-font-size')
if n:
try:
# BaseTextWidget is a QWebView.
settings = w.settings()
settings.setFontSize(settings.DefaultFontSize, n)
except AttributeError:
# BaseTextWidget is a QTextBrowser.
pass
return w
#@+node:ekr.20110320120020.14486: *4* vr.embed_widget & helper
def embed_widget(self, w, delete_callback=None):
'''Embed widget w in the free_layout splitter.'''
pc = self; c = pc.c #X ; splitter = pc.splitter
pc.w = w
layout = self.layout()
for i in range(layout.count()):
layout.removeItem(layout.itemAt(0))
self.layout().addWidget(w)
w.show()
# Special inits for text widgets...
if w.__class__ == QtWidgets.QTextBrowser:
text_name = 'body-text-renderer'
w.setObjectName(text_name)
# Do not do this! It interferes with themes.
# pc.setBackgroundColor(pc.background_color, text_name, w)
w.setReadOnly(True)
# Create the standard Leo bindings.
wrapper_name = 'rendering-pane-wrapper'
wrapper = qt_text.QTextEditWrapper(w, wrapper_name, c)
w.leo_wrapper = wrapper
c.k.completeAllBindingsForWidget(wrapper)
w.setWordWrapMode(QtGui.QTextOption.WrapAtWordBoundaryOrAnywhere)
#@+node:ekr.20110321072702.14510: *5* vr.setBackgroundColor
def setBackgroundColor(self, colorName, name, w):
'''Set the background color of the vr pane.'''
if 0: # Do not do this! It interferes with themes.
pc = self
if not colorName: return
styleSheet = 'QTextEdit#%s { background-color: %s; }' % (name, colorName)
if QtGui.QColor(colorName).isValid():
w.setStyleSheet(styleSheet)
elif colorName not in pc.badColors:
pc.badColors.append(colorName)
g.warning('invalid body background color: %s' % (colorName))
#@+node:ekr.20110320120020.14476: *4* vr.must_update
def must_update(self, keywords):
'''Return True if we must update the rendering pane.'''
pc = self
c, p = pc.c, pc.c.p
if g.unitTesting:
return False
if keywords.get('force'):
pc.active = True
return True
if c != keywords.get('c') or not pc.active:
return False
if pc.locked:
return False
if pc.gnx != p.v.gnx:
return True
if len(p.b) != pc.length:
if pc.get_kind(p) in ('html', 'pyplot'):
pc.length = len(p.b)
return False # Only update explicitly.
return True
# This trace would be called at idle time.
# g.trace('no change')
return False
#@+node:ekr.20191004143229.1: *4* vr.update_asciidoc & helpers
def update_asciidoc(self, s, keywords):
'''Update asciidoc in the vr pane.'''
global asciidoctor_exec, asciidoc3_exec
pc = self
# Do this regardless of whether we show the widget or not.
w = pc.ensure_text_widget()
assert pc.w
if s:
pc.show()
if asciidoctor_exec or asciidoc3_exec:
try:
s2 = self.convert_to_asciidoctor(s)
self.set_html(s2,w)
return
except Exception:
g.es_exception()
self.update_rst(s,keywords)
#@+node:ekr.20191004144242.1: *5* vr.make_asciidoc_title
def make_asciidoc_title(self, s):
'''Generate an asciiidoc title for s.'''
line = '#' * (min(4, len(s)))
return f"{line}\n{s}\n{line}\n\n"
#@+node:ekr.20191004143805.1: *5* vr.convert_to_asciidoctor
def convert_to_asciidoctor(self, s):
'''Convert s to html using the asciidoctor or asciidoc processor.'''
pc = self
c, p = pc.c, pc.c.p
path = g.scanAllAtPathDirectives(c, p) or c.getNodePath(p)
if not os.path.isdir(path):
path = os.path.dirname(path)
if os.path.isdir(path):
os.chdir(path)
if pc.title:
s = pc.make_asciidoc_title(pc.title) + s
pc.title = None
s = pc.run_asciidoctor(s)
return g.toUnicode(s)
#@+node:ekr.20191004144128.1: *5* vr.run_asciidoctor
def run_asciidoctor(self, s):
"""
Process s with asciidoctor or asciidoc3.
return the contents of the html file.
The caller handles all exceptions.
"""
global asciidoctor_exec, asciidoc3_exec
assert asciidoctor_exec or asciidoc3_exec, g.callers()
home = g.os.path.expanduser('~')
i_path = g.os_path_finalize_join(home, 'vr_input.adoc')
o_path = g.os_path_finalize_join(home, 'vr_output.html')
# Write the input file.
with open(i_path, 'w') as f:
f.write(s)
# Call the external program to write the output file.
prog = 'asciidoctor' if asciidoctor_exec else 'asciidoc3'
command = f"{prog} {i_path} -b html5 -o {o_path}"
# The -e option deletes css.
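                # Expands to something like (illustrative home directory):
                #   asciidoctor /home/user/vr_input.adoc -b html5 -o /home/user/vr_output.html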
g.execute_shell_commands(command)
# Read the output file and return it.
with open(o_path, 'r') as f:
return f.read()
#@+node:ekr.20110321151523.14463: *4* vr.update_graphics_script
def update_graphics_script(self, s, keywords):
'''Update the graphics script in the vr pane.'''
pc = self; c = pc.c
force = keywords.get('force')
if pc.gs and not force:
return
if not pc.gs:
splitter = c.free_layout.get_top_splitter()
# Careful: we may be unit testing.
if not splitter:
g.trace('no splitter')
return
# Create the widgets.
pc.gs = QtWidgets.QGraphicsScene(splitter)
pc.gv = QtWidgets.QGraphicsView(pc.gs)
w = pc.gv.viewport() # A QWidget
# Embed the widgets.
def delete_callback():
for w in (pc.gs, pc.gv):
w.deleteLater()
pc.gs = pc.gv = None
pc.embed_widget(w, delete_callback=delete_callback)
c.executeScript(
script=s,
namespace={'gs': pc.gs, 'gv': pc.gv})
#@+node:ekr.20110321005148.14534: *4* vr.update_html
update_html_count = 0
def update_html(self, s, keywords):
'''Update html in the vr pane.'''
pc = self
c = pc.c
if pc.must_change_widget(BaseTextWidget):
w = self.create_base_text_widget()
pc.embed_widget(w)
assert(w == pc.w)
else:
w = pc.w
if isQt5:
w.hide() # This forces a proper update.
w.setHtml(s)
w.show()
c.bodyWantsFocusNow()
#@+node:ekr.20110320120020.14482: *4* vr.update_image
def update_image(self, s, keywords):
'''Update an image in the vr pane.'''
pc = self
if not s.strip():
return
lines = g.splitLines(s) or []
fn = lines and lines[0].strip()
if not fn:
return
w = pc.ensure_text_widget()
ok, path = pc.get_fn(fn, '@image')
if not ok:
w.setPlainText('@image: file not found: %s' % (path))
return
path = path.replace('\\', '/')
template = image_template % (path)
# Only works in Python 3.x.
template = g.adjustTripleString(template, pc.c.tab_width).strip()
# Sensitive to leading blank lines.
# template = g.toUnicode(template)
pc.show()
w.setReadOnly(False)
w.setHtml(template)
w.setReadOnly(True)
#@+node:ekr.20170105124347.1: *4* vr.update_jupyter & helper
update_jupyter_count = 0
def update_jupyter(self, s, keywords):
'''Update @jupyter node in the vr pane.'''
pc = self
c = pc.c
if pc.must_change_widget(BaseTextWidget):
w = self.create_base_text_widget()
pc.embed_widget(w)
assert(w == pc.w)
else:
w = pc.w
s = self.get_jupyter_source(c)
if isQt5:
w.hide() # This forces a proper update.
w.setHtml(s)
w.show()
c.bodyWantsFocusNow()
#@+node:ekr.20180311090852.1: *5* vr.get_jupyter_source
def get_jupyter_source(self, c):
            '''Return the html for the @jupyter node.'''
body = c.p.b.lstrip()
if body.startswith('<'):
# Assume the body is html.
return body
if body.startswith('{'):
# Leo 5.7.1: Allow raw JSON.
s = body
else:
url = g.getUrlFromNode(c.p)
if not url:
return ''
if not nbformat:
                    return 'can not import nbformat to render url: %r' % url
try:
s = urlopen(url).read().decode()
except Exception:
return 'url not found: %s' % url
try:
nb = nbformat.reads(s, as_version=4)
e = HTMLExporter()
(s, junk_resources) = e.from_notebook_node(nb)
except nbformat.reader.NotJSONError:
pass # Assume the result is html.
return s
#@+node:ekr.20170324064811.1: *4* vr.update_latex & helper
def update_latex(self, s, keywords):
'''Update latex in the vr pane.'''
import sys
pc = self
c = pc.c
if sys.platform.startswith('win'):
g.es_print('latex rendering not ready for Python 3')
w = pc.ensure_text_widget()
pc.show()
w.setPlainText(s)
c.bodyWantsFocusNow()
return
if pc.must_change_widget(BaseTextWidget):
w = self.create_base_text_widget()
pc.embed_widget(w)
assert(w == pc.w)
else:
w = pc.w
w.hide() # This forces a proper update.
s = self.create_latex_html(s)
w.setHtml(s)
w.show()
c.bodyWantsFocusNow()
#@+node:ekr.20170324085132.1: *5* vr.create_latex_html
def create_latex_html(self, s):
'''Create an html page embedding the latex code s.'''
c = self.c
# pylint: disable=deprecated-method
try:
import html
escape = html.escape
except AttributeError:
import cgi
escape = cgi.escape
html_s = escape(s)
template = latex_template % (html_s)
template = g.adjustTripleString(template, c.tab_width).strip()
return template
#@+node:peckj.20130207132858.3671: *4* vr.update_md & helper
def update_md(self, s, keywords):
'''Update markdown text in the vr pane.'''
pc = self; c = pc.c; p = c.p
s = s.strip().strip('"""').strip("'''").strip()
isHtml = s.startswith('<') and not s.startswith('<<')
# Do this regardless of whether we show the widget or not.
w = pc.ensure_text_widget()
assert pc.w
if s:
pc.show()
if got_markdown:
force = keywords.get('force')
colorizer = c.frame.body.colorizer
language = colorizer.scanLanguageDirectives(p)
if force or language in ('rst', 'rest', 'markdown', 'md'):
if not isHtml:
s = self.convert_to_markdown(s)
self.set_html(s,w)
else:
# g.trace('markdown not available: using rst')
self.update_rst(s,keywords)
#@+node:ekr.20160921134552.1: *5* convert_to_markdown
def convert_to_markdown(self, s):
'''Convert s to html using the markdown processor.'''
pc = self
c, p = pc.c, pc.c.p
path = g.scanAllAtPathDirectives(c, p) or c.getNodePath(p)
if not os.path.isdir(path):
path = os.path.dirname(path)
if os.path.isdir(path):
os.chdir(path)
try:
if pc.title:
s = pc.underline(pc.title) + s
pc.title = None
mdext = c.config.getString('view-rendered-md-extensions') or 'extra'
mdext = [x.strip() for x in mdext.split(',')]
s = markdown(s, extensions=mdext)
s = g.toUnicode(s)
except SystemMessage as sm:
msg = sm.args[0]
if 'SEVERE' in msg or 'FATAL' in msg:
s = 'MD error:\n%s\n\n%s' % (msg, s)
return s
#@+node:ekr.20110320120020.14481: *4* vr.update_movie
movie_warning = False
def update_movie(self, s, keywords):
'''Update a movie in the vr pane.'''
# pylint: disable=maybe-no-member
# 'PyQt4.phonon' has no 'VideoPlayer' member
# 'PyQt4.phonon' has no 'VideoCategory' member
# 'PyQt4.phonon' has no 'MediaSource' member
pc = self
ok, path = pc.get_fn(s, '@movie')
if not ok:
w = pc.ensure_text_widget()
w.setPlainText('Not found: %s' % (path))
return
if not phonon and not QtMultimedia:
if not self.movie_warning:
self.movie_warning = True
g.es_print('No phonon and no QtMultimedia modules')
w = pc.ensure_text_widget()
w.setPlainText('')
return
if pc.vp:
vp = pc.vp
pc.vp.stop()
pc.vp.deleteLater()
# Create a fresh player.
g.es_print('playing', path)
if QtMultimedia:
url= QtCore.QUrl.fromLocalFile(path)
content= QtMultimedia.QMediaContent(url)
pc.vp = vp = QtMultimedia.QMediaPlayer()
vp.setMedia(content)
# Won't play .mp4 files: https://bugreports.qt.io/browse/QTBUG-32783
vp.play()
else:
pc.vp = vp = phonon.VideoPlayer(phonon.VideoCategory)
vw = vp.videoWidget()
vw.setObjectName('video-renderer')
# Embed the widgets
def delete_callback():
if pc.vp:
pc.vp.stop()
pc.vp.deleteLater()
pc.vp = None
pc.embed_widget(vp, delete_callback=delete_callback)
pc.show()
vp = pc.vp
vp.load(phonon.MediaSource(path))
vp.play()
#@+node:ekr.20110320120020.14484: *4* vr.update_networkx
def update_networkx(self, s, keywords):
'''Update a networkx graphic in the vr pane.'''
pc = self
w = pc.ensure_text_widget()
w.setPlainText('') # 'Networkx: len: %s' % (len(s)))
pc.show()
#@+node:ekr.20191006155748.1: *4* vr.update_pandoc & helpers
def update_pandoc(self, s, keywords):
'''
Update an @pandoc in the vr pane.
There is no such thing as @language pandoc,
so only @pandoc nodes trigger this code.
'''
global pandoc_exec
pc = self
w = pc.ensure_text_widget()
assert pc.w
if s:
pc.show()
if pandoc_exec:
try:
s2 = self.convert_to_pandoc(s)
self.set_html(s2,w)
except Exception:
g.es_exception()
return
self.update_rst(s,keywords)
#@+node:ekr.20191006155748.3: *5* vr.convert_to_pandoc
def convert_to_pandoc(self, s):
            '''Convert s to html using the pandoc processor.'''
pc = self
c, p = pc.c, pc.c.p
path = g.scanAllAtPathDirectives(c, p) or c.getNodePath(p)
if not os.path.isdir(path):
path = os.path.dirname(path)
if os.path.isdir(path):
os.chdir(path)
if pc.title:
s = pc.make_pandoc_title(pc.title) + s
pc.title = None
s = pc.run_pandoc(s)
return g.toUnicode(s)
#@+node:ekr.20191006155748.4: *5* vr.run_pandoc
def run_pandoc(self, s):
"""
Process s with pandoc.
return the contents of the html file.
The caller handles all exceptions.
"""
global pandoc_exec
assert pandoc_exec, g.callers()
home = g.os.path.expanduser('~')
i_path = g.os_path_finalize_join(home, 'vr_input.pandoc')
o_path = g.os_path_finalize_join(home, 'vr_output.html')
# Write the input file.
with open(i_path, 'w') as f:
f.write(s)
# Call pandoc to write the output file.
command = f"pandoc {i_path} -t html5 -o {o_path}"
# --quiet does no harm.
g.execute_shell_commands(command)
# Read the output file and return it.
with open(o_path, 'r') as f:
return f.read()
#@+node:ekr.20160928023915.1: *4* vr.update_pyplot
def update_pyplot(self, s, keywords):
'''Get the pyplot script at c.p.b and show it.'''
c = self.c
if not self.pyplot_imported:
self.pyplot_imported = True
backend = g.os_path_finalize_join(
g.app.loadDir, '..', 'plugins', 'pyplot_backend.py')
if g.os_path_exists(backend):
try:
# The order of these statements is important...
import matplotlib
matplotlib.use('module://leo.plugins.pyplot_backend')
except ImportError:
g.trace('===== FAIL: pyplot.backend')
else:
g.trace('===== MISSING: pyplot.backend')
try:
import matplotlib # Make *sure* this is imported.
import matplotlib.pyplot as plt
import numpy as np
import matplotlib.animation as animation
plt.ion() # Automatically set interactive mode.
namespace = {
'animation': animation,
'matplotlib': matplotlib,
'numpy': np, 'np': np,
'pyplot': plt, 'plt': plt,
}
except Exception:
g.es_print('matplotlib imports failed')
namespace = {}
# Embedding already works without this!
# self.embed_pyplot_widget()
self.pyplot_active = True
# pyplot will throw RuntimeError if we close the pane.
c.executeScript(
event=None,
args=None, p=None,
script=None,
useSelectedText=False,
define_g=True,
define_name='__main__',
silent=False,
namespace=namespace,
raiseFlag=False,
runPyflakes=False, # Suppress warnings about pre-defined symbols.
)
c.bodyWantsFocusNow()
#@+node:ekr.20110320120020.14477: *4* vr.update_rst & helpers
def update_rst(self, s, keywords):
'''Update rst in the vr pane.'''
pc = self
s = s.strip().strip('"""').strip("'''").strip()
isHtml = s.startswith('<') and not s.startswith('<<')
# Do this regardless of whether we show the widget or not.
w = pc.ensure_text_widget()
assert pc.w
if s:
pc.show()
if got_docutils:
# Fix #420: viewrendered does not render some nodes
# Users (rightly) complained, so don't be clever here:
# c, p = pc.c, pc.c.p
# force = keywords.get('force')
# colorizer = c.frame.body.colorizer
# language = colorizer.scanLanguageDirectives(p)
# force or language in ('rst', 'rest', 'markdown', 'md'):
if not isHtml:
s = pc.convert_to_html(s)
pc.set_html(s, w)
else:
w.setPlainText(s)
#@+node:ekr.20160920221324.1: *5* vr.convert_to_html
def convert_to_html(self, s):
'''Convert s to html using docutils.'''
c, p = self.c, self.c.p
# Update the current path.
path = g.scanAllAtPathDirectives(c, p) or c.getNodePath(p)
if not os.path.isdir(path):
path = os.path.dirname(path)
if os.path.isdir(path):
os.chdir(path)
try:
if self.title:
s = self.underline(self.title) + s
self.title = None
# Call docutils to get the string.
s = publish_string(s, writer_name='html')
s = g.toUnicode(s)
except SystemMessage as sm:
msg = sm.args[0]
if 'SEVERE' in msg or 'FATAL' in msg:
s = 'RST error:\n%s\n\n%s' % (msg, s)
return s
#@+node:ekr.20110320120020.14479: *4* vr.update_svg
# http://doc.trolltech.com/4.4/qtsvg.html
# http://doc.trolltech.com/4.4/painting-svgviewer.html
def update_svg(self, s, keywords):
pc = self
if pc.must_change_widget(QtSvg.QSvgWidget):
w = QtSvg.QSvgWidget()
pc.embed_widget(w)
assert(w == pc.w)
else:
w = pc.w
if s.strip().startswith('<'):
# Assume it is the svg (xml) source.
s = g.adjustTripleString(s, pc.c.tab_width).strip()
# Sensitive to leading blank lines.
s = g.toEncodedString(s)
pc.show()
w.load(QtCore.QByteArray(s))
w.show()
else:
# Get a filename from the headline or body text.
ok, path = pc.get_fn(s, '@svg')
if ok:
pc.show()
w.load(path)
w.show()
#@+node:ekr.20110321005148.14537: *4* vr.update_url
def update_url(self, s, keywords):
pc = self
c, p = self.c, self.c.p
colorizer = c.frame.body.colorizer
language = colorizer.scanLanguageDirectives(p)
if language == 'asciidoc':
                pc.update_asciidoc(s, keywords)
elif language in ('rest', 'rst'):
pc.update_rst(s, keywords)
elif language in ('markdown', 'md'):
pc.update_md(s, keywords)
elif pc.default_kind in ('rest', 'rst'):
pc.update_rst(s, keywords)
elif pc.default_kind in ('markdown', 'md'):
pc.update_md(s, keywords)
else:
# Do nothing.
g.trace('ignore',s)
w = pc.ensure_text_widget()
pc.show()
w.setPlainText('')
#@+node:ekr.20110322031455.5765: *4* vr.utils for update helpers...
#@+node:ekr.20110322031455.5764: *5* vr.ensure_text_widget
def ensure_text_widget(self):
'''Swap a text widget into the rendering pane if necessary.'''
c, pc = self.c, self
if pc.must_change_widget(QtWidgets.QTextBrowser):
# Instantiate a new QTextBrowser.
# Allow non-ctrl clicks to open url's.
w = QtWidgets.QTextBrowser()
# #1286.
w.setContextMenuPolicy(QtCore.Qt.CustomContextMenu)
w.customContextMenuRequested.connect(self.onContextMenuCallback)
def handleClick(url, w=w):
import leo.plugins.qt_text as qt_text
wrapper = qt_text.QTextEditWrapper(w, name='vr-body', c=c)
event = g.Bunch(c=c, w=wrapper)
g.openUrlOnClick(event, url=url)
# if self.w and hasattr(self.w, 'anchorClicked'):
# try:
# self.w.anchorClicked.disconnect()
# except Exception:
# g.es_exception()
w.anchorClicked.connect(handleClick)
w.setOpenLinks(False)
pc.embed_widget(w) # Creates w.wrapper
assert(w == pc.w)
return pc.w
#@+node:ekr.20110320120020.14483: *5* vr.get_kind
def get_kind(self, p):
'''Return the proper rendering kind for node p.'''
c = self.c
def get_language(p):
"""
Return the language in effect at position p.
Headline directives over-ride normal Leo directives in body text.
"""
h = p.h
# First, look for headline directives.
if h.startswith('@'):
i = g.skip_id(h, 1, chars='-')
word = h[1: i].lower().strip()
if word in self.dispatch_dict:
return word
# Look for @language directives.
# Warning: (see #344): don't use c.target_language as a default.
colorizer = c.frame.body.colorizer
return colorizer.findFirstValidAtLanguageDirective(p.copy())
#
# #1287: Honor both kind of directives node by node.
for p in p.self_and_parents(p):
language = get_language(p)
if got_markdown and language in ('md', 'markdown'):
return language
if got_docutils and language in ('rest', 'rst'):
return language
if language and language in self.dispatch_dict:
return language
return None
#@+node:ekr.20110320233639.5776: *5* vr.get_fn
def get_fn(self, s, tag):
pc = self
c = pc.c
fn = s or c.p.h[len(tag):]
fn = fn.strip()
# Similar to code in g.computeFileUrl
if fn.startswith('~'):
# Expand '~' and handle Leo expressions.
fn = fn[1:]
fn = g.os_path_expanduser(fn)
fn = g.os_path_expandExpression(fn, c=c)
fn = g.os_path_finalize(fn)
else:
# Handle Leo expressions.
fn = g.os_path_expandExpression(fn, c=c)
# Handle ancestor @path directives.
if c and c.openDirectory:
base = c.getNodePath(c.p)
fn = g.os_path_finalize_join(c.openDirectory, base, fn)
else:
fn = g.os_path_finalize(fn)
ok = g.os_path_exists(fn)
# if not ok: g.trace('not found', fn)
return ok, fn
#@+node:ekr.20110321005148.14536: *5* vr.get_url
def get_url(self, s, tag):
p = self.c.p
url = s or p.h[len(tag):]
url = url.strip()
return url
#@+node:ekr.20110322031455.5763: *5* vr.must_change_widget
def must_change_widget(self, widget_class):
pc = self
return not pc.w or pc.w.__class__ != widget_class
#@+node:ekr.20110320120020.14485: *5* vr.remove_directives
def remove_directives(self, s):
lines = g.splitLines(s)
result = []
for s in lines:
if s.startswith('@'):
i = g.skip_id(s, 1)
word = s[1: i]
if word in g.globalDirectiveList:
continue
result.append(s)
return ''.join(result)
#@-others
#@-others
#@@language python
#@@tabwidth -4
#@-leo
| 37.754334 | 99 | 0.54562 |
4a27c0fbdf181e1222c9c477056abc15ada21b46 | 7,910 | py | Python | evaluation_tasks/calculate_static_embeddings.py | shovalf/OGRE-1 | 08efad50fac27e8c9621897838e122a2e8fdae1c | [
"MIT"
] | null | null | null | evaluation_tasks/calculate_static_embeddings.py | shovalf/OGRE-1 | 08efad50fac27e8c9621897838e122a2e8fdae1c | [
"MIT"
] | 1 | 2021-02-04T07:44:59.000Z | 2021-02-04T07:46:10.000Z | evaluation_tasks/calculate_static_embeddings.py | unknownuser2021/OGRE | 08efad50fac27e8c9621897838e122a2e8fdae1c | [
"MIT"
] | 3 | 2021-04-21T07:25:23.000Z | 2021-07-15T11:07:19.000Z | """
Main file to calculate the embeddings with OGRE/DOGRE/WOGRE, and performing link prediction and node classification task.
In order to calculate the embedding, you first must have an edge list file:
"datasets/name_of_dataset.txt" - An edge list txt file. If the graph is unweighted it consists of 2 columns: source, target (with no title, source and target share an edge).
If the graph is weighted, it consists of 3 columns: source target weight.
Example for unweighted graph:
1 2
2 3
1 4
1 3
Example for weighted graph:
1 2 3
1 3 0.3
1 4 4.5
2 4 0.98
You can see examples for this format in "datasets" directory.
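For reference, a file in this format can be loaded with networkx (the pipeline reads it for you;
this sketch only illustrates the expected layout, and the call used internally may differ):
    import networkx as nx
    G = nx.read_edgelist("datasets/name_of_dataset.txt", nodetype=int)           # unweighted
    H = nx.read_weighted_edgelist("datasets/name_of_dataset.txt", nodetype=int)  # weighted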
If you want to perform a vertex classification task, or GCN is your initial embedding, you must have a labels file:
"labels/{name_of_dataset}_tags.txt" - A txt file which consists of 2 columns: node, label (no title). Notice all nodes must have labels!
Example:
1 0
2 0
3 1
4 2
Another possibility is having a .mat file as in NRL_Benchmark (https://pages.github.com/). In this link, go to either the `node classification`
or `link prediction` directory, where a link to datasets you can use in .mat format is available. Then this .mat file serves as both the
edges and labels file.
If you want to perform link prediction task, you must have non edges file:
"evaluation_tasks/non_edges_{name_of_dataset}" - A csv file which consists of two columns: node1, node2 ; where there is no edge between them (again no title).
In order to produce such file, you can go to evaluation_tasks -> calculate_non_edges.py , and follow the instructions there.
When you have all the files you need (depending on what you want to perform), you can run this file.
1. First initialize DATASET parameters dict:
- name: Name of dataset (as the name of the edge list txt file) (string)
- initial_size: List of initial core sizes. (list)
- dim: Embedding dimension (int)
- is_weighted: True if the graph is weighted, else False (bool)
- choose: "degrees" if the vertices of the initial core are the ones with highest degree (as done in our experiments), else "k_core" if the vertices of the initial core are
the ones with highest k-core score. (string)
- "s_a": True if you also want to calculate state-of-the-art embeddings (node2vec/GF/HOPE/GCN), else False.
Params for OGRE:
- epsilon: Weight to the second order neighbours embedding. For more details you can go to the implementation- our_embedding_methods -> OGRE.py (float).
Params for DOGRE/WOGRE:
- "regu_val": Regularization value for regression, only for DOGRE/WOGRE. For more details you can go to the implementation- our_embedding_methods -> D_W_OGRE.py (float).
- "weighted_reg": True for weighted regression, else False.
If the initial embedding method is GCN and/or a vertex classification task is applied, a labels file is also necessary:
- "label_file": path and name (together), so it can be read directly.
2. methods_ : List of our suggested embedding methods (OGRE/DOGRE/WOGRE) with whom you want to embed the given graph.
3. initial_methods_ : List of state-of-the-art embedding methods (node2vec/GF/HOPE/GCN) with whom the initial core will be embed.
4. params_dict_ : Parameters for state-of-the-art embeddings. These are the optimal ones (according to their papers). For more details you can go to-
state_of_the_art -> state_of_the_art_embedding.py
5. save_: True if you want to save the embedding in a .npy format, else False.
Once you have that, you can run "calculate_static_embeddings" function to get the embeddings as dictionaries. You can see function implementation and output format in
evaluation_tasks -> eval_utils.py .
If you only want the embedding of the graph, you can stop here. If you also want to apply link prediction or vertex classification task you should continue.
Line 107: export_time - Export a csv file with running times of each method according to the initial core size.
Lines 123-130- Link prediction task: A csv file of non edges is needed (as explained above), you can see comments in the code. For more details you can go to
evaluation_tasks -> link_prediction.py .
Lines 132-136- Vertex classification task: You can see comments in the code. For more details you can go to evaluation_tasks -> node_classification.py .
"""
from link_prediction import *
from node_classification import *
from static_embeddings import *
import csv
# initialize important variables / parameters
DATASET = {"name": "DBLP", "initial_size": [100, 1000], "dim": 128, "is_weighted": False, "choose": "degrees",
"regu_val": 0, "weighted_reg": False, "s_a": True, "epsilon": 0.1,
"label_file": os.path.join("..", "labels", "dblp_tags.txt")}
# Example for .mat
# DATASET = {"name": "Flickr", "initial_size": [1000], "dim": 128, "is_weighted": False, "choose": "degrees",
# "regu_val": 0, "weighted_reg": False, "s_a": False, "epsilon": 0.01,
# "label_file": os.path.join("..", "datasets", "Flickr.mat")}
datasets_path_ = os.path.join("..", "datasets")
# where to save the embeddings
if DATASET["choose"] == "degrees":
embeddings_path_ = os.path.join("..", "embeddings_degrees")
else:
embeddings_path_ = os.path.join("..", "embeddings_k_core")
# Our suggested embedding method
methods_ = ["OGRE"]
# state-of-the-art embedding methods
initial_methods_ = ["node2vec"]
# Parameters dict for state-of-the-art embedding methods
params_dict_ = {"node2vec": {"dimension": DATASET["dim"], "walk_length": 80, "num_walks": 16, "workers": 2},
"GF": {"dimension": DATASET["dim"], "eta": 0.1, "regularization": 0.1, "max_iter": 3000,
"print_step": 100}, "HOPE": {"dimension": 128, "beta": 0.1},
"GCN": {"dimension": DATASET["dim"], "epochs": 150, "lr": 0.01, "weight_decay": 5e-4, "hidden": 200,
"dropout": 0}}
# if you want to save the embeddings as npy file- save_=True
save_ = True
# calculate dict of embeddings
z, G, initial_size, list_initial_proj_nodes = calculate_static_embeddings(datasets_path_, embeddings_path_, DATASET,
methods_, initial_methods_, params_dict_,
save_=save_)
"""
if the embeddings is all you wanted you can stop here. Otherwise, here are functions to calculate running time, and
applying Link Prediction and Node Classification Tasks.
"""
# where to save resuts files
if DATASET["choose"] == "degrees":
save = "files_degrees"
else:
save = "files_k_core"
# evaluate running time
export_time(z, DATASET["name"], save)
if DATASET["name"] == "Yelp":
mapping = {i: n for i,n in zip(range(G.number_of_nodes()), list(G.nodes()))}
else:
mapping=None
DATASET["initial_size"] = initial_size
print(initial_size)
# Link prediction Task
n = G.number_of_nodes()
non_edges_file = "non_edges_{}.csv".format(DATASET["name"]) # non edges file
# number_true_false: Number of true and false edges, number choose: How many times to choose true and false edges
params_lp_dict = {"number_true_false": 10000, "rounds": 10, "test_ratio": [0.2, 0.3, 0.5], "number_choose": 10}
dict_lp = final_link_prediction(z, params_lp_dict, non_edges_file)
export_results_lp_nc_all(n, save, z, dict_lp, DATASET["initial_size"], DATASET["name"], "Link Prediction")
print("finish link prediction")
# Node Classification Task
params_nc_dict = {"rounds": 10, "test_ratio": [0.5, 0.9]}
# for multi-label node classification add multi=True
dict_nc = final_node_classification(DATASET["name"], z, params_nc_dict, DATASET, mapping=mapping, multi=False)
export_results_lp_nc_all(n, save, z, dict_nc, DATASET["initial_size"], DATASET["name"], "Node Classification")
print("finish node classification")
| 53.809524 | 174 | 0.7067 |
4a27c12265ab14cb8edca360a3280e4b398a50b3 | 7,050 | py | Python | torchvision/datasets/folder.py | igormq/vision | 1f8f7ea71c8807408a7860b8ae9ca63177975b64 | [
"BSD-3-Clause"
] | 10 | 2020-06-09T12:58:03.000Z | 2021-12-01T11:31:16.000Z | torchvision/datasets/folder.py | shatealaboxiaowang/vision | 1f8f7ea71c8807408a7860b8ae9ca63177975b64 | [
"BSD-3-Clause"
] | null | null | null | torchvision/datasets/folder.py | shatealaboxiaowang/vision | 1f8f7ea71c8807408a7860b8ae9ca63177975b64 | [
"BSD-3-Clause"
] | 4 | 2020-08-24T19:50:37.000Z | 2021-05-29T21:13:51.000Z | import torch.utils.data as data
from PIL import Image
import os
import os.path
import sys
def has_file_allowed_extension(filename, extensions):
"""Checks if a file is an allowed extension.
Args:
filename (string): path to a file
extensions (iterable of strings): extensions to consider (lowercase)
Returns:
bool: True if the filename ends with one of given extensions
"""
filename_lower = filename.lower()
return any(filename_lower.endswith(ext) for ext in extensions)
def is_image_file(filename):
"""Checks if a file is an allowed image extension.
Args:
filename (string): path to a file
Returns:
bool: True if the filename ends with a known image extension
"""
return has_file_allowed_extension(filename, IMG_EXTENSIONS)
def make_dataset(dir, class_to_idx, extensions):
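    # Walk every class subdirectory of `dir` and collect (file path, class index)
    # pairs for all files whose extension is in `extensions`.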
images = []
dir = os.path.expanduser(dir)
for target in sorted(class_to_idx.keys()):
d = os.path.join(dir, target)
if not os.path.isdir(d):
continue
for root, _, fnames in sorted(os.walk(d)):
for fname in sorted(fnames):
if has_file_allowed_extension(fname, extensions):
path = os.path.join(root, fname)
item = (path, class_to_idx[target])
images.append(item)
return images
class DatasetFolder(data.Dataset):
"""A generic data loader where the samples are arranged in this way: ::
root/class_x/xxx.ext
root/class_x/xxy.ext
root/class_x/xxz.ext
root/class_y/123.ext
root/class_y/nsdf3.ext
root/class_y/asd932_.ext
Args:
root (string): Root directory path.
loader (callable): A function to load a sample given its path.
extensions (list[string]): A list of allowed extensions.
transform (callable, optional): A function/transform that takes in
a sample and returns a transformed version.
            E.g., ``transforms.RandomCrop`` for images.
target_transform (callable, optional): A function/transform that takes
in the target and transforms it.
Attributes:
classes (list): List of the class names.
class_to_idx (dict): Dict with items (class_name, class_index).
samples (list): List of (sample path, class_index) tuples
targets (list): The class_index value for each image in the dataset
"""
def __init__(self, root, loader, extensions, transform=None, target_transform=None):
classes, class_to_idx = self._find_classes(root)
samples = make_dataset(root, class_to_idx, extensions)
if len(samples) == 0:
raise(RuntimeError("Found 0 files in subfolders of: " + root + "\n"
"Supported extensions are: " + ",".join(extensions)))
self.root = root
self.loader = loader
self.extensions = extensions
self.classes = classes
self.class_to_idx = class_to_idx
self.samples = samples
self.targets = [s[1] for s in samples]
self.transform = transform
self.target_transform = target_transform
def _find_classes(self, dir):
"""
Finds the class folders in a dataset.
Args:
dir (string): Root directory path.
Returns:
tuple: (classes, class_to_idx) where classes are relative to (dir), and class_to_idx is a dictionary.
Ensures:
No class is a subdirectory of another.
"""
if sys.version_info >= (3, 5):
# Faster and available in Python 3.5 and above
classes = [d.name for d in os.scandir(dir) if d.is_dir()]
else:
classes = [d for d in os.listdir(dir) if os.path.isdir(os.path.join(dir, d))]
classes.sort()
class_to_idx = {classes[i]: i for i in range(len(classes))}
return classes, class_to_idx
def __getitem__(self, index):
"""
Args:
index (int): Index
Returns:
tuple: (sample, target) where target is class_index of the target class.
"""
path, target = self.samples[index]
sample = self.loader(path)
if self.transform is not None:
sample = self.transform(sample)
if self.target_transform is not None:
target = self.target_transform(target)
return sample, target
def __len__(self):
return len(self.samples)
def __repr__(self):
fmt_str = 'Dataset ' + self.__class__.__name__ + '\n'
fmt_str += ' Number of datapoints: {}\n'.format(self.__len__())
fmt_str += ' Root Location: {}\n'.format(self.root)
tmp = ' Transforms (if any): '
fmt_str += '{0}{1}\n'.format(tmp, self.transform.__repr__().replace('\n', '\n' + ' ' * len(tmp)))
tmp = ' Target Transforms (if any): '
fmt_str += '{0}{1}'.format(tmp, self.target_transform.__repr__().replace('\n', '\n' + ' ' * len(tmp)))
return fmt_str
IMG_EXTENSIONS = ['.jpg', '.jpeg', '.png', '.ppm', '.bmp', '.pgm', '.tif', '.tiff']
def pil_loader(path):
# open path as file to avoid ResourceWarning (https://github.com/python-pillow/Pillow/issues/835)
with open(path, 'rb') as f:
img = Image.open(f)
return img.convert('RGB')
def accimage_loader(path):
import accimage
try:
return accimage.Image(path)
except IOError:
# Potentially a decoding problem, fall back to PIL.Image
return pil_loader(path)
def default_loader(path):
from torchvision import get_image_backend
if get_image_backend() == 'accimage':
return accimage_loader(path)
else:
return pil_loader(path)
class ImageFolder(DatasetFolder):
"""A generic data loader where the images are arranged in this way: ::
root/dog/xxx.png
root/dog/xxy.png
root/dog/xxz.png
root/cat/123.png
root/cat/nsdf3.png
root/cat/asd932_.png
Args:
root (string): Root directory path.
        transform (callable, optional): A function/transform that takes in a PIL image
            and returns a transformed version. E.g., ``transforms.RandomCrop``
target_transform (callable, optional): A function/transform that takes in the
target and transforms it.
loader (callable, optional): A function to load an image given its path.
Attributes:
classes (list): List of the class names.
class_to_idx (dict): Dict with items (class_name, class_index).
imgs (list): List of (image path, class_index) tuples
"""
def __init__(self, root, transform=None, target_transform=None,
loader=default_loader):
super(ImageFolder, self).__init__(root, loader, IMG_EXTENSIONS,
transform=transform,
target_transform=target_transform)
self.imgs = self.samples
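if __name__ == "__main__":
    # Minimal usage sketch. The directory "./data/train" below is a hypothetical
    # path assumed to be laid out as root/class_x/xxx.png, as described above.
    from torch.utils.data import DataLoader
    from torchvision import transforms

    dataset = ImageFolder("./data/train",
                          transform=transforms.Compose([
                              transforms.Resize((224, 224)),
                              transforms.ToTensor(),
                          ]))
    loader = DataLoader(dataset, batch_size=32, shuffle=True)
    for images, targets in loader:
        # images: float tensor of shape (B, 3, 224, 224); targets: class indices
        break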
| 33.412322 | 113 | 0.614184 |
4a27c122e79ed43a52c53e089d5cb73dbefdbfd2 | 7,657 | py | Python | homographyHarker.py | raultron/homographyHarker | 0140f3c0afc1315823393842e3577e881d807df0 | [
"MIT"
] | null | null | null | homographyHarker.py | raultron/homographyHarker | 0140f3c0afc1315823393842e3577e881d807df0 | [
"MIT"
] | null | null | null | homographyHarker.py | raultron/homographyHarker | 0140f3c0afc1315823393842e3577e881d807df0 | [
"MIT"
] | null | null | null | #function [ H, Lh ] = homographyHarker( DataA, DataB, LA, LB)
#
# Purpose : Computes the general projective transformation between two sets
# of 2D data using a linear algorithm (the homography).
#
# Uses (syntax) :
# H = homography( DataA, DataB )
#
# Input Parameters :
# DataA, DataB := 2D homogeneous data sets in matrix form (3xn)
#
# Return Parameters :
# H := the 3x3 homography
#
# Description and algorithms:
# The algorithm is based on the Direct Linear Transform (DLT) method
# outlined in Hartley et al. The method uses orthogonal projections of
# matrices, such that the vanishing line is treated as the principal
# component of the reduction. In this manner, the statistical behaviour
# of the errors in variables are treated uniformly, see Harker and
# O'Leary 2005.
#
# References :
# Harker, M., O'Leary, P., Computation of Homographies, to appear in
# Proceedings of the British Machine Vision Conference 2005, Oxford,
# England.
# Hartley, R., Zisserman, A., Multiple View Geometry in Computer Vision,
# Cambridge University Press, Cambridge, 2001
#
# Cite this as :
#
# Author : Matthew Harker
# Date : July 25, 2005
# Version : 1.0
#--------------------------------------------------------------------------
# (c) 2005, O'Leary, Harker, University of Leoben, Leoben, Austria
# email: [email protected], url: automation.unileoben.ac.at
#--------------------------------------------------------------------------
# History:
# Date: Comment:
# July 25, 2005 Original Version 1.0
#--------------------------------------------------------------------------
#
import numpy as np
# -*- coding: utf-8 -*-
def is2DData(Data):
# Purpose : Tests if the input argument represents valid 2D homogeneous
# coordinates.
#
# Uses (syntax) :
# is2DData( Data )
#
# Input Parameters :
# Data := the variable to be tested (should be 3xn, n greater than 0)
#
# Return Parameters :
# trueFalse := 0 or 1 (false or true)
#
# Description and algorithms:
# Tests the size of the input argument
#
# References :
#
# Cite this as :
#
# Author : Matthew Harker
# Date : July 13, 2005
# Version : 1.0
#--------------------------------------------------------------------------
# (c) 2005, O'Leary, Harker, University of Leoben, Leoben, Austria
# email: [email protected], url: automation.unileoben.ac.at
#--------------------------------------------------------------------------
# History:
# Date: Comment:
# July 13, 2005 Original Version 1.0
#--------------------------------------------------------------------------
#
m = Data.shape[0]
n = Data.shape[1]
if (m == 3) and (n > 0):
trueFalse = True
else:
trueFalse = False
return trueFalse
def normalizeData(Data, L=None):
#
#function [DataN, T, Ti, LN] = normalizeData( Data, L ) ;
#
# Purpose : Computes a set of data corresponding to the input data with its
# centroid subtracted, and scaled such that the root-mean-square distance
# to the origin is sqrt(2). The transformation T, carries the scaled data
# back to its original form. Optionally, the first order estimation of
# covariance matrices are computed.
#
# Uses (syntax) :
# [DataN, T, LN] = normalizeData( Data, L ) Not yet implemented in python
# [DataN, T] = normalizeData( Data )
#
# Input Parameters :
# Data := a 3xn matrix of homogeneous points.
# L := is a 3x3 covariance matrix (all points have identical covariance), or
# a 3x3xn array of n covariance matrices.
#
# Return Parameters :
# DataN := mean-free data scaled s.t. d_RMS = sqrt(2)
# T := transformation to bring DataN to the Affine coordinates
# corresponding to Data (NOTE: T*DataN is in affine coords).
# LN := the covariance of the scaled normalized data (size is
# generally 2x2xn, due to the normalization)
#
# Description and algorithms:
#
# References :
# Clarke, J.C., Modelling Uncertainty: A Primer, Dept. of Engineering
# Science, Oxford University, Technical Report.
#
# Cite this as :
#
# Author : Matthew Harker
# Date : July 7, 2005
# Version :
#
# (c) 2005, Institute for Automation, University of Leoben, Leoben, Austria
# email: [email protected], url: automation.unileoben.ac.at
#
# History:
# Date: Comment:
# Original Version 1.0
#--------------------------------------------------------------------------
#
# Check input arguments :
#
    if L is None:
        if not is2DData(Data):
            print('Error: Input does not represent 2D data')
    else:
        print('Error: covariance not yet implemented in Python version')
Data = np.copy(Data)
s = Data[0,:]
t = Data[1,:]
u = Data[2,:]
x = s/u
y = t/u
xm = np.mean( x )
ym = np.mean( y )
xh = x - xm
yh = y - ym
n = len( xh )
kappa = np.sum( xh**2 + yh**2 )
    beta = np.sqrt(2 * n / kappa)
xn = beta * xh
yn = beta * yh
DataN = np.vstack([xn,yn,np.ones(len(xn))])
T = np.array([[ 1/beta, 0 , xm ],
[ 0 , 1/beta, ym ],
[ 0 , 0 , 1 ]])
Ti = np.array([[ beta , 0 , -beta * xm ],
[ 0 , beta , -beta * ym ],
[ 0 , 0 , 1 ]])
return DataN, T, Ti
def homographyHarker(DataA, DataB, LA=None, LB=None):
    # Check input parameters:
    if LA is None and LB is None:
        if not is2DData(DataA) or not is2DData(DataB):
            print('Error: Input does not represent 2D data')
        nA = DataA.shape[1]
        nB = DataB.shape[1]
        if nA != nB:
            print('Error: Data sets must be the same size')
    else:
        print('Error: Input covariance data not yet implemented in Python')
        print('Error propagation not implemented as of yet')
# Normalize the input data:
DataA,TA,TAi = normalizeData( DataA )
DataB,TB,TBi = normalizeData( DataB )
# Construct the orthogonalized design matrix :
C1 = -DataB[0,:] * DataA[0,:]
C2 = -DataB[0,:] * DataA[1,:]
C3 = -DataB[1,:] * DataA[0,:]
C4 = -DataB[1,:] * DataA[1,:]
mC1 = np.mean( C1 )
mC2 = np.mean( C2 )
mC3 = np.mean( C3 )
mC4 = np.mean( C4 )
Mx = np.column_stack([C1 - mC1, C2 - mC2, -DataB[0,:]])
My = np.column_stack([C3 - mC3, C4 - mC4, -DataB[1,:]])
Pp = np.linalg.pinv(DataA[0:2,:].conj().T)
Bx = np.dot(Pp, Mx)
By = np.dot(Pp, My)
D = np.row_stack([Mx - np.dot(DataA[0:2,:].conj().T,Bx), My - np.dot(DataA[0:2,:].conj().T,By)])
#% Find v_min and backsubstitute :
#%
U,S,Vh = np.linalg.svd( D )
V = Vh.T
h789 = V[:,-1]
h12 = -Bx.dot(h789)
h45 = -By.dot(h789)
h3 = -np.array([mC1, mC2]).dot(h789[0:2])
h6 = -np.array([mC3, mC4]).dot(h789[0:2])
# Reshape vector h to matrix H, and transform :
H = np.hstack([h12, h3, h45, h6, h789]).reshape(3,3)
H = TB.dot(H).dot(TAi)
H = H / H[2,2]
return H
def test():
DataA = np.array([[1,2,3,4],[1,3,4,4],[1,1,1,1]])
DataB = np.array([[2,4,6,8],[3,9,12,12],[1,1,1,1]])
DataC = np.array([[2,2,3,1],[2,3,4,1],[1,1,1,1]])
H = homographyHarker(DataA, DataB)
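if __name__ == '__main__':
    # Minimal self-check sketch: synthesize noise-free correspondences from a
    # known homography (the values of H_true below are arbitrary) and compare
    # them with the estimate returned by homographyHarker.
    H_true = np.array([[1.2, 0.1, 5.0],
                       [-0.2, 0.9, -3.0],
                       [0.001, 0.002, 1.0]])
    x = np.array([0.0, 1.0, 1.0, 0.0, 0.3])
    y = np.array([0.0, 0.0, 1.0, 1.0, 0.7])
    DataA = np.vstack([x, y, np.ones_like(x)])
    DataB = H_true.dot(DataA)
    DataB = DataB / DataB[2, :]  # rescale to homogeneous coordinates with w = 1
    H_est = homographyHarker(DataA, DataB)
    # For exact correspondences the difference should be near machine precision.
    print(np.max(np.abs(H_est - H_true / H_true[2, 2])))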
| 31.126016 | 100 | 0.527752 |
4a27c184f4a55668a664e29350bb2c873e4fa130 | 666 | bzl | Python | workspace.bzl | jgennis/rules_rust | 4402ffacc6ff3b1a43c118112cd1aa34a8e797e5 | [
"Apache-2.0"
] | 1 | 2021-02-05T06:44:28.000Z | 2021-02-05T06:44:28.000Z | workspace.bzl | jgennis/rules_rust | 4402ffacc6ff3b1a43c118112cd1aa34a8e797e5 | [
"Apache-2.0"
] | null | null | null | workspace.bzl | jgennis/rules_rust | 4402ffacc6ff3b1a43c118112cd1aa34a8e797e5 | [
"Apache-2.0"
] | 2 | 2020-09-12T19:28:00.000Z | 2020-11-24T02:46:42.000Z | load("@bazel_skylib//lib:versions.bzl", "versions")
def _store_bazel_version(repository_ctx):
bazel_version = versions.get()
if len(bazel_version) == 0:
print("You're using development build of Bazel, make sure it's at least version 0.17.1")
elif versions.is_at_most("0.17.0", bazel_version):
fail("Bazel {} is too old to use with rules_rust, please use at least Bazel 0.17.1, preferably newer.".format(bazel_version))
repository_ctx.file("BUILD", "exports_files(['def.bzl'])")
repository_ctx.file("def.bzl", "BAZEL_VERSION='" + bazel_version + "'")
bazel_version = repository_rule(
implementation = _store_bazel_version,
)
| 44.4 | 133 | 0.71021 |
4a27c19646b4c7a36d0b0a6d07097da9935ca9dc | 2,220 | py | Python | detect.py | Po-Chun-Chien/LUT-Net | 413559027980db2585d939cd4a514a172b62f57d | [
"MIT"
] | null | null | null | detect.py | Po-Chun-Chien/LUT-Net | 413559027980db2585d939cd4a514a172b62f57d | [
"MIT"
] | null | null | null | detect.py | Po-Chun-Chien/LUT-Net | 413559027980db2585d939cd4a514a172b62f57d | [
"MIT"
] | null | null | null | import os
import numpy as np
def readPLA(fn, d):
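    # Parse a PLA file: read the .i/.o/.p headers, skip ahead to the ".type fr"
    # line, then record each cube's input pattern -> output bit in the dict d.
    # A single output is assumed (the assertion on `no` below enforces this).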
if not os.path.isfile(fn):
print('Warning: PLA "{}" not found.'.format(fn))
return
getNum = lambda s, head: int(s.strip('\n').replace(head, '').replace(' ', ''))
getPat = lambda s: s.strip('\n').replace(' ', '')
with open(fn) as fp:
ni = getNum(fp.readline(), '.i')
no = getNum(fp.readline(), '.o')
nl = getNum(fp.readline(), '.p')
if d: ni = len(list(d)[0])
assert no == 1
for line in fp:
if line.startswith('.type fr'):
break
data, labels = [], []
for i in range(nl):
pat = getPat(fp.readline())
assert(len(pat) == ni + no)
if pat[:-1] in d:
assert d[pat[:-1]] == pat[-1]
else:
d[pat[:-1]] = pat[-1]
for line in fp:
if line.startswith('.e'):
break
def detectUnate(d):
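    # For each assignment i and each input position c whose bit is '1', compare
    # the output with the assignment obtained by flipping that bit to '0'.
    # pos/neg/eq accumulate per-variable evidence for positive unateness,
    # negative unateness, or no effect on the output.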
n = len(list(d)[0])
pos = np.zeros((n,), dtype=np.uintc)
neg = np.zeros((n,), dtype=np.uintc)
eq = np.zeros((n,), dtype=np.uintc)
for i, j in d.items():
for c in range(len(i)):
if i[c] == '0': continue
ii = i[:c] + '0' + i[c+1:]
if ii not in d: continue
jj = d[ii]
if int(j) > int(jj):
pos[c] += 1
elif int(j) < int(jj):
neg[c] += 1
else:
eq[c] += 1
return pos, neg, eq
def detectSym1bit(d):
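    # For each assignment with bit '0' at c1 and bit '1' at c2, compare the
    # output with the assignment where the two bits are swapped to '1'/'0'.
    # ret[c1, c2] counts agreements (symmetry evidence), ret[c2, c1] mismatches.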
n = len(list(d)[0])
ret = np.zeros((n, n), dtype=np.uintc)
for i, j in d.items():
for c1 in range(len(i)):
for c2 in range(c1+1, len(i)):
if (ret[c1, c2] > 0) and (ret[c2, c1] > 0): continue
if (i[c1] != '0') or (i[c2] != '1'): continue
ii = i[:c1] + '1' + i[c1+1:c2] + '0' + i[c2+1:]
if ii not in d: continue
if j == d[ii]:
ret[c1, c2] += 1
else:
ret[c2, c1] += 1
return ret
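if __name__ == '__main__':
    # Minimal usage sketch on a 2-input AND function, assuming the
    # pattern -> output-bit dictionary format that readPLA produces.
    d = {'00': '0', '01': '0', '10': '0', '11': '1'}
    pos, neg, eq = detectUnate(d)
    print(pos, neg, eq)  # AND is positive unate in both inputs (neg stays 0)
    print(detectSym1bit(d))  # evidence only above the diagonal: inputs are symmetric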
| 30 | 83 | 0.388739 |
4a27c32860b4ed3f4db51d085d6c03607002faaf | 56,324 | py | Python | numpy/lib/recfunctions.py | m10an/numpy | f1c30fde8b614cc7e11789122ba93e6fb0e86385 | [
"BSD-3-Clause"
] | null | null | null | numpy/lib/recfunctions.py | m10an/numpy | f1c30fde8b614cc7e11789122ba93e6fb0e86385 | [
"BSD-3-Clause"
] | 8 | 2021-10-07T10:59:49.000Z | 2021-11-22T20:06:49.000Z | numpy/lib/recfunctions.py | m10an/numpy | f1c30fde8b614cc7e11789122ba93e6fb0e86385 | [
"BSD-3-Clause"
] | null | null | null | """
Collection of utilities to manipulate structured arrays.
Most of these functions were initially implemented by John Hunter for
matplotlib. They have been rewritten and extended for convenience.
"""
import itertools
import numpy as np
import numpy.ma as ma
from numpy import ndarray, recarray
from numpy.ma import MaskedArray
from numpy.ma.mrecords import MaskedRecords
from numpy.core.overrides import array_function_dispatch
from numpy.lib._iotools import _is_string_like
from numpy.testing import suppress_warnings
_check_fill_value = np.ma.core._check_fill_value
__all__ = [
'append_fields', 'apply_along_fields', 'assign_fields_by_name',
'drop_fields', 'find_duplicates', 'flatten_descr',
'get_fieldstructure', 'get_names', 'get_names_flat',
'join_by', 'merge_arrays', 'rec_append_fields',
'rec_drop_fields', 'rec_join', 'recursive_fill_fields',
'rename_fields', 'repack_fields', 'require_fields',
'stack_arrays', 'structured_to_unstructured', 'unstructured_to_structured',
]
def _recursive_fill_fields_dispatcher(input, output):
return (input, output)
@array_function_dispatch(_recursive_fill_fields_dispatcher)
def recursive_fill_fields(input, output):
"""
Fills fields from output with fields from input,
with support for nested structures.
Parameters
----------
input : ndarray
Input array.
output : ndarray
Output array.
Notes
-----
* `output` should be at least the same size as `input`
Examples
--------
>>> from numpy.lib import recfunctions as rfn
>>> a = np.array([(1, 10.), (2, 20.)], dtype=[('A', np.int64), ('B', np.float64)])
>>> b = np.zeros((3,), dtype=a.dtype)
>>> rfn.recursive_fill_fields(a, b)
array([(1, 10.), (2, 20.), (0, 0.)], dtype=[('A', '<i8'), ('B', '<f8')])
"""
newdtype = output.dtype
for field in newdtype.names:
try:
current = input[field]
except ValueError:
continue
if current.dtype.names is not None:
recursive_fill_fields(current, output[field])
else:
output[field][:len(current)] = current
return output
def _get_fieldspec(dtype):
"""
Produce a list of name/dtype pairs corresponding to the dtype fields
Similar to dtype.descr, but the second item of each tuple is a dtype, not a
string. As a result, this handles subarray dtypes
Can be passed to the dtype constructor to reconstruct the dtype, noting that
this (deliberately) discards field offsets.
Examples
--------
>>> dt = np.dtype([(('a', 'A'), np.int64), ('b', np.double, 3)])
>>> dt.descr
[(('a', 'A'), '<i8'), ('b', '<f8', (3,))]
>>> _get_fieldspec(dt)
[(('a', 'A'), dtype('int64')), ('b', dtype(('<f8', (3,))))]
"""
if dtype.names is None:
# .descr returns a nameless field, so we should too
return [('', dtype)]
else:
fields = ((name, dtype.fields[name]) for name in dtype.names)
# keep any titles, if present
return [
(name if len(f) == 2 else (f[2], name), f[0])
for name, f in fields
]
def get_names(adtype):
"""
Returns the field names of the input datatype as a tuple. Input datatype
    must have fields, otherwise an error is raised.
Parameters
----------
adtype : dtype
Input datatype
Examples
--------
>>> from numpy.lib import recfunctions as rfn
>>> rfn.get_names(np.empty((1,), dtype=[('A', int)]).dtype)
('A',)
>>> rfn.get_names(np.empty((1,), dtype=[('A',int), ('B', float)]).dtype)
('A', 'B')
>>> adtype = np.dtype([('a', int), ('b', [('ba', int), ('bb', int)])])
>>> rfn.get_names(adtype)
('a', ('b', ('ba', 'bb')))
"""
listnames = []
names = adtype.names
for name in names:
current = adtype[name]
if current.names is not None:
listnames.append((name, tuple(get_names(current))))
else:
listnames.append(name)
return tuple(listnames)
def get_names_flat(adtype):
"""
Returns the field names of the input datatype as a tuple. Input datatype
    must have fields, otherwise an error is raised.
    Nested structures are flattened beforehand.
Parameters
----------
adtype : dtype
Input datatype
Examples
--------
>>> from numpy.lib import recfunctions as rfn
>>> rfn.get_names_flat(np.empty((1,), dtype=[('A', int)]).dtype) is None
False
>>> rfn.get_names_flat(np.empty((1,), dtype=[('A',int), ('B', str)]).dtype)
('A', 'B')
>>> adtype = np.dtype([('a', int), ('b', [('ba', int), ('bb', int)])])
>>> rfn.get_names_flat(adtype)
('a', 'b', 'ba', 'bb')
"""
listnames = []
names = adtype.names
for name in names:
listnames.append(name)
current = adtype[name]
if current.names is not None:
listnames.extend(get_names_flat(current))
return tuple(listnames)
def flatten_descr(ndtype):
"""
Flatten a structured data-type description.
Examples
--------
>>> from numpy.lib import recfunctions as rfn
>>> ndtype = np.dtype([('a', '<i4'), ('b', [('ba', '<f8'), ('bb', '<i4')])])
>>> rfn.flatten_descr(ndtype)
(('a', dtype('int32')), ('ba', dtype('float64')), ('bb', dtype('int32')))
"""
names = ndtype.names
if names is None:
return (('', ndtype),)
else:
descr = []
for field in names:
(typ, _) = ndtype.fields[field]
if typ.names is not None:
descr.extend(flatten_descr(typ))
else:
descr.append((field, typ))
return tuple(descr)
def _zip_dtype(seqarrays, flatten=False):
newdtype = []
if flatten:
for a in seqarrays:
newdtype.extend(flatten_descr(a.dtype))
else:
for a in seqarrays:
current = a.dtype
if current.names is not None and len(current.names) == 1:
# special case - dtypes of 1 field are flattened
newdtype.extend(_get_fieldspec(current))
else:
newdtype.append(('', current))
return np.dtype(newdtype)
def _zip_descr(seqarrays, flatten=False):
"""
Combine the dtype description of a series of arrays.
Parameters
----------
seqarrays : sequence of arrays
Sequence of arrays
flatten : {boolean}, optional
Whether to collapse nested descriptions.
"""
return _zip_dtype(seqarrays, flatten=flatten).descr
def get_fieldstructure(adtype, lastname=None, parents=None,):
"""
Returns a dictionary with fields indexing lists of their parent fields.
This function is used to simplify access to fields nested in other fields.
Parameters
----------
adtype : np.dtype
Input datatype
lastname : optional
Last processed field name (used internally during recursion).
parents : dictionary
        Dictionary of parent fields (used internally during recursion).
Examples
--------
>>> from numpy.lib import recfunctions as rfn
>>> ndtype = np.dtype([('A', int),
... ('B', [('BA', int),
... ('BB', [('BBA', int), ('BBB', int)])])])
>>> rfn.get_fieldstructure(ndtype)
... # XXX: possible regression, order of BBA and BBB is swapped
{'A': [], 'B': [], 'BA': ['B'], 'BB': ['B'], 'BBA': ['B', 'BB'], 'BBB': ['B', 'BB']}
"""
if parents is None:
parents = {}
names = adtype.names
for name in names:
current = adtype[name]
if current.names is not None:
if lastname:
parents[name] = [lastname, ]
else:
parents[name] = []
parents.update(get_fieldstructure(current, name, parents))
else:
lastparent = [_ for _ in (parents.get(lastname, []) or [])]
if lastparent:
lastparent.append(lastname)
elif lastname:
lastparent = [lastname, ]
parents[name] = lastparent or []
return parents
def _izip_fields_flat(iterable):
"""
Returns an iterator of concatenated fields from a sequence of arrays,
collapsing any nested structure.
"""
for element in iterable:
if isinstance(element, np.void):
yield from _izip_fields_flat(tuple(element))
else:
yield element
def _izip_fields(iterable):
"""
Returns an iterator of concatenated fields from a sequence of arrays.
"""
for element in iterable:
if (hasattr(element, '__iter__') and
not isinstance(element, str)):
yield from _izip_fields(element)
elif isinstance(element, np.void) and len(tuple(element)) == 1:
            # this statement is the same as the previous expression
yield from _izip_fields(element)
else:
yield element
def _izip_records(seqarrays, fill_value=None, flatten=True):
"""
Returns an iterator of concatenated items from a sequence of arrays.
Parameters
----------
seqarrays : sequence of arrays
Sequence of arrays.
fill_value : {None, integer}
Value used to pad shorter iterables.
    flatten : {True, False}, optional
        Whether to flatten the fields of nested structures.
"""
# Should we flatten the items, or just use a nested approach
if flatten:
zipfunc = _izip_fields_flat
else:
zipfunc = _izip_fields
for tup in itertools.zip_longest(*seqarrays, fillvalue=fill_value):
yield tuple(zipfunc(tup))
def _fix_output(output, usemask=True, asrecarray=False):
"""
Private function: return a recarray, a ndarray, a MaskedArray
or a MaskedRecords depending on the input parameters
"""
if not isinstance(output, MaskedArray):
usemask = False
if usemask:
if asrecarray:
output = output.view(MaskedRecords)
else:
output = ma.filled(output)
if asrecarray:
output = output.view(recarray)
return output
def _fix_defaults(output, defaults=None):
"""
Update the fill_value and masked data of `output`
from the default given in a dictionary defaults.
"""
names = output.dtype.names
(data, mask, fill_value) = (output.data, output.mask, output.fill_value)
for (k, v) in (defaults or {}).items():
if k in names:
fill_value[k] = v
data[k][mask[k]] = v
return output
def _merge_arrays_dispatcher(seqarrays, fill_value=None, flatten=None,
usemask=None, asrecarray=None):
return seqarrays
@array_function_dispatch(_merge_arrays_dispatcher)
def merge_arrays(seqarrays, fill_value=-1, flatten=False,
usemask=False, asrecarray=False):
"""
Merge arrays field by field.
Parameters
----------
seqarrays : sequence of ndarrays
Sequence of arrays
fill_value : {float}, optional
Filling value used to pad missing data on the shorter arrays.
flatten : {False, True}, optional
Whether to collapse nested fields.
usemask : {False, True}, optional
Whether to return a masked array or not.
asrecarray : {False, True}, optional
Whether to return a recarray (MaskedRecords) or not.
Examples
--------
>>> from numpy.lib import recfunctions as rfn
>>> rfn.merge_arrays((np.array([1, 2]), np.array([10., 20., 30.])))
array([( 1, 10.), ( 2, 20.), (-1, 30.)],
dtype=[('f0', '<i8'), ('f1', '<f8')])
>>> rfn.merge_arrays((np.array([1, 2], dtype=np.int64),
... np.array([10., 20., 30.])), usemask=False)
array([(1, 10.0), (2, 20.0), (-1, 30.0)],
dtype=[('f0', '<i8'), ('f1', '<f8')])
>>> rfn.merge_arrays((np.array([1, 2]).view([('a', np.int64)]),
... np.array([10., 20., 30.])),
... usemask=False, asrecarray=True)
rec.array([( 1, 10.), ( 2, 20.), (-1, 30.)],
dtype=[('a', '<i8'), ('f1', '<f8')])
Notes
-----
* Without a mask, the missing value will be filled with something,
      depending on its corresponding type:
* ``-1`` for integers
* ``-1.0`` for floating point numbers
* ``'-'`` for characters
* ``'-1'`` for strings
* ``True`` for boolean values
* XXX: I just obtained these values empirically
"""
# Only one item in the input sequence ?
if (len(seqarrays) == 1):
seqarrays = np.asanyarray(seqarrays[0])
# Do we have a single ndarray as input ?
if isinstance(seqarrays, (ndarray, np.void)):
seqdtype = seqarrays.dtype
# Make sure we have named fields
if seqdtype.names is None:
seqdtype = np.dtype([('', seqdtype)])
if not flatten or _zip_dtype((seqarrays,), flatten=True) == seqdtype:
# Minimal processing needed: just make sure everything's a-ok
seqarrays = seqarrays.ravel()
# Find what type of array we must return
if usemask:
if asrecarray:
seqtype = MaskedRecords
else:
seqtype = MaskedArray
elif asrecarray:
seqtype = recarray
else:
seqtype = ndarray
return seqarrays.view(dtype=seqdtype, type=seqtype)
else:
seqarrays = (seqarrays,)
else:
# Make sure we have arrays in the input sequence
seqarrays = [np.asanyarray(_m) for _m in seqarrays]
# Find the sizes of the inputs and their maximum
sizes = tuple(a.size for a in seqarrays)
maxlength = max(sizes)
# Get the dtype of the output (flattening if needed)
newdtype = _zip_dtype(seqarrays, flatten=flatten)
# Initialize the sequences for data and mask
seqdata = []
seqmask = []
# If we expect some kind of MaskedArray, make a special loop.
if usemask:
for (a, n) in zip(seqarrays, sizes):
nbmissing = (maxlength - n)
# Get the data and mask
data = a.ravel().__array__()
mask = ma.getmaskarray(a).ravel()
# Get the filling value (if needed)
if nbmissing:
fval = _check_fill_value(fill_value, a.dtype)
if isinstance(fval, (ndarray, np.void)):
if len(fval.dtype) == 1:
fval = fval.item()[0]
fmsk = True
else:
fval = np.array(fval, dtype=a.dtype, ndmin=1)
fmsk = np.ones((1,), dtype=mask.dtype)
else:
fval = None
fmsk = True
# Store an iterator padding the input to the expected length
seqdata.append(itertools.chain(data, [fval] * nbmissing))
seqmask.append(itertools.chain(mask, [fmsk] * nbmissing))
# Create an iterator for the data
data = tuple(_izip_records(seqdata, flatten=flatten))
output = ma.array(np.fromiter(data, dtype=newdtype, count=maxlength),
mask=list(_izip_records(seqmask, flatten=flatten)))
if asrecarray:
output = output.view(MaskedRecords)
else:
# Same as before, without the mask we don't need...
for (a, n) in zip(seqarrays, sizes):
nbmissing = (maxlength - n)
data = a.ravel().__array__()
if nbmissing:
fval = _check_fill_value(fill_value, a.dtype)
if isinstance(fval, (ndarray, np.void)):
if len(fval.dtype) == 1:
fval = fval.item()[0]
else:
fval = np.array(fval, dtype=a.dtype, ndmin=1)
else:
fval = None
seqdata.append(itertools.chain(data, [fval] * nbmissing))
output = np.fromiter(tuple(_izip_records(seqdata, flatten=flatten)),
dtype=newdtype, count=maxlength)
if asrecarray:
output = output.view(recarray)
# And we're done...
return output
def _drop_fields_dispatcher(base, drop_names, usemask=None, asrecarray=None):
return (base,)
@array_function_dispatch(_drop_fields_dispatcher)
def drop_fields(base, drop_names, usemask=True, asrecarray=False):
"""
Return a new array with fields in `drop_names` dropped.
Nested fields are supported.
.. versionchanged:: 1.18.0
`drop_fields` returns an array with 0 fields if all fields are dropped,
rather than returning ``None`` as it did previously.
Parameters
----------
base : array
Input array
drop_names : string or sequence
String or sequence of strings corresponding to the names of the
fields to drop.
usemask : {False, True}, optional
Whether to return a masked array or not.
    asrecarray : bool, optional
Whether to return a recarray or a mrecarray (`asrecarray=True`) or
a plain ndarray or masked array with flexible dtype. The default
is False.
Examples
--------
>>> from numpy.lib import recfunctions as rfn
>>> a = np.array([(1, (2, 3.0)), (4, (5, 6.0))],
... dtype=[('a', np.int64), ('b', [('ba', np.double), ('bb', np.int64)])])
>>> rfn.drop_fields(a, 'a')
array([((2., 3),), ((5., 6),)],
dtype=[('b', [('ba', '<f8'), ('bb', '<i8')])])
>>> rfn.drop_fields(a, 'ba')
array([(1, (3,)), (4, (6,))], dtype=[('a', '<i8'), ('b', [('bb', '<i8')])])
>>> rfn.drop_fields(a, ['ba', 'bb'])
array([(1,), (4,)], dtype=[('a', '<i8')])
"""
if _is_string_like(drop_names):
drop_names = [drop_names]
else:
drop_names = set(drop_names)
def _drop_descr(ndtype, drop_names):
names = ndtype.names
newdtype = []
for name in names:
current = ndtype[name]
if name in drop_names:
continue
if current.names is not None:
descr = _drop_descr(current, drop_names)
if descr:
newdtype.append((name, descr))
else:
newdtype.append((name, current))
return newdtype
newdtype = _drop_descr(base.dtype, drop_names)
output = np.empty(base.shape, dtype=newdtype)
output = recursive_fill_fields(base, output)
return _fix_output(output, usemask=usemask, asrecarray=asrecarray)
def _keep_fields(base, keep_names, usemask=True, asrecarray=False):
"""
Return a new array keeping only the fields in `keep_names`,
and preserving the order of those fields.
Parameters
----------
base : array
Input array
keep_names : string or sequence
String or sequence of strings corresponding to the names of the
fields to keep. Order of the names will be preserved.
usemask : {False, True}, optional
Whether to return a masked array or not.
    asrecarray : bool, optional
Whether to return a recarray or a mrecarray (`asrecarray=True`) or
a plain ndarray or masked array with flexible dtype. The default
is False.
"""
newdtype = [(n, base.dtype[n]) for n in keep_names]
output = np.empty(base.shape, dtype=newdtype)
output = recursive_fill_fields(base, output)
return _fix_output(output, usemask=usemask, asrecarray=asrecarray)
def _rec_drop_fields_dispatcher(base, drop_names):
return (base,)
@array_function_dispatch(_rec_drop_fields_dispatcher)
def rec_drop_fields(base, drop_names):
"""
Returns a new numpy.recarray with fields in `drop_names` dropped.
"""
return drop_fields(base, drop_names, usemask=False, asrecarray=True)
def _rename_fields_dispatcher(base, namemapper):
return (base,)
@array_function_dispatch(_rename_fields_dispatcher)
def rename_fields(base, namemapper):
"""
Rename the fields from a flexible-datatype ndarray or recarray.
Nested fields are supported.
Parameters
----------
base : ndarray
Input array whose fields must be modified.
namemapper : dictionary
Dictionary mapping old field names to their new version.
Examples
--------
>>> from numpy.lib import recfunctions as rfn
>>> a = np.array([(1, (2, [3.0, 30.])), (4, (5, [6.0, 60.]))],
... dtype=[('a', int),('b', [('ba', float), ('bb', (float, 2))])])
>>> rfn.rename_fields(a, {'a':'A', 'bb':'BB'})
array([(1, (2., [ 3., 30.])), (4, (5., [ 6., 60.]))],
dtype=[('A', '<i8'), ('b', [('ba', '<f8'), ('BB', '<f8', (2,))])])
"""
def _recursive_rename_fields(ndtype, namemapper):
newdtype = []
for name in ndtype.names:
newname = namemapper.get(name, name)
current = ndtype[name]
if current.names is not None:
newdtype.append(
(newname, _recursive_rename_fields(current, namemapper))
)
else:
newdtype.append((newname, current))
return newdtype
newdtype = _recursive_rename_fields(base.dtype, namemapper)
return base.view(newdtype)
def _append_fields_dispatcher(base, names, data, dtypes=None,
fill_value=None, usemask=None, asrecarray=None):
yield base
yield from data
@array_function_dispatch(_append_fields_dispatcher)
def append_fields(base, names, data, dtypes=None,
fill_value=-1, usemask=True, asrecarray=False):
"""
Add new fields to an existing array.
The names of the fields are given with the `names` arguments,
the corresponding values with the `data` arguments.
If a single field is appended, `names`, `data` and `dtypes` do not have
to be lists but just values.
Parameters
----------
base : array
Input array to extend.
names : string, sequence
String or sequence of strings corresponding to the names
of the new fields.
data : array or sequence of arrays
Array or sequence of arrays storing the fields to add to the base.
dtypes : sequence of datatypes, optional
Datatype or sequence of datatypes.
If None, the datatypes are estimated from the `data`.
fill_value : {float}, optional
Filling value used to pad missing data on the shorter arrays.
usemask : {False, True}, optional
Whether to return a masked array or not.
asrecarray : {False, True}, optional
Whether to return a recarray (MaskedRecords) or not.
"""
# Check the names
if isinstance(names, (tuple, list)):
if len(names) != len(data):
msg = "The number of arrays does not match the number of names"
raise ValueError(msg)
elif isinstance(names, str):
names = [names, ]
data = [data, ]
#
if dtypes is None:
data = [np.array(a, copy=False, subok=True) for a in data]
data = [a.view([(name, a.dtype)]) for (name, a) in zip(names, data)]
else:
if not isinstance(dtypes, (tuple, list)):
dtypes = [dtypes, ]
if len(data) != len(dtypes):
if len(dtypes) == 1:
dtypes = dtypes * len(data)
else:
msg = "The dtypes argument must be None, a dtype, or a list."
raise ValueError(msg)
data = [np.array(a, copy=False, subok=True, dtype=d).view([(n, d)])
for (a, n, d) in zip(data, names, dtypes)]
#
base = merge_arrays(base, usemask=usemask, fill_value=fill_value)
if len(data) > 1:
data = merge_arrays(data, flatten=True, usemask=usemask,
fill_value=fill_value)
else:
data = data.pop()
#
output = ma.masked_all(
max(len(base), len(data)),
dtype=_get_fieldspec(base.dtype) + _get_fieldspec(data.dtype))
output = recursive_fill_fields(base, output)
output = recursive_fill_fields(data, output)
#
return _fix_output(output, usemask=usemask, asrecarray=asrecarray)
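# A minimal usage sketch for `append_fields` (the field names and values below
# are illustrative only), in the doctest style used elsewhere in this module:
#
# >>> from numpy.lib import recfunctions as rfn
# >>> a = np.array([(1, 10.), (2, 20.)], dtype=[('A', np.int64), ('B', np.float64)])
# >>> b = rfn.append_fields(a, 'C', data=[100, 200], usemask=False)
# >>> b.dtype.names
# ('A', 'B', 'C')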
def _rec_append_fields_dispatcher(base, names, data, dtypes=None):
yield base
yield from data
@array_function_dispatch(_rec_append_fields_dispatcher)
def rec_append_fields(base, names, data, dtypes=None):
"""
Add new fields to an existing array.
The names of the fields are given with the `names` arguments,
the corresponding values with the `data` arguments.
If a single field is appended, `names`, `data` and `dtypes` do not have
to be lists but just values.
Parameters
----------
base : array
Input array to extend.
names : string, sequence
String or sequence of strings corresponding to the names
of the new fields.
data : array or sequence of arrays
Array or sequence of arrays storing the fields to add to the base.
dtypes : sequence of datatypes, optional
Datatype or sequence of datatypes.
If None, the datatypes are estimated from the `data`.
See Also
--------
append_fields
Returns
-------
appended_array : np.recarray
"""
return append_fields(base, names, data=data, dtypes=dtypes,
asrecarray=True, usemask=False)
def _repack_fields_dispatcher(a, align=None, recurse=None):
return (a,)
@array_function_dispatch(_repack_fields_dispatcher)
def repack_fields(a, align=False, recurse=False):
"""
Re-pack the fields of a structured array or dtype in memory.
The memory layout of structured datatypes allows fields at arbitrary
byte offsets. This means the fields can be separated by padding bytes,
their offsets can be non-monotonically increasing, and they can overlap.
This method removes any overlaps and reorders the fields in memory so they
have increasing byte offsets, and adds or removes padding bytes depending
on the `align` option, which behaves like the `align` option to
`numpy.dtype`.
If `align=False`, this method produces a "packed" memory layout in which
each field starts at the byte the previous field ended, and any padding
bytes are removed.
If `align=True`, this methods produces an "aligned" memory layout in which
each field's offset is a multiple of its alignment, and the total itemsize
is a multiple of the largest alignment, by adding padding bytes as needed.
Parameters
----------
a : ndarray or dtype
array or dtype for which to repack the fields.
align : boolean
If true, use an "aligned" memory layout, otherwise use a "packed" layout.
recurse : boolean
If True, also repack nested structures.
Returns
-------
repacked : ndarray or dtype
Copy of `a` with fields repacked, or `a` itself if no repacking was
needed.
Examples
--------
>>> from numpy.lib import recfunctions as rfn
>>> def print_offsets(d):
... print("offsets:", [d.fields[name][1] for name in d.names])
... print("itemsize:", d.itemsize)
...
>>> dt = np.dtype('u1, <i8, <f8', align=True)
>>> dt
dtype({'names': ['f0', 'f1', 'f2'], 'formats': ['u1', '<i8', '<f8'], \
'offsets': [0, 8, 16], 'itemsize': 24}, align=True)
>>> print_offsets(dt)
offsets: [0, 8, 16]
itemsize: 24
>>> packed_dt = rfn.repack_fields(dt)
>>> packed_dt
dtype([('f0', 'u1'), ('f1', '<i8'), ('f2', '<f8')])
>>> print_offsets(packed_dt)
offsets: [0, 1, 9]
itemsize: 17
"""
if not isinstance(a, np.dtype):
dt = repack_fields(a.dtype, align=align, recurse=recurse)
return a.astype(dt, copy=False)
if a.names is None:
return a
fieldinfo = []
for name in a.names:
tup = a.fields[name]
if recurse:
fmt = repack_fields(tup[0], align=align, recurse=True)
else:
fmt = tup[0]
if len(tup) == 3:
name = (tup[2], name)
fieldinfo.append((name, fmt))
dt = np.dtype(fieldinfo, align=align)
return np.dtype((a.type, dt))
def _get_fields_and_offsets(dt, offset=0):
"""
Returns a flat list of (dtype, count, offset) tuples of all the
scalar fields in the dtype "dt", including nested fields, in left
to right order.
"""
# counts up elements in subarrays, including nested subarrays, and returns
# base dtype and count
def count_elem(dt):
count = 1
while dt.shape != ():
for size in dt.shape:
count *= size
dt = dt.base
return dt, count
fields = []
for name in dt.names:
field = dt.fields[name]
f_dt, f_offset = field[0], field[1]
f_dt, n = count_elem(f_dt)
if f_dt.names is None:
fields.append((np.dtype((f_dt, (n,))), n, f_offset + offset))
else:
subfields = _get_fields_and_offsets(f_dt, f_offset + offset)
size = f_dt.itemsize
for i in range(n):
if i == 0:
# optimization: avoid list comprehension if no subarray
fields.extend(subfields)
else:
fields.extend([(d, c, o + i*size) for d, c, o in subfields])
return fields
def _structured_to_unstructured_dispatcher(arr, dtype=None, copy=None,
casting=None):
return (arr,)
@array_function_dispatch(_structured_to_unstructured_dispatcher)
def structured_to_unstructured(arr, dtype=None, copy=False, casting='unsafe'):
"""
Converts an n-D structured array into an (n+1)-D unstructured array.
The new array will have a new last dimension equal in size to the
number of field-elements of the input array. If not supplied, the output
datatype is determined from the numpy type promotion rules applied to all
the field datatypes.
Nested fields, as well as each element of any subarray fields, all count
as a single field-elements.
Parameters
----------
arr : ndarray
Structured array or dtype to convert. Cannot contain object datatype.
dtype : dtype, optional
The dtype of the output unstructured array.
copy : bool, optional
See copy argument to `numpy.ndarray.astype`. If true, always return a
copy. If false, and `dtype` requirements are satisfied, a view is
returned.
casting : {'no', 'equiv', 'safe', 'same_kind', 'unsafe'}, optional
See casting argument of `numpy.ndarray.astype`. Controls what kind of
data casting may occur.
Returns
-------
unstructured : ndarray
Unstructured array with one more dimension.
Examples
--------
>>> from numpy.lib import recfunctions as rfn
>>> a = np.zeros(4, dtype=[('a', 'i4'), ('b', 'f4,u2'), ('c', 'f4', 2)])
>>> a
array([(0, (0., 0), [0., 0.]), (0, (0., 0), [0., 0.]),
(0, (0., 0), [0., 0.]), (0, (0., 0), [0., 0.])],
dtype=[('a', '<i4'), ('b', [('f0', '<f4'), ('f1', '<u2')]), ('c', '<f4', (2,))])
>>> rfn.structured_to_unstructured(a)
array([[0., 0., 0., 0., 0.],
[0., 0., 0., 0., 0.],
[0., 0., 0., 0., 0.],
[0., 0., 0., 0., 0.]])
>>> b = np.array([(1, 2, 5), (4, 5, 7), (7, 8 ,11), (10, 11, 12)],
... dtype=[('x', 'i4'), ('y', 'f4'), ('z', 'f8')])
>>> np.mean(rfn.structured_to_unstructured(b[['x', 'z']]), axis=-1)
array([ 3. , 5.5, 9. , 11. ])
"""
if arr.dtype.names is None:
raise ValueError('arr must be a structured array')
fields = _get_fields_and_offsets(arr.dtype)
n_fields = len(fields)
if n_fields == 0 and dtype is None:
raise ValueError("arr has no fields. Unable to guess dtype")
elif n_fields == 0:
# too many bugs elsewhere for this to work now
raise NotImplementedError("arr with no fields is not supported")
dts, counts, offsets = zip(*fields)
names = ['f{}'.format(n) for n in range(n_fields)]
if dtype is None:
out_dtype = np.result_type(*[dt.base for dt in dts])
else:
out_dtype = dtype
# Use a series of views and casts to convert to an unstructured array:
# first view using flattened fields (doesn't work for object arrays)
# Note: dts may include a shape for subarrays
flattened_fields = np.dtype({'names': names,
'formats': dts,
'offsets': offsets,
'itemsize': arr.dtype.itemsize})
with suppress_warnings() as sup: # until 1.16 (gh-12447)
sup.filter(FutureWarning, "Numpy has detected")
arr = arr.view(flattened_fields)
# next cast to a packed format with all fields converted to new dtype
packed_fields = np.dtype({'names': names,
'formats': [(out_dtype, dt.shape) for dt in dts]})
arr = arr.astype(packed_fields, copy=copy, casting=casting)
# finally is it safe to view the packed fields as the unstructured type
return arr.view((out_dtype, (sum(counts),)))
def _unstructured_to_structured_dispatcher(arr, dtype=None, names=None,
align=None, copy=None, casting=None):
return (arr,)
@array_function_dispatch(_unstructured_to_structured_dispatcher)
def unstructured_to_structured(arr, dtype=None, names=None, align=False,
copy=False, casting='unsafe'):
"""
Converts an n-D unstructured array into an (n-1)-D structured array.
The last dimension of the input array is converted into a structure, with
number of field-elements equal to the size of the last dimension of the
input array. By default all output fields have the input array's dtype, but
an output structured dtype with an equal number of fields-elements can be
supplied instead.
Nested fields, as well as each element of any subarray fields, all count
towards the number of field-elements.
Parameters
----------
arr : ndarray
Unstructured array or dtype to convert.
dtype : dtype, optional
The structured dtype of the output array
names : list of strings, optional
If dtype is not supplied, this specifies the field names for the output
dtype, in order. The field dtypes will be the same as the input array.
align : boolean, optional
Whether to create an aligned memory layout.
copy : bool, optional
See copy argument to `numpy.ndarray.astype`. If true, always return a
copy. If false, and `dtype` requirements are satisfied, a view is
returned.
casting : {'no', 'equiv', 'safe', 'same_kind', 'unsafe'}, optional
See casting argument of `numpy.ndarray.astype`. Controls what kind of
data casting may occur.
Returns
-------
structured : ndarray
Structured array with fewer dimensions.
Examples
--------
>>> from numpy.lib import recfunctions as rfn
>>> dt = np.dtype([('a', 'i4'), ('b', 'f4,u2'), ('c', 'f4', 2)])
>>> a = np.arange(20).reshape((4,5))
>>> a
array([[ 0, 1, 2, 3, 4],
[ 5, 6, 7, 8, 9],
[10, 11, 12, 13, 14],
[15, 16, 17, 18, 19]])
>>> rfn.unstructured_to_structured(a, dt)
array([( 0, ( 1., 2), [ 3., 4.]), ( 5, ( 6., 7), [ 8., 9.]),
(10, (11., 12), [13., 14.]), (15, (16., 17), [18., 19.])],
dtype=[('a', '<i4'), ('b', [('f0', '<f4'), ('f1', '<u2')]), ('c', '<f4', (2,))])
"""
if arr.shape == ():
raise ValueError('arr must have at least one dimension')
n_elem = arr.shape[-1]
if n_elem == 0:
# too many bugs elsewhere for this to work now
raise NotImplementedError("last axis with size 0 is not supported")
if dtype is None:
if names is None:
names = ['f{}'.format(n) for n in range(n_elem)]
out_dtype = np.dtype([(n, arr.dtype) for n in names], align=align)
fields = _get_fields_and_offsets(out_dtype)
dts, counts, offsets = zip(*fields)
else:
if names is not None:
raise ValueError("don't supply both dtype and names")
# sanity check of the input dtype
fields = _get_fields_and_offsets(dtype)
if len(fields) == 0:
dts, counts, offsets = [], [], []
else:
dts, counts, offsets = zip(*fields)
if n_elem != sum(counts):
raise ValueError('The length of the last dimension of arr must '
'be equal to the number of fields in dtype')
out_dtype = dtype
if align and not out_dtype.isalignedstruct:
raise ValueError("align was True but dtype is not aligned")
names = ['f{}'.format(n) for n in range(len(fields))]
# Use a series of views and casts to convert to a structured array:
# first view as a packed structured array of one dtype
packed_fields = np.dtype({'names': names,
'formats': [(arr.dtype, dt.shape) for dt in dts]})
arr = np.ascontiguousarray(arr).view(packed_fields)
# next cast to an unpacked but flattened format with varied dtypes
flattened_fields = np.dtype({'names': names,
'formats': dts,
'offsets': offsets,
'itemsize': out_dtype.itemsize})
arr = arr.astype(flattened_fields, copy=copy, casting=casting)
# finally view as the final nested dtype and remove the last axis
return arr.view(out_dtype)[..., 0]
def _apply_along_fields_dispatcher(func, arr):
return (arr,)
@array_function_dispatch(_apply_along_fields_dispatcher)
def apply_along_fields(func, arr):
"""
Apply function 'func' as a reduction across fields of a structured array.
This is similar to `apply_along_axis`, but treats the fields of a
structured array as an extra axis. The fields are all first cast to a
common type following the type-promotion rules from `numpy.result_type`
applied to the field's dtypes.
Parameters
----------
func : function
Function to apply on the "field" dimension. This function must
support an `axis` argument, like np.mean, np.sum, etc.
arr : ndarray
Structured array for which to apply func.
Returns
-------
out : ndarray
        Result of the reduction operation
Examples
--------
>>> from numpy.lib import recfunctions as rfn
>>> b = np.array([(1, 2, 5), (4, 5, 7), (7, 8 ,11), (10, 11, 12)],
... dtype=[('x', 'i4'), ('y', 'f4'), ('z', 'f8')])
>>> rfn.apply_along_fields(np.mean, b)
array([ 2.66666667, 5.33333333, 8.66666667, 11. ])
>>> rfn.apply_along_fields(np.mean, b[['x', 'z']])
array([ 3. , 5.5, 9. , 11. ])
"""
if arr.dtype.names is None:
raise ValueError('arr must be a structured array')
uarr = structured_to_unstructured(arr)
return func(uarr, axis=-1)
# works and avoids axis requirement, but very, very slow:
#return np.apply_along_axis(func, -1, uarr)
def _assign_fields_by_name_dispatcher(dst, src, zero_unassigned=None):
return dst, src
@array_function_dispatch(_assign_fields_by_name_dispatcher)
def assign_fields_by_name(dst, src, zero_unassigned=True):
"""
Assigns values from one structured array to another by field name.
Normally in numpy >= 1.14, assignment of one structured array to another
copies fields "by position", meaning that the first field from the src is
copied to the first field of the dst, and so on, regardless of field name.
This function instead copies "by field name", such that fields in the dst
are assigned from the identically named field in the src. This applies
recursively for nested structures. This is how structure assignment worked
in numpy >= 1.6 to <= 1.13.
Parameters
----------
dst : ndarray
src : ndarray
The source and destination arrays during assignment.
zero_unassigned : bool, optional
If True, fields in the dst for which there was no matching
field in the src are filled with the value 0 (zero). This
was the behavior of numpy <= 1.13. If False, those fields
are not modified.
"""
if dst.dtype.names is None:
dst[...] = src
return
for name in dst.dtype.names:
if name not in src.dtype.names:
if zero_unassigned:
dst[name] = 0
else:
assign_fields_by_name(dst[name], src[name],
zero_unassigned)
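# A minimal usage sketch for `assign_fields_by_name` (illustrative dtypes), in
# the doctest style used elsewhere in this module:
#
# >>> from numpy.lib import recfunctions as rfn
# >>> dst = np.zeros(2, dtype=[('A', 'i4'), ('B', 'f4')])
# >>> src = np.array([(1., 10), (2., 20)], dtype=[('B', 'f4'), ('C', 'i4')])
# >>> rfn.assign_fields_by_name(dst, src)
# >>> dst['B']  # copied by name from src; 'A' has no match and is zero-filled
# array([1., 2.], dtype=float32)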
def _require_fields_dispatcher(array, required_dtype):
return (array,)
@array_function_dispatch(_require_fields_dispatcher)
def require_fields(array, required_dtype):
"""
Casts a structured array to a new dtype using assignment by field-name.
This function assigns from the old to the new array by name, so the
value of a field in the output array is the value of the field with the
same name in the source array. This has the effect of creating a new
ndarray containing only the fields "required" by the required_dtype.
If a field name in the required_dtype does not exist in the
input array, that field is created and set to 0 in the output array.
Parameters
----------
    array : ndarray
array to cast
required_dtype : dtype
datatype for output array
Returns
-------
out : ndarray
array with the new dtype, with field values copied from the fields in
the input array with the same name
Examples
--------
>>> from numpy.lib import recfunctions as rfn
>>> a = np.ones(4, dtype=[('a', 'i4'), ('b', 'f8'), ('c', 'u1')])
>>> rfn.require_fields(a, [('b', 'f4'), ('c', 'u1')])
array([(1., 1), (1., 1), (1., 1), (1., 1)],
dtype=[('b', '<f4'), ('c', 'u1')])
>>> rfn.require_fields(a, [('b', 'f4'), ('newf', 'u1')])
array([(1., 0), (1., 0), (1., 0), (1., 0)],
dtype=[('b', '<f4'), ('newf', 'u1')])
"""
out = np.empty(array.shape, dtype=required_dtype)
assign_fields_by_name(out, array)
return out
def _stack_arrays_dispatcher(arrays, defaults=None, usemask=None,
asrecarray=None, autoconvert=None):
return arrays
@array_function_dispatch(_stack_arrays_dispatcher)
def stack_arrays(arrays, defaults=None, usemask=True, asrecarray=False,
autoconvert=False):
"""
    Superposes arrays field by field
Parameters
----------
arrays : array or sequence
Sequence of input arrays.
defaults : dictionary, optional
Dictionary mapping field names to the corresponding default values.
usemask : {True, False}, optional
Whether to return a MaskedArray (or MaskedRecords is
`asrecarray==True`) or a ndarray.
asrecarray : {False, True}, optional
Whether to return a recarray (or MaskedRecords if `usemask==True`)
or just a flexible-type ndarray.
autoconvert : {False, True}, optional
        Whether to automatically cast the type of the field to the maximum.
Examples
--------
>>> from numpy.lib import recfunctions as rfn
>>> x = np.array([1, 2,])
>>> rfn.stack_arrays(x) is x
True
>>> z = np.array([('A', 1), ('B', 2)], dtype=[('A', '|S3'), ('B', float)])
>>> zz = np.array([('a', 10., 100.), ('b', 20., 200.), ('c', 30., 300.)],
... dtype=[('A', '|S3'), ('B', np.double), ('C', np.double)])
>>> test = rfn.stack_arrays((z,zz))
>>> test
masked_array(data=[(b'A', 1.0, --), (b'B', 2.0, --), (b'a', 10.0, 100.0),
(b'b', 20.0, 200.0), (b'c', 30.0, 300.0)],
mask=[(False, False, True), (False, False, True),
(False, False, False), (False, False, False),
(False, False, False)],
fill_value=(b'N/A', 1.e+20, 1.e+20),
dtype=[('A', 'S3'), ('B', '<f8'), ('C', '<f8')])
"""
if isinstance(arrays, ndarray):
return arrays
elif len(arrays) == 1:
return arrays[0]
seqarrays = [np.asanyarray(a).ravel() for a in arrays]
nrecords = [len(a) for a in seqarrays]
ndtype = [a.dtype for a in seqarrays]
fldnames = [d.names for d in ndtype]
#
dtype_l = ndtype[0]
newdescr = _get_fieldspec(dtype_l)
names = [n for n, d in newdescr]
for dtype_n in ndtype[1:]:
for fname, fdtype in _get_fieldspec(dtype_n):
if fname not in names:
newdescr.append((fname, fdtype))
names.append(fname)
else:
nameidx = names.index(fname)
_, cdtype = newdescr[nameidx]
if autoconvert:
newdescr[nameidx] = (fname, max(fdtype, cdtype))
elif fdtype != cdtype:
raise TypeError("Incompatible type '%s' <> '%s'" %
(cdtype, fdtype))
# Only one field: use concatenate
if len(newdescr) == 1:
output = ma.concatenate(seqarrays)
else:
#
output = ma.masked_all((np.sum(nrecords),), newdescr)
offset = np.cumsum(np.r_[0, nrecords])
seen = []
for (a, n, i, j) in zip(seqarrays, fldnames, offset[:-1], offset[1:]):
names = a.dtype.names
if names is None:
output['f%i' % len(seen)][i:j] = a
else:
for name in n:
output[name][i:j] = a[name]
if name not in seen:
seen.append(name)
#
return _fix_output(_fix_defaults(output, defaults),
usemask=usemask, asrecarray=asrecarray)
def _find_duplicates_dispatcher(
a, key=None, ignoremask=None, return_index=None):
return (a,)
@array_function_dispatch(_find_duplicates_dispatcher)
def find_duplicates(a, key=None, ignoremask=True, return_index=False):
"""
Find the duplicates in a structured array along a given key
Parameters
----------
a : array-like
Input array
key : {string, None}, optional
Name of the fields along which to check the duplicates.
If None, the search is performed by records
ignoremask : {True, False}, optional
Whether masked data should be discarded or considered as duplicates.
return_index : {False, True}, optional
Whether to return the indices of the duplicated values.
Examples
--------
>>> from numpy.lib import recfunctions as rfn
>>> ndtype = [('a', int)]
>>> a = np.ma.array([1, 1, 1, 2, 2, 3, 3],
... mask=[0, 0, 1, 0, 0, 0, 1]).view(ndtype)
>>> rfn.find_duplicates(a, ignoremask=True, return_index=True)
(masked_array(data=[(1,), (1,), (2,), (2,)],
mask=[(False,), (False,), (False,), (False,)],
fill_value=(999999,),
dtype=[('a', '<i8')]), array([0, 1, 3, 4]))
"""
a = np.asanyarray(a).ravel()
# Get a dictionary of fields
fields = get_fieldstructure(a.dtype)
# Get the sorting data (by selecting the corresponding field)
base = a
if key:
for f in fields[key]:
base = base[f]
base = base[key]
# Get the sorting indices and the sorted data
sortidx = base.argsort()
sortedbase = base[sortidx]
sorteddata = sortedbase.filled()
# Compare the sorting data
flag = (sorteddata[:-1] == sorteddata[1:])
# If masked data must be ignored, set the flag to false where needed
if ignoremask:
sortedmask = sortedbase.recordmask
flag[sortedmask[1:]] = False
flag = np.concatenate(([False], flag))
# We need to take the point on the left as well (else we're missing it)
flag[:-1] = flag[:-1] + flag[1:]
duplicates = a[sortidx][flag]
if return_index:
return (duplicates, sortidx[flag])
else:
return duplicates
def _join_by_dispatcher(
key, r1, r2, jointype=None, r1postfix=None, r2postfix=None,
defaults=None, usemask=None, asrecarray=None):
return (r1, r2)
@array_function_dispatch(_join_by_dispatcher)
def join_by(key, r1, r2, jointype='inner', r1postfix='1', r2postfix='2',
defaults=None, usemask=True, asrecarray=False):
"""
Join arrays `r1` and `r2` on key `key`.
    The key should be either a string or a sequence of strings corresponding
    to the fields used to join the arrays. An exception is raised if the
`key` field cannot be found in the two input arrays. Neither `r1` nor
`r2` should have any duplicates along `key`: the presence of duplicates
will make the output quite unreliable. Note that duplicates are not
looked for by the algorithm.
Parameters
----------
key : {string, sequence}
A string or a sequence of strings corresponding to the fields used
for comparison.
r1, r2 : arrays
Structured arrays.
jointype : {'inner', 'outer', 'leftouter'}, optional
If 'inner', returns the elements common to both r1 and r2.
        If 'outer', returns the common elements as well as the elements of
        r1 not in r2 and the elements of r2 not in r1.
If 'leftouter', returns the common elements and the elements of r1
not in r2.
r1postfix : string, optional
String appended to the names of the fields of r1 that are present
        in r2 but absent from the key.
r2postfix : string, optional
String appended to the names of the fields of r2 that are present
        in r1 but absent from the key.
defaults : {dictionary}, optional
Dictionary mapping field names to the corresponding default values.
usemask : {True, False}, optional
        Whether to return a MaskedArray (or MaskedRecords if
`asrecarray==True`) or a ndarray.
asrecarray : {False, True}, optional
Whether to return a recarray (or MaskedRecords if `usemask==True`)
or just a flexible-type ndarray.
Notes
-----
* The output is sorted along the key.
* A temporary array is formed by dropping the fields not in the key for
the two arrays and concatenating the result. This array is then
sorted, and the common entries selected. The output is constructed by
filling the fields with the selected entries. Matching is not
preserved if there are some duplicates...
"""
# Check jointype
if jointype not in ('inner', 'outer', 'leftouter'):
raise ValueError(
"The 'jointype' argument should be in 'inner', "
"'outer' or 'leftouter' (got '%s' instead)" % jointype
)
# If we have a single key, put it in a tuple
if isinstance(key, str):
key = (key,)
# Check the keys
if len(set(key)) != len(key):
dup = next(x for n,x in enumerate(key) if x in key[n+1:])
raise ValueError("duplicate join key %r" % dup)
for name in key:
if name not in r1.dtype.names:
raise ValueError('r1 does not have key field %r' % name)
if name not in r2.dtype.names:
raise ValueError('r2 does not have key field %r' % name)
# Make sure we work with ravelled arrays
r1 = r1.ravel()
r2 = r2.ravel()
# Fixme: nb2 below is never used. Commenting out for pyflakes.
# (nb1, nb2) = (len(r1), len(r2))
nb1 = len(r1)
(r1names, r2names) = (r1.dtype.names, r2.dtype.names)
# Check the names for collision
collisions = (set(r1names) & set(r2names)) - set(key)
if collisions and not (r1postfix or r2postfix):
msg = "r1 and r2 contain common names, r1postfix and r2postfix "
msg += "can't both be empty"
raise ValueError(msg)
# Make temporary arrays of just the keys
# (use order of keys in `r1` for back-compatibility)
key1 = [ n for n in r1names if n in key ]
r1k = _keep_fields(r1, key1)
r2k = _keep_fields(r2, key1)
# Concatenate the two arrays for comparison
aux = ma.concatenate((r1k, r2k))
idx_sort = aux.argsort(order=key)
aux = aux[idx_sort]
#
# Get the common keys
flag_in = ma.concatenate(([False], aux[1:] == aux[:-1]))
flag_in[:-1] = flag_in[1:] + flag_in[:-1]
idx_in = idx_sort[flag_in]
idx_1 = idx_in[(idx_in < nb1)]
idx_2 = idx_in[(idx_in >= nb1)] - nb1
(r1cmn, r2cmn) = (len(idx_1), len(idx_2))
if jointype == 'inner':
(r1spc, r2spc) = (0, 0)
elif jointype == 'outer':
idx_out = idx_sort[~flag_in]
idx_1 = np.concatenate((idx_1, idx_out[(idx_out < nb1)]))
idx_2 = np.concatenate((idx_2, idx_out[(idx_out >= nb1)] - nb1))
(r1spc, r2spc) = (len(idx_1) - r1cmn, len(idx_2) - r2cmn)
elif jointype == 'leftouter':
idx_out = idx_sort[~flag_in]
idx_1 = np.concatenate((idx_1, idx_out[(idx_out < nb1)]))
(r1spc, r2spc) = (len(idx_1) - r1cmn, 0)
# Select the entries from each input
(s1, s2) = (r1[idx_1], r2[idx_2])
#
# Build the new description of the output array .......
# Start with the key fields
ndtype = _get_fieldspec(r1k.dtype)
# Add the fields from r1
for fname, fdtype in _get_fieldspec(r1.dtype):
if fname not in key:
ndtype.append((fname, fdtype))
# Add the fields from r2
for fname, fdtype in _get_fieldspec(r2.dtype):
# Have we seen the current name already ?
# we need to rebuild this list every time
names = list(name for name, dtype in ndtype)
try:
nameidx = names.index(fname)
except ValueError:
#... we haven't: just add the description to the current list
ndtype.append((fname, fdtype))
else:
# collision
_, cdtype = ndtype[nameidx]
if fname in key:
# The current field is part of the key: take the largest dtype
ndtype[nameidx] = (fname, max(fdtype, cdtype))
else:
# The current field is not part of the key: add the suffixes,
# and place the new field adjacent to the old one
ndtype[nameidx:nameidx + 1] = [
(fname + r1postfix, cdtype),
(fname + r2postfix, fdtype)
]
# Rebuild a dtype from the new fields
ndtype = np.dtype(ndtype)
# Find the largest nb of common fields :
# r1cmn and r2cmn should be equal, but...
cmn = max(r1cmn, r2cmn)
# Construct an empty array
output = ma.masked_all((cmn + r1spc + r2spc,), dtype=ndtype)
names = output.dtype.names
for f in r1names:
selected = s1[f]
if f not in names or (f in r2names and not r2postfix and f not in key):
f += r1postfix
current = output[f]
current[:r1cmn] = selected[:r1cmn]
if jointype in ('outer', 'leftouter'):
current[cmn:cmn + r1spc] = selected[r1cmn:]
for f in r2names:
selected = s2[f]
if f not in names or (f in r1names and not r1postfix and f not in key):
f += r2postfix
current = output[f]
current[:r2cmn] = selected[:r2cmn]
if (jointype == 'outer') and r2spc:
current[-r2spc:] = selected[r2cmn:]
# Sort and finalize the output
output.sort(order=key)
kwargs = dict(usemask=usemask, asrecarray=asrecarray)
return _fix_output(_fix_defaults(output, defaults), **kwargs)
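# Hedged usage sketch for join_by (not part of the NumPy source; the input arrays and the
# exact dtype repr below are illustrative and may differ by platform):
#     >>> from numpy.lib import recfunctions as rfn
#     >>> a = np.array([(1, 10.), (2, 20.), (3, 30.)], dtype=[('key', int), ('x', float)])
#     >>> b = np.array([(2, 200.), (3, 300.), (4, 400.)], dtype=[('key', int), ('y', float)])
#     >>> rfn.join_by('key', a, b, jointype='inner', usemask=False)
#     array([(2, 20., 200.), (3, 30., 300.)],
#           dtype=[('key', '<i8'), ('x', '<f8'), ('y', '<f8')])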
def _rec_join_dispatcher(
key, r1, r2, jointype=None, r1postfix=None, r2postfix=None,
defaults=None):
return (r1, r2)
@array_function_dispatch(_rec_join_dispatcher)
def rec_join(key, r1, r2, jointype='inner', r1postfix='1', r2postfix='2',
defaults=None):
"""
Join arrays `r1` and `r2` on keys.
Alternative to join_by, that always returns a np.recarray.
See Also
--------
join_by : equivalent function
"""
kwargs = dict(jointype=jointype, r1postfix=r1postfix, r2postfix=r2postfix,
defaults=defaults, usemask=False, asrecarray=True)
return join_by(key, r1, r2, **kwargs)
| 35.379397 | 90 | 0.593069 |
4a27c3618409b93222666114f0530ff35c80cdab | 2,716 | py | Python | jcvi/apps/emboss.py | fossabot/jcvi | 86948affd63e94c8327cf117c47d36940b508b68 | [
"BSD-2-Clause"
] | 1 | 2020-10-04T13:21:24.000Z | 2020-10-04T13:21:24.000Z | jcvi/apps/emboss.py | Wangjien/jcvi | 6732285f62dcbd7f3878e5017c3350124530c796 | [
"BSD-2-Clause"
] | null | null | null | jcvi/apps/emboss.py | Wangjien/jcvi | 6732285f62dcbd7f3878e5017c3350124530c796 | [
"BSD-2-Clause"
] | 1 | 2020-11-16T19:25:30.000Z | 2020-11-16T19:25:30.000Z | #!/usr/bin/env python
# -*- coding: UTF-8 -*-
"""
Run EMBOSS programs.
"""
from __future__ import print_function
import sys
import multiprocessing as mp
from jcvi.apps.base import OptionParser, ActionDispatcher
from jcvi.formats.base import FileShredder, must_open
class NeedleHeader (object):
def __init__(self, filename):
fp = must_open(filename)
for row in fp:
if row[0] != '#':
continue
# Identity: 89/89 (100.0%)
if row.startswith('# Identity'):
self.identity = row.split(":")[-1].strip()
if row.startswith('# Score'):
self.score = row.split(":")[-1].strip()
def main():
actions = (
('needle', 'take protein pairs and needle them'),
)
p = ActionDispatcher(actions)
p.dispatch(globals())
def _needle(fa, fb, needlefile, a, b, results):
"""
Run single needle job
"""
from Bio.Emboss.Applications import NeedleCommandline
needle_cline = NeedleCommandline(asequence=fa, bsequence=fb,
gapopen=10, gapextend=0.5, outfile=needlefile)
stdout, stderr = needle_cline()
nh = NeedleHeader(needlefile)
FileShredder([fa, fb, needlefile], verbose=False)
r = ["\t".join((a, b, nh.identity, nh.score))]
results.extend(r)
def needle(args):
"""
%prog needle nw.pairs a.pep.fasta b.pep.fasta
Take protein pairs and needle them
Automatically writes output file `nw.scores`
"""
from jcvi.formats.fasta import Fasta, SeqIO
p = OptionParser(needle.__doc__)
opts, args = p.parse_args(args)
if len(args) != 3:
sys.exit(not p.print_help())
manager = mp.Manager()
results = manager.list()
needle_pool = mp.Pool(processes=mp.cpu_count())
pairsfile, apep, bpep = args
afasta, bfasta = Fasta(apep), Fasta(bpep)
fp = must_open(pairsfile)
for i, row in enumerate(fp):
a, b = row.split()
a, b = afasta[a], bfasta[b]
fa, fb = must_open("{0}_{1}_a.fasta".format(pairsfile, i), "w"), \
must_open("{0}_{1}_b.fasta".format(pairsfile, i), "w")
SeqIO.write([a], fa, "fasta")
SeqIO.write([b], fb, "fasta")
fa.close()
fb.close()
needlefile = "{0}_{1}_ab.needle".format(pairsfile, i)
needle_pool.apply_async(_needle, \
(fa.name, fb.name, needlefile, a.id, b.id, results))
needle_pool.close()
needle_pool.join()
fp.close()
scoresfile = "{0}.scores".format(pairsfile.rsplit(".")[0])
fw = must_open(scoresfile, "w")
for result in results:
print(result, file=fw)
fw.close()
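# Hedged usage sketch (not part of the original module). Assuming EMBOSS `needle`, Biopython
# and the hypothetical inputs nw.pairs, a.pep.fasta, b.pep.fasta are available, the script
# would typically be driven from the command line:
#     python emboss.py needle nw.pairs a.pep.fasta b.pep.fasta
# Each line of nw.pairs is expected to hold one "idA idB" pair; the run writes nw.scores
# with one tab-separated record per pair: idA, idB, identity (e.g. "89/89 (100.0%)"), score.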
if __name__ == '__main__':
main()
| 25.866667 | 74 | 0.593888 |
4a27c3c748ae2562fad0e827c61f197ed0cdaa6a | 9,387 | py | Python | src/utils/evaluation_utils.py | phuonghx/RTM3D | 0bd3868a03f071244b2fed9ca1828298f5a96180 | [
"MIT"
] | 268 | 2020-08-09T07:49:21.000Z | 2022-03-29T03:50:42.000Z | src/utils/evaluation_utils.py | phuonghx/RTM3D | 0bd3868a03f071244b2fed9ca1828298f5a96180 | [
"MIT"
] | 22 | 2020-08-09T07:58:21.000Z | 2022-03-25T08:56:18.000Z | src/utils/evaluation_utils.py | phuonghx/RTM3D | 0bd3868a03f071244b2fed9ca1828298f5a96180 | [
"MIT"
] | 56 | 2020-08-10T14:05:53.000Z | 2022-01-25T04:28:37.000Z | """
# -*- coding: utf-8 -*-
-----------------------------------------------------------------------------------
# Author: Nguyen Mau Dung
# DoC: 2020.08.10
# email: [email protected]
-----------------------------------------------------------------------------------
# Description: The utils for evaluation
# Refer from: https://github.com/xingyizhou/CenterNet
"""
from __future__ import division
import sys
import torch
import numpy as np
import torch.nn.functional as F
import cv2
sys.path.append('../')
from data_process.kitti_data_utils import draw_box_3d
def _nms(heat, kernel=3):
pad = (kernel - 1) // 2
hmax = F.max_pool2d(heat, (kernel, kernel), stride=1, padding=pad)
keep = (hmax == heat).float()
return heat * keep
def _gather_feat(feat, ind, mask=None):
dim = feat.size(2)
ind = ind.unsqueeze(2).expand(ind.size(0), ind.size(1), dim)
feat = feat.gather(1, ind)
if mask is not None:
mask = mask.unsqueeze(2).expand_as(feat)
feat = feat[mask]
feat = feat.view(-1, dim)
return feat
def _transpose_and_gather_feat(feat, ind):
feat = feat.permute(0, 2, 3, 1).contiguous()
feat = feat.view(feat.size(0), -1, feat.size(3))
feat = _gather_feat(feat, ind)
return feat
def _topk(scores, K=40):
batch, cat, height, width = scores.size()
topk_scores, topk_inds = torch.topk(scores.view(batch, cat, -1), K)
topk_inds = topk_inds % (height * width)
topk_ys = (topk_inds / width).int().float()
topk_xs = (topk_inds % width).int().float()
topk_score, topk_ind = torch.topk(topk_scores.view(batch, -1), K)
topk_clses = (topk_ind / K).int()
topk_inds = _gather_feat(topk_inds.view(batch, -1, 1), topk_ind).view(batch, K)
topk_ys = _gather_feat(topk_ys.view(batch, -1, 1), topk_ind).view(batch, K)
topk_xs = _gather_feat(topk_xs.view(batch, -1, 1), topk_ind).view(batch, K)
return topk_score, topk_inds, topk_clses, topk_ys, topk_xs
def _topk_channel(scores, K=40):
batch, cat, height, width = scores.size()
topk_scores, topk_inds = torch.topk(scores.view(batch, cat, -1), K)
topk_inds = topk_inds % (height * width)
topk_ys = (topk_inds / width).int().float()
topk_xs = (topk_inds % width).int().float()
return topk_scores, topk_inds, topk_ys, topk_xs
def rtm3d_decode(hm_mc, hm_ver, ver_coor, cen_off, ver_off, wh, rot, depth, dim, K=40, hm_size=(96, 320)):
device = hm_mc.device
batch_size, num_classes, height, width = hm_mc.size()
num_vertexes = hm_ver.size(1)
hm_mc = _nms(hm_mc)
scores, inds, clses, ys, xs = _topk(hm_mc, K=K)
if cen_off is not None:
cen_off = _transpose_and_gather_feat(cen_off, inds)
cen_off = cen_off.view(batch_size, K, 2)
xs = xs.view(batch_size, K, 1) + cen_off[:, :, 0:1]
ys = ys.view(batch_size, K, 1) + cen_off[:, :, 1:2]
else:
xs = xs.view(batch_size, K, 1) + 0.5
ys = ys.view(batch_size, K, 1) + 0.5
rot = _transpose_and_gather_feat(rot, inds)
rot = rot.view(batch_size, K, 8)
depth = _transpose_and_gather_feat(depth, inds)
depth = depth.view(batch_size, K, 1)
dim = _transpose_and_gather_feat(dim, inds)
dim = dim.view(batch_size, K, 3)
clses = clses.view(batch_size, K, 1).float()
scores = scores.view(batch_size, K, 1)
wh = _transpose_and_gather_feat(wh, inds)
wh = wh.view(batch_size, K, 2)
bboxes = torch.cat([xs - wh[..., 0:1] / 2,
ys - wh[..., 1:2] / 2,
xs + wh[..., 0:1] / 2,
ys + wh[..., 1:2] / 2], dim=2)
ver_coor = _transpose_and_gather_feat(ver_coor, inds)
ver_coor = ver_coor.view(batch_size, K, num_vertexes * 2)
ver_coor[..., ::2] += xs.view(batch_size, K, 1).expand(batch_size, K, num_vertexes)
ver_coor[..., 1::2] += ys.view(batch_size, K, 1).expand(batch_size, K, num_vertexes)
ver_coor = ver_coor.view(batch_size, K, num_vertexes, 2).permute(0, 2, 1, 3).contiguous() # b x num_vers x K x 2
pure_ver_pos = ver_coor.unsqueeze(3).expand(batch_size, num_vertexes, K, K, 2)
hm_ver = _nms(hm_ver)
thresh = 0.1
ver_score, ver_inds, ver_ys, ver_xs = _topk_channel(hm_ver, K=K) # b x num_vertexes x K
if ver_off is not None:
ver_off = _transpose_and_gather_feat(ver_off, ver_inds.view(batch_size, -1))
ver_off = ver_off.view(batch_size, num_vertexes, K, 2)
ver_xs = ver_xs + ver_off[:, :, :, 0]
ver_ys = ver_ys + ver_off[:, :, :, 1]
else:
ver_xs = ver_xs + 0.5
ver_ys = ver_ys + 0.5
mask = (ver_score > thresh).float()
ver_score = (1 - mask) * -1 + mask * ver_score
ver_ys = (1 - mask) * (-10000) + mask * ver_ys
ver_xs = (1 - mask) * (-10000) + mask * ver_xs
ver_pos = torch.stack([ver_xs, ver_ys], dim=-1).unsqueeze(2).expand(batch_size, num_vertexes, K, K, 2)
# dist size: (batch_size, num_vertexes, K, K, 2) --> (batch_size, num_vertexes, K, K)
dist = (((pure_ver_pos - ver_pos) ** 2).sum(dim=4) ** 0.5)
min_dist, min_ind = dist.min(dim=3) # b x num_vertexes x K
ver_score = ver_score.gather(2, min_ind).unsqueeze(-1) # b x num_vertexes x K x 1
min_dist = min_dist.unsqueeze(-1)
min_ind = min_ind.view(batch_size, num_vertexes, K, 1, 1).expand(batch_size, num_vertexes, K, 1, 2)
ver_pos = ver_pos.gather(3, min_ind)
ver_pos = ver_pos.view(batch_size, num_vertexes, K, 2)
hm_h, hm_w = hm_size
dummy_h = torch.ones(size=(batch_size, num_vertexes, K, 1), device=device, dtype=torch.float) * hm_h
dummy_w = torch.ones(size=(batch_size, num_vertexes, K, 1), device=device, dtype=torch.float) * hm_w
mask = (ver_pos[..., 0:1] < 0) + (ver_pos[..., 0:1] > hm_w) + \
(ver_pos[..., 1:2] < 0) + (ver_pos[..., 1:2] > hm_h) + \
(ver_score < thresh) + (min_dist > (torch.max(dummy_h, dummy_w) * 0.3))
mask = (mask > 0).float().expand(batch_size, num_vertexes, K, 2)
ver_coor = (1 - mask) * ver_pos + mask * ver_coor
ver_coor = ver_coor.permute(0, 2, 1, 3).contiguous().view(batch_size, K, num_vertexes * 2)
# (scores x 1, xs x 1, ys x 1, wh x 2, bboxes x 4, ver_coor x 16, rot x 8, depth x 1, dim x 3, clses x 1)
# (scores-0:1, xs-1:2, ys-2:3, wh-3:5, bboxes-5:9, ver_coor-9:25, rot-25:33, depth-33:34, dim-34:37, clses-37:38)
# detections: [batch_size, K, 38]
detections = torch.cat([scores, xs, ys, wh, bboxes, ver_coor, rot, depth, dim, clses], dim=2)
return detections
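# Hedged shape sketch (illustrative only; the concrete sizes below are assumptions, not a
# fixed API). With batch size B, num_classes C, num_vertexes V = 8 and hm_size = (96, 320):
#     hm_mc    (B, C,     96, 320)  main-center heatmap
#     hm_ver   (B, V,     96, 320)  vertex heatmaps
#     ver_coor (B, 2 * V, 96, 320)  vertex coordinates regressed from the center
#     cen_off  (B, 2,     96, 320)  sub-pixel center offset
#     ver_off  (B, 2,     96, 320)  sub-pixel vertex offset
#     wh       (B, 2,     96, 320)  2D box width/height
#     rot      (B, 8,     96, 320)  multi-bin orientation
#     depth    (B, 1,     96, 320)  depth
#     dim      (B, 3,     96, 320)  3D dimensions
# A smoke test with dummy tensors would then look like:
#     B, C, V, H, W = 1, 3, 8, 96, 320
#     z = lambda ch: torch.zeros(B, ch, H, W)
#     dets = rtm3d_decode(z(C), z(V), z(2 * V), z(2), z(2), z(2), z(8), z(1), z(3), K=40)
#     assert dets.shape == (1, 40, 38)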
def get_alpha(rot):
# output: (B, 8) [bin1_cls[0], bin1_cls[1], bin1_sin, bin1_cos,
# bin2_cls[0], bin2_cls[1], bin2_sin, bin2_cos]
# return rot[:, 0]
idx = rot[:, 1] > rot[:, 5]
alpha1 = np.arctan2(rot[:, 2], rot[:, 3]) + (-0.5 * np.pi)
alpha2 = np.arctan2(rot[:, 6], rot[:, 7]) + (0.5 * np.pi)
return alpha1 * idx + alpha2 * (1 - idx)
def get_pred_depth(depth):
return depth
def post_processing_2d(detections, num_classes=3, down_ratio=4):
"""
:param detections: [batch_size, K, 38]
# (scores x 1, xs x 1, ys x 1, wh x 2, bboxes x 4, ver_coor x 16, rot x 8, depth x 1, dim x 3, clses x 1)
# (scores-0:1, xs-1:2, ys-2:3, wh-3:5, bboxes-5:9, ver_coor-9:25, rot-25:33, depth-33:34, dim-34:37, clses-37:38)
:param conf_thresh:
:return:
"""
# TODO: Need to consider rescale to the original scale: bbox, xs, ys, and ver_coor - 1:25
ret = []
for i in range(detections.shape[0]):
top_preds = {}
classes = detections[i, :, -1]
for j in range(num_classes):
inds = (classes == j)
top_preds[j] = np.concatenate([
detections[i, inds, :1].astype(np.float32),
detections[i, inds, 1:25].astype(np.float32) * down_ratio,
get_alpha(detections[i, inds, 25:33])[:, np.newaxis].astype(np.float32),
get_pred_depth(detections[i, inds, 33:34]).astype(np.float32),
detections[i, inds, 34:37].astype(np.float32)], axis=1)
ret.append(top_preds)
return ret
def get_final_pred(detections, num_classes=3, peak_thresh=0.2):
for j in range(num_classes):
        if len(detections[j]) > 0:
keep_inds = (detections[j][:, 0] > peak_thresh)
detections[j] = detections[j][keep_inds]
return detections
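# Hedged end-to-end sketch (variable names are illustrative): the decoded tensor is usually
# moved to numpy, split per class and rescaled by post_processing_2d, then thresholded:
#     dets = post_processing_2d(detections.detach().cpu().numpy(), num_classes=3, down_ratio=4)
#     final = get_final_pred(dets[0], num_classes=3, peak_thresh=0.2)
#     # -> dict mapping class_id to an (N, 30) array of confident detections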
def draw_predictions(img, detections, colors, num_classes=3, show_3dbox=False):
for j in range(num_classes):
        if len(detections[j]) > 0:
for det in detections[j]:
# (scores-0:1, xs-1:2, ys-2:3, wh-3:5, bboxes-5:9, ver_coor-9:25, rot-25:26, depth-26:27, dim-27:30)
_score = det[0]
_x, _y, _wh, _bbox, _ver_coor = int(det[1]), int(det[2]), det[3:5], det[5:9], det[9:25]
_rot, _depth, _dim = det[25], det[26], det[27:30]
_bbox = np.array(_bbox, dtype=np.int)
img = cv2.rectangle(img, (_bbox[0], _bbox[1]), (_bbox[2], _bbox[3]), colors[-j - 1], 2)
if show_3dbox:
_ver_coor = np.array(_ver_coor, dtype=np.int).reshape(-1, 2)
img = draw_box_3d(img, _ver_coor, color=colors[j])
# print('_depth: {:.2f}n, _dim: {}, _rot: {:.2f} radian'.format(_depth, _dim, _rot))
return img
def post_processing_3d(detections, conf_thresh=0.95):
"""
"""
pass
| 39.441176 | 117 | 0.595611 |
4a27c4029d686fc78dd88c33db5f17e828a708f1 | 1,777 | py | Python | src/utils.py | jdubkim/Self-play-on-Multi-Sankes-Environment | 8e72c66110a007d6bf0ca2ff68fc0a845f3b3a42 | [
"MIT"
] | 5 | 2018-07-02T12:42:00.000Z | 2018-11-22T12:56:21.000Z | src/utils.py | jdubkim/dlcampjeju2018 | 8e72c66110a007d6bf0ca2ff68fc0a845f3b3a42 | [
"MIT"
] | null | null | null | src/utils.py | jdubkim/dlcampjeju2018 | 8e72c66110a007d6bf0ca2ff68fc0a845f3b3a42 | [
"MIT"
] | null | null | null | import gym
from gym import spaces
import numpy as np
import sys
from baselines.common import set_global_seeds
from baselines.bench import Monitor
from baselines.common.vec_env.subproc_vec_env import SubprocVecEnv
from config import Config
import cv2
cv2.ocl.setUseOpenCL(False)
class WarpFrame(gym.ObservationWrapper):
def __init__(self, env):
gym.ObservationWrapper.__init__(self, env)
if Config.USE_ATARI_SIZE:
self.width = 84
self.height = 84
else:
self.width = 12
self.height = 12
self.observation_space = spaces.Box(low=0, high=255, shape=(self.height, self.width, 6), dtype=np.uint8)
def observation(self, frame):
if Config.USE_ATARI_SIZE:
frame = cv2.resize(frame, (self.width, self.height), interpolation=cv2.INTER_AREA)
return frame
def make_basic_env(env_id, num_env, seed, start_index=0):
def make_env(rank):
def _thunk():
env = gym.make(env_id)
env.__init__(n_snakes=Config.NUM_SNAKES, n_fruits=Config.NUM_SNAKES)
env.seed(seed + rank)
env = Monitor(env, None, allow_early_resets=True)
env = WarpFrame(env)
return env
return _thunk
set_global_seeds(seed)
return SubprocVecEnv([make_env(i + start_index) for i in range(num_env)])
def get_shape(ob_space):
shape = ob_space.shape
shape = (shape[0], shape[1], int(shape[2] / 2))
return shape
def get_opponent_file(i, opponent_idx):
return 'opponent{0}_{1}.pkl'.format(i, opponent_idx)
def get_opponent1_file(x): # TODO: get_opponent_file(i, opponents_idx[i])
return 'opponent1_' + str(x) + '.pkl'
def get_opponent2_file(x):
return 'opponent2_' + str(x) + '.pkl' | 27.765625 | 112 | 0.665729 |
4a27c4e37d1dea45d071b8f8ddaa7bfbfc41754f | 4,638 | py | Python | edge/upload_aivdm_data_file_broadcast_weather.py | dannil10/dogger | 7e4570f1aa7d5393a9ae182498573d03fe1b61e9 | [
"MIT"
] | null | null | null | edge/upload_aivdm_data_file_broadcast_weather.py | dannil10/dogger | 7e4570f1aa7d5393a9ae182498573d03fe1b61e9 | [
"MIT"
] | null | null | null | edge/upload_aivdm_data_file_broadcast_weather.py | dannil10/dogger | 7e4570f1aa7d5393a9ae182498573d03fe1b61e9 | [
"MIT"
] | null | null | null | import gateway.link
udp_upload_ais = gateway.link.SqlFileAisData(
channels = {144},
start_delay = 0,
transmit_rate = 0.2,
max_age = 20,
target_channels = {'VDM':{0:'txt', 149:'json'}},
file_path = '/srv/dogger/files/',
message_formats = [ {"message": {"type":8, "fid":31, "start_pos":56}, #"exclude_mmsis":[], "include_only_mmsis":[] },
"lon": { "bits":[ 56, 80], "type":"I3", "div":60000, "novalue":181 },
"lat": { "bits":[ 81,104], "type":"I3", "div":60000, "novalue":91 },
"accuracy": { "bits":[105,105], "type":"b" },
"day": { "bits":[106,110], "type":"u", "novalue":0 },
"hour": { "bits":[111,115], "type":"u", "novalue":24 },
"minute": { "bits":[116,121], "type":"u", "novalue":60 },
"wspeed": { "bits":[122,128], "type":"u", "novalue":127, "overflow": 126 },
"wgust": { "bits":[129,135], "type":"u", "novalue":127, "overflow": 126 },
"wdir": { "bits":[136,144], "type":"u", "novalue":360 },
"wgustdir": { "bits":[145,153], "type":"u", "novalue":360 },
"airtemp": { "bits":[154,164], "type":"I1", "round":1, "div":10, "novalue":-1024 },
"humidity": { "bits":[165,171], "type":"u", "novalue":101 },
"dewpoint": { "bits":[172,181], "type":"I1", "round":2, "div":10, "novalue":501 },
"pressure": { "bits":[182,190], "type":"u", "novalue":511, "underflow": 0, "overflow": 402, "add": 799 },
"pressuretend": { "bits":[191,192], "type":"e", "novalue":3 },
"visibility": { "bits":[193,200], "type":"U1", "round":1, "div":10, "novalue":127, "overflow_var_flag":[193,193] },
"waterlevel": { "bits":[201,212], "type":"U2", "round":3, "div":100, "add":-10, "novalue":4001 },
"leveltrend": { "bits":[213,214], "type":"e", "novalue":3 },
"cspeed": { "bits":[215,222], "type":"U1", "round":1, "div":10, "novalue":255, "overflow": 251 },
"cdir": { "bits":[223,231], "type":"u", "novalue":360 },
"cspeed2": { "bits":[232,239], "type":"U1", "round":1, "div":10, "novalue":255, "overflow": 251 },
"cdir2": { "bits":[240,248], "type":"u", "novalue":360 },
"cdepth2": { "bits":[249,253], "type":"U1", "round":1, "div":10, "novalue":31 },
"cspeed3": { "bits":[254,261], "type":"U1", "round":1, "div":10, "novalue":255, "overflow": 251 },
"cdir3": { "bits":[262,270], "type":"u", "novalue":360 },
"cdepth3": { "bits":[271,275], "type":"U1", "round":1, "div":10, "novalue":31 },
"waveheight": { "bits":[276,283], "type":"U1", "round":1, "div":10, "novalue":255, "overflow": 251 },
"waveperiod": { "bits":[284,289], "type":"u", "novalue":63 },
"wavedir": { "bits":[290,298], "type":"u", "novalue":360 },
"swellheight": { "bits":[299,306], "type":"U1", "round":1, "div":10, "novalue":255, "overflow": 251 },
"swellperiod": { "bits":[307,312], "type":"u", "novalue":63 },
"swelldir": { "bits":[313,321], "type":"u", "novalue":360 },
"seastate": { "bits":[322,325], "type":"e", "novalue":13 },
"watertemp": { "bits":[326,335], "type":"I1", "round":1, "div":10, "novalue":501 },
"preciptype": { "bits":[336,338], "type":"e", "novalue":7 },
"salinity": { "bits":[339,347], "type":"U1", "round":1, "div":10, "novalue":510, "nosensor":511 },
"ice": { "bits":[348,349], "type":"e", "novalue":3 },
"olc": { "function":{"name":"get_open_location_code", "args":{"lat", "lon"}} },
"host_hardware_id": { "function":{"name":"create_location_key", "args":{"mmsi", "olc"}} },
"device_hardware_id": { "function":{"name":"create_location_key", "args":{"mmsi", "olc"}} },
"module_hardware_id": { "function":{"name":"create_location_message_key", "args":{"mmsi", "olc", "fid"}} } } ],
config_filepath = '/srv/dogger/',
config_filename = 'conf_cloud_db.ini')
udp_upload_ais.run()
| 79.965517 | 138 | 0.434886 |
4a27c6c2b12e956fdc71a5137b622fede30f2018 | 4,598 | py | Python | acceptance/python/src/test_multiple_owners.py | innoprenuer/bigchaindb | 32b64ccc2a208f38162566f3e088ad49baced79f | [
"Apache-2.0"
] | 1 | 2019-05-31T14:06:02.000Z | 2019-05-31T14:06:02.000Z | acceptance/python/src/test_multiple_owners.py | innoprenuer/bigchaindb | 32b64ccc2a208f38162566f3e088ad49baced79f | [
"Apache-2.0"
] | null | null | null | acceptance/python/src/test_multiple_owners.py | innoprenuer/bigchaindb | 32b64ccc2a208f38162566f3e088ad49baced79f | [
"Apache-2.0"
] | 1 | 2019-08-28T23:38:52.000Z | 2019-08-28T23:38:52.000Z | # # Multiple owners integration testing
# This test checks if we can successfully create and transfer a transaction
# with multiple owners.
# The script tests various things like:
#
# - create a transaction with multiple owners
# - check if the transaction is stored and has the right amount of public keys
# - transfer the transaction to a third person
#
# We run a series of checks for each step, that is retrieving
# the transaction from the remote system, and also checking the public keys
# of a given transaction.
#
# This integration test is a rip-off of our
# [tutorial](https://docs.bigchaindb.com/projects/py-driver/en/latest/usage.html).
# ## Imports
# We need some utils from the `os` package, we will interact with
# env variables.
import os
# For this test case we import and use the Python Driver.
from bigchaindb_driver import BigchainDB
from bigchaindb_driver.crypto import generate_keypair
def test_multiple_owners():
# ## Set up a connection to BigchainDB
# Check [test_basic.py](./test_basic.html) to get some more details
# about the endpoint.
bdb = BigchainDB(os.environ.get('BIGCHAINDB_ENDPOINT'))
# Hey Alice and Bob, nice to see you again!
alice, bob = generate_keypair(), generate_keypair()
# ## Alice and Bob create a transaction
# Alice and Bob just moved into a shared flat; no one can afford these
# high rents anymore. Bob suggests getting a dish washer for the
# kitchen. Alice agrees and here they go, creating the asset for their
# dish washer.
dw_asset = {
'data': {
'dish washer': {
'serial_number': 1337
}
}
}
# They prepare a `CREATE` transaction. To have multiple owners, both
# Bob and Alice need to be the recipients.
prepared_dw_tx = bdb.transactions.prepare(
operation='CREATE',
signers=alice.public_key,
recipients=(alice.public_key, bob.public_key),
asset=dw_asset)
# Now they both sign the transaction by providing their private keys.
# And send it afterwards.
fulfilled_dw_tx = bdb.transactions.fulfill(
prepared_dw_tx,
private_keys=[alice.private_key, bob.private_key])
bdb.transactions.send(fulfilled_dw_tx, mode='commit')
# We store the `id` of the transaction to use it later on.
dw_id = fulfilled_dw_tx['id']
# Let's check if the transaction was successful.
assert bdb.transactions.retrieve(dw_id), \
'Cannot find transaction {}'.format(dw_id)
# The transaction should have two public keys in the outputs.
assert len(
bdb.transactions.retrieve(dw_id)['outputs'][0]['public_keys']) == 2
# ## Alice and Bob transfer a transaction to Carol.
# Alice and Bob save a lot of money living together. They often go out
# for dinner and don't cook at home. But now they don't have any dishes to
# wash, so they decide to sell the dish washer to their friend Carol.
# Hey Carol, nice to meet you!
carol = generate_keypair()
# Alice and Bob prepare the transaction to transfer the dish washer to
# Carol.
transfer_asset = {'id': dw_id}
output_index = 0
output = fulfilled_dw_tx['outputs'][output_index]
transfer_input = {'fulfillment': output['condition']['details'],
'fulfills': {'output_index': output_index,
'transaction_id': fulfilled_dw_tx[
'id']},
'owners_before': output['public_keys']}
# Now they create the transaction...
prepared_transfer_tx = bdb.transactions.prepare(
operation='TRANSFER',
asset=transfer_asset,
inputs=transfer_input,
recipients=carol.public_key)
# ... and sign it with their private keys, then send it.
fulfilled_transfer_tx = bdb.transactions.fulfill(
prepared_transfer_tx,
private_keys=[alice.private_key, bob.private_key])
sent_transfer_tx = bdb.transactions.send(fulfilled_transfer_tx,
mode='commit')
# They check if the transaction was successful.
assert bdb.transactions.retrieve(
fulfilled_transfer_tx['id']) == sent_transfer_tx
# The owners before should include both Alice and Bob.
assert len(
bdb.transactions.retrieve(fulfilled_transfer_tx['id'])['inputs'][0][
'owners_before']) == 2
# While the new owner is Carol.
assert bdb.transactions.retrieve(fulfilled_transfer_tx['id'])[
'outputs'][0]['public_keys'][0] == carol.public_key
| 37.382114 | 82 | 0.670291 |