max_stars_repo_path (string, 3-269 chars) | max_stars_repo_name (string, 4-119 chars) | max_stars_count (int64, 0-191k) | id (string, 1-7 chars) | content (string, 6-1.05M chars) | score (float64, 0.23-5.13) | int_score (int64, 0-5)
---|---|---|---|---|---|---
phi/math/backend/_backend.py | marc-gav/PhiFlow | 0 | 4000 | <filename>phi/math/backend/_backend.py
from collections import namedtuple
from contextlib import contextmanager
from threading import Barrier
from typing import List, Callable
import numpy
from ._dtype import DType, combine_types
SolveResult = namedtuple('SolveResult', [
'method', 'x', 'residual', 'iterations', 'function_evaluations', 'converged', 'diverged', 'message',
])
class ComputeDevice:
"""
A physical device that can be selected to perform backend computations.
"""
def __init__(self, backend: 'Backend', name: str, device_type: str, memory: int, processor_count: int, description: str, ref=None):
self.name: str = name
""" Name of the compute device. CPUs are typically called `'CPU'`. """
self.device_type: str = device_type
""" Type of device such as `'CPU'`, `'GPU'` or `'TPU'`. """
self.memory: int = memory
""" Maximum memory of the device that can be allocated (in bytes). -1 for n/a. """
self.processor_count: int = processor_count
""" Number of CPU cores or GPU multiprocessors. -1 for n/a. """
self.description: str = description
""" Further information about the device such as driver version. """
self.ref = ref
""" (Optional) Reference to the internal device representation. """
self.backend: 'Backend' = backend
""" Backend that this device belongs to. Different backends represent the same device with different objects. """
def __repr__(self):
mem = f"{(self.memory / 1024 ** 2)} MB" if self.memory > 0 else "memory: n/a"
pro = f"{self.processor_count} processors" if self.processor_count > 0 else "processors: n/a"
descr = self.description.replace('\n', ' ')
if len(descr) > 30:
descr = descr[:28] + "..."
return f"'{self.name}' ({self.device_type}) | {mem} | {pro} | {descr}"
class Backend:
def __init__(self, name: str, default_device: ComputeDevice):
"""
Backends delegate low-level operations to a compute library or emulate them.
The methods of `Backend` form a comprehensive list of available operations.
To support a compute library, subclass `Backend` and register it by adding it to `BACKENDS`.
Args:
name: Human-readable string
default_device: `ComputeDevice` being used by default
"""
self._name = name
self._default_device = default_device
def __enter__(self):
_DEFAULT.append(self)
def __exit__(self, exc_type, exc_val, exc_tb):
_DEFAULT.pop(-1)
@property
def name(self) -> str:
return self._name
def supports(self, feature: str or Callable) -> bool:
"""
Tests if this backend supports the given feature.
Features correspond to a method of this backend that must be implemented if the feature is supported.
Possible features:
* `sparse_tensor`
* `gradients`
Args:
feature: `str` or unbound Backend method, e.g. `Backend.sparse_tensor`
Returns:
Whether the feature is supported.
"""
feature = feature if isinstance(feature, str) else feature.__name__
if not hasattr(Backend, feature):
raise ValueError(f"Not a valid feature: '{feature}'")
backend_fun = getattr(Backend, feature)
impl_fun = getattr(self.__class__, feature)
return impl_fun is not backend_fun
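# Usage sketch (illustrative, not part of the original docs): feature checks gate optional code paths.
#     if backend.supports(Backend.sparse_tensor):
#         matrix = backend.sparse_tensor(indices, values, shape)
#     else:
#         ...  # fall back to a dense representation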
def prefers_channels_last(self) -> bool:
raise NotImplementedError()
@property
def precision(self) -> int:
""" Short for math.backend.get_precision() """
return get_precision()
@property
def float_type(self) -> DType:
return DType(float, self.precision)
@property
def as_registered(self) -> 'Backend':
from phi.math.backend import BACKENDS
for backend in BACKENDS:
if self.name in backend.name:
return backend
raise RuntimeError(f"Backend '{self}' is not visible.")
@property
def complex_type(self) -> DType:
return DType(complex, max(64, self.precision))
def combine_types(self, *dtypes: DType) -> DType:
return combine_types(*dtypes, fp_precision=self.precision)
def auto_cast(self, *tensors) -> list:
"""
Determines the appropriate value type resulting from operations involving the given tensors as input.
This method is called by the default implementations of basic operators.
Backends can override this method to prevent unnecessary casting.
Args:
*tensors: tensors to cast and to consider when determining the common data type
Returns:
tensors cast to a common data type
"""
dtypes = [self.dtype(t) for t in tensors]
result_type = self.combine_types(*dtypes)
if result_type.kind in (int, float, complex, bool):
tensors = [self.cast(t, result_type) for t in tensors]
return tensors
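# Example (illustrative): with the default 32-bit precision, auto_cast(int_tensor, float_tensor)
# returns both tensors cast to the common result type float32.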
def __str__(self):
return self.name
def __repr__(self):
return self.name
def list_devices(self, device_type: str or None = None) -> List[ComputeDevice]:
"""
Fetches information about all available compute devices this backend can use.
Implementations:
* NumPy: [`os.cpu_count`](https://docs.python.org/3/library/os.html#os.cpu_count)
* PyTorch: [`torch.cuda.get_device_properties`](https://pytorch.org/docs/stable/cuda.html#torch.cuda.get_device_properties)
* TensorFlow: `tensorflow.python.client.device_lib.list_local_devices`
* Jax: [`jax.devices`](https://jax.readthedocs.io/en/latest/jax.html#jax.devices)
Args:
device_type: (optional) Return only devices of this type, e.g. `'GPU'` or `'CPU'`. See `ComputeDevice.device_type`.
Returns:
`list` of all currently available devices.
"""
raise NotImplementedError()
def get_default_device(self) -> ComputeDevice:
return self._default_device
def set_default_device(self, device: ComputeDevice or str):
if isinstance(device, str):
devices = self.list_devices(device)
assert len(devices) >= 1, f"{self.name}: Cannot select '{device}' because no device of this type is available."
device = devices[0]
self._default_device = device
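# Usage sketch (illustrative): backend.set_default_device('GPU') selects the first available
# device of that type and fails with an assertion error if none is found.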
def seed(self, seed: int):
raise NotImplementedError()
def is_tensor(self, x, only_native=False):
"""
An object is considered a native tensor by a backend if no internal conversion is required by backend methods.
An object is considered a tensor (native or otherwise) by a backend if it is not a struct (e.g. tuple, list) and all methods of the backend accept it as a tensor argument.
Args:
x: object to check
only_native: If True, only accepts true native tensor representations, not Python numbers or others that are also supported as tensors (Default value = False)
Returns:
bool: whether `x` is considered a tensor by this backend
"""
raise NotImplementedError()
def as_tensor(self, x, convert_external=True):
"""
Converts a tensor-like object to the native tensor representation of this backend.
If x is a native tensor of this backend, it is returned without modification.
If x is a Python number (numbers.Number instance), `convert_external` decides whether to convert it, unless the backend cannot handle Python numbers.
*Note:* There may be objects that are considered tensors by this backend but are not native and thus, will be converted by this method.
Args:
x: tensor-like, e.g. list, tuple, Python number, tensor
convert_external: if False and `x` is a Python number that is understood by this backend, this method returns the number as-is. This can help prevent type clashes like int32 vs int64. (Default value = True)
Returns:
tensor representation of `x`
"""
raise NotImplementedError()
def is_available(self, tensor) -> bool:
"""
Tests if the value of the tensor is known and can be read at this point.
If true, `numpy(tensor)` must return a valid NumPy representation of the value.
Tensors are typically available when the backend operates in eager mode.
Args:
tensor: backend-compatible tensor
Returns:
bool
"""
raise NotImplementedError()
def numpy(self, tensor) -> numpy.ndarray:
"""
Returns a NumPy representation of the given tensor.
If `tensor` is already a NumPy array, it is returned without modification.
This method raises an error if the value of the tensor is not known at this point, e.g. because it represents a node in a graph.
Use `is_available(tensor)` to check if the value can be represented as a NumPy array.
Args:
tensor: backend-compatible tensor
Returns:
NumPy representation of the values stored in the tensor
"""
raise NotImplementedError()
def to_dlpack(self, tensor):
raise NotImplementedError()
def from_dlpack(self, capsule):
raise NotImplementedError()
def copy(self, tensor, only_mutable=False):
raise NotImplementedError()
def call(self, f: Callable, *args, name=None):
"""
Calls `f(*args)` and returns the result.
This method may be used to register internal calls with the profiler.
Usage:
choose_backend(key).call(custom_function, *args)
"""
return f(*args)
def block_until_ready(self, values):
pass
def jit_compile(self, f: Callable) -> Callable:
return NotImplemented
def functional_gradient(self, f, wrt: tuple or list, get_output: bool):
raise NotImplementedError(self)
def custom_gradient(self, f: Callable, gradient: Callable) -> Callable:
"""
Creates a function based on `f` that uses a custom gradient for backprop.
Args:
f: Forward function.
gradient: Function for backprop. Will be called as `gradient(*d_out)` to compute the gradient of `f`.
Returns:
Function with similar signature and return values as `f`. However, the returned function does not support keyword arguments.
"""
return NotImplemented
def jit_compile_grad(self, f, wrt: tuple or list, get_output: bool):
raise NotImplementedError()
def transpose(self, tensor, axes):
raise NotImplementedError()
def random_uniform(self, shape):
""" Float tensor of selected precision containing random values in the range [0, 1) """
raise NotImplementedError(self)
def random_normal(self, shape):
""" Float tensor of selected precision containing random values sampled from a normal distribution with mean 0 and std 1. """
raise NotImplementedError(self)
def stack(self, values, axis=0):
raise NotImplementedError(self)
def concat(self, values, axis):
raise NotImplementedError(self)
def pad(self, value, pad_width, mode: str = 'constant', constant_values=0):
"""
Pad a tensor with values as specified by `mode` and `constant_values`.
If the mode is not supported, returns NotImplemented.
Args:
value: tensor
pad_width: 2D tensor specifying the number of values padded to the edges of each axis in the form [[axis 0 lower, axis 0 upper], ...] including batch and component axes.
mode: One of 'constant', 'boundary', 'periodic', 'symmetric', 'reflect'. (Default value = 'constant')
constant_values: used for out-of-bounds points if mode='constant' (Default value = 0)
Returns:
padded tensor or NotImplemented
"""
raise NotImplementedError(self)
def reshape(self, value, shape):
raise NotImplementedError(self)
def flip(self, value, axes: tuple or list):
slices = tuple(slice(None, None, -1 if i in axes else None) for i in range(self.ndims(value)))
return value[slices]
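# Example of the slicing trick above (illustrative): for a 2D tensor and axes=(0,), the computed
# index is (slice(None, None, -1), slice(None, None, None)), i.e. the equivalent of value[::-1, :].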
def sum(self, value, axis=None, keepdims=False):
raise NotImplementedError(self)
def prod(self, value, axis=None):
raise NotImplementedError(self)
def divide_no_nan(self, x, y):
"""
Computes x/y but returns 0 if y=0.
Args:
x:
y:
Returns:
"""
raise NotImplementedError(self)
def where(self, condition, x=None, y=None):
raise NotImplementedError(self)
def nonzero(self, values):
"""
Args:
values: Tensor with only spatial dimensions
Returns:
non-zero multi-indices as tensor of shape (nnz, vector)
"""
raise NotImplementedError(self)
def mean(self, value, axis=None, keepdims=False):
raise NotImplementedError(self)
def range(self, start, limit=None, delta=1, dtype: DType = DType(int, 32)):
raise NotImplementedError(self)
def zeros(self, shape, dtype: DType = None):
raise NotImplementedError(self)
def zeros_like(self, tensor):
raise NotImplementedError(self)
def ones(self, shape, dtype: DType = None):
raise NotImplementedError(self)
def ones_like(self, tensor):
raise NotImplementedError(self)
def meshgrid(self, *coordinates):
raise NotImplementedError(self)
def linspace(self, start, stop, number):
raise NotImplementedError(self)
def tensordot(self, a, a_axes: tuple or list, b, b_axes: tuple or list):
""" Multiply-sum-reduce a_axes of a with b_axes of b. """
raise NotImplementedError(self)
def matmul(self, A, b):
raise NotImplementedError(self)
def einsum(self, equation, *tensors):
raise NotImplementedError(self)
def while_loop(self, loop: Callable, values: tuple):
"""
```python
while any(values[0]):
values = loop(*values)
return values
```
This operation does not support backpropagation.
Args:
loop: Loop function, must return a `tuple` with entries equal to `values` in shape and data type.
values: Initial values of loop variables.
Returns:
Loop variables upon loop completion.
"""
raise NotImplementedError(self)
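# Reference behaviour (illustrative sketch assuming NumPy-like eager tensors; actual backends may
# compile this into a native loop):
#     def while_loop(self, loop, values):
#         while numpy.any(values[0]):
#             values = loop(*values)
#         return values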
def abs(self, x):
raise NotImplementedError(self)
def sign(self, x):
raise NotImplementedError(self)
def round(self, x):
raise NotImplementedError(self)
def ceil(self, x):
raise NotImplementedError(self)
def floor(self, x):
raise NotImplementedError(self)
def max(self, x, axis=None, keepdims=False):
raise NotImplementedError(self)
def min(self, x, axis=None, keepdims=False):
raise NotImplementedError(self)
def maximum(self, a, b):
raise NotImplementedError(self)
def minimum(self, a, b):
raise NotImplementedError(self)
def clip(self, x, minimum, maximum):
raise NotImplementedError(self)
def sqrt(self, x):
raise NotImplementedError(self)
def exp(self, x):
raise NotImplementedError(self)
def conv(self, value, kernel, zero_padding=True):
"""
Convolve value with kernel.
Depending on the tensor rank, the convolution is either 1D (rank=3), 2D (rank=4) or 3D (rank=5).
Higher dimensions may not be supported.
Args:
value: tensor of shape (batch_size, in_channel, spatial...)
kernel: tensor of shape (batch_size or 1, out_channel, in_channel, spatial...)
zero_padding: If True, pads the edges of `value` with zeros so that the result has the same shape as `value`.
Returns:
Convolution result as tensor of shape (batch_size, out_channel, spatial...)
"""
raise NotImplementedError(self)
def expand_dims(self, a, axis=0, number=1):
raise NotImplementedError(self)
def shape(self, tensor):
raise NotImplementedError(self)
def staticshape(self, tensor):
raise NotImplementedError(self)
def cast(self, x, dtype: DType):
raise NotImplementedError(self)
def to_float(self, x):
"""
Converts a tensor to floating point values with precision equal to the currently set default precision.
See Also:
`Backend.precision()`.
If `x` is mutable and of the correct floating type, returns a copy of `x`.
To convert float tensors to the backend precision but leave non-float tensors untouched, use `Backend.as_tensor()`.
Args:
x: tensor of bool, int or float
Returns:
Values of `x` as float tensor
"""
return self.cast(x, self.float_type)
def to_int32(self, x):
return self.cast(x, DType(int, 32))
def to_int64(self, x):
return self.cast(x, DType(int, 64))
def to_complex(self, x):
return self.cast(x, DType(complex, max(64, min(self.precision * 2, 128))))
def batched_gather_nd(self, values, indices):
"""
Gathers values from the tensor `values` at locations `indices`.
The first dimension of `values` and `indices` is the batch dimension which must be either equal for both or one for either.
Args:
values: tensor of shape (batch, spatial..., channel)
indices: int tensor of shape (batch, any..., multi_index) where the size of multi_index is values.rank - 2.
Returns:
Gathered values as tensor of shape (batch, any..., channel)
"""
raise NotImplementedError(self)
def flatten(self, x):
return self.reshape(x, (-1,))
def std(self, x, axis=None, keepdims=False):
raise NotImplementedError(self)
def boolean_mask(self, x, mask, axis=0):
"""
Args:
x: tensor with any number of dimensions
mask: 1D mask tensor
axis: Axis index >= 0
"""
raise NotImplementedError(self)
def isfinite(self, x):
raise NotImplementedError(self)
def scatter(self, base_grid, indices, values, mode: str):
"""
Depending on `mode`, performs scatter_update or scatter_add.
Args:
base_grid: Tensor into which scatter values are inserted at indices. Tensor of shape (batch_size, spatial..., channels)
indices: Tensor of shape (batch_size or 1, update_count, index_vector)
values: Values to scatter at indices. Tensor of shape (batch_size or 1, update_count or 1, channels or 1)
mode: One of ('update', 'add')
Returns:
Copy of base_grid with values at `indices` updated by `values`.
"""
raise NotImplementedError(self)
def any(self, boolean_tensor, axis=None, keepdims=False):
raise NotImplementedError(self)
def all(self, boolean_tensor, axis=None, keepdims=False):
raise NotImplementedError(self)
def fft(self, x):
"""
Computes the n-dimensional FFT along all but the first and last dimensions.
Args:
x: tensor of dimension 3 or higher
Returns:
"""
raise NotImplementedError(self)
def ifft(self, k):
"""
Computes the n-dimensional inverse FFT along all but the first and last dimensions.
Args:
k: tensor of dimension 3 or higher
Returns:
"""
raise NotImplementedError(self)
def imag(self, x):
raise NotImplementedError(self)
def real(self, x):
raise NotImplementedError(self)
def sin(self, x):
raise NotImplementedError(self)
def cos(self, x):
raise NotImplementedError(self)
def tan(self, x):
raise NotImplementedError(self)
def log(self, x):
""" Natural logarithm """
raise NotImplementedError(self)
def log2(self, x):
raise NotImplementedError(self)
def log10(self, x):
raise NotImplementedError(self)
def dtype(self, array) -> DType:
raise NotImplementedError(self)
def tile(self, value, multiples):
"""
Repeats the tensor along each axis the number of times given by multiples.
If `multiples` has more dimensions than `value`, these dimensions are added to `value` as outer dimensions.
Args:
value: tensor
multiples: tuple or list of integers
Returns:
tile tensor
"""
raise NotImplementedError(self)
def sparse_tensor(self, indices, values, shape):
"""
Optional feature, see `Backend.supports()`.
Args:
indices: tuple/list of index tensors matching the dimensions (a (row, col) pair for a matrix)
values: tensor of non-zero values corresponding to `indices`
shape: dense shape of the sparse tensor
Returns:
"""
raise NotImplementedError(self)
def coordinates(self, tensor):
"""
Returns the coordinates and values of a tensor.
Args:
tensor: Sparse tensor
Returns:
coordinates: `tuple` of tensor holding the coordinate vectors, i.e. (row, col) for matrices.
values: Tensor holding the corresponding values
"""
raise NotImplementedError(self)
def minimize(self, method: str, f, x0, atol, max_iter, trj: bool):
from scipy.optimize import OptimizeResult, minimize
from threading import Thread
assert self.supports(Backend.functional_gradient)
assert len(self.staticshape(x0)) == 2 # (batch, parameters)
batch_size = self.staticshape(x0)[0]
fg = self.functional_gradient(f, [0], get_output=True)
method_description = f"SciPy {method} with {self.name}"
iterations = [0] * batch_size
function_evaluations = [0] * batch_size
xs = [None] * batch_size
final_losses = [None] * batch_size
converged = [False] * batch_size
diverged = [False] * batch_size
messages = [""] * batch_size
f_inputs = [None] * batch_size
f_b_losses = None
f_b_losses_np = None
f_grad_np = None
f_input_available = Barrier(batch_size + 1)
f_output_available = Barrier(batch_size + 1)
finished = [False] * batch_size
all_finished = False
trajectories = [[] for _ in range(batch_size)] if trj else None
threads = []
for b in range(batch_size):
def b_thread(b=b):
recent_b_losses = []
def b_fun(x: numpy.ndarray):
function_evaluations[b] += 1
f_inputs[b] = self.as_tensor(x, convert_external=True)
f_input_available.wait()
f_output_available.wait()
recent_b_losses.append(f_b_losses[b])
if final_losses[b] is None: # first evaluation
final_losses[b] = f_b_losses[b]
if trajectories is not None:
trajectories[b].append(SolveResult(method_description, x0[b], f_b_losses[b], 0, 1, False, False, ""))
return f_b_losses_np[b], f_grad_np[b]
def callback(x, *args): # L-BFGS-B only passes x but the documentation says (x, state)
iterations[b] += 1
loss = min(recent_b_losses)
recent_b_losses.clear()
final_losses[b] = loss
if trajectories is not None:
trajectories[b].append(SolveResult(method_description, x, loss, iterations[b], function_evaluations[b], False, False, ""))
res = minimize(fun=b_fun, x0=x0[b], jac=True, method=method, tol=atol[b], options={'maxiter': max_iter[b]}, callback=callback)
assert isinstance(res, OptimizeResult)
# res.nit, res.nfev
xs[b] = res.x
converged[b] = res.success
diverged[b] = res.status not in (0, 1) # 0=success
messages[b] = res.message
finished[b] = True
while not all_finished:
f_input_available.wait()
f_output_available.wait()
b_thread = Thread(target=b_thread)
threads.append(b_thread)
b_thread.start()
while True:
f_input_available.wait()
if all(finished):
all_finished = True
f_output_available.wait()
break
_, f_b_losses, f_grad = fg(self.stack(f_inputs))
f_b_losses_np = self.numpy(f_b_losses).astype(numpy.float64)
f_grad_np = self.numpy(f_grad).astype(numpy.float64)
f_output_available.wait()
for b_thread in threads:
b_thread.join() # make sure threads exit correctly
if trj:
max_trajectory_length = max([len(t) for t in trajectories])
last_points = [SolveResult(method_description, xs[b], final_losses[b], iterations[b], function_evaluations[b], converged[b], diverged[b], "") for b in range(batch_size)]
trajectories = [t[:-1] + [last_point] * (max_trajectory_length - len(t) + 1) for t, last_point in zip(trajectories, last_points)]
trajectory = []
for states in zip(*trajectories):
x = self.stack([self.to_float(state.x) for state in states])
residual = self.stack([state.residual for state in states])
iterations = [state.iterations for state in states]
function_evaluations = [state.function_evaluations for state in states]
converged = [state.converged for state in states]
diverged = [state.diverged for state in states]
trajectory.append(SolveResult(method_description, x, residual, iterations, function_evaluations, converged, diverged, messages))
return trajectory
else:
x = self.stack(xs)
residual = self.stack(final_losses)
return SolveResult(method_description, x, residual, iterations, function_evaluations, converged, diverged, messages)
def linear_solve(self, method: str, lin, y, x0, rtol, atol, max_iter, trj: bool) -> SolveResult or List[SolveResult]:
"""
Solve the system of linear equations A · x = y.
This method need not provide a gradient for the operation.
Args:
method: Which algorithm to use. One of `('auto', 'CG', 'CG-adaptive')`.
lin: Linear operation. One of
* sparse/dense matrix valid for all instances
* tuple/list of sparse/dense matrices for varying matrices along batch, must have the same nonzero locations.
* linear function A(x), must be called on all instances in parallel
y: target result of A * x. 2nd order tensor (batch, vector) or list of vectors.
x0: Initial guess of size (batch, parameters)
rtol: Relative tolerance of size (batch,)
atol: Absolute tolerance of size (batch,)
max_iter: Maximum number of iterations of size (batch,)
trj: Whether to record and return the optimization trajectory as a `List[SolveResult]`.
Returns:
result: `SolveResult` or `List[SolveResult]`, depending on `trj`.
"""
if method == 'auto':
return self.conjugate_gradient_adaptive(lin, y, x0, rtol, atol, max_iter, trj)
elif method == 'CG':
return self.conjugate_gradient(lin, y, x0, rtol, atol, max_iter, trj)
elif method == 'CG-adaptive':
return self.conjugate_gradient_adaptive(lin, y, x0, rtol, atol, max_iter, trj)
else:
raise NotImplementedError(f"Method '{method}' not supported for linear solve.")
def conjugate_gradient(self, lin, y, x0, rtol, atol, max_iter, trj: bool) -> SolveResult or List[SolveResult]:
""" Standard conjugate gradient algorithm. Signature matches to `Backend.linear_solve()`. """
# Based on "An Introduction to the Conjugate Gradient Method Without the Agonizing Pain" by <NAME>
# symbols: dx=d, dy=q, step_size=alpha, residual_squared=delta, residual=r, y=b
method = f"Φ-Flow CG ({self.name})"
y = self.to_float(y)
x0 = self.copy(self.to_float(x0), only_mutable=True)
batch_size = self.staticshape(y)[0]
tolerance_sq = self.maximum(rtol ** 2 * self.sum(y ** 2, -1), atol ** 2)
x = x0
dx = residual = y - self.linear(lin, x)
it_counter = 0
iterations = self.zeros([batch_size], DType(int, 32))
function_evaluations = self.ones([batch_size], DType(int, 32))
residual_squared = rsq0 = self.sum(residual ** 2, -1, keepdims=True)
diverged = self.any(~self.isfinite(x), axis=(1,))
converged = self.all(residual_squared <= tolerance_sq, axis=(1,))
trajectory = [SolveResult(method, x, residual, iterations, function_evaluations, converged, diverged, "")] if trj else None
finished = converged | diverged | (iterations >= max_iter); not_finished_1 = self.to_int32(~finished) # ; active = self.to_float(self.expand_dims(not_finished_1, -1))
while ~self.all(finished):
it_counter += 1; iterations += not_finished_1
dy = self.linear(lin, dx); function_evaluations += not_finished_1
dx_dy = self.sum(dx * dy, axis=-1, keepdims=True)
step_size = self.divide_no_nan(residual_squared, dx_dy)
step_size *= self.expand_dims(self.to_float(not_finished_1), -1) # this is not really necessary but ensures batch-independence
x += step_size * dx
if it_counter % 50 == 0:
residual = y - self.linear(lin, x); function_evaluations += 1
else:
residual = residual - step_size * dy # in-place subtraction affects convergence
residual_squared_old = residual_squared
residual_squared = self.sum(residual ** 2, -1, keepdims=True)
dx = residual + self.divide_no_nan(residual_squared, residual_squared_old) * dx
diverged = self.any(residual_squared / rsq0 > 100, axis=(1,)) & (iterations >= 8)
converged = self.all(residual_squared <= tolerance_sq, axis=(1,))
if trajectory is not None:
trajectory.append(SolveResult(method, x, residual, iterations, function_evaluations, converged, diverged, ""))
x = self.copy(x)
iterations = self.copy(iterations)
finished = converged | diverged | (iterations >= max_iter); not_finished_1 = self.to_int32(~finished) # ; active = self.to_float(self.expand_dims(not_finished_1, -1))
return trajectory if trj else SolveResult(method, x, residual, iterations, function_evaluations, converged, diverged, "")
def conjugate_gradient_adaptive(self, lin, y, x0, rtol, atol, max_iter, trj: bool) -> SolveResult or List[SolveResult]:
""" Conjugate gradient algorithm with adaptive step size. Signature matches to `Backend.linear_solve()`. """
# Based on the variant described in "Methods of Conjugate Gradients for Solving Linear Systems" by <NAME> and <NAME>
# https://nvlpubs.nist.gov/nistpubs/jres/049/jresv49n6p409_A1b.pdf
method = f"Φ-Flow CG-adaptive ({self.name})"
y = self.to_float(y)
x0 = self.copy(self.to_float(x0), only_mutable=True)
batch_size = self.staticshape(y)[0]
tolerance_sq = self.maximum(rtol ** 2 * self.sum(y ** 2, -1), atol ** 2)
x = x0
dx = residual = y - self.linear(lin, x)
dy = self.linear(lin, dx)
iterations = self.zeros([batch_size], DType(int, 32))
function_evaluations = self.ones([batch_size], DType(int, 32))
residual_squared = rsq0 = self.sum(residual ** 2, -1, keepdims=True)
diverged = self.any(~self.isfinite(x), axis=(1,))
converged = self.all(residual_squared <= tolerance_sq, axis=(1,))
trajectory = [SolveResult(method, x, residual, iterations, function_evaluations, converged, diverged, "")] if trj else None
continue_ = ~converged & ~diverged & (iterations < max_iter)
def loop(continue_, it_counter, x, dx, dy, residual, iterations, function_evaluations, _converged, _diverged):
continue_1 = self.to_int32(continue_)
it_counter += 1
iterations += continue_1
dx_dy = self.sum(dx * dy, axis=-1, keepdims=True)
step_size = self.divide_no_nan(self.sum(dx * residual, axis=-1, keepdims=True), dx_dy)
step_size *= self.expand_dims(self.to_float(continue_1), -1) # this is not really necessary but ensures batch-independence
x += step_size * dx
# if it_counter % 50 == 0: # Not traceable since Python bool
# residual = y - self.linear(lin, x); function_evaluations += 1
# else:
residual = residual - step_size * dy # in-place subtraction affects convergence
residual_squared = self.sum(residual ** 2, -1, keepdims=True)
dx = residual - self.divide_no_nan(self.sum(residual * dy, axis=-1, keepdims=True) * dx, dx_dy)
dy = self.linear(lin, dx); function_evaluations += continue_1
diverged = self.any(residual_squared / rsq0 > 100, axis=(1,)) & (iterations >= 8)
converged = self.all(residual_squared <= tolerance_sq, axis=(1,))
if trajectory is not None:
trajectory.append(SolveResult(method, x, residual, iterations, function_evaluations, converged, diverged, ""))
x = self.copy(x)
iterations = self.copy(iterations)
continue_ = ~converged & ~diverged & (iterations < max_iter)
return continue_, it_counter, x, dx, dy, residual, iterations, function_evaluations, converged, diverged
_, _, x, _, _, residual, iterations, function_evaluations, converged, diverged =\
self.while_loop(loop, (continue_, 0, x, dx, dy, residual, iterations, function_evaluations, converged, diverged))
return trajectory if trj else SolveResult(method, x, residual, iterations, function_evaluations, converged, diverged, "")
def linear(self, lin, vector):
if callable(lin):
return lin(vector)
elif isinstance(lin, (tuple, list)):
for lin_i in lin:
lin_shape = self.staticshape(lin_i)
assert len(lin_shape) == 2
return self.stack([self.matmul(m, v) for m, v in zip(lin, self.unstack(vector))])
else:
lin_shape = self.staticshape(lin)
assert len(lin_shape) == 2, f"A must be a matrix but got shape {lin_shape}"
return self.matmul(lin, vector)
def gradients(self, y, xs: tuple or list, grad_y) -> tuple:
raise NotImplementedError(self)
def record_gradients(self, xs: tuple or list, persistent=False):
raise NotImplementedError(self)
def stop_gradient(self, value):
raise NotImplementedError(self)
def grid_sample(self, grid, spatial_dims: tuple, coordinates, extrapolation='constant'):
"""
Interpolates a regular grid at the specified coordinates.
Args:
grid: Tensor
spatial_dims: Dimension indices that correspond to coordinate vectors
coordinates: Tensor of floating grid indices.
The last dimension must match `spatial_dims`.
The first grid point of dimension i lies at position 0, the last at values.shape[i]-1.
extrapolation: Values to use for coordinates outside the grid.
One of `('undefined', 'zeros', 'boundary', 'periodic', 'symmetric', 'reflect')`.
Returns:
sampled values with linear interpolation
"""
return NotImplemented
def variable(self, value):
return NotImplemented
def ndims(self, tensor):
return len(self.staticshape(tensor))
def size(self, array):
return self.prod(self.shape(array))
def batch_gather(self, tensor, batches):
if isinstance(batches, int):
batches = [batches]
return tensor[batches, ...]
def unstack(self, tensor, axis=0, keepdims=False) -> tuple:
if axis < 0:
axis += len(tensor.shape)
if axis >= len(tensor.shape) or axis < 0:
raise ValueError("Illegal axis value")
result = []
for slice_idx in range(tensor.shape[axis]):
if keepdims:
component = tensor[tuple([slice(slice_idx, slice_idx + 1) if d == axis else slice(None) for d in range(len(tensor.shape))])]
else:
component = tensor[tuple([slice_idx if d == axis else slice(None) for d in range(len(tensor.shape))])]
result.append(component)
return tuple(result)
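# Example (illustrative): for a tensor of shape (2, 3), unstack(t, axis=0) returns two tensors of
# shape (3,), or two tensors of shape (1, 3) when keepdims=True.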
def equal(self, x, y):
""" Element-wise equality check """
raise NotImplementedError(self)
def not_equal(self, x, y):
return ~self.equal(x, y)
def greater_than(self, x, y):
x, y = self.auto_cast(x, y)
return x > y
def greater_or_equal(self, x, y):
x, y = self.auto_cast(x, y)
return x >= y
def add(self, a, b):
a, b = self.auto_cast(a, b)
return a + b
def sub(self, a, b):
a, b = self.auto_cast(a, b)
return a - b
def mul(self, a, b):
a, b = self.auto_cast(a, b)
return a * b
def div(self, numerator, denominator):
numerator, denominator = self.auto_cast(numerator, denominator)
return numerator / denominator
def pow(self, base, exp):
base, exp = self.auto_cast(base, exp)
return base ** exp
def mod(self, dividend, divisor):
dividend, divisor = self.auto_cast(dividend, divisor)
return dividend % divisor
def and_(self, a, b):
a, b = self.auto_cast(a, b)
return a & b
def or_(self, a, b):
a, b = self.auto_cast(a, b)
return a | b
def xor(self, a, b):
a, b = self.auto_cast(a, b)
return a ^ b
def floordiv(self, a, b):
a, b = self.auto_cast(a, b)
return a // b
BACKENDS = []
""" Global list of all registered backends. Register a `Backend` by adding it to the list. """
_DEFAULT = [] # [0] = global default, [1:] from 'with' blocks
_PRECISION = [32] # [0] = global precision in bits, [1:] from 'with' blocks
def choose_backend(*values, prefer_default=False) -> Backend:
"""
Selects a suitable backend to handle the given values.
This function is used by most math functions operating on `Tensor` objects to delegate the actual computations.
Args:
*values:
prefer_default: if True, selects the default backend assuming it can handle the values, see `default_backend()`.
Raises:
NoBackendFound: if no backend can handle the given values.
Returns:
the selected `Backend`
"""
# --- Default Backend has priority ---
if _is_applicable(_DEFAULT[-1], values) and (prefer_default or _is_specific(_DEFAULT[-1], values)):
return _DEFAULT[-1]
# --- Filter out non-applicable ---
backends = [backend for backend in BACKENDS if _is_applicable(backend, values)]
if len(backends) == 0:
raise NoBackendFound(f"No backend found for types {[type(v).__name__ for v in values]}; registered backends are {BACKENDS}")
# --- Native tensors? ---
for backend in backends:
if _is_specific(backend, values):
return backend
return backends[0]
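# Usage sketch (illustrative; assumes a NumPy backend is registered, which phi.math normally does on import):
#     import numpy as np
#     b = choose_backend(np.zeros((4, 4)))             # backend whose native tensor type matches the array
#     b = choose_backend(1.5, 2, prefer_default=True)  # plain numbers: fall back to the default backend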
class NoBackendFound(Exception):
"""
Thrown by `choose_backend` if no backend can handle the given values.
"""
def __init__(self, msg):
Exception.__init__(self, msg)
def default_backend() -> Backend:
"""
The default backend is preferred by `choose_backend()`.
The default backend can be set globally using `set_global_default_backend()` and locally using `with backend:`.
Returns:
current default `Backend`
"""
return _DEFAULT[-1]
def context_backend() -> Backend or None:
"""
Returns the backend set by the inner-most surrounding `with backend:` block.
If called outside a backend context, returns `None`.
Returns:
`Backend` or `None`
"""
return _DEFAULT[-1] if len(_DEFAULT) > 1 else None
def set_global_default_backend(backend: Backend):
"""
Sets the given backend as default.
This setting can be overridden using `with backend:`.
See `default_backend()`, `choose_backend()`.
Args:
backend: `Backend` to set as default
"""
assert isinstance(backend, Backend)
_DEFAULT[0] = backend
def set_global_precision(floating_point_bits: int):
"""
Sets the global floating point precision, which affects all registered backends.
If `floating_point_bits` is an integer, all floating point tensors created henceforth will be of the corresponding data type, float16, float32 or float64.
Operations may also convert floating point values to this precision, even if the input had a different precision.
If `floating_point_bits` is None, new tensors will default to float32 unless specified otherwise.
The output of math operations has the same precision as its inputs.
Args:
floating_point_bits: one of (16, 32, 64, None)
"""
_PRECISION[0] = floating_point_bits
def get_precision() -> int:
"""
Gets the current target floating point precision in bits.
The precision can be set globally using `set_global_precision()` or locally using `with precision(p):`.
Any Backend method may convert floating point values to this precision, even if the input had a different precision.
Returns:
16 for half, 32 for single, 64 for double
"""
return _PRECISION[-1]
@contextmanager
def precision(floating_point_bits: int):
"""
Sets the floating point precision for the local context.
Usage: `with precision(p):`
This overrides the global setting, see `set_global_precision()`.
Args:
floating_point_bits: 16 for half, 32 for single, 64 for double
"""
_PRECISION.append(floating_point_bits)
try:
yield None
finally:
_PRECISION.pop(-1)
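# Usage sketch (illustrative):
#     with precision(64):
#         x = default_backend().ones((3,))  # float tensors created here use 64-bit precision
#     # outside the block, the previous precision (32 by default) applies again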
def convert(tensor, backend: Backend = None, use_dlpack=True):
"""
Convert a Tensor to the native format of `backend`.
If the target backend can operate natively on `tensor`, returns `tensor`.
If both backends support *DLPack* and `use_dlpack=True`, performs a zero-copy conversion via the DLPack library.
Else, intermediately converts `tensor` to a NumPy array.
*Warning*: This operation breaks the automatic differentiation chain.
Args:
tensor: Native tensor belonging to any registered backend.
backend: Target backend. If `None`, uses the current default backend, see `default_backend()`.
Returns:
Tensor belonging to `backend`.
"""
backend = backend or default_backend()
current_backend = choose_backend(tensor, prefer_default=False)
if backend.is_tensor(tensor, True) or backend is current_backend:
return tensor
if use_dlpack and current_backend.supports(Backend.to_dlpack) and backend.supports(Backend.from_dlpack):
capsule = current_backend.to_dlpack(tensor)
return backend.from_dlpack(capsule)
else:
nparray = current_backend.numpy(tensor)
return backend.as_tensor(nparray)
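# Usage sketch (illustrative; which backends exist depends on the installed libraries, and
# `np_array` stands for any tensor of a registered backend):
#     torch_backends = [b for b in BACKENDS if 'torch' in b.name.lower()]
#     if torch_backends:
#         t = convert(np_array, torch_backends[0])  # zero-copy via DLPack where both sides support it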
# Backend choice utility functions
def _is_applicable(backend, values):
for value in values:
if not backend.is_tensor(value, only_native=False):
return False
return True
def _is_specific(backend, values):
for value in values:
if backend.is_tensor(value, only_native=True):
return True
return False
# Other low-level helper functions
def combined_dim(dim1, dim2, type_str: str = 'batch'):
if dim1 is None and dim2 is None:
return None
if dim1 is None or dim1 == 1:
return dim2
if dim2 is None or dim2 == 1:
return dim1
assert dim1 == dim2, f"Incompatible {type_str} dimensions: x0 {dim1}, y {dim2}"
return dim1
| 2.859375 | 3 |
bpython/curtsiesfrontend/parse.py | dtrodrigues/bpython | 2,168 | 4001 | import re
from curtsies.formatstring import fmtstr, FmtStr
from curtsies.termformatconstants import (
FG_COLORS,
BG_COLORS,
colors as CURTSIES_COLORS,
)
from functools import partial
from ..lazyre import LazyReCompile
COLORS = CURTSIES_COLORS + ("default",)
CNAMES = dict(zip("krgybmcwd", COLORS))
# hack for finding the "inverse"
INVERSE_COLORS = {
CURTSIES_COLORS[idx]: CURTSIES_COLORS[
(idx + (len(CURTSIES_COLORS) // 2)) % len(CURTSIES_COLORS)
]
for idx in range(len(CURTSIES_COLORS))
}
INVERSE_COLORS["default"] = INVERSE_COLORS[CURTSIES_COLORS[0]]
def func_for_letter(letter_color_code: str, default: str = "k"):
"""Returns FmtStr constructor for a bpython-style color code"""
if letter_color_code == "d":
letter_color_code = default
elif letter_color_code == "D":
letter_color_code = default.upper()
return partial(
fmtstr,
fg=CNAMES[letter_color_code.lower()],
bold=letter_color_code.isupper(),
)
def color_for_letter(letter_color_code: str, default: str = "k"):
if letter_color_code == "d":
letter_color_code = default
return CNAMES[letter_color_code.lower()]
def parse(s):
"""Returns a FmtStr object from a bpython-formatted colored string"""
rest = s
stuff = []
while True:
if not rest:
break
start, rest = peel_off_string(rest)
stuff.append(start)
return (
sum((fs_from_match(d) for d in stuff[1:]), fs_from_match(stuff[0]))
if len(stuff) > 0
else FmtStr()
)
def fs_from_match(d):
atts = {}
if d["fg"]:
# this isn't according to spec as I understand it
if d["fg"].isupper():
d["bold"] = True
# TODO figure out why boldness isn't based on presence of \x02
color = CNAMES[d["fg"].lower()]
if color != "default":
atts["fg"] = FG_COLORS[color]
if d["bg"]:
if d["bg"] == "I":
# hack for finding the "inverse"
color = INVERSE_COLORS[color]
else:
color = CNAMES[d["bg"].lower()]
if color != "default":
atts["bg"] = BG_COLORS[color]
if d["bold"]:
atts["bold"] = True
return fmtstr(d["string"], **atts)
peel_off_string_re = LazyReCompile(
r"""(?P<colormarker>\x01
(?P<fg>[krgybmcwdKRGYBMCWD]?)
(?P<bg>[krgybmcwdKRGYBMCWDI]?)?)
(?P<bold>\x02?)
\x03
(?P<string>[^\x04]*)
\x04
(?P<rest>.*)
""",
re.VERBOSE | re.DOTALL,
)
def peel_off_string(s):
m = peel_off_string_re.match(s)
assert m, repr(s)
d = m.groupdict()
rest = d["rest"]
del d["rest"]
return d, rest
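# Usage sketch (illustrative; the sample string is made up but follows peel_off_string_re above):
#     parse('\x01y\x03hello\x04')  # -> FmtStr rendering "hello" with a yellow foreground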
| 2.53125 | 3 |
sarpy/io/general/nitf_elements/tres/unclass/BANDSA.py | pressler-vsc/sarpy | 1 | 4002 | <reponame>pressler-vsc/sarpy
# -*- coding: utf-8 -*-
from ..tre_elements import TREExtension, TREElement
__classification__ = "UNCLASSIFIED"
__author__ = "<NAME>"
class BAND(TREElement):
def __init__(self, value):
super(BAND, self).__init__()
self.add_field('BANDPEAK', 's', 5, value)
self.add_field('BANDLBOUND', 's', 5, value)
self.add_field('BANDUBOUND', 's', 5, value)
self.add_field('BANDWIDTH', 's', 5, value)
self.add_field('BANDCALDRK', 's', 6, value)
self.add_field('BANDCALINC', 's', 5, value)
self.add_field('BANDRESP', 's', 5, value)
self.add_field('BANDASD', 's', 5, value)
self.add_field('BANDGSD', 's', 5, value)
class BANDSAType(TREElement):
def __init__(self, value):
super(BANDSAType, self).__init__()
self.add_field('ROW_SPACING', 's', 7, value)
self.add_field('ROW_SPACING_UNITS', 's', 1, value)
self.add_field('COL_SPACING', 's', 7, value)
self.add_field('COL_SPACING_UNITS', 's', 1, value)
self.add_field('FOCAL_LENGTH', 's', 6, value)
self.add_field('BANDCOUNT', 'd', 4, value)
self.add_loop('BANDs', self.BANDCOUNT, BAND, value)
class BANDSA(TREExtension):
_tag_value = 'BANDSA'
_data_type = BANDSAType
| 2.03125 | 2 |
ktrain/graph/learner.py | husmen/ktrain | 1,013 | 4003 | from ..imports import *
from .. import utils as U
from ..core import GenLearner
class NodeClassLearner(GenLearner):
"""
```
Main class used to tune and train Keras models for node classification
Main parameters are:
model (Model): A compiled instance of keras.engine.training.Model
train_data (Iterator): a Iterator instance for training set
val_data (Iterator): A Iterator instance for validation set
```
"""
def __init__(self, model, train_data=None, val_data=None,
batch_size=U.DEFAULT_BS, eval_batch_size=U.DEFAULT_BS,
workers=1, use_multiprocessing=False):
super().__init__(model, train_data=train_data, val_data=val_data,
batch_size=batch_size, eval_batch_size=eval_batch_size,
workers=workers, use_multiprocessing=use_multiprocessing)
return
def view_top_losses(self, n=4, preproc=None, val_data=None):
"""
```
Views observations with top losses in validation set.
Typically over-ridden by Learner subclasses.
Args:
n(int or tuple): a range to select in form of int or tuple
e.g., n=8 is treated as n=(0,8)
preproc (Preprocessor): A TextPreprocessor or ImagePreprocessor.
For some data like text data, a preprocessor
is required to undo the pre-processing
to correctly view raw data.
val_data: optional val_data to use instead of self.val_data
Returns:
list of n tuples where first element is either
filepath or id of validation example and second element
is loss.
```
"""
val = self._check_val(val_data)
# get top losses and associated data
tups = self.top_losses(n=n, val_data=val, preproc=preproc)
# get multilabel status and class names
classes = preproc.get_classes() if preproc is not None else None
# iterate through losses
for tup in tups:
# get data
idx = tup[0]
loss = tup[1]
truth = tup[2]
pred = tup[3]
print('----------')
print("id:%s | loss:%s | true:%s | pred:%s)\n" % (idx, round(loss,2), truth, pred))
#print(obs)
return
def layer_output(self, layer_id, example_id=0, batch_id=0, use_val=False):
"""
```
Prints output of layer with index <layer_id> to help debug models.
Uses first example (example_id=0) from training set, by default.
```
"""
raise Exception('currently_unsupported: layer_output method is not yet supported for ' +
'graph neural networks in ktrain')
class LinkPredLearner(GenLearner):
"""
```
Main class used to tune and train Keras models for link prediction
Main parameters are:
model (Model): A compiled instance of keras.engine.training.Model
train_data (Iterator): a Iterator instance for training set
val_data (Iterator): A Iterator instance for validation set
```
"""
def __init__(self, model, train_data=None, val_data=None,
batch_size=U.DEFAULT_BS, eval_batch_size=U.DEFAULT_BS,
workers=1, use_multiprocessing=False):
super().__init__(model, train_data=train_data, val_data=val_data,
batch_size=batch_size, eval_batch_size=eval_batch_size,
workers=workers, use_multiprocessing=use_multiprocessing)
return
def view_top_losses(self, n=4, preproc=None, val_data=None):
"""
```
Views observations with top losses in validation set.
Typically over-ridden by Learner subclasses.
Args:
n(int or tuple): a range to select in form of int or tuple
e.g., n=8 is treated as n=(0,8)
preproc (Preprocessor): A TextPreprocessor or ImagePreprocessor.
For some data like text data, a preprocessor
is required to undo the pre-processing
to correctly view raw data.
val_data: optional val_data to use instead of self.val_data
Returns:
list of n tuples where first element is either
filepath or id of validation example and second element
is loss.
```
"""
val = self._check_val(val_data)
# get top losses and associated data
tups = self.top_losses(n=n, val_data=val, preproc=preproc)
# get multilabel status and class names
classes = preproc.get_classes() if preproc is not None else None
# iterate through losses
for tup in tups:
# get data
idx = tup[0]
loss = tup[1]
truth = tup[2]
pred = tup[3]
print('----------')
print("id:%s | loss:%s | true:%s | pred:%s)\n" % (idx, round(loss,2), truth, pred))
#print(obs)
return
def layer_output(self, layer_id, example_id=0, batch_id=0, use_val=False):
"""
```
Prints output of layer with index <layer_id> to help debug models.
Uses first example (example_id=0) from training set, by default.
```
"""
raise Exception('currently_unsupported: layer_output method is not yet supported for ' +
'graph neural networks in ktrain')
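# Usage sketch (illustrative; `model`, `train_gen`, `val_gen` and `preproc` are assumed to come
# from ktrain's graph data/model helpers and are not defined here):
#     learner = NodeClassLearner(model, train_data=train_gen, val_data=val_gen, batch_size=64)
#     learner.fit_onecycle(0.01, 5)                  # training loop inherited from the core Learner
#     learner.view_top_losses(n=4, preproc=preproc)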
| 2.890625 | 3 |
VegaZero2VegaLite.py | Thanksyy/Vega-Zero | 5 | 4004 | __author__ = "<NAME>"
import json
import pandas
class VegaZero2VegaLite(object):
def __init__(self):
pass
def parse_vegaZero(self, vega_zero):
self.parsed_vegaZero = {
'mark': '',
'data': '',
'encoding': {
'x': '',
'y': {
'aggregate': '',
'y': ''
},
'color': {
'z': ''
}
},
'transform': {
'filter': '',
'group': '',
'bin': {
'axis': '',
'type': ''
},
'sort': {
'axis': '',
'type': ''
},
'topk': ''
}
}
vega_zero_keywords = vega_zero.split(' ')
self.parsed_vegaZero['mark'] = vega_zero_keywords[vega_zero_keywords.index('mark') + 1]
self.parsed_vegaZero['data'] = vega_zero_keywords[vega_zero_keywords.index('data') + 1]
self.parsed_vegaZero['encoding']['x'] = vega_zero_keywords[vega_zero_keywords.index('x') + 1]
self.parsed_vegaZero['encoding']['y']['y'] = vega_zero_keywords[vega_zero_keywords.index('aggregate') + 2]
self.parsed_vegaZero['encoding']['y']['aggregate'] = vega_zero_keywords[vega_zero_keywords.index('aggregate') + 1]
if 'color' in vega_zero_keywords:
self.parsed_vegaZero['encoding']['color']['z'] = vega_zero_keywords[vega_zero_keywords.index('color') + 1]
if 'topk' in vega_zero_keywords:
self.parsed_vegaZero['transform']['topk'] = vega_zero_keywords[vega_zero_keywords.index('topk') + 1]
if 'sort' in vega_zero_keywords:
self.parsed_vegaZero['transform']['sort']['axis'] = vega_zero_keywords[vega_zero_keywords.index('sort') + 1]
self.parsed_vegaZero['transform']['sort']['type'] = vega_zero_keywords[vega_zero_keywords.index('sort') + 2]
if 'group' in vega_zero_keywords:
self.parsed_vegaZero['transform']['group'] = vega_zero_keywords[vega_zero_keywords.index('group') + 1]
if 'bin' in vega_zero_keywords:
self.parsed_vegaZero['transform']['bin']['axis'] = vega_zero_keywords[vega_zero_keywords.index('bin') + 1]
self.parsed_vegaZero['transform']['bin']['type'] = vega_zero_keywords[vega_zero_keywords.index('bin') + 3]
if 'filter' in vega_zero_keywords:
filter_part_token = []
for each in vega_zero_keywords[vega_zero_keywords.index('filter') + 1:]:
if each not in ['group', 'bin', 'sort', 'topk']:
filter_part_token.append(each)
else:
break
if 'between' in filter_part_token:
filter_part_token[filter_part_token.index('between') + 2] = 'and ' + filter_part_token[
filter_part_token.index('between') - 1] + ' <='
filter_part_token[filter_part_token.index('between')] = '>='
# replace 'and' -- 'or'
filter_part_token = ' '.join(filter_part_token).split()
filter_part_token = ['&' if x == 'and' else x for x in filter_part_token]
filter_part_token = ['|' if x == 'or' else x for x in filter_part_token]
if '&' in filter_part_token or '|' in filter_part_token:
final_filter_part = ''
each_conditions = []
for i in range(len(filter_part_token)):
each = filter_part_token[i]
if each != '&' and each != '|':
# ’=‘ in SQL --to--> ’==‘ in Vega-Lite
if each == '=':
each = '=='
each_conditions.append(each)
if each == '&' or each == '|' or i == len(filter_part_token) - 1:
# each = '&' or '|'
if 'like' == each_conditions[1]:
# only consider this case: '%a%'
if each_conditions[2][1] == '%' and each_conditions[2][len(each_conditions[2]) - 2] == '%':
final_filter_part += 'indexof(' + 'datum.' + each_conditions[0] + ',"' + \
each_conditions[2][2:len(each_conditions[2]) - 2] + '") != -1'
elif 'like' == each_conditions[2] and 'not' == each_conditions[1]:
if each_conditions[3][1] == '%' and each_conditions[3][len(each_conditions[3]) - 2] == '%':
final_filter_part += 'indexof(' + 'datum.' + each_conditions[0] + ',"' + \
each_conditions[3][2:len(each_conditions[3]) - 2] + '") == -1'
else:
final_filter_part += 'datum.' + ' '.join(each_conditions)
if i != len(filter_part_token) - 1:
final_filter_part += ' ' + each + ' '
each_conditions = []
self.parsed_vegaZero['transform']['filter'] = final_filter_part
else:
# only single filter condition
self.parsed_vegaZero['transform']['filter'] = 'datum.' + ' '.join(filter_part_token).strip()
return self.parsed_vegaZero
def to_VegaLite(self, vega_zero, dataframe=None):
self.VegaLiteSpec = {
'bar': {
"mark": "bar",
"encoding": {
"x": {"field": "x", "type": "nominal"},
"y": {"field": "y", "type": "quantitative"}
}
},
'arc': {
"mark": "arc",
"encoding": {
"color": {"field": "x", "type": "nominal"},
"theta": {"field": "y", "type": "quantitative"}
}
},
'line': {
"mark": "line",
"encoding": {
"x": {"field": "x", "type": "nominal"},
"y": {"field": "y", "type": "quantitative"}
}
},
'point': {
"mark": "point",
"encoding": {
"x": {"field": "x", "type": "quantitative"},
"y": {"field": "y", "type": "quantitative"}
}
}
}
VegaZero = self.parse_vegaZero(vega_zero)
# assign some vega-zero keywords to the VegaLiteSpec object
if isinstance(dataframe, pandas.core.frame.DataFrame):
self.VegaLiteSpec[VegaZero['mark']]['data'] = dict()
self.VegaLiteSpec[VegaZero['mark']]['data']['values'] = json.loads(dataframe.to_json(orient='records'))
if VegaZero['mark'] != 'arc':
self.VegaLiteSpec[VegaZero['mark']]['encoding']['x']['field'] = VegaZero['encoding']['x']
self.VegaLiteSpec[VegaZero['mark']]['encoding']['y']['field'] = VegaZero['encoding']['y']['y']
if VegaZero['encoding']['y']['aggregate'] != '' and VegaZero['encoding']['y']['aggregate'] != 'none':
self.VegaLiteSpec[VegaZero['mark']]['encoding']['y']['aggregate'] = VegaZero['encoding']['y']['aggregate']
else:
self.VegaLiteSpec[VegaZero['mark']]['encoding']['color']['field'] = VegaZero['encoding']['x']
self.VegaLiteSpec[VegaZero['mark']]['encoding']['theta']['field'] = VegaZero['encoding']['y']['y']
if VegaZero['encoding']['y']['aggregate'] != '' and VegaZero['encoding']['y']['aggregate'] != 'none':
self.VegaLiteSpec[VegaZero['mark']]['encoding']['theta']['aggregate'] = VegaZero['encoding']['y'][
'aggregate']
if VegaZero['encoding']['color']['z'] != '':
self.VegaLiteSpec[VegaZero['mark']]['encoding']['color'] = {
'field': VegaZero['encoding']['color']['z'], 'type': 'nominal'
}
# it seems that the group will be performed by VegaLite defaultly, in our cases.
if VegaZero['transform']['group'] != '':
pass
if VegaZero['transform']['bin']['axis'] != '':
if VegaZero['transform']['bin']['axis'] == 'x':
self.VegaLiteSpec[VegaZero['mark']]['encoding']['x']['type'] = 'temporal'
if VegaZero['transform']['bin']['type'] in ['date', 'year', 'week', 'month']:
self.VegaLiteSpec[VegaZero['mark']]['encoding']['x']['timeUnit'] = VegaZero['transform']['bin']['type']
elif VegaZero['transform']['bin']['type'] == 'weekday':
self.VegaLiteSpec[VegaZero['mark']]['encoding']['x']['timeUnit'] = 'week'
else:
print('Unknown binning step.')
if VegaZero['transform']['filter'] != '':
if 'transform' not in self.VegaLiteSpec[VegaZero['mark']]:
self.VegaLiteSpec[VegaZero['mark']]['transform'] = [{
"filter": VegaZero['transform']['filter']
}]
elif 'filter' not in self.VegaLiteSpec[VegaZero['mark']]['transform']:
self.VegaLiteSpec[VegaZero['mark']]['transform'].append({
"filter": VegaZero['transform']['filter']
})
else:
self.VegaLiteSpec[VegaZero['mark']]['transform']['filter'] += ' & ' + VegaZero['transform']['filter']
if VegaZero['transform']['topk'] != '':
if VegaZero['transform']['sort']['axis'] == 'x':
sort_field = VegaZero['encoding']['x']
elif VegaZero['transform']['sort']['axis'] == 'y':
sort_field = VegaZero['encoding']['y']['y']
else:
print('Unknown sorting field: ', VegaZero['transform']['sort']['axis'])
sort_field = VegaZero['transform']['sort']['axis']
if VegaZero['transform']['sort']['type'] == 'desc':
sort_order = 'descending'
else:
sort_order = 'ascending'
if 'transform' in self.VegaLiteSpec[VegaZero['mark']]:
current_filter = self.VegaLiteSpec[VegaZero['mark']]['transform'][0]['filter']
self.VegaLiteSpec[VegaZero['mark']]['transform'][0][
'filter'] = current_filter + ' & ' + "datum.rank <= " + str(VegaZero['transform']['topk'])
self.VegaLiteSpec[VegaZero['mark']]['transform'].insert(0, {
"window": [{
"field": sort_field,
"op": "dense_rank",
"as": "rank"
}],
"sort": [{"field": sort_field, "order": sort_order}]
})
else:
self.VegaLiteSpec[VegaZero['mark']]['transform'] = [
{
"window": [{
"field": sort_field,
"op": "dense_rank",
"as": "rank"
}],
"sort": [{"field": sort_field, "order": sort_order}]
},
{
"filter": "datum.rank <= " + str(VegaZero['transform']['topk'])
}
]
if VegaZero['transform']['sort']['axis'] != '':
if VegaZero['transform']['sort']['axis'] == 'x':
if VegaZero['transform']['sort']['type'] == 'desc':
self.VegaLiteSpec[VegaZero['mark']]['encoding']['y']['sort'] = '-x'
else:
self.VegaLiteSpec[VegaZero['mark']]['encoding']['y']['sort'] = 'x'
else:
if VegaZero['transform']['sort']['type'] == 'desc':
self.VegaLiteSpec[VegaZero['mark']]['encoding']['x']['sort'] = '-y'
else:
self.VegaLiteSpec[VegaZero['mark']]['encoding']['x']['sort'] = 'y'
return self.VegaLiteSpec[VegaZero['mark']]
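# Usage sketch (illustrative; the Vega-Zero query and the DataFrame column are made up):
#     import pandas as pd
#     df = pd.DataFrame({'origin': ['USA', 'Japan', 'USA']})
#     spec = VegaZero2VegaLite().to_VegaLite(
#         'mark bar data cars encoding x origin y aggregate count origin transform group x', df)
#     # -> a Vega-Lite dict with mark 'bar', x = origin (nominal), y = count of origin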
| 2.984375 | 3 |
utils/dancer.py | kmzbrnoI/ac-python | 0 | 4005 | """Library for executing user-defined dance."""
import logging
from typing import Any, Dict, Optional, Callable
import datetime
import ac
import ac.blocks
from ac import ACs, AC
JC = Dict[str, Any]
class DanceStartException(Exception):
pass
class Step:
"""Base class for all specific dance steps."""
def update(self, acn: AC) -> None:
pass
def on_start(self, acn: AC) -> None:
pass
def disp_str(self) -> str:
return ''
class JCNotFoundException(DanceStartException):
pass
class StepJC(Step):
"""
Process jc 'name'. If processed already, skip processing and continue.
"""
name_to_id: Dict[str, int] = {}
def __init__(self, name: str, type_: str = 'VC') -> None:
self.jc: Optional[JC] = None
self.type = type_
self.name = name
def update(self, acn: AC) -> None:
assert isinstance(acn, DanceAC)
if self.jc is None:
jcid = self.get_jc_id(self.name, acn)
self.jc = acn.pt_get(f'/jc/{jcid}?state=true')['jc']
if self.jc['state']['active']:
self.jc = None
acn.step_done()
return
result = acn.pt_put(f'/jc/{self.jc["id"]}/state', {})
if result['success']:
self.jc = None
acn.step_done()
def on_start(self, acn: AC) -> None:
self.get_jc_id(self.name, acn)
def get_jc_id(self, name: str, acn: AC) -> int:
if not StepJC.name_to_id:
jcs = acn.pt_get('/jc')['jc']
StepJC.name_to_id = {
jc['name']: jc['id']
for jc in jcs if jc['type'] == self.type
}
if name not in StepJC.name_to_id.keys():
raise JCNotFoundException(f'Jízdní cesta {self.name} neexistuje!')
return StepJC.name_to_id[name]
def disp_str(self) -> str:
return f'Stavění JC {self.name}'
class StepDelay(Step):
"""Delay any time."""
def __init__(self, delay: datetime.timedelta) -> None:
self.delay = delay
self.finish: Optional[datetime.datetime] = None
def update(self, acn: AC) -> None:
assert isinstance(acn, DanceAC)
if self.finish is None:
self.finish = datetime.datetime.now() + self.delay
if datetime.datetime.now() > self.finish:
self.finish = None
acn.step_done()
def disp_str(self) -> str:
return f'Čekání {self.delay}'
class BlockNotFoundException(DanceStartException):
pass
class StepWaitForBlock(Step):
"""Wait for specific state of any block. See examples below."""
name_to_id: Dict[str, int] = {}
def __init__(self, name: str, checker: Callable[[ac.Block], bool]) -> None:
self.name = name
self.checker = checker
self.block: Optional[ac.Block] = None
def update(self, acn: AC) -> None:
assert isinstance(acn, DanceAC)
if self.block is None:
blockid = self.get_block_id(self.name, acn)
self.block = acn.pt_get(f'/blocks/{blockid}?state=true')['block']
if self.checker(self.block):
self.block = None
acn.step_done()
else:
ac.blocks.register([self.block['id']])
def on_start(self, acn: AC) -> None:
self.get_block_id(self.name, acn)
def on_block_change(self, acn: AC, block: ac.Block) -> None:
assert isinstance(acn, DanceAC)
if self.block is None or block['id'] != self.block['id']:
return
if self.checker(block):
ac.blocks.unregister([self.block['id']])
self.block = None
acn.step_done()
def get_block_id(self, name: str, acn: AC) -> int:
if not StepWaitForBlock.name_to_id:
blocks = acn.pt_get('/blocks')['blocks']
StepWaitForBlock.name_to_id = {
block['name']: block['id'] for block in blocks
}
if name not in StepWaitForBlock.name_to_id.keys():
            raise BlockNotFoundException(f"Block {self.name} does not exist!")
return StepWaitForBlock.name_to_id[name]
def disp_str(self) -> str:
        return f'Waiting for state of block {self.name}'
def track_is_occupied(block: ac.Block) -> bool:
return bool(block['blockState']['state'] == 'occupied')
class DanceAC(AC):
"""This AC executes predefined steps."""
def __init__(self, id_: str, password: str,
steps: Dict[int, Step]) -> None:
AC.__init__(self, id_, password)
self.steps = steps
self.stepi = 0
def on_start(self) -> None:
logging.info('Start')
for stepi, step in self.steps.items():
try:
step.on_start(self)
except DanceStartException as e:
                self.disp_error(f'Step {stepi}: '+str(e))
self.done()
return
self.stepi = 1
self.send_step()
self.on_update()
def on_stop(self) -> None:
self.statestr = ''
self.statestr_send()
def on_update(self) -> None:
AC.on_update(self)
if not self.running():
return
if self.stepi in self.steps:
self.steps[self.stepi].update(self)
else:
logging.info('Done')
self.done()
def step_done(self) -> None:
logging.info(f'Step {self.stepi} done, '
f'going to step {self.stepi+1}...')
self.stepi += 1
self.send_step()
self.on_update()
def send_step(self) -> None:
if self.stepi in self.steps.keys():
if self.running():
description = self.steps[self.stepi].disp_str()
                self.statestr = f'Current step: {self.stepi}: {description}'
self.statestr_send()
def on_block_change(self, block: ac.Block) -> None:
if (self.running() and
isinstance(self.steps[self.stepi], StepWaitForBlock)):
self.steps[self.stepi].on_block_change(self, block) # type: ignore
@ac.blocks.on_block_change()
def _on_block_change(block: ac.Block) -> None:
for acn in ACs.values():
if isinstance(acn, DanceAC):
acn.on_block_change(block)
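# Illustrative sketch (not part of the original module; the AC id, password and
# the route/block names below are assumptions): a dance is defined by giving
# DanceAC a numbered dict of Step instances built from the classes above, e.g.
#
#   steps = {
#       1: StepJC('Route 1'),
#       2: StepWaitForBlock('Track 1', track_is_occupied),
#       3: StepDelay(datetime.timedelta(seconds=10)),
#   }
#   dance = DanceAC('dance-1', 'password', steps)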
| 2.640625 | 3 |
praw/models/reddit/mixins/reportable.py | zachwylde00/praw | 38 | 4006 | <gh_stars>10-100
"""Provide the ReportableMixin class."""
from ....const import API_PATH
class ReportableMixin:
"""Interface for RedditBase classes that can be reported."""
def report(self, reason):
"""Report this object to the moderators of its subreddit.
:param reason: The reason for reporting.
Raises :class:`.APIException` if ``reason`` is longer than 100
characters.
Example usage:
.. code-block:: python
submission = reddit.submission(id='5or86n')
submission.report('report reason')
comment = reddit.comment(id='dxolpyc')
comment.report('report reason')
"""
self._reddit.post(
API_PATH["report"], data={"id": self.fullname, "reason": reason}
)
| 2.8125 | 3 |
defense/jpeg_compress.py | TrustworthyDL/LeBA | 19 | 4007 | # JPEG-compress a tensor image via a PIL round-trip at quality 75.
# Imports added for completeness: the original snippet assumes these are already in scope.
from io import BytesIO
import torch
from PIL import Image
from torchvision.transforms import ToPILImage, ToTensor
def _jpeg_compression(im):
assert torch.is_tensor(im)
im = ToPILImage()(im)
savepath = BytesIO()
im.save(savepath, 'JPEG', quality=75)
im = Image.open(savepath)
im = ToTensor()(im)
return im | 2.53125 | 3 |
mellon/factories/filesystem/file.py | LaudateCorpus1/mellon | 5 | 4008 | <reponame>LaudateCorpus1/mellon
import collections
import os.path
from zope import component
from zope import interface
from zope.component.factory import Factory
from sparc.configuration import container
import mellon
@interface.implementer(mellon.IByteMellonFile)
class MellonByteFileFromFilePathAndConfig(object):
def __init__(self, file_path, config):
self.file_path = file_path
self.config = config
def __str__(self):
return "byte file at location {}".format(self.file_path)
def __iter__(self):
with open(self.file_path, 'rb') as stream:
file_ = component.createObject(u'mellon.byte_file_from_stream', stream, self.config)
for snippet in file_:
yield snippet
mellonByteFileFromFilePathAndConfigFactory = Factory(MellonByteFileFromFilePathAndConfig)
@interface.implementer(mellon.IUnicodeMellonFile)
class MellonUnicodeFileFromFilePathAndConfig(object):
def __init__(self, file_path, config):
self.file_path = file_path
self.config = config
def __str__(self):
return "Unicode file at location {}".format(self.file_path)
def __iter__(self):
_end = 0
_buffer = collections.deque()
_eof_buffer = collections.deque()
with open(str(self.file_path), 'rU') as stream:
file_ = component.createObject(u'mellon.unicode_file_from_stream', stream, self.config)
for snippet in file_:
yield snippet
mellonUnicodeFileFromFilePathAndConfigFactory = Factory(MellonUnicodeFileFromFilePathAndConfig)
@interface.implementer(mellon.IMellonFileProvider)
class MellonFileProviderForRecursiveDirectoryConfig(object):
def __init__(self, config):
"""Init
Args:
config: sparc.configuration.container.ISparcAppPyContainerConfiguration
provider with
mellon.factories.filesystem[configure.yaml:FileSystemDir]
and mellon[configure.yaml:MellonSnippet] entries.
"""
self.config = config
def __iter__(self):
base_path = container.IPyContainerConfigValue(self.config).\
get('FileSystemDir')['directory']
for d, dirs, files in os.walk(base_path):
for f in files:
path = os.path.join(d, f)
if not os.path.isfile(path):
continue
#get interface-assigned string (IPath)
path = component.createObject(u'mellon.filesystem_path', path)
if mellon.IBinaryChecker(path).check():
yield component.createObject(\
u'mellon.factories.filesystem.byte_file', path, self.config)
else:
yield component.createObject(\
u'mellon.factories.filesystem.unicode_file', path, self.config)
mellonFileProviderForRecursiveDirectoryConfigFactory = Factory(MellonFileProviderForRecursiveDirectoryConfig)
interface.alsoProvides(mellonFileProviderForRecursiveDirectoryConfigFactory, mellon.IMellonFileProviderFactory)
| 2.09375 | 2 |
dltb/thirdparty/datasource/__init__.py | CogSciUOS/DeepLearningToolbox | 2 | 4009 | <gh_stars>1-10
"""Predefined Datasources.
"""
# toolbox imports
from ...datasource import Datasource
Datasource.register_instance('imagenet-val', __name__ + '.imagenet',
'ImageNet', section='val') # section='train'
Datasource.register_instance('dogsandcats', __name__ + '.dogsandcats',
'DogsAndCats')
Datasource.register_instance('widerface', __name__ + '.widerface', 'WiderFace')
Datasource.register_instance('fgnet', __name__ + '.fgnet', 'FGNet')
Datasource.register_instance('Helen', __name__ + '.helen', 'Helen')
Datasource.register_instance('lfw', __name__ + '.lfw', 'LabeledFacesInTheWild')
Datasource.register_instance('ms-celeb-1m', __name__ + '.face', 'MSCeleb1M')
Datasource.register_instance('5celeb', __name__ + '.fivecelebface',
'FiveCelebFace')
Datasource.register_instance('ffhq', __name__ + '.ffhq', 'FFHQ')
Datasource.register_instance('celeba', __name__ + '.celeba', 'CelebA')
Datasource.register_instance('celeba-aligned', __name__ + '.celeba',
'CelebA', aligned=True)
Datasource.register_class('WiderFace', __name__ + '.widerface')
| 1.601563 | 2 |
tests/test_results.py | babinyurii/RECAN | 7 | 4010 | <reponame>babinyurii/RECAN
# -*- coding: utf-8 -*-
"""
Created on Tue Oct 22 15:58:44 2019
@author: babin
"""
posits_def = [251, 501, 751, 1001, 1251, 1501, 1751, 2001, 2251, 2501, 2751, 3001, 3215]
dist_whole_align_ref = {'AB048704.1_genotype_C_':
[0.88,
0.938,
0.914,
0.886,
0.89,
0.908,
0.938,
0.948,
0.948,
0.886,
0.852,
0.8580645161290322,
0.827906976744186],
'AB010291.1_Bj':
[0.968,
0.986,
0.946,
0.92,
0.94,
0.964,
0.95,
0.892,
0.914,
0.9359999999999999,
0.924,
0.935483870967742,
0.9255813953488372]}
dist_win_250_shift_100_ref = {'AB048704.1_genotype_C_':
[0.87,
0.9,
0.9359999999999999,
0.924,
0.944,
0.944,
0.948,
0.888,
0.868,
0.86,
0.888,
0.9,
0.908,
0.88,
0.916,
0.924,
0.94,
0.96,
0.948,
0.9319999999999999,
0.944,
0.9359999999999999,
0.96,
0.9319999999999999,
0.864,
0.8200000000000001,
0.88,
0.892,
0.88,
0.844,
0.827906976744186,
0.8608695652173913,
0.9333333333333333],
'AB010291.1_Bj': [0.95,
0.984,
0.988,
0.984,
0.98,
0.98,
0.98,
0.92,
0.896,
0.888,
0.928,
0.94,
0.96,
0.948,
0.976,
0.976,
0.968,
0.952,
0.896,
0.844,
0.86,
0.908,
0.976,
0.948,
0.916,
0.904,
0.9359999999999999,
0.948,
0.94,
0.9359999999999999,
0.9255813953488372,
0.9217391304347826,
0.8666666666666667]}
dist_whole_align_def_params_k2p = {'AB048704.1_genotype_C_':
[0.8681719101219889,
0.9351731626008992,
0.9083728156043438,
0.8750271283550077,
0.879929128403318,
0.9015597329057567,
0.9351297624958606,
0.9459250442159328,
0.9459717143364927,
0.8760802380420646,
0.8343273948904422,
0.841497348083017,
0.8033200314745574],
'AB010291.1_Bj':
[0.9671530980992109,
0.9858456107911616,
0.9438329817983037,
0.9150569322625627,
0.9372918193486423,
0.9630251291666885,
0.9481456308045444,
0.8823622232289046,
0.9077377632214376,
0.9325670957791264,
0.919398127767968,
0.9323907045444492,
0.9211964811945209]}
| 1.328125 | 1 |
lxmls/readers/simple_data_set.py | SimonSuster/lxmls-toolkit | 1 | 4011 | import numpy as np
# This class generates a 2D dataset with two classes, "positive" and "negative".
# Each class follows a Gaussian distribution.
class SimpleDataSet():
    ''' A simple two-dimensional dataset for visualization purposes. The data set contains points from two Gaussians with means u_i and standard deviations std_i. A usage sketch appears at the end of this module.'''
def __init__(self,nr_examples=100,g1 = [[-5,-5],1], g2 = [[5,5],1],balance=0.5,split=[0.8,0,0.2]):
        nr_positive = int(nr_examples*balance) # number of examples of "positive" class
        nr_negative = nr_examples - nr_positive # number of examples of "negative" class
self.mean1 = g1[0] # mean of positive class
self.mean2 = g2[0] # mean of negative class
self.variance1 = g1[1] #
self.variance2 = g2[1]
self.balance = balance
self.nr_points = nr_examples
X_pos_1 = np.random.normal(g1[0][0],g1[1],[nr_positive,1])
X_pos_2 = np.random.normal(g1[0][1],g1[1],[nr_positive,1])
X_pos = np.hstack([X_pos_1,X_pos_2])
X_neg_1 = np.random.normal(g2[0][0],g2[1],[nr_negative,1])
X_neg_2 = np.random.normal(g2[0][1],g2[1],[nr_negative,1])
X_neg = np.hstack([X_neg_1,X_neg_2])
        y_pos = np.zeros([nr_positive,1],dtype=int)
        y_neg = np.ones([nr_negative,1],dtype=int)
X = np.vstack([X_pos, X_neg])
y = np.vstack([y_pos, y_neg])
perm = np.random.permutation(nr_examples)
self.split = split
self.X = X[perm,:]
self.y = y[perm]
train_y,dev_y,test_y,train_X,dev_X,test_X = split_train_dev_test(self.X,self.y,split[0],split[1],split[2])
self.train_X = train_X
self.train_y = train_y
self.dev_X = dev_X
self.dev_y = dev_y
self.test_X = test_X
self.test_y = test_y
def get_name(self):
return "Simple Data Set -- Mean1= (%.2f,%.2f) Var1 = %.2f Mean2= (%.2f,%.2f) Var2= %.2f \nNr. Points=%.2f, Balance=%.2f Train-Dev-Test (%.2f,.%.2f,%.2f)"%(self.mean1[0] ,self.mean1[1], self.variance1, self.mean2[0], self.mean2[1], self.variance2, self.nr_points, self.balance, self.split[0],self.split[1],self.split[2])
def get_bayes_optimal(self):
params = np.zeros((3,2))
p1 = self.balance
p2 = 1.0 - self.balance
params[0,0] = -1.0/(2.0*self.variance1) * np.dot(self.mean1,self.mean1) + np.log(p1)
params[0,1] = -1.0/(2.0*self.variance2) * np.dot(self.mean2,self.mean2) + np.log(p2)
params[1,0] = 1.0/self.variance1 * self.mean1[0]
params[2,0] = 1.0/self.variance1 * self.mean1[1]
params[1,1] = 1.0/self.variance2 * self.mean2[0]
params[2,1] = 1.0/self.variance2 * self.mean2[1]
        print(params)
return params
def plot_data(self,params=np.array([]),name="Naive Bayes", print_bayes_opt = True):
import matplotlib.pyplot as plt
fig = plt.figure()
fig.suptitle(self.get_name())
axis = fig.add_subplot(1,1,1)
idx,_ = np.nonzero(self.train_y == 0)
idx2,_ = np.nonzero(self.train_y == 1)
idx3,_ = np.nonzero(self.test_y == 0)
idx4,_ = np.nonzero(self.test_y == 1)
axis.scatter(self.train_X[idx,0],self.train_X[idx,1],s=30,c="red",marker='s')
axis.scatter(self.train_X[idx2,0],self.train_X[idx2,1],s=30,c="blue",marker='s')
if(idx3.shape[0] > 0):
axis.scatter(self.test_X[idx3,0],self.test_X[idx3,1],s=30,c="red",marker='o')
if(idx4.shape[0] > 0):
axis.scatter(self.test_X[idx4,0],self.test_X[idx4,1],s=30,c="blue",marker='o')
## Plot Bayes optimal
if(print_bayes_opt):
bayes_opt_params = self.get_bayes_optimal()
self.add_line(fig,axis,bayes_opt_params, "Bayes Optimal","black")
axis.legend()
# fig.show()
return fig,axis
def add_line(self,fig,axis,params,name,colour):
x_max = np.max(self.train_X)
x_min = np.min(self.train_X)
x = np.arange(x_min,x_max,0.1,dtype = "float")
y_star = ((params[1,1]-params[1,0])*x + (params[0,1] - params[0,0]))/(params[2,0] -params[2,1])
axis.plot(x,y_star,'g--',c=colour, label=name, linewidth=2)
axis.legend()
# fig.show()
return fig,axis
def split_train_dev_test(X,y,train_per,dev_per,test_per):
if(train_per + dev_per + test_per > 1):
print "Train Dev Test split should sum to one"
return
dim = y.shape[0]
split1 = int(dim*train_per)
if(dev_per ==0):
train_y,test_y = np.vsplit(y,[split1])
dev_y = np.array([])
train_X = X[0:split1,:]
dev_X = np.array([])
test_X = X[split1:,:]
else:
split2 = int(dim*(train_per+dev_per))
        print(split2)
train_y,dev_y,test_y = np.vsplit(y,(split1,split2))
train_X = X[0:split1,:]
dev_X = X[split1:split2,:]
test_X = X[split2:,:]
return train_y,dev_y,test_y,train_X,dev_X,test_X
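# Illustrative usage sketch (not part of the original module): generate a toy
# two-Gaussian dataset and render it together with the Bayes-optimal boundary.
if __name__ == "__main__":
    demo_set = SimpleDataSet(nr_examples=200, balance=0.5, split=[0.8, 0, 0.2])
    demo_fig, demo_axis = demo_set.plot_data()
    demo_fig.savefig("simple_data_set_demo.png")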
| 3.40625 | 3 |
set1/c06_attack_repeating_key_xor.py | kangtastic/cryptopals | 1 | 4012 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# Break repeating-key XOR
#
# It is officially on, now.
#
# This challenge isn't conceptually hard, but it involves actual
# error-prone coding. The other challenges in this set are there to bring
# you up to speed. This one is there to qualify you. If you can do this
# one, you're probably just fine up to Set 6.
#
# There's a file here:
#
# http://cryptopals.com/static/challenge-data/6.txt
#
# It's been base64'd after being encrypted with repeating-key XOR.
#
# Decrypt it.
#
# Here's how:
#
# 1. Let KEYSIZE be the guessed length of the key; try values from 2 to
# (say) 40.
# 2. Write a function to compute the edit distance/Hamming distance between
# two strings. The Hamming distance is just the number of differing
# bits. The distance between:
#
# this is a test
#
# and
#
# wokka wokka!!!
#
# is 37. *Make sure your code agrees before you proceed.*
# 3. For each KEYSIZE, take the first KEYSIZE worth of bytes, and the
# second KEYSIZE worth of bytes, and find the edit distance between them.
# Normalize this result by dividing by KEYSIZE.
# 4. The KEYSIZE with the smallest normalized edit distance is probably the
# key. You could proceed perhaps with the smallest 2-3 KEYSIZE values.
# Or take 4 KEYSIZE blocks instead of 2 and average the distances.
# 5. Now that you probably know the KEYSIZE: break the ciphertext into
# blocks of KEYSIZE length.
# 6. Now transpose the blocks: make a block that is the first byte of every
# block, and a block that is the second byte of every block, and so on.
# 7. Solve each block as if it was single-character XOR. You already have
# code to do this.
# 8. For each block, the single-byte XOR key that produces the best looking
# histogram is the repeating-key XOR key byte for that block. Put them
# together and you have the key.
#
# This code is going to turn out to be surprisingly useful later on. Breaking
# repeating-key XOR ("Vigenère") statistically is obviously an academic
# exercise, a "Crypto 101" thing. But more people "know how" to break it than
# can actually break it, and a similar technique breaks something much more
# important.
#
# No, that's not a mistake.
#
# We get more tech support questions for this challenge than any of the
# other ones. We promise, there aren't any blatant errors in this text.
# In particular: the "wokka wokka!!!" edit distance really is 37.
#
import inspect
import os
import sys
from itertools import zip_longest
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(inspect.getfile(lambda: 0)))))
from util.loader import loader
from util.text import englishness, repeating_key_xor, single_byte_xor
# Lookup table for the number of 1 bits in a nibble. (Nybble, quartet, etc.)
NIBBLE_BITS = [0, 1, 1, 2, 1, 2, 2, 3, 1, 2, 2, 3, 2, 3, 3, 4]
def likely_key_sizes(bs, lower=2, upper=40, n=3):
"""Finds a repeating-key-XOR'd ciphertext's most likely key sizes."""
sizes = {}
for size in range(lower, upper + 1):
normalized_distance = 0
for i in range(0, len(bs) - size * 2, size * 2):
bs1, bs2 = bs[i : i + size], bs[i + size : i + size * 2]
normalized_distance += hamming_distance(bs1, bs2) / 2
sizes.update({size: normalized_distance})
return sorted(sizes, key=lambda k: sizes[k])[:n]
def hamming_distance(bs1, bs2):
"""Finds the Hamming distance between two bytestrings."""
distance = 0
for b1, b2 in zip_longest(bs1, bs2, fillvalue=0):
b = b1 ^ b2
distance += NIBBLE_BITS[b >> 4] + NIBBLE_BITS[b & 0xF]
return distance
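# Sanity check for the edit-distance step described in the challenge text above,
# which insists the Hamming distance between these two strings really is 37.
# (Added for illustration; not part of the original solution file.)
def _hamming_distance_self_test():
    assert hamming_distance(b"this is a test", b"wokka wokka!!!") == 37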
def main():
ctext = loader("6.txt", "base64", split=False)
ptext, key, high_score = b"", b"", 0
for size in likely_key_sizes(ctext):
blocks = [ctext[i : i + size] for i in range(0, len(ctext), size)]
transposed = zip_longest(*blocks, fillvalue=0)
likely_key = b"".join(
single_byte_xor(tblock, key=True) for tblock in transposed
)
candidate = repeating_key_xor(ctext, likely_key)
score = englishness(candidate)
if score > high_score:
ptext, key, high_score = candidate, likely_key, score
print(f"Key: '{key.decode()}'")
print()
print(ptext.decode())
if __name__ == "__main__":
try:
main()
except KeyboardInterrupt:
pass
# Output:
#
# Key: 'Terminator X: Bring the noise' (29 bytes)
#
# I'm back and I'm ringin' the bell
# A rockin' on the mike while the fly girls yell
# In ecstasy in the back of me
# Well that's my DJ Deshay cuttin' all them Z's
# Hittin' hard and the girlies goin' crazy
# Vanilla's on the mike, man I'm not lazy.
#
# <remainder of output omitted>
#
| 3.6875 | 4 |
c2nl/models/transformer.py | kopf-yhs/ncscos | 22 | 4013 | import torch
import torch.nn as nn
import torch.nn.functional as f
from prettytable import PrettyTable
from c2nl.modules.char_embedding import CharEmbedding
from c2nl.modules.embeddings import Embeddings
from c2nl.modules.highway import Highway
from c2nl.encoders.transformer import TransformerEncoder
from c2nl.decoders.transformer import TransformerDecoder
from c2nl.inputters import constants
from c2nl.modules.global_attention import GlobalAttention
from c2nl.modules.copy_generator import CopyGenerator, CopyGeneratorCriterion
from c2nl.utils.misc import sequence_mask
class Embedder(nn.Module):
def __init__(self, args):
super(Embedder, self).__init__()
self.enc_input_size = 0
self.dec_input_size = 0
# at least one of word or char embedding options should be True
assert args.use_src_word or args.use_src_char
assert args.use_tgt_word or args.use_tgt_char
self.use_src_word = args.use_src_word
self.use_tgt_word = args.use_tgt_word
if self.use_src_word:
self.src_word_embeddings = Embeddings(args.emsize,
args.src_vocab_size,
constants.PAD)
self.enc_input_size += args.emsize
if self.use_tgt_word:
self.tgt_word_embeddings = Embeddings(args.emsize,
args.tgt_vocab_size,
constants.PAD)
self.dec_input_size += args.emsize
self.use_src_char = args.use_src_char
self.use_tgt_char = args.use_tgt_char
if self.use_src_char:
assert len(args.filter_size) == len(args.nfilters)
self.src_char_embeddings = CharEmbedding(args.n_characters,
args.char_emsize,
args.filter_size,
args.nfilters)
self.enc_input_size += sum(list(map(int, args.nfilters)))
self.src_highway_net = Highway(self.enc_input_size, num_layers=2)
if self.use_tgt_char:
assert len(args.filter_size) == len(args.nfilters)
self.tgt_char_embeddings = CharEmbedding(args.n_characters,
args.char_emsize,
args.filter_size,
args.nfilters)
self.dec_input_size += sum(list(map(int, args.nfilters)))
self.tgt_highway_net = Highway(self.dec_input_size, num_layers=2)
self.use_type = args.use_code_type
if self.use_type:
self.type_embeddings = nn.Embedding(len(constants.TOKEN_TYPE_MAP),
self.enc_input_size)
self.src_pos_emb = args.src_pos_emb
self.tgt_pos_emb = args.tgt_pos_emb
self.no_relative_pos = all(v == 0 for v in args.max_relative_pos)
if self.src_pos_emb and self.no_relative_pos:
self.src_pos_embeddings = nn.Embedding(args.max_src_len,
self.enc_input_size)
if self.tgt_pos_emb:
self.tgt_pos_embeddings = nn.Embedding(args.max_tgt_len + 2,
self.dec_input_size)
self.dropout = nn.Dropout(args.dropout_emb)
def forward(self,
sequence,
sequence_char,
sequence_type=None,
mode='encoder',
step=None):
if mode == 'encoder':
word_rep = None
if self.use_src_word:
word_rep = self.src_word_embeddings(sequence.unsqueeze(2)) # B x P x d
if self.use_src_char:
char_rep = self.src_char_embeddings(sequence_char) # B x P x f
if word_rep is None:
word_rep = char_rep
else:
word_rep = torch.cat((word_rep, char_rep), 2) # B x P x d+f
word_rep = self.src_highway_net(word_rep) # B x P x d+f
if self.use_type:
type_rep = self.type_embeddings(sequence_type)
word_rep = word_rep + type_rep
if self.src_pos_emb and self.no_relative_pos:
pos_enc = torch.arange(start=0,
end=word_rep.size(1)).type(torch.LongTensor)
pos_enc = pos_enc.expand(*word_rep.size()[:-1])
if word_rep.is_cuda:
pos_enc = pos_enc.cuda()
pos_rep = self.src_pos_embeddings(pos_enc)
word_rep = word_rep + pos_rep
elif mode == 'decoder':
word_rep = None
if self.use_tgt_word:
word_rep = self.tgt_word_embeddings(sequence.unsqueeze(2)) # B x P x d
if self.use_tgt_char:
char_rep = self.tgt_char_embeddings(sequence_char) # B x P x f
if word_rep is None:
word_rep = char_rep
else:
word_rep = torch.cat((word_rep, char_rep), 2) # B x P x d+f
word_rep = self.tgt_highway_net(word_rep) # B x P x d+f
if self.tgt_pos_emb:
if step is None:
pos_enc = torch.arange(start=0,
end=word_rep.size(1)).type(torch.LongTensor)
else:
pos_enc = torch.LongTensor([step]) # used in inference time
pos_enc = pos_enc.expand(*word_rep.size()[:-1])
if word_rep.is_cuda:
pos_enc = pos_enc.cuda()
pos_rep = self.tgt_pos_embeddings(pos_enc)
word_rep = word_rep + pos_rep
else:
raise ValueError('Unknown embedder mode!')
word_rep = self.dropout(word_rep)
return word_rep
class Encoder(nn.Module):
def __init__(self,
args,
input_size):
super(Encoder, self).__init__()
self.transformer = TransformerEncoder(num_layers=args.nlayers,
d_model=input_size,
heads=args.num_head,
d_k=args.d_k,
d_v=args.d_v,
d_ff=args.d_ff,
dropout=args.trans_drop,
max_relative_positions=args.max_relative_pos,
use_neg_dist=args.use_neg_dist)
self.use_all_enc_layers = args.use_all_enc_layers
if self.use_all_enc_layers:
self.layer_weights = nn.Linear(input_size, 1, bias=False)
def count_parameters(self):
return self.transformer.count_parameters()
def forward(self,
input,
input_len):
layer_outputs, _ = self.transformer(input, input_len) # B x seq_len x h
if self.use_all_enc_layers:
output = torch.stack(layer_outputs, dim=2) # B x seq_len x nlayers x h
layer_scores = self.layer_weights(output).squeeze(3)
layer_scores = f.softmax(layer_scores, dim=-1)
memory_bank = torch.matmul(output.transpose(2, 3),
layer_scores.unsqueeze(3)).squeeze(3)
else:
memory_bank = layer_outputs[-1]
return memory_bank, layer_outputs
class Decoder(nn.Module):
def __init__(self, args, input_size):
super(Decoder, self).__init__()
self.input_size = input_size
self.split_decoder = args.split_decoder and args.copy_attn
if self.split_decoder:
# Following (https://arxiv.org/pdf/1808.07913.pdf), we split decoder
self.transformer_c = TransformerDecoder(
num_layers=args.nlayers,
d_model=self.input_size,
heads=args.num_head,
d_k=args.d_k,
d_v=args.d_v,
d_ff=args.d_ff,
coverage_attn=args.coverage_attn,
dropout=args.trans_drop
)
self.transformer_d = TransformerDecoder(
num_layers=args.nlayers,
d_model=self.input_size,
heads=args.num_head,
d_k=args.d_k,
d_v=args.d_v,
d_ff=args.d_ff,
dropout=args.trans_drop
)
# To accomplish eq. 19 - 21 from `https://arxiv.org/pdf/1808.07913.pdf`
self.fusion_sigmoid = nn.Sequential(
nn.Linear(self.input_size * 2, self.input_size),
nn.Sigmoid()
)
self.fusion_gate = nn.Sequential(
nn.Linear(self.input_size * 2, self.input_size),
nn.ReLU()
)
else:
self.transformer = TransformerDecoder(
num_layers=args.nlayers,
d_model=self.input_size,
heads=args.num_head,
d_k=args.d_k,
d_v=args.d_v,
d_ff=args.d_ff,
coverage_attn=args.coverage_attn,
dropout=args.trans_drop
)
if args.reload_decoder_state:
state_dict = torch.load(
args.reload_decoder_state, map_location=lambda storage, loc: storage
)
self.decoder.load_state_dict(state_dict)
def count_parameters(self):
if self.split_decoder:
return self.transformer_c.count_parameters() + self.transformer_d.count_parameters()
else:
return self.transformer.count_parameters()
def init_decoder(self,
src_lens,
max_src_len):
if self.split_decoder:
state_c = self.transformer_c.init_state(src_lens, max_src_len)
state_d = self.transformer_d.init_state(src_lens, max_src_len)
return state_c, state_d
else:
return self.transformer.init_state(src_lens, max_src_len)
def decode(self,
tgt_words,
tgt_emb,
memory_bank,
state,
step=None,
layer_wise_coverage=None):
if self.split_decoder:
copier_out, attns = self.transformer_c(tgt_words,
tgt_emb,
memory_bank,
state[0],
step=step,
layer_wise_coverage=layer_wise_coverage)
dec_out, _ = self.transformer_d(tgt_words,
tgt_emb,
memory_bank,
state[1],
step=step)
f_t = self.fusion_sigmoid(torch.cat([copier_out, dec_out], dim=-1))
gate_input = torch.cat([copier_out, torch.mul(f_t, dec_out)], dim=-1)
decoder_outputs = self.fusion_gate(gate_input)
else:
decoder_outputs, attns = self.transformer(tgt_words,
tgt_emb,
memory_bank,
state,
step=step,
layer_wise_coverage=layer_wise_coverage)
return decoder_outputs, attns
def forward(self,
memory_bank,
memory_len,
tgt_pad_mask,
tgt_emb):
max_mem_len = memory_bank[0].shape[1] \
if isinstance(memory_bank, list) else memory_bank.shape[1]
state = self.init_decoder(memory_len, max_mem_len)
return self.decode(tgt_pad_mask, tgt_emb, memory_bank, state)
class Transformer(nn.Module):
"""Module that writes an answer for the question given a passage."""
def __init__(self, args, tgt_dict):
""""Constructor of the class."""
super(Transformer, self).__init__()
self.name = 'Transformer'
if len(args.max_relative_pos) != args.nlayers:
assert len(args.max_relative_pos) == 1
args.max_relative_pos = args.max_relative_pos * args.nlayers
self.embedder = Embedder(args)
self.encoder = Encoder(args, self.embedder.enc_input_size)
self.decoder = Decoder(args, self.embedder.dec_input_size)
self.layer_wise_attn = args.layer_wise_attn
self.generator = nn.Linear(self.decoder.input_size, args.tgt_vocab_size)
if args.share_decoder_embeddings:
if self.embedder.use_tgt_word:
assert args.emsize == self.decoder.input_size
self.generator.weight = self.embedder.tgt_word_embeddings.word_lut.weight
self._copy = args.copy_attn
if self._copy:
self.copy_attn = GlobalAttention(dim=self.decoder.input_size,
attn_type=args.attn_type)
self.copy_generator = CopyGenerator(self.decoder.input_size,
tgt_dict,
self.generator)
self.criterion = CopyGeneratorCriterion(vocab_size=len(tgt_dict),
force_copy=args.force_copy)
else:
self.criterion = nn.CrossEntropyLoss(reduction='none')
def _run_forward_ml(self,
code_word_rep,
code_char_rep,
code_type_rep,
code_len,
summ_word_rep,
summ_char_rep,
summ_len,
tgt_seq,
src_map,
alignment,
**kwargs):
batch_size = code_len.size(0)
# embed and encode the source sequence
code_rep = self.embedder(code_word_rep,
code_char_rep,
code_type_rep,
mode='encoder')
memory_bank, layer_wise_outputs = self.encoder(code_rep, code_len) # B x seq_len x h
# embed and encode the target sequence
summ_emb = self.embedder(summ_word_rep,
summ_char_rep,
mode='decoder')
summ_pad_mask = ~sequence_mask(summ_len, max_len=summ_emb.size(1))
enc_outputs = layer_wise_outputs if self.layer_wise_attn else memory_bank
layer_wise_dec_out, attns = self.decoder(enc_outputs,
code_len,
summ_pad_mask,
summ_emb)
decoder_outputs = layer_wise_dec_out[-1]
loss = dict()
target = tgt_seq[:, 1:].contiguous()
if self._copy:
# copy_score: batch_size, tgt_len, src_len
_, copy_score, _ = self.copy_attn(decoder_outputs,
memory_bank,
memory_lengths=code_len,
softmax_weights=False)
# mask copy_attn weights here if needed
if kwargs['code_mask_rep'] is not None:
mask = kwargs['code_mask_rep'].byte().unsqueeze(1) # Make it broadcastable.
copy_score.data.masked_fill_(mask, -float('inf'))
attn_copy = f.softmax(copy_score, dim=-1)
scores = self.copy_generator(decoder_outputs, attn_copy, src_map)
scores = scores[:, :-1, :].contiguous()
ml_loss = self.criterion(scores,
alignment[:, 1:].contiguous(),
target)
else:
scores = self.generator(decoder_outputs) # `batch x tgt_len x vocab_size`
scores = scores[:, :-1, :].contiguous() # `batch x tgt_len - 1 x vocab_size`
ml_loss = self.criterion(scores.view(-1, scores.size(2)),
target.view(-1))
ml_loss = ml_loss.view(*scores.size()[:-1])
ml_loss = ml_loss.mul(target.ne(constants.PAD).float())
ml_loss = ml_loss.sum(1) * kwargs['example_weights']
loss['ml_loss'] = ml_loss.mean()
loss['loss_per_token'] = ml_loss.div((summ_len - 1).float()).mean()
return loss
def forward(self,
code_word_rep,
code_char_rep,
code_type_rep,
code_len,
summ_word_rep,
summ_char_rep,
summ_len,
tgt_seq,
src_map,
alignment,
**kwargs):
"""
Input:
- code_word_rep: ``(batch_size, max_doc_len)``
- code_char_rep: ``(batch_size, max_doc_len, max_word_len)``
- code_len: ``(batch_size)``
- summ_word_rep: ``(batch_size, max_que_len)``
- summ_char_rep: ``(batch_size, max_que_len, max_word_len)``
- summ_len: ``(batch_size)``
- tgt_seq: ``(batch_size, max_len)``
Output:
- ``(batch_size, P_LEN)``, ``(batch_size, P_LEN)``
"""
if self.training:
return self._run_forward_ml(code_word_rep,
code_char_rep,
code_type_rep,
code_len,
summ_word_rep,
summ_char_rep,
summ_len,
tgt_seq,
src_map,
alignment,
**kwargs)
else:
return self.decode(code_word_rep,
code_char_rep,
code_type_rep,
code_len,
src_map,
alignment,
**kwargs)
def __tens2sent(self,
t,
tgt_dict,
src_vocabs):
words = []
for idx, w in enumerate(t):
widx = w[0].item()
if widx < len(tgt_dict):
words.append(tgt_dict[widx])
else:
widx = widx - len(tgt_dict)
words.append(src_vocabs[idx][widx])
return words
def __generate_sequence(self,
params,
choice='greedy',
tgt_words=None):
batch_size = params['memory_bank'].size(0)
use_cuda = params['memory_bank'].is_cuda
if tgt_words is None:
tgt_words = torch.LongTensor([constants.BOS])
if use_cuda:
tgt_words = tgt_words.cuda()
tgt_words = tgt_words.expand(batch_size).unsqueeze(1) # B x 1
tgt_chars = None
if self.embedder.use_tgt_char:
tgt_chars = params['tgt_dict'].word_to_char_ids(constants.BOS_WORD)
tgt_chars = torch.Tensor(tgt_chars.tolist()).unsqueeze(0)
tgt_chars = tgt_chars.repeat(batch_size, 1)
tgt_chars = tgt_chars.to(tgt_words).unsqueeze(1)
dec_preds = []
copy_info = []
attentions = []
dec_log_probs = []
acc_dec_outs = []
max_mem_len = params['memory_bank'][0].shape[1] \
if isinstance(params['memory_bank'], list) else params['memory_bank'].shape[1]
dec_states = self.decoder.init_decoder(params['src_len'], max_mem_len)
attns = {"coverage": None}
enc_outputs = params['layer_wise_outputs'] if self.layer_wise_attn \
else params['memory_bank']
# +1 for <EOS> token
for idx in range(params['max_len'] + 1):
tgt = self.embedder(tgt_words,
tgt_chars,
mode='decoder',
step=idx)
tgt_pad_mask = tgt_words.data.eq(constants.PAD)
layer_wise_dec_out, attns = self.decoder.decode(tgt_pad_mask,
tgt,
enc_outputs,
dec_states,
step=idx,
layer_wise_coverage=attns['coverage'])
decoder_outputs = layer_wise_dec_out[-1]
acc_dec_outs.append(decoder_outputs.squeeze(1))
if self._copy:
_, copy_score, _ = self.copy_attn(decoder_outputs,
params['memory_bank'],
memory_lengths=params['src_len'],
softmax_weights=False)
# mask copy_attn weights here if needed
if params['src_mask'] is not None:
mask = params['src_mask'].byte().unsqueeze(1) # Make it broadcastable.
copy_score.data.masked_fill_(mask, -float('inf'))
attn_copy = f.softmax(copy_score, dim=-1)
prediction = self.copy_generator(decoder_outputs,
attn_copy,
params['src_map'])
prediction = prediction.squeeze(1)
for b in range(prediction.size(0)):
if params['blank'][b]:
blank_b = torch.LongTensor(params['blank'][b])
fill_b = torch.LongTensor(params['fill'][b])
if use_cuda:
blank_b = blank_b.cuda()
fill_b = fill_b.cuda()
prediction[b].index_add_(0, fill_b,
prediction[b].index_select(0, blank_b))
prediction[b].index_fill_(0, blank_b, 1e-10)
else:
prediction = self.generator(decoder_outputs.squeeze(1))
prediction = f.softmax(prediction, dim=1)
if choice == 'greedy':
tgt_prob, tgt = torch.max(prediction, dim=1, keepdim=True)
log_prob = torch.log(tgt_prob + 1e-20)
elif choice == 'sample':
tgt, log_prob = self.reinforce.sample(prediction.unsqueeze(1))
else:
assert False
dec_log_probs.append(log_prob.squeeze(1))
dec_preds.append(tgt.squeeze(1).clone())
if "std" in attns:
# std_attn: batch_size x num_heads x 1 x src_len
std_attn = torch.stack(attns["std"], dim=1)
attentions.append(std_attn.squeeze(2))
if self._copy:
mask = tgt.gt(len(params['tgt_dict']) - 1)
copy_info.append(mask.float().squeeze(1))
words = self.__tens2sent(tgt, params['tgt_dict'], params['source_vocab'])
tgt_chars = None
if self.embedder.use_tgt_char:
tgt_chars = [params['tgt_dict'].word_to_char_ids(w).tolist() for w in words]
tgt_chars = torch.Tensor(tgt_chars).to(tgt).unsqueeze(1)
words = [params['tgt_dict'][w] for w in words]
words = torch.Tensor(words).type_as(tgt)
tgt_words = words.unsqueeze(1)
return dec_preds, attentions, copy_info, dec_log_probs
def decode(self,
code_word_rep,
code_char_rep,
code_type_rep,
code_len,
src_map,
alignment,
**kwargs):
word_rep = self.embedder(code_word_rep,
code_char_rep,
code_type_rep,
mode='encoder')
memory_bank, layer_wise_outputs = self.encoder(word_rep, code_len) # B x seq_len x h
params = dict()
params['memory_bank'] = memory_bank
params['layer_wise_outputs'] = layer_wise_outputs
params['src_len'] = code_len
params['source_vocab'] = kwargs['source_vocab']
params['src_map'] = src_map
params['src_mask'] = kwargs['code_mask_rep']
params['fill'] = kwargs['fill']
params['blank'] = kwargs['blank']
params['src_dict'] = kwargs['src_dict']
params['tgt_dict'] = kwargs['tgt_dict']
params['max_len'] = kwargs['max_len']
params['src_words'] = code_word_rep
dec_preds, attentions, copy_info, _ = self.__generate_sequence(params, choice='greedy')
dec_preds = torch.stack(dec_preds, dim=1)
copy_info = torch.stack(copy_info, dim=1) if copy_info else None
# attentions: batch_size x tgt_len x num_heads x src_len
attentions = torch.stack(attentions, dim=1) if attentions else None
return {
'predictions': dec_preds,
'copy_info': copy_info,
'memory_bank': memory_bank,
'attentions': attentions
}
def count_parameters(self):
return sum(p.numel() for p in self.parameters() if p.requires_grad)
def count_encoder_parameters(self):
return self.encoder.count_parameters()
def count_decoder_parameters(self):
return self.decoder.count_parameters()
def layer_wise_parameters(self):
table = PrettyTable()
table.field_names = ["Layer Name", "Output Shape", "Param #"]
table.align["Layer Name"] = "l"
table.align["Output Shape"] = "r"
table.align["Param #"] = "r"
for name, parameters in self.named_parameters():
if parameters.requires_grad:
table.add_row([name, str(list(parameters.shape)), parameters.numel()])
return table
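# Illustrative usage sketch (not part of the original module; `args` is assumed
# to be the parsed c2nl argument namespace and `tgt_dict` the target vocabulary
# built by the surrounding training code):
#   model = Transformer(args, tgt_dict)
#   print('trainable parameters:', model.count_parameters())
#   print(model.layer_wise_parameters())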
| 1.929688 | 2 |
cattle/plugins/docker/delegate.py | cjellick/python-agent | 8 | 4014 | import logging
from cattle import Config
from cattle.utils import reply, popen
from .compute import DockerCompute
from cattle.agent.handler import BaseHandler
from cattle.progress import Progress
from cattle.type_manager import get_type, MARSHALLER
from . import docker_client
import subprocess
import os
import time
log = logging.getLogger('docker')
def ns_exec(pid, event):
script = os.path.join(Config.home(), 'events', event.name.split(';')[0])
cmd = ['nsenter',
'-F',
'-m',
'-u',
'-i',
'-n',
'-p',
'-t', str(pid),
'--', script]
marshaller = get_type(MARSHALLER)
input = marshaller.to_string(event)
data = None
env = {}
with open('/proc/{}/environ'.format(pid)) as f:
for line in f.read().split('\0'):
if not len(line):
continue
kv = line.split('=', 1)
if kv[0].startswith('CATTLE'):
env[kv[0]] = kv[1]
env['PATH'] = os.environ['PATH']
env['CATTLE_CONFIG_URL'] = Config.config_url()
for i in range(3):
p = popen(cmd,
env=env,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
output, error = p.communicate(input=input)
retcode = p.poll()
if retcode == 0:
break
exists_cmd = cmd[:-1] + ['/usr/bin/test', '-e', script]
if popen(exists_cmd, env=env).wait() == 0:
break
# Sleep and try again if missing
time.sleep(1)
if retcode:
return retcode, output, None
text = []
for line in output.splitlines():
if line.startswith('{'):
data = marshaller.from_string(line)
break
text.append(line)
return retcode, ''.join(text), data
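# For illustration (hypothetical pid and event name): for pid 1234 and an event
# named 'config.update;extra', the command assembled above resolves to roughly
#   nsenter -F -m -u -i -n -p -t 1234 -- <Config.home()>/events/config.update
# i.e. the matching event script is executed inside the container's namespaces.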
class DockerDelegate(BaseHandler):
def __init__(self):
self.compute = DockerCompute()
pass
def events(self):
return ['delegate.request']
def delegate_request(self, req=None, event=None, instanceData=None, **kw):
if instanceData.kind != 'container' or \
instanceData.get('token') is None:
return
container = self.compute.get_container(docker_client(), instanceData,
by_agent=True)
if container is None:
log.info('Can not call [%s], container does not exists',
instanceData.uuid)
return
inspect = self.compute.inspect(container)
try:
running = inspect['State']['Running']
if not running:
log.error('Can not call [%s], container is not running',
instanceData.uuid)
return
except KeyError:
log.error('Can not call [%s], container is not running',
instanceData.uuid)
return
progress = Progress(event, parent=req)
exit_code, output, data = ns_exec(inspect['State']['Pid'], event)
if exit_code == 0:
return reply(event, data, parent=req)
else:
progress.update('Update failed', data={
'exitCode': exit_code,
'output': output
})
| 1.90625 | 2 |
bitraider/strategy.py | ehickox2012/bitraider | 2 | 4015 | import sys
import pytz
#import xml.utils.iso8601
import time
import numpy
from datetime import date, datetime, timedelta
from matplotlib import pyplot as plt
from exchange import cb_exchange as cb_exchange
from exchange import CoinbaseExchangeAuth
from abc import ABCMeta, abstractmethod
class strategy(object):
"""`strategy` defines an abstract base strategy class. Minimum required to create a strategy is a file with a class which inherits from strategy containing a backtest_strategy function. As a bonus, strategy includes utility functions like calculate_historic_data.
"""
__metaclass__ = ABCMeta
def __init__(name="default name", interval=5):
"""Constructor for an abstract strategy. You can modify it as needed.
\n`interval`: a.k.a timeslice the amount of time in seconds for each 'tick' default is 5
\n`name`: a string name for the strategy
"""
self.name = name
self.interval = interval
self.times_recalculated = 0
@abstractmethod
def trade(self, timeslice):
"""Perform operations on a timeslice.
\n`timeslice`: a section of trade data with time length equal to the strategy's interval, formatted as follows:
\n[time, low, high, open, close, volume]
"""
return
def backtest_strategy(self, historic_data):
"""Returns performance of a strategy vs market performance.
"""
# Reverse the data since Coinbase returns it in reverse chronological
# now historic_data strarts with the oldest entry
historic_data = list(reversed(historic_data))
earliest_time = float(historic_data[0][0])
latest_time = float(historic_data[-1][0])
start_price = float(historic_data[0][4])
end_price = float(historic_data[-1][4])
market_performance = ((end_price-start_price)/start_price)*100
print("Running simulation on historic data. This may take some time....")
for timeslice in historic_data:
# Display what percent through the data we are
idx = historic_data.index(timeslice)
percent = (float(idx)/float(len(historic_data)))*100 + 1
sys.stdout.write("\r%d%%" % percent)
sys.stdout.flush()
self.trade(timeslice)
# Calculate performance
end_amt_no_trades = (float(self.exchange.start_usd)/float(end_price)) + float(self.exchange.start_btc)
end_amt = (float(self.exchange.usd_bal)/float(end_price)) + float(self.exchange.btc_bal)
start_amt = (float(self.exchange.start_usd)/float(start_price)) + float(self.exchange.start_btc)
strategy_performance = ((end_amt-start_amt)/start_amt)*100
print("\n")
print("Times recalculated: "+str(self.times_recalculated))
print("Times bought: "+str(self.exchange.times_bought))
print("Times sold: "+str(self.exchange.times_sold))
print("The Market's performance: "+str(market_performance)+" %")
print("Strategy's performance: "+str(strategy_performance)+" %")
print("Account's ending value if no trades were made: "+str(end_amt_no_trades)+" BTC")
print("Account's ending value with this strategy: "+str(end_amt)+" BTC")
strategy_performance_vs_market = strategy_performance - market_performance
if strategy_performance > market_performance:
print("Congratulations! This strategy has beat the market by: "+str(strategy_performance_vs_market)+" %")
elif strategy_performance < market_performance:
print("This strategy has preformed: "+str(strategy_performance_vs_market)+" % worse than market.")
return strategy_performance_vs_market, strategy_performance, market_performance
@staticmethod
def calculate_historic_data(data, pivot):
"""Returns average price weighted according to volume, and the number of bitcoins traded
above and below a price point, called a pivot.\n
\npivot: the price used for returning volume above and below
\ndata: a list of lists formated as follows [time, low, high, open, close]
\n[
\n\t["2014-11-07 22:19:28.578544+00", "0.32", "4.2", "0.35", "4.2", "12.3"],
\n\t\t...
\n]
"""
price_list = []
weights = []
if data is None:
pass
min_price = float(data[0][1])
max_price = float(data[0][2])
discrete_prices = {}
for timeslice in data:
timeslice = [float(i) for i in timeslice]
if max_price < timeslice[2]:
                max_price = timeslice[2]
if min_price > timeslice[1]:
min_price = timeslice[1]
closing_price = timeslice[4]
volume = timeslice[5]
if closing_price not in discrete_prices.keys():
discrete_prices[str(closing_price)] = volume
else:
                discrete_prices[str(closing_price)] += volume
idx = data.index(timeslice)
price_list.append(closing_price)
weights.append(volume)
fltprices = [float(i) for i in discrete_prices.keys()]
fltvolumes = [float(i) for i in discrete_prices.values()]
np_discrete_prices = numpy.array(fltprices)
np_volume_per_price = numpy.array(fltvolumes)
weighted_avg = numpy.average(np_discrete_prices, weights=np_volume_per_price)
num_above = 0
num_below = 0
num_at = 0
for key in discrete_prices.keys():
value = discrete_prices[key]
if float(key) > pivot:
num_above+=value
elif float(key) < pivot:
num_below+=value
elif float(key) == pivot:
num_at+=value
total_volume = 0.0
for volume in fltvolumes:
total_volume+=volume
fltprops = []
for volume in fltvolumes:
fltprops.append((volume/total_volume))
#print("num_below: "+str(num_below))
#print("num_above: "+str(num_above))
#print("num_at: "+str(num_at))
#print("weighted_average: "+str(weighted_avg))
#plt.title("Price distribution")
#plt.xlabel("Price (USD)")
#plt.ylabel("Volume")
#plt.bar(fltprices, fltprops)
#plt.show()
return weighted_avg, num_above, num_below
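# Minimal illustrative subclass (not part of the original module): as noted in the
# class docstring, a concrete strategy only has to inherit from `strategy` and
# implement trade(); this sketch simply records the latest closing price.
class example_strategy(strategy):
    def trade(self, timeslice):
        # timeslice format: [time, low, high, open, close, volume]
        self.last_close = float(timeslice[4])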
| 3.40625 | 3 |
neural-networks.py | PacktPublishing/Python-Deep-Learning-for-Beginners- | 7 | 4016 | <filename>neural-networks.py
import numpy as np
# Perceptron
def predict_perceptron(inputs, weights):
if np.dot(inputs, weights) > 0:
return 1
else:
return 0
def predict_perceptron_proper(inputs, weights):
def step_function(input):
return 1 if input > 0 else 0
def linear_model(inputs, weights):
return np.dot(inputs, weights)
return step_function(linear_model(inputs, weights))
def neuron(inputs, weights):
def sigmoid_function(input):
return 1 / (1 + np.exp(-1 * input))
def linear_model(inputs, weights):
return np.dot(inputs, weights)
return sigmoid_function(linear_model(inputs, weights))
# Hypothetical example inputs/weights (added for illustration; the original file
# left `inputs`, `weights1` and `weights2` undefined):
inputs = np.array([0.5, 0.3, 0.2])
weights1 = np.array([0.4, -0.6, 0.9])
weights2 = np.array([0.8])
neural_network = neuron(neuron(inputs, weights1), weights2)
| 3.59375 | 4 |
biggan_discovery/orojar_discover.py | andreasjansson/OroJaR | 47 | 4017 | """
Learns a matrix of Z-Space directions using a pre-trained BigGAN Generator.
Modified from train.py in the PyTorch BigGAN repo.
"""
import os
from tqdm import tqdm
import torch
import torch.nn as nn
import torch.optim
import utils
import train_fns
from sync_batchnorm import patch_replication_callback
from torch.utils.tensorboard import SummaryWriter
from orojar import orojar
from direction_utils import visualize_directions, load_G, get_direction_padding_fn, init_wandb, download_G
from layers import fast_gram_schmidt, norm
class DataParallelLoss(nn.Module):
"""
This is simply a wrapper class to compute the OroJaR efficiently over several GPUs
"""
def __init__(self, G):
super(DataParallelLoss, self).__init__()
self.G = G
def forward(self, z, y, w, Q):
penalty = orojar(self.G, z, c=y, w=w, G_z=None, Q=Q, multiple_layers=False)
return penalty
# The main training file. Config is a dictionary specifying the configuration
# of this training run.
def run(config):
if config['wandb_entity'] is not None:
init_wandb(config, config['experiment_name'], config['wandb_entity'], 'imagenet')
if config["G_path"] is None: # Download a pre-trained G if necessary
download_G()
config["G_path"] = 'checkpoints/138k'
G, state_dict, device, experiment_name = load_G(config)
# If parallel, parallelize the GD module
if config['parallel']:
G = nn.DataParallel(DataParallelLoss(G))
if config['cross_replica']:
patch_replication_callback(G)
num_gpus = torch.cuda.device_count()
print(f'Using {num_gpus} GPUs')
# If search_space != 'all', then we need to pad the z components that we are leaving alone:
pad = get_direction_padding_fn(config)
direction_size = config['dim_z'] if config['search_space'] == 'all' else config['ndirs']
# A is our (ndirs, |z|) matrix of directions, where ndirs indicates the number of directions we want to learn
if config['load_A'] == 'coords':
print('Initializing with standard basis directions')
A = torch.nn.Parameter(torch.eye(config['ndirs'], direction_size, device=device), requires_grad=True)
elif config['load_A'] == 'random':
print('Initializing with random directions')
A = torch.nn.Parameter(torch.empty(config['ndirs'], direction_size, device=device), requires_grad=True)
torch.nn.init.kaiming_normal_(A)
else:
raise NotImplementedError
# We only learn A; G is left frozen during training:
optim = torch.optim.Adam(params=[A], lr=config['A_lr'])
# Allow for different batch sizes in G
G_batch_size = max(config['G_batch_size'], config['batch_size'])
z_, y_ = utils.prepare_z_y(G_batch_size, G.module.G.dim_z, config['n_classes'],
device=device, fp16=config['G_fp16'])
# Prepare a fixed z & y to see individual sample evolution throghout training
fixed_z, fixed_y = utils.prepare_z_y(G_batch_size, G.module.G.dim_z,
config['n_classes'], device=device,
fp16=config['G_fp16'])
fixed_z.sample_()
fixed_y.sample_()
interp_z, interp_y = utils.prepare_z_y(config["n_samples"], G.module.G.dim_z,
config['n_classes'], device=device,
fp16=config['G_fp16'])
interp_z.sample_()
interp_y.sample_()
if config['fix_class'] is not None:
y_ = y_.new_full(y_.size(), config['fix_class'])
fixed_y = fixed_y.new_full(fixed_y.size(), config['fix_class'])
interp_y = interp_y.new_full(interp_y.size(), config['fix_class'])
print('Beginning training at epoch %d...' % state_dict['epoch'])
# Train for specified number of epochs, although we mostly track G iterations.
iters_per_epoch = 1000
dummy_loader = [None] * iters_per_epoch # We don't need any real data
path_size = config['path_size']
# Simply stores a |z|-dimensional one-hot vector indicating each direction we are learning:
direction_indicators = torch.eye(config['ndirs']).to(device)
G.eval()
G.module.optim = optim
writer = SummaryWriter('%s/%s' % (config['logs_root'], experiment_name))
sample_sheet = train_fns.save_and_sample(G.module.G, None, G.module.G, z_, y_, fixed_z, fixed_y,
state_dict, config, experiment_name)
writer.add_image('samples', sample_sheet, 0)
interp_y_ = G.module.G.shared(interp_y)
norm_fn = norm
# Make directions orthonormal via Gram Schmidt followed a normalization:
Q = pad(norm_fn(fast_gram_schmidt(A))) if not config["no_ortho"] else pad(A)
if config["vis_during_training"]:
print("Generating initial visualizations...")
interp_vis = visualize_directions(G.module.G, interp_z, interp_y_, path_sizes=path_size, Q=Q,
high_quality=False, npv=1)
for w_ix in range(config['ndirs']):
writer.add_video('G_ema/w%03d' % w_ix, interp_vis[w_ix], 0, fps=24)
for epoch in range(state_dict['epoch'], config['num_epochs']):
if config['pbar'] == 'mine':
pbar = utils.progress(dummy_loader, displaytype='s1k' if config['use_multiepoch_sampler'] else 'eta')
else:
pbar = tqdm(dummy_loader)
for i, _ in enumerate(pbar):
state_dict['itr'] += 1
z_.sample_()
if config['fix_class'] is None:
y_.sample_()
y = G.module.G.shared(y_)
# OroJaR taken w.r.t. w_sampled, NOT z:
w = torch.zeros((G_batch_size, config['ndirs'])) # equal to the one-hot w
penalty = G(z_, y, w=w, Q=Q.repeat(num_gpus, 1)).mean()
optim.zero_grad()
penalty.backward()
optim.step()
# re-orthogonalize A for visualizations and the next training iteration:
Q = pad(norm_fn(fast_gram_schmidt(A))) if not config["no_ortho"] else pad(A)
# Log metrics to TensorBoard/WandB:
cur_training_iter = epoch * iters_per_epoch + i
writer.add_scalar(f'Metrics/orojar', penalty.item(), cur_training_iter)
writer.add_scalar('Metrics/direction_norm', A.pow(2).mean().pow(0.5).item(), cur_training_iter)
# Save directions and log visuals:
if not (state_dict['itr'] % config['save_every']):
torch.save(A.cpu().detach(), '%s/%s/A_%06d.pt' %
(config['weights_root'], experiment_name, cur_training_iter))
if config["vis_during_training"]:
interp_vis = visualize_directions(G.module.G, interp_z, interp_y_, path_sizes=path_size, Q=Q,
high_quality=False, npv=1)
for w_ix in range(config['ndirs']):
writer.add_video('G_ema/w%03d' % w_ix, interp_vis[w_ix], cur_training_iter, fps=24)
state_dict['epoch'] += 1
def main():
# parse command line and run
parser = utils.prepare_parser()
config = vars(parser.parse_args())
print(config)
run(config)
if __name__ == '__main__':
main()
| 2.359375 | 2 |
file_importer0.py | Alva789ro/Regional-Comprehensive-Economic-Partnership-RCEP-Economic-Default-Risk-Analysis | 1 | 4018 | import xlsxwriter
import pandas as pd
import numpy as np
import mysql.connector
australia=pd.read_excel(r'\Users\jesica\Desktop\RCEP_economic_analysis.xlsx', sheet_name='Australia')
brunei=pd.read_excel(r'\Users\jesica\Desktop\RCEP_economic_analysis.xlsx', sheet_name='Brunei')
cambodia=pd.read_excel(r'\Users\jesica\Desktop\RCEP_economic_analysis.xlsx', sheet_name='Cambodia')
china=pd.read_excel(r'\Users\jesica\Desktop\RCEP_economic_analysis.xlsx', sheet_name='China')
indonesia=pd.read_excel(r'\Users\jesica\Desktop\RCEP_economic_analysis.xlsx', sheet_name='Indonesia')
japan=pd.read_excel(r'\Users\jesica\Desktop\RCEP_economic_analysis.xlsx', sheet_name='Japan')
lao=pd.read_excel(r'\Users\jesica\Desktop\RCEP_economic_analysis.xlsx', sheet_name='Lao')
malaysia=pd.read_excel(r'\Users\jesica\Desktop\RCEP_economic_analysis.xlsx', sheet_name='Malaysia')
myanmar=pd.read_excel(r'\Users\jesica\Desktop\RCEP_economic_analysis.xlsx', sheet_name='Myanmar')
new_zeland=pd.read_excel(r'\Users\jesica\Desktop\RCEP_economic_analysis.xlsx', sheet_name='New Zeland')
philipines=pd.read_excel(r'\Users\jesica\Desktop\RCEP_economic_analysis.xlsx', sheet_name='Philipines')
singapore=pd.read_excel(r'\Users\jesica\Desktop\RCEP_economic_analysis.xlsx', sheet_name='Singapore')
thailand=pd.read_excel(r'\Users\jesica\Desktop\RCEP_economic_analysis.xlsx', sheet_name='Thailand')
vietnam=pd.read_excel(r'\Users\jesica\Desktop\RCEP_economic_analysis.xlsx', sheet_name='Vietnam')
'''
mydb = mysql.connector.connect(
host = "localhost",
user = "root",
passwd = "",
database = ""
)
mycursor = mydb.cursor()
sqlformula1 = "INSERT INTO australia VALUES(%s, %s, %s, %s, %s, %s, %s, %s)"
for a, b, c, d, e, f, g, h in zip(australia['Year'], australia['RGDP'], australia['NGDP'], australia['GDP_pc'], australia['Inflation'], australia['Unemployment_Rate'], australia['Net_LB'], australia['Account_Balance']):
mycursor.execute(sqlformula1, [a, b, c, d, e, f, g, h])
sqlformula2 = "INSERT INTO brunei VALUES(%s, %s, %s, %s, %s, %s, %s, %s)"
for a, b, c, d, e, f, g, h in zip(brunei['Year'], brunei['RGDP'], brunei['NGDP'], brunei['GDP_pc'], brunei['Inflation'], brunei['Unemployment_Rate'], brunei['Net_LB'], brunei['Account_Balance']):
mycursor.execute(sqlformula2, [a, b, c, d, e, f, g, h])
sqlformula3 = "INSERT INTO cambodia VALUES(%s, %s, %s, %s, %s, %s, %s, %s)"
for a, b, c, d, e, f, g, h in zip(cambodia['Year'], cambodia['RGDP'], cambodia['NGDP'], cambodia['GDP_pc'], cambodia['Inflation'], cambodia['Unemployment_Rate'], cambodia['Net_LB'], cambodia['Account_Balance']):
mycursor.execute(sqlformula3, [a, b, c, d, e, f, g, h])
sqlformula4 = "INSERT INTO china VALUES(%s, %s, %s, %s, %s, %s, %s, %s)"
for a, b, c, d, e, f, g, h in zip(china['Year'], china['RGDP'], china['NGDP'], china['GDP_pc'], china['Inflation'], china['Unemployment_Rate'], china['Net_LB'], china['Account_Balance']):
mycursor.execute(sqlformula4, [a, b, c, d, e, f, g, h])
sqlformula5 = "INSERT INTO indonesia VALUES(%s, %s, %s, %s, %s, %s, %s, %s)"
for a, b, c, d, e, f, g, h in zip(indonesia['Year'], indonesia['RGDP'], indonesia['NGDP'], indonesia['GDP_pc'], indonesia['Inflation'], indonesia['Unemployment_Rate'], indonesia['Net_LB'], indonesia['Account_Balance']):
mycursor.execute(sqlformula5, [a, b, c, d, e, f, g, h])
sqlformula6 = "INSERT INTO japan VALUES(%s, %s, %s, %s, %s, %s, %s, %s)"
for a, b, c, d, e, f, g, h in zip(japan['Year'], japan['RGDP'], japan['NGDP'], japan['GDP_pc'], japan['Inflation'], japan['Unemployment_Rate'], japan['Net_LB'], japan['Account_Balance']):
mycursor.execute(sqlformula6, [a, b, c, d, e, f, g, h])
sqlformula7 = "INSERT INTO lao VALUES(%s, %s, %s, %s, %s, %s, %s, %s)"
for a, b, c, d, e, f, g, h in zip(lao['Year'], lao['RGDP'], lao['NGDP'], lao['GDP_pc'], lao['Inflation'], lao['Unemployment_Rate'], lao['Net_LB'], lao['Account_Balance']):
mycursor.execute(sqlformula7, [a, b, c, d, e, f, g, h])
sqlformula8 = "INSERT INTO malaysia VALUES(%s, %s, %s, %s, %s, %s, %s, %s)"
for a, b, c, d, e, f, g, h in zip(malaysia['Year'], malaysia['RGDP'], malaysia['NGDP'], malaysia['GDP_pc'], malaysia['Inflation'], malaysia['Unemployment_Rate'], malaysia['Net_LB'], malaysia['Account_Balance']):
mycursor.execute(sqlformula8, [a, b, c, d, e, f, g, h])
sqlformula9 = "INSERT INTO myanmar VALUES(%s, %s, %s, %s, %s, %s, %s, %s)"
for a, b, c, d, e, f, g, h in zip(myanmar['Year'], myanmar['RGDP'], myanmar['NGDP'], myanmar['GDP_pc'], myanmar['Inflation'], myanmar['Unemployment_Rate'], myanmar['Net_LB'], myanmar['Account_Balance']):
mycursor.execute(sqlformula9, [a, b, c, d, e, f, g, h])
sqlformula10 = "INSERT INTO new_zeland VALUES(%s, %s, %s, %s, %s, %s, %s, %s)"
for a, b, c, d, e, f, g, h in zip(new_zeland['Year'], new_zeland['RGDP'], new_zeland['NGDP'], new_zeland['GDP_pc'], new_zeland['Inflation'], new_zeland['Unemployment_Rate'], new_zeland['Net_LB'], new_zeland['Account_Balance']):
mycursor.execute(sqlformula10, [a, b, c, d, e, f, g, h])
sqlformula11 = "INSERT INTO philipines VALUES(%s, %s, %s, %s, %s, %s, %s, %s)"
for a, b, c, d, e, f, g, h in zip(philipines['Year'], philipines['RGDP'], philipines['NGDP'], philipines['GDP_pc'], philipines['Inflation'], philipines['Unemployment_Rate'], philipines['Net_LB'], philipines['Account_Balance']):
mycursor.execute(sqlformula11, [a, b, c, d, e, f, g, h])
sqlformula12 = "INSERT INTO singapore VALUES(%s, %s, %s, %s, %s, %s, %s, %s)"
for a, b, c, d, e, f, g, h in zip(singapore['Year'], singapore['RGDP'], singapore['NGDP'], singapore['GDP_pc'], singapore['Inflation'], singapore['Unemployment_Rate'], singapore['Net_LB'], singapore['Account_Balance']):
mycursor.execute(sqlformula12, [a, b, c, d, e, f, g, h])
sqlformula13 = "INSERT INTO thailand VALUES(%s, %s, %s, %s, %s, %s, %s, %s)"
for a, b, c, d, e, f, g, h in zip(thailand['Year'], thailand['RGDP'], thailand['NGDP'], thailand['GDP_pc'], thailand['Inflation'], thailand['Unemployment_Rate'], thailand['Net_LB'], thailand['Account_Balance']):
mycursor.execute(sqlformula13, [a, b, c, d, e, f, g, h])
sqlformula14 = "INSERT INTO vietnam VALUES(%s, %s, %s, %s, %s, %s, %s, %s)"
for a, b, c, d, e, f, g, h in zip(vietnam['Year'], vietnam['RGDP'], vietnam['NGDP'], vietnam['GDP_pc'], vietnam['Inflation'], vietnam['Unemployment_Rate'], vietnam['Net_LB'], vietnam['Account_Balance']):
mycursor.execute(sqlformula14, [a, b, c, d, e, f, g, h])
'''
#mydb.commit()
| 2.25 | 2 |
packer/resources/bootstrap_node.py | VIOOH/nile | 4 | 4019 | #!/usr/bin/env python3
import os
import re
import glob
import boto3
import requests
import subprocess
from time import sleep
AWS_REGION = os.environ['AWS_REGION']
DEPLOY_UUID = os.environ['DEPLOY_UUID']
SERVICE_NAME = os.environ['SERVICE_NAME']
MOUNT_POINT = "/var/lib/" + SERVICE_NAME
NIC_IP = os.environ['NIC_IP']
TAG_KEY = os.environ['TAG_KEY']
def retrieve_eni_ids():
ec2 = boto3.resource('ec2')
enis = []
for eni in ec2.network_interfaces.all():
for tag in eni.tag_set:
if tag['Key'] == TAG_KEY:
if tag['Value'] == DEPLOY_UUID:
enis.append(eni.network_interface_id)
return enis if len(enis) > 0 else None
def attach_eni_ids():
c_ec2 = boto3.client('ec2')
r_ec2 = boto3.resource('ec2')
i_id = requests.get('http://169.254.169.254/latest/meta-data/instance-id').text
eni_ids = retrieve_eni_ids()
device_number = len(r_ec2.Instance(i_id).network_interfaces) + 1
for eni_id in eni_ids:
c_ec2.attach_network_interface(DeviceIndex=device_number, InstanceId=i_id, NetworkInterfaceId=eni_id)
def retrieve_ebs_ids():
ec2 = boto3.resource('ec2')
ebss = []
for volume in ec2.volumes.all():
if volume.tags is not None:
for tag in volume.tags:
if tag['Key'] == TAG_KEY:
if tag['Value'] == DEPLOY_UUID:
ebss.append(volume.volume_id)
return ebss if len(ebss) > 0 else None
def attach_ebs():
ec2 = boto3.client('ec2')
i_id = requests.get('http://169.254.169.254/latest/meta-data/instance-id').text
volume_ids = retrieve_ebs_ids()
i = 0
device_char = 'z'
while i < len(volume_ids):
v_id = volume_ids[i]
device = '/dev/xvd{0}'.format(device_char)
ec2.attach_volume(Device=device, InstanceId=i_id, VolumeId=v_id)
# Wait to ensure device is attached
sleep(3)
if not check_ebs(v_id):
prepare_ebs(v_id)
add_fstab_entries(v_id, MOUNT_POINT)
p_mount = subprocess.Popen('mount -a'.split(), stdout=subprocess.PIPE)
stdout, stderr = p_mount.communicate()
p_chown = subprocess.Popen('chown -R {0}:{0} {1}'.format(SERVICE_NAME, MOUNT_POINT).split(),
stdout=subprocess.PIPE)
stdout, stderr = p_chown.communicate()
device_char = chr(ord(device_char) - 1)
i += 1
def check_ebs(volume_id):
v_id = volume_id.replace('vol-', 'vol')
pattern = '/dev/disk/by-id/*{0}-part1'.format(v_id)
return bool(len(glob.glob(pattern)))
def prepare_ebs(volume_id):
v_id = volume_id.replace('vol-', 'vol')
pattern = '/dev/disk/by-id/*{0}'.format(v_id)
device = glob.glob(pattern)[0]
gdisk_commands = '\n'.join([
'n',
'1',
'34',
'',
'',
'w',
'Y',
''
])
p_echo = subprocess.Popen('echo -ne {0}'.format(gdisk_commands).split(' '), stdout=subprocess.PIPE)
p_fdisk = subprocess.Popen('gdisk {0}'.format(device).split(), stdin=p_echo.stdout, stdout=subprocess.PIPE)
stdout, stderr = p_fdisk.communicate()
print(stdout)
print(stderr)
# p_partprobe = subprocess.Popen('partprobe'.split(' '), stdout=subprocess.PIPE)
# stdout, stderr = p_partprobe.communicate()
# print(stdout)
# print(stderr)
sleep(3)
pattern = '/dev/disk/by-id/*{0}-part1'.format(v_id)
partition = glob.glob(pattern)[0]
p_xfs = subprocess.Popen('mkfs.xfs {0}'.format(partition).split(), stdout=subprocess.PIPE)
stdout, stderr = p_xfs.communicate()
print(stdout)
print(stderr)
def add_fstab_entries(volume_id, mount_point):
v_id = volume_id.replace('vol-', 'vol')
pattern = '/dev/disk/by-id/*{0}-part1'.format(v_id)
partition = glob.glob(pattern)[0]
fstab_entries = [
mount_point,
'xfs',
'defaults',
'0',
'0'
]
with open('/etc/fstab', 'a') as f:
f.write('{0} {1}\n'.format(partition, ' '.join(fstab_entries)))
f.flush()
f.close()
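# Illustrative /etc/fstab line appended by add_fstab_entries() (the device id
# below is made up; the real one comes from /dev/disk/by-id):
#   /dev/disk/by-id/nvme-Amazon_Elastic_Block_Store_vol0abc123-part1 /var/lib/<service> xfs defaults 0 0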
def wait_device_ready(timeout=3):
c = 0
while c < timeout:
sleep(1)
p_ip = subprocess.Popen('ip a'.split(), stdout=subprocess.PIPE)
stdout, stderr = p_ip.communicate()
for line in stdout.decode().splitlines():
res = re.match('.*inet {0}/[0-9]{{2}}'.format(NIC_IP), line)
if res is not None:
return None
c += 1
raise Exception('Device with address {0} not ready'.format(NIC_IP))
def change_default_route():
wait_device_ready(10)
p_ip = subprocess.Popen('ip r'.split(), stdout=subprocess.PIPE)
stdout, stderr = p_ip.communicate()
r_subnet_rules = []
for line in stdout.decode().splitlines():

res = re.match('(.* ){2}eth[0-9](?! $).*', line)
if res is not None:
subnet_rule = res.group(0)
l_subnet_rule = subnet_rule.split()
device = l_subnet_rule[2]
ip = l_subnet_rule[-1]
r_subnet_rules.append(
{
'device': device,
'ip': ip,
'subnet_rule': subnet_rule
}
)
r_default_route = ''
for line in stdout.decode().splitlines():
res = re.match('default .*', line)
if res is not None:
r_default_route = res.group(0)
break
with open('/etc/rc.local', 'a') as f:
f.write('#!/bin/bash\n\n')
rule_index = 128
default_route_device = ''
for rule in r_subnet_rules:
default_route = re.sub('eth.', rule['device'], r_default_route)
f.write('ip rule add from {0} table {1}\n'.format(rule['ip'], rule_index))
f.write('ip r add {0} table {1}\n'.format(default_route, rule_index))
f.write('ip r add {0} table {1}\n\n'.format(rule['subnet_rule'], rule_index))
if rule['ip'] == NIC_IP:
default_route_device = rule['device']
rule_index += 1
default_route = re.sub('eth.', default_route_device, r_default_route)
f.write('ip r del default\n')
f.write('ip r add {0}\n\n'.format(default_route))
f.write('exit 0\n')
f.flush()
f.close()
os.chmod('/etc/rc.local', 0o0755)
p_rc_local = subprocess.Popen('/etc/rc.local'.split(), stdout=subprocess.PIPE)
stdout, stderr = p_rc_local.communicate()
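# Illustrative /etc/rc.local produced by change_default_route() (addresses and
# device names are made up; the real values come from the `ip r` output):
#   #!/bin/bash
#   ip rule add from 10.0.1.25 table 128
#   ip r add default via 10.0.1.1 dev eth1 table 128
#   ip r add 10.0.1.0/24 dev eth1 proto kernel scope link src 10.0.1.25 table 128
#   ip r del default
#   ip r add default via 10.0.1.1 dev eth1
#   exit 0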
if __name__ == '__main__':
boto3.setup_default_session(region_name=AWS_REGION)
# uses: DEPLOY_UUID, TAG_KEY
attach_eni_ids()
# uses: MOUNT_POINT, SERVICE_NAME, DEPLOY_UUID, TAG_KEY
attach_ebs()
# uses: NIC_IP
change_default_route()
| 2.03125 | 2 |
parsers/srum_parser.py | otoriocyber/Chronos | 12 | 4020 | import csv
import datetime
import random
import os
from parsers.parser_base import ParserBase
FILE_TIME_EPOCH = datetime.datetime(1601, 1, 1)
FILE_TIME_MICROSECOND = 10
def filetime_to_epoch_datetime(file_time):
if isinstance(file_time, int):
microseconds_since_file_time_epoch = file_time / FILE_TIME_MICROSECOND
else:
microseconds_since_file_time_epoch = int(file_time) / FILE_TIME_MICROSECOND
return FILE_TIME_EPOCH + datetime.timedelta(microseconds=microseconds_since_file_time_epoch)
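# Worked example: 116444736000000000 FILETIME ticks (100 ns each) divided by 10
# gives microseconds since 1601-01-01, which lands exactly on the Unix epoch:
#   filetime_to_epoch_datetime(116444736000000000) == datetime.datetime(1970, 1, 1)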
class SrumParser(ParserBase):
CSV_FIELDS = {
"Unknown1.csv": ["TimeStamp", "AppId", "UserId", "EndTime", "DurationMS"],
"Unknown2.csv": [],
"Unknown3.csv": [],
"Unknown4.csv": ["TimeStamp", "AppId", "UserId"],
"SruDbCheckpointTable.csv": [],
"SruDbIdMapTable.csv": [],
"Network Usage.csv": ["TimeStamp", "AppId", "UserId", "InterfaceLuid", "L2ProfileId", "BytesSent",
"BytesRecvd"],
"Network Connections.csv": [],
"Energy Usage.csv": [],
"Energy Usage(Long - Term).csv": [],
"Application Resources.csv": ["TimeStamp", "AppId", "UserId"],
"Application Resource Usage.csv": ["TimeStamp", "AppId", "UserId"]
}
PARSING_TOOL = r"Tools\ese-analyst-master\ese2csv.exe"
PARSE_COMMAND = "{parser_path} -o {output_path} -p srudb_plugin {srum_db} --plugin-args {software_hive}"
def __init__(self, temp, config):
super().__init__(config)
self.temp_result_path = temp
def parse(self, args):
srum_db, software_hive = args
output = r"{}\srum_{}".format(self.temp_result_path, random.randint(1, 1000000))
os.mkdir(output)
command = self.PARSE_COMMAND.format(parser_path=self.PARSING_TOOL, output_path=output, srum_db=srum_db,
software_hive=software_hive)
self._run_command(command)
for csv_file in os.listdir(output):
srum_records = []
full_path = os.path.join(output, csv_file)
headers = self.CSV_FIELDS.get(csv_file)
if not headers:
continue
if csv_file == "Unknown1.csv":
with open(full_path, "r") as f:
reader = csv.DictReader(f)
for line in reader:
cur_record = {}
endTime = line.get("EndTime")
duration = line.get("DurationMS")
if endTime and duration:
cur_record["time"] = filetime_to_epoch_datetime(int(endTime) - int(duration)).isoformat()
cur_record["EndTime"] = filetime_to_epoch_datetime(endTime).isoformat()
cur_record["DurationMS"] = duration
else:
cur_record["time"] = datetime.datetime(1970, 1, 1).isoformat()
cur_record["AppId"] = line.get("AppId")
cur_record["UserId"] = line.get("UserId")
srum_records.append(cur_record)
else:
with open(full_path, "r") as f:
reader = csv.DictReader(f)
for line in reader:
cur_record = {}
for header in headers:
if header == "TimeStamp":
cur_record["time"] = line.get("TimeStamp").replace(" ", "T")
line.pop("TimeStamp")
value = line.get(header)
if value:
if isinstance(value, bytes):
cur_record[header.lower().replace(" ", "_")] = value.decode()
elif str.isdigit(value):
cur_record[header.lower().replace(" ", "_")] = int(value)
else:
cur_record[header.lower().replace(" ", "_")] = value
else:
cur_record[header.lower().replace(" ", "_")] = ""
srum_records.append(cur_record)
self._write_results_list([("srum-{}".format(csv_file.split(".")[0].lower().replace(" ", "_")), srum_records)])
| 2.34375 | 2 |
tests/csrf_tests/test_context_processor.py | Yoann-Vie/esgi-hearthstone | 0 | 4021 | from django.http import HttpRequest
from django.middleware.csrf import _compare_salted_tokens as equivalent_tokens
from django.template.context_processors import csrf
from django.test import SimpleTestCase
class TestContextProcessor(SimpleTestCase):
def test_force_token_to_string(self):
request = HttpRequest()
test_token = '<KEY>'
request.META['CSRF_COOKIE'] = test_token
token = csrf(request).get('csrf_token')
self.assertTrue(equivalent_tokens(str(token), test_token))
| 2.328125 | 2 |
python/das/types.py | marza-animation-planet/das | 4 | 4022 | <filename>python/das/types.py
import sys
import das
import traceback
class ReservedNameError(Exception):
def __init__(self, name):
super(ReservedNameError, self).__init__("'%s' is a reserved name" % name)
class VersionError(Exception):
def __init__(self, msg=None, current_version=None, required_version=None):
fullmsg = "ersion error"
if required_version:
fullmsg += ": %s required" % required_version
else:
fullmsg += ": no requirements"
if current_version:
fullmsg += ", %s in use" % current_version
else:
fullmsg += ", no version info"
if msg:
fullmsg = msg + " v" + fullmsg
else:
fullmsg = "V" + fullmsg
super(VersionError, self).__init__(fullmsg)
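# e.g. VersionError(current_version="1.0", required_version="2.0") yields the
# message "Version error: 2.0 required, 1.0 in use"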
class GlobalValidationDisabled(object):
def __init__(self, data):
super(GlobalValidationDisabled, self).__init__()
self.data = data
self.oldstate = None
def __enter__(self):
try:
self.oldstate = self.data._is_global_validation_enabled()
self.data._enable_global_validation(False)
except:
pass
return self.data
def __exit__(self, type, value, traceback):
if self.oldstate is not None:
self.data._enable_global_validation(self.oldstate)
self.oldstate = None
# Always re-raise exception
return False
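# Minimal usage sketch (field names are illustrative): temporarily suspend
# global validation while making several related edits, then restore it:
#   with GlobalValidationDisabled(data):
#       data.width = 1920    # per-field/self validation still runs
#       data.height = 1080
#   # the previous global-validation state is restored here, even on error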
class TypeBase(object):
@classmethod
def TransferGlobalValidator(klass, src, dst):
if isinstance(src, klass) and isinstance(dst, klass):
dst._set_validate_globally_cb(src._gvalidate)
return dst
@classmethod
def ValidateGlobally(klass, inst):
if isinstance(inst, klass):
inst._gvalidate()
return inst
def __init__(self, *args):
super(TypeBase, self).__init__()
self.__dict__["_schema_type"] = None
self.__dict__["_validate_globally_cb"] = None
self.__dict__["_global_validation_enabled"] = True
def _wrap(self, rhs):
st = self._get_schema_type()
rv = self.__class__(rhs if st is None else st._validate_self(rhs))
rv._set_schema_type(self._get_schema_type())
return rv
def _adapt_value(self, value, key=None, index=None):
return das.adapt_value(value, schema_type=self._get_schema_type(), key=key, index=index)
def _validate(self, schema_type=None):
if schema_type is None:
schema_type = self._get_schema_type()
if schema_type is not None:
schema_type.validate(self)
self._set_schema_type(schema_type)
def _gvalidate(self):
st = self._get_schema_type()
if st is not None:
# run self validation first (container validation)
st._validate_self(self)
if hasattr(self, "_is_global_validation_enabled"):
if not self._is_global_validation_enabled():
# Skip global validaton
return
gvcb = self._get_validate_globally_cb()
if gvcb is not None:
gvcb()
if hasattr(self, "_validate_globally"):
try:
getattr(self, "_validate_globally")()
except:
_, ei, tb = sys.exc_info()
ei = das.ValidationError("Global Validation Failed (%s)" % str(ei))
raise ei.__class__, ei, tb
def _get_schema_type(self):
return self.__dict__["_schema_type"]
def _set_schema_type(self, schema_type):
self.__dict__["_schema_type"] = schema_type
def _get_validate_globally_cb(self):
return self.__dict__["_validate_globally_cb"]
def _set_validate_globally_cb(self, cb):
self.__dict__["_validate_globally_cb"] = cb
def _is_global_validation_enabled(self):
return self.__dict__["_global_validation_enabled"]
def _enable_global_validation(self, on):
self.__dict__["_global_validation_enabled"] = on
class Tuple(TypeBase, tuple):
def __init__(self, *args):
# Funny, we need to declare *args here, but at the time we reach
# the core of the method, tuple is already created
# Maybe because tuple is immutable?
super(Tuple, self).__init__()
def __add__(self, y):
raise das.ValidationError("Expected a tuple of size %d, got %d" % (len(self), len(self) + len(y)))
def __getitem__(self, i):
return TypeBase.TransferGlobalValidator(self, super(Tuple, self).__getitem__(i))
class Sequence(TypeBase, list):
def __init__(self, *args):
TypeBase.__init__(self)
list.__init__(self, *args)
def _wrap_index(self, i, n=None, clamp=False):
if i < 0:
if n is None:
n = len(self)
ii = i + n
if ii < 0:
if clamp:
return 0
else:
raise IndexError("list index out of range")
else:
return ii
else:
return i
def __imul__(self, n):
oldlen = len(self)
super(Sequence, self).__imul__(n)
try:
self._gvalidate()
except:
ec, ei, tb = sys.exc_info()
try:
super(Sequence, self).__setslice__(oldlen, len(self), [])
except Exception, e:
print("das.types.Sequence.__imul__: Failed to recover sequence data (%s)" % e)
raise ec, ei, tb
return self
def __mul__(self, n):
rv = self[:]
rv.__imul__(n)
return rv
def __rmul__(self, n):
return self.__mul__(n)
def __iadd__(self, y):
n = len(self)
super(Sequence, self).__iadd__([self._adapt_value(x, index=n+i) for i, x in enumerate(y)])
try:
self._gvalidate()
except:
ec, ei, tb = sys.exc_info()
try:
super(Sequence, self).__setslice__(n, len(self), [])
except Exception, e:
print("das.types.Sequence.__iadd__: Failed to recover sequence data (%s)" % e)
raise ec, ei, tb
return self
def __add__(self, y):
rv = self[:]
rv.__iadd__(y)
return rv
def __setitem__(self, i, y):
super(Sequence, self).__setitem__(i, self._adapt_value(y, index=i))
self._gvalidate()
def __getitem__(self, i):
return TypeBase.TransferGlobalValidator(self, super(Sequence, self).__getitem__(i))
def __delitem__(self, i):
ii = self._wrap_index(i, clamp=False)
item = super(Sequence, self).__getitem__(ii)
super(Sequence, self).__delitem__(i)
try:
self._gvalidate()
except:
ec, ei, tb = sys.exc_info()
try:
super(Sequence, self).insert(ii, item)
except Exception, e:
print("das.types.Sequence.__delitem__: Failed to recover sequence data (%s)" % e)
raise ec, ei, tb
def __iter__(self):
for item in super(Sequence, self).__iter__():
yield TypeBase.TransferGlobalValidator(self, item)
def __setslice__(self, i, j, y):
oldvals = super(Sequence, self).__getslice__(i, j)
newvals = [self._adapt_value(x, index=i+k) for k, x in enumerate(y)]
super(Sequence, self).__setslice__(i, j, newvals)
try:
self._gvalidate()
except:
ec, ei, tb = sys.exc_info()
try:
ii = self._wrap_index(i, clamp=True)
super(Sequence, self).__setslice__(ii, ii+len(newvals), oldvals)
except Exception, e:
print("das.types.Sequence.__setslice__: Failed to recover sequence data (%s)" % e)
raise ec, ei, tb
def __getslice__(self, i, j):
return self._wrap(super(Sequence, self).__getslice__(i, j))
def __delslice__(self, i, j):
oldvals = super(Sequence, self).__getslice__(i, j)
super(Sequence, self).__delslice__(i, j)
try:
self._gvalidate()
except:
ec, ei, tb = sys.exc_info()
try:
ii = self._wrap_index(i, clamp=True)
super(Sequence, self).__setslice__(ii, ii, oldvals)
except Exception, e:
print("das.types.Sequence.__setslice__: Failed to recover sequence data (%s)" % e)
raise ec, ei, tb
# def __contains__(self, y):
# try:
# _v = self._adapt_value(y, index=0)
# return super(Sequence, self).__contains__(_v)
# except:
# return False
def index(self, y):
return super(Sequence, self).index(self._adapt_value(y, index=0))
def insert(self, i, y):
super(Sequence, self).insert(i, self._adapt_value(y, index=i))
try:
self._gvalidate()
except:
ec, ei, tb = sys.exc_info()
try:
super(Sequence, self).pop(self._wrap_index(i, n=len(self)-1, clamp=True))
except Exception, e:
print("das.types.Sequence.insert: Failed to recover sequence data (%s)" % e)
raise ec, ei, tb
def append(self, y):
n = len(self)
super(Sequence, self).append(self._adapt_value(y, index=n))
try:
self._gvalidate()
except:
ec, ei, tb = sys.exc_info()
try:
super(Sequence, self).pop()
except Exception, e:
print("das.types.Sequence.append: Failed to recover sequence data (%s)" % e)
raise ec, ei, tb
def extend(self, y):
newvals = [self._adapt_value(x, index=len(self)+i) for i, x in enumerate(y)]
super(Sequence, self).extend(newvals)
try:
self._gvalidate()
except:
ec, ei, tb = sys.exc_info()
try:
super(Sequence, self).__setslice__(len(self) - len(newvals), len(self), [])
except Exception, e:
print("das.types.Sequence.extend: Failed to recover sequence data (%s)" % e)
raise ec, ei, tb
def pop(self, *args):
rv = super(Sequence, self).pop(*args)
try:
self._gvalidate()
except:
ec, ei, tb = sys.exc_info()
try:
if args:
super(Sequence, self).insert(self._wrap_index(args[0], n=len(self)+1, clamp=False), rv)
else:
super(Sequence, self).append(rv)
except Exception, e:
print("das.types.Sequence.pop: Failed to recover sequence data (%s)" % e)
raise ec, ei, tb
return rv
def remove(self, y):
idx = self.index(y)
item = self[idx]
super(Sequence, self).remove(item)
try:
self._gvalidate()
except:
ec, ei, tb = sys.exc_info()
try:
super(Sequence, self).insert(idx, item)
except Exception, e:
print("das.types.Sequence.remove: Failed to recover sequence data (%s)" % e)
raise ec, ei, tb
class Set(TypeBase, set):
def __init__(self, args):
TypeBase.__init__(self)
set.__init__(self, args)
def __iand__(self, y):
oldvals = super(Set, self).copy()
super(Set, self).__iand__(set([self._adapt_value(x, index=i) for i, x in enumerate(y)]))
try:
self._gvalidate()
except:
ec, ei, tb = sys.exc_info()
try:
super(Set, self).clear()
super(Set, self).__ior__(oldvals)
except Exception, e:
print("das.types.Set.__iand__: Failed to recover set data (%s)" % e)
raise ec, ei, tb
return self
def __and__(self, y):
rv = self.copy()
rv &= y
return rv
def __rand__(self, y):
return self.__and__(y)
def __isub__(self, y):
oldvals = super(Set, self).copy()
super(Set, self).__isub__(set([self._adapt_value(x, index=i) for i, x in enumerate(y)]))
try:
self._gvalidate()
except:
ec, ei, tb = sys.exc_info()
try:
super(Set, self).clear()
super(Set, self).__ior__(oldvals)
except Exception, e:
print("das.types.Set.__isub__: Failed to recover set data (%s)" % e)
raise ec, ei, tb
return self
def __sub__(self, y):
rv = self.copy()
rv -= y
return rv
def __rsub__(self, y):
return self.__sub__(y)
def __ior__(self, y):
oldvals = super(Set, self).copy()
super(Set, self).__ior__(set([self._adapt_value(x, index=i) for i, x in enumerate(y)]))
try:
self._gvalidate()
except:
ec, ei, tb = sys.exc_info()
try:
super(Set, self).clear()
super(Set, self).__ior__(oldvals)
except Exception, e:
print("das.types.Set.__ior__: Failed to recover set data (%s)" % e)
raise ec, ei, tb
return self
def __or__(self, y):
rv = self.copy()
rv |= y
return rv
def __ror__(self, y):
return self.__or__(y)
def __ixor__(self, y):
oldvals = super(Set, self).copy()
super(Set, self).__ixor__(set([self._adapt_value(x, index=i) for i, x in enumerate(y)]))
try:
self._gvalidate()
except:
ec, ei, tb = sys.exc_info()
try:
super(Set, self).clear()
super(Set, self).__ior__(oldvals)
except Exception, e:
print("das.types.Set.__ixor__: Failed to recover set data (%s)" % e)
raise ec, ei, tb
return self
def __xor__(self, y):
rv = self.copy()
rv ^= y
return rv
def __rxor__(self, y):
rv = self.copy()
rv ^= y
return rv
def __cmp__(self, oth):
# base set class doesn't implement __cmp__
# but we need it for some other purpose
if len(self.symmetric_difference(oth)) == 0:
return 0
elif len(self) <= len(oth):
return -1
else:
return 1
def __iter__(self):
for item in super(Set, self).__iter__():
yield TypeBase.TransferGlobalValidator(self, item)
def clear(self):
oldvals = super(Set, self).copy()
super(Set, self).clear()
try:
self._gvalidate()
except:
ec, ei, tb = sys.exc_info()
try:
super(Set, self).__ior__(oldvals)
except Exception, e:
print("das.types.Set.clear: Failed to recover set data (%s)" % e)
raise ec, ei, tb
def copy(self):
return self._wrap(self)
def add(self, e):
ae = self._adapt_value(e, index=len(self))
if ae in self:
return
super(Set, self).add(ae)
try:
self._gvalidate()
except:
ec, ei, tb = sys.exc_info()
try:
super(Set, self).remove(ae)
except Exception, e:
print("das.types.Set.add: Failed to recover set data (%s)" % e)
raise ec, ei, tb
def update(self, *args):
added = set()
for y in args:
lst = [self._adapt_value(x, index=i) for i, x in enumerate(y)]
for item in lst:
if item in self:
continue
super(Set, self).add(item)
added.add(item)
try:
self._gvalidate()
except:
ec, ei, tb = sys.exc_info()
try:
for item in added:
super(Set, self).remove(item)
except Exception, e:
print("das.types.Set.update: Failed to recover set data (%s)" % e)
raise ec, ei, tb
def pop(self):
item = super(Set, self).pop()
try:
self._gvalidate()
except:
ec, ei, tb = sys.exc_info()
try:
super(Set, self).add(item)
except Exception, e:
print("das.types.Set.pop: Failed to recover set data (%s)" % e)
raise ec, ei, tb
return item
def difference(self, rhs):
return self.__sub__(rhs)
def union(self, rhs):
return self.__or__(rhs)
def intersection(self, rhs):
return self.__and__(rhs)
def symmetric_difference(self, rhs):
return self.__xor__(rhs)
class Dict(TypeBase, dict):
def __init__(self, *args, **kwargs):
TypeBase.__init__(self)
dict.__init__(self, *args, **kwargs)
def _adapt_key(self, key):
st = self._get_schema_type()
return (key if st is None else das.adapt_value(key, schema_type=st.ktype))
def __setitem__(self, k, v):
k = self._adapt_key(k)
wasset = (k in self)
oldval = (self[k] if wasset else None)
super(Dict, self).__setitem__(k, self._adapt_value(v, key=k))
try:
self._gvalidate()
except:
ec, ei, tb = sys.exc_info()
try:
if wasset:
super(Dict, self).__setitem__(k, oldval)
else:
del(self[k])
except Exception, e:
print("das.types.Dict.__setitem__: Failed to recover dict data (%s)" % e)
raise ec, ei, tb
def __getitem__(self, k):
return TypeBase.TransferGlobalValidator(self, super(Dict, self).__getitem__(self._adapt_key(k)))
def __delitem__(self, k):
_k = self._adapt_key(k)
_v = super(Dict, self).__getitem__(_k)
super(Dict, self).__delitem__(_k)
try:
self._gvalidate()
except:
ec, ei, tb = sys.exc_info()
try:
super(Dict, self).__setitem__(_k, _v)
except Exception, e:
print("das.types.Dict.popitem: Failed to recover dict data (%s)" % e)
raise ec, ei, tb
# def __contains__(self, k):
# try:
# _k = self._adapt_key(k)
# return super(Dict, self).__contains__(_k)
# except:
# return False
def setdefault(self, *args):
nargs = len(args)
if nargs > 2:
raise TypeError("setdefault expected at most 2 arguments, got %d" % nargs)
if nargs == 2:
args = (args[0], self._adapt_value(args[1], key=args[0]))
super(Dict, self).setdefault(*args)
def copy(self):
return self._wrap(self)
def update(self, *args, **kwargs):
oldvals = {}
remvals = set()
if len(args) == 1:
a0 = args[0]
if hasattr(a0, "keys"):
for k in a0.keys():
k = self._adapt_key(k)
if k in self:
oldvals[k] = self[k]
else:
remvals.add(k)
self[k] = self._adapt_value(a0[k], key=k)
else:
for k, v in a0:
k = self._adapt_key(k)
if k in self:
oldvals[k] = self[k]
else:
remvals.add(k)
self[k] = self._adapt_value(v, key=k)
elif len(args) > 1:
raise Exception("update expected at most 1 arguments, got %d" % len(args))
for k, v in kwargs.iteritems():
k = self._adapt_key(k)
if k in self:
if not k in oldvals:
oldvals[k] = self[k]
else:
remvals.add(k)
self[k] = self._adapt_value(v, key=k)
try:
self._gvalidate()
except:
ec, ei, tb = sys.exc_info()
try:
for k in remvals:
super(Dict, self).__delitem__(k)
for k, v in oldvals.iteritems():
super(Dict, self).__setitem__(k, v)
except Exception, e:
print("das.types.Dict.update: Failed to recover dict data (%s)" % e)
raise ec, ei, tb
def pop(self, k, *args):
_k = self._adapt_key(k)
_v = super(Dict, self).pop(_k, *args)
try:
self._gvalidate()
except:
ec, ei, tb = sys.exc_info()
try:
# if _k is not defined but a default value is provided, we should not reach here
# as dict is actually unchanged
# -> no need to check if _k was a valid key
super(Dict, self).__setitem__(_k, _v)
except Exception, e:
print("das.types.Dict.popitem: Failed to recover dict data (%s)" % e)
raise ec, ei, tb
return _v
def popitem(self):
item = super(Dict, self).popitem()
try:
self._gvalidate()
except:
ec, ei, tb = sys.exc_info()
try:
super(Dict, self).__setitem__(item[0], item[1])
except Exception, e:
print("das.types.Dict.popitem: Failed to recover dict data (%s)" % e)
raise ec, ei, tb
return item
def clear(self):
items = super(Dict, self).items()
super(Dict, self).clear()
try:
self._gvalidate()
except:
ec, ei, tb = sys.exc_info()
try:
super(Dict, self).update(items)
except Exception, e:
print("das.types.Dict.clear: Failed to recover dict data (%s)" % e)
raise ec, ei, tb
def itervalues(self):
for v in super(Dict, self).itervalues():
yield TypeBase.TransferGlobalValidator(self, v)
def values(self):
return [x for x in self.itervalues()]
def iteritems(self):
for k, v in super(Dict, self).iteritems():
yield k, TypeBase.TransferGlobalValidator(self, v)
def items(self):
return [x for x in self.iteritems()]
class Struct(TypeBase):
def __init__(self, *args, **kwargs):
TypeBase.__init__(self)
self.__dict__["_dict"] = {}
self._update(*args, **kwargs)
def __getattr__(self, k):
try:
k = self._get_alias(k)
return TypeBase.TransferGlobalValidator(self, self._dict[k])
except KeyError:
if hasattr(self._dict, k):
# Look for an override method of the same name prefixed by '_' in current class
k2 = '_' + k
if hasattr(self, k2):
#print("Forward '%s' to %s class '%s'" % (k, self.__class__.__name__, k2))
return getattr(self, k2)
else:
#print("Forward '%s' to dict class '%s'" % (k, k))
return getattr(self._dict, k)
else:
#raise AttributeError("'Struct' has no attribute '%s' (dict %s)" % (k, "has" if hasattr(self._dict, k) else "hasn't"))
return self.__getattribute__(k)
def __setattr__(self, k, v):
# Special case for __class__ member that we may want to modify for
# to enable dynamic function set binding
if k == "__class__":
super(Struct, self).__setattr__(k, v)
else:
k = self._get_alias(k)
self._check_reserved(k)
wasset = (k in self._dict)
oldval = (self._dict[k] if wasset else None)
self._dict[k] = self._adapt_value(v, key=k)
try:
self._gvalidate()
except:
ec, ei, tb = sys.exc_info()
try:
if wasset:
self._dict[k] = oldval
else:
del(self._dict[k])
except Exception, e:
print("das.types.Struct.__setattr__: Failed to recover struct data (%s)" % e)
raise ec, ei, tb
def __delattr__(self, k):
k = self._get_alias(k)
oldval = self._dict.get(k, None)
self._dict.__delitem__(k)
try:
self._gvalidate()
except:
ec, ei, tb = sys.exc_info()
# Note: we can reach here only if k was a valid key (otherwise __delitem__(k) would fail)
try:
self._dict[k] = oldval
except Exception, e:
print("das.types.Struct.__delattr__: Failed to recover struct data (%s)" % e)
raise ec, ei, tb
def __getitem__(self, k):
k = self._get_alias(k)
return TypeBase.TransferGlobalValidator(self, self._dict.__getitem__(k))
def __setitem__(self, k, v):
k = self._get_alias(k)
self._check_reserved(k)
wasset = (k in self._dict)
oldval = (self._dict[k] if wasset else None)
self._dict.__setitem__(k, self._adapt_value(v, key=k))
try:
self._gvalidate()
except:
ec, ei, tb = sys.exc_info()
try:
if wasset:
self._dict[k] = oldval
else:
del(self._dict[k])
except Exception, e:
print("das.types.Struct.__setitem__: Failed to recover struct data (%s)" % e)
raise ec, ei, tb
def __delitem__(self, k):
_k = k
k = self._get_alias(k)
oldval = self._dict.get(k, None)
self._dict.__delitem__(k)
try:
self._gvalidate()
except:
ec, ei, tb = sys.exc_info()
# Note: we can reach here only if k was a valid key (otherwise __delitem__(k) would fail)
try:
self._dict[k] = oldval
except Exception, e:
print("das.types.Struct.__delitem__: Failed to recover struct data (%s)" % e)
raise ec, ei, tb
def __contains__(self, k):
return self._dict.__contains__(self._get_alias(k))
def __cmp__(self, oth):
return self._dict.__cmp__(oth._dict if isinstance(oth, Struct) else oth)
def __eq__(self, oth):
return self._dict.__eq__(oth._dict if isinstance(oth, Struct) else oth)
def __ge__(self, oth):
return self._dict.__ge__(oth._dict if isinstance(oth, Struct) else oth)
def __le__(self, oth):
return self._dict.__le__(oth._dict if isinstance(oth, Struct) else oth)
def __gt__(self, oth):
return self._dict.__gt__(oth._dict if isinstance(oth, Struct) else oth)
def __lt__(self, oth):
return self._dict.__lt__(oth._dict if isinstance(oth, Struct) else oth)
def __iter__(self):
return self._dict.__iter__()
def __len__(self):
return self._dict.__len__()
def __str__(self):
return self._dict.__str__()
def __repr__(self):
return self._dict.__repr__()
# Override of dict.has_key
def _has_key(self, k):
return self._dict.has_key(self._get_alias(k))
# Override of dict.pop
def _pop(self, k, *args):
_k = k
k = self._get_alias(k)
oldval = self._dict.get(k, None)
retval = self._dict.pop(k, *args)
try:
self._gvalidate()
except:
ec, ei, tb = sys.exc_info()
try:
self._dict[k] = oldval
except Exception, e:
print("das.types.Struct.pop: Failed to recover struct data (%s)" % e)
raise ec, ei, tb
return retval
# Override of dict.popitem
def _popitem(self):
k, v = self._dict.popitem()
try:
self._gvalidate()
except:
ec, ei, tb = sys.exc_info()
try:
self._dict[k] = v
except Exception, e:
print("das.types.Struct.popitem: Failed to recover struct data (%s)" % e)
raise ec, ei, tb
# Override of dict.clear
def _clear(self):
items = self._dict.items()
self._dict.clear()
try:
self._gvalidate()
except:
ec, ei, tb = sys.exc_info()
try:
self._dict.update(items)
except Exception, e:
print("das.types.Struct.clear: Failed to recover struct data (%s)" % e)
raise ec, ei, tb
# Override of dict.copy
def _copy(self):
return self._wrap(self)
# Override of dict.setdefault
def _setdefault(self, *args):
nargs = len(args)
if nargs > 2:
raise TypeError("_setdefault expected at most 2 arguments, got %d" % nargs)
if nargs >= 1:
self._check_reserved(args[0])
if nargs == 2:
args = (args[0], self._adapt_value(args[1], key=args[0]))
self._dict.setdefault(*args)
# Override of dict.update
def _update(self, *args, **kwargs):
if len(args) > 1:
raise Exception("update expected at most 1 arguments, got %d" % len(args))
oldvals = self._dict.copy()
try:
if len(args) == 1:
a0 = args[0]
if hasattr(a0, "keys"):
for k in a0.keys():
k = self._get_alias(k)
self._check_reserved(k)
self._dict[k] = self._adapt_value(a0[k], key=k)
else:
for k, v in a0:
k = self._get_alias(k)
self._check_reserved(k)
self._dict[k] = self._adapt_value(v, key=k)
for k, v in kwargs.iteritems():
k = self._get_alias(k)
self._check_reserved(k)
self._dict[k] = self._adapt_value(v, key=k)
self._gvalidate()
except:
ec, ei, tb = sys.exc_info()
try:
self._dict.clear()
self._dict.update(oldvals)
except Exception, e:
print("das.types.Struct.update: Failed to recover struct data (%s)" % e)
raise ec, ei, tb
def _get_alias(self, k):
st = self._get_schema_type()
if st is not None and st.has_key(k):
aliasname = das.schematypes.Alias.Name(st[k])
if aliasname is not None:
# if isinstance(st[k], das.schematypes.Deprecated):
# message = ("[das] Field %s is deprecated, use %s instead" % (repr(k), repr(aliasname)))
# das.print_once(message)
return aliasname
return k
def _check_reserved(self, k):
if hasattr(self.__class__, k):
raise ReservedNameError(k)
elif hasattr(self._dict, k):
k2 = "_" + k
if hasattr(self, k2):
# don't need to create forwarding attribute (set __getattr__)
return
if k2 in self.__dict__:
if self.__dict__[k2] != getattr(self._dict, k):
raise ReservedNameError(k)
else:
msg = "[das] %s's '%s(...)' method conflicts with data field '%s', use '_%s(...)' to call it instead" % (type(self).__name__, k, k, k)
st = self._get_schema_type()
if st is not None:
n = das.get_schema_type_name(st)
if n:
msg = "[%s] %s" % (n, msg)
das.print_once(msg)
self.__dict__[k2] = getattr(self._dict, k)
def ordered_keys(self):
return filter(lambda x: x in self, self._get_schema_type().ordered_keys())
def _itervalues(self):
for v in self._dict.itervalues():
yield TypeBase.TransferGlobalValidator(self, v)
def _values(self):
return [x for x in self.itervalues()]
def _iteritems(self):
for k, v in self._dict.iteritems():
yield k, TypeBase.TransferGlobalValidator(self, v)
def _items(self):
return [x for x in self.iteritems()]
| 2.40625 | 2 |
track.py | AliabbasMerchant/fileTrackAndBackup | 6 | 4023 | #! /usr/bin/python3
from help import *
import time
# short-forms are used, so as to reduce the .json file size
# t : type - d or f
# d : directory
# f : file
# ts : timestamp
# dirs : The dictionary containing info about directory contents
# time : edit time of the file/folder
# s : size of the file/folder
# p : full path of the file/folder
# n : name of the main file/folder in the .json file
# i : info about the contents in the .json file
# folder = {'t': 'd', 's': get_size(dir_dict), 'p': full_path + '/' + entity, 'time': get_time(stats), 'dirs': dir_dict}
# file = {'t': 'f', 's': stats.st_size, 'p': full_path + '/' + entity, 'time': get_time(stats)}
# info = {'t': 'd', 's': size, 'p': base_path, 'time': get_time(stats), 'dirs': info}
# write = {'n': examine_name, 'ts': time.time(), 'i': info}
# info = {'t': 'f', 's': stats.st_size, 'p': base_path, 'time': get_time(stats)}
# write = {'n': examine_name, 'ts': time.time(), 'i': info}
no_of_files = 0
no_of_dirs = 0
examine_name = ''
save_filename = ''
_base_path = None
_ignore = False
errors = []
def get_save_config(base_path: str) -> None:
global examine_name, save_filename
examine_name = base_path.strip().split('/')[-1]
save_filename = examine_name + '.json'
if not os.path.lexists(constants.save_folder_name):
execute_bash("mkdir " + constants.save_folder_name)
def get_info_dict(sub_path: str) -> dict:
global no_of_files, no_of_dirs, _base_path, _ignore, errors
full_path = _base_path + '/' + sub_path
full_path = full_path.strip()
if full_path.endswith('/'):
full_path = full_path[:-1]
edit_dict = dict()
try:
entity_list = os.listdir(full_path)
for entity in entity_list:
ignore_it = False
if _ignore and to_be_ignored(full_path + '/' + entity): # ignoring cache temp etc files
ignore_it = True
if not ignore_it:
try:
stats = os.stat(full_path + '/' + entity)
if not os.path.islink(full_path + '/' + entity):
if os.path.isdir(full_path + '/' + entity):
no_of_dirs += 1
new_sub_path = sub_path + '/' + entity
dir_dict = get_info_dict(new_sub_path)
edit_dict[entity] = {'t': 'd', 's': get_size(dir_dict), 'p': full_path + '/' + entity,
'time': get_time(stats), 'dirs': dir_dict}
if os.path.isfile(full_path + '/' + entity):
no_of_files += 1
edit_dict[entity] = {'t': 'f', 's': stats.st_size, 'p': full_path + '/' + entity,
'time': get_time(stats)}
except FileNotFoundError:
errors.append(full_path + '/' + entity)
except PermissionError:
errors.append(full_path)
return edit_dict
def track(base_path: str, dir_path: str, output: bool = False, ignore: bool = False) -> list:
global _base_path, no_of_dirs, no_of_files, save_filename, _ignore, errors
no_of_dirs = 0
no_of_files = 0
print("Tracking...")
_base_path = base_path
_ignore = ignore
get_save_config(base_path)
if _ignore:
get_ignore_list()
if os.path.isdir(base_path):
info = get_info_dict('')
size = get_size(info)
no_of_dirs += 1
stats = os.stat(base_path)
info = {'t': 'd', 's': size, 'p': base_path, 'time': get_time(stats), 'dirs': info}
write = {'n': examine_name, 'ts': time.time(), 'i': info}
write_to_json_file(write, constants.save_folder_name + "/" + save_filename)
if output:
print("Successfully analysed the folder " + base_path)
print("Found {} folder(s)".format(no_of_dirs))
print("Found {} file(s)".format(no_of_files))
print("The directory is of size {}".format(get_size_format(size)))
print("A detailed report can be found using the 'file_tb.py print [FILE/FOLDER]' command ")
else:
no_of_files += 1
stats = os.stat(base_path)
info = {'t': 'f', 's': stats.st_size, 'p': base_path, 'time': get_time(stats)}
write = {'n': examine_name, 'ts': time.time(), 'i': info}
write_to_json_file(write, constants.save_folder_name + "/" + save_filename)
if output:
print("Successfully analysed the file")
print("The file is of size {}".format(get_size_format(stats.st_size)))
print("A detailed report can be found using the 'file_tb.py print [FILE/FOLDER]' command ")
# pp(info)
return errors
if __name__ == '__main__':
track(os.getcwd(), os.getcwd(), output=True)
| 2.703125 | 3 |
clang/tools/scan-build-py/libscanbuild/analyze.py | Kvarnefalk/llvm-project | 1 | 4024 | <filename>clang/tools/scan-build-py/libscanbuild/analyze.py<gh_stars>1-10
# -*- coding: utf-8 -*-
# Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
# See https://llvm.org/LICENSE.txt for license information.
# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
""" This module implements the 'scan-build' command API.
To run the static analyzer against a build is done in multiple steps:
-- Intercept: capture the compilation command during the build,
-- Analyze: run the analyzer against the captured commands,
-- Report: create a cover report from the analyzer outputs. """
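# Illustrative end-to-end flow (a sketch; the exact CLI flags are defined in
# libscanbuild.arguments):
#   $ scan-build -o /tmp/reports make -j4
#   1. the build runs under compiler wrappers (or the intercept module),
#      capturing every compilation command,
#   2. run() re-invokes clang with '--analyze' for each captured command,
#   3. document() assembles the per-file results into an html/plist/sarif report.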
import re
import os
import os.path
import json
import logging
import multiprocessing
import tempfile
import functools
import subprocess
import contextlib
import datetime
import shutil
import glob
from collections import defaultdict
from libscanbuild import command_entry_point, compiler_wrapper, \
wrapper_environment, run_build, run_command, CtuConfig
from libscanbuild.arguments import parse_args_for_scan_build, \
parse_args_for_analyze_build
from libscanbuild.intercept import capture
from libscanbuild.report import document
from libscanbuild.compilation import split_command, classify_source, \
compiler_language
from libscanbuild.clang import get_version, get_arguments, get_triple_arch, \
ClangErrorException
from libscanbuild.shell import decode
__all__ = ['scan_build', 'analyze_build', 'analyze_compiler_wrapper']
COMPILER_WRAPPER_CC = 'analyze-cc'
COMPILER_WRAPPER_CXX = 'analyze-c++'
CTU_EXTDEF_MAP_FILENAME = 'externalDefMap.txt'
CTU_TEMP_DEFMAP_FOLDER = 'tmpExternalDefMaps'
@command_entry_point
def scan_build():
""" Entry point for scan-build command. """
args = parse_args_for_scan_build()
# will re-assign the report directory as new output
with report_directory(
args.output, args.keep_empty, args.output_format) as args.output:
# Run against a build command. there are cases, when analyzer run
# is not required. But we need to set up everything for the
# wrappers, because 'configure' needs to capture the CC/CXX values
# for the Makefile.
if args.intercept_first:
# Run build command with intercept module.
exit_code = capture(args)
# Run the analyzer against the captured commands.
if need_analyzer(args.build):
govern_analyzer_runs(args)
else:
# Run build command and analyzer with compiler wrappers.
environment = setup_environment(args)
exit_code = run_build(args.build, env=environment)
# Cover report generation and bug counting.
number_of_bugs = document(args)
# Set exit status as it was requested.
return number_of_bugs if args.status_bugs else exit_code
@command_entry_point
def analyze_build():
""" Entry point for analyze-build command. """
args = parse_args_for_analyze_build()
# will re-assign the report directory as new output
with report_directory(args.output, args.keep_empty, args.output_format) as args.output:
# Run the analyzer against a compilation db.
govern_analyzer_runs(args)
# Cover report generation and bug counting.
number_of_bugs = document(args)
# Set exit status as it was requested.
return number_of_bugs if args.status_bugs else 0
def need_analyzer(args):
""" Check the intent of the build command.
When static analyzer run against project configure step, it should be
silent and no need to run the analyzer or generate report.
To run `scan-build` against the configure step might be necessary,
when compiler wrappers are used. That's the moment when build setup
check the compiler and capture the location for the build process. """
return len(args) and not re.search(r'configure|autogen', args[0])
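# Worked examples:
#   need_analyzer(['make', 'all'])              -> truthy (build step, run the analyzer)
#   need_analyzer(['./configure', '--prefix=/'])-> falsy (configure step, stay silent)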
def prefix_with(constant, pieces):
""" From a sequence create another sequence where every second element
is from the original sequence and the odd elements are the prefix.
eg.: prefix_with(0, [1,2,3]) creates [0, 1, 0, 2, 0, 3] """
return [elem for piece in pieces for elem in [constant, piece]]
def get_ctu_config_from_args(args):
""" CTU configuration is created from the chosen phases and dir. """
return (
CtuConfig(collect=args.ctu_phases.collect,
analyze=args.ctu_phases.analyze,
dir=args.ctu_dir,
extdef_map_cmd=args.extdef_map_cmd)
if hasattr(args, 'ctu_phases') and hasattr(args.ctu_phases, 'dir')
else CtuConfig(collect=False, analyze=False, dir='', extdef_map_cmd=''))
def get_ctu_config_from_json(ctu_conf_json):
""" CTU configuration is created from the chosen phases and dir. """
ctu_config = json.loads(ctu_conf_json)
# Recover namedtuple from json when coming from analyze-cc or analyze-c++
return CtuConfig(collect=ctu_config[0],
analyze=ctu_config[1],
dir=ctu_config[2],
extdef_map_cmd=ctu_config[3])
def create_global_ctu_extdef_map(extdef_map_lines):
""" Takes iterator of individual external definition maps and creates a
global map keeping only unique names. We leave conflicting names out of
CTU.
:param extdef_map_lines: Contains the id of a definition (mangled name) and
the originating source (the corresponding AST file) name.
:type extdef_map_lines: Iterator of str.
:returns: Mangled name - AST file pairs.
:rtype: List of (str, str) tuples.
"""
mangled_to_asts = defaultdict(set)
for line in extdef_map_lines:
mangled_name, ast_file = line.strip().split(' ', 1)
mangled_to_asts[mangled_name].add(ast_file)
mangled_ast_pairs = []
for mangled_name, ast_files in mangled_to_asts.items():
if len(ast_files) == 1:
mangled_ast_pairs.append((mangled_name, next(iter(ast_files))))
return mangled_ast_pairs
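# Worked example: names defined in more than one AST file are dropped.
#   lines = ['_Z3foov a.cpp.ast', '_Z3barv b.cpp.ast', '_Z3foov c.cpp.ast']
#   create_global_ctu_extdef_map(lines) -> [('_Z3barv', 'b.cpp.ast')]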
def merge_ctu_extdef_maps(ctudir):
""" Merge individual external definition maps into a global one.
As the collect phase runs parallel on multiple threads, all compilation
units are separately mapped into a temporary file in CTU_TEMP_DEFMAP_FOLDER.
These definition maps contain the mangled names and the source
(AST generated from the source) which had their definition.
These files should be merged at the end into a global map file:
CTU_EXTDEF_MAP_FILENAME."""
def generate_extdef_map_lines(extdefmap_dir):
""" Iterate over all lines of input files in a determined order. """
files = glob.glob(os.path.join(extdefmap_dir, '*'))
files.sort()
for filename in files:
with open(filename, 'r') as in_file:
for line in in_file:
yield line
def write_global_map(arch, mangled_ast_pairs):
""" Write (mangled name, ast file) pairs into final file. """
extern_defs_map_file = os.path.join(ctudir, arch,
CTU_EXTDEF_MAP_FILENAME)
with open(extern_defs_map_file, 'w') as out_file:
for mangled_name, ast_file in mangled_ast_pairs:
out_file.write('%s %s\n' % (mangled_name, ast_file))
triple_arches = glob.glob(os.path.join(ctudir, '*'))
for triple_path in triple_arches:
if os.path.isdir(triple_path):
triple_arch = os.path.basename(triple_path)
extdefmap_dir = os.path.join(ctudir, triple_arch,
CTU_TEMP_DEFMAP_FOLDER)
extdef_map_lines = generate_extdef_map_lines(extdefmap_dir)
mangled_ast_pairs = create_global_ctu_extdef_map(extdef_map_lines)
write_global_map(triple_arch, mangled_ast_pairs)
# Remove all temporary files
shutil.rmtree(extdefmap_dir, ignore_errors=True)
def run_analyzer_parallel(args):
""" Runs the analyzer against the given compilation database. """
def exclude(filename, directory):
""" Return true when any excluded directory prefix the filename. """
if not os.path.isabs(filename):
# filename is either absolute or relative to directory. Need to turn
# it to absolute since 'args.excludes' are absolute paths.
filename = os.path.normpath(os.path.join(directory, filename))
return any(re.match(r'^' + exclude_directory, filename)
for exclude_directory in args.excludes)
consts = {
'clang': args.clang,
'output_dir': args.output,
'output_format': args.output_format,
'output_failures': args.output_failures,
'direct_args': analyzer_params(args),
'force_debug': args.force_debug,
'ctu': get_ctu_config_from_args(args)
}
logging.debug('run analyzer against compilation database')
with open(args.cdb, 'r') as handle:
generator = (dict(cmd, **consts)
for cmd in json.load(handle) if not exclude(
cmd['file'], cmd['directory']))
# when verbose output requested execute sequentially
pool = multiprocessing.Pool(1 if args.verbose > 2 else None)
for current in pool.imap_unordered(run, generator):
if current is not None:
# display error message from the static analyzer
for line in current['error_output']:
logging.info(line.rstrip())
pool.close()
pool.join()
def govern_analyzer_runs(args):
""" Governs multiple runs in CTU mode or runs once in normal mode. """
ctu_config = get_ctu_config_from_args(args)
# If we do a CTU collect (1st phase) we remove all previous collection
# data first.
if ctu_config.collect:
shutil.rmtree(ctu_config.dir, ignore_errors=True)
# If the user asked for a collect (1st) and analyze (2nd) phase, we do an
# all-in-one run where we deliberately remove collection data before and
# also after the run. If the user asks only for a single phase data is
# left so multiple analyze runs can use the same data gathered by a single
# collection run.
if ctu_config.collect and ctu_config.analyze:
# CTU strings are coming from args.ctu_dir and extdef_map_cmd,
# so we can leave it empty
args.ctu_phases = CtuConfig(collect=True, analyze=False,
dir='', extdef_map_cmd='')
run_analyzer_parallel(args)
merge_ctu_extdef_maps(ctu_config.dir)
args.ctu_phases = CtuConfig(collect=False, analyze=True,
dir='', extdef_map_cmd='')
run_analyzer_parallel(args)
shutil.rmtree(ctu_config.dir, ignore_errors=True)
else:
# Single runs (collect or analyze) are launched from here.
run_analyzer_parallel(args)
if ctu_config.collect:
merge_ctu_extdef_maps(ctu_config.dir)
def setup_environment(args):
""" Set up environment for build command to interpose compiler wrapper. """
environment = dict(os.environ)
environment.update(wrapper_environment(args))
environment.update({
'CC': COMPILER_WRAPPER_CC,
'CXX': COMPILER_WRAPPER_CXX,
'ANALYZE_BUILD_CLANG': args.clang if need_analyzer(args.build) else '',
'ANALYZE_BUILD_REPORT_DIR': args.output,
'ANALYZE_BUILD_REPORT_FORMAT': args.output_format,
'ANALYZE_BUILD_REPORT_FAILURES': 'yes' if args.output_failures else '',
'ANALYZE_BUILD_PARAMETERS': ' '.join(analyzer_params(args)),
'ANALYZE_BUILD_FORCE_DEBUG': 'yes' if args.force_debug else '',
'ANALYZE_BUILD_CTU': json.dumps(get_ctu_config_from_args(args))
})
return environment
@command_entry_point
def analyze_compiler_wrapper():
""" Entry point for `analyze-cc` and `analyze-c++` compiler wrappers. """
return compiler_wrapper(analyze_compiler_wrapper_impl)
def analyze_compiler_wrapper_impl(result, execution):
""" Implements analyzer compiler wrapper functionality. """
# don't run analyzer when compilation fails. or when it's not requested.
if result or not os.getenv('ANALYZE_BUILD_CLANG'):
return
# check is it a compilation?
compilation = split_command(execution.cmd)
if compilation is None:
return
# collect the needed parameters from environment, crash when missing
parameters = {
'clang': os.getenv('ANALYZE_BUILD_CLANG'),
'output_dir': os.getenv('ANALYZE_BUILD_REPORT_DIR'),
'output_format': os.getenv('ANALYZE_BUILD_REPORT_FORMAT'),
'output_failures': os.getenv('ANALYZE_BUILD_REPORT_FAILURES'),
'direct_args': os.getenv('ANALYZE_BUILD_PARAMETERS',
'').split(' '),
'force_debug': os.getenv('ANALYZE_BUILD_FORCE_DEBUG'),
'directory': execution.cwd,
'command': [execution.cmd[0], '-c'] + compilation.flags,
'ctu': get_ctu_config_from_json(os.getenv('ANALYZE_BUILD_CTU'))
}
# call static analyzer against the compilation
for source in compilation.files:
parameters.update({'file': source})
logging.debug('analyzer parameters %s', parameters)
current = run(parameters)
# display error message from the static analyzer
if current is not None:
for line in current['error_output']:
logging.info(line.rstrip())
@contextlib.contextmanager
def report_directory(hint, keep, output_format):
""" Responsible for the report directory.
hint -- could specify the parent directory of the output directory.
keep -- a boolean value to keep or delete the empty report directory. """
stamp_format = 'scan-build-%Y-%m-%d-%H-%M-%S-%f-'
stamp = datetime.datetime.now().strftime(stamp_format)
parent_dir = os.path.abspath(hint)
if not os.path.exists(parent_dir):
os.makedirs(parent_dir)
name = tempfile.mkdtemp(prefix=stamp, dir=parent_dir)
logging.info('Report directory created: %s', name)
try:
yield name
finally:
if os.listdir(name):
if output_format != 'sarif':
# 'scan-view' currently does not support sarif format.
msg = "Run 'scan-view %s' to examine bug reports."
else:
msg = "View result at %s/results-merged.sarif."
keep = True
else:
if keep:
msg = "Report directory '%s' contains no report, but kept."
else:
msg = "Removing directory '%s' because it contains no report."
logging.warning(msg, name)
if not keep:
os.rmdir(name)
def analyzer_params(args):
""" A group of command line arguments can mapped to command
line arguments of the analyzer. This method generates those. """
result = []
if args.store_model:
result.append('-analyzer-store={0}'.format(args.store_model))
if args.constraints_model:
result.append('-analyzer-constraints={0}'.format(
args.constraints_model))
if args.internal_stats:
result.append('-analyzer-stats')
if args.analyze_headers:
result.append('-analyzer-opt-analyze-headers')
if args.stats:
result.append('-analyzer-checker=debug.Stats')
if args.maxloop:
result.extend(['-analyzer-max-loop', str(args.maxloop)])
if args.output_format:
result.append('-analyzer-output={0}'.format(args.output_format))
if args.analyzer_config:
result.extend(['-analyzer-config', args.analyzer_config])
if args.verbose >= 4:
result.append('-analyzer-display-progress')
if args.plugins:
result.extend(prefix_with('-load', args.plugins))
if args.enable_checker:
checkers = ','.join(args.enable_checker)
result.extend(['-analyzer-checker', checkers])
if args.disable_checker:
checkers = ','.join(args.disable_checker)
result.extend(['-analyzer-disable-checker', checkers])
return prefix_with('-Xclang', result)
def require(required):
""" Decorator for checking the required values in state.
It checks the required attributes in the passed state and stop when
any of those is missing. """
def decorator(function):
@functools.wraps(function)
def wrapper(*args, **kwargs):
for key in required:
if key not in args[0]:
raise KeyError('{0} not passed to {1}'.format(
key, function.__name__))
return function(*args, **kwargs)
return wrapper
return decorator
@require(['command', # entry from compilation database
'directory', # entry from compilation database
'file', # entry from compilation database
'clang', # clang executable name (and path)
'direct_args', # arguments from command line
'force_debug', # kill non debug macros
'output_dir', # where generated report files shall go
'output_format', # it's 'plist', 'html', 'plist-html', 'plist-multi-file', or 'sarif'
'output_failures', # generate crash reports or not
'ctu']) # ctu control options
def run(opts):
""" Entry point to run (or not) static analyzer against a single entry
of the compilation database.
This complex task is decomposed into smaller methods which are calling
each other in chain. If the analysis is not possible the given method
just return and break the chain.
The passed parameter is a python dictionary. Each method first check
that the needed parameters received. (This is done by the 'require'
decorator. It's like an 'assert' to check the contract between the
caller and the called method.) """
try:
command = opts.pop('command')
command = command if isinstance(command, list) else decode(command)
logging.debug("Run analyzer against '%s'", command)
opts.update(classify_parameters(command))
return arch_check(opts)
except Exception:
logging.error("Problem occurred during analysis.", exc_info=1)
return None
@require(['clang', 'directory', 'flags', 'file', 'output_dir', 'language',
'error_output', 'exit_code'])
def report_failure(opts):
""" Create report when analyzer failed.
The major report is the preprocessor output. The output filename generated
randomly. The compiler output also captured into '.stderr.txt' file.
And some more execution context also saved into '.info.txt' file. """
def extension():
""" Generate preprocessor file extension. """
mapping = {'objective-c++': '.mii', 'objective-c': '.mi', 'c++': '.ii'}
return mapping.get(opts['language'], '.i')
def destination():
""" Creates failures directory if not exits yet. """
failures_dir = os.path.join(opts['output_dir'], 'failures')
if not os.path.isdir(failures_dir):
os.makedirs(failures_dir)
return failures_dir
# Classify error type: when Clang terminated by a signal it's a 'Crash'.
# (python subprocess Popen.returncode is negative when child terminated
# by signal.) Everything else is 'Other Error'.
error = 'crash' if opts['exit_code'] < 0 else 'other_error'
# Create preprocessor output file name. (This is blindly following the
# Perl implementation.)
(handle, name) = tempfile.mkstemp(suffix=extension(),
prefix='clang_' + error + '_',
dir=destination())
os.close(handle)
# Execute Clang again, but run the syntax check only.
cwd = opts['directory']
cmd = [opts['clang'], '-fsyntax-only', '-E'] + opts['flags'] + \
[opts['file'], '-o', name]
try:
cmd = get_arguments(cmd, cwd)
run_command(cmd, cwd=cwd)
except subprocess.CalledProcessError:
pass
except ClangErrorException:
pass
# write general information about the crash
with open(name + '.info.txt', 'w') as handle:
handle.write(opts['file'] + os.linesep)
handle.write(error.title().replace('_', ' ') + os.linesep)
handle.write(' '.join(cmd) + os.linesep)
handle.write(' '.join(os.uname()) + os.linesep)
handle.write(get_version(opts['clang']))
handle.close()
# write the captured output too
with open(name + '.stderr.txt', 'w') as handle:
handle.writelines(opts['error_output'])
handle.close()
@require(['clang', 'directory', 'flags', 'direct_args', 'file', 'output_dir',
'output_format'])
def run_analyzer(opts, continuation=report_failure):
""" It assembles the analysis command line and executes it. Capture the
output of the analysis and returns with it. If failure reports are
requested, it calls the continuation to generate it. """
def target():
""" Creates output file name for reports. """
if opts['output_format'] in {
'plist',
'plist-html',
'plist-multi-file'}:
(handle, name) = tempfile.mkstemp(prefix='report-',
suffix='.plist',
dir=opts['output_dir'])
os.close(handle)
return name
elif opts['output_format'] == 'sarif':
(handle, name) = tempfile.mkstemp(prefix='result-',
suffix='.sarif',
dir=opts['output_dir'])
os.close(handle)
return name
return opts['output_dir']
try:
cwd = opts['directory']
cmd = get_arguments([opts['clang'], '--analyze'] +
opts['direct_args'] + opts['flags'] +
[opts['file'], '-o', target()],
cwd)
output = run_command(cmd, cwd=cwd)
return {'error_output': output, 'exit_code': 0}
except subprocess.CalledProcessError as ex:
result = {'error_output': ex.output, 'exit_code': ex.returncode}
if opts.get('output_failures', False):
opts.update(result)
continuation(opts)
return result
except ClangErrorException as ex:
result = {'error_output': ex.error, 'exit_code': 0}
if opts.get('output_failures', False):
opts.update(result)
continuation(opts)
return result
def extdef_map_list_src_to_ast(extdef_src_list):
""" Turns textual external definition map list with source files into an
external definition map list with ast files. """
extdef_ast_list = []
for extdef_src_txt in extdef_src_list:
mangled_name, path = extdef_src_txt.split(" ", 1)
# Normalize path on windows as well
path = os.path.splitdrive(path)[1]
# Make relative path out of absolute
path = path[1:] if path[0] == os.sep else path
ast_path = os.path.join("ast", path + ".ast")
extdef_ast_list.append(mangled_name + " " + ast_path)
return extdef_ast_list
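# Worked example (POSIX paths): '_Z3foov /home/u/proj/foo.cpp' becomes
# '_Z3foov ast/home/u/proj/foo.cpp.ast' -- the absolute source path is made
# relative and re-rooted under the 'ast' directory.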
@require(['clang', 'directory', 'flags', 'direct_args', 'file', 'ctu'])
def ctu_collect_phase(opts):
""" Preprocess source by generating all data needed by CTU analysis. """
def generate_ast(triple_arch):
""" Generates ASTs for the current compilation command. """
args = opts['direct_args'] + opts['flags']
ast_joined_path = os.path.join(opts['ctu'].dir, triple_arch, 'ast',
os.path.realpath(opts['file'])[1:] +
'.ast')
ast_path = os.path.abspath(ast_joined_path)
ast_dir = os.path.dirname(ast_path)
if not os.path.isdir(ast_dir):
try:
os.makedirs(ast_dir)
except OSError:
# In case another process already created it.
pass
ast_command = [opts['clang'], '-emit-ast']
ast_command.extend(args)
ast_command.append('-w')
ast_command.append(opts['file'])
ast_command.append('-o')
ast_command.append(ast_path)
logging.debug("Generating AST using '%s'", ast_command)
run_command(ast_command, cwd=opts['directory'])
def map_extdefs(triple_arch):
""" Generate external definition map file for the current source. """
args = opts['direct_args'] + opts['flags']
extdefmap_command = [opts['ctu'].extdef_map_cmd]
extdefmap_command.append(opts['file'])
extdefmap_command.append('--')
extdefmap_command.extend(args)
logging.debug("Generating external definition map using '%s'",
extdefmap_command)
extdef_src_list = run_command(extdefmap_command, cwd=opts['directory'])
extdef_ast_list = extdef_map_list_src_to_ast(extdef_src_list)
extern_defs_map_folder = os.path.join(opts['ctu'].dir, triple_arch,
CTU_TEMP_DEFMAP_FOLDER)
if not os.path.isdir(extern_defs_map_folder):
try:
os.makedirs(extern_defs_map_folder)
except OSError:
                # In case another process already created it.
pass
if extdef_ast_list:
with tempfile.NamedTemporaryFile(mode='w',
dir=extern_defs_map_folder,
delete=False) as out_file:
out_file.write("\n".join(extdef_ast_list) + "\n")
cwd = opts['directory']
cmd = [opts['clang'], '--analyze'] + opts['direct_args'] + opts['flags'] \
+ [opts['file']]
triple_arch = get_triple_arch(cmd, cwd)
generate_ast(triple_arch)
map_extdefs(triple_arch)
@require(['ctu'])
def dispatch_ctu(opts, continuation=run_analyzer):
""" Execute only one phase of 2 phases of CTU if needed. """
ctu_config = opts['ctu']
if ctu_config.collect or ctu_config.analyze:
assert ctu_config.collect != ctu_config.analyze
if ctu_config.collect:
return ctu_collect_phase(opts)
if ctu_config.analyze:
cwd = opts['directory']
cmd = [opts['clang'], '--analyze'] + opts['direct_args'] \
+ opts['flags'] + [opts['file']]
triarch = get_triple_arch(cmd, cwd)
ctu_options = ['ctu-dir=' + os.path.join(ctu_config.dir, triarch),
'experimental-enable-naive-ctu-analysis=true']
analyzer_options = prefix_with('-analyzer-config', ctu_options)
direct_options = prefix_with('-Xanalyzer', analyzer_options)
opts['direct_args'].extend(direct_options)
return continuation(opts)
@require(['flags', 'force_debug'])
def filter_debug_flags(opts, continuation=dispatch_ctu):
""" Filter out nondebug macros when requested. """
if opts.pop('force_debug'):
# lazy implementation just append an undefine macro at the end
opts.update({'flags': opts['flags'] + ['-UNDEBUG']})
return continuation(opts)
@require(['language', 'compiler', 'file', 'flags'])
def language_check(opts, continuation=filter_debug_flags):
""" Find out the language from command line parameters or file name
extension. The decision also influenced by the compiler invocation. """
accepted = frozenset({
'c', 'c++', 'objective-c', 'objective-c++', 'c-cpp-output',
'c++-cpp-output', 'objective-c-cpp-output'
})
# language can be given as a parameter...
language = opts.pop('language')
compiler = opts.pop('compiler')
# ... or find out from source file extension
if language is None and compiler is not None:
language = classify_source(opts['file'], compiler == 'c')
if language is None:
logging.debug('skip analysis, language not known')
return None
elif language not in accepted:
logging.debug('skip analysis, language not supported')
return None
else:
logging.debug('analysis, language: %s', language)
opts.update({'language': language,
'flags': ['-x', language] + opts['flags']})
return continuation(opts)
@require(['arch_list', 'flags'])
def arch_check(opts, continuation=language_check):
""" Do run analyzer through one of the given architectures. """
disabled = frozenset({'ppc', 'ppc64'})
received_list = opts.pop('arch_list')
if received_list:
# filter out disabled architectures and -arch switches
filtered_list = [a for a in received_list if a not in disabled]
if filtered_list:
            # There should be only one arch given (or the same one multiple
            # times). If multiple, different archs are given, they should not
            # change the pre-processing step. But that's the only pass we
            # have before running the analyzer.
current = filtered_list.pop()
logging.debug('analysis, on arch: %s', current)
opts.update({'flags': ['-arch', current] + opts['flags']})
return continuation(opts)
else:
logging.debug('skip analysis, found not supported arch')
return None
else:
logging.debug('analysis, on default arch')
return continuation(opts)
# To get good results from the static analyzer, certain compiler options should
# be omitted. The compiler flag filtering only affects the static analyzer run.
#
# Keys are the option names; values are the number of following arguments to skip.
IGNORED_FLAGS = {
'-c': 0, # compile option will be overwritten
'-fsyntax-only': 0, # static analyzer option will be overwritten
'-o': 1, # will set up own output file
    # flags below are inherited from the Perl implementation.
'-g': 0,
'-save-temps': 0,
'-install_name': 1,
'-exported_symbols_list': 1,
'-current_version': 1,
'-compatibility_version': 1,
'-init': 1,
'-e': 1,
'-seg1addr': 1,
'-bundle_loader': 1,
'-multiply_defined': 1,
'-sectorder': 3,
'--param': 1,
'--serialize-diagnostics': 1
}
def classify_parameters(command):
""" Prepare compiler flags (filters some and add others) and take out
language (-x) and architecture (-arch) flags for future processing. """
result = {
'flags': [], # the filtered compiler flags
'arch_list': [], # list of architecture flags
'language': None, # compilation language, None, if not specified
'compiler': compiler_language(command) # 'c' or 'c++'
}
# iterate on the compile options
args = iter(command[1:])
for arg in args:
# take arch flags into a separate basket
if arg == '-arch':
result['arch_list'].append(next(args))
# take language
elif arg == '-x':
result['language'] = next(args)
        # parameters which look like source files are not flags
elif re.match(r'^[^-].+', arg) and classify_source(arg):
pass
# ignore some flags
elif arg in IGNORED_FLAGS:
count = IGNORED_FLAGS[arg]
for _ in range(count):
next(args)
        # drop flags that merely enable extra warnings, but keep the -Wno-*
        # flags that suppress warnings we do not want to see.
elif re.match(r'^-W.+', arg) and not re.match(r'^-Wno-.+', arg):
pass
# and consider everything else as compilation flag.
else:
result['flags'].append(arg)
return result
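if __name__ == '__main__':
    # Minimal sketch (not part of the original module): classify a hypothetical
    # compile command. It relies on classify_source() and compiler_language(),
    # which are defined earlier in this module.
    example = ['clang', '-c', '-Wall', '-arch', 'x86_64', '-o', 'foo.o', 'foo.c']
    print(classify_parameters(example))
    # expected, roughly: {'flags': [], 'arch_list': ['x86_64'],
    #                     'language': None, 'compiler': 'c'}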
| 1.992188 | 2 |
tableborder.py | PIRXrav/pyhack | 0 | 4025 | <gh_stars>0
#!/usr/bin/env python3
# pylint: disable=C0103
# pylint: disable=R0902
# pylint: disable=R0903
# pylint: disable=R0913
"""
Définie la classe TableBorder
"""
class TableBorder:
"""
Facillite l'usage de l'UNICODE
"""
def __init__(self,
top_left, top_split, top_right,
mid_left, mid_split, mid_right,
low_left, low_split, low_right,
horizontal, vertical):
"""
Constructeur
"""
self.top_left = top_left
self.top_split = top_split
self.top_right = top_right
self.mid_left = mid_left
self.mid_split = mid_split
self.mid_right = mid_right
self.low_left = low_left
self.low_split = low_split
self.low_right = low_right
self.horizontal = horizontal
self.vertical = vertical
BORDERS = [TableBorder('+', '+', '+',\
'+', '+', '+',\
'+', '+', '+',\
'-', '|'),
TableBorder(u'\u250c', u'\u252C', u'\u2510',\
u'\u251C', u'\u253C', u'\u2524',\
u'\u2514', u'\u2534', u'\u2518',\
u'\u2500', u'\u2502'),
TableBorder(u'\u2554', u'\u2566', u'\u2557',\
u'\u2560', u'\u256C', u'\u2563',\
u'\u255a', u'\u2569', u'\u255d',\
u'\u2550', u'\u2551')
]
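if __name__ == '__main__':
    # Minimal usage sketch (not part of the original file): draw the top edge
    # of a two-column table using the single-line Unicode border set.
    BORDER = BORDERS[1]
    widths = [8, 12]  # arbitrary example column widths
    top = BORDER.top_left \
        + BORDER.top_split.join(BORDER.horizontal * w for w in widths) \
        + BORDER.top_right
    print(top)  # -> ┌────────┬────────────┐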
| 2.5 | 2 |
app/urls.py | tkf2019/Vue-Django-SAST-Search | 0 | 4026 | <gh_stars>0
from django.conf.urls import url
from . import views
urlpatterns = [
url(r'^register/', views.register),
url(r'^login/', views.login),
    url(r'^logout/', views.logout),
    url(r'^search/', views.search)
]
| 1.554688 | 2 |
custom_components/hasl/sensor.py | Ziqqo/hasl-platform | 0 | 4027 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""Simple service for SL (Storstockholms Lokaltrafik)."""
import datetime
import json
import logging
from datetime import timedelta
import homeassistant.helpers.config_validation as cv
import voluptuous as vol
from homeassistant.components.sensor import PLATFORM_SCHEMA
from homeassistant.const import (ATTR_FRIENDLY_NAME, CONF_SCAN_INTERVAL,
CONF_SENSOR_TYPE, CONF_SENSORS, STATE_OFF,
STATE_ON)
from homeassistant.helpers.entity import Entity
from homeassistant.helpers.event import (async_track_point_in_utc_time,
async_track_utc_time_change,
track_time_interval)
from homeassistant.util import Throttle
from homeassistant.util.dt import now
from hasl import (haslapi, fpapi, tl2api, ri4api, si2api,
HASL_Error, HASL_API_Error, HASL_HTTP_Error)
__version__ = '2.2.0'
_LOGGER = logging.getLogger(__name__)
DOMAIN = 'hasl'
# Keys used in the configuration.
CONF_RI4_KEY = 'ri4key'
CONF_SI2_KEY = 'si2key'
CONF_TL2_KEY = 'tl2key'
CONF_SITEID = 'siteid'
CONF_LINES = 'lines'
CONF_DIRECTION = 'direction'
CONF_ENABLED_SENSOR = 'sensor'
CONF_TIMEWINDOW = 'timewindow'
CONF_SENSORPROPERTY = 'property'
CONF_TRAIN_TYPE = 'train_type'
CONF_TRAFFIC_CLASS = 'traffic_class'
CONF_VERSION = 'version_sensor'
CONF_USE_MINIMIZATION = 'api_minimization'
LIST_SENSOR_TYPES = ['departures', 'status', 'trainlocation', 'comb', 'tl2']
LIST_SENSOR_PROPERTIES = ['min', 'time', 'deviations', 'refresh', 'updated']
LIST_TRAIN_TYPES = ['PT', 'RB', 'TVB', 'SB', 'LB', 'SpvC', 'TB1', 'TB2', 'TB3']
# Default values for configuration.
DEFAULT_INTERVAL = timedelta(minutes=10)
DEFAULT_TIMEWINDOW = 30
DEFAULT_DIRECTION = 0
DEFAULT_SENSORPROPERTY = 'min'
DEFAULT_TRAIN_TYPE = 'PT'
DEFAULT_TRAFFIC_CLASS = ['metro', 'train', 'local', 'tram', 'bus', 'fer']
DEFAULT_SENSORTYPE = 'departures'
DEFAULT_CACHE_FILE = '.storage/haslcache.json'
# Defining the configuration schema.
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
# API Keys
vol.Optional(CONF_RI4_KEY): cv.string,
vol.Optional(CONF_SI2_KEY): cv.string,
vol.Optional(CONF_TL2_KEY): cv.string,
vol.Optional(CONF_VERSION, default=False): cv.boolean,
vol.Optional(CONF_USE_MINIMIZATION, default=True): cv.boolean,
vol.Required(CONF_SENSORS, default=[]):
vol.All(cv.ensure_list, [vol.All({
vol.Required(ATTR_FRIENDLY_NAME): cv.string,
vol.Required(CONF_SENSOR_TYPE, default=DEFAULT_SENSORTYPE):
vol.In(LIST_SENSOR_TYPES),
vol.Optional(CONF_ENABLED_SENSOR): cv.string,
vol.Optional(CONF_SCAN_INTERVAL, default=DEFAULT_INTERVAL):
vol.Any(cv.time_period, cv.positive_timedelta),
vol.Optional(CONF_SITEID): cv.string,
vol.Optional(CONF_LINES, default=[]):
vol.All(cv.ensure_list, [cv.string]),
vol.Optional(CONF_DIRECTION, default=DEFAULT_DIRECTION):
vol.All(vol.Coerce(int), vol.Range(min=0, max=2)),
vol.Optional(CONF_TIMEWINDOW, default=DEFAULT_TIMEWINDOW):
vol.All(vol.Coerce(int), vol.Range(min=0, max=60)),
vol.Optional(CONF_SENSORPROPERTY, default=DEFAULT_SENSORPROPERTY):
vol.In(LIST_SENSOR_PROPERTIES),
vol.Optional(CONF_TRAFFIC_CLASS, default=DEFAULT_TRAFFIC_CLASS):
vol.All(cv.ensure_list, [vol.In(DEFAULT_TRAFFIC_CLASS)]),
vol.Optional(CONF_TRAIN_TYPE, default=DEFAULT_TRAIN_TYPE):
vol.In(LIST_TRAIN_TYPES)
})]),
}, extra=vol.ALLOW_EXTRA)
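# Configuration sketch (not part of the original component): a mapping like the
# one below is what PLATFORM_SCHEMA above is meant to validate. The key names
# come from the constants in this file; the site id and API key are placeholders.
#
#   PLATFORM_SCHEMA({
#       'platform': DOMAIN,
#       CONF_RI4_KEY: '<your-ri4-key>',
#       CONF_SENSORS: [{
#           ATTR_FRIENDLY_NAME: 'Slussen departures',
#           CONF_SENSOR_TYPE: 'departures',
#           CONF_SITEID: '9192',
#       }],
#   })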
def setup_platform(hass, config, add_devices, discovery_info=None):
"""Setup the sensors."""
if not hass.data.get(DOMAIN):
hass.data[DOMAIN] = {}
sensors = []
if config[CONF_VERSION]:
sensors.append(SLVersionSensor(hass))
_LOGGER.info("Created version sensor for HASL")
for sensorconf in config[CONF_SENSORS]:
if sensorconf[CONF_SENSOR_TYPE] == 'departures' or \
sensorconf[CONF_SENSOR_TYPE] == 'comb':
sitekey = sensorconf.get(CONF_SITEID)
si2key = config.get(CONF_SI2_KEY)
ri4key = config.get(CONF_RI4_KEY)
if sitekey and ri4key:
sensorname = sensorconf[ATTR_FRIENDLY_NAME]
sensors.append(SLDeparturesSensor(
hass,
si2key,
ri4key,
sitekey,
sensorconf.get(CONF_LINES),
sensorname,
sensorconf.get(CONF_ENABLED_SENSOR),
sensorconf.get(CONF_SCAN_INTERVAL),
sensorconf.get(CONF_DIRECTION),
sensorconf.get(CONF_TIMEWINDOW),
sensorconf.get(CONF_SENSORPROPERTY),
config.get(CONF_USE_MINIMIZATION)
))
_LOGGER.info("Created departures sensor %s...", sensorname)
else:
_LOGGER.error("Sensor %s is missing site, si2key or ri4key",
sensorconf[ATTR_FRIENDLY_NAME])
if sensorconf[CONF_SENSOR_TYPE] == 'status' or \
sensorconf[CONF_SENSOR_TYPE] == 'tl2':
tl2key = config.get(CONF_TL2_KEY)
if tl2key:
sensorname = sensorconf[ATTR_FRIENDLY_NAME]
sensors.append(SLStatusSensor(
hass,
tl2key,
sensorname,
sensorconf.get(CONF_ENABLED_SENSOR),
sensorconf.get(CONF_SCAN_INTERVAL),
sensorconf.get(CONF_TRAFFIC_CLASS),
config.get(CONF_USE_MINIMIZATION)
))
_LOGGER.info("Created status sensor %s...", sensorname)
else:
_LOGGER.error("Sensor %s is missing tl2key attribute",
sensorconf[ATTR_FRIENDLY_NAME])
if sensorconf[CONF_SENSOR_TYPE] == 'trainlocation':
train_type = sensorconf.get(CONF_TRAIN_TYPE)
if train_type:
sensorname = sensorconf[ATTR_FRIENDLY_NAME]
sensors.append(SLTrainLocationSensor(
hass,
sensorname,
train_type,
sensorconf.get(CONF_SCAN_INTERVAL),
sensorconf.get(CONF_ENABLED_SENSOR),
))
_LOGGER.info("Created train sensor %s...", sensorname)
else:
_LOGGER.error("Sensor %s is missing train_type attribute",
sensorconf[ATTR_FRIENDLY_NAME])
add_devices(sensors)
class SLTrainLocationSensor(Entity):
"""Trafic Situation Sensor."""
def __init__(self, hass, friendly_name, train_type,
interval, enabled_sensor):
self._hass = hass
self._fpapi = fpapi()
self._name = friendly_name
self._interval = interval
self._enabled_sensor = enabled_sensor
self._train_type = train_type
self._data = {}
self.update = Throttle(interval)(self._update)
@property
def name(self):
"""Return the name of the sensor."""
return self._name
@property
def icon(self):
""" Return the icon for the frontend."""
return None
@property
def device_state_attributes(self):
""" Return the sensor attributes."""
return {'type': self._train_type, 'data': json.dumps(self._data)}
@property
def state(self):
""" Return the state of the sensor."""
return self._train_type
def _update(self):
if self._enabled_sensor is not None:
sensor_state = self._hass.states.get(self._enabled_sensor)
        if self._enabled_sensor is None or sensor_state.state == STATE_ON:
try:
apidata = self._fpapi.request(self._train_type)
except HASL_Error as e:
_LOGGER.error("A communication error occured while "
"updating train location sensor: %s", e.details)
return
except Exception as e:
_LOGGER.error("A error occured while"
"updating train location sensor: %s", e)
return
self._data = apidata
_LOGGER.info("Update completed %s...", self._name)
class SLVersionSensor(Entity):
"""HASL Version Sensor."""
def __init__(self, hass):
self._hass = hass
self._haslapi = haslapi()
self._name = 'HASL Version'
self._version = __version__
self._py_version = self._haslapi.version()
@property
def name(self):
"""Return the name of the sensor."""
return self._name
@property
def icon(self):
""" Return the icon for the frontend."""
return None
@property
def device_state_attributes(self):
""" Return the sensor attributes."""
return {'hasl': self._version, 'pyHasl': self._py_version}
@property
def state(self):
""" Return the state of the sensor."""
return self._version + "/" + self._py_version
class SLStatusSensor(Entity):
"""Trafic Situation Sensor."""
def __init__(self, hass, tl2key, friendly_name,
enabled_sensor, interval, type,
minimization):
self._tl2api = tl2api(tl2key)
self._datakey = 'tl2_' + tl2key
self._interval = interval
self._hass = hass
self._name = friendly_name
self._enabled_sensor = enabled_sensor
self._type = type
self._sensordata = []
self._lastupdate = '-'
self._cachefile = hass.config.path(DEFAULT_CACHE_FILE)
self._minimization = minimization
if not hass.data[DOMAIN].get(self._datakey):
hass.data[DOMAIN][self._datakey] = ''
self.update = Throttle(interval)(self._update)
@property
def name(self):
"""Return the name of the sensor."""
return self._name
@property
def icon(self):
""" Return the icon for the frontend."""
return 'mdi:train-car'
@property
def device_state_attributes(self):
""" Return the sensor attributes."""
return self._sensordata
@property
def state(self):
""" Return the state of the sensor."""
return self._lastupdate
def getCache(self, key):
try:
jsonFile = open(self._cachefile, 'r')
data = json.load(jsonFile)
jsonFile.close()
return data.get(key)
except:
return {}
def putCache(self, key, value):
try:
jsonFile = open(self._cachefile, 'r')
data = json.load(jsonFile)
jsonFile.close()
data[key] = value
except:
data = {'' + key + '': value}
jsonFile = open(self._cachefile, 'w')
jsonFile.write(json.dumps(data))
jsonFile.close()
def _update(self):
if self._enabled_sensor is not None:
sensor_state = self._hass.states.get(self._enabled_sensor)
        if self._enabled_sensor is None or sensor_state.state == STATE_ON:
_LOGGER.info("Starting to update TL2 for %s...",
self._name)
# Object used to create our object.
newdata = {}
# Use some nice translations for the statuses etc.
statuses = {
'EventGood': 'Good',
'EventMinor': 'Minor',
'EventMajor': 'Closed',
'EventPlanned': 'Planned',
}
# Icon table used for HomeAssistant.
statusIcons = {
'EventGood': 'mdi:check',
'EventMinor': 'mdi:clock-alert-outline',
'EventMajor': 'mdi:close',
'EventPlanned': 'mdi:triangle-outline'
}
trafficTypeIcons = {
'ferry': 'mdi:ferry',
'bus': 'mdi:bus',
'tram': 'mdi:tram',
'train': 'mdi:train',
'local': 'mdi:train-variant',
'metro': 'mdi:subway-variant'
}
            # If the same API has already made the request within the
            # specified interval, use that data instead of requesting it
            # again and spare some innocent credits from dying.
cacheage = self._hass.data[DOMAIN][self._datakey]
if not cacheage or now(self._hass.config.time_zone) \
- self._interval > cacheage or not self._minimization:
try:
apidata = self._tl2api.request()
apidata = apidata['ResponseData']['TrafficTypes']
self.putCache(self._datakey, apidata)
self._hass.data[DOMAIN][self._datakey] = \
now(self._hass.config.time_zone)
_LOGGER.info("Updated cache for %s...", self._name)
except HASL_Error as e:
_LOGGER.error("A communication error occured while "
"updating TL2 sensor: %s", e.details)
return
except Exception as e:
_LOGGER.error("A error occured while "
"updating TL4 API: %s", e)
return
else:
apidata = self.getCache(self._datakey)
_LOGGER.info("Reusing data from cache for %s...",
self._name)
# Return only the relevant portion of the results.
for response in apidata:
type = response['Type']
if self._type is None or type in self._type:
statustype = ('ferry' if type == 'fer' else type)
newdata[statustype + '_status'] = \
statuses.get(response['StatusIcon'])
newdata[statustype + '_status_icon'] = \
statusIcons.get(response['StatusIcon'])
newdata[statustype + '_icon'] = \
trafficTypeIcons.get(statustype)
for event in response['Events']:
event['Status'] = statuses.get(event['StatusIcon'])
event['StatusIcon'] = \
statusIcons.get(event['StatusIcon'])
newdata[statustype + '_events'] = response['Events']
# Attribution and update sensor data.
newdata['attribution'] = "Stockholms Lokaltrafik"
            newdata['last_updated'] = \
                self._hass.data[DOMAIN][self._datakey].strftime(
                    '%Y-%m-%d %H:%M:%S')
self._sensordata = newdata
self._lastupdate = newdata['last_updated']
_LOGGER.info("TL2 update completed for %s...", self._name)
class SLDeparturesSensor(Entity):
"""Departure board for one SL site."""
def __init__(self, hass, si2key, ri4key, siteid,
lines, friendly_name, enabled_sensor,
interval, direction, timewindow, sensorproperty,
minimization):
"""Initialize"""
# The table of resulttypes and the corresponding units of measure.
unit_table = {
'min': 'min',
'time': '',
'deviations': '',
'refresh': '',
            'updated': '',
}
if si2key:
self._si2key = si2key
self._si2api = si2api(si2key, siteid, '')
self._si2datakey = 'si2_' + si2key + '_' + siteid
self._ri4key = ri4key
self._ri4api = ri4api(ri4key, siteid, 60)
self._ri4datakey = 'ri2_' + ri4key + '_' + siteid
self._hass = hass
self._name = friendly_name
self._lines = lines
self._siteid = siteid
self._enabled_sensor = enabled_sensor
self._sensorproperty = sensorproperty
self._departure_table = []
self._deviations_table = []
self._direction = direction
self._timewindow = timewindow
self._nextdeparture_minutes = '0'
self._nextdeparture_expected = '-'
self._lastupdate = '-'
self._interval = interval
self._unit_of_measure = unit_table.get(self._sensorproperty, 'min')
self._cachefile = hass.config.path(DEFAULT_CACHE_FILE)
self._minimization = minimization
if not hass.data[DOMAIN].get(self._ri4datakey):
hass.data[DOMAIN][self._ri4datakey] = ''
if self._si2key:
if not hass.data[DOMAIN].get(self._si2datakey):
hass.data[DOMAIN][self._si2datakey] = ''
# Setup updating of the sensor.
self.update = Throttle(interval)(self._update)
@property
def name(self):
"""Return the name of the sensor."""
return self._name
@property
def icon(self):
""" Return the icon for the frontend."""
if self._deviations_table:
return 'mdi:bus-alert'
return 'mdi:bus'
@property
def state(self):
""" Return number of minutes to the next departure """
# If the sensor should return minutes to next departure.
if self._sensorproperty is 'min':
if not self._departure_table:
return '-'
return self._departure_table[0]['time']
# If the sensor should return the time at which next departure occurs.
if self._sensorproperty is 'time':
if not self._departure_table:
return '-'
expected = self._departure_table[0]['expected'] or '-'
if expected is not '-':
expected = \
datetime.datetime.strptime(self._nextdeparture_expected,
'%Y-%m-%dT%H:%M:%S')
expected = expected.strftime('%H:%M:%S')
return expected
# If the sensor should return the number of deviations.
if self._sensorproperty is 'deviations':
return len(self._deviations_table)
# If the sensor should return if it is updating or not.
if self._sensorproperty is 'refresh':
if self._enabled_sensor is None or sensor_state.state is STATE_ON:
return STATE_ON
return STATE_OFF
if self._sensorproperty is 'updated':
if self._lastupdate is '-':
return '-'
return refresh.strftime('%Y-%m-%d %H:%M:%S')
# Failsafe
return '-'
@property
def device_state_attributes(self):
""" Return the sensor attributes ."""
# Initialize the state attributes.
val = {}
        # Format the next expected time.
        if self._departure_table:
            expected_time = self._departure_table[0]['expected'] or '-'
            expected_minutes = self._departure_table[0]['time'] or '-'
            if expected_time != '-':
                expected_time = \
                    datetime.datetime.strptime(expected_time,
                                               '%Y-%m-%dT%H:%M:%S')
                expected_time = expected_time.strftime('%H:%M:%S')
        else:
            expected_time = '-'
            expected_minutes = '-'
        # Format the last refresh time.
        refresh = self._lastupdate
        if self._lastupdate != '-':
            refresh = refresh.strftime('%Y-%m-%d %H:%M:%S')
        # Set up the unit of measure.
        if self._unit_of_measure != '':
            val['unit_of_measurement'] = self._unit_of_measure
        # Check if sensor is currently updating or not.
        if self._enabled_sensor is not None:
            sensor_state = self._hass.states.get(self._enabled_sensor)
        if self._enabled_sensor is None or sensor_state.state == STATE_ON:
val['refresh_enabled'] = STATE_ON
else:
val['refresh_enabled'] = STATE_OFF
# Set values of the sensor.
val['attribution'] = 'Stockholms Lokaltrafik'
val['departures'] = self._departure_table
val['deviations'] = self._deviations_table
val['last_refresh'] = refresh
val['next_departure_minutes'] = expected_minutes
val['next_departure_time'] = expected_time
val['deviation_count'] = len(self._deviations_table)
return val
def parseDepartureTime(self, t):
""" weird time formats from the API,
do some quick and dirty conversions. """
try:
if t == 'Nu':
return 0
s = t.split()
if len(s) > 1 and s[1] == 'min':
return int(s[0])
s = t.split(':')
if len(s) > 1:
rightnow = now(self._hass.config.time_zone)
min = int(s[0]) * 60 + int(s[1]) - (rightnow.hour * 60 +
rightnow.minute)
if min < 0:
min = min + 1440
return min
except Exception:
_LOGGER.warning("Failed to parse departure time (%s) ", t)
return 0
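    # Illustrative behaviour of parseDepartureTime() (assuming the local time
    # is 14:30 when it runs):
    #   'Nu'    -> 0     (departing now)
    #   '5 min' -> 5
    #   '14:35' -> 5     (clock times are converted to minutes from now)
    #   '14:05' -> 1415  (times already passed wrap around to the next day)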
def getCache(self, key):
try:
jsonFile = open(self._cachefile, 'r')
data = json.load(jsonFile)
jsonFile.close()
return data.get(key)
except:
return {}
def putCache(self, key, value):
try:
jsonFile = open(self._cachefile, 'r')
data = json.load(jsonFile)
jsonFile.close()
data[key] = value
except:
data = {'' + key + '': value}
jsonFile = open(self._cachefile, 'w')
jsonFile.write(json.dumps(data))
jsonFile.close()
def _update(self):
"""Get the departure board."""
# If using external sensor, get its value.
if self._enabled_sensor is not None:
sensor_state = self._hass.states.get(self._enabled_sensor)
        # If we don't have an external sensor, or it is ON, then proceed.
        if self._enabled_sensor is None or sensor_state.state \
                == STATE_ON:
self._update_ri4()
if self._si2key:
self._update_si2()
self._lastupdate = now(self._hass.config.time_zone)
def _update_ri4(self):
errorOccured = False
_LOGGER.info("Starting to update RI4 for %s...", self._name)
cacheage = self._hass.data[DOMAIN][self._ri4datakey]
if not cacheage or now(self._hass.config.time_zone) \
- self._interval > cacheage or not self._minimization:
try:
departuredata = self._ri4api.request()
departuredata = departuredata['ResponseData']
self.putCache(self._ri4datakey, departuredata)
self._hass.data[DOMAIN][self._ri4datakey] = \
now(self._hass.config.time_zone)
_LOGGER.info("Updated cache for %s...", self._name)
except HASL_Error as e:
_LOGGER.error("A communication error occured while "
"updating SI2 sensor: %s", e.details)
errorOccured = True
except Exception as e:
_LOGGER.error("A communication error occured while "
"updating RI4 API: %s", e)
errorOccured = True
else:
try:
departuredata = self.getCache(self._ri4datakey)
_LOGGER.info("Reusing data from cache for %s...",
self._name)
except Exception as e:
_LOGGER.error("A error occured while retreiving "
"cached RI4 sensor data: %s", e)
errorOccured = True
if not errorOccured:
departures = []
iconswitcher = {
'Buses': 'mdi:bus',
'Trams': 'mdi:tram',
'Ships': 'mdi:ferry',
'Metros': 'mdi:subway-variant',
'Trains': 'mdi:train',
}
for (i, traffictype) in enumerate(['Metros', 'Buses', 'Trains',
'Trams', 'Ships']):
for (idx, value) in enumerate(departuredata[traffictype]):
direction = value['JourneyDirection'] or 0
displaytime = value['DisplayTime'] or ''
destination = value['Destination'] or ''
linenumber = value['LineNumber'] or ''
expected = value['ExpectedDateTime'] or ''
groupofline = value['GroupOfLine'] or ''
icon = iconswitcher.get(traffictype, 'mdi:train-car')
if int(self._direction) == 0 or int(direction) \
== int(self._direction):
if self._lines == [] or linenumber \
in self._lines:
diff = self.parseDepartureTime(displaytime)
if diff < self._timewindow:
departures.append({
'line': linenumber,
'direction': direction,
'departure': displaytime,
'destination': destination,
'time': diff,
'expected': expected,
'type': traffictype,
'groupofline': groupofline,
'icon': icon,
})
self._departure_table = sorted(departures,
key=lambda k: k['time'])
_LOGGER.info("RI4 update completed for %s...", self._name)
def _update_si2(self):
errorOccured = False
_LOGGER.info("Starting to update SI2 for %s...", self._name)
cacheage = self._hass.data[DOMAIN][self._si2datakey]
if not cacheage or now(self._hass.config.time_zone) \
- self._interval > cacheage or not self._minimization:
try:
deviationdata = self._si2api.request()
deviationdata = deviationdata['ResponseData']
self.putCache(self._si2datakey, deviationdata)
self._hass.data[DOMAIN][self._si2datakey] = \
now(self._hass.config.time_zone)
_LOGGER.info('Updated cache for %s...', self._name)
except HASL_Error as e:
_LOGGER.error("A communication error occured while "
"updating SI2 sensor: %s", e.details)
errorOccured = True
except Exception as e:
_LOGGER.error("A error occured while "
"updating SI2 sensor: %s", e)
errorOccured = True
else:
try:
deviationdata = self.getCache(self._si2datakey)
_LOGGER.info("Reusing data from cache for %s...",
self._name)
except Exception as e:
_LOGGER.error("A error occured while retreiving "
"cached SI2 sensor: %s", e.details)
errorOccured = True
if not errorOccured:
deviations = []
for (idx, value) in enumerate(deviationdata):
deviations.append({
'updated': value['Updated'],
'title': value['Header'],
'fromDate': value['FromDateTime'],
'toDate': value['UpToDateTime'],
'details': value['Details'],
'sortOrder': value['SortOrder'],
})
self._deviations_table = \
sorted(deviations, key=lambda k: k['sortOrder'])
_LOGGER.info("SI2 update completed for %s...", self._name)
| 1.9375 | 2 |
simbad_tools.py | ishivvers/astro | 1 | 4028 | """
A quick library to deal with searching SIMBAD for info
about an SN and parsing the results.
Author: <NAME>, <EMAIL>, 2014
example SIMBAD uri query:
http://simbad.u-strasbg.fr/simbad/sim-id?output.format=ASCII&Ident=sn%201998S
"""
import re
from urllib2 import urlopen
def get_SN_info( name ):
"""
Queries simbad for SN coords, redshift, and host galaxy.
If redshift is not given for SN, attempts to resolve link to
host galaxy and report its redshift.
Returns ( (ra,dec), redshift, host_name, redshift_citation ), with
values of None inserted whenever it cannot resolve the value.
"""
simbad_uri = "http://simbad.u-strasbg.fr/simbad/sim-id?output.format=ASCII&Ident=%s"
regex_coords = "Coordinates\(FK5.+\): .+"
regex_redshift = "Redshift:\s+\d+\.\d+.+"
regex_host = "apparent\s+host\s+galaxy\s+.+?\{(.*?)\}"
result = urlopen( simbad_uri % name.replace(' ','%20') ).read()
rescoords = re.search( regex_coords, result )
resred = re.search( regex_redshift, result )
reshost = re.search( regex_host, result )
try:
cs = rescoords.group().split(':')[1].strip()
ra = cs[:12].strip()
dec = cs[12:].strip()
except:
ra,dec = None,None
try:
redshift = float(resred.group().strip('Redshift: ').split(' ')[0])
citation = resred.group().split(' ')[-1]
except AttributeError:
redshift = None
citation = None
try:
host = reshost.group().split('{')[1].split('}')[0]
except AttributeError:
host = None
if (redshift == None) and (host != None):
# get the redshift from the host galaxy
result = urlopen( simbad_uri % host.replace(' ','%20') ).read()
resred = re.search( regex_redshift, result )
try:
redshift = float(resred.group().strip('Redshift: ').split(' ')[0])
citation = resred.group().split(' ')[-1]
except AttributeError:
pass
return ((ra,dec), redshift, host, citation)
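if __name__ == '__main__':
    # Minimal usage sketch (not part of the original module); it performs a live
    # SIMBAD query, so network access is required. 'SN 1998S' matches the
    # example URI in the module docstring.
    print(get_SN_info('SN 1998S'))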
| 3.15625 | 3 |
robots/environments.py | StanfordASL/soft-robot-control | 5 | 4029 | import os
from math import cos
from math import sin
import Sofa.Core
from splib.numerics import Quat, Vec3
from sofacontrol import measurement_models
path = os.path.dirname(os.path.abspath(__file__))
class TemplateEnvironment:
def __init__(self, name='Template', rayleighMass=0.1, rayleighStiffness=0.1, dt=0.01):
self.name = name
self.robot = Sofa.Core.Node(name)
# set-up solvers
self.robot.addObject('EulerImplicitSolver', name='odesolver', firstOrder="0", rayleighMass=str(rayleighMass),
rayleighStiffness=str(rayleighStiffness))
self.robot.addObject('SparseLDLSolver', name='preconditioner')
self.robot.addObject('GenericConstraintCorrection', solverName="preconditioner")
self.actuator_list = []
self.nb_nodes = None
self.gravity = [0., -9810., 0.] # default
self.dt = dt
def get_measurement_model(self, nodes=None, pos=True, vel=True):
if nodes is None:
return measurement_models.linearModel(range(self.nb_nodes), self.nb_nodes, pos=pos, vel=vel)
else:
return measurement_models.linearModel(nodes, self.nb_nodes, pos=pos, vel=vel)
class Trunk(TemplateEnvironment):
def __init__(self, name='Trunk', all_cables=True):
super(Trunk, self).__init__(name=name)
self.nb_nodes = 709
self.gravity = [0., 0., 9810.]
self.robot.min_force = [0.] * 8 # Without premultiplication with dt
self.robot.addObject('MeshVTKLoader', name='loader', filename=path + '/mesh/trunk.vtk')
self.robot.addObject('TetrahedronSetTopologyContainer', src='@loader', name='container')
self.robot.addObject('TetrahedronSetTopologyModifier')
self.robot.addObject('TetrahedronSetTopologyAlgorithms')
self.robot.addObject('TetrahedronSetGeometryAlgorithms')
# Option 1:
self.robot.addObject('MechanicalObject', name='tetras', template='Vec3d', showIndices='false',
showIndicesScale='4e-5')
# Option 2: Equivalent to option 1 (we believe)
# self.robot.addObject('MechanicalObject', src='@loader')
# Gives a mass to the model
self.robot.addObject('UniformMass', totalMass=0.042)
        # Add a TetrahedronFEMForceField component which implements an elastic material model solved using the
        # Finite Element Method on tetrahedra.
self.robot.addObject('TetrahedronFEMForceField', template='Vec3d', name='FEM', method='large',
poissonRatio=0.45,
youngModulus=450)
# Fix the base of the trunk by adding constraints in a region of interest (ROI)
self.robot.addObject('BoxROI', name='boxROI', box=[[-20, -20, 0], [20, 20, 20]], drawBoxes=False)
self.robot.addObject('RestShapeSpringsForceField', points='@boxROI.indices', stiffness='1e12')
##########################################
# Cable #
##########################################
actuator_names = ''
length1 = 10.
length2 = 2.
lengthTrunk = 195.
pullPoint = [[0., length1, 0.], [-length1, 0., 0.], [0., -length1, 0.], [length1, 0., 0.]]
direction = Vec3(0., length2 - length1, lengthTrunk)
direction.normalize()
nbCables = 4
actuators = self.robot.addChild('actuators')
for i in range(0, nbCables):
childname = 'cableL' + str(i)
theta = 1.57 * i
q = Quat(0., 0., sin(theta / 2.), cos(theta / 2.))
position = [[0., 0., 0.]] * 20
for k in range(0, 20, 2):
v = Vec3(direction[0], direction[1] * 17.5 * (k / 2) + length1, direction[2] * 17.5 * (k / 2) + 21)
position[k] = v.rotateFromQuat(q)
v = Vec3(direction[0], direction[1] * 17.5 * (k / 2) + length1, direction[2] * 17.5 * (k / 2) + 27)
position[k + 1] = v.rotateFromQuat(q)
cableL = actuators.addChild(childname)
cableL.addObject('MechanicalObject', name='meca',
position=pullPoint[i] + [pos.toList() for pos in position])
cableL.addObject('CableConstraint', template='Vec3d', name="cable",
hasPullPoint="0",
indices=list(range(21)),
maxPositiveDisp='70',
maxDispVariation="1",
valueType='force',
minForce=self.robot.min_force[i] * self.robot.dt.value)
cableL.addObject('BarycentricMapping', name='mapping', mapForces='false', mapMasses='false')
actuator_names += childname + '/cable,'
self.actuator_list.append(cableL.cable)
if all_cables:
for i in range(0, nbCables):
childname = 'cableS' + str(i)
theta = 1.57 * i
q = Quat(0., 0., sin(theta / 2.), cos(theta / 2.))
position = [[0., 0., 0.]] * 10
for k in range(0, 9, 2):
v = Vec3(direction[0], direction[1] * 17.5 * (k / 2) + length1, direction[2] * 17.5 * (k / 2) + 21)
position[k] = v.rotateFromQuat(q)
v = Vec3(direction[0], direction[1] * 17.5 * (k / 2) + length1, direction[2] * 17.5 * (k / 2) + 27)
position[k + 1] = v.rotateFromQuat(q)
cableS = actuators.addChild(childname)
cableS.addObject('MechanicalObject', name='meca',
position=pullPoint[i] + [pos.toList() for pos in position])
cableS.addObject('CableConstraint', template='Vec3d', name="cable",
hasPullPoint="0",
indices=list(range(10)),
maxPositiveDisp='40',
maxDispVariation="1",
valueType='force',
minForce=self.robot.min_force[i + 4] * self.robot.dt.value)
cableS.addObject('BarycentricMapping', name='mapping', mapForces='false', mapMasses='false')
actuator_names += childname + '/cable,'
self.actuator_list.append(cableS.cable)
self.robot.actuator_list = self.actuator_list
##########################################
# Visualization #
##########################################
trunkVisu = self.robot.addChild('VisualModel')
trunkVisu.addObject('MeshSTLLoader', filename=path + "/mesh/trunk.stl")
trunkVisu.addObject('OglModel', template='Vec3d', color=[1., 1., 1., 0.8])
trunkVisu.addObject('BarycentricMapping')
class Trunk4Cables(Trunk):
def __init__(self, name='Trunk4Cables'):
super(Trunk4Cables, self).__init__(name=name, all_cables=False)
self.robot.min_force = [0, 0, 0, 0] # Without premultiplication with dt
class Finger(TemplateEnvironment):
def __init__(self, name='Finger'):
super(Finger, self).__init__(name=name)
self.nb_nodes = 158
self.robot.min_force = [0.] # Without premultiplication with dt
self.robot.addObject('MeshVTKLoader', name='loader', filename=path + '/mesh/finger.vtk')
self.robot.addObject('TetrahedronSetTopologyContainer', src='@loader', name='container')
self.robot.addObject('TetrahedronSetTopologyModifier')
self.robot.addObject('TetrahedronSetTopologyAlgorithms')
self.robot.addObject('TetrahedronSetGeometryAlgorithms')
self.robot.addObject('MechanicalObject', name='tetras', template='Vec3d', showIndices='false',
showIndicesScale='4e-5')
self.robot.addObject('UniformMass', totalMass=0.075)
        # Add a TetrahedronFEMForceField component which implements an elastic material model solved using the Finite Element Method on tetrahedra.
self.robot.addObject('TetrahedronFEMForceField', template='Vec3d', name='FEM', method='large',
poissonRatio=0.45,
youngModulus=600)
# Fix the base of the trunk by adding constraints in a region of interest (ROI)
self.robot.addObject('BoxROI', name='boxROI', box=[[-15, 0, 0], [5, 10, 15]], drawBoxes=False)
self.robot.addObject('RestShapeSpringsForceField', points='@boxROI.indices', stiffness='1e12')
##########################################
# Cable #
##########################################
# This creates a new node in the scene. This node is appended to the finger's node.
actuators = self.robot.addChild('actuators')
cable = actuators.addChild('cable')
        # This creates a MechanicalObject, a component holding the degrees of freedom of our
        # mechanical model. In the case of a cable it is a set of positions specifying
        # the points the cable passes through.
cable.addObject('MechanicalObject', name='meca',
position=(
"-17.5 12.5 2.5 " +
"-32.5 12.5 2.5 " +
"-47.5 12.5 2.5 " +
"-62.5 12.5 2.5 " +
"-77.5 12.5 2.5 " +
"-83.5 12.5 4.5 " +
"-85.5 12.5 6.5 " +
"-85.5 12.5 8.5 " +
"-83.5 12.5 10.5 " +
"-77.5 12.5 12.5 " +
"-62.5 12.5 12.5 " +
"-47.5 12.5 12.5 " +
"-32.5 12.5 12.5 " +
"-17.5 12.5 12.5 "))
        # Create a CableConstraint object with a name.
        # The indices refer to the MechanicalObject's positions.
        # The last index is where the pullPoint is connected.
cable.addObject('CableConstraint', name="cable",
indices=list(range(14)),
pullPoint="0.0 12.5 2.5", valueType='force',
minForce=self.robot.min_force[0] * self.robot.dt.value)
        # This creates a BarycentricMapping. A BarycentricMapping is a key element as it creates a bi-directional link
        # between the cable's DoFs and the finger's, so that movements of the cable's DoFs are mapped
        # to the finger and vice versa.
cable.addObject('BarycentricMapping', name='mapping', mapForces='false', mapMasses='false')
self.actuator_list.append(cable.cable)
self.robot.actuator_list = self.actuator_list
##########################################
# Visualization #
##########################################
# In Sofa, visualization is handled by adding a rendering model.
# Create an empty child node to store this rendering model.
fingerVisu = self.robot.addChild('VisualModel')
# Add to this empty node a rendering model made of triangles and loaded from an stl file.
fingerVisu.addObject('MeshSTLLoader', filename=path + "/mesh/finger.stl")
fingerVisu.addObject('OglModel', template='Vec3d', color=[1., 1., 1., 0.8])
        # Add a BarycentricMapping to deform the rendering model in a way that follows the parent mechanical model.
fingerVisu.addObject('BarycentricMapping')
class Diamond(TemplateEnvironment):
def __init__(self, name='Diamond', totalMass=0.5, poissonRatio=0.45, youngModulus=450, rayleighMass=0.1, rayleighStiffness=0.1, dt=0.01):
super(Diamond, self).__init__(name=name, rayleighMass=rayleighMass, rayleighStiffness=rayleighStiffness, dt=dt)
self.nb_nodes = 1628
self.gravity = [0., 0., -9810.]
rotation = [90, 0.0, 0.0]
translation = [0.0, 0.0, 35]
self.robot.min_force = [0, 0, 0, 0] # Without premultiplication with dt
self.robot.addObject('MeshVTKLoader', name='loader', filename=path + "/mesh/diamond.vtu", rotation=rotation,
translation=translation)
self.robot.addObject('TetrahedronSetTopologyContainer', src='@loader', name='container')
self.robot.addObject('TetrahedronSetTopologyModifier')
self.robot.addObject('TetrahedronSetTopologyAlgorithms')
self.robot.addObject('TetrahedronSetGeometryAlgorithms')
self.robot.addObject('MechanicalObject', template='Vec3d', name='tetras', showIndices='false',
showIndicesScale='4e-5')
self.robot.addObject('UniformMass', totalMass=totalMass, name='mass')
self.robot.addObject('TetrahedronFEMForceField', template='Vec3d',
method='large', name='forcefield',
poissonRatio=poissonRatio, youngModulus=youngModulus)
# Fix the base of the trunk by adding constraints in a region of interest (ROI)
self.robot.addObject('BoxROI', name='boxROI', box=[-15, -15, -40, 15, 15, 10], drawBoxes=True)
        self.robot.addObject('RestShapeSpringsForceField', points='@boxROI.indices', stiffness='1e12')
##########################################
# Cable #
##########################################
self.actuatorsParam = [
{'withName': 'A',
'withCableGeometry': [[0, 97, 45]],
'withAPullPointLocation': [0, 10, 30]
},
{'withName': 'B',
'withCableGeometry': [[-97, 0, 45]],
'withAPullPointLocation': [-10, 0, 30]
},
{'withName': 'C',
'withCableGeometry': [[0, -97, 45]],
'withAPullPointLocation': [0, -10, 30]
},
{'withName': 'D',
'withCableGeometry': [[97, 0, 45]],
'withAPullPointLocation': [10, 0, 30]
}
]
actuators = self.robot.addChild('actuators')
for i in range(len(self.actuatorsParam)):
cable = actuators.addChild(self.actuatorsParam[i]['withName'])
cable.addObject('MechanicalObject', position=self.actuatorsParam[i]['withCableGeometry'])
cable.addObject('CableConstraint',
name='cable',
indices=list(range(len(self.actuatorsParam[i]['withCableGeometry']))),
pullPoint=self.actuatorsParam[i]['withAPullPointLocation'],
valueType='force',
hasPullPoint=True,
minForce=self.robot.min_force[i] * self.robot.dt.value
)
cable.addObject('BarycentricMapping', name="Mapping", mapForces=False, mapMasses=False)
self.actuator_list.append(cable.cable)
self.robot.actuator_list = self.actuator_list
##########################################
# Visualization #
##########################################
diamondVisu = self.robot.addChild('VisualModel')
diamondVisu.addObject('MeshSTLLoader', filename=path + "/mesh/diamond.stl")
diamondVisu.addObject('OglModel', template='Vec3d', color=[0.7, 0.7, 0.7, 0.7], updateNormals=False)
diamondVisu.addObject('BarycentricMapping')
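# Usage sketch (an assumption, not from the original file): these classes build
# the SOFA node for a given robot, so a driver script would typically do
#
#   env = Diamond(dt=0.01)                # construct the robot node
#   y_model = env.get_measurement_model(pos=True, vel=True)
#   cables = env.robot.actuator_list      # CableConstraint objects, one per cable
#
# with the rest of the scene/simulation setup handled elsewhere (e.g. by the
# sofacontrol tooling imported at the top of this module).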
| 2.109375 | 2 |
default.py | SimonPreissner/get-shifty | 0 | 4030 | """
This file contains meta information and default configurations of the project
"""
RSC_YEARS = [1660, 1670, 1680, 1690,
1700, 1710, 1720, 1730, 1740, 1750, 1760, 1770, 1780, 1790,
1800, 1810, 1820, 1830, 1840, 1850, 1860, 1870, 1880, 1890,
1900, 1910, 1920]
# cf. Chapter 4.4.1 of the thesis
SPACE_PAIR_SELECTION = [(1740,1750), (1750,1760),
(1680,1710), (1710,1740), (1740,1770), (1770,1800), (1800,1830), (1830,1860), (1860,1890),
(1700,1800), (1800,1900),
(1700,1900)]
COUPLING_CONFIG = { # Alternatives
# parameters passed to the GWOT object
'metric': "cosine", # 'euclidian',
'normalize_vecs': "both", # 'mean', 'whiten', 'whiten_zca'
'normalize_dists': "mean", # 'max', 'median'
'score_type': "coupling", # #TODO fill in the rest of the options in the comments
'adjust': None, # 'csls', ...
'distribs': "uniform", # 'custom', 'zipf'
'share_vocs':False, # True
'size':1000, # 100 is small, 1e4
'max_anchors':100, # used with small couplings (for projection)
# parameters to be passed to the optimizer
'opt_loss_fun': "square_loss", # 'kl_loss'
'opt_entropic': True, # False
'opt_entreg': 5e-4, # stay within the range of e-4 (originally: 1e-4)
'opt_tol': 1e-9, # no limits
'opt_round_g': False, # True
'opt_compute_accuracy': False, # True would require a test dict, but that's not implemented!
'opt_gpu': False, # GPU optimization not tested
# parameters for calling fit()
'fit_maxiter': 300, # no limits; normally converges within 150 iterations
'fit_tol': 1e-9, # no limits
'fit_plot_every': 100000, # normally 20; 'deactivate' the file spam by choosing a large value
'fit_print_every': 1, # no limits
'fit_verbose': True, # False
'fit_save_plots': None # "/my_dir/my_optimizer_plots"
}
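# Usage sketch (an assumption, not from the original file): experiment code can
# take COUPLING_CONFIG as a baseline and override single entries, e.g.
#   cfg = dict(COUPLING_CONFIG, size=100, opt_entreg=1e-4)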
DIST_SHAPES = ['uniform', 'zipf', 'custom']
SHIFT_EXPERIMENTS = ["all",
"unsup_bi",
"unsup_mono",
"dis_tech"] | 1.65625 | 2 |
generate_training_data_drb.py | SimonTopp/Graph-WaveNet | 0 | 4031 | <reponame>SimonTopp/Graph-WaveNet
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import argparse
import numpy as np
import os
import pandas as pd
import util
import os.path
import pandas as pd
import numpy as np
import yaml
import xarray as xr
import datetime
import pickle
def scale(dataset, std=None, mean=None):
"""
scale the data so it has a standard deviation of 1 and a mean of zero
:param dataset: [xr dataset] input or output data
:param std: [xr dataset] standard deviation if scaling test data with dims
:param mean: [xr dataset] mean if scaling test data with dims
:return: scaled data with original dims
"""
if not isinstance(std, xr.Dataset) or not isinstance(mean, xr.Dataset):
std = dataset.std(skipna=True)
mean = dataset.mean(skipna=True)
# adding small number in case there is a std of zero
scaled = (dataset - mean) / (std + 1e-10)
check_if_finite(std)
check_if_finite(mean)
return scaled, std, mean
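# Note on usage: scale() computes one std/mean per variable over the whole input;
# passing those statistics back in (e.g. scaled, std, mean = scale(x_data), then
# scale(x_tst, std=std, mean=mean)) reuses them, as done in prep_data() below.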
def sel_partition_data(dataset, start_dates, end_dates):
"""
select the data from a date range or a set of date ranges
:param dataset: [xr dataset] input or output data with date dimension
:param start_dates: [str or list] fmt: "YYYY-MM-DD"; date(s) to start period
    (can have multiple discontinuous periods)
:param end_dates: [str or list] fmt: "YYYY-MM-DD"; date(s) to end period
    (can have multiple discontinuous periods)
:return: dataset of just those dates
"""
# if it just one date range
if isinstance(start_dates, str):
if isinstance(end_dates, str):
return dataset.sel(date=slice(start_dates, end_dates))
else:
raise ValueError("start_dates is str but not end_date")
# if it's a list of date ranges
elif isinstance(start_dates, list) or isinstance(start_dates, tuple):
if len(start_dates) == len(end_dates):
data_list = []
for i in range(len(start_dates)):
date_slice = slice(start_dates[i], end_dates[i])
data_list.append(dataset.sel(date=date_slice))
return xr.concat(data_list, dim="date")
else:
raise ValueError("start_dates and end_dates must have same length")
else:
raise ValueError("start_dates must be either str, list, or tuple")
def separate_trn_tst(
dataset,
train_start_date,
train_end_date,
val_start_date,
val_end_date,
test_start_date,
test_end_date,
):
"""
separate the train data from the test data according to the start and end
dates. This assumes your training data is in one continuous block and all
the dates that are not in the training are in the testing.
:param dataset: [xr dataset] input or output data with dims
:param train_start_date: [str or list] fmt: "YYYY-MM-DD"; date(s) to start
    train period (can have multiple discontinuous periods)
    :param train_end_date: [str or list] fmt: "YYYY-MM-DD"; date(s) to end train
    period (can have multiple discontinuous periods)
    :param val_start_date: [str or list] fmt: "YYYY-MM-DD"; date(s) to start
    validation period (can have multiple discontinuous periods)
    :param val_end_date: [str or list] fmt: "YYYY-MM-DD"; date(s) to end
    validation period (can have multiple discontinuous periods)
    :param test_start_date: [str or list] fmt: "YYYY-MM-DD"; date(s) to start
    test period (can have multiple discontinuous periods)
    :param test_end_date: [str or list] fmt: "YYYY-MM-DD"; date(s) to end test
    period (can have multiple discontinuous periods)
"""
train = sel_partition_data(dataset, train_start_date, train_end_date)
val = sel_partition_data(dataset, val_start_date, val_end_date)
test = sel_partition_data(dataset, test_start_date, test_end_date)
return train, val, test
def split_into_batches(data_array, seq_len=365, offset=1):
"""
split training data into batches with size of batch_size
:param data_array: [numpy array] array of training data with dims [nseg,
ndates, nfeat]
:param seq_len: [int] length of sequences (i.e., 365)
:param offset: [float] 0-1, how to offset the batches (e.g., 0.5 means that
the first batch will be 0-365 and the second will be 182-547)
:return: [numpy array] batched data with dims [nbatches, nseg, seq_len
(batch_size), nfeat]
"""
combined = []
for i in range(int(1 / offset)):
start = int(i * offset * seq_len)
idx = np.arange(start=start, stop=data_array.shape[1] + 1, step=seq_len)
split = np.split(data_array, indices_or_sections=idx, axis=1)
# add all but the first and last batch since they will be smaller
combined.extend([s for s in split if s.shape[1] == seq_len])
combined = np.asarray(combined)
return combined
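# Shape sketch (hypothetical numbers): with 456 segments, ten years of daily data
# and 8 features, i.e. data_array.shape == (456, 3650, 8),
#   split_into_batches(data_array, seq_len=365, offset=1).shape   == (10, 456, 365, 8)
#   split_into_batches(data_array, seq_len=365, offset=0.5).shape == (19, 456, 365, 8)
# (the half-offset pass adds 9 extra full windows starting at day 182).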
def read_multiple_obs(obs_files, x_data):
"""
    read and format multiple observation files. We read in the pretrain data to
    make sure we have the same indexing.
    :param obs_files: [list] list of filenames of observation files
    :param x_data: [xr dataset] pre-training input data used to align indexing
    :return: [xr dataset] the observations on the same dates and segments
"""
obs = [x_data.sortby(["seg_id_nat", "date"])]
for filename in obs_files:
ds = xr.open_zarr(filename)
obs.append(ds)
if "site_id" in ds.variables:
del ds["site_id"]
obs = xr.merge(obs, join="left")
obs = obs[["temp_c", "discharge_cms"]]
obs = obs.rename(
{"temp_c": "seg_tave_water", "discharge_cms": "seg_outflow"}
)
return obs
def reshape_for_training(data):
"""
reshape the data for training
:param data: training data (either x or y or mask) dims: [nbatch, nseg,
len_seq, nfeat/nout]
:return: reshaped data [nbatch * nseg, len_seq, nfeat/nout]
"""
n_batch, n_seg, seq_len, n_feat = data.shape
return np.reshape(data, [n_batch * n_seg, seq_len, n_feat])
def get_exclude_start_end(exclude_grp):
"""
get the start and end dates for the exclude group
:param exclude_grp: [dict] dictionary representing the exclude group from
the exclude yml file
:return: [tuple of datetime objects] start date, end date
"""
start = exclude_grp.get("start_date")
if start:
start = datetime.datetime.strptime(start, "%Y-%m-%d")
end = exclude_grp.get("end_date")
if end:
end = datetime.datetime.strptime(end, "%Y-%m-%d")
return start, end
def convert_batch_reshape(dataset, seq_len=365, offset=1, y = False, period = np.nan):
"""
convert xarray dataset into numpy array, swap the axes, batch the array and
reshape for training
:param dataset: [xr dataset] data to be batched
:param seq_len: [int] length of sequences (i.e., 365)
:param offset: [float] 0-1, how to offset the batches (e.g., 0.5 means that
the first batch will be 0-365 and the second will be 182-547)
:return: [numpy array] batched and reshaped dataset
"""
# convert xr.dataset to numpy array
dataset = dataset.transpose("seg_id_nat", "date")
arr = dataset.to_array().values
# if the dataset is empty, just return it as is
if dataset.date.size == 0:
return arr
# before [nfeat, nseg, ndates]; after [nseg, ndates, nfeat]
# this is the order that the split into batches expects
arr = np.moveaxis(arr, 0, -1)
# batch the data
# after [nbatch, nseg, seq_len, nfeat]
batched = split_into_batches(arr, seq_len=seq_len, offset=offset)
# reshape data
# after [nseq, seq_len, nseg, nfeat]
#reshaped = reshape_for_training(batched)
reshaped = np.moveaxis(batched, [0,1,2,3], [0,2,1,3])
if y & np.isfinite(period):
reshaped = reshaped[:,-period:,...]
return reshaped
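# Shape sketch: an input dataset with dims (seg_id_nat, date) and V variables
# comes out as [n_sequences, seq_len, n_segments, V]; with y=True and a finite
# `period`, only the last `period` time steps of each sequence are kept.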
def coord_as_reshaped_array(dataset, coord_name, seq_len=365, offset=1):
# I need one variable name. It can be any in the dataset, but I'll use the
# first
first_var = next(iter(dataset.data_vars.keys()))
coord_array = xr.broadcast(dataset[coord_name], dataset[first_var])[0]
new_var_name = coord_name + "1"
dataset[new_var_name] = coord_array
reshaped_np_arr = convert_batch_reshape(
dataset[[new_var_name]], seq_len=seq_len, offset=offset
)
return reshaped_np_arr
def check_if_finite(xarr):
assert np.isfinite(xarr.to_array().values).all()
def prep_data(
obs_temper_file,
obs_flow_file,
pretrain_file,
#distfile,
train_start_date,
train_end_date,
val_start_date,
val_end_date,
test_start_date,
test_end_date,
x_vars=None,
y_vars= ["seg_tave_water", "seg_outflow"],
seq_length = 365,
offset = 1,
period = None,
primary_variable="temp",
#catch_prop_file=None,
#exclude_file=None,
#log_q=False,
out_file=None,
#segs=None,
normalize_y=False,
):
"""
prepare input and output data for DL model training read in and process
data into training and testing datasets. the training and testing data are
scaled to have a std of 1 and a mean of zero
:param obs_temper_file: [str] temperature observations file (csv)
:param obs_flow_file:[str] discharge observations file (csv)
:param pretrain_file: [str] the file with the pretraining data (SNTemp data)
:param distfile: [str] path to the distance matrix .npz file
:param train_start_date: [str or list] fmt: "YYYY-MM-DD"; date(s) to start
    train period (can have multiple discontinuous periods)
    :param train_end_date: [str or list] fmt: "YYYY-MM-DD"; date(s) to end train
    period (can have multiple discontinuous periods)
    :param val_start_date: [str or list] fmt: "YYYY-MM-DD"; date(s) to start
    validation period (can have multiple discontinuous periods)
    :param val_end_date: [str or list] fmt: "YYYY-MM-DD"; date(s) to end
    validation period (can have multiple discontinuous periods)
    :param test_start_date: [str or list] fmt: "YYYY-MM-DD"; date(s) to start
    test period (can have multiple discontinuous periods)
    :param test_end_date: [str or list] fmt: "YYYY-MM-DD"; date(s) to end test
    period (can have multiple discontinuous periods)
:param x_vars: [list] variables that should be used as input. If None, all
of the variables will be used
:param primary_variable: [str] which variable the model should focus on
'temp' or 'flow'. This determines the order of the variables.
:param catch_prop_file: [str] the path to the catchment properties file. If
left unfilled, the catchment properties will not be included as predictors
:param exclude_file: [str] path to exclude file
:param log_q: [bool] whether or not to take the log of discharge in training
:param out_file: [str] file to where the values will be written
:returns: training and testing data along with the means and standard
deviations of the training input and output data
'y_trn_pre': batched, scaled, and centered output data for entire
period of record of SNTemp [n_samples, seq_len, n_out]
'y_obs_trn': batched, scaled, and centered output observation data
for the training period
'y_trn_obs_std': standard deviation of the y observations training
data [n_out]
'y_trn_obs_mean': mean of the observation training data [n_out]
'y_obs_tst': un-batched, unscaled, uncentered observation data for
the test period [n_yrs, n_seg, len_seq, n_out]
'dates_ids_trn: batched dates and national seg ids for training data
[n_samples, seq_len, 2]
'dates_ids_tst: un-batched dates and national seg ids for testing
data [n_yrs, n_seg, len_seq, 2]
"""
ds_pre = xr.open_zarr(pretrain_file)
x_data = ds_pre[x_vars]
# make sure we don't have any weird input values
check_if_finite(x_data)
x_trn, x_val, x_tst = separate_trn_tst(
x_data,
train_start_date,
train_end_date,
val_start_date,
val_end_date,
test_start_date,
test_end_date,
)
x_scl, x_std, x_mean = scale(x_data)
x_trn_scl, _, _ = scale(x_trn, std=x_std, mean=x_mean)
x_val_scl, _, _ = scale(x_val, std=x_std, mean=x_mean)
x_tst_scl, _, _ = scale(x_tst, std=x_std, mean=x_mean)
y_obs = read_multiple_obs([obs_temper_file, obs_flow_file], x_data)
y_obs = y_obs[y_vars]
y_pre = ds_pre[y_vars]
y_obs_trn, y_obs_val, y_obs_tst = separate_trn_tst(
y_obs,
train_start_date,
train_end_date,
val_start_date,
val_end_date,
test_start_date,
test_end_date,
)
y_pre_trn, y_pre_val, y_pre_tst = separate_trn_tst(
y_pre,
train_start_date,
train_end_date,
val_start_date,
val_end_date,
test_start_date,
test_end_date,
)
if normalize_y:
# scale y training data and get the mean and std
y_obs_trn, y_std, y_mean = scale(y_obs_trn)
y_pre_trn, _, _ = scale(y_pre_trn, y_std, y_mean)
else:
_, y_std, y_mean = scale(y_obs_trn)
data = {
"x_train": convert_batch_reshape(x_trn_scl, offset=offset, seq_len=seq_length),
"x_val": convert_batch_reshape(x_val_scl, offset=offset, seq_len=seq_length),
"x_test": convert_batch_reshape(x_tst_scl, offset=offset, seq_len=seq_length),
"x_std": x_std.to_array().values,
"x_mean": x_mean.to_array().values,
"x_cols": np.array(x_vars),
"ids_train": coord_as_reshaped_array(x_trn, "seg_id_nat", offset=offset, seq_len=seq_length),
"dates_train": coord_as_reshaped_array(x_trn, "date", offset=offset, seq_len=seq_length),
"ids_val": coord_as_reshaped_array(x_val, "seg_id_nat", offset=offset, seq_len=seq_length),
"dates_val": coord_as_reshaped_array(x_val, "date", offset=offset, seq_len=seq_length),
"ids_test": coord_as_reshaped_array(x_tst, "seg_id_nat", offset=offset, seq_len=seq_length),
"dates_test": coord_as_reshaped_array(x_tst, "date", offset=offset, seq_len=seq_length),
"y_pre_train": convert_batch_reshape(y_pre_trn, offset=offset, seq_len=seq_length, y=True, period=period),
"y_train": convert_batch_reshape(y_obs_trn, offset=offset, seq_len=seq_length, y=True, period=period),
"y_val": convert_batch_reshape(y_obs_val, offset=offset, seq_len=seq_length, y=True, period=period),
"y_test": convert_batch_reshape(y_obs_tst, offset=offset, seq_len=seq_length, y=True, period=period),
"y_vars": np.array(y_vars),
'period': np.array([period]),
'y_pre_train_val': convert_batch_reshape(y_pre_val, offset=offset, seq_len=seq_length, y=True, period=period),
'y_pre_train_test': convert_batch_reshape(y_pre_tst, offset=offset, seq_len=seq_length, y=True, period=period),
"y_std": y_std.to_array().values,
"y_mean": y_mean.to_array().values,
}
if out_file:
        if not os.path.isdir(out_file):
os.makedirs(out_file)
'''
np.savez_compressed(os.path.join(out_file, 'pre_train.npz'),
x=data['x_train'],
y=data['y_pre_train'])
np.savez_compressed(os.path.join(out_file,'train.npz'),
x=data['x_train'],
y=data['y_obs_train'],
)
np.savez_compressed(os.path.join(out_file, 'test.npz'),
x=data['x_test'],
y=data['y_obs_tst'],
)
np.savez_compressed(os.path.join(out_file,'val.npz'),
x=data['x_val'],
y=data['y_obs_val'],
)
'''
np.savez_compressed(os.path.join(out_file,'data.npz'), **data)
return data
def prep_adj_matrix(infile, dist_type, out_file=None):
"""
process adj matrix.
**The resulting matrix is sorted by seg_id_nat **
:param infile:
:param dist_type: [str] type of distance matrix ("upstream", "downstream" or
"updown")
:param out_file:
:return: [numpy array] processed adjacency matrix
"""
adj_matrices = np.load(infile)
adj = adj_matrices[dist_type]
adj_full = sort_dist_matrix(adj, adj_matrices["rowcolnames"])
adj = adj_full[2]
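    # Turn distances into affinity weights: unreachable pairs (infinite
    # distance) become 0, the remaining distances are negated and standardized,
    # and a sigmoid squashes them into (0, 1) so that closer segments end up
    # with weights nearer to 1.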
adj = np.where(np.isinf(adj), 0, adj)
adj = -adj
mean_adj = np.mean(adj[adj != 0])
std_adj = np.std(adj[adj != 0])
adj[adj != 0] = adj[adj != 0] - mean_adj
adj[adj != 0] = adj[adj != 0] / std_adj
adj[adj != 0] = 1 / (1 + np.exp(-adj[adj != 0]))
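    # Add self-loops and row-normalize: A_hat = D^-1 (A + I), the propagation
    # matrix commonly used by graph-convolution style models.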
I = np.eye(adj.shape[0])
A_hat = adj.copy() + I
D = np.sum(A_hat, axis=1)
D_inv = D ** -1.0
D_inv = np.diag(D_inv)
A_hat = np.matmul(D_inv, A_hat)
if out_file:
out_dm = [adj_full[0], adj_full[1], A_hat]
with open(out_file+'.pkl', 'wb') as f:
pickle.dump(out_dm, f, protocol=2)
return adj_full[0], adj_full[1], A_hat
def sort_dist_matrix(mat, row_col_names):
"""
sort the distance matrix by seg_id_nat
:return:
"""
df = pd.DataFrame(mat, columns=row_col_names, index=row_col_names)
df = df.sort_index(axis=0)
df = df.sort_index(axis=1)
sensor_id_to_ind = {}
for i, sensor_id in enumerate(df.columns):
sensor_id_to_ind[sensor_id] = i
return row_col_names, sensor_id_to_ind, df
#check = prep_adj_matrix('../../gits/river-dl/DRB_data/distance_matrix.npz', 'upstream', 'data/DRB_gwn_full/adj_mx')
#if __name__ == "__main__":
check2 = prep_data(obs_temper_file='../../gits/river-dl/DRB_data/obs_temp_full',
obs_flow_file='../../gits/river-dl/DRB_data/obs_flow_full',
pretrain_file='../../gits/river-dl/DRB_data/uncal_sntemp_input_output',
train_start_date=['1985-10-01', '2016-10-01'],
train_end_date=['2006-09-30', '2020-09-30'],
val_start_date='2006-10-01',
val_end_date='2016-09-30',
test_start_date=['1980-10-01', '2020-10-01'],
test_end_date=['1985-09-30', '2021-09-30'],
x_vars=["seg_rain", "seg_tave_air", "seginc_swrad", "seg_length", "seginc_potet", "seg_slope", "seg_humid",
"seg_elev"],
y_vars=['seg_tave_water'],
primary_variable='temp',
seq_length=365,
period=np.nan,
offset=1,
out_file = 'data/DRB_gwn_full')
'''if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--output_dir", type=str, default="data/METR-LA", help="Output directory.")
parser.add_argument("--traffic_df_filename", type=str, default="data/metr-la.h5", help="Raw traffic readings.",)
parser.add_argument("--seq_length_x", type=int, default=12, help="Sequence Length.",)
parser.add_argument("--seq_length_y", type=int, default=12, help="Sequence Length.",)
parser.add_argument("--y_start", type=int, default=1, help="Y pred start", )
parser.add_argument("--dow", action='store_true',)
args = parser.parse_args()
if os.path.exists(args.output_dir):
reply = str(input(f'{args.output_dir} exists. Do you want to overwrite it? (y/n)')).lower().strip()
if reply[0] != 'y': exit
else:
os.makedirs(args.output_dir)
generate_train_val_test(args)
##### Reformat our inputs to match theirs.
df = pd.read_hdf("data/metr-la.h5")
seq_length_x = 12
seq_length_y = 12
y_start = 1
LAtrain = np.load('data/METR-LA/train.npz')
LAtest = np.load('data/METR-LA/test.npz')
LAval = np.load('data/METR-LA/val.npz')
LAtrain['x'].shape
LAtrain['y'].shape
LAtest['x'].shape
LAtest['y'].shape
check = np.moveaxis(data['x_train'], [0,1,2,3], [0,2,1,3])
np.savez_compressed(os.path.join(out_file, 'pre_train.npz'),
x=data['x_train'],
y=data['y_pre_train'])
np.savez_compressed(os.path.join(out_file,'train.npz'),
x=data['x_train'],
y=data['y_pre_train'],
)
np.savez_compressed(os.path.join(out_file, 'test.npz'),
x=data['x_test'],
y=data['y_pre_test'],
)
np.savez_compressed(os.path.join(out_file,'val.npz'),
x=data['x_val'],
y=data['y_pre_val'],
)
''' | 2.875 | 3 |
Phase-1/Python Basic 1/Day-3.py | CodedLadiesInnovateTech/python-challenges | 11 | 4032 | <reponame>CodedLadiesInnovateTech/python-challenges
"""
1. Write a Python program to print the documents (syntax, description etc.) of Python built-in function(s).
Sample function : abs()
Expected Result :
abs(number) -> number
Return the absolute value of the argument.
Tools: help function
2. Write a Python program to print the calendar of a given month and year.
Tools: Use 'calendar' module.
3. Write a Python program to print the following here document.
Sample string :
a string that you "don't" have to escape
This
is a ....... multi-line
heredoc string --------> example
Tools: string formatting
4. Write a Python program to calculate number of days between two dates.
Sample dates : (2014, 7, 2), (2014, 7, 11)
Expected output : 9 days
Tools: Datetime module, timedelta module
5. Write a Python program to get the volume of a sphere with radius 6.
Tools: input function, math
6. Write a Python program to get the difference between a given number and 17, if the number is greater than 17 return double the absolute difference.
Tools: abs function, input function, math
7. Write a Python program to test whether a number is within 100 of 1000 or 2000.
Tools: maths, input function
8. Write a Python program to calculate the sum of three given numbers, if the values are equal then return three times of their sum.
Tools: math, input function
9. Write a Python program to get a new string from a given string where "Is" has been added to the front. If the given string already begins with "Is" then return the string unchanged.
Tools: input function, string formatting
10. Write a Python program to get a string which is n (non-negative integer) copies of a given string.
Tools: input function, slicing
""" | 4.40625 | 4 |
tests/python/metaclass_inheritance.py | gmgunter/pyre | 25 | 4033 | <filename>tests/python/metaclass_inheritance.py
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# <NAME>. aïvázis
# orthologue
# (c) 1998-2021 all rights reserved
#
#
"""
When a metaclass understands the extra keywords that can be passed during class declaration,
it has to override all these to accommodate the change in signature
"""
class meta(type):
@classmethod
def __prepare__(metacls, name, bases, **kwds):
assert metacls.__name__ == 'meta'
assert name in ['base', 'derived']
if name == 'base':
assert bases == (object,)
assert kwds == {'arg1': True, 'arg2': False}
if name == 'derived':
assert bases == (base,)
assert kwds == {'arg1': False, 'arg2': True}
return super().__prepare__(name, bases)
def __new__(metacls, name, bases, attributes, **kwds):
assert metacls.__name__ == 'meta'
assert name in ['base', 'derived']
if name == 'base':
assert bases == (object,)
assert kwds == {'arg1': True, 'arg2': False}
if name == 'derived':
assert bases == (base,)
assert kwds == {'arg1': False, 'arg2': True}
return super().__new__(metacls, name, bases, attributes)
def __init__(self, name, bases, attributes, **kwds):
assert self.__name__ in ['base', 'derived']
if self.__name__ == 'base':
assert bases == (object,)
assert kwds == {'arg1': True, 'arg2': False}
if self.__name__ == 'derived':
assert bases == (base,)
assert kwds == {'arg1': False, 'arg2': True}
super().__init__(name, bases, attributes)
return
class base(object, metaclass=meta, arg1=True, arg2=False):
def __init__(self, **kwds):
assert type(self).__name__ == 'base'
assert kwds == {}
return
class derived(base, arg1=False, arg2=True):
def __init__(self, **kwds):
assert type(self).__name__ == 'derived'
assert kwds == {}
return
def test():
b = base()
d = derived()
return
# main
if __name__ == "__main__":
test()
# end of file
| 2.9375 | 3 |
cs101/module8/8-1/chroma1.py | idsdlab/basicai_sp21 | 1 | 4034 |
from cs1media import *
import math
def dist(c1, c2):
r1, g1, b1 = c1
r2, g2, b2 = c2
return math.sqrt((r1-r2)**2 + (g1-g2)**2 + (b1-b2)**2)
def chroma(img, key, threshold):
w, h = img.size()
for y in range(h):
for x in range(w):
p = img.get(x, y)
if dist(p, key) < threshold:
img.set(x, y, Color.yellow)
statue = load_picture("photos/statue1.jpg")
chroma(statue, (41, 75, 146), 70)
statue.show()
| 3.40625 | 3 |
wfirst_stars/mklc.py | RuthAngus/wfirst_stars | 0 | 4035 | import numpy as np
import scipy
import scipy.io
import pylab
import numpy
import glob
import pyfits
def mklc(t, nspot=200, incl=(scipy.pi)*5./12., amp=1., tau=30.5, p=10.0):
diffrot = 0.
''' This is a simplified version of the class-based routines in
spot_model.py. It generates a light curves for dark, point like
spots with no limb-darkening.
Parameters:
nspot = desired number of spots present on star at any
one time
amp = desired light curve amplitude
tau = characteristic spot life-time
diffrot = fractional difference between equatorial and polar
rotation period
(unit of time is equatorial rotation period)'''
# print('Period = ', p)
dur = (max(t) - min(t))
# (crude estimate of) total number of spots needed during entire
# time-series
nspot_tot = int(nspot * dur / 2 / tau)
# uniform distribution of spot longitudes
lon = scipy.rand(nspot_tot) * 2 * scipy.pi
# distribution of spot latitudes uniform in sin(latitude)
lat = scipy.arcsin(scipy.rand(nspot_tot))
# spot rotation rate optionally depends on latitude
period = ((scipy.sin(lat) - 0.5) * diffrot + 1.0 ) * p
period0 = scipy.ones(nspot_tot) * p
# all spots have the same maximum area
# (crude estimate of) filling factor needed per spot
ff = amp / scipy.sqrt(nspot)
scale_fac = 1
amax = scipy.ones(nspot_tot) * ff * scale_fac
# all spots have the evolution timescale
decay = scipy.ones(nspot_tot) * tau
# uniform distribution of spot peak times
# start well before and end well after time-series limits (to
# avoid edge effects)
extra = 3 * decay.max()
pk = scipy.rand(nspot_tot) * (dur + 2 * extra) - extra
# COMPUTE THE LIGHT CURVE
# print("Computing light curve...")
time = numpy.array(t - min(t))
area_tot = scipy.zeros_like(time)
dF_tot = scipy.zeros_like(time)
dF_tot0 = scipy.zeros_like(time)
# add up the contributions of individual spots
for i in range(nspot_tot):
# Spot area
if (pk[i] == 0) + (decay[i] == 0):
area = scipy.ones_like(time) * amax[i]
else:
area = amax[i] * \
scipy.exp(-(time - pk[i])**2 / 2. / decay[i]**2)
area_tot += area
# Fore-shortening
phase = 2 * scipy.pi * time / period[i] + lon[i]
phase0 = 2 * scipy.pi * time / period0[i] + lon[i]
mu = scipy.cos(incl) * scipy.sin(lat[i]) + \
scipy.sin(incl) * scipy.cos(lat[i]) * scipy.cos(phase)
mu0 = scipy.cos(incl) * scipy.sin(lat[i]) + \
scipy.sin(incl) * scipy.cos(lat[i]) * scipy.cos(phase0)
mu[mu < 0] = 0.0
mu0[mu0 < 0] = 0.0
# Flux
dF_tot -= area * mu
dF_tot0 -= area * mu0
amp_eff = dF_tot.max()-dF_tot.min()
nspot_eff = area_tot / scale_fac / ff
res0 = scipy.array([nspot_eff.mean(), ff, amp_eff])
res1 = scipy.zeros((4, len(time)))
res1[0,:] = time
res1[1,:] = area_tot
res1[2,:] = dF_tot
res1[3,:] = dF_tot0
# print('Used %d spots in total over %d rotation periods.' % (nspot_tot, dur))
# print('Mean filling factor of individual spots was %.4f.' % ff)
# print('Desired amplitude was %.4f, actual amplitude was %.4f.' \
# % (amp, amp_eff))
# print('Desired number of spots at any one time was %d.' % nspot)
return res0, res1
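if __name__ == "__main__":
    # Illustrative smoke test, not part of the original module: a 90-day
    # light curve sampled twice per day with made-up spot parameters.
    t = numpy.arange(0.0, 90.0, 0.5)
    res0, res1 = mklc(t, nspot=100, amp=0.01, tau=10.0, p=5.0)
    print("nspot_eff, ff, amp_eff: %s" % (res0,))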
| 2.625 | 3 |
bin/sort.py | pelavarre/pybashish | 4 | 4036 | <filename>bin/sort.py
#!/usr/bin/env python3
"""
usage: sort.py [-h]
sort lines
options:
-h, --help show this help message and exit
quirks:
sorts tabs as different than spaces
sorts some spaces ending a line as different than none ending a line
examples:
Oh no! No examples disclosed!! 💥 💔 💥
"""
# FIXME: doc -k$N,$N and -n and maybe little else is worth learning
# FIXME: add -k-1,-1 for negative field indexing
# FIXME: think into the mess at "sort" vs "LC_ALL=C sort"
import sys
import argdoc
def main():
args = argdoc.parse_args()
sys.stderr.write("{}\n".format(args))
sys.stderr.write("{}\n".format(argdoc.format_usage().rstrip()))
sys.stderr.write("sort.py: error: not implemented\n")
sys.exit(2) # exit 2 from rejecting usage
if __name__ == "__main__":
main()
# copied from: git clone https://github.com/pelavarre/pybashish.git
| 3.21875 | 3 |
davan/http/service/telldus/tdtool.py | davandev/davanserver | 0 | 4037 | <reponame>davandev/davanserver<gh_stars>0
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import sys, getopt, httplib, urllib, json, os
import oauth.oauth as oauth
import datetime
from configobj import ConfigObj
import logging
global logger
logger = logging.getLogger(os.path.basename(__file__))
import davan.util.application_logger as log_manager
#insert your own public_key and private_key
import davan.config.config_creator as config_creator
configuration = config_creator.create()
PUBLIC_KEY = configuration["TELLDUS_PUBLIC_KEY"]
PRIVATE_KEY = configuration["TELLDUS_PRIVATE_KEY"]
TELLSTICK_TURNON = 1
TELLSTICK_TURNOFF = 2
TELLSTICK_BELL = 4
TELLSTICK_DIM = 16
TELLSTICK_UP = 128
TELLSTICK_DOWN = 256
SUPPORTED_METHODS = TELLSTICK_TURNON | TELLSTICK_TURNOFF | TELLSTICK_BELL | TELLSTICK_DIM | TELLSTICK_UP | TELLSTICK_DOWN;
def printUsage():
print("Usage: %s [ options ]" % sys.argv[0])
print("")
print("Options:")
print(" -[lnfdbvh] [ --list ] [ --help ]")
print(" [ --on device ] [ --off device ] [ --bell device ]")
print(" [ --dimlevel level --dim device ]")
print(" [ --up device --down device ]")
print("")
print(" --list (-l short option)")
print(" List currently configured devices.")
print("")
print(" --help (-h short option)")
print(" Shows this screen.")
print("")
print(" --on device (-n short option)")
print(" Turns on device. 'device' must be an integer of the device-id")
print(" Device-id and name is outputed with the --list option")
print("")
print(" --off device (-f short option)")
print(" Turns off device. 'device' must be an integer of the device-id")
print(" Device-id and name is outputed with the --list option")
print("")
print(" --dim device (-d short option)")
print(" Dims device. 'device' must be an integer of the device-id")
print(" Device-id and name is outputed with the --list option")
print(" Note: The dimlevel parameter must be set before using this option.")
print("")
print(" --dimlevel level (-v short option)")
print(" Set dim level. 'level' should an integer, 0-255.")
print(" Note: This parameter must be set before using dim.")
print("")
print(" --bell device (-b short option)")
print(" Sends bell command to devices supporting this. 'device' must")
print(" be an integer of the device-id")
print(" Device-id and name is outputed with the --list option")
print("")
print(" --up device")
print(" Sends up command to devices supporting this. 'device' must")
print(" be an integer of the device-id")
print(" Device-id and name is outputed with the --list option")
print("")
print(" --down device")
print(" Sends down command to devices supporting this. 'device' must")
print(" be an integer of the device-id")
print(" Device-id and name is outputed with the --list option")
print("")
print(" --list-sensors (-s short option)")
print(" Lists currently configured sensors")
print("")
print(" --sensor-data sensor (-d short option)")
print(" Get sensor data with sensor id number")
print("")
print("Report bugs to <<EMAIL>>")
def listSensors():
response = doRequest('sensors/list', {'includeIgnored': 1});
logger.debug("Number of sensors: %i" % len(response['sensor']));
for sensor in response['sensor']:
lastupdate = datetime.datetime.fromtimestamp(int(sensor['lastUpdated']));
logger.debug( "%s\t%s\t%s" % (sensor['id'], sensor['name'], lastupdate))
def listSensorsAndValues():
response = doRequest('sensors/list', {'includeValues': 1});
return response
def listDevicesAndValues():
response = doRequest('devices/list', {'supportedMethods': SUPPORTED_METHODS})
return response
def getSensorData(sensorId):
response = doRequest('sensor/info', {'id': sensorId });
lastupdate = datetime.datetime.fromtimestamp(int(response['lastUpdated']));
sensor_name = response['name'];
for data in response['data']:
logger.debug( "%s\t%s\t%s\t%s" % (sensor_name, data['name'], data['value'], lastupdate) )
def listDevices():
response = doRequest('devices/list', {'supportedMethods': SUPPORTED_METHODS})
logger.debug("Number of devices: %i" % len(response['device']));
for device in response['device']:
if (device['state'] == TELLSTICK_TURNON):
state = 'ON'
elif (device['state'] == TELLSTICK_TURNOFF):
state = 'OFF'
elif (device['state'] == TELLSTICK_DIM):
state = "DIMMED"
elif (device['state'] == TELLSTICK_UP):
state = "UP"
elif (device['state'] == TELLSTICK_DOWN):
state = "DOWN"
else:
state = 'Unknown state'
logger.debug("%s\t%s\t%s" % (device['id'], device['name'], state));
def doMethod(deviceId, methodId, methodValue = 0):
response = doRequest('device/info', {'id': deviceId})
if (methodId == TELLSTICK_TURNON):
method = 'on'
elif (methodId == TELLSTICK_TURNOFF):
method = 'off'
elif (methodId == TELLSTICK_BELL):
method = 'bell'
elif (methodId == TELLSTICK_UP):
method = 'up'
elif (methodId == TELLSTICK_DOWN):
method = 'down'
if ('error' in response):
name = ''
retString = response['error']
else:
name = response['name']
response = doRequest('device/command', {'id': deviceId, 'method': methodId, 'value': methodValue})
if ('error' in response):
retString = response['error']
else:
retString = response['status']
if (methodId in (TELLSTICK_TURNON, TELLSTICK_TURNOFF)):
logger.debug("Turning %s device %s, %s - %s" % ( method, deviceId, name, retString));
elif (methodId in (TELLSTICK_BELL, TELLSTICK_UP, TELLSTICK_DOWN)):
logger.debug("Sending %s to: %s %s - %s" % (method, deviceId, name, retString))
elif (methodId == TELLSTICK_DIM):
logger.debug("Dimming device: %s %s to %s - %s" % (deviceId, name, methodValue, retString))
def doRequest(method, params):
global config
config = ConfigObj(os.environ['HOME'] + '/.config/Telldus/tdtool.conf')
consumer = oauth.OAuthConsumer(PUBLIC_KEY, PRIVATE_KEY)
token = oauth.OAuthToken(config['token'], config['tokenSecret'])
oauth_request = oauth.OAuthRequest.from_consumer_and_token(consumer, token=token, http_method='GET', http_url="http://api.telldus.com/json/" + method, parameters=params)
oauth_request.sign_request(oauth.OAuthSignatureMethod_HMAC_SHA1(), consumer, token)
headers = oauth_request.to_header()
headers['Content-Type'] = 'application/x-www-form-urlencoded'
conn = httplib.HTTPConnection("api.telldus.com:80")
conn.request('GET', "/json/" + method + "?" + urllib.urlencode(params, True).replace('+', '%20'), headers=headers)
response = conn.getresponse()
try:
return json.load(response)
except:
logger.debug( 'Failed to decode response :%s'%str(response))
return ""
def requestToken():
global config
consumer = oauth.OAuthConsumer(PUBLIC_KEY, PRIVATE_KEY)
request = oauth.OAuthRequest.from_consumer_and_token(consumer, http_url='http://api.telldus.com/oauth/requestToken')
request.sign_request(oauth.OAuthSignatureMethod_HMAC_SHA1(), consumer, None)
conn = httplib.HTTPConnection('api.telldus.com:80')
conn.request(request.http_method, '/oauth/requestToken', headers=request.to_header())
resp = conn.getresponse().read()
token = oauth.OAuthToken.from_string(resp)
logger.debug( 'Open the following url in your webbrowser:\nhttp://api.telldus.com/oauth/authorize?oauth_token=%s\n' % token.key)
logger.debug( 'After logging in and accepting to use this application run:\n%s --authenticate' % (sys.argv[0]))
config['requestToken'] = str(token.key)
config['requestTokenSecret'] = str(token.secret)
saveConfig()
def getAccessToken():
global config
consumer = oauth.OAuthConsumer(PUBLIC_KEY, PRIVATE_KEY)
token = oauth.OAuthToken(config['requestToken'], config['requestTokenSecret'])
request = oauth.OAuthRequest.from_consumer_and_token(consumer, token=token, http_method='GET', http_url='http://api.telldus.com/oauth/accessToken')
request.sign_request(oauth.OAuthSignatureMethod_HMAC_SHA1(), consumer, token)
conn = httplib.HTTPConnection('api.telldus.com:80')
conn.request(request.http_method, request.to_url(), headers=request.to_header())
resp = conn.getresponse()
if resp.status != 200:
logger.debug( 'Error retreiving access token, the server replied:\n%s' % resp.read())
return
token = oauth.OAuthToken.from_string(resp.read())
config['requestToken'] = None
config['requestTokenSecret'] = None
config['token'] = str(token.key)
config['tokenSecret'] = str(token.secret)
logger.debug( 'Authentication successful, you can now use tdtool')
saveConfig()
def authenticate():
try:
opts, args = getopt.getopt(sys.argv[1:], '', ['authenticate'])
for opt, arg in opts:
if opt in ('--authenticate'):
getAccessToken()
return
except getopt.GetoptError:
pass
requestToken()
def saveConfig():
global config
try:
os.makedirs(os.environ['HOME'] + '/.config/Telldus')
except:
pass
config.write()
def main(argv):
global config
if ('token' not in config or config['token'] == ''):
authenticate()
return
try:
opts, args = getopt.getopt(argv, "lsd:n:f:d:b:v:h", ["list", "list-sensors", "sensor-data=", "on=", "off=", "dim=", "bell=", "dimlevel=", "up=", "down=", "help"])
except getopt.GetoptError:
printUsage()
sys.exit(2)
dimlevel = -1
for opt, arg in opts:
if opt in ("-h", "--help"):
printUsage()
elif opt in ("-l", "--list"):
listDevices()
elif opt in ("-s", "--list-sensors"):
listSensors()
elif opt in ("-x", "--list-sensorsvalue"):
listSensorsAndValues()
elif opt in ("-d", "--sensor-data"):
getSensorData(arg)
elif opt in ("-n", "--on"):
doMethod(arg, TELLSTICK_TURNON)
elif opt in ("-f", "--off"):
doMethod(arg, TELLSTICK_TURNOFF)
elif opt in ("-b", "--bell"):
doMethod(arg, TELLSTICK_BELL)
elif opt in ("-d", "--dim"):
if (dimlevel < 0):
logger.debug("Dimlevel must be set with --dimlevel before --dim")
else:
doMethod(arg, TELLSTICK_DIM, dimlevel)
elif opt in ("-v", "--dimlevel"):
dimlevel = arg
elif opt in ("--up"):
doMethod(arg, TELLSTICK_UP)
elif opt in ("--down"):
doMethod(arg, TELLSTICK_DOWN)
if __name__ == "__main__":
config = ConfigObj(os.environ['HOME'] + '/.config/Telldus/tdtool.conf')
configuration = config_creator.create()
log_manager.start_logging(configuration["LOGFILE_PATH"],loglevel=4)
main(sys.argv[1:])
| 2.171875 | 2 |
ichnaea/data/export.py | rajreet/ichnaea | 348 | 4038 | from collections import defaultdict
import json
import re
import time
from urllib.parse import urlparse
import uuid
import boto3
import boto3.exceptions
import botocore.exceptions
import markus
import redis.exceptions
import requests
import requests.exceptions
from sqlalchemy import select
import sqlalchemy.exc
from ichnaea.data import _map_content_enabled
from ichnaea.models import (
ApiKey,
BlueObservation,
BlueReport,
BlueShard,
CellObservation,
CellReport,
CellShard,
DataMap,
ExportConfig,
Report,
WifiObservation,
WifiReport,
WifiShard,
)
from ichnaea.models.content import encode_datamap_grid
from ichnaea import util
WHITESPACE = re.compile(r"\s", flags=re.UNICODE)
METRICS = markus.get_metrics()
class IncomingQueue(object):
"""
The incoming queue contains the data collected in the web application. It
is the single entrypoint from which all other data pipelines get their
data.
It distributes the data into the configured export queues, checks those
queues and if they contain enough or old enough data schedules an async
export task to process the data in each queue.
"""
def __init__(self, task):
self.task = task
def __call__(self, export_task):
redis_client = self.task.redis_client
data_queue = self.task.app.data_queues["update_incoming"]
data = data_queue.dequeue()
grouped = defaultdict(list)
for item in data:
grouped[(item["api_key"], item.get("source", "gnss"))].append(
{"api_key": item["api_key"], "report": item["report"]}
)
with self.task.db_session(commit=False) as session:
export_configs = ExportConfig.all(session)
with self.task.redis_pipeline() as pipe:
for (api_key, source), items in grouped.items():
for config in export_configs:
if config.allowed(api_key, source):
queue_key = config.queue_key(api_key, source)
queue = config.queue(queue_key, redis_client)
queue.enqueue(items, pipe=pipe)
for config in export_configs:
# Check all queues if they now contain enough data or
# old enough data to be ready for processing.
for queue_key in config.partitions(redis_client):
queue = config.queue(queue_key, redis_client)
if queue.ready():
export_task.delay(config.name, queue_key)
if data_queue.ready():
self.task.apply_countdown()
class ReportExporter(object):
_retriable = (IOError,)
_retries = 3
_retry_wait = 1.0
def __init__(self, task, config, queue_key):
self.task = task
self.config = config
self.queue_key = queue_key
self.queue = config.queue(queue_key, task.redis_client)
self.stats_tags = ["key:" + self.config.name]
@staticmethod
def export(task, name, queue_key):
with task.db_session(commit=False) as session:
config = ExportConfig.get(session, name)
exporter_types = {
"dummy": DummyExporter,
"geosubmit": GeosubmitExporter,
"internal": InternalExporter,
"s3": S3Exporter,
}
exporter_type = exporter_types.get(config.schema)
if exporter_type is not None:
exporter_type(task, config, queue_key)()
def __call__(self):
queue_items = self.queue.dequeue()
if not queue_items:
return
success = False
for i in range(self._retries):
try:
with METRICS.timer("data.export.upload.timing", tags=self.stats_tags):
self.send(queue_items)
success = True
except self._retriable:
success = False
time.sleep(self._retry_wait * (i ** 2 + 1))
if success:
METRICS.incr("data.export.batch", tags=self.stats_tags)
break
if success and self.queue.ready():
self.task.apply_countdown(args=[self.config.name, self.queue_key])
def send(self, queue_items):
raise NotImplementedError()
class DummyExporter(ReportExporter):
def send(self, queue_items):
pass
class GeosubmitExporter(ReportExporter):
_retriable = (IOError, requests.exceptions.RequestException)
def send(self, queue_items):
# ignore metadata
reports = [item["report"] for item in queue_items]
headers = {
"Content-Encoding": "gzip",
"Content-Type": "application/json",
"User-Agent": "ichnaea",
}
response = requests.post(
self.config.url,
data=util.encode_gzip(
json.dumps({"items": reports}).encode(), compresslevel=5
),
headers=headers,
timeout=60.0,
)
# log upload_status and trigger exception for bad responses
# this causes the task to be re-tried
METRICS.incr(
"data.export.upload",
tags=self.stats_tags + ["status:%s" % response.status_code],
)
response.raise_for_status()
class S3Exporter(ReportExporter):
_retriable = (
IOError,
boto3.exceptions.Boto3Error,
botocore.exceptions.BotoCoreError,
)
def send(self, queue_items):
# ignore metadata
reports = [item["report"] for item in queue_items]
_, bucketname, path = urlparse(self.config.url)[:3]
# s3 key names start without a leading slash
path = path.lstrip("/")
if not path.endswith("/"):
path += "/"
year, month, day = util.utcnow().timetuple()[:3]
# strip away queue prefix again
parts = self.queue_key.split(":")
source = parts[1]
api_key = parts[2]
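        # The configured URL path acts as a template: it may contain the
        # placeholders {source}, {api_key}, {year}, {month} and {day}, which
        # are filled in below to build the object key prefix.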
obj_name = path.format(
source=source, api_key=api_key, year=year, month=month, day=day
)
obj_name += uuid.uuid1().hex + ".json.gz"
try:
data = util.encode_gzip(
json.dumps({"items": reports}).encode(), compresslevel=7
)
s3 = boto3.resource("s3")
bucket = s3.Bucket(bucketname)
obj = bucket.Object(obj_name)
obj.put(Body=data, ContentEncoding="gzip", ContentType="application/json")
METRICS.incr(
"data.export.upload", tags=self.stats_tags + ["status:success"]
)
except Exception:
METRICS.incr(
"data.export.upload", tags=self.stats_tags + ["status:failure"]
)
raise
class InternalTransform(object):
"""
This maps the geosubmit v2 schema used in view code and external
transfers (backup, forward to partners) to the internal submit v1
schema used in our own database models.
"""
# *_id maps a source section id to a target section id
# *_map maps fields inside the section from source to target id
# if the names are equal, a simple string can be specified instead
# of a two-tuple
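    # Illustrative example of the mapping (values made up):
    #   {"position": {"latitude": 51.5, "longitude": -0.1, "accuracy": 10.0},
    #    "wifiAccessPoints": [{"macAddress": "ab:cd:ef:01:23:45",
    #                          "signalStrength": -60}]}
    # becomes
    #   {"lat": 51.5, "lon": -0.1, "accuracy": 10.0,
    #    "wifi": [{"mac": "ab:cd:ef:01:23:45", "signal": -60}]}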
position_id = ("position", None)
position_map = [
("latitude", "lat"),
("longitude", "lon"),
"accuracy",
"altitude",
("altitudeAccuracy", "altitude_accuracy"),
"heading",
"pressure",
"speed",
"source",
]
blue_id = ("bluetoothBeacons", "blue")
blue_map = [("macAddress", "mac"), "age", ("signalStrength", "signal")]
cell_id = ("cellTowers", "cell")
cell_map = [
("radioType", "radio"),
("mobileCountryCode", "mcc"),
("mobileNetworkCode", "mnc"),
("locationAreaCode", "lac"),
("cellId", "cid"),
"age",
"asu",
("primaryScramblingCode", "psc"),
"serving",
("signalStrength", "signal"),
("timingAdvance", "ta"),
]
wifi_id = ("wifiAccessPoints", "wifi")
wifi_map = [
("macAddress", "mac"),
"age",
"channel",
"frequency",
("radioType", "radio"),
("signalToNoiseRatio", "snr"),
("signalStrength", "signal"),
]
def _map_dict(self, item_source, field_map):
value = {}
for spec in field_map:
if isinstance(spec, tuple):
source, target = spec
else:
source = spec
target = spec
source_value = item_source.get(source)
if source_value is not None:
value[target] = source_value
return value
def _parse_dict(self, item, report, key_map, field_map):
value = {}
item_source = item.get(key_map[0])
if item_source:
value = self._map_dict(item_source, field_map)
if value:
if key_map[1] is None:
report.update(value)
else:
report[key_map[1]] = value
return value
def _parse_list(self, item, report, key_map, field_map):
values = []
for value_item in item.get(key_map[0], ()):
value = self._map_dict(value_item, field_map)
if value:
values.append(value)
if values:
report[key_map[1]] = values
return values
def __call__(self, item):
report = {}
self._parse_dict(item, report, self.position_id, self.position_map)
blues = self._parse_list(item, report, self.blue_id, self.blue_map)
cells = self._parse_list(item, report, self.cell_id, self.cell_map)
wifis = self._parse_list(item, report, self.wifi_id, self.wifi_map)
position = item.get("position") or {}
gps_age = position.get("age", 0)
timestamp = item.get("timestamp")
if timestamp:
# turn timestamp into GPS timestamp
report["timestamp"] = timestamp - gps_age
if gps_age:
# Normalize age fields to be relative to GPS time
for type_ in ("blue", "cell", "wifi"):
for record in report.get(type_, ()):
record["age"] = record.get("age", 0) - gps_age
if blues or cells or wifis:
return report
return {}
class InternalExporter(ReportExporter):
_retriable = (IOError, redis.exceptions.RedisError, sqlalchemy.exc.InternalError)
transform = InternalTransform()
def send(self, queue_items):
api_keys = set()
api_keys_known = set()
metrics = {}
items = []
for item in queue_items:
# preprocess items and extract set of API keys
item["report"] = self.transform(item["report"])
if item["report"]:
items.append(item)
api_keys.add(item["api_key"])
for api_key in api_keys:
metrics[api_key] = {}
for type_ in ("report", "blue", "cell", "wifi"):
for action in ("drop", "upload"):
metrics[api_key]["%s_%s" % (type_, action)] = 0
with self.task.db_session(commit=False) as session:
# limit database session to get API keys
keys = [key for key in api_keys if key]
if keys:
columns = ApiKey.__table__.c
rows = session.execute(
select([columns.valid_key]).where(columns.valid_key.in_(keys))
).fetchall()
for row in rows:
api_keys_known.add(row.valid_key)
positions = []
observations = {"blue": [], "cell": [], "wifi": []}
for item in items:
api_key = item["api_key"]
report = item["report"]
obs, malformed_obs = self.process_report(report)
any_data = False
for name in ("blue", "cell", "wifi"):
if obs.get(name):
observations[name].extend(obs[name])
metrics[api_key][name + "_upload"] += len(obs[name])
any_data = True
metrics[api_key][name + "_drop"] += malformed_obs.get(name, 0)
metrics[api_key]["report_upload"] += 1
if any_data:
positions.append((report["lat"], report["lon"]))
else:
metrics[api_key]["report_drop"] += 1
with self.task.redis_pipeline() as pipe:
self.queue_observations(pipe, observations)
if _map_content_enabled and positions:
self.process_datamap(pipe, positions)
self.emit_metrics(api_keys_known, metrics)
def queue_observations(self, pipe, observations):
for datatype, shard_model, shard_key, queue_prefix in (
("blue", BlueShard, "mac", "update_blue_"),
("cell", CellShard, "cellid", "update_cell_"),
("wifi", WifiShard, "mac", "update_wifi_"),
):
queued_obs = defaultdict(list)
for obs in observations[datatype]:
# group by sharded queue
shard_id = shard_model.shard_id(getattr(obs, shard_key))
queue_id = queue_prefix + shard_id
queued_obs[queue_id].append(obs.to_json())
for queue_id, values in queued_obs.items():
# enqueue values for each queue
queue = self.task.app.data_queues[queue_id]
queue.enqueue(values, pipe=pipe)
def emit_metrics(self, api_keys_known, metrics):
for api_key, key_metrics in metrics.items():
api_tag = []
if api_key and api_key in api_keys_known:
api_tag = ["key:%s" % api_key]
for name, count in key_metrics.items():
if not count:
continue
type_, action = name.split("_")
if type_ == "report":
suffix = "report"
tags = api_tag
else:
suffix = "observation"
tags = ["type:%s" % type_] + api_tag
METRICS.incr("data.%s.%s" % (suffix, action), count, tags=tags)
def process_report(self, data):
report = Report.create(**data)
if report is None:
return ({}, {})
malformed = {}
observations = {}
for name, report_cls, obs_cls in (
("blue", BlueReport, BlueObservation),
("cell", CellReport, CellObservation),
("wifi", WifiReport, WifiObservation),
):
malformed[name] = 0
observations[name] = {}
if data.get(name):
for item in data[name]:
# validate the blue/cell/wifi specific fields
item_report = report_cls.create(**item)
if item_report is None:
malformed[name] += 1
continue
# combine general and specific report data into one
item_obs = obs_cls.combine(report, item_report)
item_key = item_obs.unique_key
# if we have better data for the same key, ignore
existing = observations[name].get(item_key)
if existing is not None and existing.better(item_obs):
continue
observations[name][item_key] = item_obs
obs = {
"blue": observations["blue"].values(),
"cell": observations["cell"].values(),
"wifi": observations["wifi"].values(),
}
return (obs, malformed)
def process_datamap(self, pipe, positions):
grids = set()
for lat, lon in positions:
if lat is not None and lon is not None:
grids.add(DataMap.scale(lat, lon))
shards = defaultdict(set)
for lat, lon in grids:
shards[DataMap.shard_id(lat, lon)].add(encode_datamap_grid(lat, lon))
for shard_id, values in shards.items():
queue = self.task.app.data_queues["update_datamap_" + shard_id]
queue.enqueue(list(values), pipe=pipe)
| 2.171875 | 2 |
test/inference_correctness/dcn_multi_hot.py | x-y-z/HugeCTR | 130 | 4039 | <filename>test/inference_correctness/dcn_multi_hot.py
import hugectr
from mpi4py import MPI
solver = hugectr.CreateSolver(model_name = "dcn",
max_eval_batches = 1,
batchsize_eval = 16384,
batchsize = 16384,
lr = 0.001,
vvgpu = [[0]],
repeat_dataset = True,
use_mixed_precision = False,
scaler = 1.0,
use_cuda_graph = True,
metrics_spec = {hugectr.MetricsType.AUC: 1.0})
reader = hugectr.DataReaderParams(data_reader_type = hugectr.DataReaderType_t.Norm,
source = ["./dcn_data/file_list.txt"],
eval_source = "./dcn_data/file_list_test.txt",
check_type = hugectr.Check_t.Sum,
num_workers = 16)
optimizer = hugectr.CreateOptimizer(optimizer_type = hugectr.Optimizer_t.Adam,
update_type = hugectr.Update_t.Global,
beta1 = 0.9,
beta2 = 0.999,
epsilon = 0.0001)
model = hugectr.Model(solver, reader, optimizer)
model.add(hugectr.Input(label_dim = 1, label_name = "label",
dense_dim = 13, dense_name = "dense",
data_reader_sparse_param_array =
[hugectr.DataReaderSparseParam("data1", 2, False, 26)]))
model.add(hugectr.SparseEmbedding(embedding_type = hugectr.Embedding_t.DistributedSlotSparseEmbeddingHash,
workspace_size_per_gpu_in_mb = 300,
embedding_vec_size = 16,
combiner = "sum",
sparse_embedding_name = "sparse_embedding1",
bottom_name = "data1",
optimizer = optimizer))
model.add(hugectr.DenseLayer(layer_type = hugectr.Layer_t.Reshape,
bottom_names = ["sparse_embedding1"],
top_names = ["reshape1"],
leading_dim=416))
model.add(hugectr.DenseLayer(layer_type = hugectr.Layer_t.Concat,
bottom_names = ["reshape1", "dense"], top_names = ["concat1"]))
model.add(hugectr.DenseLayer(layer_type = hugectr.Layer_t.Slice,
bottom_names = ["concat1"],
top_names = ["slice11", "slice12"],
ranges=[(0,429),(0,429)]))
model.add(hugectr.DenseLayer(layer_type = hugectr.Layer_t.MultiCross,
bottom_names = ["slice11"],
top_names = ["multicross1"],
num_layers=1))
model.add(hugectr.DenseLayer(layer_type = hugectr.Layer_t.InnerProduct,
bottom_names = ["slice12"],
top_names = ["fc1"],
num_output=1024))
model.add(hugectr.DenseLayer(layer_type = hugectr.Layer_t.ReLU,
bottom_names = ["fc1"],
top_names = ["relu1"]))
model.add(hugectr.DenseLayer(layer_type = hugectr.Layer_t.Dropout,
bottom_names = ["relu1"],
top_names = ["dropout1"],
dropout_rate=0.5))
model.add(hugectr.DenseLayer(layer_type = hugectr.Layer_t.InnerProduct,
bottom_names = ["dropout1"],
top_names = ["fc2"],
num_output=1024))
model.add(hugectr.DenseLayer(layer_type = hugectr.Layer_t.ReLU,
bottom_names = ["fc2"],
top_names = ["relu2"]))
model.add(hugectr.DenseLayer(layer_type = hugectr.Layer_t.Dropout,
bottom_names = ["relu2"],
top_names = ["dropout2"],
dropout_rate=0.5))
model.add(hugectr.DenseLayer(layer_type = hugectr.Layer_t.Concat,
bottom_names = ["dropout2", "multicross1"],
top_names = ["concat2"]))
model.add(hugectr.DenseLayer(layer_type = hugectr.Layer_t.InnerProduct,
bottom_names = ["concat2"],
top_names = ["fc3"],
num_output=1))
model.add(hugectr.DenseLayer(layer_type = hugectr.Layer_t.BinaryCrossEntropyLoss,
bottom_names = ["fc3", "label"],
top_names = ["loss"]))
model.compile()
model.summary()
model.graph_to_json(graph_config_file = "/dump_infer/dcn.json")
model.fit(max_iter = 2300, display = 200, eval_interval = 2000, snapshot = 2000, snapshot_prefix = "/dump_infer/dcn")
model.export_predictions("/dump_infer/dcn_pred_" + str(2000), "/dump_infer/dcn_label_" + str(2000))
from hugectr.inference import InferenceParams, CreateInferenceSession
import numpy as np
batch_size = 16384
num_batches = 1
data_source = "./dcn_data/file_list_test.txt"
inference_params = InferenceParams(model_name = "dcn",
max_batchsize = batch_size,
hit_rate_threshold = 1.0,
dense_model_file = "/dump_infer/dcn_dense_2000.model",
sparse_model_files = ["/dump_infer/dcn0_sparse_2000.model"],
device_id = 0,
use_gpu_embedding_cache = False,
cache_size_percentage = 1.0,
i64_input_key = False,
use_mixed_precision = False,
use_cuda_graph = True)
inference_session = CreateInferenceSession("/dump_infer/dcn.json", inference_params)
predictions = inference_session.predict(num_batches = num_batches,
source = data_source,
data_reader_type = hugectr.DataReaderType_t.Norm,
check_type = hugectr.Check_t.Sum)
ground_truth = np.loadtxt("/dump_infer/dcn_pred_2000")
diff = predictions - ground_truth
mse = np.mean(diff * diff)
if mse > 1e-3:
raise RuntimeError("Too large mse between DCN multi hot inference and training: {}".format(mse))
sys.exit(1)
else:
print("DCN multi hot inference results are consistent with those during training, mse: {}".format(mse)) | 1.859375 | 2 |
bindings/pydrake/systems/perception.py | RobotLocomotion/drake-python3.7 | 2 | 4040 | <reponame>RobotLocomotion/drake-python3.7
import numpy as np
from pydrake.common.value import AbstractValue
from pydrake.math import RigidTransform
from pydrake.perception import BaseField, Fields, PointCloud
from pydrake.systems.framework import LeafSystem
def _TransformPoints(points_Ci, X_CiSi):
# Make homogeneous copy of points.
points_h_Ci = np.vstack((points_Ci,
np.ones((1, points_Ci.shape[1]))))
return X_CiSi.dot(points_h_Ci)[:3, :]
def _TileColors(color, dim):
# Need manual broadcasting.
return np.tile(np.array([color]).T, (1, dim))
def _ConcatenatePointClouds(points_dict, colors_dict):
scene_points = None
scene_colors = None
for id in points_dict:
if scene_points is None:
scene_points = points_dict[id]
else:
scene_points = np.hstack((points_dict[id], scene_points))
if scene_colors is None:
scene_colors = colors_dict[id]
else:
scene_colors = np.hstack((colors_dict[id], scene_colors))
valid_indices = np.logical_not(np.isnan(scene_points))
scene_points = scene_points[:, valid_indices[0, :]]
scene_colors = scene_colors[:, valid_indices[0, :]]
return scene_points, scene_colors
class PointCloudConcatenation(LeafSystem):
"""
.. pydrake_system::
name: PointCloudConcatenation
input_ports:
- point_cloud_CiSi_id0
- X_FCi_id0
- ...
- point_cloud_CiSi_idN
- X_FCi_idN
output_ports:
- point_cloud_FS
"""
def __init__(self, id_list, default_rgb=[255., 255., 255.]):
"""
A system that takes in N point clouds of points Si in frame Ci, and N
RigidTransforms from frame Ci to F, to put each point cloud in a common
frame F. The system returns one point cloud combining all of the
transformed point clouds. Each point cloud must have XYZs. RGBs are
optional. If absent, those points will be the provided default color.
@param id_list A list containing the string IDs of all of the point
clouds. This is often the serial number of the camera they came
from, such as "1" for a simulated camera or "805212060373" for a
real camera.
@param default_rgb A list of length 3 containing the RGB values to use
in the absence of PointCloud.rgbs. Values should be between 0 and
255. The default is white.
"""
LeafSystem.__init__(self)
self._point_cloud_ports = {}
self._transform_ports = {}
self._id_list = id_list
self._default_rgb = np.array(default_rgb)
output_fields = Fields(BaseField.kXYZs | BaseField.kRGBs)
for id in self._id_list:
self._point_cloud_ports[id] = self.DeclareAbstractInputPort(
"point_cloud_CiSi_{}".format(id),
AbstractValue.Make(PointCloud(fields=output_fields)))
self._transform_ports[id] = self.DeclareAbstractInputPort(
"X_FCi_{}".format(id),
AbstractValue.Make(RigidTransform.Identity()))
self.DeclareAbstractOutputPort("point_cloud_FS",
lambda: AbstractValue.Make(
PointCloud(fields=output_fields)),
self.DoCalcOutput)
def _AlignPointClouds(self, context):
points = {}
colors = {}
for id in self._id_list:
point_cloud = self.EvalAbstractInput(
context, self._point_cloud_ports[id].get_index()).get_value()
X_CiSi = self.EvalAbstractInput(
context, self._transform_ports[id].get_index()).get_value()
points[id] = _TransformPoints(
point_cloud.xyzs(), X_CiSi.GetAsMatrix4())
if point_cloud.has_rgbs():
colors[id] = point_cloud.rgbs()
else:
colors[id] = _TileColors(
self._default_rgb, point_cloud.xyzs().shape[1])
return _ConcatenatePointClouds(points, colors)
def DoCalcOutput(self, context, output):
scene_points, scene_colors = self._AlignPointClouds(context)
output.get_mutable_value().resize(scene_points.shape[1])
output.get_mutable_value().mutable_xyzs()[:] = scene_points
output.get_mutable_value().mutable_rgbs()[:] = scene_colors
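

# Illustrative wiring sketch, not part of the original module. In a
# DiagramBuilder one would typically do something like:
#   concat = builder.AddSystem(PointCloudConcatenation(["0", "1"]))
#   builder.Connect(camera_0_cloud_port, concat.GetInputPort("point_cloud_CiSi_0"))
#   builder.Connect(camera_1_cloud_port, concat.GetInputPort("point_cloud_CiSi_1"))
# and fix or connect each "X_FCi_<id>" pose port before evaluating the
# "point_cloud_FS" output port.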
| 2.328125 | 2 |
experiments/db_test.py | mit-ll/CATAN | 15 | 4041 | <gh_stars>10-100
#!/usr/bin/env python
"""
@author <NAME>
© 2015 Massachusetts Institute of Technology
"""
import argparse
import random
import catan.db
from catan.data import NodeMessage
# test data
STATUS_LIST = ['ok', 'injured', 'deceased']
# nodes
def gen_nodes(n, db, start_lat, stop_lat, start_long, stop_long):
assert n > 0
cmd = "INSERT INTO catan_nodes VALUES "
# generate n random nodes, centered around Cambridge
for i in range(n):
# random lat, long
lat = round(random.uniform(start_lat, stop_lat), 6)
lng = round(random.uniform(start_long, stop_long), 6)
# node_id, gps_lat, gps_long, gps_acc, path, timestamp
sql_cmd = cmd + "(%d, %.6f, %.6f, %.6f, %.6f, %.6f)" % (i, lat, lng, 0, 0, 0)
db._sql(sql_cmd)
# people
def gen_people(n, db, start_lat, stop_lat, start_long, stop_long):
"""
Generates n people, random male/female ratio between 5 and 90 years of age
"""
assert n > 0
# open male first names file
f = open('dist.male.first','r')
male_first_names = [name.strip().split()[0] for name in f.readlines()]
f.close()
# open female first names file
f = open('dist.female.first','r')
female_first_names = [name.strip().split()[0] for name in f.readlines()]
f.close()
# open last names file
f = open('dist.all.last','r')
family_names = [name.strip().split()[0] for name in f.readlines()]
f.close()
# generate people
for i in range(n):
catanDBObj = catan.db.CatanDatabaseObject()
# bio
sex = random.randint(0,1)
if sex == 0: # male
catanDBObj.person_bio.name_given = male_first_names[random.randint(0,len(male_first_names)-1)]
catanDBObj.person_bio.sex = 'male'
else: # female
catanDBObj.person_bio.name_given = female_first_names[random.randint(0,len(female_first_names)-1)]
catanDBObj.person_bio.sex = 'female'
catanDBObj.person_bio.name_family = family_names[random.randint(0,len(family_names)-1)]
catanDBObj.person_bio.age = random.randint(5,90)
# message (message, status, location, etc.)
# location
lat = round(random.uniform(start_lat, stop_lat), 6)
lng = round(random.uniform(start_long, stop_long), 6)
catanDBObj.person_message.person_message = 'Hi Mom'
catanDBObj.person_message.status_gps_latitude = lat
catanDBObj.person_message.status_gps_longitude = lng
catanDBObj.person_message.status_gps_accuracy = 0
# status
catanDBObj.person_message.status = STATUS_LIST[random.randint(0,len(STATUS_LIST)-1)]
catanDBObj.person_message.status_location = 'Test status location'
# generate a NodeMessage for the database
# it only cares about the data and source fields, so we can ignore other fields
nmsg = NodeMessage()
nmsg.source = random.randint(0,31) # random node 0-31
nmsg.data = catanDBObj.pack()
db.update_db(nmsg)
# Create some random updates
for i in range(1,n+1):
update = random.randint(0,1)
if update == 0:
catanDBObj = catan.db.CatanDatabaseObject()
catanDBObj.person_id = i
# location
lat = round(random.uniform(start_lat, stop_lat), 6)
lng = round(random.uniform(start_long, stop_long), 6)
catanDBObj.person_message.person_message = 'Location update 1'
catanDBObj.person_message.status_gps_latitude = lat
catanDBObj.person_message.status_gps_longitude = lng
catanDBObj.person_message.status_gps_accuracy = 0
n = NodeMessage()
n.source = random.randint(0,31)
n.data = catanDBObj.pack()
db.update_db(n)
def populate_db():
db = catan.db.CatanDatabase(0)
# insert some test nodes
# for cambridge
gen_nodes(32, db, 42.354823, 42.368315, -71.114484, -71.084422)
gen_people(100, db, 42.354823, 42.368315, -71.114484, -71.084422)
cmd = ('SELECT '
'db_person_bio.person_id, '
'db_person_bio.origin_node_id, '
'db_person_bio.name_family, '
'db_person_bio.name_given, '
'db_person_bio.age, '
'db_person_bio.sex, '
'db_person_messages.submission_id, '
'db_person_messages.origin_node_id, '
'db_person_messages.status_gps_latitude, '
'db_person_messages.status_gps_longitude, '
'db_person_messages.status_gps_accuracy, '
'db_person_messages.status, '
'db_person_messages.status_location, '
'db_submitter_info.timestamp '
'FROM db_person_bio '
'LEFT JOIN db_person_messages ON db_person_messages.person_id = db_person_bio.person_id '
'LEFT JOIN db_submitter_info ON db_submitter_info.submission_id = db_person_messages.submission_id')
for r in db._sql(cmd).fetchall():
print r
def main(args):
pass
if __name__=='__main__':
populate_db()
| 2.6875 | 3 |
Medium/200.py | Hellofafar/Leetcode | 6 | 4042 | <gh_stars>1-10
# ------------------------------
# 200. Number of Islands
#
# Description:
# Given a 2d grid map of '1's (land) and '0's (water), count the number of islands. An island is surrounded by water and is formed by connecting adjacent lands horizontally or vertically. You may assume all four edges of the grid are all surrounded by water.
#
# Example 1:
# 11110
# 11010
# 11000
# 00000
# Answer: 1
#
# Example 2:
# 11000
# 11000
# 00100
# 00011
# Answer: 3
#
# Version: 1.0
# 11/13/17 by Jianfa
# ------------------------------
class Solution(object):
def numIslands(self, grid):
"""
:type grid: List[List[str]]
:rtype: int
"""
def sink(i, j):
if 0 <= i < len(grid) and 0 <= j < len(grid[0]) and grid[i][j] == "1":
grid[i][j] = "0"
                # Flood-fill the four neighbours explicitly (Python 3's map is
                # lazy, so the original map(...) call would never run there).
                for ni, nj in ((i + 1, j), (i - 1, j), (i, j + 1), (i, j - 1)):
                    sink(ni, nj)
return 1
return 0
return sum(sink(i, j) for i in range(len(grid)) for j in range(len(grid[i])))
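        # Each land cell is flipped to "0" at most once, so the flood fill runs
        # in O(m*n) time overall; recursion depth can reach O(m*n) in the worst
        # case (e.g. an all-land grid), which may hit Python's recursion limit.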
# ------------------------------
# Summary:
# Copied from discussion.
# The following is another easy understanding idea:
#
# class Solution(object):
# def numIslands(self, grid):
# """
# :type grid: List[List[str]]
# :rtype: int
# """
# if len(grid) == 0: return 0
# m = len(grid)
# n = len(grid[0])
# res = 0
# for i in range(m):
# for j in range(n):
# if grid[i][j] == '1':
# res += 1
# grid[i][j] = '2'
# self.island(i, j, grid, m, n)
# return res
# def island(self, x, y, grid, m, n):
# if x + 1 < m and grid[x+1][y] == '1':
# grid[x+1][y] = '2'
# self.island(x+1,y,grid, m, n)
# if y + 1 < n and grid[x][y+1] == '1':
# grid[x][y+1] = '2'
# self.island(x,y+1,grid, m, n)
# if x -1 >=0 and grid[x-1][y] == '1':
# grid[x-1][y] = '2'
# self.island(x-1,y,grid, m, n)
# if y - 1 >= 0 and grid[x][y-1] == '1':
# grid[x][y-1] = '2'
# self.island(x,y-1,grid, m, n) | 3.828125 | 4 |
tests/formatters/fseventsd.py | SamuelePilleri/plaso | 0 | 4043 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Tests for the fseventsd record event formatter."""
from __future__ import unicode_literals
import unittest
from plaso.formatters import fseventsd
from tests.formatters import test_lib
class FseventsdFormatterTest(test_lib.EventFormatterTestCase):
"""Tests for the fseventsd record event formatter."""
def testInitialization(self):
"""Tests the initialization."""
event_formatter = fseventsd.FSEventsdEventFormatter()
self.assertIsNotNone(event_formatter)
def testGetFormatStringAttributeNames(self):
"""Tests the GetFormatStringAttributeNames function."""
event_formatter = fseventsd.FSEventsdEventFormatter()
expected_attribute_names = [
u'event_identifier', u'flag_values', u'hex_flags', u'path']
self._TestGetFormatStringAttributeNames(
event_formatter, expected_attribute_names)
# TODO: add test for GetSources.
if __name__ == '__main__':
unittest.main()
| 2.484375 | 2 |
train.py | Farzin-Negahbani/PathoNet | 0 | 4044 | from keras.callbacks import ModelCheckpoint,Callback,LearningRateScheduler,TensorBoard
from keras.models import load_model
import random
import numpy as np
from scipy import misc
import gc
from keras.optimizers import Adam
from imageio import imread
from datetime import datetime
import os
import json
import models
from utils import DataLoader, LrPolicy
from config import Config
import argparse
def get_parser():
parser = argparse.ArgumentParser('train')
parser.add_argument('--configPath', '-c', required=True)
return parser
def train(args=None):
parser = get_parser()
args = parser.parse_args(args)
conf=Config()
conf.load(args.configPath)
time=datetime.now().strftime('%Y-%m-%d_%H-%M-%S')
trainString="%s_%s_%s_%s" % (conf.model,conf.optimizer,str(conf.lr),time)
os.makedirs(conf.logPath+"/"+trainString)
conf.save(conf.logPath+"/"+trainString+'/config.json')
print('Compiling model...')
model_checkpoint = ModelCheckpoint(conf.logPath+"/"+trainString+'/Checkpoint-{epoch:02d}-{val_loss:.2f}.hdf5', monitor='val_loss', save_best_only=False, save_weights_only=True)
change_lr = LearningRateScheduler(LrPolicy(conf.lr).stepDecay)
tbCallBack=TensorBoard(log_dir=conf.logPath+"/"+trainString+'/logs', histogram_freq=0, write_graph=True, write_images=True)
model=models.modelCreator(conf.model,conf.inputShape,conf.classes,conf.pretrainedModel)
model.compile(optimizer = conf.optimizer, loss = conf.loss)
data = [conf.trainDataPath+"/"+f for f in os.listdir(conf.trainDataPath) if '.jpg' in f]
random.shuffle(data)
thr=int(len(data)*conf.validationSplit)
trainData=data[thr:]
valData=data[:thr]
trainDataLoader=DataLoader(conf.batchSize,conf.inputShape,trainData,conf.guaMaxValue)
validationDataLoader=DataLoader(conf.batchSize,conf.inputShape,valData,conf.guaMaxValue)
print('Fitting model...')
model.fit_generator(generator=trainDataLoader.generator(),
validation_data=validationDataLoader.generator(),
steps_per_epoch=len(trainData)//conf.batchSize,
validation_steps=len(valData)//conf.batchSize,
epochs=conf.epoches,
verbose=1,
initial_epoch=0,
callbacks = [model_checkpoint, change_lr,tbCallBack]
)
if __name__ == "__main__":
train()
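# Example invocation (illustrative; the config path is hypothetical):
#   python train.py --configPath configs/train_config.json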
| 2.03125 | 2 |
tests/chainer_tests/functions_tests/array_tests/test_flatten.py | mingxiaoh/chainer-v3 | 7 | 4045 | import unittest
import numpy
import chainer
from chainer import cuda
from chainer import functions
from chainer import gradient_check
from chainer import testing
from chainer.testing import attr
@testing.parameterize(*testing.product({
'shape': [(3, 4), ()],
'dtype': [numpy.float16, numpy.float32, numpy.float64],
}))
class TestFlatten(unittest.TestCase):
dtype = numpy.float32
def setUp(self):
self.x = numpy.random.uniform(-1, 1, self.shape).astype(self.dtype)
self.g_shape = (numpy.prod((1,) + self.shape),)
self.g = numpy.random.uniform(-1, 1, self.g_shape).astype(self.dtype)
def check_forward(self, x_data):
x = chainer.Variable(x_data)
y = functions.flatten(x)
self.assertEqual(y.shape, self.g_shape)
self.assertEqual(y.dtype, self.dtype)
testing.assert_allclose(self.x.flatten(), y.data)
def test_forward_cpu(self):
self.check_forward(self.x)
@attr.gpu
def test_forward_gpu(self):
self.check_forward(cuda.to_gpu(self.x))
def check_backward(self, x_data, g_data):
gradient_check.check_backward(
functions.Flatten(), x_data, g_data, dtype=numpy.float64)
def test_backward_cpu(self):
self.check_backward(self.x, self.g)
@attr.gpu
def test_backward_gpu(self):
self.check_backward(cuda.to_gpu(self.x), cuda.to_gpu(self.g))
testing.run_module(__name__, __file__)
| 2.59375 | 3 |
categories/migrations/0001_initial.py | snoop2head/exercise_curation_django | 3 | 4046 | # Generated by Django 3.0.3 on 2020-03-24 09:59
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
('exercises', '0018_photo_file'),
]
operations = [
migrations.CreateModel(
name='Category',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created', models.DateTimeField(auto_now_add=True)),
('updated', models.DateTimeField(auto_now=True)),
('name', models.CharField(max_length=80)),
('description', models.TextField(blank=True)),
('exercises', models.ForeignKey(blank=True, on_delete=django.db.models.deletion.CASCADE, related_name='categories', to='exercises.Exercise')),
],
options={
'abstract': False,
},
),
migrations.CreateModel(
name='Photo',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created', models.DateTimeField(auto_now_add=True)),
('updated', models.DateTimeField(auto_now=True)),
('image_url', models.URLField()),
('image_caption', models.CharField(blank=True, max_length=80)),
('category', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='photos', to='categories.Category')),
],
options={
'abstract': False,
},
),
]
| 1.78125 | 2 |
src/metarl/envs/dm_control/dm_control_env.py | neurips2020submission11699/metarl | 2 | 4047 | <filename>src/metarl/envs/dm_control/dm_control_env.py<gh_stars>1-10
from dm_control import suite
from dm_control.rl.control import flatten_observation
from dm_env import StepType
import gym
import numpy as np
from metarl.envs import Step
from metarl.envs.dm_control.dm_control_viewer import DmControlViewer
class DmControlEnv(gym.Env):
"""
Binding for `dm_control <https://arxiv.org/pdf/1801.00690.pdf>`_
"""
def __init__(self, env, name=None):
self._name = name or type(env.task).__name__
self._env = env
self._viewer = None
@classmethod
def from_suite(cls, domain_name, task_name):
return cls(suite.load(domain_name, task_name),
name='{}.{}'.format(domain_name, task_name))
def step(self, action):
time_step = self._env.step(action)
return Step(
flatten_observation(time_step.observation)['observations'],
time_step.reward, time_step.step_type == StepType.LAST,
**time_step.observation)
def reset(self):
time_step = self._env.reset()
return flatten_observation(time_step.observation)['observations']
def render(self, mode='human'):
# pylint: disable=inconsistent-return-statements
if mode == 'human':
if not self._viewer:
title = 'dm_control {}'.format(self._name)
self._viewer = DmControlViewer(title=title)
self._viewer.launch(self._env)
self._viewer.render()
return None
elif mode == 'rgb_array':
return self._env.physics.render()
else:
raise NotImplementedError
def close(self):
if self._viewer:
self._viewer.close()
self._env.close()
self._viewer = None
self._env = None
def _flat_shape(self, observation):
return np.sum(int(np.prod(v.shape)) for k, v in observation.items())
@property
def action_space(self):
action_spec = self._env.action_spec()
if (len(action_spec.shape) == 1) and (-np.inf in action_spec.minimum or
np.inf in action_spec.maximum):
return gym.spaces.Discrete(np.prod(action_spec.shape))
else:
return gym.spaces.Box(action_spec.minimum,
action_spec.maximum,
dtype=np.float32)
@property
def observation_space(self):
flat_dim = self._flat_shape(self._env.observation_spec())
return gym.spaces.Box(low=-np.inf,
high=np.inf,
shape=[flat_dim],
dtype=np.float32)
def __getstate__(self):
d = self.__dict__.copy()
d['_viewer'] = None
return d
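# Usage sketch (not part of the original module): drive the wrapper end to end.
# The 'cartpole'/'balance' domain/task names are assumptions taken from the
# dm_control suite; any other valid pair would work the same way.
if __name__ == '__main__':
    env = DmControlEnv.from_suite('cartpole', 'balance')
    obs = env.reset()
    for _ in range(10):
        env.step(env.action_space.sample())  # returns a Step with observation, reward and a done flag
    env.close()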
| 2.296875 | 2 |
python_modules/lakehouse/lakehouse/snowflake_table.py | vatervonacht/dagster | 3 | 4048 | from dagster import check
from .house import Lakehouse
from .table import create_lakehouse_table_def
class SnowflakeLakehouse(Lakehouse):
def __init__(self):
pass
def hydrate(self, _context, _table_type, _table_metadata, table_handle, _dest_metadata):
return None
def materialize(self, context, table_type, table_metadata, value):
return None, None
def snowflake_table(
name=None,
input_tables=None,
other_input_defs=None,
tags=None,
required_resource_keys=None,
description=None,
):
tags = check.opt_dict_param(tags, 'tags')
tags['lakehouse_type'] = 'snowflake_table'
tags['kind'] = 'snowflake'
required_resource_keys = check.opt_set_param(required_resource_keys, 'required_resource_keys')
required_resource_keys.add('snowflake')
if callable(name):
fn = name
return create_lakehouse_table_def(
name=fn.__name__,
lakehouse_fn=fn,
input_tables=[],
required_resource_keys=required_resource_keys,
)
def _wrap(fn):
return create_lakehouse_table_def(
name=name if name is not None else fn.__name__,
lakehouse_fn=fn,
input_tables=input_tables,
other_input_defs=other_input_defs,
tags=tags,
description=description,
required_resource_keys=required_resource_keys,
)
return _wrap
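# Usage sketch (not part of the original module): the decorator can be applied
# bare or with arguments, as handled above. The table name and body here are
# hypothetical placeholders, not taken from this file.
if __name__ == '__main__':
    @snowflake_table
    def example_table(_context):
        # a real lakehouse function would return the table's contents
        return None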
| 2.265625 | 2 |
pype/plugins/maya/publish/validate_look_no_default_shaders.py | tokejepsen/pype | 0 | 4049 | <reponame>tokejepsen/pype
from maya import cmds
import pyblish.api
import pype.api
import pype.maya.action
class ValidateLookNoDefaultShaders(pyblish.api.InstancePlugin):
"""Validate if any node has a connection to a default shader.
This checks whether the look has any members of:
- lambert1
- initialShadingGroup
- initialParticleSE
- particleCloud1
If any of those is present it will raise an error. A look is not allowed
to have any of the "default" shaders present in a scene as they can
introduce problems when referenced (overriding local scene shaders).
To fix this no shape nodes in the look must have any of default shaders
applied.
"""
order = pype.api.ValidateContentsOrder + 0.01
families = ['look']
hosts = ['maya']
label = 'Look No Default Shaders'
actions = [pype.maya.action.SelectInvalidAction]
DEFAULT_SHADERS = {"lambert1", "initialShadingGroup",
"initialParticleSE", "particleCloud1"}
def process(self, instance):
"""Process all the nodes in the instance"""
invalid = self.get_invalid(instance)
if invalid:
raise RuntimeError("Invalid node relationships found: "
"{0}".format(invalid))
@classmethod
def get_invalid(cls, instance):
invalid = set()
for node in instance:
# Get shading engine connections
shaders = cmds.listConnections(node, type="shadingEngine") or []
# Check for any disallowed connections on *all* nodes
if any(s in cls.DEFAULT_SHADERS for s in shaders):
# Explicitly log each individual "wrong" connection.
for s in shaders:
if s in cls.DEFAULT_SHADERS:
cls.log.error("Node has unallowed connection to "
"'{}': {}".format(s, node))
invalid.add(node)
return list(invalid)
| 2.390625 | 2 |
data_science_app/app.py | Johne-DuChene/data_science_learning_app | 0 | 4050 | <gh_stars>0
from flask import Flask
# initialize the app
app = Flask(__name__)
# execute iris function at /iris route
@app.route("/iris")
def iris():
from sklearn.datasets import load_iris
from sklearn.linear_model import LogisticRegression
X, y = load_iris(return_X_y=True)
clf = LogisticRegression(
random_state = 42,
solver="lbfgs",
multi_class="multinomial"
).fit(X, y)
return str(clf.predict(X[:2, :])) | 2.515625 | 3 |
vbdiar/scoring/normalization.py | VarunSrivastava19/VBDiarization | 101 | 4051 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2018 Brno University of Technology FIT
# Author: <NAME> <<EMAIL>>
# All Rights Reserved
import os
import logging
import pickle
import multiprocessing
import numpy as np
from sklearn.metrics.pairwise import cosine_similarity
from vbdiar.features.segments import get_frames_from_time
from vbdiar.embeddings.embedding import extract_embeddings
from vbdiar.utils import mkdir_p
from vbdiar.utils.utils import Utils
logger = logging.getLogger(__name__)
def process_files(fns, speakers_dict, features_extractor, embedding_extractor,
audio_dir, wav_suffix, in_rttm_dir, rttm_suffix, min_length, n_jobs=1):
"""
Args:
fns:
speakers_dict:
features_extractor:
embedding_extractor:
audio_dir:
wav_suffix:
in_rttm_dir:
rttm_suffix:
min_length:
n_jobs:
Returns:
"""
kwargs = dict(speakers_dict=speakers_dict, features_extractor=features_extractor,
embedding_extractor=embedding_extractor, audio_dir=audio_dir, wav_suffix=wav_suffix,
in_rttm_dir=in_rttm_dir, rttm_suffix=rttm_suffix, min_length=min_length)
if n_jobs == 1:
ret = _process_files((fns, kwargs))
else:
pool = multiprocessing.Pool(n_jobs)
ret = pool.map(_process_files, ((part, kwargs) for part in Utils.partition(fns, n_jobs)))
return ret
def _process_files(dargs):
"""
Args:
dargs:
Returns:
"""
fns, kwargs = dargs
ret = []
for fn in fns:
ret.append(process_file(file_name=fn, **kwargs))
return ret
def process_file(file_name, speakers_dict, features_extractor, embedding_extractor,
audio_dir, wav_suffix, in_rttm_dir, rttm_suffix, min_length):
""" Extract embeddings for all defined speakers.
Args:
file_name (string_types): path to input audio file
speakers_dict (dict): dictionary containing all embedding across speakers
features_extractor (Any):
embedding_extractor (Any):
audio_dir (string_types):
wav_suffix (string_types):
in_rttm_dir (string_types):
rttm_suffix (string_types):
min_length (float):
Returns:
dict: updated dictionary with speakers
"""
logger.info('Processing file `{}`.'.format(file_name.split()[0]))
# extract features from whole audio
features = features_extractor.audio2features(os.path.join(audio_dir, '{}{}'.format(file_name, wav_suffix)))
# process utterances of the speakers
features_dict = {}
with open(f'{os.path.join(in_rttm_dir, file_name)}{rttm_suffix}') as f:
for line in f:
start_time, dur = int(float(line.split()[3]) * 1000), int(float(line.split()[4]) * 1000)
speaker = line.split()[7]
if dur > min_length:
end_time = start_time + dur
start, end = get_frames_from_time(int(start_time)), get_frames_from_time(int(end_time))
if speaker not in features_dict:
features_dict[speaker] = {}
assert 0 <= start < end, \
f'Incorrect timing for extracting features, start: {start}, size: {features.shape[0]}, end: {end}.'
if end >= features.shape[0]:
end = features.shape[0] - 1
features_dict[speaker][(start_time, end_time)] = features[start:end]
for speaker in features_dict:
embedding_set = extract_embeddings(features_dict[speaker], embedding_extractor)
embeddings_long = embedding_set.get_all_embeddings()
if speaker not in speakers_dict.keys():
speakers_dict[speaker] = embeddings_long
else:
speakers_dict[speaker] = np.concatenate((speakers_dict[speaker], embeddings_long), axis=0)
return speakers_dict
class Normalization(object):
""" Speaker normalization S-Norm. """
embeddings = None
in_emb_dir = None
def __init__(self, norm_list, audio_dir=None, in_rttm_dir=None, in_emb_dir=None,
out_emb_dir=None, min_length=None, features_extractor=None, embedding_extractor=None,
plda=None, wav_suffix='.wav', rttm_suffix='.rttm', n_jobs=1):
""" Initialize normalization object.
Args:
norm_list (string_types): path to normalization list
audio_dir (string_types|None): path to audio directory
in_rttm_dir (string_types|None): path to directory with rttm files
in_emb_dir (str|None): path to directory with i-vectors
out_emb_dir (str|None): path to directory for storing embeddings
min_length (int): minimal length for extracting embeddings
features_extractor (Any): object for feature extraction
embedding_extractor (Any): object for extracting embedding
plda (PLDA|None): plda model object
wav_suffix (string_types): suffix of wav files
rttm_suffix (string_types): suffix of rttm files
"""
if audio_dir:
self.audio_dir = os.path.abspath(audio_dir)
self.norm_list = norm_list
if in_rttm_dir:
self.in_rttm_dir = os.path.abspath(in_rttm_dir)
else:
raise ValueError('It is required to have input rttm files for normalization.')
self.features_extractor = features_extractor
self.embedding_extractor = embedding_extractor
self.plda = plda
self.wav_suffix = wav_suffix
self.rttm_suffix = rttm_suffix
if in_emb_dir:
self.in_emb_dir = os.path.abspath(in_emb_dir)
if out_emb_dir:
self.out_emb_dir = os.path.abspath(out_emb_dir)
self.min_length = min_length
self.n_jobs = n_jobs
if self.in_emb_dir is None:
self.embeddings = self.extract_embeddings()
else:
self.embeddings = self.load_embeddings()
self.mean = np.mean(self.embeddings, axis=0)
def __iter__(self):
current = 0
while current < len(self.embeddings):
yield self.embeddings[current]
current += 1
def __getitem__(self, key):
return self.embeddings[key]
def __setitem__(self, key, value):
self.embeddings[key] = value
def __len__(self):
return len(self.embeddings)
def extract_embeddings(self):
""" Extract normalization embeddings using averaging.
Returns:
Tuple[np.array, np.array]: vectors for individual speakers, global mean over all speakers
"""
speakers_dict, fns = {}, []
with open(self.norm_list) as f:
for line in f:
if len(line.split()) > 1: # number of speakers is defined
line = line.split()[0]
else:
line = line.replace(os.linesep, '')
fns.append(line)
speakers_dict = process_files(fns, speakers_dict=speakers_dict, features_extractor=self.features_extractor,
embedding_extractor=self.embedding_extractor, audio_dir=self.audio_dir,
wav_suffix=self.wav_suffix, in_rttm_dir=self.in_rttm_dir,
rttm_suffix=self.rttm_suffix, min_length=self.min_length, n_jobs=self.n_jobs)
assert len(speakers_dict) == len(fns)
# all are the same
merged_speakers_dict = speakers_dict[0]
if self.out_emb_dir:
for speaker in merged_speakers_dict:
out_path = os.path.join(self.out_emb_dir, f'{speaker}.pkl')
mkdir_p(os.path.dirname(out_path))
with open(out_path, 'wb') as f:
pickle.dump(merged_speakers_dict[speaker], f, pickle.HIGHEST_PROTOCOL)
for speaker in merged_speakers_dict:
merged_speakers_dict[speaker] = np.mean(merged_speakers_dict[speaker], axis=0)
return np.array(list(merged_speakers_dict.values()))
def load_embeddings(self):
""" Load normalization embeddings from pickle files.
Returns:
np.array: embeddings per speaker
"""
embeddings, speakers = [], set()
with open(self.norm_list) as f:
for file_name in f:
if len(file_name.split()) > 1: # number of speakers is defined
file_name = file_name.split()[0]
else:
file_name = file_name.replace(os.linesep, '')
with open('{}{}'.format(os.path.join(self.in_rttm_dir, file_name), self.rttm_suffix)) as fp:
for line in fp:
speakers.add(line.split()[7])
logger.info('Loading pickled normalization embeddings from `{}`.'.format(self.in_emb_dir))
for speaker in speakers:
embedding_path = os.path.join(self.in_emb_dir, '{}.pkl'.format(speaker))
if os.path.isfile(embedding_path):
logger.info('Loading normalization pickle file `{}`.'.format(speaker))
with open(embedding_path, 'rb') as f:
# append mean from speaker's embeddings
speaker_embeddings = pickle.load(f)
embeddings.append(np.mean(speaker_embeddings, axis=0))
else:
logger.warning('No pickle file found for `{}` in `{}`.'.format(speaker, self.in_emb_dir))
return np.array(embeddings)
def s_norm(self, test, enroll):
""" Run speaker normalization (S-Norm) on cached embeddings.
Args:
test (np.array): test embedding
enroll (np.array): enroll embedding
Returns:
float: hypothesis
"""
if self.plda:
a = self.plda.score(test, self.embeddings).T
b = self.plda.score(enroll, self.embeddings).T
c = self.plda.score(enroll, test).T
else:
a = cosine_similarity(test, self.embeddings).T
b = cosine_similarity(enroll, self.embeddings).T
c = cosine_similarity(enroll, test).T
scores = []
for ii in range(test.shape[0]):
test_scores = []
for jj in range(enroll.shape[0]):
test_mean, test_std = np.mean(a.T[ii]), np.std(a.T[ii])
enroll_mean, enroll_std = np.mean(b.T[jj]), np.std(b.T[jj])
s = c[ii][jj]
test_scores.append((((s - test_mean) / test_std + (s - enroll_mean) / enroll_std) / 2))
scores.append(test_scores)
return np.array(scores)
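# Standalone sketch of the score normalization performed in s_norm() above: each
# raw trial score is z-normalized against the test-side and enroll-side cohort
# scores and the two are averaged. Plain-numpy illustration, not part of the
# original class.
def snorm_score(s, test_cohort_scores, enroll_cohort_scores):
    """Symmetrically normalize a single trial score `s` (illustrative helper)."""
    zt = (s - np.mean(test_cohort_scores)) / np.std(test_cohort_scores)
    ze = (s - np.mean(enroll_cohort_scores)) / np.std(enroll_cohort_scores)
    return 0.5 * (zt + ze)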
| 2.046875 | 2 |
agent_based_models/abm_allelopathy/plot_data.py | mattsmart/biomodels | 0 | 4052 | <filename>agent_based_models/abm_allelopathy/plot_data.py
import matplotlib.pyplot as plt
import os
def data_plotter(lattice_dict, datafile_dir, plot_dir):
# total spaces on grid implies grid size
total_cells = lattice_dict['E'][0] + lattice_dict['D_a'][0] + lattice_dict['D_b'][0] + lattice_dict['B'][0]
n = int(total_cells**0.5)
plt.figure(1)
plt.plot(lattice_dict['time'], lattice_dict['E'], label='Empty lattice points')
plt.plot(lattice_dict['time'], lattice_dict['D_a'], label='Donors (Type A)')
plt.plot(lattice_dict['time'], lattice_dict['D_b'], label='Donors (Type B)')
plt.plot(lattice_dict['time'], lattice_dict['B'], label='Debris')
ax = plt.gca()
ax.set_title('Cell Populations over time (n = %d)' % n)
ax.set_ylabel('Number of cells')
ax.set_xlabel('Time (h)')
plt.legend()
f = plt.gcf()
f.set_size_inches(20.0, 8.0) # alternative: 20.0, 8.0
f.tight_layout()
plt.savefig(os.path.join(plot_dir, 'population_vs_time.png'))
plt.clf()
return
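# Usage sketch (not part of the original module): data_plotter expects a dict
# keyed by 'time', 'E', 'D_a', 'D_b' and 'B'; the tiny synthetic series below
# is made up purely for illustration.
if __name__ == '__main__':
    demo = {'time': [0, 1, 2],
            'E': [100, 90, 80],
            'D_a': [0, 5, 10],
            'D_b': [0, 5, 10],
            'B': [0, 0, 0]}
    data_plotter(demo, datafile_dir='.', plot_dir='.')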
| 2.34375 | 2 |
azure-mgmt-network/azure/mgmt/network/v2018_10_01/models/virtual_wan_security_providers.py | JonathanGailliez/azure-sdk-for-python | 1 | 4053 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class VirtualWanSecurityProviders(Model):
"""Collection of SecurityProviders.
:param supported_providers:
:type supported_providers:
list[~azure.mgmt.network.v2018_10_01.models.VirtualWanSecurityProvider]
"""
_attribute_map = {
'supported_providers': {'key': 'supportedProviders', 'type': '[VirtualWanSecurityProvider]'},
}
def __init__(self, **kwargs):
super(VirtualWanSecurityProviders, self).__init__(**kwargs)
self.supported_providers = kwargs.get('supported_providers', None)
| 1.867188 | 2 |
jsonresume_theme_stackoverflow/filters.py | flowgunso/jsonresume-theme-stackoverflow | 0 | 4054 | import datetime
import re
from .exceptions import ObjectIsNotADate
def format_date(value, format="%d %m %Y"):
regex = re.match(r"(?P<year>\d{4})-(?P<month>\d{2})-(?P<day>\d{2})", value)
if regex is not None:
date = datetime.date(
int(regex.group("year")),
int(regex.group("month")),
int(regex.group("day")))
else:
raise ObjectIsNotADate
return date.strftime(format)
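# Usage sketch (not part of the original module): the filter accepts ISO-style
# dates and an optional strftime pattern.
if __name__ == '__main__':
    print(format_date('2020-03-24', '%d %m %Y'))  # '24 03 2020'
    print(format_date('2020-03-24', '%B %Y'))     # 'March 2020'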
| 3.484375 | 3 |
ipec/data/core.py | wwwbbb8510/ippso | 9 | 4055 | import numpy as np
import os
import logging
from sklearn.model_selection import train_test_split
DATASET_ROOT_FOLDER = os.path.abspath('datasets')
class DataLoader:
train = None
validation = None
test = None
mode = None
partial_dataset = None
@staticmethod
def load(train_path=None, validation_path=None, test_path=None, height=28, length=28, train_validation_split_point=10000):
if train_path is not None:
DataLoader.train = DataLoader.load_image_data_with_label_at_end(
os.path.join(DATASET_ROOT_FOLDER, train_path), height=height, length=length)
if validation_path is not None:
DataLoader.validation = DataLoader.load_image_data_with_label_at_end(
os.path.join(DATASET_ROOT_FOLDER, validation_path), height=height, length=length)
elif train_validation_split_point is not None and train_validation_split_point > 0:
if DataLoader.mode is None or DataLoader.partial_dataset is not None:
train_validation_split_point = int(DataLoader.train['images'].shape[0] * 0.8)
splited_train = {
'images': DataLoader.train['images'][0:train_validation_split_point, :, :, :],
'labels': DataLoader.train['labels'][0:train_validation_split_point]
}
splited_validation = {
'images': DataLoader.train['images'][train_validation_split_point:, :, :, :],
'labels': DataLoader.train['labels'][train_validation_split_point:]
}
DataLoader.train = splited_train
DataLoader.validation = splited_validation
if test_path is not None:
DataLoader.test = DataLoader.load_image_data_with_label_at_end(os.path.join(DATASET_ROOT_FOLDER, test_path), height=height, length=length)
logging.debug('Training data shape:{}'.format(str(DataLoader.train['images'].shape)))
logging.debug('Validation data shape:{}'.format(str(DataLoader.validation['images'].shape)))
logging.debug('Test data shape:{}'.format(str(DataLoader.test['images'].shape)))
return DataLoader
@staticmethod
def get_training_data():
"""
get training data
:return: dict of (images, labels)
:rtype: dict
"""
images = DataLoader.train.images
labels = DataLoader.train.labels
return {
'images': images,
'labels': labels
}
@staticmethod
def get_validation_data():
"""
get validation data
:return: dict of (images, labels)
:rtype: dict
"""
images = DataLoader.validation.images
labels = DataLoader.validation.labels
return {
'images': images,
'labels': labels
}
@staticmethod
def get_test_data():
"""
get test data
:return: dict of (images, labels)
:rtype: dict
"""
images = DataLoader.test.images
labels = DataLoader.test.labels
return {
'images': images,
'labels': labels
}
@staticmethod
def load_image_data_with_label_at_end(path, height, length):
data = np.loadtxt(path)
if DataLoader.mode is None:
data = data[0:1000, :]
elif DataLoader.partial_dataset is not None and DataLoader.partial_dataset > 0 and DataLoader.partial_dataset <1:
# randomly pick partial dataset
cut_point = int(data.shape[0] * DataLoader.partial_dataset)
indices = np.random.permutation(data.shape[0])
training_idx= indices[:cut_point]
data = data[training_idx, :]
images = data[:, 0:-1]
labels = data[:, -1]
images = np.reshape(images, [images.shape[0], height, length, 1], order='F')
return {
'images': images,
'labels': labels
}
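# Usage sketch (not part of the original module): the file names below are
# assumptions; each whitespace-separated text file is expected to hold one
# flattened image per row with the class label in the last column, resolved
# relative to DATASET_ROOT_FOLDER.
if __name__ == '__main__':
    loader = DataLoader.load(train_path='mnist_train.txt',
                             test_path='mnist_test.txt',
                             height=28, length=28)
    print(loader.train['images'].shape, loader.validation['images'].shape)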
| 2.625 | 3 |
FOR/Analisador-completo/main.py | lucasf5/Python | 1 | 4056 | <filename>FOR/Analisador-completo/main.py<gh_stars>1-10
# Python Exercise 56: Write a program that reads the name, age and sex of 4 people. At the end of the program, show: the group's average age, the name of the oldest man, and how many women are under 20 years old.
mediaidade = ''
nomelista = []
idadelista = []
sexolista = []
homens = []
mulherescommenosde20 = 0
nomedelas = []
# -------------------------------------------------------------------
for i in range(1,5):
    print(f'PERSON {i}')
    nome = (input('Your name: '))
    idade = int(input('Your age: '))
    sexo = int(input('Sex? [0]Male [1]Female: '))
if sexo == 1 and idade < 20:
nomedelas.append(nome)
mulherescommenosde20 += 1
elif sexo == 0:
homens.append(nome)
    # Added every age to a list
idadelista.append(idade)
    # Took the average of those ages // First part
mediaidade = ((sum(idadelista))/4)
    # Added every name to a list
nomelista.append(nome)
# -------------------------------------------------------------------
# Stored in maximo the largest value found in the list
maximo = max(idadelista)
# Stored in indexidade the INDEX of the largest value
indexidade = idadelista.index(maximo)
# Stored in indexnome the name of the person with the highest age
indexnome = nomelista[indexidade]
# -------------------------------------------------------------------
print(f'The average age of the group is: {mediaidade}')
print(f'The oldest person, aged {maximo}, is: {indexnome}')
print(f'Women under 20 years old: {mulherescommenosde20}, namely: {nomedelas}')
| 3.71875 | 4 |
test/python/quantum_info/operators/test_operator.py | EnriqueL8/qiskit-terra | 2 | 4057 | <reponame>EnriqueL8/qiskit-terra<filename>test/python/quantum_info/operators/test_operator.py
# -*- coding: utf-8 -*-
# This code is part of Qiskit.
#
# (C) Copyright IBM 2017, 2019.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
# pylint: disable=invalid-name
"""Tests for Operator matrix linear operator class."""
import unittest
import logging
import copy
import numpy as np
from numpy.testing import assert_allclose
import scipy.linalg as la
from qiskit import QiskitError
from qiskit import QuantumRegister, ClassicalRegister, QuantumCircuit
from qiskit.extensions.standard import HGate, CHGate, CXGate
from qiskit.test import QiskitTestCase
from qiskit.quantum_info.operators.operator import Operator
from qiskit.quantum_info.operators.predicates import matrix_equal
logger = logging.getLogger(__name__)
class OperatorTestCase(QiskitTestCase):
"""Test utils for Operator"""
# Pauli-matrix unitaries
UI = np.eye(2)
UX = np.array([[0, 1], [1, 0]])
UY = np.array([[0, -1j], [1j, 0]])
UZ = np.diag([1, -1])
UH = np.array([[1, 1], [1, -1]]) / np.sqrt(2)
@classmethod
def rand_rho(cls, n):
"""Return random density matrix"""
seed = np.random.randint(0, np.iinfo(np.int32).max)
logger.debug("rand_rho RandomState seeded with seed=%s", seed)
rng = np.random.RandomState(seed)
psi = rng.rand(n) + 1j * rng.rand(n)
rho = np.outer(psi, psi.conj())
rho /= np.trace(rho)
return rho
@classmethod
def rand_matrix(cls, rows, cols=None, real=False):
"""Return a random matrix."""
seed = np.random.randint(0, np.iinfo(np.int32).max)
logger.debug("rand_matrix RandomState seeded with seed=%s", seed)
rng = np.random.RandomState(seed)
if cols is None:
cols = rows
if real:
return rng.rand(rows, cols)
return rng.rand(rows, cols) + 1j * rng.rand(rows, cols)
def simple_circuit_no_measure(self):
"""Return a unitary circuit and the corresponding unitary array."""
qr = QuantumRegister(3)
circ = QuantumCircuit(qr)
circ.h(qr[0])
circ.x(qr[1])
circ.ry(np.pi / 2, qr[2])
y90 = (1 / np.sqrt(2)) * np.array([[1, -1], [1, 1]])
target = Operator(np.kron(y90, np.kron(self.UX, self.UH)))
return circ, target
def simple_circuit_with_measure(self):
"""Return a unitary circuit with measurement."""
qr = QuantumRegister(2)
cr = ClassicalRegister(2)
circ = QuantumCircuit(qr, cr)
circ.h(qr[0])
circ.x(qr[1])
circ.measure(qr, cr)
return circ
class TestOperator(OperatorTestCase):
"""Tests for Operator linear operator class."""
def test_init_array_qubit(self):
"""Test subsystem initialization from N-qubit array."""
# Test automatic inference of qubit subsystems
mat = self.rand_matrix(8, 8)
op = Operator(mat)
assert_allclose(op.data, mat)
self.assertEqual(op.dim, (8, 8))
self.assertEqual(op.input_dims(), (2, 2, 2))
self.assertEqual(op.output_dims(), (2, 2, 2))
op = Operator(mat, input_dims=8, output_dims=8)
assert_allclose(op.data, mat)
self.assertEqual(op.dim, (8, 8))
self.assertEqual(op.input_dims(), (2, 2, 2))
self.assertEqual(op.output_dims(), (2, 2, 2))
def test_init_array(self):
"""Test initialization from array."""
mat = np.eye(3)
op = Operator(mat)
assert_allclose(op.data, mat)
self.assertEqual(op.dim, (3, 3))
self.assertEqual(op.input_dims(), (3,))
self.assertEqual(op.output_dims(), (3,))
mat = self.rand_matrix(2 * 3 * 4, 4 * 5)
op = Operator(mat, input_dims=[4, 5], output_dims=[2, 3, 4])
assert_allclose(op.data, mat)
self.assertEqual(op.dim, (4 * 5, 2 * 3 * 4))
self.assertEqual(op.input_dims(), (4, 5))
self.assertEqual(op.output_dims(), (2, 3, 4))
def test_init_array_except(self):
"""Test initialization exception from array."""
mat = self.rand_matrix(4, 4)
self.assertRaises(QiskitError, Operator, mat, input_dims=[4, 2])
self.assertRaises(QiskitError, Operator, mat, input_dims=[2, 4])
self.assertRaises(QiskitError, Operator, mat, input_dims=5)
def test_init_operator(self):
"""Test initialization from Operator."""
op1 = Operator(self.rand_matrix(4, 4))
op2 = Operator(op1)
self.assertEqual(op1, op2)
def test_circuit_init(self):
"""Test initialization from a circuit."""
# Test tensor product of 1-qubit gates
circuit = QuantumCircuit(3)
circuit.h(0)
circuit.x(1)
circuit.ry(np.pi / 2, 2)
op = Operator(circuit)
y90 = (1 / np.sqrt(2)) * np.array([[1, -1], [1, 1]])
target = np.kron(y90, np.kron(self.UX, self.UH))
global_phase_equivalent = matrix_equal(
op.data, target, ignore_phase=True)
self.assertTrue(global_phase_equivalent)
# Test decomposition of Controlled-u1 gate
lam = np.pi / 4
circuit = QuantumCircuit(2)
circuit.cu1(lam, 0, 1)
op = Operator(circuit)
target = np.diag([1, 1, 1, np.exp(1j * lam)])
global_phase_equivalent = matrix_equal(
op.data, target, ignore_phase=True)
self.assertTrue(global_phase_equivalent)
# Test decomposition of controlled-H gate
circuit = QuantumCircuit(2)
circuit.ch(0, 1)
op = Operator(circuit)
target = np.kron(self.UI, np.diag([1, 0])) + np.kron(
self.UH, np.diag([0, 1]))
global_phase_equivalent = matrix_equal(
op.data, target, ignore_phase=True)
self.assertTrue(global_phase_equivalent)
def test_instruction_init(self):
"""Test initialization from a circuit."""
gate = CXGate()
op = Operator(gate).data
target = gate.to_matrix()
global_phase_equivalent = matrix_equal(op, target, ignore_phase=True)
self.assertTrue(global_phase_equivalent)
gate = CHGate()
op = Operator(gate).data
had = HGate().to_matrix()
target = np.kron(had, np.diag([0, 1])) + np.kron(
np.eye(2), np.diag([1, 0]))
global_phase_equivalent = matrix_equal(op, target, ignore_phase=True)
self.assertTrue(global_phase_equivalent)
def test_circuit_init_except(self):
"""Test initialization from circuit with measure raises exception."""
circuit = self.simple_circuit_with_measure()
self.assertRaises(QiskitError, Operator, circuit)
def test_equal(self):
"""Test __eq__ method"""
mat = self.rand_matrix(2, 2, real=True)
self.assertEqual(Operator(np.array(mat, dtype=complex)),
Operator(mat))
mat = self.rand_matrix(4, 4)
self.assertEqual(Operator(mat.tolist()),
Operator(mat))
def test_data(self):
"""Test Operator representation string property."""
mat = self.rand_matrix(2, 2)
op = Operator(mat)
assert_allclose(mat, op.data)
def test_dim(self):
"""Test Operator dim property."""
mat = self.rand_matrix(4, 4)
self.assertEqual(Operator(mat).dim, (4, 4))
self.assertEqual(Operator(mat, input_dims=[4], output_dims=[4]).dim, (4, 4))
self.assertEqual(Operator(mat, input_dims=[2, 2], output_dims=[2, 2]).dim, (4, 4))
def test_input_dims(self):
"""Test Operator input_dims method."""
op = Operator(self.rand_matrix(2 * 3 * 4, 4 * 5),
input_dims=[4, 5], output_dims=[2, 3, 4])
self.assertEqual(op.input_dims(), (4, 5))
self.assertEqual(op.input_dims(qargs=[0, 1]), (4, 5))
self.assertEqual(op.input_dims(qargs=[1, 0]), (5, 4))
self.assertEqual(op.input_dims(qargs=[0]), (4,))
self.assertEqual(op.input_dims(qargs=[1]), (5,))
def test_output_dims(self):
"""Test Operator output_dims method."""
op = Operator(self.rand_matrix(2 * 3 * 4, 4 * 5),
input_dims=[4, 5], output_dims=[2, 3, 4])
self.assertEqual(op.output_dims(), (2, 3, 4))
self.assertEqual(op.output_dims(qargs=[0, 1, 2]), (2, 3, 4))
self.assertEqual(op.output_dims(qargs=[2, 1, 0]), (4, 3, 2))
self.assertEqual(op.output_dims(qargs=[2, 0, 1]), (4, 2, 3))
self.assertEqual(op.output_dims(qargs=[0]), (2,))
self.assertEqual(op.output_dims(qargs=[1]), (3,))
self.assertEqual(op.output_dims(qargs=[2]), (4,))
self.assertEqual(op.output_dims(qargs=[0, 2]), (2, 4))
self.assertEqual(op.output_dims(qargs=[2, 0]), (4, 2))
def test_reshape(self):
"""Test Operator reshape method."""
op = Operator(self.rand_matrix(8, 8))
reshaped1 = op.reshape(input_dims=[8], output_dims=[8])
reshaped2 = op.reshape(input_dims=[4, 2], output_dims=[2, 4])
self.assertEqual(op.output_dims(), (2, 2, 2))
self.assertEqual(op.input_dims(), (2, 2, 2))
self.assertEqual(reshaped1.output_dims(), (8,))
self.assertEqual(reshaped1.input_dims(), (8,))
self.assertEqual(reshaped2.output_dims(), (2, 4))
self.assertEqual(reshaped2.input_dims(), (4, 2))
def test_copy(self):
"""Test Operator copy method"""
mat = np.eye(2)
with self.subTest("Deep copy"):
orig = Operator(mat)
cpy = orig.copy()
cpy._data[0, 0] = 0.0
self.assertFalse(cpy == orig)
with self.subTest("Shallow copy"):
orig = Operator(mat)
clone = copy.copy(orig)
clone._data[0, 0] = 0.0
self.assertTrue(clone == orig)
def test_is_unitary(self):
"""Test is_unitary method."""
# X-90 rotation
X90 = la.expm(-1j * 0.5 * np.pi * np.array([[0, 1], [1, 0]]) / 2)
self.assertTrue(Operator(X90).is_unitary())
# Non-unitary should return false
self.assertFalse(Operator([[1, 0], [0, 0]]).is_unitary())
def test_to_operator(self):
"""Test to_operator method."""
op1 = Operator(self.rand_matrix(4, 4))
op2 = op1.to_operator()
self.assertEqual(op1, op2)
def test_conjugate(self):
"""Test conjugate method."""
matr = self.rand_matrix(2, 4, real=True)
mati = self.rand_matrix(2, 4, real=True)
op = Operator(matr + 1j * mati)
uni_conj = op.conjugate()
self.assertEqual(uni_conj, Operator(matr - 1j * mati))
def test_transpose(self):
"""Test transpose method."""
matr = self.rand_matrix(2, 4, real=True)
mati = self.rand_matrix(2, 4, real=True)
op = Operator(matr + 1j * mati)
uni_t = op.transpose()
self.assertEqual(uni_t, Operator(matr.T + 1j * mati.T))
def test_adjoint(self):
"""Test adjoint method."""
matr = self.rand_matrix(2, 4, real=True)
mati = self.rand_matrix(2, 4, real=True)
op = Operator(matr + 1j * mati)
uni_adj = op.adjoint()
self.assertEqual(uni_adj, Operator(matr.T - 1j * mati.T))
def test_compose_except(self):
"""Test compose different dimension exception"""
self.assertRaises(QiskitError,
Operator(np.eye(2)).compose,
Operator(np.eye(3)))
self.assertRaises(QiskitError, Operator(np.eye(2)).compose, 2)
def test_compose(self):
"""Test compose method."""
op1 = Operator(self.UX)
op2 = Operator(self.UY)
targ = Operator(np.dot(self.UY, self.UX))
self.assertEqual(op1.compose(op2), targ)
self.assertEqual(op1 @ op2, targ)
targ = Operator(np.dot(self.UX, self.UY))
self.assertEqual(op2.compose(op1), targ)
self.assertEqual(op2 @ op1, targ)
def test_dot(self):
"""Test dot method."""
op1 = Operator(self.UY)
op2 = Operator(self.UX)
targ = Operator(np.dot(self.UY, self.UX))
self.assertEqual(op1.dot(op2), targ)
self.assertEqual(op1 * op2, targ)
targ = Operator(np.dot(self.UX, self.UY))
self.assertEqual(op2.dot(op1), targ)
self.assertEqual(op2 * op1, targ)
def test_compose_front(self):
"""Test front compose method."""
opYX = Operator(self.UY).compose(Operator(self.UX), front=True)
matYX = np.dot(self.UY, self.UX)
self.assertEqual(opYX, Operator(matYX))
opXY = Operator(self.UX).compose(Operator(self.UY), front=True)
matXY = np.dot(self.UX, self.UY)
self.assertEqual(opXY, Operator(matXY))
def test_compose_subsystem(self):
"""Test subsystem compose method."""
# 3-qubit operator
mat = self.rand_matrix(8, 8)
mat_a = self.rand_matrix(2, 2)
mat_b = self.rand_matrix(2, 2)
mat_c = self.rand_matrix(2, 2)
op = Operator(mat)
op1 = Operator(mat_a)
op2 = Operator(np.kron(mat_b, mat_a))
op3 = Operator(np.kron(mat_c, np.kron(mat_b, mat_a)))
# op3 qargs=[0, 1, 2]
targ = np.dot(np.kron(mat_c, np.kron(mat_b, mat_a)), mat)
self.assertEqual(op.compose(op3, qargs=[0, 1, 2]), Operator(targ))
self.assertEqual(op.compose(op3([0, 1, 2])), Operator(targ))
self.assertEqual(op @ op3([0, 1, 2]), Operator(targ))
# op3 qargs=[2, 1, 0]
targ = np.dot(np.kron(mat_a, np.kron(mat_b, mat_c)), mat)
self.assertEqual(op.compose(op3, qargs=[2, 1, 0]), Operator(targ))
self.assertEqual(op @ op3([2, 1, 0]), Operator(targ))
# op2 qargs=[0, 1]
targ = np.dot(np.kron(np.eye(2), np.kron(mat_b, mat_a)), mat)
self.assertEqual(op.compose(op2, qargs=[0, 1]), Operator(targ))
self.assertEqual(op @ op2([0, 1]), Operator(targ))
# op2 qargs=[2, 0]
targ = np.dot(np.kron(mat_a, np.kron(np.eye(2), mat_b)), mat)
self.assertEqual(op.compose(op2, qargs=[2, 0]), Operator(targ))
self.assertEqual(op @ op2([2, 0]), Operator(targ))
# op1 qargs=[0]
targ = np.dot(np.kron(np.eye(4), mat_a), mat)
self.assertEqual(op.compose(op1, qargs=[0]), Operator(targ))
self.assertEqual(op @ op1([0]), Operator(targ))
# op1 qargs=[1]
targ = np.dot(np.kron(np.eye(2), np.kron(mat_a, np.eye(2))), mat)
self.assertEqual(op.compose(op1, qargs=[1]), Operator(targ))
self.assertEqual(op @ op1([1]), Operator(targ))
# op1 qargs=[2]
targ = np.dot(np.kron(mat_a, np.eye(4)), mat)
self.assertEqual(op.compose(op1, qargs=[2]), Operator(targ))
self.assertEqual(op @ op1([2]), Operator(targ))
def test_dot_subsystem(self):
"""Test subsystem dot method."""
# 3-qubit operator
mat = self.rand_matrix(8, 8)
mat_a = self.rand_matrix(2, 2)
mat_b = self.rand_matrix(2, 2)
mat_c = self.rand_matrix(2, 2)
op = Operator(mat)
op1 = Operator(mat_a)
op2 = Operator(np.kron(mat_b, mat_a))
op3 = Operator(np.kron(mat_c, np.kron(mat_b, mat_a)))
# op3 qargs=[0, 1, 2]
targ = np.dot(mat, np.kron(mat_c, np.kron(mat_b, mat_a)))
self.assertEqual(op.dot(op3, qargs=[0, 1, 2]), Operator(targ))
self.assertEqual(op * op3([0, 1, 2]), Operator(targ))
# op3 qargs=[2, 1, 0]
targ = np.dot(mat, np.kron(mat_a, np.kron(mat_b, mat_c)))
self.assertEqual(op.dot(op3, qargs=[2, 1, 0]), Operator(targ))
self.assertEqual(op * op3([2, 1, 0]), Operator(targ))
# op2 qargs=[0, 1]
targ = np.dot(mat, np.kron(np.eye(2), np.kron(mat_b, mat_a)))
self.assertEqual(op.dot(op2, qargs=[0, 1]), Operator(targ))
self.assertEqual(op * op2([0, 1]), Operator(targ))
# op2 qargs=[2, 0]
targ = np.dot(mat, np.kron(mat_a, np.kron(np.eye(2), mat_b)))
self.assertEqual(op.dot(op2, qargs=[2, 0]), Operator(targ))
self.assertEqual(op * op2([2, 0]), Operator(targ))
# op1 qargs=[0]
targ = np.dot(mat, np.kron(np.eye(4), mat_a))
self.assertEqual(op.dot(op1, qargs=[0]), Operator(targ))
self.assertEqual(op * op1([0]), Operator(targ))
# op1 qargs=[1]
targ = np.dot(mat, np.kron(np.eye(2), np.kron(mat_a, np.eye(2))))
self.assertEqual(op.dot(op1, qargs=[1]), Operator(targ))
self.assertEqual(op * op1([1]), Operator(targ))
# op1 qargs=[2]
targ = np.dot(mat, np.kron(mat_a, np.eye(4)))
self.assertEqual(op.dot(op1, qargs=[2]), Operator(targ))
self.assertEqual(op * op1([2]), Operator(targ))
def test_compose_front_subsystem(self):
"""Test subsystem front compose method."""
# 3-qubit operator
mat = self.rand_matrix(8, 8)
mat_a = self.rand_matrix(2, 2)
mat_b = self.rand_matrix(2, 2)
mat_c = self.rand_matrix(2, 2)
op = Operator(mat)
op1 = Operator(mat_a)
op2 = Operator(np.kron(mat_b, mat_a))
op3 = Operator(np.kron(mat_c, np.kron(mat_b, mat_a)))
# op3 qargs=[0, 1, 2]
targ = np.dot(mat, np.kron(mat_c, np.kron(mat_b, mat_a)))
self.assertEqual(op.compose(op3, qargs=[0, 1, 2], front=True), Operator(targ))
# op3 qargs=[2, 1, 0]
targ = np.dot(mat, np.kron(mat_a, np.kron(mat_b, mat_c)))
self.assertEqual(op.compose(op3, qargs=[2, 1, 0], front=True), Operator(targ))
# op2 qargs=[0, 1]
targ = np.dot(mat, np.kron(np.eye(2), np.kron(mat_b, mat_a)))
self.assertEqual(op.compose(op2, qargs=[0, 1], front=True), Operator(targ))
# op2 qargs=[2, 0]
targ = np.dot(mat, np.kron(mat_a, np.kron(np.eye(2), mat_b)))
self.assertEqual(op.compose(op2, qargs=[2, 0], front=True), Operator(targ))
# op1 qargs=[0]
targ = np.dot(mat, np.kron(np.eye(4), mat_a))
self.assertEqual(op.compose(op1, qargs=[0], front=True), Operator(targ))
# op1 qargs=[1]
targ = np.dot(mat, np.kron(np.eye(2), np.kron(mat_a, np.eye(2))))
self.assertEqual(op.compose(op1, qargs=[1], front=True), Operator(targ))
# op1 qargs=[2]
targ = np.dot(mat, np.kron(mat_a, np.eye(4)))
self.assertEqual(op.compose(op1, qargs=[2], front=True), Operator(targ))
def test_power(self):
"""Test power method."""
X90 = la.expm(-1j * 0.5 * np.pi * np.array([[0, 1], [1, 0]]) / 2)
op = Operator(X90)
self.assertEqual(op.power(2), Operator([[0, -1j], [-1j, 0]]))
self.assertEqual(op.power(4), Operator(-1 * np.eye(2)))
self.assertEqual(op.power(8), Operator(np.eye(2)))
def test_expand(self):
"""Test expand method."""
mat1 = self.UX
mat2 = np.eye(3, dtype=complex)
mat21 = np.kron(mat2, mat1)
op21 = Operator(mat1).expand(Operator(mat2))
self.assertEqual(op21.dim, (6, 6))
assert_allclose(op21.data, Operator(mat21).data)
mat12 = np.kron(mat1, mat2)
op12 = Operator(mat2).expand(Operator(mat1))
self.assertEqual(op12.dim, (6, 6))
assert_allclose(op12.data, Operator(mat12).data)
def test_tensor(self):
"""Test tensor method."""
mat1 = self.UX
mat2 = np.eye(3, dtype=complex)
mat21 = np.kron(mat2, mat1)
op21 = Operator(mat2).tensor(Operator(mat1))
self.assertEqual(op21.dim, (6, 6))
assert_allclose(op21.data, Operator(mat21).data)
mat12 = np.kron(mat1, mat2)
op12 = Operator(mat1).tensor(Operator(mat2))
self.assertEqual(op12.dim, (6, 6))
assert_allclose(op12.data, Operator(mat12).data)
def test_power_except(self):
"""Test power method raises exceptions."""
op = Operator(self.rand_matrix(3, 3))
# Non-integer power raises error
self.assertRaises(QiskitError, op.power, 0.5)
def test_add(self):
"""Test add method."""
mat1 = self.rand_matrix(4, 4)
mat2 = self.rand_matrix(4, 4)
op1 = Operator(mat1)
op2 = Operator(mat2)
self.assertEqual(op1._add(op2), Operator(mat1 + mat2))
self.assertEqual(op1 + op2, Operator(mat1 + mat2))
self.assertEqual(op1 - op2, Operator(mat1 - mat2))
def test_add_except(self):
"""Test add method raises exceptions."""
op1 = Operator(self.rand_matrix(2, 2))
op2 = Operator(self.rand_matrix(3, 3))
self.assertRaises(QiskitError, op1._add, op2)
def test_multiply(self):
"""Test multiply method."""
mat = self.rand_matrix(4, 4)
val = np.exp(5j)
op = Operator(mat)
self.assertEqual(op._multiply(val), Operator(val * mat))
self.assertEqual(val * op, Operator(val * mat))
def test_multiply_except(self):
"""Test multiply method raises exceptions."""
op = Operator(self.rand_matrix(2, 2))
self.assertRaises(QiskitError, op._multiply, 's')
self.assertRaises(QiskitError, op.__rmul__, 's')
self.assertRaises(QiskitError, op._multiply, op)
self.assertRaises(QiskitError, op.__rmul__, op)
def test_negate(self):
"""Test negate method"""
mat = self.rand_matrix(4, 4)
op = Operator(mat)
self.assertEqual(-op, Operator(-1 * mat))
def test_equiv(self):
"""Test negate method"""
mat = np.diag([1, np.exp(1j * np.pi / 2)])
phase = np.exp(-1j * np.pi / 4)
op = Operator(mat)
self.assertTrue(op.equiv(phase * mat))
self.assertTrue(op.equiv(Operator(phase * mat)))
self.assertFalse(op.equiv(2 * mat))
if __name__ == '__main__':
unittest.main()
| 2.046875 | 2 |
pages/feature_modal.py | jack-skerrett-bluefruit/Python-ScreenPlay | 0 | 4058 | <gh_stars>0
from selenium.webdriver.common.by import By
class feature_modal:
title_textbox = (By.ID, "feature-name")
description_textbox = (By.ID, "description")
save_button = (By.XPATH, "/html/body/app/div[3]/div[2]/div/div/div/button[1]")
| 2.046875 | 2 |
liststations.py | CrookedY/AirPollutionBot | 1 | 4059 | from urllib2 import Request, urlopen, URLError
import json
request = Request('https://uk-air.defra.gov.uk/sos-ukair/api/v1/stations/')
try:
response = urlopen(request)
data = response.read()
except URLError, e:
print 'error:', e
stations= json.loads (data)
#extract out station 2
stations2 = stations [7]
properties = stations2[u'properties']
#extract ID so can be use in link
ID = properties[u'id']
#print ID
url = ('https://uk-air.defra.gov.uk/sos-ukair/api/v1/stations/'+str(ID))
request2 = Request (url)
try:
response = urlopen(request2)
data2 = response.read()
except URLError, e:
print 'error:', e
#contains station properties data. Need to get to timecourse ID
station_prop = data2
station_prop_json= json.loads (station_prop)
#ID is a key in dictionary so need to extract as a key
a= station_prop_json[u'properties'][u'timeseries'].keys()
i=a[0]
url2 =('https://uk-air.defra.gov.uk/sos-ukair/api/v1/timeseries/'+str(i) +'/getData')
request3 = Request(url2)
try:
response = urlopen(request3)
data3 = response.read()
except URLError, e:
print 'error:', e
print data3
| 3.125 | 3 |
pyfinancials/engine.py | kmiller96/PyFinancials | 1 | 4060 | <filename>pyfinancials/engine.py
def hello_world():
"""Tests the import."""
return "Hello world!"
| 1.453125 | 1 |
core/migrations/0002_auto_20180702_1913.py | mertyildiran/echo | 5 | 4061 | <reponame>mertyildiran/echo<gh_stars>1-10
# Generated by Django 2.0.6 on 2018-07-02 19:13
import core.models
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('core', '0001_initial'),
]
operations = [
migrations.RenameField(
model_name='echo',
old_name='owner',
new_name='user',
),
migrations.AlterField(
model_name='echo',
name='audio',
field=models.FileField(upload_to=core.models.echo_directory),
),
migrations.AlterField(
model_name='profile',
name='picture',
field=models.FileField(blank=True, null=True, upload_to=core.models.profile_directory),
),
]
| 1.734375 | 2 |
tests/test_helpers.py | ajdavis/aiohttp | 1 | 4062 | <filename>tests/test_helpers.py
import pytest
from unittest import mock
from aiohttp import helpers
import datetime
def test_parse_mimetype_1():
assert helpers.parse_mimetype('') == ('', '', '', {})
def test_parse_mimetype_2():
assert helpers.parse_mimetype('*') == ('*', '*', '', {})
def test_parse_mimetype_3():
assert (helpers.parse_mimetype('application/json') ==
('application', 'json', '', {}))
def test_parse_mimetype_4():
assert (
helpers.parse_mimetype('application/json; charset=utf-8') ==
('application', 'json', '', {'charset': 'utf-8'}))
def test_parse_mimetype_5():
assert (
helpers.parse_mimetype('''application/json; charset=utf-8;''') ==
('application', 'json', '', {'charset': 'utf-8'}))
def test_parse_mimetype_6():
assert(
helpers.parse_mimetype('ApPlIcAtIoN/JSON;ChaRseT="UTF-8"') ==
('application', 'json', '', {'charset': 'UTF-8'}))
def test_parse_mimetype_7():
assert (
helpers.parse_mimetype('application/rss+xml') ==
('application', 'rss', 'xml', {}))
def test_parse_mimetype_8():
assert (
helpers.parse_mimetype('text/plain;base64') ==
('text', 'plain', '', {'base64': ''}))
def test_basic_auth1():
# missing password here
with pytest.raises(ValueError):
helpers.BasicAuth(None)
def test_basic_auth2():
with pytest.raises(ValueError):
helpers.BasicAuth('nkim', None)
def test_basic_auth3():
auth = helpers.BasicAuth('nkim')
assert auth.login == 'nkim'
assert auth.password == ''
def test_basic_auth4():
auth = helpers.BasicAuth('nkim', 'pwd')
assert auth.login == 'nkim'
    assert auth.password == 'pwd'
assert auth.encode() == 'Basic bmtpbTpwd2Q='
def test_invalid_formdata_params():
with pytest.raises(TypeError):
helpers.FormData('asdasf')
def test_invalid_formdata_params2():
with pytest.raises(TypeError):
helpers.FormData('as') # 2-char str is not allowed
def test_invalid_formdata_content_type():
form = helpers.FormData()
invalid_vals = [0, 0.1, {}, [], b'foo']
for invalid_val in invalid_vals:
with pytest.raises(TypeError):
form.add_field('foo', 'bar', content_type=invalid_val)
def test_invalid_formdata_filename():
form = helpers.FormData()
invalid_vals = [0, 0.1, {}, [], b'foo']
for invalid_val in invalid_vals:
with pytest.raises(TypeError):
form.add_field('foo', 'bar', filename=invalid_val)
def test_invalid_formdata_content_transfer_encoding():
form = helpers.FormData()
invalid_vals = [0, 0.1, {}, [], b'foo']
for invalid_val in invalid_vals:
with pytest.raises(TypeError):
form.add_field('foo',
'bar',
content_transfer_encoding=invalid_val)
def test_access_logger_format():
log_format = '%T {%{SPAM}e} "%{ETag}o" %X {X} %%P'
mock_logger = mock.Mock()
access_logger = helpers.AccessLogger(mock_logger, log_format)
expected = '%s {%s} "%s" %%X {X} %%%s'
assert expected == access_logger._log_format
@mock.patch("aiohttp.helpers.datetime")
@mock.patch("os.getpid")
def test_access_logger_atoms(mock_getpid, mock_datetime):
utcnow = datetime.datetime(1843, 1, 1, 0, 0)
mock_datetime.datetime.utcnow.return_value = utcnow
mock_getpid.return_value = 42
log_format = '%a %t %P %l %u %r %s %b %O %T %Tf %D'
mock_logger = mock.Mock()
access_logger = helpers.AccessLogger(mock_logger, log_format)
message = mock.Mock(headers={}, method="GET", path="/path", version=(1, 1))
environ = {}
response = mock.Mock(headers={}, output_length=123,
body_length=42, status=200)
transport = mock.Mock()
transport.get_extra_info.return_value = ("127.0.0.2", 1234)
access_logger.log(message, environ, response, transport, 3.1415926)
assert not mock_logger.exception.called
expected = ('127.0.0.2 [01/Jan/1843:00:00:00 +0000] <42> - - '
'GET /path HTTP/1.1 200 42 123 3 3.141593 3141593')
mock_logger.info.assert_called_with(expected)
def test_access_logger_dicts():
log_format = '%{User-Agent}i %{Content-Length}o %{SPAM}e %{None}i'
mock_logger = mock.Mock()
access_logger = helpers.AccessLogger(mock_logger, log_format)
message = mock.Mock(headers={"USER-AGENT": "Mock/1.0"}, version=(1, 1))
environ = {"SPAM": "EGGS"}
response = mock.Mock(headers={"CONTENT-LENGTH": 123})
transport = mock.Mock()
transport.get_extra_info.return_value = ("127.0.0.2", 1234)
access_logger.log(message, environ, response, transport, 0.0)
assert not mock_logger.error.called
expected = 'Mock/1.0 123 EGGS -'
mock_logger.info.assert_called_with(expected)
def test_logger_no_message_and_environ():
mock_logger = mock.Mock()
mock_transport = mock.Mock()
mock_transport.get_extra_info.return_value = ("127.0.0.3", 0)
access_logger = helpers.AccessLogger(mock_logger, "%r %{FOOBAR}e")
access_logger.log(None, None, None, mock_transport, 0.0)
mock_logger.info.assert_called_with("- -")
def test_reify():
class A:
@helpers.reify
def prop(self):
return 1
a = A()
assert 1 == a.prop
def test_reify_class():
class A:
@helpers.reify
def prop(self):
"""Docstring."""
return 1
assert isinstance(A.prop, helpers.reify)
assert 'Docstring.' == A.prop.__doc__
def test_reify_assignment():
class A:
@helpers.reify
def prop(self):
return 1
a = A()
with pytest.raises(AttributeError):
a.prop = 123
def test_requote_uri_with_unquoted_percents():
# Ensure we handle unquoted percent signs in redirects.
bad_uri = 'http://example.com/fiz?buz=%ppicture'
quoted = 'http://example.com/fiz?buz=%25ppicture'
assert quoted == helpers.requote_uri(bad_uri)
def test_requote_uri_properly_requotes():
# Ensure requoting doesn't break expectations.
quoted = 'http://example.com/fiz?buz=%25ppicture'
assert quoted == helpers.requote_uri(quoted)
| 2.421875 | 2 |
GenConfigs.py | truls/faas-profiler | 0 | 4063 | from os.path import join
FAAS_ROOT="/lhome/trulsas/faas-profiler"
WORKLOAD_SPECS=join(FAAS_ROOT, "specs", "workloads")
#FAAS_ROOT="/home/truls/uni/phd/faas-profiler"
WSK_PATH = "wsk"
OPENWHISK_PATH = "/lhome/trulsas/openwhisk"
#: Location of output data
DATA_DIR = join(FAAS_ROOT, "..", "profiler_results")
SYSTEM_CPU_SET = "0,2,4,6,8,10,12,14,16,18,20,22,24,26,28,30"
| 1.476563 | 1 |
Chapter09/calc.py | LuisPereda/Learning_Python | 0 | 4064 |
def sum1(a,b):
try:
c = a+b
return c
except :
print "Error in sum1 function"
def divide(a,b):
try:
c = a/b
return c
except :
print "Error in divide function"
print divide(10,0)
print sum1(10,0) | 3.53125 | 4 |
radssh/hostkey.py | Eli-Tarrago/radssh | 39 | 4065 | <gh_stars>10-100
#
# Copyright (c) 2014, 2016, 2018, 2020 LexisNexis Risk Data Management Inc.
#
# This file is part of the RadSSH software package.
#
# RadSSH is free software, released under the Revised BSD License.
# You are permitted to use, modify, and redsitribute this software
# according to the Revised BSD License, a copy of which should be
# included with the distribution as file LICENSE.txt
#
'''HostKey Handling Module'''
import os
import threading
import warnings
import paramiko.hostkeys
# Deprecated as of 1.1 - Use known_hosts rewrite instead if using this API
warnings.warn(FutureWarning('RadSSH hostkey module is no longer supported, and will be removed in release 2.0. Port existing code to use radssh.known_hosts instead.'))
class CodeMap(object):
'''CodeMap class'''
def __init__(self, **kwargs):
self._fwd = kwargs
self._reverse = {}
for k, v in kwargs.items():
self.__setattr__(k, v)
self._reverse[v] = k
def code(self, name):
'''Given a name, return the code'''
return self._fwd[name]
def name(self, code):
'''Given a code value, return the corresponding code'''
return self._reverse[code]
verify_mode = CodeMap(
# Different options for handling host key verification
# Listed in decreasing order of security/paranoia
reject=0, # Missing keys are rejected
prompt=1, # Missing keys may be accepted, based on user prompt
accept_new=2, # Missing keys automatically accepted
# After this point, key conflicts no longer hinder connections
# Using these options, you become vulnerable to spoofing and
# intercepted traffic for SSH sessions, and you don't care.
ignore=100, # Turn host key verification OFF
overwrite_blindly=666 # Concentrated evil
)
def printable_fingerprint(k):
'''Convert key fingerprint into OpenSSH printable format'''
fingerprint = k.get_fingerprint()
# Handle Python3 bytes or Python2 8-bit string style...
if isinstance(fingerprint[0], int):
seq = [int(x) for x in fingerprint]
else:
seq = [ord(x) for x in fingerprint]
return ':'.join(['%02x' % x for x in seq])
class HostKeyVerifier(object):
'''Class to control how (if) host keys are verified'''
def __init__(self, mode='reject', known_hosts_file='~/.ssh/known_hosts'):
self.mode = verify_mode.code(mode)
self.hostkeys = paramiko.hostkeys.HostKeys()
self.lock = threading.Lock()
        if self.mode == verify_mode.ignore:
return
self.known_hosts_file = os.path.expanduser(known_hosts_file)
if os.path.exists(self.known_hosts_file):
self.hostkeys.load(self.known_hosts_file)
elif not os.path.exists(os.path.dirname(self.known_hosts_file)):
os.makedirs(os.path.dirname(self.known_hosts_file))
def verify_host_key(self, hostname, key):
'''Verify a single hostkey against a hostname or IP'''
if self.mode == verify_mode.ignore:
return True
# Special formatting for non-standard ports...
if ':' not in hostname:
lookup_name = hostname
elif hostname.endswith(':22'):
lookup_name = hostname[:-3]
else:
host_base, port_base = hostname.rsplit(':', 1)
lookup_name = '[%s]:%s' % (host_base, port_base)
# Try remainder of host verification with locking
self.lock.acquire()
if self.hostkeys.check(lookup_name, key):
self.lock.release()
return True
host_entry = self.hostkeys.lookup(lookup_name)
actual = printable_fingerprint(key)
if host_entry and key.get_name() in host_entry:
# Entry mismatch
expected = printable_fingerprint(host_entry[key.get_name()])
print('Host key mismatch for (%s)' % lookup_name)
print('Expected:', expected)
print('Got :', actual)
if self.mode == verify_mode.overwrite_blindly:
print('Blindly accepting updated host key for %s' % lookup_name)
self.hostkeys.add(lookup_name, key.get_name(), key)
self.hostkeys.save(self.known_hosts_file)
self.lock.release()
return True
else:
# Missing key
if self.mode == verify_mode.reject:
self.lock.release()
return False
accept_and_add = False
if self.mode == verify_mode.prompt:
print('Unverified connection to "%s"' % lookup_name)
print('(Host Key Fingerprint [%s])' % actual)
answer = input('Do you want to accept this key? (y/N): ')
if answer[0].upper() == 'Y':
accept_and_add = True
if self.mode in (verify_mode.accept_new, verify_mode.overwrite_blindly):
accept_and_add = True
if accept_and_add:
print('Accepting new host key for %s' % lookup_name)
self.hostkeys.add(lookup_name, key.get_name(), key)
self.hostkeys.save(self.known_hosts_file)
self.lock.release()
return True
self.lock.release()
return False
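# Usage sketch (not part of the original module): build a verifier, then feed it
# a host key obtained from a paramiko transport. Key acquisition is only
# indicated in comments because it needs a live SSH connection.
if __name__ == '__main__':
    verifier = HostKeyVerifier(mode='accept_new')
    # key = transport.get_remote_server_key()  # from an established paramiko.Transport
    # accepted = verifier.verify_host_key('example.com:22', key)
    print(verify_mode.name(verifier.mode))  # -> 'accept_new'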
| 2.09375 | 2 |
nuke/pymmh3.py | jfpanisset/Cryptomatte | 543 | 4066 | '''
pymmh3 was written by <NAME> and enhanced by <NAME>, and is placed in the public
domain. The authors hereby disclaim copyright to this source code.
pure python implementation of the murmur3 hash algorithm
https://code.google.com/p/smhasher/wiki/MurmurHash3
This was written for the times when you do not want to compile c-code and install modules,
and you only want a drop-in murmur3 implementation.
As this is purely python it is FAR from performant and if performance is anything that is needed
a proper c-module is suggested!
This module is written to have the same format as mmh3 python package found here for simple conversions:
https://pypi.python.org/pypi/mmh3/2.3.1
'''
import sys as _sys
if (_sys.version_info > (3, 0)):
def xrange( a, b, c ):
return list(range( a, b, c))
def xencode(x):
if isinstance(x, bytes) or isinstance(x, bytearray):
return x
else:
return x.encode()
else:
def xencode(x):
return x
del _sys
def hash( key, seed = 0x0 ):
''' Implements 32bit murmur3 hash. '''
key = bytearray( xencode(key) )
def fmix( h ):
h ^= h >> 16
h = ( h * 0x85ebca6b ) & 0xFFFFFFFF
h ^= h >> 13
h = ( h * 0xc2b2ae35 ) & 0xFFFFFFFF
h ^= h >> 16
return h
length = len( key )
nblocks = int( length / 4 )
h1 = seed
c1 = 0xcc9e2d51
c2 = 0x1b873593
# body
for block_start in range( 0, nblocks * 4, 4 ):
# ??? big endian?
k1 = key[ block_start + 3 ] << 24 | \
key[ block_start + 2 ] << 16 | \
key[ block_start + 1 ] << 8 | \
key[ block_start + 0 ]
k1 = ( c1 * k1 ) & 0xFFFFFFFF
k1 = ( k1 << 15 | k1 >> 17 ) & 0xFFFFFFFF # inlined ROTL32
k1 = ( c2 * k1 ) & 0xFFFFFFFF
h1 ^= k1
h1 = ( h1 << 13 | h1 >> 19 ) & 0xFFFFFFFF # inlined ROTL32
h1 = ( h1 * 5 + 0xe6546b64 ) & 0xFFFFFFFF
# tail
tail_index = nblocks * 4
k1 = 0
tail_size = length & 3
if tail_size >= 3:
k1 ^= key[ tail_index + 2 ] << 16
if tail_size >= 2:
k1 ^= key[ tail_index + 1 ] << 8
if tail_size >= 1:
k1 ^= key[ tail_index + 0 ]
if tail_size > 0:
k1 = ( k1 * c1 ) & 0xFFFFFFFF
k1 = ( k1 << 15 | k1 >> 17 ) & 0xFFFFFFFF # inlined ROTL32
k1 = ( k1 * c2 ) & 0xFFFFFFFF
h1 ^= k1
#finalization
unsigned_val = fmix( h1 ^ length )
if unsigned_val & 0x80000000 == 0:
return unsigned_val
else:
return -( (unsigned_val ^ 0xFFFFFFFF) + 1 )
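# Usage sketch (not part of the original module): the call style mirrors the
# mmh3 package noted in the module docstring, e.g.
#
#   import pymmh3
#   pymmh3.hash('hello')      # signed 32-bit digest
#   pymmh3.hash128('hello')   # 128-bit digest, x64 variant by default
#
# Concrete digest values are intentionally not asserted here.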
def hash128( key, seed = 0x0, x64arch = True ):
''' Implements 128bit murmur3 hash. '''
def hash128_x64( key, seed ):
''' Implements 128bit murmur3 hash for x64. '''
def fmix( k ):
k ^= k >> 33
k = ( k * 0xff51afd7ed558ccd ) & 0xFFFFFFFFFFFFFFFF
k ^= k >> 33
k = ( k * 0xc4ceb9fe1a85ec53 ) & 0xFFFFFFFFFFFFFFFF
k ^= k >> 33
return k
length = len( key )
nblocks = int( length / 16 )
h1 = seed
h2 = seed
c1 = 0x87c37b91114253d5
c2 = 0x4cf5ad432745937f
#body
for block_start in range( 0, nblocks * 8, 8 ):
# ??? big endian?
k1 = key[ 2 * block_start + 7 ] << 56 | \
key[ 2 * block_start + 6 ] << 48 | \
key[ 2 * block_start + 5 ] << 40 | \
key[ 2 * block_start + 4 ] << 32 | \
key[ 2 * block_start + 3 ] << 24 | \
key[ 2 * block_start + 2 ] << 16 | \
key[ 2 * block_start + 1 ] << 8 | \
key[ 2 * block_start + 0 ]
k2 = key[ 2 * block_start + 15 ] << 56 | \
key[ 2 * block_start + 14 ] << 48 | \
key[ 2 * block_start + 13 ] << 40 | \
key[ 2 * block_start + 12 ] << 32 | \
key[ 2 * block_start + 11 ] << 24 | \
key[ 2 * block_start + 10 ] << 16 | \
key[ 2 * block_start + 9 ] << 8 | \
key[ 2 * block_start + 8 ]
k1 = ( c1 * k1 ) & 0xFFFFFFFFFFFFFFFF
k1 = ( k1 << 31 | k1 >> 33 ) & 0xFFFFFFFFFFFFFFFF # inlined ROTL64
k1 = ( c2 * k1 ) & 0xFFFFFFFFFFFFFFFF
h1 ^= k1
h1 = ( h1 << 27 | h1 >> 37 ) & 0xFFFFFFFFFFFFFFFF # inlined ROTL64
h1 = ( h1 + h2 ) & 0xFFFFFFFFFFFFFFFF
h1 = ( h1 * 5 + 0x52dce729 ) & 0xFFFFFFFFFFFFFFFF
k2 = ( c2 * k2 ) & 0xFFFFFFFFFFFFFFFF
k2 = ( k2 << 33 | k2 >> 31 ) & 0xFFFFFFFFFFFFFFFF # inlined ROTL64
k2 = ( c1 * k2 ) & 0xFFFFFFFFFFFFFFFF
h2 ^= k2
h2 = ( h2 << 31 | h2 >> 33 ) & 0xFFFFFFFFFFFFFFFF # inlined ROTL64
h2 = ( h1 + h2 ) & 0xFFFFFFFFFFFFFFFF
h2 = ( h2 * 5 + 0x38495ab5 ) & 0xFFFFFFFFFFFFFFFF
#tail
tail_index = nblocks * 16
k1 = 0
k2 = 0
tail_size = length & 15
if tail_size >= 15:
k2 ^= key[ tail_index + 14 ] << 48
if tail_size >= 14:
k2 ^= key[ tail_index + 13 ] << 40
if tail_size >= 13:
k2 ^= key[ tail_index + 12 ] << 32
if tail_size >= 12:
k2 ^= key[ tail_index + 11 ] << 24
if tail_size >= 11:
k2 ^= key[ tail_index + 10 ] << 16
if tail_size >= 10:
k2 ^= key[ tail_index + 9 ] << 8
if tail_size >= 9:
k2 ^= key[ tail_index + 8 ]
if tail_size > 8:
k2 = ( k2 * c2 ) & 0xFFFFFFFFFFFFFFFF
k2 = ( k2 << 33 | k2 >> 31 ) & 0xFFFFFFFFFFFFFFFF # inlined ROTL64
k2 = ( k2 * c1 ) & 0xFFFFFFFFFFFFFFFF
h2 ^= k2
if tail_size >= 8:
k1 ^= key[ tail_index + 7 ] << 56
if tail_size >= 7:
k1 ^= key[ tail_index + 6 ] << 48
if tail_size >= 6:
k1 ^= key[ tail_index + 5 ] << 40
if tail_size >= 5:
k1 ^= key[ tail_index + 4 ] << 32
if tail_size >= 4:
k1 ^= key[ tail_index + 3 ] << 24
if tail_size >= 3:
k1 ^= key[ tail_index + 2 ] << 16
if tail_size >= 2:
k1 ^= key[ tail_index + 1 ] << 8
if tail_size >= 1:
k1 ^= key[ tail_index + 0 ]
if tail_size > 0:
k1 = ( k1 * c1 ) & 0xFFFFFFFFFFFFFFFF
k1 = ( k1 << 31 | k1 >> 33 ) & 0xFFFFFFFFFFFFFFFF # inlined ROTL64
k1 = ( k1 * c2 ) & 0xFFFFFFFFFFFFFFFF
h1 ^= k1
#finalization
h1 ^= length
h2 ^= length
h1 = ( h1 + h2 ) & 0xFFFFFFFFFFFFFFFF
h2 = ( h1 + h2 ) & 0xFFFFFFFFFFFFFFFF
h1 = fmix( h1 )
h2 = fmix( h2 )
h1 = ( h1 + h2 ) & 0xFFFFFFFFFFFFFFFF
h2 = ( h1 + h2 ) & 0xFFFFFFFFFFFFFFFF
return ( h2 << 64 | h1 )
def hash128_x86( key, seed ):
''' Implements 128bit murmur3 hash for x86. '''
def fmix( h ):
h ^= h >> 16
h = ( h * 0x85ebca6b ) & 0xFFFFFFFF
h ^= h >> 13
h = ( h * 0xc2b2ae35 ) & 0xFFFFFFFF
h ^= h >> 16
return h
length = len( key )
nblocks = int( length / 16 )
h1 = seed
h2 = seed
h3 = seed
h4 = seed
c1 = 0x239b961b
c2 = 0xab0e9789
c3 = 0x38b34ae5
c4 = 0xa1e38b93
#body
for block_start in range( 0, nblocks * 16, 16 ):
k1 = key[ block_start + 3 ] << 24 | \
key[ block_start + 2 ] << 16 | \
key[ block_start + 1 ] << 8 | \
key[ block_start + 0 ]
k2 = key[ block_start + 7 ] << 24 | \
key[ block_start + 6 ] << 16 | \
key[ block_start + 5 ] << 8 | \
key[ block_start + 4 ]
k3 = key[ block_start + 11 ] << 24 | \
key[ block_start + 10 ] << 16 | \
key[ block_start + 9 ] << 8 | \
key[ block_start + 8 ]
k4 = key[ block_start + 15 ] << 24 | \
key[ block_start + 14 ] << 16 | \
key[ block_start + 13 ] << 8 | \
key[ block_start + 12 ]
k1 = ( c1 * k1 ) & 0xFFFFFFFF
k1 = ( k1 << 15 | k1 >> 17 ) & 0xFFFFFFFF # inlined ROTL32
k1 = ( c2 * k1 ) & 0xFFFFFFFF
h1 ^= k1
h1 = ( h1 << 19 | h1 >> 13 ) & 0xFFFFFFFF # inlined ROTL32
h1 = ( h1 + h2 ) & 0xFFFFFFFF
h1 = ( h1 * 5 + 0x561ccd1b ) & 0xFFFFFFFF
k2 = ( c2 * k2 ) & 0xFFFFFFFF
k2 = ( k2 << 16 | k2 >> 16 ) & 0xFFFFFFFF # inlined ROTL32
k2 = ( c3 * k2 ) & 0xFFFFFFFF
h2 ^= k2
h2 = ( h2 << 17 | h2 >> 15 ) & 0xFFFFFFFF # inlined ROTL32
h2 = ( h2 + h3 ) & 0xFFFFFFFF
h2 = ( h2 * 5 + 0x0bcaa747 ) & 0xFFFFFFFF
k3 = ( c3 * k3 ) & 0xFFFFFFFF
k3 = ( k3 << 17 | k3 >> 15 ) & 0xFFFFFFFF # inlined ROTL32
k3 = ( c4 * k3 ) & 0xFFFFFFFF
h3 ^= k3
h3 = ( h3 << 15 | h3 >> 17 ) & 0xFFFFFFFF # inlined ROTL32
h3 = ( h3 + h4 ) & 0xFFFFFFFF
h3 = ( h3 * 5 + 0x96cd1c35 ) & 0xFFFFFFFF
k4 = ( c4 * k4 ) & 0xFFFFFFFF
k4 = ( k4 << 18 | k4 >> 14 ) & 0xFFFFFFFF # inlined ROTL32
k4 = ( c1 * k4 ) & 0xFFFFFFFF
h4 ^= k4
h4 = ( h4 << 13 | h4 >> 19 ) & 0xFFFFFFFF # inlined ROTL32
h4 = ( h1 + h4 ) & 0xFFFFFFFF
h4 = ( h4 * 5 + 0x32ac3b17 ) & 0xFFFFFFFF
#tail
tail_index = nblocks * 16
k1 = 0
k2 = 0
k3 = 0
k4 = 0
tail_size = length & 15
if tail_size >= 15:
k4 ^= key[ tail_index + 14 ] << 16
if tail_size >= 14:
k4 ^= key[ tail_index + 13 ] << 8
if tail_size >= 13:
k4 ^= key[ tail_index + 12 ]
if tail_size > 12:
k4 = ( k4 * c4 ) & 0xFFFFFFFF
k4 = ( k4 << 18 | k4 >> 14 ) & 0xFFFFFFFF # inlined ROTL32
k4 = ( k4 * c1 ) & 0xFFFFFFFF
h4 ^= k4
if tail_size >= 12:
k3 ^= key[ tail_index + 11 ] << 24
if tail_size >= 11:
k3 ^= key[ tail_index + 10 ] << 16
if tail_size >= 10:
k3 ^= key[ tail_index + 9 ] << 8
if tail_size >= 9:
k3 ^= key[ tail_index + 8 ]
if tail_size > 8:
k3 = ( k3 * c3 ) & 0xFFFFFFFF
k3 = ( k3 << 17 | k3 >> 15 ) & 0xFFFFFFFF # inlined ROTL32
k3 = ( k3 * c4 ) & 0xFFFFFFFF
h3 ^= k3
if tail_size >= 8:
k2 ^= key[ tail_index + 7 ] << 24
if tail_size >= 7:
k2 ^= key[ tail_index + 6 ] << 16
if tail_size >= 6:
k2 ^= key[ tail_index + 5 ] << 8
if tail_size >= 5:
k2 ^= key[ tail_index + 4 ]
if tail_size > 4:
k2 = ( k2 * c2 ) & 0xFFFFFFFF
k2 = ( k2 << 16 | k2 >> 16 ) & 0xFFFFFFFF # inlined ROTL32
k2 = ( k2 * c3 ) & 0xFFFFFFFF
h2 ^= k2
if tail_size >= 4:
k1 ^= key[ tail_index + 3 ] << 24
if tail_size >= 3:
k1 ^= key[ tail_index + 2 ] << 16
if tail_size >= 2:
k1 ^= key[ tail_index + 1 ] << 8
if tail_size >= 1:
k1 ^= key[ tail_index + 0 ]
if tail_size > 0:
k1 = ( k1 * c1 ) & 0xFFFFFFFF
k1 = ( k1 << 15 | k1 >> 17 ) & 0xFFFFFFFF # inlined ROTL32
k1 = ( k1 * c2 ) & 0xFFFFFFFF
h1 ^= k1
#finalization
h1 ^= length
h2 ^= length
h3 ^= length
h4 ^= length
h1 = ( h1 + h2 ) & 0xFFFFFFFF
h1 = ( h1 + h3 ) & 0xFFFFFFFF
h1 = ( h1 + h4 ) & 0xFFFFFFFF
h2 = ( h1 + h2 ) & 0xFFFFFFFF
h3 = ( h1 + h3 ) & 0xFFFFFFFF
h4 = ( h1 + h4 ) & 0xFFFFFFFF
h1 = fmix( h1 )
h2 = fmix( h2 )
h3 = fmix( h3 )
h4 = fmix( h4 )
h1 = ( h1 + h2 ) & 0xFFFFFFFF
h1 = ( h1 + h3 ) & 0xFFFFFFFF
h1 = ( h1 + h4 ) & 0xFFFFFFFF
h2 = ( h1 + h2 ) & 0xFFFFFFFF
h3 = ( h1 + h3 ) & 0xFFFFFFFF
h4 = ( h1 + h4 ) & 0xFFFFFFFF
return ( h4 << 96 | h3 << 64 | h2 << 32 | h1 )
key = bytearray( xencode(key) )
if x64arch:
return hash128_x64( key, seed )
else:
return hash128_x86( key, seed )
def hash64( key, seed = 0x0, x64arch = True ):
''' Implements 64bit murmur3 hash. Returns a tuple. '''
hash_128 = hash128( key, seed, x64arch )
unsigned_val1 = hash_128 & 0xFFFFFFFFFFFFFFFF
if unsigned_val1 & 0x8000000000000000 == 0:
signed_val1 = unsigned_val1
else:
signed_val1 = -( (unsigned_val1 ^ 0xFFFFFFFFFFFFFFFF) + 1 )
unsigned_val2 = ( hash_128 >> 64 ) & 0xFFFFFFFFFFFFFFFF
if unsigned_val2 & 0x8000000000000000 == 0:
signed_val2 = unsigned_val2
else:
signed_val2 = -( (unsigned_val2 ^ 0xFFFFFFFFFFFFFFFF) + 1 )
return ( int( signed_val1 ), int( signed_val2 ) )
def hash_bytes( key, seed = 0x0, x64arch = True ):
''' Implements 128bit murmur3 hash. Returns a byte string. '''
hash_128 = hash128( key, seed, x64arch )
bytestring = ''
for i in range(0, 16, 1):
lsbyte = hash_128 & 0xFF
bytestring = bytestring + str( chr( lsbyte ) )
hash_128 = hash_128 >> 8
return bytestring
if __name__ == "__main__":
    import argparse
    import sys
parser = argparse.ArgumentParser( 'pymurmur3', 'pymurmur [options] "string to hash"' )
parser.add_argument( '--seed', type = int, default = 0 )
parser.add_argument( 'strings', default = [], nargs='+')
opts = parser.parse_args()
for str_to_hash in opts.strings:
sys.stdout.write( '"%s" = 0x%08X\n' % ( str_to_hash, hash( str_to_hash ) ) )
| 3.46875 | 3 |
bindings/python/tests/test_factory.py | pscff/dlite | 10 | 4067 | <reponame>pscff/dlite<gh_stars>1-10
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import dlite
thisdir = os.path.abspath(os.path.dirname(__file__))
class Person:
def __init__(self, name, age, skills):
self.name = name
self.age = age
self.skills = skills
def __repr__(self):
return 'Person(%r, %r, %r)' % (self.name, self.age, list(self.skills))
url = 'json://' + thisdir + '/Person.json'
print('-- create: ExPerson')
ExPerson = dlite.classfactory(Person, url=url)
print('-- create: person1')
person1 = Person('<NAME>', 42, ['distilling', 'tasting'])
print('-- create: person2')
person2 = ExPerson('<NAME>', 42, ['distilling', 'tasting'])
person2.dlite_inst.save('json', 'persons.json', 'mode=w')
# Print json-representation of person2 using dlite
print(person2.dlite_inst.asjson(indent=2))
person3 = dlite.loadfactory(Person, 'json://persons.json')
person4 = dlite.objectfactory(person1, meta=person2.dlite_meta)
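# Note (assumption inferred from the calls above, not from dlite documentation): loadfactory
# presumably re-creates a Person-backed instance from the 'persons.json' storage saved earlier,
# while objectfactory wraps the plain `person1` object with the dlite metadata of `person2`.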
| 2.8125 | 3 |
week_11_DS_N_Algorithm/03_Thr_Lecture/실습6_연속 부분 최대합.py | bky373/elice-racer-1st | 1 | 4068 | '''
Maximum contiguous subarray sum
Given n numbers, write a program that selects a contiguous part of them so that its sum is maximized.
For example, suppose the following 8 numbers are given.
1 2 -4 5 3 -2 9 -10
Here, a "contiguous part" means choosing numbers that sit next to each other.
Possible contiguous parts include [1, 2, -4], [5, 3, -2, 9], [9, -10], and so on.
Among these, the contiguous part with the largest sum is [5, 3, -2, 9],
and no other choice gives a larger sum.
Therefore the maximum contiguous subarray sum is 5+3+(-2)+9 = 15.
Sample input
1 2 -4 5 3 -2 9 -10
Sample output
15
Constraints
At most 100 numbers are given as input.
'''
import sys
def getSubsum(data) :
'''
    Write a function that, given n numbers as a list, returns their maximum contiguous subarray sum.
'''
dp = [0] * len(data)
dp[0] = data[0]
for i in range(1, len(data)):
        dp[i] = max(dp[i-1] + data[i], data[i])  # either extend the previous run or start anew at i
return max(dp)
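# Quick check against the example in the problem statement above:
#   getSubsum([1, 2, -4, 5, 3, -2, 9, -10]) -> 15   (the subarray [5, 3, -2, 9])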
def main():
'''
    Do not modify this part.
'''
data = [int(x) for x in input().split()]
print(getSubsum(data))
if __name__ == "__main__":
main()
| 2.8125 | 3 |
tests/test_dns.py | jensstein/mockdock | 0 | 4069 | <reponame>jensstein/mockdock
#!/usr/bin/env python3
import unittest
from mockdock import dns
class DNSTest(unittest.TestCase):
def test_build_packet(self):
data = b"^4\x01\x00\x00\x01\x00\x00\x00\x00\x00\x00\x06google\x03com\x00\x00\x01\x00\x01"
packet = dns.build_packet(data, "192.168.0.1")
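        # The expected bytes form a standard DNS response (flags 0x8180) that echoes the
        # google.com A-record question and answers with 192.168.0.1 and a TTL of 60 seconds.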
        expected_result = b"^4\x81\x80\x00\x01\x00\x01\x00\x00\x00\x00\x06google\x03com\x00\x00\x01\x00\x01\xc0\x0c\x00\x01\x00\x01\x00\x00\x00<\x00\x04\xc0\xa8\x00\x01"
        self.assertEqual(packet, expected_result)
| 2.78125 | 3 |
tests/conftest.py | zhongnansu/es-cli | 6 | 4070 | <filename>tests/conftest.py
"""
Copyright 2019, Amazon Web Services Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
"""
We can define the fixture functions in this file to make them
accessible across multiple test modules.
"""
import os
import pytest
from utils import create_index, delete_index, get_connection
@pytest.fixture(scope="function")
def connection():
test_connection = get_connection()
create_index(test_connection)
yield test_connection
delete_index(test_connection)
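# Example (illustrative): a test opts in to this fixture simply by naming it as a parameter;
# the test index is created before the test body runs and deleted again afterwards.
#
#   def test_index_roundtrip(connection):
#       ...  # use `connection` against the freshly created test index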
@pytest.fixture(scope="function")
def default_config_location():
from escli.conf import __file__ as package_root
package_root = os.path.dirname(package_root)
default_config = os.path.join(package_root, "esclirc")
yield default_config
@pytest.fixture(scope="session", autouse=True)
def temp_config(tmpdir_factory):
# this function runs on start of test session.
# use temporary directory for conf home so user conf will not be used
os.environ["XDG_CONFIG_HOME"] = str(tmpdir_factory.mktemp("data"))
| 2.046875 | 2 |
Cogs/ServerStats.py | Damiian1/techwizardshardware | 0 | 4071 | import asyncio
import discord
from datetime import datetime
from operator import itemgetter
from discord.ext import commands
from Cogs import Nullify
from Cogs import DisplayName
from Cogs import UserTime
from Cogs import Message
def setup(bot):
# Add the bot and deps
settings = bot.get_cog("Settings")
bot.add_cog(ServerStats(bot, settings))
class ServerStats:
def __init__(self, bot, settings):
self.bot = bot
self.settings = settings
async def message(self, message):
# Check the message and see if we should allow it - always yes.
# This module doesn't need to cancel messages.
# Don't count your own, Pooter
if not message.author.id == self.bot.user.id:
server = message.guild
messages = int(self.settings.getServerStat(server, "TotalMessages"))
if messages == None:
messages = 0
messages += 1
self.settings.setServerStat(server, "TotalMessages", messages)
return { 'Ignore' : False, 'Delete' : False}
@commands.command(pass_context=True)
async def serverinfo(self, ctx, *, guild_name = None):
"""Lists some info about the current or passed server."""
# Check if we passed another guild
guild = None
if guild_name == None:
guild = ctx.guild
else:
for g in self.bot.guilds:
if g.name.lower() == guild_name.lower():
guild = g
break
if str(g.id) == str(guild_name):
guild = g
break
if guild == None:
# We didn't find it
await ctx.send("I couldn't find that guild...")
return
server_embed = discord.Embed(color=ctx.author.color)
server_embed.title = guild.name
# Get localized user time
local_time = UserTime.getUserTime(ctx.author, self.settings, guild.created_at)
time_str = "{} {}".format(local_time['time'], local_time['zone'])
server_embed.description = "Created at {}".format(time_str)
online_members = 0
bot_member = 0
bot_online = 0
for member in guild.members:
if member.bot:
bot_member += 1
if not member.status == discord.Status.offline:
bot_online += 1
continue
if not member.status == discord.Status.offline:
online_members += 1
# bot_percent = "{:,g}%".format((bot_member/len(guild.members))*100)
user_string = "{:,}/{:,} online ({:,g}%)".format(
online_members,
len(guild.members) - bot_member,
round((online_members/(len(guild.members) - bot_member) * 100), 2)
)
b_string = "bot" if bot_member == 1 else "bots"
user_string += "\n{:,}/{:,} {} online ({:,g}%)".format(
bot_online,
bot_member,
b_string,
round((bot_online/bot_member)*100, 2)
)
#server_embed.add_field(name="Members", value="{:,}/{:,} online ({:.2f}%)\n{:,} {} ({}%)".format(online_members, len(guild.members), bot_percent), inline=True)
server_embed.add_field(name="Members ({:,} total)".format(len(guild.members)), value=user_string, inline=True)
server_embed.add_field(name="Roles", value=str(len(guild.roles)), inline=True)
chandesc = "{:,} text, {:,} voice".format(len(guild.text_channels), len(guild.voice_channels))
server_embed.add_field(name="Channels", value=chandesc, inline=True)
server_embed.add_field(name="Default Role", value=guild.default_role, inline=True)
server_embed.add_field(name="Owner", value=guild.owner.name + "#" + guild.owner.discriminator, inline=True)
server_embed.add_field(name="AFK Channel", value=guild.afk_channel, inline=True)
server_embed.add_field(name="Verification", value=guild.verification_level, inline=True)
server_embed.add_field(name="Voice Region", value=guild.region, inline=True)
server_embed.add_field(name="Considered Large", value=guild.large, inline=True)
# Find out where in our join position this server is
joinedList = []
popList = []
for g in self.bot.guilds:
joinedList.append({ 'ID' : g.id, 'Joined' : g.me.joined_at })
popList.append({ 'ID' : g.id, 'Population' : len(g.members) })
# sort the guilds by join date
joinedList = sorted(joinedList, key=lambda x:x['Joined'])
popList = sorted(popList, key=lambda x:x['Population'], reverse=True)
check_item = { "ID" : guild.id, "Joined" : guild.me.joined_at }
total = len(joinedList)
position = joinedList.index(check_item) + 1
server_embed.add_field(name="Join Position", value="{:,} of {:,}".format(position, total), inline=True)
# Get our population position
check_item = { "ID" : guild.id, "Population" : len(guild.members) }
total = len(popList)
position = popList.index(check_item) + 1
server_embed.add_field(name="Population Rank", value="{:,} of {:,}".format(position, total), inline=True)
emojitext = ""
emojicount = 0
for emoji in guild.emojis:
if emoji.animated:
emojiMention = "<a:"+emoji.name+":"+str(emoji.id)+">"
else:
emojiMention = "<:"+emoji.name+":"+str(emoji.id)+">"
test = emojitext + emojiMention
if len(test) > 1024:
# TOOO BIIIIIIIIG
emojicount += 1
if emojicount == 1:
ename = "Emojis ({:,} total)".format(len(guild.emojis))
else:
ename = "Emojis (Continued)"
server_embed.add_field(name=ename, value=emojitext, inline=True)
emojitext=emojiMention
else:
emojitext = emojitext + emojiMention
if len(emojitext):
if emojicount == 0:
emojiname = "Emojis ({} total)".format(len(guild.emojis))
else:
emojiname = "Emojis (Continued)"
server_embed.add_field(name=emojiname, value=emojitext, inline=True)
if len(guild.icon_url):
server_embed.set_thumbnail(url=guild.icon_url)
else:
# No Icon
server_embed.set_thumbnail(url=ctx.author.default_avatar_url)
server_embed.set_footer(text="Server ID: {}".format(guild.id))
await ctx.channel.send(embed=server_embed)
@commands.command(pass_context=True)
async def sharedservers(self, ctx, *, member = None):
"""Lists how many servers you share with the bot."""
# Check if we're suppressing @here and @everyone mentions
if self.settings.getServerStat(ctx.message.guild, "SuppressMentions"):
suppress = True
else:
suppress = False
if member == None:
member = ctx.author
if type(member) is str:
member_check = DisplayName.memberForName(member, ctx.guild)
if not member_check:
msg = "I couldn't find *{}* on this server...".format(member)
if suppress:
msg = Nullify.clean(msg)
await ctx.send(msg)
return
member = member_check
if member.id == self.bot.user.id:
count = len(self.bot.guilds)
if count == 1:
await ctx.send("I'm on *1* server. :blush:")
else:
await ctx.send("I'm on *{}* servers. :blush:".format(count))
return
count = 0
for guild in self.bot.guilds:
for mem in guild.members:
if mem.id == member.id:
count += 1
if ctx.author.id == member.id:
targ = "You share"
else:
targ = "*{}* shares".format(DisplayName.name(member))
if count == 1:
await ctx.send("{} *1* server with me. :blush:".format(targ))
else:
await ctx.send("{} *{}* servers with me. :blush:".format(targ, count))
@commands.command(pass_context=True)
async def listservers(self, ctx, number : int = 10):
"""Lists the servers I'm connected to - default is 10, max is 50."""
# Check if we're suppressing @here and @everyone mentions
if self.settings.getServerStat(ctx.message.guild, "SuppressMentions"):
suppress = True
else:
suppress = False
if number > 50:
number = 50
if number < 1:
await ctx.channel.send('Oookay - look! No servers! Just like you wanted!')
return
i = 1
msg = '__**Servers I\'m On:**__\n\n'
for server in self.bot.guilds:
if i > number:
break
msg += '{}. *{}*\n'.format(i, server.name)
i += 1
# Check for suppress
if suppress:
msg = Nullify.clean(msg)
await ctx.channel.send(msg)
@commands.command(pass_context=True)
async def topservers(self, ctx, number : int = 10):
"""Lists the top servers I'm connected to ordered by population - default is 10, max is 50."""
# Check if we're suppressing @here and @everyone mentions
if self.settings.getServerStat(ctx.message.guild, "SuppressMentions"):
suppress = True
else:
suppress = False
if number > 50:
number = 50
if number < 1:
await ctx.channel.send('Oookay - look! No servers! Just like you wanted!')
return
serverList = []
for server in self.bot.guilds:
memberCount = 0
for member in server.members:
memberCount += 1
serverList.append({ 'Name' : server.name, 'Users' : memberCount })
# sort the servers by population
serverList = sorted(serverList, key=lambda x:int(x['Users']), reverse=True)
if number > len(serverList):
number = len(serverList)
i = 1
msg = ''
for server in serverList:
if i > number:
break
msg += '{}. *{}* - *{:,}* members\n'.format(i, server['Name'], server['Users'])
i += 1
if number < len(serverList):
msg = '__**Top {} of {} Servers:**__\n\n'.format(number, len(serverList))+msg
else:
msg = '__**Top {} Servers:**__\n\n'.format(len(serverList))+msg
# Check for suppress
if suppress:
msg = Nullify.clean(msg)
await ctx.channel.send(msg)
@commands.command(pass_context=True)
async def bottomservers(self, ctx, number : int = 10):
"""Lists the bottom servers I'm connected to ordered by population - default is 10, max is 50."""
# Check if we're suppressing @here and @everyone mentions
if self.settings.getServerStat(ctx.message.guild, "SuppressMentions"):
suppress = True
else:
suppress = False
if number > 50:
number = 50
if number < 1:
await ctx.channel.send('Oookay - look! No servers! Just like you wanted!')
return
serverList = []
for server in self.bot.guilds:
serverList.append({ 'Name' : server.name, 'Users' : len(server.members) })
# sort the servers by population
serverList = sorted(serverList, key=lambda x:int(x['Users']))
if number > len(serverList):
number = len(serverList)
i = 1
msg = ''
for server in serverList:
if i > number:
break
msg += '{}. *{}* - *{:,}* members\n'.format(i, server['Name'], server['Users'])
i += 1
if number < len(serverList):
msg = '__**Bottom {} of {} Servers:**__\n\n'.format(number, len(serverList))+msg
else:
msg = '__**Bottom {} Servers:**__\n\n'.format(len(serverList))+msg
# Check for suppress
if suppress:
msg = Nullify.clean(msg)
await ctx.channel.send(msg)
@commands.command(pass_context=True)
async def users(self, ctx):
"""Lists the total number of users on all servers I'm connected to."""
message = await Message.EmbedText(title="Counting users...", color=ctx.message.author).send(ctx)
servers = members = membersOnline = bots = botsOnline = 0
counted_users = []
counted_bots = []
for server in self.bot.guilds:
servers += 1
for member in server.members:
if member.bot:
bots += 1
if not member.id in counted_bots:
counted_bots.append(member.id)
if not member.status == discord.Status.offline:
botsOnline += 1
else:
members += 1
if not member.id in counted_users:
counted_users.append(member.id)
if not member.status == discord.Status.offline:
membersOnline += 1
await Message.Embed(
title="Member Stats",
description="Current User Information".format(server.name),
fields=[
{ "name" : "Servers", "value" : "└─ {:,}".format(servers), "inline" : False },
{ "name" : "Users", "value" : "└─ {:,}/{:,} online ({:,g}%) - {:,} unique ({:,g}%)".format(membersOnline, members, round((membersOnline/members)*100, 2), len(counted_users), round((len(counted_users)/members)*100, 2)), "inline" : False},
{ "name" : "Bots", "value" : "└─ {:,}/{:,} online ({:,g}%) - {:,} unique ({:,g}%)".format(botsOnline, bots, round((botsOnline/bots)*100, 2), len(counted_bots), round(len(counted_bots)/bots*100, 2)), "inline" : False},
{ "name" : "Total", "value" : "└─ {:,}/{:,} online ({:,g}%)".format(membersOnline + botsOnline, members+bots, round(((membersOnline + botsOnline)/(members+bots))*100, 2)), "inline" : False}
],
color=ctx.message.author).edit(ctx, message)
'''userCount = 0
serverCount = 0
counted_users = []
message = await ctx.send("Counting users...")
for server in self.bot.guilds:
serverCount += 1
userCount += len(server.members)
for member in server.members:
if not member.id in counted_users:
counted_users.append(member.id)
await message.edit(content='There are *{:,} users* (*{:,}* unique) on the *{:,} servers* I am currently a part of!'.format(userCount, len(counted_users), serverCount))'''
@commands.command(pass_context=True)
async def joinpos(self, ctx, *, member = None):
"""Tells when a user joined compared to other users."""
# Check if we're suppressing @here and @everyone mentions
if self.settings.getServerStat(ctx.message.guild, "SuppressMentions"):
suppress = True
else:
suppress = False
if member == None:
member = ctx.author
if type(member) is str:
member_check = DisplayName.memberForName(member, ctx.guild)
if not member_check:
msg = "I couldn't find *{}* on this server...".format(member)
if suppress:
msg = Nullify.clean(msg)
await ctx.send(msg)
return
member = member_check
joinedList = []
for mem in ctx.message.guild.members:
joinedList.append({ 'ID' : mem.id, 'Joined' : mem.joined_at })
# sort the users by join date
joinedList = sorted(joinedList, key=lambda x:x['Joined'])
check_item = { "ID" : member.id, "Joined" : member.joined_at }
total = len(joinedList)
position = joinedList.index(check_item) + 1
before = ""
after = ""
msg = "*{}'s* join position is **{:,}**.".format(DisplayName.name(member), position, total)
if position-1 == 1:
# We have previous members
before = "**1** user"
elif position-1 > 1:
before = "**{:,}** users".format(position-1)
if total-position == 1:
# There were users after as well
after = "**1** user"
elif total-position > 1:
after = "**{:,}** users".format(total-position)
# Build the string!
if len(before) and len(after):
# Got both
msg += "\n\n{} joined before, and {} after.".format(before, after)
elif len(before):
# Just got before
msg += "\n\n{} joined before.".format(before)
elif len(after):
# Just after
msg += "\n\n{} joined after.".format(after)
await ctx.send(msg)
@commands.command(pass_context=True)
async def firstjoins(self, ctx, number : int = 10):
"""Lists the first users to join - default is 10, max is 25."""
# Check if we're suppressing @here and @everyone mentions
if self.settings.getServerStat(ctx.message.guild, "SuppressMentions"):
suppress = True
else:
suppress = False
if number > 25:
number = 25
if number < 1:
await ctx.channel.send('Oookay - look! No users! Just like you wanted!')
return
joinedList = []
for member in ctx.message.guild.members:
joinedList.append({ 'ID' : member.id, 'Joined' : member.joined_at })
# sort the users by join date
joinedList = sorted(joinedList, key=lambda x:x['Joined'])
i = 1
msg = ''
for member in joinedList:
if i > number:
break
# Get localized user time
local_time = UserTime.getUserTime(ctx.author, self.settings, member['Joined'])
time_str = "{} {}".format(local_time['time'], local_time['zone'])
msg += '{}. *{}* - *{}*\n'.format(i, DisplayName.name(DisplayName.memberForID(member['ID'], ctx.message.guild)), time_str)
i += 1
if number < len(joinedList):
msg = '__**First {} of {} Members to Join:**__\n\n'.format(number, len(joinedList))+msg
else:
msg = '__**First {} Members to Join:**__\n\n'.format(len(joinedList))+msg
# Check for suppress
if suppress:
msg = Nullify.clean(msg)
await ctx.channel.send(msg)
@commands.command(pass_context=True)
async def recentjoins(self, ctx, number : int = 10):
"""Lists the most recent users to join - default is 10, max is 25."""
# Check if we're suppressing @here and @everyone mentions
if self.settings.getServerStat(ctx.message.guild, "SuppressMentions"):
suppress = True
else:
suppress = False
if number > 25:
number = 25
if number < 1:
await ctx.channel.send('Oookay - look! No users! Just like you wanted!')
return
joinedList = []
for member in ctx.message.guild.members:
joinedList.append({ 'ID' : member.id, 'Joined' : member.joined_at })
# sort the users by join date
joinedList = sorted(joinedList, key=lambda x:x['Joined'], reverse=True)
i = 1
msg = ''
for member in joinedList:
if i > number:
break
# Get localized user time
local_time = UserTime.getUserTime(ctx.author, self.settings, member['Joined'])
time_str = "{} {}".format(local_time['time'], local_time['zone'])
msg += '{}. *{}* - *{}*\n'.format(i, DisplayName.name(DisplayName.memberForID(member['ID'], ctx.message.guild)), time_str)
i += 1
if number < len(joinedList):
msg = '__**Last {} of {} Members to Join:**__\n\n'.format(number, len(joinedList))+msg
else:
msg = '__**Last {} Members to Join:**__\n\n'.format(len(joinedList))+msg
# Check for suppress
if suppress:
msg = Nullify.clean(msg)
await ctx.channel.send(msg)
@commands.command(pass_context=True)
async def firstservers(self, ctx, number : int = 10):
"""Lists the first servers I've joined - default is 10, max is 25."""
# Check if we're suppressing @here and @everyone mentions
if self.settings.getServerStat(ctx.message.guild, "SuppressMentions"):
suppress = True
else:
suppress = False
if number > 25:
number = 25
if number < 1:
await ctx.channel.send('Oookay - look! No servers! Just like you wanted!')
return
joinedList = []
for guild in self.bot.guilds:
botmember = DisplayName.memberForID(self.bot.user.id, guild)
joinedList.append({ 'Name' : guild.name, 'Joined' : botmember.joined_at, 'Members': len(guild.members) })
# sort the servers by join date
joinedList = sorted(joinedList, key=lambda x:x['Joined'])
i = 1
msg = ''
for member in joinedList:
if i > number:
break
# Get localized user time
local_time = UserTime.getUserTime(ctx.author, self.settings, member['Joined'])
time_str = "{} {}".format(local_time['time'], local_time['zone'])
if member['Members'] == 1:
msg += '{}. *{}* - *{}* - *(1 member)*\n'.format(i, member['Name'], time_str)
else:
msg += '{}. *{}* - *{}* - *({} members)*\n'.format(i, member['Name'], time_str, member['Members'])
i += 1
if number < len(joinedList):
msg = '__**First {} of {} Servers I Joined:**__\n\n'.format(number, len(joinedList))+msg
else:
msg = '__**First {} Servers I Joined:**__\n\n'.format(len(joinedList))+msg
# Check for suppress
if suppress:
msg = Nullify.clean(msg)
await ctx.channel.send(msg)
@commands.command(pass_context=True)
async def recentservers(self, ctx, number : int = 10):
"""Lists the most recent users to join - default is 10, max is 25."""
# Check if we're suppressing @here and @everyone mentions
if self.settings.getServerStat(ctx.message.guild, "SuppressMentions"):
suppress = True
else:
suppress = False
if number > 25:
number = 25
if number < 1:
await ctx.channel.send('Oookay - look! No servers! Just like you wanted!')
return
joinedList = []
for guild in self.bot.guilds:
botmember = DisplayName.memberForID(self.bot.user.id, guild)
joinedList.append({ 'Name' : guild.name, 'Joined' : botmember.joined_at, 'Members': len(guild.members) })
# sort the servers by join date
joinedList = sorted(joinedList, key=lambda x:x['Joined'], reverse=True)
i = 1
msg = ''
for member in joinedList:
if i > number:
break
# Get localized user time
local_time = UserTime.getUserTime(ctx.author, self.settings, member['Joined'])
time_str = "{} {}".format(local_time['time'], local_time['zone'])
if member['Members'] == 1:
msg += '{}. *{}* - *{}* - *(1 member)*\n'.format(i, member['Name'], time_str)
else:
msg += '{}. *{}* - *{}* - *({} members)*\n'.format(i, member['Name'], time_str, member['Members'])
i += 1
if number < len(joinedList):
msg = '__**Last {} of {} Servers I Joined:**__\n\n'.format(number, len(joinedList))+msg
else:
msg = '__**Last {} Servers I Joined:**__\n\n'.format(len(joinedList))+msg
# Check for suppress
if suppress:
msg = Nullify.clean(msg)
await ctx.channel.send(msg)
@commands.command(pass_context=True)
async def messages(self, ctx):
"""Lists the number of messages I've seen on this sever so far. (only applies after this module's inception, and if I'm online)"""
        messages = self.settings.getServerStat(ctx.message.guild, "TotalMessages")
        # Don't count the message that invoked this command
        messages = 0 if messages is None else int(messages) - 1
        self.settings.setServerStat(ctx.message.guild, "TotalMessages", messages)
if messages == 1:
await ctx.channel.send('So far, I\'ve witnessed *{:,} message!*'.format(messages))
else:
await ctx.channel.send('So far, I\'ve witnessed *{:,} messages!*'.format(messages))
@commands.command(pass_context=True)
async def allmessages(self, ctx):
"""Lists the number of messages I've seen on all severs so far. (only applies after this module's inception, and if I'm online)"""
messages = 0
for guild in self.bot.guilds:
temp = 0 if self.settings.getServerStat(guild, "TotalMessages") is None else self.settings.getServerStat(guild, "TotalMessages")
messages += int(temp)
messages -= 1
if messages == 1:
await ctx.channel.send('So far, I\'ve witnessed *{:,} message across all servers!*'.format(messages))
else:
await ctx.channel.send('So far, I\'ve witnessed *{:,} messages across all servers!*'.format(messages))
# Set our message count locally -1
messages = int(self.settings.getServerStat(ctx.message.guild, "TotalMessages"))
messages -= 1
self.settings.setServerStat(ctx.message.guild, "TotalMessages", messages)
| 2.546875 | 3 |
chess_commentary_model/transformers_model/dataset_preprocessing.py | Rseiji/TCC-2020 | 0 | 4072 | <filename>chess_commentary_model/transformers_model/dataset_preprocessing.py
"""Métodos de preprocessamento de testes individuais
"""
import pandas as pd
import numpy as np
import math
def test_1(df, seed=0):
"""training: balanced; test: balanced
training: 80k (40k 0, 40k 1)
test: 20k (10k 0, 10k 1)
"""
df_ones = df[df['label'] == 1]
df_zeros = df[df['label'] == 0]
df_ones = df_ones.sample(frac=1, random_state=seed).reset_index(drop=True)
df_zeros = df_zeros.sample(frac=1, random_state=seed).reset_index(drop=True)
df_ones_training = df_ones.loc[:40000]
df_zeros_training = df_zeros.loc[:40000]
df_ones_test = df_ones.loc[40000:50000]
df_zeros_test = df_zeros.loc[40000:50000]
df_training = pd.concat([df_ones_training, df_zeros_training])
df_training = df_training.sample(frac=1).reset_index(drop=True)
df_test = pd.concat([df_ones_test, df_zeros_test])
df_test = df_test.sample(frac=1).reset_index(drop=True)
sentences_train = df_training['comment'].tolist()
sentences_test = df_test['comment'].tolist()
labels_train = df_training['label'].tolist()
labels_test = df_test['label'].tolist()
return sentences_train, sentences_test, labels_train, labels_test
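# Example usage (illustrative; assumes a labelled dataframe with 'comment' and 'label' columns
# and a hypothetical file name):
#
#   df = pd.read_csv("commentary.csv")
#   sentences_train, sentences_test, labels_train, labels_test = test_1(df, seed=42)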
def test_2(df, seed=0):
"""training: balanced; test: unbalanced
training: 80k (40k 0, 40k 1)
    test: 20k (16k 0, 4k 1)
"""
df_ones = df[df['label'] == 1]
df_zeros = df[df['label'] == 0]
df_ones = df_ones.sample(frac=1, random_state=seed).reset_index(drop=True)
df_zeros = df_zeros.sample(frac=1, random_state=seed).reset_index(drop=True)
df_ones_training = df_ones.loc[:40000]
df_zeros_training = df_zeros.loc[:40000]
df_ones_test = df_ones.loc[40000:44000]
df_zeros_test = df_zeros.loc[40000:56000]
df_training = pd.concat([df_ones_training, df_zeros_training])
df_training = df_training.sample(frac=1).reset_index(drop=True)
df_test = pd.concat([df_ones_test, df_zeros_test])
df_test = df_test.sample(frac=1).reset_index(drop=True)
sentences_train = df_training['comment'].tolist()
sentences_test = df_test['comment'].tolist()
labels_train = df_training['label'].tolist()
labels_test = df_test['label'].tolist()
return sentences_train, sentences_test, labels_train, labels_test
def test_3(df, seed=0):
"""training: unbalanced; test: unbalanced
training: 80k (16k 1, 64k 0)
test: 20k (4k 1, 16k 0)
"""
df_ones = df[df['label'] == 1]
df_zeros = df[df['label'] == 0]
df_ones = df_ones.sample(frac=1, random_state=seed).reset_index(drop=True)
df_zeros = df_zeros.sample(frac=1, random_state=seed).reset_index(drop=True)
df_ones_training = df_ones.loc[:16000]
df_zeros_training = df_zeros.loc[:64000]
df_ones_test = df_ones.loc[16000:20000]
df_zeros_test = df_zeros.loc[64000:80000]
df_training = pd.concat([df_ones_training, df_zeros_training])
df_training = df_training.sample(frac=1).reset_index(drop=True)
df_test = pd.concat([df_ones_test, df_zeros_test])
df_test = df_test.sample(frac=1).reset_index(drop=True)
sentences_train = df_training['comment'].tolist()
sentences_test = df_test['comment'].tolist()
labels_train = df_training['label'].tolist()
labels_test = df_test['label'].tolist()
return sentences_train, sentences_test, labels_train, labels_test
##################################
## Tests on old dataset
##################################
def test_4(df, seed=0):
""" training: balanced; test: balanced
training: 58k (29k 0, 29k 1)
test: 14.5k (7.25k 0, 7.25k 1)
"""
df_ones = df[df['label'] == 1]
df_zeros = df[df['label'] == 0]
df_ones = df_ones.sample(frac=1, random_state=seed).reset_index(drop=True)
df_zeros = df_zeros.sample(frac=1, random_state=seed).reset_index(drop=True)
df_ones_training = df_ones.loc[:29000]
df_zeros_training = df_zeros.loc[:29000]
df_ones_test = df_ones.loc[29000:36250]
df_zeros_test = df_zeros.loc[29000:36250]
df_training = pd.concat([df_ones_training, df_zeros_training])
df_training = df_training.sample(frac=1).reset_index(drop=True)
df_test = pd.concat([df_ones_test, df_zeros_test])
df_test = df_test.sample(frac=1).reset_index(drop=True)
sentences_train = df_training['comment'].tolist()
sentences_test = df_test['comment'].tolist()
labels_train = df_training['label'].tolist()
labels_test = df_test['label'].tolist()
return sentences_train, sentences_test, labels_train, labels_test
def test_5(df, seed=0):
"""training: balanced; test: unbalanced
training: 58k (29000 0, 29000 1)
test: 14.5k (12905 0, 1595 1)
"""
df_ones = df[df['label'] == 1]
df_zeros = df[df['label'] == 0]
df_ones = df_ones.sample(frac=1, random_state=seed).reset_index(drop=True)
df_zeros = df_zeros.sample(frac=1, random_state=seed).reset_index(drop=True)
df_ones_training = df_ones.loc[:29000]
df_zeros_training = df_zeros.loc[:29000]
df_ones_test = df_ones.loc[29000:30595]
df_zeros_test = df_zeros.loc[29000:41905]
df_training = pd.concat([df_ones_training, df_zeros_training])
df_training = df_training.sample(frac=1).reset_index(drop=True)
df_test = pd.concat([df_ones_test, df_zeros_test])
df_test = df_test.sample(frac=1).reset_index(drop=True)
sentences_train = df_training['comment'].tolist()
sentences_test = df_test['comment'].tolist()
labels_train = df_training['label'].tolist()
labels_test = df_test['label'].tolist()
return sentences_train, sentences_test, labels_train, labels_test
def test_6(df, seed=0):
"""training: unbalanced; test: unbalanced
training: 58k (6380 1, 51620 0)
test: 14.5k (1595 1, 12905 0)
"""
df_ones = df[df['label'] == 1]
df_zeros = df[df['label'] == 0]
df_ones = df_ones.sample(frac=1, random_state=seed).reset_index(drop=True)
df_zeros = df_zeros.sample(frac=1, random_state=seed).reset_index(drop=True)
df_ones_training = df_ones.loc[:6380]
df_zeros_training = df_zeros.loc[:51620]
df_ones_test = df_ones.loc[6380:7975]
df_zeros_test = df_zeros.loc[51620:64525]
df_training = pd.concat([df_ones_training, df_zeros_training])
df_training = df_training.sample(frac=1).reset_index(drop=True)
df_test = pd.concat([df_ones_test, df_zeros_test])
df_test = df_test.sample(frac=1).reset_index(drop=True)
sentences_train = df_training['comment'].tolist()
sentences_test = df_test['comment'].tolist()
labels_train = df_training['label'].tolist()
labels_test = df_test['label'].tolist()
return sentences_train, sentences_test, labels_train, labels_test
| 2.78125 | 3 |
venv/Lib/site-packages/CoolProp/constants.py | kubakoziczak/gasSteamPowerPlant | 0 | 4073 | # This file is automatically generated by the generate_constants_module.py script in wrappers/Python.
# DO NOT MODIFY THE CONTENTS OF THIS FILE!
from __future__ import absolute_import
from . import _constants
INVALID_PARAMETER = _constants.INVALID_PARAMETER
igas_constant = _constants.igas_constant
imolar_mass = _constants.imolar_mass
iacentric_factor = _constants.iacentric_factor
irhomolar_reducing = _constants.irhomolar_reducing
irhomolar_critical = _constants.irhomolar_critical
iT_reducing = _constants.iT_reducing
iT_critical = _constants.iT_critical
irhomass_reducing = _constants.irhomass_reducing
irhomass_critical = _constants.irhomass_critical
iP_critical = _constants.iP_critical
iP_reducing = _constants.iP_reducing
iT_triple = _constants.iT_triple
iP_triple = _constants.iP_triple
iT_min = _constants.iT_min
iT_max = _constants.iT_max
iP_max = _constants.iP_max
iP_min = _constants.iP_min
idipole_moment = _constants.idipole_moment
iT = _constants.iT
iP = _constants.iP
iQ = _constants.iQ
iTau = _constants.iTau
iDelta = _constants.iDelta
iDmolar = _constants.iDmolar
iHmolar = _constants.iHmolar
iSmolar = _constants.iSmolar
iCpmolar = _constants.iCpmolar
iCp0molar = _constants.iCp0molar
iCvmolar = _constants.iCvmolar
iUmolar = _constants.iUmolar
iGmolar = _constants.iGmolar
iHelmholtzmolar = _constants.iHelmholtzmolar
iSmolar_residual = _constants.iSmolar_residual
iDmass = _constants.iDmass
iHmass = _constants.iHmass
iSmass = _constants.iSmass
iCpmass = _constants.iCpmass
iCp0mass = _constants.iCp0mass
iCvmass = _constants.iCvmass
iUmass = _constants.iUmass
iGmass = _constants.iGmass
iHelmholtzmass = _constants.iHelmholtzmass
iviscosity = _constants.iviscosity
iconductivity = _constants.iconductivity
isurface_tension = _constants.isurface_tension
iPrandtl = _constants.iPrandtl
ispeed_sound = _constants.ispeed_sound
iisothermal_compressibility = _constants.iisothermal_compressibility
iisobaric_expansion_coefficient = _constants.iisobaric_expansion_coefficient
ifundamental_derivative_of_gas_dynamics = _constants.ifundamental_derivative_of_gas_dynamics
ialphar = _constants.ialphar
idalphar_dtau_constdelta = _constants.idalphar_dtau_constdelta
idalphar_ddelta_consttau = _constants.idalphar_ddelta_consttau
ialpha0 = _constants.ialpha0
idalpha0_dtau_constdelta = _constants.idalpha0_dtau_constdelta
idalpha0_ddelta_consttau = _constants.idalpha0_ddelta_consttau
iBvirial = _constants.iBvirial
iCvirial = _constants.iCvirial
idBvirial_dT = _constants.idBvirial_dT
idCvirial_dT = _constants.idCvirial_dT
iZ = _constants.iZ
iPIP = _constants.iPIP
ifraction_min = _constants.ifraction_min
ifraction_max = _constants.ifraction_max
iT_freeze = _constants.iT_freeze
iGWP20 = _constants.iGWP20
iGWP100 = _constants.iGWP100
iGWP500 = _constants.iGWP500
iFH = _constants.iFH
iHH = _constants.iHH
iPH = _constants.iPH
iODP = _constants.iODP
iPhase = _constants.iPhase
iundefined_parameter = _constants.iundefined_parameter
INPUT_PAIR_INVALID = _constants.INPUT_PAIR_INVALID
QT_INPUTS = _constants.QT_INPUTS
PQ_INPUTS = _constants.PQ_INPUTS
QSmolar_INPUTS = _constants.QSmolar_INPUTS
QSmass_INPUTS = _constants.QSmass_INPUTS
HmolarQ_INPUTS = _constants.HmolarQ_INPUTS
HmassQ_INPUTS = _constants.HmassQ_INPUTS
DmolarQ_INPUTS = _constants.DmolarQ_INPUTS
DmassQ_INPUTS = _constants.DmassQ_INPUTS
PT_INPUTS = _constants.PT_INPUTS
DmassT_INPUTS = _constants.DmassT_INPUTS
DmolarT_INPUTS = _constants.DmolarT_INPUTS
HmolarT_INPUTS = _constants.HmolarT_INPUTS
HmassT_INPUTS = _constants.HmassT_INPUTS
SmolarT_INPUTS = _constants.SmolarT_INPUTS
SmassT_INPUTS = _constants.SmassT_INPUTS
TUmolar_INPUTS = _constants.TUmolar_INPUTS
TUmass_INPUTS = _constants.TUmass_INPUTS
DmassP_INPUTS = _constants.DmassP_INPUTS
DmolarP_INPUTS = _constants.DmolarP_INPUTS
HmassP_INPUTS = _constants.HmassP_INPUTS
HmolarP_INPUTS = _constants.HmolarP_INPUTS
PSmass_INPUTS = _constants.PSmass_INPUTS
PSmolar_INPUTS = _constants.PSmolar_INPUTS
PUmass_INPUTS = _constants.PUmass_INPUTS
PUmolar_INPUTS = _constants.PUmolar_INPUTS
HmassSmass_INPUTS = _constants.HmassSmass_INPUTS
HmolarSmolar_INPUTS = _constants.HmolarSmolar_INPUTS
SmassUmass_INPUTS = _constants.SmassUmass_INPUTS
SmolarUmolar_INPUTS = _constants.SmolarUmolar_INPUTS
DmassHmass_INPUTS = _constants.DmassHmass_INPUTS
DmolarHmolar_INPUTS = _constants.DmolarHmolar_INPUTS
DmassSmass_INPUTS = _constants.DmassSmass_INPUTS
DmolarSmolar_INPUTS = _constants.DmolarSmolar_INPUTS
DmassUmass_INPUTS = _constants.DmassUmass_INPUTS
DmolarUmolar_INPUTS = _constants.DmolarUmolar_INPUTS
FLUID_TYPE_PURE = _constants.FLUID_TYPE_PURE
FLUID_TYPE_PSEUDOPURE = _constants.FLUID_TYPE_PSEUDOPURE
FLUID_TYPE_REFPROP = _constants.FLUID_TYPE_REFPROP
FLUID_TYPE_INCOMPRESSIBLE_LIQUID = _constants.FLUID_TYPE_INCOMPRESSIBLE_LIQUID
FLUID_TYPE_INCOMPRESSIBLE_SOLUTION = _constants.FLUID_TYPE_INCOMPRESSIBLE_SOLUTION
FLUID_TYPE_UNDEFINED = _constants.FLUID_TYPE_UNDEFINED
iphase_liquid = _constants.iphase_liquid
iphase_supercritical = _constants.iphase_supercritical
iphase_supercritical_gas = _constants.iphase_supercritical_gas
iphase_supercritical_liquid = _constants.iphase_supercritical_liquid
iphase_critical_point = _constants.iphase_critical_point
iphase_gas = _constants.iphase_gas
iphase_twophase = _constants.iphase_twophase
iphase_unknown = _constants.iphase_unknown
iphase_not_imposed = _constants.iphase_not_imposed
NORMALIZE_GAS_CONSTANTS = _constants.NORMALIZE_GAS_CONSTANTS
CRITICAL_WITHIN_1UK = _constants.CRITICAL_WITHIN_1UK
CRITICAL_SPLINES_ENABLED = _constants.CRITICAL_SPLINES_ENABLED
SAVE_RAW_TABLES = _constants.SAVE_RAW_TABLES
ALTERNATIVE_TABLES_DIRECTORY = _constants.ALTERNATIVE_TABLES_DIRECTORY
ALTERNATIVE_REFPROP_PATH = _constants.ALTERNATIVE_REFPROP_PATH
ALTERNATIVE_REFPROP_HMX_BNC_PATH = _constants.ALTERNATIVE_REFPROP_HMX_BNC_PATH
ALTERNATIVE_REFPROP_LIBRARY_PATH = _constants.ALTERNATIVE_REFPROP_LIBRARY_PATH
REFPROP_DONT_ESTIMATE_INTERACTION_PARAMETERS = _constants.REFPROP_DONT_ESTIMATE_INTERACTION_PARAMETERS
REFPROP_IGNORE_ERROR_ESTIMATED_INTERACTION_PARAMETERS = _constants.REFPROP_IGNORE_ERROR_ESTIMATED_INTERACTION_PARAMETERS
REFPROP_USE_GERG = _constants.REFPROP_USE_GERG
REFPROP_USE_PENGROBINSON = _constants.REFPROP_USE_PENGROBINSON
MAXIMUM_TABLE_DIRECTORY_SIZE_IN_GB = _constants.MAXIMUM_TABLE_DIRECTORY_SIZE_IN_GB
DONT_CHECK_PROPERTY_LIMITS = _constants.DONT_CHECK_PROPERTY_LIMITS
HENRYS_LAW_TO_GENERATE_VLE_GUESSES = _constants.HENRYS_LAW_TO_GENERATE_VLE_GUESSES
PHASE_ENVELOPE_STARTING_PRESSURE_PA = _constants.PHASE_ENVELOPE_STARTING_PRESSURE_PA
R_U_CODATA = _constants.R_U_CODATA
VTPR_UNIFAC_PATH = _constants.VTPR_UNIFAC_PATH
SPINODAL_MINIMUM_DELTA = _constants.SPINODAL_MINIMUM_DELTA
OVERWRITE_FLUIDS = _constants.OVERWRITE_FLUIDS
OVERWRITE_DEPARTURE_FUNCTION = _constants.OVERWRITE_DEPARTURE_FUNCTION
OVERWRITE_BINARY_INTERACTION = _constants.OVERWRITE_BINARY_INTERACTION
USE_GUESSES_IN_PROPSSI = _constants.USE_GUESSES_IN_PROPSSI
ASSUME_CRITICAL_POINT_STABLE = _constants.ASSUME_CRITICAL_POINT_STABLE
VTPR_ALWAYS_RELOAD_LIBRARY = _constants.VTPR_ALWAYS_RELOAD_LIBRARY
FLOAT_PUNCTUATION = _constants.FLOAT_PUNCTUATION
| 1.210938 | 1 |
torch_datasets/samplers/balanced_batch_sampler.py | mingruimingrui/torch-datasets | 0 | 4074 | import random
import torch.utils.data.sampler
class BalancedBatchSampler(torch.utils.data.sampler.BatchSampler):
def __init__(
self,
dataset_labels,
batch_size=1,
steps=None,
n_classes=0,
n_samples=2
):
""" Create a balanced batch sampler for label based datasets
Args
dataset_labels : Labels of every entry from a dataset (in the same sequence)
batch_size : batch_size no explaination needed
            steps : Number of batches to generate (if None, then dataset_size / batch_size will be used)
n_classes : Number of classes
n_samples : Number of samples per class
*** If batch_size > n_classes * n_samples, rest of batch will be randomly filled
"""
self.batch_size = batch_size
self.steps = len(dataset_labels) // batch_size if steps is None else steps
self.n_classes = n_classes
self.n_samples = n_samples
# Create a label_to_entry_ids table
self.label_to_entry_ids = {}
for entry_id, label in enumerate(dataset_labels):
if label in self.label_to_entry_ids:
self.label_to_entry_ids[label].append(entry_id)
else:
self.label_to_entry_ids[label] = [entry_id]
# Subset the labels with more than n_samples entries
self.labels_subset = [label for (label, entry_ids) in self.label_to_entry_ids.items() if len(entry_ids) >= n_samples]
assert len(self.labels_subset) >= n_classes, 'Too little labels have {} entries, choose a smaller n_classes or n_samples'.format(n_samples)
def _make_batch_ids(self):
batch_ids = []
# Choose classes and entries
labels_choosen = random.sample(self.labels_subset, self.n_classes)
# Randomly sample n_samples entries from choosen labels
for l in labels_choosen:
batch_ids += random.sample(self.label_to_entry_ids[l], self.n_samples)
if len(batch_ids) < self.batch_size:
# Randomly sample remainder
labels_choosen = {l: None for l in labels_choosen}
remaining_entry_ids = []
for label, entry_ids in self.label_to_entry_ids.items():
if label not in labels_choosen:
remaining_entry_ids += entry_ids
batch_ids += random.sample(remaining_entry_ids, self.batch_size - len(batch_ids))
# Randomly shuffle batch ids
batch_ids = random.sample(batch_ids, self.batch_size)
batch_ids = torch.LongTensor(batch_ids)
return batch_ids
def __iter__(self):
self.count = 0
while self.count < self.steps:
self.count += 1
yield self._make_batch_ids()
def __len__(self):
return self.steps
| 3.15625 | 3 |
ambari-common/src/main/python/resource_management/libraries/functions/get_bare_principal.py | likenamehaojie/Apache-Ambari-ZH | 1,664 | 4075 | #!/usr/bin/env python
"""
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Ambari Agent
"""
import re
__all__ = ["get_bare_principal"]
def get_bare_principal(normalized_principal_name):
"""
Given a normalized principal name (nimbus/[email protected]) returns just the
primary component (nimbus)
:param normalized_principal_name: a string containing the principal name to process
:return: a string containing the primary component value or None if not valid
"""
bare_principal = None
if normalized_principal_name:
match = re.match(r"([^/@]+)(?:/[^@])?(?:@.*)?", normalized_principal_name)
if match:
bare_principal = match.group(1)
return bare_principal | 2.296875 | 2 |
04/cross_validation.01.py | study-machine-learning/dongheon.shin | 2 | 4076 | <filename>04/cross_validation.01.py<gh_stars>1-10
from sklearn import svm, metrics
import random
import re
def split(rows):
data = []
labels = []
for row in rows:
data.append(row[0:4])
labels.append(row[4])
return (data, labels)
def calculate_score(train, test):
train_data, train_label = split(train)
test_data, test_label = split(test)
classifier = svm.SVC()
classifier.fit(train_data, train_label)
predict = classifier.predict(test_data)
return metrics.accuracy_score(test_label, predict)
def to_number(n):
return float(n) if re.match(r"^[0-9\.]+$", n) else n
def to_column(line):
return list(map(to_number, line.strip().split(",")))
lines = open("iris.csv", "r", encoding="utf-8").read().split("\n")
csv = list(map(to_column, lines))
del csv[0]
random.shuffle(csv)
k = 5
csv_k = [[] for i in range(k)]
scores = []
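# Distribute the shuffled rows round-robin into k folds, then use each fold once as the
# held-out test set and the remaining folds as training data (k-fold cross-validation).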
for i in range(len(csv)):
csv_k[i % k].append(csv[i])
for test in csv_k:
train = []
for data in csv_k:
if test != data:
train += data
score = calculate_score(train, test)
scores.append(score)
print("score = ", scores)
print("avg = ", sum(scores) / len(scores))
| 2.875 | 3 |
third_party/org_specs2.bzl | wix/wix-oss-infra | 3 | 4077 | <gh_stars>1-10
load("@wix_oss_infra//:import_external.bzl", import_external = "safe_wix_scala_maven_import_external")
def dependencies():
import_external(
name = "org_specs2_specs2_fp_2_12",
artifact = "org.specs2:specs2-fp_2.12:4.8.3",
artifact_sha256 = "777962ca58054a9ea86e294e025453ecf394c60084c28bd61956a00d16be31a7",
srcjar_sha256 = "6b8bd1e7210754b768b68610709271c0dac29447936a976a2a9881389e6404ca",
deps = [
"@org_scala_lang_scala_library"
],
)
import_external(
name = "org_specs2_specs2_common_2_12",
artifact = "org.specs2:specs2-common_2.12:4.8.3",
artifact_sha256 = "3b08fecb9e21d3903e48b62cd95c19ea9253d466e03fd4cf9dc9227e7c368708",
srcjar_sha256 = "b2f148c75d3939b3cd0d58afddd74a8ce03077bb3ccdc93dae55bd9c3993e9c3",
deps = [
"@org_scala_lang_modules_scala_parser_combinators_2_12",
"@org_scala_lang_modules_scala_xml_2_12",
"@org_scala_lang_scala_library",
"@org_scala_lang_scala_reflect",
"@org_specs2_specs2_fp_2_12"
],
)
import_external(
name = "org_specs2_specs2_matcher_2_12",
artifact = "org.specs2:specs2-matcher_2.12:4.8.3",
artifact_sha256 = "aadf27b6d015572b2e3842627c09bf0797153dbb329262ea3bcbbce129d51ad8",
srcjar_sha256 = "01251acc28219aa17aabcb9a26a84e1871aa64980d335cd8f83c2bcea6f4f1be",
deps = [
"@org_scala_lang_scala_library",
"@org_specs2_specs2_common_2_12"
],
)
import_external(
name = "org_specs2_specs2_core_2_12",
artifact = "org.specs2:specs2-core_2.12:4.8.3",
artifact_sha256 = "f73f32156a711a4e83e696dc83e269c5a165d62cc3dd7c652617cb03d140d063",
srcjar_sha256 = "0e3cebfc7410051b70e627e35f13978add3d061b8f1233741f9b397638f193e9",
deps = [
"@org_scala_lang_scala_library",
"@org_scala_sbt_test_interface",
"@org_specs2_specs2_common_2_12",
"@org_specs2_specs2_matcher_2_12"
],
)
import_external(
name = "org_specs2_specs2_junit_2_12",
artifact = "org.specs2:specs2-junit_2.12:4.8.3",
artifact_sha256 = "5d7ad2c0b0bc142ea064edb7a1ea75ab7b17ad37e1a621ac7e578823845098e8",
srcjar_sha256 = "84edd1cd6291f6686638225fcbaff970ae36da006efabf2228255c2127b2290c",
deps = [
"@junit_junit",
"@org_scala_lang_scala_library",
"@org_scala_sbt_test_interface",
"@org_specs2_specs2_core_2_12"
],
)
| 1.46875 | 1 |
task/w2/trenirovka/12-rivnist 2.py | beregok/pythontask | 1 | 4078 | a = int(input())
b = int(input())
c = int(input())
d = int(input())
if a == 0 and b == 0:
print("INF")
else:
if (d - b * c / a) != 0 and (- b / a) == (- b // a):
print(- b // a)
else:
print("NO")
| 3.546875 | 4 |
src/reg_resampler.py | atif-hassan/Regression_ReSampling | 15 | 4079 | class resampler:
def __init__(self):
import pandas as pd
from sklearn.preprocessing import LabelEncoder
from collections import Counter
import numpy as np
self.bins = 3
self.pd = pd
self.LabelEncoder = LabelEncoder
self.Counter = Counter
self.X = 0
self.Y_classes = 0
self.target = 0
self.np = np
# This function adds classes to each sample and returns the class list as a dataframe/numpy array (as per input)
# It also merges classes as and when required
def fit(self, X, target, bins=3, min_n_samples=6, balanced_binning=False, verbose=2):
self.bins = bins
tmp = target
# If data is numpy, then convert it into pandas
if type(target) == int:
if target < 0:
target = X.shape[1]+target
tmp = target
self.X = self.pd.DataFrame()
for i in range(X.shape[1]):
if i!=target:
self.X[str(i)] = X[:,i]
self.X["target"] = X[:,target]
target = "target"
else:
self.X = X.copy()
# Use qcut if balanced binning is required
if balanced_binning:
self.Y_classes = self.pd.qcut(self.X[target], q=self.bins, precision=0)
else:
self.Y_classes = self.pd.cut(self.X[target], bins=self.bins)
# Pandas outputs ranges after binning. Convert ranges to classes
le = self.LabelEncoder()
self.Y_classes = le.fit_transform(self.Y_classes)
# Merge classes if number of neighbours is more than the number of samples
classes_count = list(map(list, self.Counter(self.Y_classes).items()))
classes_count = sorted(classes_count, key = lambda x: x[0])
mid_point = len(classes_count)
# Logic for merging
for i in range(len(classes_count)):
if classes_count[i][1] < min_n_samples:
self.Y_classes[self.np.where(self.Y_classes == classes_count[i][0])[0]] = classes_count[i-1][0]
if verbose > 0:
print("INFO: Class " + str(classes_count[i][0]) + " has been merged into Class " + str(classes_count[i-1][0]) + " due to low number of samples")
classes_count[i][0] = classes_count[i-1][0]
if verbose > 0:
print()
# Perform label-encoding once again
# Avoids class skipping after merging
le = self.LabelEncoder()
self.Y_classes = le.fit_transform(self.Y_classes)
# Pretty print
if verbose > 1:
print("Class Distribution:\n-------------------")
classes_count = list(map(list, self.Counter(self.Y_classes).items()))
classes_count = sorted(classes_count, key = lambda x: x[0])
for class_, count in classes_count:
print(str(class_)+": "+str(count))
print()
# Finally concatenate and return as dataframe or numpy
# Based on what type of target was sent
self.X["classes"] = self.Y_classes
if type(tmp) == int:
self.target = tmp
else:
self.target = target
return self.Y_classes
# This function performs the re-sampling
def resample(self, sampler_obj, trainX, trainY):
# If classes haven't yet been created, then run the "fit" function
if type(self.Y_classes) == int:
print("Error! Run fit method first!!")
return None
# Finally, perform the re-sampling
resampled_data, _ = sampler_obj.fit_resample(trainX, trainY)
if type(resampled_data).__module__ == 'numpy':
resampled_data = self.pd.DataFrame(resampled_data, columns=self.X.drop("classes", axis=1).columns)
# Return the correct X and Y
if type(self.target) == int:
return resampled_data.drop("target", axis=1).values, resampled_data["target"].values
else:
return resampled_data.drop(self.target, axis=1), resampled_data[self.target]
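# Example usage (illustrative; the names and the SMOTE sampler are assumptions -- any object
# exposing fit_resample() works):
#
#   from imblearn.over_sampling import SMOTE
#   rs = resampler()
#   y_classes = rs.fit(train_df, target="price", bins=5)
#   # Pass the full frame (including the target column); resample() splits X and y back out.
#   X_res, y_res = rs.resample(SMOTE(random_state=0), train_df, y_classes)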
| 3.203125 | 3 |
get_data/speech_commands.py | patrick-kidger/generalised_shapelets | 32 | 4080 | <reponame>patrick-kidger/generalised_shapelets
import os
import pathlib
import sklearn.model_selection
import tarfile
import torch
import torchaudio
import urllib.request
here = pathlib.Path(__file__).resolve().parent
def _split_data(tensor, stratify):
# 0.7/0.15/0.15 train/val/test split
(train_tensor, testval_tensor,
train_stratify, testval_stratify) = sklearn.model_selection.train_test_split(tensor, stratify,
train_size=0.7,
random_state=0,
shuffle=True,
stratify=stratify)
val_tensor, test_tensor = sklearn.model_selection.train_test_split(testval_tensor,
train_size=0.5,
random_state=1,
shuffle=True,
stratify=testval_stratify)
return train_tensor, val_tensor, test_tensor
def _save_data(dir, **tensors):
for tensor_name, tensor_value in tensors.items():
torch.save(tensor_value, str(dir / tensor_name) + '.pt')
def download():
base_base_loc = str(here / '../experiments/data')
if not os.path.exists(base_base_loc):
raise RuntimeError("data directory does not exist. Please create a directory called 'data' in the 'experiments'"
" directory. (We're going to put a lot of data there, so we don't make it automatically - "
"thus giving you the opportunity to make it a symlink rather than a normal directory, so "
"that the data can be stored elsewhere if you wish.)")
base_loc = base_base_loc + '/SpeechCommands'
loc = base_loc + '/speech_commands.tar.gz'
if os.path.exists(loc):
return
if not os.path.exists(base_loc):
os.mkdir(base_loc)
urllib.request.urlretrieve('http://download.tensorflow.org/data/speech_commands_v0.02.tar.gz',
loc)
with tarfile.open(loc, 'r') as f:
f.extractall(base_loc)
def _process_data():
base_loc = here / '..' / 'experiments' / 'data' / 'SpeechCommands'
X = torch.empty(34975, 16000, 1)
y = torch.empty(34975, dtype=torch.long)
batch_index = 0
y_index = 0
for foldername in ('yes', 'no', 'up', 'down', 'left', 'right', 'on', 'off', 'stop', 'go'):
loc = base_loc / foldername
for filename in os.listdir(loc):
audio, _ = torchaudio.load_wav(loc / filename, channels_first=False,
                                           normalization=False)  # for forward compatibility if they fix it
audio = audio / 2 ** 15 # Normalization argument doesn't seem to work so we do it manually.
# A few samples are shorter than the full length; for simplicity we discard them.
if len(audio) != 16000:
continue
X[batch_index] = audio
y[batch_index] = y_index
batch_index += 1
y_index += 1
assert batch_index == 34975, "batch_index is {}".format(batch_index)
audio_X = X
# X is of shape (batch=34975, length=16000, channels=1)
X = torchaudio.transforms.MFCC(log_mels=True)(X.squeeze(-1)).transpose(1, 2).detach()
# X is of shape (batch=34975, length=81, channels=40). For some crazy reason it requires a gradient, so detach.
train_X, _, _ = _split_data(X, y)
out = []
means = []
stds = []
for Xi, train_Xi in zip(X.unbind(dim=-1), train_X.unbind(dim=-1)):
mean = train_Xi.mean()
std = train_Xi.std()
means.append(mean)
stds.append(std)
out.append((Xi - mean) / (std + 1e-5))
X = torch.stack(out, dim=-1)
train_audio_X, val_audio_X, test_audio_X = _split_data(audio_X, y)
train_X, val_X, test_X = _split_data(X, y)
train_y, val_y, test_y = _split_data(y, y)
return train_X, val_X, test_X, train_y, val_y, test_y, torch.stack(means), torch.stack(stds), train_audio_X, \
val_audio_X, test_audio_X
def main():
download()
(train_X, val_X, test_X, train_y, val_y, test_y, means, stds, train_audio_X, val_audio_X,
test_audio_X) = _process_data()
loc = here / '..' / 'experiments' / 'data' / 'speech_commands_data'
if not os.path.exists(loc):
os.mkdir(loc)
_save_data(loc, train_X=train_X, val_X=val_X, test_X=test_X, train_y=train_y, val_y=val_y, test_y=test_y,
means=means, stds=stds, train_audio_X=train_audio_X, val_audio_X=val_audio_X, test_audio_X=test_audio_X)
if __name__ == '__main__':
main()
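# Loading sketch (added for illustration; the paths mirror the _save_data() calls in
# main(), and the shapes follow the comments in _process_data()):
#
#   import pathlib, torch
#   loc = pathlib.Path('experiments/data/speech_commands_data')
#   train_X = torch.load(str(loc / 'train_X.pt'))   # normalized MFCCs, shape (batch, 81, 40)
#   train_y = torch.load(str(loc / 'train_y.pt'))   # integer labels 0-9, one per command word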
| 2.796875 | 3 |
app/endpoints/products.py | duch94/spark_crud_test | 0 | 4081 | <reponame>duch94/spark_crud_test
from datetime import datetime
from typing import List
from flask import Blueprint, jsonify, request, json
from app.models.products import Product, Category, products_categories
from app import db
products_blueprint = Blueprint('products', __name__)
def create_or_get_categories(p: dict) -> List[Category]:
"""
    Get existing Category objects from the database or create new ones otherwise
:param p: payload of request
:return: list of categories
"""
    received_categories: List[Category] = [Category(name=cat) for cat in p['categories']]
    categories = []
    for cat in received_categories:
exists = db.session.query(db.exists().where(Category.name == cat.name)).all()[0][0]
if exists:
existing_category = Category.query.filter(Category.name == cat.name).all()[0]
categories.append(existing_category)
else:
categories.append(cat)
return categories
@products_blueprint.route('/products', methods=['GET'])
def get_products():
return jsonify({
'results': [p.serialized for p in Product.query.all()]
})
@products_blueprint.route('/create_product', methods=['POST'])
def create_product():
data = request.get_data().decode('utf-8')
payload = json.loads(data)
datetime_format = '%Y-%m-%d %H:%M:%S'
if len(payload['categories']) < 1 or len(payload['categories']) > 5:
return '{"status": "error", "msg": "categories number must be between 1 and 5"}', 400
categories = create_or_get_categories(payload)
try:
new_prod = Product(name=payload['name'],
rating=float(payload['rating']),
featured=bool(payload['featured'] if 'featured' in payload.keys() else None),
expiration_date=(datetime.strptime(payload['expiration_date'], datetime_format)
if ('expiration_date' in payload.keys()) else None),
brand_id=int(payload['brand_id']),
items_in_stock=int(payload['items_in_stock']),
receipt_date=(datetime.strptime(payload['receipt_date'], datetime_format)
if ('receipt_date' in payload.keys()) else None))
    except TypeError as e:
        return '{"status": "error", "msg": "TypeError occurred: check values of fields"}', 400
    except KeyError as e:
        return '{"status": "error", "msg": "field %s has not been found, but is required"}' % str(e), 400
if new_prod.rating > 8.0:
new_prod.featured = True
[cat.products.append(new_prod) for cat in categories]
[db.session.add(cat) for cat in categories]
db.session.commit()
return jsonify({"status": "ok", "msg": "product received"})
@products_blueprint.route('/update_product', methods=['PUT'])
def update_product():
data = request.get_data().decode('utf-8')
payload = json.loads(data)
datetime_format = '%Y-%m-%d %H:%M:%S'
product = Product.query.filter(Product.id == payload['id'])
if product:
if 'name' in payload.keys():
product.update({'name': payload['name']})
if 'featured' in payload.keys():
product.update({'featured': bool(payload['featured'])})
        if 'rating' in payload.keys():
            product.update({'rating': float(payload['rating'])})
            if float(payload['rating']) > 8.0:
                product.update({'featured': True})
if 'items_in_stock' in payload.keys():
product.update({'items_in_stock': int(payload['items_in_stock'])})
if 'receipt_date' in payload.keys():
product.update({'receipt_date': datetime.strptime(payload['receipt_date'], datetime_format)})
if 'brand' in payload.keys():
product.update({'brand': int(payload['brand'])})
if 'categories' in payload.keys():
categories = create_or_get_categories(payload)
db.session.query(products_categories).filter(
products_categories.c.product_id == int(payload['id'])).delete(synchronize_session=False)
product_obj = product.all()[0]
[cat.products.append(product_obj) for cat in categories]
[db.session.add(cat) for cat in categories]
if 'expiration_date' in payload.keys():
product.update({'expiration_date': datetime.strptime(payload['expiration_date'], datetime_format)})
db.session.commit()
return jsonify({"status": "ok", "msg": "product updated"})
else:
return '{"status": "error", "msg": "no product found with given id"}', 404
@products_blueprint.route('/delete_product', methods=['DELETE'])
def delete_product():
data = request.get_data().decode('utf-8')
p = json.loads(data)
products_result = Product.query.filter(Product.id == int(p['id'])).delete(synchronize_session=False)
products_categories_result = db.session.query(products_categories).filter(
products_categories.c.product_id == int(p['id'])).delete(synchronize_session=False)
db.session.commit()
if products_result == 1:
return jsonify({"status": "ok",
"msg": "product deleted, also %d product_categories relations deleted"
% products_categories_result})
else:
return jsonify({"status": "warning", "msg": "%d products deleted, also %d product_categories relations deleted"
% (products_result, products_categories_result)})
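# Example requests (illustrative sketch; host, port and any blueprint URL prefix are
# assumptions, as the app factory registering this blueprint is not shown here):
#
#   curl -X POST http://localhost:5000/create_product \
#        -d '{"name": "Milk", "rating": 8.5, "brand_id": 1, "items_in_stock": 10, "categories": ["dairy"]}'
#   curl -X PUT http://localhost:5000/update_product -d '{"id": 1, "rating": 9.1}'
#   curl -X DELETE http://localhost:5000/delete_product -d '{"id": 1}'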
| 2.703125 | 3 |
util/config/validators/test/test_validate_bitbucket_trigger.py | giuseppe/quay | 2,027 | 4082 | import pytest
from httmock import urlmatch, HTTMock
from util.config import URLSchemeAndHostname
from util.config.validator import ValidatorContext
from util.config.validators import ConfigValidationException
from util.config.validators.validate_bitbucket_trigger import BitbucketTriggerValidator
from test.fixtures import *
@pytest.mark.parametrize(
"unvalidated_config",
[
(ValidatorContext({})),
(ValidatorContext({"BITBUCKET_TRIGGER_CONFIG": {}})),
(ValidatorContext({"BITBUCKET_TRIGGER_CONFIG": {"CONSUMER_KEY": "foo"}})),
(ValidatorContext({"BITBUCKET_TRIGGER_CONFIG": {"CONSUMER_SECRET": "foo"}})),
],
)
def test_validate_invalid_bitbucket_trigger_config(unvalidated_config, app):
validator = BitbucketTriggerValidator()
with pytest.raises(ConfigValidationException):
validator.validate(unvalidated_config)
def test_validate_bitbucket_trigger(app):
url_hit = [False]
@urlmatch(netloc=r"bitbucket.org")
def handler(url, request):
url_hit[0] = True
return {
"status_code": 200,
"content": "oauth_token=foo&oauth_token_secret=bar",
}
with HTTMock(handler):
validator = BitbucketTriggerValidator()
url_scheme_and_hostname = URLSchemeAndHostname("http", "localhost:5000")
unvalidated_config = ValidatorContext(
{
"BITBUCKET_TRIGGER_CONFIG": {
"CONSUMER_KEY": "foo",
"CONSUMER_SECRET": "bar",
},
},
url_scheme_and_hostname=url_scheme_and_hostname,
)
validator.validate(unvalidated_config)
assert url_hit[0]
| 2.0625 | 2 |
Refraction.py | silkoch42/Geometric-Optics-from-QM | 0 | 4083 | <reponame>silkoch42/Geometric-Optics-from-QM
# -*- coding: utf-8 -*-
"""
Created on Fri Mar 15 16:51:16 2019
@author: Silvan
"""
import numpy as np
import scipy
import matplotlib.pyplot as plt
k=1000
n1=2.0
n2=1.0
alpha=np.pi/6.0
beta=np.arcsin(n2/n1*np.sin(alpha))
ya=1.0
xa=-ya*np.tan(alpha)
yb=-1.0
xb=-yb*np.tan(beta)
def s(x):
return n1*np.sqrt((xa-x)**2+ya**2)+n2*np.sqrt((xb-x)**2+yb**2)
def kernel(xa,xb):
    return 1.0/np.sqrt(xa**2+1)**(3/2.0)+1.0/np.sqrt(xb**2+1)**(3/2.0)
def K(R):
L=1000 #Maximum Number of subdivisions for integral calculations
eps=0.01
N=50
x,dx=np.linspace(0.01,R,N,retstep=True)
real=np.empty(N)
imag=np.empty(N)
real[0]=scipy.integrate.quad(lambda x: np.cos(k*s(x)),-x[0],x[0],epsrel=eps,limit=L)[0]
imag[0]=scipy.integrate.quad(lambda x: np.sin(k*s(x)),-x[0],x[0],epsrel=eps,limit=L)[0]
for i in range(1,N):
r1=scipy.integrate.quad(lambda x: np.cos(k*s(x)),-x[i]-dx,-x[i],epsrel=eps,limit=L)[0]
r2=scipy.integrate.quad(lambda x: np.cos(k*s(x)),x[i],x[i]+dx,epsrel=eps,limit=L)[0]
real[i]=real[i-1]+r1+r2
i1=scipy.integrate.quad(lambda x: np.sin(k*s(x)),-x[i]-dx,-x[i],epsrel=eps,limit=L)[0]
i2=scipy.integrate.quad(lambda x: np.sin(k*s(x)),x[i],x[i]+dx,epsrel=eps,limit=L)[0]
imag[i]=imag[i-1]+i1+i2
return np.sqrt(real**2+imag**2),x,real,imag
K2,x,r,i=K(3)
M=np.mean(K2[25:])
plt.plot(x,K2/M,label=r'$|\int_{-R}^{R}e^{i k s(x)}dx|^2$')
#plt.errorbar(x,K2/M,0.1*K2/M)
plt.xlabel(r'Integration range $R$')
plt.ylabel('Detection probabilty')
plt.legend(loc='best')
plt.text(2.4,0.2,r'$k=1000$')
#plt.text(1.1,0.5,r'$|\int_{-R}^{R}e^{i k s(x)}dx|^2$',fontsize=20)
plt.savefig('refraction_v3',dpi=200)
plt.show()
#N=20
#
#dx=np.linspace(0,10,N)
#
#P=np.ones(N)
#
#for i in range(N):
# print(i+1)
# P[i]=trans_amp(dx[i])
#
#
#plt.figure(1)
#plt.plot(dx,P/np.mean(P[20:]))
#plt.text(4.0,0.5,r'$|\int_{-\Delta x}^{\Delta x} e^{ik s(x)}dx$|',fontsize=20)
#plt.ylabel('Transition Amplitude')
#plt.xlabel(r'Integration Interval $ \Delta x$')
##plt.axis([0,10,0,1.1])
#plt.legend(loc='best')
##plt.savefig('refraction',dpi=200)
#plt.show()
#x=np.linspace(-5,5,100)
#
#plt.figure(2)
#plt.plot(x,s(x))
#plt.show()
#
#d=np.linspace(0,5,100)
#xa=-d/2
#xb=d/2
#plt.figure(3)
#plt.plot(d,kernel(xa,xb)**2)
#plt.show() | 2.390625 | 2 |
readthedocs/docsitalia/management/commands/clear_elasticsearch.py | italia/readthedocs.org | 19 | 4084 | <reponame>italia/readthedocs.org<gh_stars>10-100
"""Remove the readthedocs elasticsearch index."""
from __future__ import absolute_import
from django.conf import settings
from django.core.management.base import BaseCommand
from elasticsearch import Elasticsearch
class Command(BaseCommand):
"""Clear elasticsearch index."""
def handle(self, *args, **options):
"""handle command."""
e_s = Elasticsearch(settings.ES_HOSTS)
e_s.indices.delete(index='_all')
| 1.585938 | 2 |
train.py | vnbot2/BigGAN-PyTorch | 0 | 4085 | """ BigGAN: The Authorized Unofficial PyTorch release
Code by <NAME> and <NAME>
This code is an unofficial reimplementation of
"Large-Scale GAN Training for High Fidelity Natural Image Synthesis,"
by <NAME>, <NAME>, and <NAME> (arXiv 1809.11096).
Let's go.
"""
import datetime
import time
import torch
import dataset
import BigGAN
import train_fns
import utils
from common import *
# IMG_SIZE = 64
# IMG_SIZE_2 = IMG_SIZE * 2
def run(config):
# Update the config dict as necessary
# This is for convenience, to add settings derived from the user-specified
# configuration into the config-dict (e.g. inferring the number of classes
# and size of the images from the dataset, passing in a pytorch object
# for the activation specified as a string)
config['resolution'] = IMG_SIZE
config['n_classes'] = 1
config['G_activation'] = utils.activation_dict[config['G_nl']]
config['D_activation'] = utils.activation_dict[config['D_nl']]
# By default, skip init if resuming training.
if config['resume']:
print('Skipping initialization for training resumption...')
config['skip_init'] = True
config = utils.update_config_roots(config)
device = 'cuda'
# Seed RNG
utils.seed_rng(config['seed'])
# Prepare root folders if necessary
utils.prepare_root(config)
# Setup cudnn.benchmark for free speed
torch.backends.cudnn.benchmark = True
experiment_name = (config['experiment_name'] if config['experiment_name']
else 'generative_dog_images')
print('Experiment name is %s' % experiment_name)
G = BigGAN.Generator(**config).to(device)
D = BigGAN.Discriminator(**config).to(device)
# if config['parallel']:
G = nn.DataParallel(G)
D = nn.DataParallel(D)
# If using EMA, prepare it
if config['ema']:
print('Preparing EMA for G with decay of {}'.format(
config['ema_decay']))
G_ema = BigGAN.Generator(**{**config, 'skip_init': True,
'no_optim': True}).to(device)
G_ema = nn.DataParallel(G_ema)
ema = utils.ema(G, G_ema, config['ema_decay'], config['ema_start'])
else:
G_ema, ema = None, None
GD = BigGAN.G_D(G, D)
print(G)
print(D)
print('Number of params in G: {} D: {}'.format(
*[sum([p.data.nelement() for p in net.parameters()]) for net in [G, D]]))
# Prepare state dict, which holds things like epoch # and itr #
state_dict = {'itr': 0, 'epoch': 0, 'save_num': 0, 'config': config}
# If loading from a pre-trained model, load weights
if config['resume']:
print('Loading weights...')
utils.load_weights(G, D, state_dict,
config['weights_root'], experiment_name,
config['load_weights'] if config['load_weights'] else None,
G_ema if config['ema'] else None)
# Prepare data; the Discriminator's batch size is all that needs to be passed
# to the dataloader, as G doesn't require dataloading.
# Note that at every loader iteration we pass in enough data to complete
# a full D iteration (regardless of number of D steps and accumulations)
D_batch_size = (config['batch_size'] *
config['num_D_steps'] * config['num_D_accumulations'])
loaders = dataset.get_data_loaders(
data_root=config['data_root'],
label_root=config['label_root'],
batch_size=D_batch_size,
num_workers=config['num_workers'],
shuffle=config['shuffle'],
pin_memory=config['pin_memory'],
drop_last=True,
load_in_mem=config['load_in_mem'],
mask_out=config['mask_out']
)
# Prepare noise and randomly sampled label arrays
# Allow for different batch sizes in G
G_batch_size = max(config['G_batch_size'], config['batch_size'])
num_samples = config['num_fixed_samples']
z_, y_ = utils.prepare_z_y(
num_samples, G.module.dim_z, config['n_classes'], device=device, fp16=config['G_fp16'])
# Prepare a fixed z & y to see individual sample evolution throghout training
fixed_z, fixed_y = utils.prepare_z_y(
num_samples, G.module.dim_z, config['n_classes'], device=device, fp16=config['G_fp16'])
fixed_z.sample_()
fixed_y.sample_()
# Loaders are loaded, prepare the training function
train = train_fns.create_train_fn(
G, D, GD, z_, y_, ema, state_dict, config)
print('Beginning training at epoch %d...' % state_dict['epoch'])
start_time = time.perf_counter()
loader = loaders[0]
total_iters = config['num_epochs'] * len(loader)
# Train for specified number of epochs, although we mostly track G iterations.
pbar = tqdm(total=total_iters)
for _ in range(state_dict['itr']):
pbar.update()
timer = mmcv.Timer()
timer.start()
start_itr = state_dict['itr']
for epoch in range(state_dict['epoch'], config['num_epochs']):
for i, data in enumerate(loader):
x, y = data['img'], data['label']
# Increment the iteration counter
state_dict['itr'] += 1
# Make sure G and D are in training mode, just in case they got set to eval
# For D, which typically doesn't have BN, this shouldn't matter much.
G.train()
D.train()
if config['ema']:
G_ema.train()
x, y = x.to(device), y.to(device)
metrics = train(x, y)
if not (state_dict['itr'] % config['log_interval']):
curr_time = timer.since_start()
curr_time_str = datetime.datetime.fromtimestamp(
curr_time).strftime('%H:%M:%S')
                # distance remaining / (distance traveled / time elapsed)
eta = (
total_iters - state_dict['itr']) // ((state_dict['itr']-start_itr) / (curr_time+1))
eta_str = datetime.datetime.fromtimestamp(
eta).strftime('%H:%M:%S')
log = "[{}] [{}] [{} / {}] Ep {}, ".format(
curr_time_str, eta_str, state_dict['itr'], total_iters, epoch)
log += ', '.join(['%s : %+4.3f' % (key, metrics[key])
for key in metrics])
pbar.set_description(log)
# print(log)
# Save weights and copies as configured at specified interval
if not (state_dict['itr'] % config['sample_every']):
if config['G_eval_mode']:
# print('Switching G to eval mode...')
G.eval()
train_fns.save_and_sample(G, D, G_ema, z_, y_, fixed_z, fixed_y,
state_dict, config, experiment_name, save_weight=False)
if not (state_dict['itr'] % config['save_every']):
if config['G_eval_mode']:
# print('Switching G to eval mode...')
G.eval()
train_fns.save_and_sample(G, D, G_ema, z_, y_, fixed_z, fixed_y,
state_dict, config, experiment_name, save_weight=True)
pbar.update()
# Increment epoch counter at end of epoch
state_dict['epoch'] += 1
def main():
# parse command line and run
parser = utils.prepare_parser()
config = vars(parser.parse_args())
print(config)
run(config)
if __name__ == '__main__':
main()
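# Example invocation (illustrative only; the exact flag names come from utils.prepare_parser(),
# which is not shown here, so they are assumed to match the config keys used above):
#
#   python train.py --data_root data/ --batch_size 64 --num_epochs 100 --ema --experiment_name dogs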
| 2.453125 | 2 |
geocamUtil/tempfiles.py | geocam/geocamUtilWeb | 4 | 4086 | <reponame>geocam/geocamUtilWeb<gh_stars>1-10
# __BEGIN_LICENSE__
#Copyright (c) 2015, United States Government, as represented by the
#Administrator of the National Aeronautics and Space Administration.
#All rights reserved.
# __END_LICENSE__
import os
import time
import random
import shutil
from glob import glob
import traceback
import sys
from geocamUtil import FileUtil
from django.conf import settings
def getTempName(prefix, suffix=''):
return '%s/%s-%s-%s%s' % (settings.TMP_DIR,
prefix,
time.strftime('%Y-%m-%d-%H%M'),
'%04x' % random.getrandbits(16),
suffix)
def deleteStaleFiles():
files = glob('%s/*' % settings.TMP_DIR)
now = time.time()
for f in files:
if (now - os.stat(f).st_ctime > settings.GEOCAM_UTIL_DELETE_TMP_FILE_WAIT_SECONDS and
not f.endswith('/README.txt')):
try:
os.unlink(f)
except OSError:
traceback.print_exc()
print >> sys.stderr, '[tempfiles.deleteStaleFiles: could not unlink %s]' % f
def makeTempDir(prefix):
d = getTempName(prefix)
if not os.path.exists(settings.TMP_DIR):
FileUtil.mkdirP(settings.TMP_DIR)
os.system('chmod go+rw %s' % settings.TMP_DIR)
deleteStaleFiles()
FileUtil.mkdirP(d)
return d
def initZipDir(prefix):
return makeTempDir(prefix)
def finishZipDir(zipDir):
zipFile = '%s.zip' % zipDir
oldDir = os.getcwd()
os.chdir(os.path.dirname(settings.TMP_DIR))
os.system('zip -r %s %s' % (zipFile, os.path.basename(zipDir)))
os.chdir(oldDir)
shutil.rmtree(zipDir)
return zipFile
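# Usage sketch (added for illustration; the prefix and file name are arbitrary):
#
#   zip_dir = initZipDir('export')          # fresh temp dir under settings.TMP_DIR
#   with open(os.path.join(zip_dir, 'result.txt'), 'w') as f:
#       f.write('...')
#   zip_path = finishZipDir(zip_dir)        # zips the dir, deletes it, returns '<zip_dir>.zip'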
| 2 | 2 |
Ex1:Tests/ex2.py | Lludion/Exercises-SE | 0 | 4087 | # This file contains (at least) five errors.
# Instructions:
# - test until reaching 100% coverage;
# - fix the bugs;"
# - send the diff or the git repository by email."""
import hypothesis
from hypothesis import given, settings
from hypothesis.strategies import integers, lists
class BinHeap:
    # binary heap of integers
    def __init__(self):
        # initializes a binary heap of integers with a single element 0
        self.heapList = [0]
        self.currentSize = 1  # size of the heapList list (invariant)
def percUp(self,i):
#upward percolation until 0 reached or father is bigger
while i // 2 > 0 and self.heapList[i] < self.heapList[i // 2]:
tmp = self.heapList[i // 2]
self.heapList[i // 2] = self.heapList[i]
self.heapList[i] = tmp
i //= 2
def insert(self,k):
#inserting a new value into the heap
self.heapList.append(k)
self.percUp(self.currentSize)
self.currentSize = self.currentSize + 1
def percDown(self,i):
while (i * 2) < self.currentSize:#while I have a child
mc = self.minChild(i)#mc is the index of the smallest
if self.heapList[i] > self.heapList[mc]:
tmp = self.heapList[i]
self.heapList[i] = self.heapList[mc]
self.heapList[mc] = tmp
i = mc
def minChild(self,i):
if i * 2 >= self.currentSize or i == 0:
print("No Child. None is returned.")
return
if i * 2 + 1 >= self.currentSize:
return i * 2
else:
if self.heapList[i*2] < self.heapList[i*2+1]:
return i * 2
else:
return i * 2 + 1
def delMin(self):
try:
rval = self.heapList[1]
except IndexError:
print("Empty heap. Nothing is changed. None is returned.")
return
self.currentSize = self.currentSize - 1
self.heapList[1] = self.heapList[self.currentSize]
self.heapList.pop()
self.percDown(1)
return rval
def buildHeap(self,alist):
#creates a whole heap from a list, by percolating all its elements
i = 1
self.currentSize = len(alist) + 1# + 1
        self.heapList = [0] + alist # remove the [:]
while (i < self.currentSize):
self.percUp(i)
i += 1
def assert_isheaplist(x,val,lon,HL):
assert ((x * 2 + 1 > lon) or (x * 2 + 1 == lon and HL[2*x] >= val) or (HL[2*x] >= val and HL[2*x+1] >= val))
def assert_goodheap(tau,lon):
for x in range(1,lon):
assert_isheaplist(x,tau.heapList[x],tau.currentSize,tau.heapList)
def test_init():
tau = BinHeap()
assert tau.heapList == [0]
assert tau.currentSize == 1
@given(integers())
@settings(max_examples=100)
def test_percup(integer):
gamma = [0,1,3,2,9,99,2,3,10,9,103,102,3,2,3,3]
tau = BinHeap()
tau.currentsize = 16
tau.heapList = gamma[:]
tau.percUp(15)
assert tau.heapList == gamma[:]
tau.heapList[15] = 2
tau.percUp(15)
print(tau.heapList)
assert tau.heapList == [0,1,3,2,9,99,2,2,10,9,103,102,3,2,3,3]
assert tau.currentsize == 16
tau.heapList.append(8)
tau.currentsize = 17
tau.percUp(16)
assert tau.heapList == [0,1,3,2,8,99,2,2,9,9,103,102,3,2,3,3,10]
tau.heapList.append(integer)
tau.currentsize = 18
tau.percUp(17)
assert tau.heapList[17] >= tau.heapList[8]
assert tau.heapList[8] >= tau.heapList[4]
@given(lists(elements=integers()))
@settings(max_examples=1000)
def test_build(L):
tau = BinHeap()
tau.buildHeap(L)
assert tau.currentSize == len(L) + 1
assert sorted(tau.heapList) == sorted(L+[0])
assert_goodheap(tau,len(L)+1)
#for x in range(1,len(L) + 1):
# assert_isheaplist(x,tau.heapList[x],tau.currentSize,tau.heapList)
@given(lists(elements=integers()),integers())
@settings(max_examples=1000)
def test_insert(L,i):
tau = BinHeap()
tau.buildHeap(L)
tau.insert(i)
assert_goodheap(tau,len(L)+1)
@given(lists(elements=integers()),integers())
@settings(max_examples=100)
def test_percDown(L,i):
tau = BinHeap()
L += [10]
tau.buildHeap(L)
tau.heapList[1] = i
tau.percDown(1)
for x in range(1,len(L) + 1):
for _ in range(len(L)):
tau.percDown(x)
#then we test that we got a well-ordered heap
assert_goodheap(tau,len(L)+1)
@given(lists(elements=integers()))
@settings(max_examples=400,deadline=None)
def test_delmin(L):
L += [10]
tau = BinHeap()
assert tau.delMin() is None
tau.buildHeap(L)
#print(L)
#print("sorted",sorted(L),"\n")
#print("TAU ", tau.heapList,"\n")
assert tau.delMin() == min(L)
@given(lists(elements=integers()),integers())
@settings(max_examples=400)
def test_minChild(L,i):
tau = BinHeap()
assert tau.minChild(abs(i)) is None
tau.buildHeap(2*L+[0,1])
assert tau.minChild(len(L)+1) is not None
@given(lists(elements=integers()),lists(elements=integers()))
@settings(max_examples=400,deadline=None)
def test_general(L,K):
tau = BinHeap()
    tau.buildHeap(L)  # heap built from L
    for k in K:tau.insert(k)  # add the elements of K
    assert_goodheap(tau,tau.currentSize)
    x = []
    while tau.currentSize > 1:x.append(tau.delMin())  # remove all the elements
    assert x == sorted(L + K)  # checks that delMin indeed yields the minimum
    assert tau.delMin() is None
    x = []
    tau.buildHeap(K)
    for l in L:  # tests whether a sequence of insertions/deletions maintains the structure
tau.delMin()
tau.insert(l)
assert_goodheap(tau,tau.currentSize) | 3.5625 | 4 |
python/snewpy/snowglobes.py | svalder/snewpy | 0 | 4088 | <gh_stars>0
# -*- coding: utf-8 -*-
"""The ``snewpy.snowglobes`` module contains functions for interacting with SNOwGLoBES.
`SNOwGLoBES <https://github.com/SNOwGLoBES/snowglobes>`_ can estimate detected
event rates from a given input supernova neutrino flux. It supports many
different neutrino detectors, detector materials and interaction channels.
There are three basic steps to using SNOwGLoBES from SNEWPY:
* **Generating input files for SNOwGLoBES:**
  There are two ways to do this: either generate a time series or a fluence file. Both take the supernova simulation model as input.
  The former evaluates the neutrino flux at each time step, while the latter computes the neutrino flux integrated over the time bin (the fluence).
The result is a compressed .tar file containing all individual input files.
* **Running SNOwGLoBES:**
This step convolves the fluence generated in the previous step with the cross-sections for the interaction channels happening in various detectors supported by SNOwGLoBES.
It takes into account the effective mass of the detector as well as a smearing matrix describing the energy-dependent detection efficiency.
The output gives the number of events detected as a function of energy for each interaction channel, integrated in a given time window (or time bin), or in a snapshot in time.
* **Collating SNOwGLoBES outputs:**
This step puts together all the interaction channels and time bins evaluated by SNOwGLoBES in a single file (for each detector and for each time bin).
  The output tables allow building the detected neutrino energy spectrum and the neutrino time distribution, for each interaction channel or for the sum of all channels.
"""
import io
import logging
import os
import re
import tarfile
from pathlib import Path
from tempfile import TemporaryDirectory
import matplotlib as mpl
import matplotlib.pyplot as plt
import numpy as np
from astropy import units as u
from tqdm.auto import tqdm
import snewpy.models
from snewpy.flavor_transformation import *
from snewpy.neutrino import Flavor, MassHierarchy
from snewpy.snowglobes_interface import SNOwGLoBES
logger = logging.getLogger(__name__)
def generate_time_series(model_path, model_type, transformation_type, d, output_filename=None, ntbins=30, deltat=None):
"""Generate time series files in SNOwGLoBES format.
This version will subsample the times in a supernova model, produce energy
tables expected by SNOwGLoBES, and compress the output into a tarfile.
Parameters
----------
model_path : str
Input file containing neutrino flux information from supernova model.
model_type : str
Format of input file. Matches the name of the corresponding class in :py:mod:`snewpy.models`.
transformation_type : str
Name of flavor transformation. See snewpy.flavor_transformation documentation for possible values.
d : int or float
Distance to supernova in kpc.
output_filename : str or None
Name of output file. If ``None``, will be based on input file name.
ntbins : int
Number of time slices. Will be ignored if ``deltat`` is also given.
deltat : astropy.Quantity or None
Length of time slices.
Returns
-------
str
Path of compressed .tar file with neutrino flux data.
"""
model_class = getattr(snewpy.models.ccsn, model_type)
# Choose flavor transformation. Use dict to associate the transformation name with its class.
flavor_transformation_dict = {'NoTransformation': NoTransformation(), 'AdiabaticMSW_NMO': AdiabaticMSW(mh=MassHierarchy.NORMAL), 'AdiabaticMSW_IMO': AdiabaticMSW(mh=MassHierarchy.INVERTED), 'NonAdiabaticMSWH_NMO': NonAdiabaticMSWH(mh=MassHierarchy.NORMAL), 'NonAdiabaticMSWH_IMO': NonAdiabaticMSWH(mh=MassHierarchy.INVERTED), 'TwoFlavorDecoherence': TwoFlavorDecoherence(), 'ThreeFlavorDecoherence': ThreeFlavorDecoherence(), 'NeutrinoDecay_NMO': NeutrinoDecay(mh=MassHierarchy.NORMAL), 'NeutrinoDecay_IMO': NeutrinoDecay(mh=MassHierarchy.INVERTED)}
flavor_transformation = flavor_transformation_dict[transformation_type]
model_dir, model_file = os.path.split(os.path.abspath(model_path))
snmodel = model_class(model_path)
# Subsample the model time. Default to 30 time slices.
tmin = snmodel.get_time()[0]
tmax = snmodel.get_time()[-1]
if deltat is not None:
dt = deltat
ntbins = int((tmax-tmin)/dt)
else:
dt = (tmax - tmin) / (ntbins+1)
tedges = np.arange(tmin/u.s, tmax/u.s, dt/u.s)*u.s
times = 0.5*(tedges[1:] + tedges[:-1])
# Generate output.
if output_filename is not None:
tfname = output_filename + 'kpc.tar.bz2'
else:
model_file_root, _ = os.path.splitext(model_file) # strip extension (if present)
tfname = model_file_root + '.' + transformation_type + '.{:.3f},{:.3f},{:d}-{:.1f}'.format(tmin, tmax, ntbins, d) + 'kpc.tar.bz2'
with tarfile.open(os.path.join(model_dir, tfname), 'w:bz2') as tf:
#creates file in tar archive that gives information on parameters
output = '\n'.join(map(str, transformation_type)).encode('ascii')
tf.addfile(tarfile.TarInfo(name='parameterinfo'), io.BytesIO(output))
MeV = 1.60218e-6 * u.erg
energy = np.linspace(0, 100, 501) * MeV # 1MeV
# Loop over sampled times.
for i, t in enumerate(times):
osc_spectra = snmodel.get_transformed_spectra(t, energy, flavor_transformation)
osc_fluence = {}
table = []
table.append('# TBinMid={:g}sec TBinWidth={:g}s EBinWidth=0.2MeV Fluence at Earth for this timebin in neutrinos per cm^2'.format(t, dt))
table.append('# E(GeV) NuE NuMu NuTau aNuE aNuMu aNuTau')
# Generate energy + number flux table.
for j, E in enumerate(energy):
for flavor in Flavor:
osc_fluence[flavor] = osc_spectra[flavor][j] * dt * 0.2 * MeV / (4.*np.pi*(d*1000*3.086e+18)**2)
s = '{:17.8E}'.format(E/(1e3 * MeV))
s = '{}{:17.8E}'.format(s, osc_fluence[Flavor.NU_E])
s = '{}{:17.8E}'.format(s, osc_fluence[Flavor.NU_X])
s = '{}{:17.8E}'.format(s, osc_fluence[Flavor.NU_X])
s = '{}{:17.8E}'.format(s, osc_fluence[Flavor.NU_E_BAR])
s = '{}{:17.8E}'.format(s, osc_fluence[Flavor.NU_X_BAR])
s = '{}{:17.8E}'.format(s, osc_fluence[Flavor.NU_X_BAR])
table.append(s)
logging.debug(s)
# Encode energy/flux table and output to file in tar archive.
output = '\n'.join(table).encode('ascii')
extension = ".dat"
model_file_root, _ = os.path.splitext(model_file)
filename = model_file_root + '.tbin{:01d}.'.format(i+1) + transformation_type + \
'.{:.3f},{:.3f},{:01d}-{:.1f}kpc{}'.format(tmin/u.s, tmax/u.s, ntbins, d, extension)
info = tarfile.TarInfo(name=filename)
info.size = len(output)
tf.addfile(info, io.BytesIO(output))
return os.path.join(model_dir, tfname)
def generate_fluence(model_path, model_type, transformation_type, d, output_filename=None, tstart=None, tend=None):
"""Generate fluence files in SNOwGLoBES format.
This version will subsample the times in a supernova model, produce energy
tables expected by SNOwGLoBES, and compress the output into a tarfile.
Parameters
----------
model_path : str
Input file containing neutrino flux information from supernova model.
model_type : str
Format of input file. Matches the name of the corresponding class in :py:mod:`snewpy.models`.
transformation_type : str
Name of flavor transformation. See snewpy.flavor_transformation documentation for possible values.
d : int or float
Distance to supernova in kpc.
output_filename : str or None
Name of output file. If ``None``, will be based on input file name.
tstart : astropy.Quantity or None
Start of time interval to integrate over, or list of start times of the time series bins.
tend : astropy.Quantity or None
End of time interval to integrate over, or list of end times of the time series bins.
Returns
-------
str
Path of compressed .tar file with neutrino flux data.
"""
model_class = getattr(snewpy.models.ccsn, model_type)
# Choose flavor transformation. Use dict to associate the transformation name with its class.
flavor_transformation_dict = {'NoTransformation': NoTransformation(), 'AdiabaticMSW_NMO': AdiabaticMSW(mh=MassHierarchy.NORMAL), 'AdiabaticMSW_IMO': AdiabaticMSW(mh=MassHierarchy.INVERTED), 'NonAdiabaticMSWH_NMO': NonAdiabaticMSWH(mh=MassHierarchy.NORMAL), 'NonAdiabaticMSWH_IMO': NonAdiabaticMSWH(mh=MassHierarchy.INVERTED), 'TwoFlavorDecoherence': TwoFlavorDecoherence(), 'ThreeFlavorDecoherence': ThreeFlavorDecoherence(), 'NeutrinoDecay_NMO': NeutrinoDecay(mh=MassHierarchy.NORMAL), 'NeutrinoDecay_IMO': NeutrinoDecay(mh=MassHierarchy.INVERTED)}
flavor_transformation = flavor_transformation_dict[transformation_type]
model_dir, model_file = os.path.split(os.path.abspath(model_path))
snmodel = model_class(model_path)
#set the timings up
#default if inputs are None: full time window of the model
if tstart is None:
tstart = snmodel.get_time()[0]
tend = snmodel.get_time()[-1]
try:
if len(tstart/u.s) > 0:
t0 = tstart[0]
t1 = tend[-1]
nbin = len(tstart/u.s)
except:
t0 = tstart
t1 = tend
nbin = 1
times = 0.5*(tstart + tend)
model_times = snmodel.get_time()
model_tstart = model_times*1.0
model_tend = model_times*1.0
model_tstart[0] = model_times[0]
for i in range(1, len(model_times), 1):
model_tstart[i] = 0.5*(model_times[i]+model_times[i-1])
model_tend[i-1] = model_tstart[i]
model_tend[len(model_times)-1] = model_times[-1]
if nbin > 1:
starting_index = np.zeros(len(times), dtype=np.int64)
ending_index = np.zeros(len(times), dtype=np.int64)
for i in range(len(tstart)):
starting_index[i] = next(j for j, t in enumerate(model_tend) if t > tstart[i])
ending_index[i] = next(j for j, t in enumerate(model_tend) if t >= tend[i])
else:
starting_index = [next(j for j, t in enumerate(model_tend) if t > tstart)]
ending_index = [next(j for j, t in enumerate(model_tend) if t >= tend)]
# Generate output.
if output_filename is not None:
tfname = output_filename+'.tar.bz2'
else:
model_file_root, _ = os.path.splitext(model_file) # strip extension (if present)
tfname = model_file_root + '.' + transformation_type + '.{:.3f},{:.3f},{:d}-{:.1f}'.format(t0, t1, nbin, d) + 'kpc.tar.bz2'
with tarfile.open(os.path.join(model_dir, tfname), 'w:bz2') as tf:
#creates file in tar archive that gives information on parameters
output = '\n'.join(map(str, transformation_type)).encode('ascii')
tf.addfile(tarfile.TarInfo(name='parameterinfo'), io.BytesIO(output))
MeV = 1.60218e-6 * u.erg
energy = np.linspace(0, 100, 501) * MeV
# Loop over sampled times.
for i in range(nbin):
if nbin > 1:
ta = tstart[i]
tb = tend[i]
t = times[i]
dt = tb-ta
else:
ta = tstart
tb = tend
t = times
dt = tb-ta
#first time bin of model in requested interval
osc_spectra = snmodel.get_transformed_spectra(model_times[starting_index[i]], energy, flavor_transformation)
if dt < model_tend[starting_index[i]]-ta:
dt = dt
else:
for flavor in Flavor:
osc_spectra[flavor] *= (model_tend[starting_index[i]]-ta)
#intermediate time bins of model in requested interval
for j in range(starting_index[i]+1, ending_index[i], 1):
temp_spectra = snmodel.get_transformed_spectra(model_times[j], energy, flavor_transformation)
for flavor in Flavor:
osc_spectra[flavor] += temp_spectra[flavor]*(model_tend[j]-model_tstart[j])
#last time bin of model in requested interval
temp_spectra = snmodel.get_transformed_spectra(
model_times[ending_index[i]], energy, flavor_transformation)
for flavor in Flavor:
osc_spectra[flavor] += temp_spectra[flavor]*(tb-model_tstart[ending_index[i]])
for flavor in Flavor:
osc_spectra[flavor] /= (tb-ta)
osc_fluence = {}
table = []
table.append('# TBinMid={:g}sec TBinWidth={:g}s EBinWidth=0.2MeV Fluence at Earth for this timebin in neutrinos per cm^2'.format(t, dt))
table.append('# E(GeV) NuE NuMu NuTau aNuE aNuMu aNuTau')
# Generate energy + number flux table.
for j, E in enumerate(energy):
for flavor in Flavor:
osc_fluence[flavor] = osc_spectra[flavor][j] * dt * 0.2 * MeV / (4.*np.pi*(d*1000*3.086e+18)**2)
s = '{:17.8E}'.format(E/(1e3 * MeV))
s = '{}{:17.8E}'.format(s, osc_fluence[Flavor.NU_E])
s = '{}{:17.8E}'.format(s, osc_fluence[Flavor.NU_X])
s = '{}{:17.8E}'.format(s, osc_fluence[Flavor.NU_X])
s = '{}{:17.8E}'.format(s, osc_fluence[Flavor.NU_E_BAR])
s = '{}{:17.8E}'.format(s, osc_fluence[Flavor.NU_X_BAR])
s = '{}{:17.8E}'.format(s, osc_fluence[Flavor.NU_X_BAR])
table.append(s)
logging.debug(s)
# Encode energy/flux table and output to file in tar archive.
output = '\n'.join(table).encode('ascii')
extension = ".dat"
if output_filename is not None:
if nbin > 1:
filename = output_filename+"_"+str(i)+extension
else:
filename = output_filename+extension
else:
model_file_root, _ = os.path.splitext(model_file) # strip extension (if present)
filename = model_file_root + '.tbin{:01d}.'.format(i+1) + transformation_type + \
'.{:.3f},{:.3f},{:01d}-{:.1f}kpc{}'.format(t0, t1, nbin, d, extension)
info = tarfile.TarInfo(name=filename)
info.size = len(output)
tf.addfile(info, io.BytesIO(output))
return os.path.join(model_dir, tfname)
def simulate(SNOwGLoBESdir, tarball_path, detector_input="all", verbose=False):
"""Takes as input the neutrino flux files and configures and runs the supernova script inside SNOwGLoBES, which outputs calculated event rates expected for a given (set of) detector(s). These event rates are given as a function of the neutrino energy and time, for each interaction channel.
Parameters
----------
SNOwGLoBESdir : str
Path to directory where SNOwGLoBES is installed.
tarball_path : str
Path of compressed .tar file produced e.g. by ``generate_time_series()`` or ``generate_fluence()``.
detector_input : str
Name of detector. If ``"all"``, will use all detectors supported by SNOwGLoBES.
verbose : bool
Whether to generate verbose output, e.g. for debugging.
"""
sng = SNOwGLoBES(SNOwGLoBESdir)
if detector_input == 'all':
detector_input = list(sng.detectors)
detector_input.remove('d2O')
elif isinstance(detector_input,str):
detector_input = [detector_input]
result = {}
#Extracts data from tarfile and sets up lists of paths and fluxfilenames for later use
with TemporaryDirectory(prefix='snowglobes') as tempdir:
with tarfile.open(tarball_path) as tar:
tar.extractall(tempdir)
flux_files = list(Path(tempdir).glob('*.dat'))
if len(detector_input)>0:
detector_input = tqdm(detector_input, desc='Detectors', leave=False)
for det in detector_input:
res=sng.run(flux_files, det)
result[det]=dict(zip((f.stem for f in flux_files),res))
# save result to file for re-use in collate()
cache_file = tarball_path[:tarball_path.rfind('.tar')] + '.npy'
logging.info(f'Saving simulation results to {cache_file}')
np.save(cache_file, result)
return result
re_chan_label = re.compile(r'nu(e|mu|tau)(bar|)_([A-Z][a-z]*)(\d*)_?(.*)')
def get_channel_label(c):
mapp = {'nc':'NeutralCurrent',
'ibd':'Inverse Beta Decay',
'e':r'${\nu}_x+e^-$'}
def gen_label(m):
flv,bar,Nuc,num,res = m.groups()
if flv!='e':
flv='\\'+flv
if bar:
bar='\\'+bar
s = f'${bar}{{\\nu}}_{flv}$ '+f'${{}}^{{{num}}}{Nuc}$ '+res
return s
if c in mapp:
return mapp[c]
else:
return re_chan_label.sub(gen_label, c)
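# A couple of worked examples of the mapping above (added as documentation):
#   get_channel_label('ibd')      -> 'Inverse Beta Decay'
#   get_channel_label('nue_Ar40') -> '${\nu}_e$ ${}^{40}Ar$ ' (LaTeX label for nu_e on argon-40)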
def collate(SNOwGLoBESdir, tarball_path, detector_input="all", skip_plots=False, verbose=False, remove_generated_files=True):
"""Collates SNOwGLoBES output files and generates plots or returns a data table.
Parameters
----------
SNOwGLoBESdir : str
Path to directory where SNOwGLoBES is installed.
tarball_path : str
Path of compressed .tar file produced e.g. by ``generate_time_series()`` or ``generate_fluence()``.
detector_input : str
Name of detector. If ``"all"``, will use all detectors supported by SNOwGLoBES.
skip_plots: bool
If False, it gives as output the plot of the energy distribution for each time bin and for each interaction channel.
verbose : bool
Whether to generate verbose output, e.g. for debugging.
remove_generated_files: bool
Remove the output files from SNOwGLoBES, collated files, and .png's made for this snewpy run.
Returns
-------
dict
Dictionary of data tables: One table per time bin; each table contains in the first column the energy bins, in the remaining columns the number of events for each interaction channel in the detector.
"""
def aggregate_channels(table, **patterns):
#rearrange the table to have only channel column
levels = list(table.columns.names)
levels.remove('channel')
t = table.stack(levels)
for name,pattern in patterns.items():
#get channels which contain `like`
t_sel = t.filter(like=pattern)
#sum over them and save to a separate column
t_agg = t_sel.sum(axis='columns')
#drop processed channels
t.drop(t_sel.columns, axis='columns',inplace=True)
t[name]=t_agg #fill the column
#return table with the original levels order
t = t.unstack(levels)
t = t.reorder_levels(table.columns.names, axis=1)
return t
def do_plot(table, params):
#plotting the events from given table
flux,det,weighted,smeared = params
for c in table.columns:
if table[c].max() > 0.1:
plt.plot(table[c],drawstyle='steps',label=get_channel_label(c), lw=1)
plt.xlim(right=0.10)
plt.ylim(bottom=0.10)
plt.yscale('log')
plt.legend(bbox_to_anchor=(0.5, 0.5, 0.5, 0.5), loc='best', borderaxespad=0) # formats complete graph
smear_title = 'Interaction' if smeared=='unsmeared' else 'Detected'
plt.title(f'{flux} {det.capitalize()} {weighted.capitalize()} {smear_title} Events')
if smeared=='smeared':
plt.xlabel('Detected Energy (GeV)')
plt.ylabel('Events')
else:
plt.xlabel('Neutrino Energy (GeV)')
plt.ylabel('Interaction Events')
#read the results from storage
cache_file = tarball_path[:tarball_path.rfind('.tar')] + '.npy'
logging.info(f'Reading tables from {cache_file}')
tables = np.load(cache_file, allow_pickle=True).tolist()
#This output is similar to what produced by:
#tables = simulate(SNOwGLoBESdir, tarball_path,detector_input)
#dict for old-style results, for backward compatibiity
results = {}
#save collated files:
with TemporaryDirectory(prefix='snowglobes') as tempdir:
tempdir = Path(tempdir)
for det in tables:
results[det] = {}
for flux,t in tables[det].items():
t = aggregate_channels(t,nc='nc_',e='_e')
for w in ['weighted','unweighted']:
for s in ['smeared','unsmeared']:
table = t[w][s]
filename_base = f'{flux}_{det}_events_{s}_{w}'
filename = tempdir/f'Collated_{filename_base}.dat'
#save results to text files
with open(filename,'w') as f:
f.write(table.to_string(float_format='%23.15g'))
#format the results for the output
header = 'Energy '+' '.join(list(table.columns))
data = table.to_numpy().T
index = table.index.to_numpy()
data = np.concatenate([[index],data])
results[filename.name] = {'header':header,'data':data}
#optionally plot the results
if skip_plots is False:
plt.figure(dpi=300)
do_plot(table,(flux,det,w,s))
filename = tempdir/f'{filename_base}_log_plot.png'
plt.savefig(filename.with_suffix('.png'), dpi=300, bbox_inches='tight')
#Make a tarfile with the condensed data files and plots
output_name = Path(tarball_path).stem
output_name = output_name[:output_name.rfind('.tar')]+'_SNOprocessed'
output_path = Path(tarball_path).parent/(output_name+'.tar.gz')
with tarfile.open(output_path, "w:gz") as tar:
for file in tempdir.iterdir():
tar.add(file,arcname=output_name+'/'+file.name)
logging.info(f'Created archive: {output_path}')
return results
| 2.453125 | 2 |
rlcycle/dqn_base/loss.py | cyoon1729/Rlcycle | 128 | 4089 | from typing import List, Tuple
from omegaconf import DictConfig
import torch
import torch.nn as nn
import torch.nn.functional as F
from rlcycle.common.abstract.loss import Loss
class DQNLoss(Loss):
"""Compute double DQN loss"""
def __init__(self, hyper_params: DictConfig, use_cuda: bool):
Loss.__init__(self, hyper_params, use_cuda)
def __call__(
self, networks: Tuple[nn.Module, ...], data: Tuple[torch.Tensor, ...]
) -> Tuple[torch.Tensor, ...]:
network, target_network = networks
states, actions, rewards, next_states, dones = data
q_value = network.forward(states).gather(1, actions)
with torch.no_grad():
next_q = torch.max(target_network.forward(next_states), 1)[0].unsqueeze(1)
n_step_gamma = self.hyper_params.gamma ** self.hyper_params.n_step
target_q = rewards + (1 - dones) * n_step_gamma * next_q
element_wise_loss = F.smooth_l1_loss(
q_value, target_q.detach(), reduction="none"
)
return element_wise_loss
class QRLoss(Loss):
"""Compute quantile regression loss"""
def __init__(self, hyper_params: DictConfig, use_cuda: bool):
Loss.__init__(self, hyper_params, use_cuda)
def __call__(
self, networks: Tuple[nn.Module, ...], data: Tuple[torch.Tensor, ...],
) -> Tuple[torch.Tensor, ...]:
network, target_network = networks
states, actions, rewards, next_states, dones = data
z_dists = network.forward(states)
z_dists = z_dists[list(range(states.size(0))), actions.view(-1)]
with torch.no_grad():
next_z = target_network.forward(next_states)
next_actions = torch.max(next_z.mean(2), dim=1)[1]
next_z = next_z[list(range(states.size(0))), next_actions]
n_step_gamma = self.hyper_params.gamma ** self.hyper_params.n_step
target_z = rewards + (1 - dones) * n_step_gamma * next_z
distance = target_z - z_dists
quantile_huber_loss = (
network.tau - (distance.detach() < 0).float()
).abs() * self.huber_loss(distance)
element_wise_loss = torch.mean(quantile_huber_loss, dim=1, keepdim=True)
return element_wise_loss
@staticmethod
def huber_loss(x: List[torch.Tensor], k: float = 1.0):
return torch.where(x.abs() <= k, 0.5 * x.pow(2), k * (x.abs() - 0.5 * k))
class CategoricalLoss(Loss):
"""Compute C51 loss"""
def __init__(self, hyper_params: DictConfig, use_cuda: bool):
Loss.__init__(self, hyper_params, use_cuda)
def __call__(
self, networks: Tuple[nn.Module, ...], data: Tuple[torch.Tensor, ...]
) -> Tuple[torch.Tensor, ...]:
network, target_network = networks
states, actions, rewards, next_states, dones = data
batch_size = states.size(0)
offset = (
torch.linspace(0, (batch_size - 1) * network.num_atoms, batch_size)
.long()
.unsqueeze(1)
.expand(batch_size, network.num_atoms)
)
if self.use_cuda:
offset = offset.cuda()
z_dists = network.forward(states)
z_dists = z_dists[list(range(states.size(0))), actions.view(-1)]
with torch.no_grad():
next_z = target_network.forward(next_states)
next_actions = torch.max(next_z.mean(2), dim=1)[1]
next_z = next_z[list(range(states.size(0))), next_actions]
n_step_gamma = self.hyper_params.gamma ** self.hyper_params.n_step
target_z = rewards + (1 - dones) * n_step_gamma * network.support
target_z = torch.clamp(target_z, min=network.v_min, max=network.v_max)
target_proj = self.dist_projection(network, next_z, target_z, offset)
log_dist = torch.log(z_dists)
element_wise_loss = -(target_proj * log_dist).sum(1)
return element_wise_loss
def dist_projection(
self,
network: nn.Module,
next_z: torch.Tensor,
target_z: torch.Tensor,
offset: torch.Tensor,
) -> torch.Tensor:
b = (target_z - network.v_min) / network.delta_z
lb = b.floor().long()
ub = b.ceil().long()
proj_dist = torch.zeros(next_z.size())
if self.use_cuda:
proj_dist = proj_dist.cuda()
proj_dist.view(-1).index_add_(
0, (lb + offset).view(-1), (next_z * (ub.float() - b)).view(-1)
)
proj_dist.view(-1).index_add_(
0, (ub + offset).view(-1), (next_z * (b - lb.float())).view(-1)
)
return proj_dist
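# Usage sketch (added for illustration): each loss returns element-wise values so the caller
# can reduce them itself, e.g. weighting by prioritized-replay importance weights; the weight
# tensor below is an assumption, not something computed in this file.
#
#   loss_fn = DQNLoss(hyper_params, use_cuda=torch.cuda.is_available())
#   element_wise_loss = loss_fn((network, target_network),
#                               (states, actions, rewards, next_states, dones))
#   loss = (element_wise_loss * importance_weights).mean()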
| 2.34375 | 2 |
scripts/gap_filling_viewer.py | raphischer/probgf | 3 | 4090 | <gh_stars>1-10
"""viewer application which allows to interactively view spatio-temporal gap filling results"""
import os
import argparse
from datetime import datetime, timedelta
from tkinter import Canvas, Tk, Button, RAISED, DISABLED, SUNKEN, NORMAL
import numpy as np
from PIL import Image, ImageTk
import probgf.media as media
class MainWindow():
def next(self, event=None):
self.curr_img = (self.curr_img + 1) % len(self.imgs_orig)
self.refresh()
def prev(self, event=None):
self.curr_img = (self.curr_img - 1) % len(self.imgs_orig)
self.refresh()
def click_wheel(self, event):
self.start_drag = (event.x + self.shift_x, event.y + self.shift_y)
def click_left(self, event):
if not self.over_button:
self.prev()
def click_right(self, event):
if not self.over_button:
self.next()
def refresh(self):
zoom = float(self.zoom) / 100
self.start_x = int(self.img_w_f / 2 - self.img_w_f / zoom / 2) + self.shift_x
self.end_x = int(self.start_x + self.img_w_f / zoom)
self.start_y = int(self.img_w_f / 2 - self.img_w_f / zoom / 2) + self.shift_y
self.end_y = int(self.start_y + self.img_w_f / zoom)
if not self.mask_toggle:
self.b_masks.config(relief=RAISED)
img1 = self.imgs_orig[self.curr_img]
img2 = self.imgs_pred[self.curr_img]
else:
self.b_masks.config(relief=SUNKEN)
img1 = self.imgs_orig_m[self.curr_img]
img2 = self.imgs_pred_m[self.curr_img]
img1 = img1.crop((self.start_x, self.start_y, self.end_x, self.end_y)).resize((self.img_w, self.img_w), Image.ANTIALIAS)
img2 = img2.crop((self.start_x, self.start_y, self.end_x, self.end_y)).resize((self.img_w, self.img_w), Image.ANTIALIAS)
self.imgs_orig_v[self.curr_img] = ImageTk.PhotoImage(img1)
self.imgs_pred_v[self.curr_img] = ImageTk.PhotoImage(img2)
self.canvas.itemconfig(self.i_left, image = self.imgs_orig_v[self.curr_img])
self.canvas.itemconfig(self.i_right, image = self.imgs_pred_v[self.curr_img])
self.canvas.itemconfig(self.i_list, image = self.imagelists[self.curr_img])
self.canvas.itemconfig(self.day_info, text='{} - cloud cover {:06.2f}% - estimated MAE {}'.format(self.dates[self.curr_img],
self.cc[self.curr_img] * 100,
self.errors[self.curr_img]))
        if self.zoom == 100:
            self.canvas.itemconfig(self.zoom_label, text='')
            self.b_reset.config(state=DISABLED)
        else:
            self.canvas.itemconfig(self.zoom_label, text='ZOOM: {:3d}%'.format(self.zoom))
            self.b_reset.config(state=NORMAL)
def zoomer(self, event):
if event.num == 4 or event.delta == 120 or event.keysym == 'plus':
self.zoom += 20
elif event.delta == 240:
self.zoom += 40
elif event.delta == 360:
self.zoom += 60
else:
if self.zoom - 20 >= 100:
self.zoom -= 20
if self.zoom == 100:
self.reset_transform()
self.refresh()
def drag_roi(self, event):
self.shift_x = min(max(self.start_drag[0] - event.x, 0 - int(self.img_w_f / 2 - self.img_w_f / self.zoom / 2)),
int(self.img_w_f / 2 - self.img_w_f / self.zoom / 2))
self.shift_y = min(max(self.start_drag[1] - event.y, 0 - int(self.img_w_f / 2 - self.img_w_f / self.zoom / 2)),
int(self.img_w_f / 2 - self.img_w_f / self.zoom / 2))
self.refresh()
def toggle_mask(self, event=None):
self.mask_toggle = not self.mask_toggle
self.refresh()
def reset_transform(self, event=None):
self.mask_toggle = False
self.zoom = 100
self.shift_x = 0
self.shift_y = 0
self.refresh()
def button_enter(self, event):
self.over_button = True
def button_leave(self, enter):
self.over_button = False
def __init__(self, root, w, h, imgs_p, imgs_o, imgs_m, dates, errors, logos):
self.dates = dates
self.errors = errors
# setup images
self.img_w = int(h * 0.68) # width of each displayed image
self.imgs_orig_m = [] # masked full images
self.imgs_pred_m = []
self.imgs_orig = [] # unmasked full images
self.imgs_pred = []
self.cc = []
for index, img in enumerate(imgs_p):
self.imgs_orig.append(imgs_o[index].resize((self.img_w, self.img_w), resample=0))
self.imgs_pred.append(img.resize((self.img_w, self.img_w), resample=0))
self.imgs_orig_m.append(Image.blend(self.imgs_orig[-1], imgs_m[index].convert(mode='RGB').resize((self.img_w, self.img_w), resample=0), alpha=.5))
self.imgs_pred_m.append(Image.blend(self.imgs_pred[-1], imgs_m[index].convert(mode='RGB').resize((self.img_w, self.img_w), resample=0), alpha=.5))
self.cc.append(1 - np.count_nonzero(np.array(imgs_m[index])) / np.array(imgs_m[index]).size)
self.curr_img = 0
# text labels and logos
h_logos = int(h / 17)
b_logos = int(w / 100)
self.canvas = Canvas(root, width=w, height=h)
self.canvas.pack()
self.canvas.configure(background='white')
self.logo1 = ImageTk.PhotoImage(logos[0].resize((int(h_logos / logos[0].size[1] * logos[0].size[0]), h_logos), Image.ANTIALIAS))
self.logo2 = ImageTk.PhotoImage(logos[1].resize((int(h_logos / logos[1].size[1] * logos[1].size[0]), h_logos), Image.ANTIALIAS))
self.logo3 = ImageTk.PhotoImage(logos[2].resize((int(h_logos / logos[2].size[1] * logos[2].size[0]), h_logos), Image.ANTIALIAS))
self.canvas.create_image(int(self.logo1.width() / 2 + b_logos), int(self.logo1.height() / 2 + b_logos), image=self.logo1)
self.canvas.create_image(int(w - self.logo2.width() / 2 - b_logos), int(self.logo2.height() / 2 + b_logos), image=self.logo2)
self.canvas.create_image(int(w - self.logo3.width() / 2 - b_logos), int(h - (self.logo3.height() / 2 + b_logos)), image=self.logo3)
self.canvas.create_text(w / 2, h * 0.06, font=("Courier", int(h / 25)), text='Gap Filling Viewer')
self.canvas.create_text(w / 3.9, h * 0.19, font=("Courier", int(h / 35)), text='Observed')
self.canvas.create_text(w - w / 3.9, h * 0.19, font=("Courier", int(h / 35)), text='Predicted')
self.day_info = self.canvas.create_text(w / 2, h * 0.13, font=("Courier", int(h / 30)), text='')
        self.zoom_label = self.canvas.create_text(w * 0.12, h * 0.94, font=("Courier", int(h / 50)), text='')
# image timeline
imagelist_h = int(self.img_w / len(self.imgs_pred)) + 1
imagelist_a = np.zeros((len(self.imgs_pred), imagelist_h, imagelist_h, 3), dtype='uint8')
for index in range(len(self.imgs_pred)):
imagelist_a[index, :, :, :] = np.array(self.imgs_pred[index].resize((imagelist_h, imagelist_h), Image.ANTIALIAS))
self.imagelists = []
for index in range(len(self.imgs_pred)):
c_list = np.array(imagelist_a)
c_list[index, :int(w / 600), :, :] = 255
c_list[index, (imagelist_h - int(w / 600)):, :, :] = 255
c_list[index, :, :int(w / 600), :] = 255
c_list[index, :, (imagelist_h - int(w / 600)):, :] = 255
self.imagelists.append(ImageTk.PhotoImage(Image.fromarray(c_list.reshape(len(self.imgs_pred) * imagelist_h, imagelist_h, 3))))
self.i_list = self.canvas.create_image(w * 0.5, h * 0.56, image=self.imagelists[self.curr_img])
# images and buttons
self.img_w_f = self.imgs_orig[0].size[0] # full image width
self.imgs_orig_v = [ImageTk.PhotoImage(img.resize((self.img_w, self.img_w), Image.ANTIALIAS)) for img in self.imgs_orig] # images for visualization
self.imgs_pred_v = [ImageTk.PhotoImage(img.resize((self.img_w, self.img_w), Image.ANTIALIAS)) for img in self.imgs_pred]
self.i_left = self.canvas.create_image(w / 3.9, h * 0.56, image=self.imgs_orig_v[self.curr_img])
self.i_right = self.canvas.create_image(w - w / 3.9, h * 0.56, image=self.imgs_pred_v[self.curr_img])
self.b_masks = Button(root, font=("Courier", int(h / 50)), text = "Show masks", command=self.toggle_mask)
self.b_reset = Button(root, font=("Courier", int(h / 50)), text = "Reset view", command=self.reset_transform, state=DISABLED)
self.b_quit = Button(root, font=("Courier", int(h / 50)), text = "Quit", command=self.canvas.master.destroy)
self.reset_transform()
self.canvas.create_window(w * 0.30, h * 0.94, window=self.b_masks)
self.canvas.create_window(w * 0.50, h * 0.94, window=self.b_reset)
self.canvas.create_window(w * 0.70, h * 0.94, window=self.b_quit)
# bind buttons and keys
root.bind("q", lambda e: self.canvas.master.destroy())
root.bind("r", self.reset_transform)
root.bind("m", self.toggle_mask)
root.bind("<Right>", self.next)
root.bind("<Left>", self.prev)
root.bind("<Down>", self.next)
root.bind("<Up>", self.prev)
root.bind("<Button-3>", self.click_right)
root.bind("<Button-1>", self.click_left)
root.bind("<Button-2>", self.click_wheel)
root.bind("<Button-4>", self.zoomer)
root.bind("<Button-5>", self.zoomer)
root.bind("<MouseWheel>", self.zoomer)
root.bind("<B2-Motion>", self.drag_roi)
root.bind("+", self.zoomer)
root.bind("-", self.zoomer)
self.over_button = False
self.b_masks.bind("<Enter>", self.button_enter)
self.b_masks.bind("<Leave>", self.button_leave)
self.b_reset.bind("<Enter>", self.button_enter)
self.b_reset.bind("<Leave>", self.button_leave)
self.b_quit.bind("<Enter>", self.button_enter)
self.b_quit.bind("<Leave>", self.button_leave)
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument('-l', '--left', default='imgs/original/',
help='directory with images which are shown on the left')
parser.add_argument('-r', '--right', default='imgs/pred_outline_lin_spatial_clouds0_2/',
help='directory with images which are shown on the right')
parser.add_argument('-m', '--masks', default='imgs/mask/',
help='directory with mask images')
parser.add_argument('-R', '--report', default='report_lin_spatial_clouds0_2.csv',
help='report containing date and error information for the right hand images')
parser.add_argument('-y', '--year', type=int, default=2018,
help='year of data acquisition')
parser.add_argument('-W', '--width', type=int, default=1280,
help='window width')
parser.add_argument('-H', '--height', type=int, default=720,
help='window height')
args = parser.parse_args()
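# A hypothetical invocation of this viewer (script name and paths below are
# placeholders, not part of the original project; defaults mirror the argparse
# setup above):
#
#   python gap_filling_viewer.py \
#       --left imgs/original/ \
#       --right imgs/pred_outline_lin_spatial_clouds0_2/ \
#       --masks imgs/mask/ \
#       --report report_lin_spatial_clouds0_2.csv \
#       --year 2018 --width 1280 --height 720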
imgs_o = [Image.open(img) for img in sorted([os.path.join(args.left, img) for img in os.listdir(args.left)])]
imgs_p = [Image.open(img) for img in sorted([os.path.join(args.right, img) for img in os.listdir(args.right)])]
imgs_m = [Image.open(img) for img in sorted([os.path.join(args.masks, img) for img in os.listdir(args.masks)])]
report = np.genfromtxt(args.report, delimiter=',', dtype=float)[1:-1]
dates = [(datetime(args.year, 1, 1) + timedelta(int(report[day, 1]) - 1)).strftime('%b %d %Y') for day in range(report.shape[0])]
errors = ['{:4.1f}'.format(error) if error != 0.0 else 'n.a. ' for error in report[:, 5]]
logos = [media.logo1, media.logo2, media.logo3]
if len(imgs_o) != len(dates):
raise RuntimeError('Different number of images in {} than days in the report {}!'.format(args.left, args.report))
if len(imgs_p) != len(dates):
raise RuntimeError('Different number of images in {} than days in the report {}!'.format(args.right, args.report))
if len(imgs_m) != len(dates):
raise RuntimeError('Different number of images in {} than days in the report {}!'.format(args.masks, args.report))
root = Tk()
root.title('Gap Filling Viewer')
root.geometry("%dx%d+0+0" % (args.width, args.height))
MainWindow(root, args.width, args.height, imgs_p, imgs_o, imgs_m, dates, errors, logos)
root.focus_set()
root.mainloop()
| 2.46875 | 2 |
paypal/pro/tests.py | pdfcrowd/django-paypal | 1 | 4091 | <gh_stars>1-10
#!/usr/bin/python
# -*- coding: utf-8 -*-
from django.conf import settings
from django.core.handlers.wsgi import WSGIRequest
from django.forms import ValidationError
from django.http import QueryDict
from django.test import TestCase
from django.test.client import Client
from paypal.pro.fields import CreditCardField
from paypal.pro.helpers import PayPalWPP, PayPalError
class RequestFactory(Client):
# Used to generate request objects.
def request(self, **request):
environ = {
'HTTP_COOKIE': self.cookies,
'PATH_INFO': '/',
'QUERY_STRING': '',
'REQUEST_METHOD': 'GET',
'SCRIPT_NAME': '',
'SERVER_NAME': 'testserver',
'SERVER_PORT': 80,
'SERVER_PROTOCOL': 'HTTP/1.1',
}
environ.update(self.defaults)
environ.update(request)
return WSGIRequest(environ)
RF = RequestFactory()
REQUEST = RF.get("/pay/", REMOTE_ADDR="127.0.0.1:8000")
class DummyPayPalWPP(PayPalWPP):
pass
# """Dummy class for testing PayPalWPP."""
# responses = {
# # @@@ Need some reals data here.
# "DoDirectPayment": """ack=Success×tamp=2009-03-12T23%3A52%3A33Z&l_severitycode0=Error&l_shortmessage0=Security+error&l_longmessage0=Security+header+is+not+valid&version=54.0&build=854529&l_errorcode0=&correlationid=""",
# }
#
# def _request(self, data):
# return self.responses["DoDirectPayment"]
class CreditCardFieldTest(TestCase):
def testCreditCardField(self):
field = CreditCardField()
field.clean('4797503429879309')
        self.assertEqual(field.card_type, "Visa")
self.assertRaises(ValidationError, CreditCardField().clean, '1234567890123455')
class PayPalWPPTest(TestCase):
def setUp(self):
        # Avoiding blasting real requests at PayPal.
self.old_debug = settings.DEBUG
settings.DEBUG = True
self.item = {
'amt': '9.95',
'inv': 'inv',
'custom': 'custom',
'next': 'http://www.example.com/next/',
'returnurl': 'http://www.example.com/pay/',
'cancelurl': 'http://www.example.com/cancel/'
}
self.wpp = DummyPayPalWPP(REQUEST)
def tearDown(self):
settings.DEBUG = self.old_debug
def test_doDirectPayment_missing_params(self):
data = {'firstname': 'Chewbacca'}
self.assertRaises(PayPalError, self.wpp.doDirectPayment, data)
def test_doDirectPayment_valid(self):
data = {
'firstname': 'Brave',
'lastname': 'Star',
'street': '1 Main St',
'city': u'San Jos\xe9',
'state': 'CA',
'countrycode': 'US',
'zip': '95131',
'expdate': '012019',
'cvv2': '037',
'acct': '4797503429879309',
'creditcardtype': 'visa',
'ipaddress': '10.0.1.199',}
data.update(self.item)
self.assertTrue(self.wpp.doDirectPayment(data))
def test_doDirectPayment_invalid(self):
data = {
'firstname': 'Epic',
'lastname': 'Fail',
'street': '100 Georgia St',
'city': 'Vancouver',
'state': 'BC',
'countrycode': 'CA',
'zip': 'V6V 1V1',
'expdate': '012019',
'cvv2': '999',
'acct': '1234567890',
'creditcardtype': 'visa',
'ipaddress': '10.0.1.199',}
data.update(self.item)
self.assertFalse(self.wpp.doDirectPayment(data))
def test_setExpressCheckout(self):
# We'll have to stub out tests for doExpressCheckoutPayment and friends
# because they're behind paypal's doors.
nvp_obj = self.wpp.setExpressCheckout(self.item)
self.assertTrue(nvp_obj.ack == "Success")
### DoExpressCheckoutPayment
# PayPal Request:
# {'amt': '10.00',
# 'cancelurl': u'http://xxx.xxx.xxx.xxx/deploy/480/upgrade/?upgrade=cname',
# 'custom': u'website_id=480&cname=1',
# 'inv': u'website-480-cname',
# 'method': 'DoExpressCheckoutPayment',
# 'next': u'http://xxx.xxx.xxx.xxx/deploy/480/upgrade/?upgrade=cname',
# 'payerid': u'BN5JZ2V7MLEV4',
# 'paymentaction': 'Sale',
# 'returnurl': u'http://xxx.xxx.xxx.xxx/deploy/480/upgrade/?upgrade=cname',
# 'token': u'<PASSWORD>'}
#
# PayPal Response:
# {'ack': 'Success',
# 'amt': '10.00',
# 'build': '848077',
# 'correlationid': '375f4773c3d34',
# 'currencycode': 'USD',
# 'feeamt': '0.59',
# 'ordertime': '2009-03-04T20:56:08Z',
# 'paymentstatus': 'Completed',
# 'paymenttype': 'instant',
# 'pendingreason': 'None',
# 'reasoncode': 'None',
# 'taxamt': '0.00',
# 'timestamp': '2009-03-04T20:56:09Z',
# 'token': '<PASSWORD>',
# 'transactionid': '3TG42202A7335864V',
# 'transactiontype': 'expresscheckout',
# 'version': '54.0'} | 2.15625 | 2 |
Hackerrank_Bot_Saves_Princess.py | madhurgupta96/Algorithmic-Journey | 0 | 4092 | # -*- coding: utf-8 -*-
"""
Created on Mon Dec 7 19:46:40 2020
@author: Intel
"""
def displayPathtoPrincess(n,grid):
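    # The bot starts at the centre of an n x n grid and the princess 'p' sits
    # somewhere on it. Locate 'p', then print one move per line (UP, DOWN,
    # LEFT, RIGHT) until the bot's row and column match the princess's.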
me_i=n//2
me_j=n//2
for i in range(n):
if 'p' in grid[i]:
pe_i=i
for j in range(n):
if 'p'==grid[i][j]:
pe_j=j
break
break
    while (me_i != pe_i) or (me_j != pe_j):
if(me_i-pe_i<0):
print('DOWN')
me_i=me_i+1
elif(me_i-pe_i>0):
print('UP')
me_i=me_i-1
else:
if(me_j-pe_j>0):
print('LEFT')
me_j=me_j-1
elif(me_j-pe_j<0):
print('RIGHT')
me_j=me_j+1
else:
break
m = int(input())
grid = []
for i in range(0, m):
grid.append(input().strip())
displayPathtoPrincess(m,grid) | 3.6875 | 4 |
gelviz/basic.py | HiDiHlabs/gelviz | 0 | 4093 | <gh_stars>0
import matplotlib.pyplot as plt
import pybedtools
import pandas as pnd
import numpy as np
import tabix
import matplotlib.ticker as ticker
from matplotlib.patches import Rectangle
from matplotlib.patches import Arrow
from matplotlib.path import Path
from matplotlib.patches import PathPatch
import matplotlib.cm as cm
import matplotlib
import math
def plotGenes(genes_bed,
exons_bed,
introns_bed,
region_bed,
blacklist=None,
gene_map=None,
plot_gene_ids=True,
y_max=None,
distance_ratio=0.1,
ax=None,
plot_legend=False,
legend_loc="lower right",
color_plus="#80b1d3",
color_minus="#fb8072"):
"""Function for plotting gene structures, i.e. introns exons of genes.
:param genes_bed: :class:`pybedtools.BedTool` object containing TX start,
and TX end of genes.
:type genes_bed: :class:`pybedtools.BedTool`
:param exons_bed: :class:`pybedtools.BedTool` object containing exons of
genes.
:type exons_bed: :class:`pybedtools.BedTool`
:param introns_bed: :class:`pybedtools.BedTool` object containing introns
:type introns_bed: :class:`pybedtools.BedTool`
:param region_bed: :class:`pybedtools.BedTool` object containing the one
region, for which the gene plot is created.
:type region_bed: :class:`pybedtools.BedTool`
:param blacklist: List of gene names, for genes that should not be shown on
the plot, default is None
:type blacklist: list, optional
:param plot_gene_ids: If True, all gene ids will be included in the plot,
False otherwise, default is True
:type plot_gene_ids: bool, optional
:param y_max: Max y value in the gene plot. If not set, then y_max is the
max number of stacked genes, default is None.
:type y_max: bool, optional
:param distance_ratio: Minimal distance between two genes, as ratio of ax
        width, such that two genes are plotted side by side. If the distance
        falls below this ratio, the genes are stacked, default is 0.1.
:type distance_ratio: float, optional
:param ax: Axes instance on which the genes are plotted, default is None.
:type ax: :class:`matplotlib.axes._subplots.AxesSubplot`, optional
:param plot_legend: If True, a legend describing plus or minus stranded
genes is plotted, False otherwise. Default is False.
:type plot_legend: bool, optional
:param legend_loc: Location of the legend. Either of "lower left",
"lower right", "upper left", "upper right", default is "lower right".
:type legend_loc: str, optional
:param color_plus: Color code for plus stranded genes, default is "#80b1d3".
:type color_plus: str, optional.
:param color_minus: Color code for minus stranded genes, default is
"#fb8072".
:type color_minus: str, optional.
:return: Tuple of max_y_pos+1.5, patch_list, patch_description_list, where
1. max_y_pos+1.5 is the max_y_position + 1.5. max_y_pos defines the \
number of stacked genes.
2. patch_list is the list of patches drawn on the ax.
3. patch_description_list is the list of descriptions for the patches \
drawn on the ax.
:rtype: list
"""
ax = ax if ax is not None else plt.gca()
genes_in_region = genes_bed
exons_in_region = exons_bed
introns_in_region = introns_bed
region_border_up = int(region_bed[0][1])
region_border_down = int(region_bed[0][2])
region_size = region_border_down-region_border_up
color_forward = color_plus
color_reverse = color_minus
max_y_pos = None
if(not len(genes_in_region) == 0):
# Determine y positions of genes for plotting
max_y_pos, y_pos_dict = determineYPosGene(genes_in_region,
(region_border_down-
region_border_up),
distance_ratio)
if(not y_max is None):
max_y_pos = y_max
# Plot Exons
for i in exons_in_region:
start = int(i[1])
end = int(i[2])
gene_name = str(i[3])
if(not blacklist is None and gene_map[gene_name] in blacklist):
continue
# Define color for gene plotting
strand = str(i[5])
color = color_forward
if(strand == "-"):
color = color_reverse
y = max_y_pos-y_pos_dict[gene_name]+0.5
rect = Rectangle((start, y-.2),
end-start,
.4,
color=color,
capstyle='butt',
linewidth=0)
ax.add_patch(rect)
patch_list = []
patch_description_list = []
met_forward = False
met_reverse = False
# Plot Introns
for i in introns_in_region:
start = int(i[1])
end = int(i[2])
gene_name = str(i[3])
if(not blacklist is None and gene_map[gene_name] in blacklist):
continue
# Define color for gene plotting
strand = str(i[5])
color = color_forward
if(strand == "-"):
color = color_reverse
y = max_y_pos-y_pos_dict[gene_name]+0.5
patch = Rectangle((start, y-.03),
end-start,
.06,
color=color,
capstyle='butt',
linewidth=0)
ax.add_patch(patch)
if(strand == "+" and not(met_forward)):
patch_list += [patch]
patch_description_list += ["forward strand"]
met_forward = True
elif(strand == "-" and not(met_reverse)):
patch_list += [patch]
patch_description_list += ["reverse strand"]
met_reverse = True
# Plot Gene Names
if(plot_gene_ids):
for i in genes_in_region:
start = int(i[1])
gene_name = str(i[3])
if(not blacklist is None and gene_map[gene_name] in blacklist):
continue
# Define color for gene plotting
strand = str(i[5])
color = color_forward
if(strand == "-"):
color = color_reverse
border_distance_down = region_border_down-start
if(start < region_border_up):
start = region_border_up
border_distance_down = region_border_down-start
if(not(float(border_distance_down)/float(region_size)
< distance_ratio)):
gene_name = str(i[3])
gene_name_label = gene_name
if(not gene_map is None):
gene_name_label = gene_map[gene_name]
y = max_y_pos-y_pos_dict[gene_name]+.8
plt.text(start,
y,
gene_name_label,
size=5,
color = color)
plt.xlim([region_border_up, region_border_down])
plt.ylim([0, max_y_pos+1.5])
plt.yticks([], [])
if(plot_legend):
plt.legend(patch_list,
patch_description_list,
loc=legend_loc,
fontsize=5)
return max_y_pos+1.5, patch_list, patch_description_list
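# Example usage of plotGenes (a minimal sketch, not part of the original
# module; the BED file paths are hypothetical placeholders):
#
#   import pybedtools
#   import matplotlib.pyplot as plt
#
#   genes = pybedtools.BedTool("genes.bed")
#   exons = pybedtools.BedTool("exons.bed")
#   introns = pybedtools.BedTool("introns.bed")
#   region = pybedtools.BedTool("chr1\t1000000\t2000000", from_string=True)
#
#   fig, ax = plt.subplots(figsize=(8, 2))
#   plotGenes(genes, exons, introns, region, ax=ax, plot_legend=True)
#   fig.savefig("genes.png", dpi=300)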
def determineYPosGene(genes_bed,
region_size,
distance_ratio):
'''Function that determines the max y position for gene plotting via
function plotGenes.
:param genes_bed: :class:`pybedtools.BedTool` object containing genes to be
plotted.
:type genes_bed: :class:`pybedtools.BedTool`
:param region_size: Size of region to be plotted in base pairs.
:type region_size: int
:param distance_ratio: Minimal distance between two genes, as ratio of ax
        width, such that two genes are plotted side by side. If the distance
        falls below this ratio, the genes are stacked.
:type distance_ratio: float
:return: Tuple of
1. max_y_pos: Defines the number of stacked genes.
2. y_pos_dict: Dictionary with keys = gene ids and values = y position \
of gene.
:rtype: tuple
'''
sort_indices = [int(idx) for idx in np.argsort([i[1] for i in genes_bed])]
genes_sorted_bed = [genes_bed[i] for i in sort_indices]
y_pos_dict = {}
y_level_dict = {}
max_y_pos = 0
for interval in genes_sorted_bed:
gene_name = interval[3]
gene_start = int(interval[1])
gene_end = int(interval[2])
for i in range(max_y_pos+1):
if(i == 0 and not max_y_pos in y_level_dict):
y_pos_dict[gene_name] = i
y_level_dict[i] = [[gene_start, gene_end]]
break
elif(gene_start > y_level_dict[i][-1][1] and
float(gene_start-y_level_dict[i][-1][0])/float(region_size) >
distance_ratio):
y_pos_dict[gene_name] = i
y_level_dict[i] += [[gene_start, gene_end]]
break
elif(i == max_y_pos):
max_y_pos += 1
y_pos_dict[gene_name] = max_y_pos
y_level_dict[max_y_pos] = [[gene_start, gene_end]]
break
else:
continue
return max_y_pos, y_pos_dict
def createGeneNameMap(gene_name_mapping_filename):
'''Function that creates a mapping between gene ids
:param gene_name_mapping_file: Path to a tab separated file, for which the
        first column is an Ensembl gene id, and the second column is the HUGO
        gene symbol
:type gene_name_mapping_file: str
:return: Dictionary containing the gene id mapping.
:rtype: dictionary
'''
gene_name_mapping_file = open(gene_name_mapping_filename, "r")
gene_map = {}
for line in gene_name_mapping_file:
split_line = line.rstrip().split("\t")
ensembl_gene_id = split_line[0].split(".")[0]
hugo_gene_symbol = split_line[1].split(".")[0]
gene_map[ensembl_gene_id] = hugo_gene_symbol
gene_name_mapping_file.close()
return gene_map
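# Example usage (a sketch; the mapping file path is a hypothetical placeholder).
# The file is expected to be tab separated, one Ensembl id and one HUGO symbol
# per line, e.g.:
#
#   ENSG00000141510.16    TP53
#   ENSG00000157764.13    BRAF
#
#   gene_map = createGeneNameMap("gencode_id_to_symbol.tsv")
#   gene_map["ENSG00000141510"]  # -> "TP53" (version suffix is stripped)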
def plotGeneExpression(genes_bed,
region_bed,
expression_df_g1,
expression_df_g2,
gene_names_map,
blacklist=None,
ax=None,
plot_legend=False,
color_g1="#fb8072",
color_g2="#80b1d3",
g1_id="tumor",
g2_id="normal",
plot_gene_names=True):
'''Function for plotting paired gene expression (e.g. tumor and normal) on a
gene region scale retaining the position of genes.
:param genes_bed: :class:`pybedtools.BedTool` object containing TXstart,
and TXend of genes.
:type genes_bed: :class:`pybedtools.BedTool`
:param region_bed: :class:`pybedtools.BedTool` object containing the region
to be plotted
:type region_bed: :class:`pybedtools.BedTool`
:param expression_df_g1: :class:`pandas.Dataframe` containing the expression
values of g1 samples (columns: sample ids; index: gene ids)
:type expression_df_g1: :class:`pandas.DataFrame`
:param expression_df_g2: :class:`pandas.Dataframe` containing the expression
values of g2 samples (columns: sample ids; index: gene ids)
:type expression_df_g2: :class:`pandas.DataFrame`
:param gene_names_map: Dictionary with keys: ENSEMBL GENE IDs, and values:
HUGO GENE SYMBOLs.
:type gene_names_map: dict.
:param blacklist: Set containing gene ids not to be plotted, default to
None.
:type blacklist: set, optional
:param ax: Axis used for plotting, defaults to None.
:type ax: :class:`matplotlib.axes._subplots.AxesSubplot`, optional
:param plot_legend: If True legend is plotted, False otherwise, defaults to
False.
:type plot_legend: bool
:param color_g1: Color used for plotting g1 samples expression, defaults to
"#fb8072".
:type color_g1: str, optional
:param color_g2: Color used for plotting g2 samples expression, defaults to
"#80b1d3".
:type color_g2: str, optional
:param g1_id: ID of g1 used for legend plotting, defaults to "tumor".
:type g1_id: str, optional
:param g2_id: ID of g2 used for legend plotting, defaults to "normal".
:type g2_id: str, optional
:param plot_gene_names: If True, the HUGO GENE SYMBOLs will be shown, else
the GENE SYMBOLs are hidden.
:type plot_gene_names: bool.
:return: Axis on which plot was placed.
:rtype: :class:`matplotlib.axes._subplots.AxesSubplot`
'''
ax = ax if ax is not None else plt.gca()
# Get gene names and regions
genes_in_region_bed = genes_bed.intersect(region_bed,
wa=True,
u=True).sort()
gene_names = []
gene_regions = []
for e in genes_in_region_bed:
gene_name_ens = str(e[3])
gene_names += [gene_names_map[gene_name_ens]]
gene_regions += [[int(e[1]), int(e[2])]]
region_right_border = int(region_bed[0][2])
region_left_border = int(region_bed[0][1])
# Determine minimal extension of barplot
extension=None
for i in range(len(gene_regions)):
if(not blacklist is None and gene_names[i] in blacklist):
continue
left_border = gene_regions[i][0]
right_border = None
if(i < len(gene_names)-1):
right_border = gene_regions[i+1][0]
else:
right_border = region_right_border
current_extension = right_border-left_border
if(current_extension == 0.):
continue
if(extension is None):
extension = float(current_extension)
elif(current_extension < extension):
extension = float(current_extension)
boxprops = {"color": "k", "linewidth": .3}
flierprops = {"color": "k"}
medianprops = {"color": "k", "linewidth": .3}
whiskerprops = {"color": "k", "linewidth": .3}
capprops={"color": "k", "linewidth": .3}
patch_list = None
patch_description_list = None
tick_positions = []
gene_names_clean = []
counter=0
patch_saved = False
for gene_name in gene_names:
left_border = gene_regions[counter][0]
right_border = region_right_border
if(not blacklist is None and gene_name in blacklist):
counter += 1
continue
if(counter < len(gene_names)-1):
right_border = gene_regions[counter+1][0]
bplot_g1_pos = left_border + extension/4.
bplot_g2_pos = left_border + 3*(extension/4.)
tick_positions += [left_border + extension/2.]
gene_names_clean += [gene_name]
exp_values_g1 = expression_df_g1.loc[gene_name, :]
if(type(exp_values_g1).__name__ == "Series"):
exp_values_g1 = list(exp_values_g1)
else:
exp_values_g1 = list(exp_values_g1.iloc[0, :])
exp_values_g2 = expression_df_g2.loc[gene_name, :]
if(type(exp_values_g2).__name__ == "Series"):
exp_values_g2 = list(exp_values_g2)
else:
exp_values_g2 = list(exp_values_g2.iloc[0, :])
bplot_g1 = ax.boxplot([np.log2([i if
i >= 1. else
1. for
i in exp_values_g1])],
positions=[bplot_g1_pos],
widths=extension/2.,
patch_artist=True,
boxprops=boxprops,
flierprops=flierprops,
medianprops=medianprops,
whiskerprops=whiskerprops,
capprops=capprops,
showfliers=False)
bplot_g2 = ax.boxplot([np.log2([i if
i >= 1. else
1. for
i in exp_values_g2])],
positions=[bplot_g2_pos],
widths=extension/2.,
patch_artist = True,
boxprops=boxprops,
flierprops=flierprops,
medianprops=medianprops,
whiskerprops=whiskerprops,
capprops=capprops,
showfliers=False)
bplot_g1["boxes"][0].set_facecolor(color_g1)
bplot_g2["boxes"][0].set_facecolor(color_g2)
if(not patch_saved):
patch_saved=True
patch_list = [bplot_g1["boxes"][0], bplot_g2["boxes"][0]]
patch_description_list = [g1_id, g2_id]
counter += 1
ax.set_xlim(region_left_border, region_right_border)
ax.xaxis.set_major_locator(ticker.FixedLocator((tick_positions)))
ax.xaxis.set_major_formatter(ticker.FixedFormatter((gene_names_clean)))
if(not plot_gene_names):
ax.xaxis.set_major_formatter(
ticker.FixedFormatter(([ " " for i in
gene_names_clean])))
for tick in ax.get_xticklabels():
tick.set_rotation(45)
tick.set_size(6)
for ytick in ax.get_yticklabels():
ytick.set_size(6)
if(plot_legend):
ax.legend(patch_list,
patch_description_list,
fontsize=5,
loc='lower left')
return ax
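# Example usage of plotGeneExpression (a sketch with hypothetical inputs):
# the expression tables are expected to be indexed by HUGO gene symbols with
# one column per sample; genes and region are pybedtools.BedTool objects as in
# the plotGenes example above.
#
#   expr_tumor = pnd.read_csv("expression_tumor.csv", index_col=0)
#   expr_normal = pnd.read_csv("expression_normal.csv", index_col=0)
#   gene_map = createGeneNameMap("gencode_id_to_symbol.tsv")
#   ax = plotGeneExpression(genes, region, expr_tumor, expr_normal, gene_map,
#                           plot_legend=True)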
def plotGeneExpressionEqualDist(genes_bed,
gene_mid_points,
region,
expression_df,
groups,
gene_names_map=None,
blacklist=None,
ax=None,
plot_legend=False,
colors=None,
ids=None,
plot_gene_names=True,
position_gene_names="bottom",
log_transformed=True,
plot_points=False,
alpha=.5):
'''Function for plotting grouped gene expression (e.g. tumor and normal) on
a gene region scale equalizing the position of genes.
:param genes_bed: :class:`pybedtools.BedTool` object containing gene
regions.
:type genes_bed: :class:`pybedtools.BedTool`
:param gene_mid_points: list of integer values containing center positions
of genes.
:type gene_mid_points: list
:param region: List containing the region to be plotted
([<chrom>, <start>, <end>]).
:type region: list
:param groups: List of lists containing the IDs of the different groups.
:type groups: list
:param gene_names_map: Dictionary with keys: ENSEMBL GENE IDs, and values:
HUGO GENE SYMBOLs.
:type gene_names_map: dict.
:param expression_df: class:`pandas.DataFrame` object containing the
expression values of all samples (columns: sample ids; index: gene ids).
:type expression_df: class:`pandas.DataFrame`
:param blacklist: Set containing gene ids not to be plotted, defaults to
None,
:type blacklist: set, optional
    :param ax: Axis used for plotting, defaults to None.
:type ax: :class:`matplotlib.axes._subplots.AxesSubplot`, optional
:param plot_legend: If True plot legend, False otherwise, defaults to False.
:type plot_legend: bool, optional
:param colors: List of colors used for plotting samples expression. The
number of colors must be the same as the number of groups, defaults to
None.
    :type colors: list, optional
:param ids: IDs used for legend plotting, defaults to None. Number of ids
must be the same as the number of groups.
:type ids: list, optional.
:param plot_gene_names: True if gene names shall be plotted,
False otherwise, defaults to True.
:type plot_gene_names: bool, optional
:param position_gene_names: Either of "top", or "bottom", defaults to
"bottom".
:type position_gene_names: str, optional
:param log_transformed: If True use log transformed values for plotting,
non-transformed values otherwise.
:type log_transformed: bool, optional
:param plot_points: If True, a point per expression value is plotted in
addition to the boxplot, no points are plotted otherwise, defaults to
False.
:type plot_points: bool, optional
:param alpha: Alpha value for the background color of the boxplots boxes,
defaults to 0.5.
:type alpha: float, optional
:return: Plots axis.
:rtype: :class:`matplotlib.axes._subplots.AxesSubplot`
'''
standard_colors = ["#66c2a5",
"#fc8d62",
"#8da0cb",
"#ec87c2",
"#a6d854",
"#ffd92f",
"#e5c494",
"#bbbbbb"]
ax = ax if ax is not None else plt.gca()
region_bed = pybedtools.BedTool("\t".join([str(i) for i in region]),
from_string=True)
# Get gene names and regions
genes_in_region_bed = genes_bed.intersect(region_bed,
wa=True,
u=True).sort()
gene_names = []
gene_regions = []
for e in genes_in_region_bed:
gene_name_ens = str(e[3])
if(not gene_names_map is None):
gene_names += [gene_names_map[gene_name_ens]]
else:
gene_names += [gene_name_ens]
gene_regions += [[int(e[1]), int(e[2])]]
region_right_border = int(region_bed[0][2])
region_left_border = int(region_bed[0][1])
# Determine minimal extension of barplot
extension=None
if(len(gene_mid_points) <= 1):
extension=region[2]-region[1]
else:
extension=gene_mid_points[1]-gene_mid_points[0]
# Subtract a small percentage of region size from extension
extension=extension-(region[2]-region[1])*.01
boxprops = {"color": "k", "linewidth": .3, "alpha":alpha}
flierprops = {"color": "k"}
medianprops = {"color": "k", "linewidth": .3}
whiskerprops = {"color": "k", "linewidth": .3}
capprops={"color": "k", "linewidth": .3}
patch_list = []
patch_description_list = []
tick_positions = []
gene_names_clean = []
counter=0
for gene_name in gene_names:
left_border = gene_mid_points[counter]-extension/2
right_border = gene_mid_points[counter]+extension/2
if(not blacklist is None and gene_name in blacklist):
counter += 1
continue
n_groups = len(groups)
for g in range(n_groups):
bplot_pos = left_border + (2*g+1)*extension/float((n_groups*2.))
tick_positions += [left_border + extension/2.]
gene_names_clean += [gene_name]
exp_values = expression_df.loc[gene_name, groups[g]]
if(type(exp_values).__name__ == "Series"):
exp_values = list(exp_values)
else:
exp_values = list(exp_values.iloc[0, :])
expression_values = exp_values
if(log_transformed):
expression_values = np.log2([i
if i >= 1.
else 1.
for i in exp_values])
bplot = ax.boxplot(expression_values,
positions=[bplot_pos],
widths=extension/float(n_groups),
patch_artist=True,
boxprops=boxprops,
flierprops=flierprops,
medianprops=medianprops,
whiskerprops=whiskerprops,
capprops=capprops,
showfliers=False)
color = None
if(not colors is None):
color = colors[g]
else:
color = standard_colors[g]
bplot["boxes"][0].set_facecolor(color)
if(plot_points):
x_positions = [ (bplot_pos+
(i-.5)*
((2*extension)/(float(n_groups)*3))) for i in
list(np.random.rand(len(expression_values))) ]
plt.plot(x_positions, expression_values, "k.", markersize=3)
g_id = None
if(not ids is None):
g_id = ids[g]
else:
g_id = "group "+str(g)
if(not g_id in patch_description_list):
patch_list += [bplot["boxes"][0]]
patch_description_list += [g_id]
counter += 1
ax.set_xlim(region_left_border, region_right_border)
if(position_gene_names == "top"):
ax.xaxis.set_ticks_position("top")
ax.xaxis.set_major_locator(ticker.FixedLocator((tick_positions)))
ax.xaxis.set_major_formatter(ticker.FixedFormatter((gene_names_clean)))
if(not plot_gene_names):
ax.xaxis.set_major_formatter(ticker.FixedFormatter(
([ " " for i in
gene_names_clean])))
for tick in ax.get_xticklabels():
tick.set_rotation(45)
tick.set_size(5)
for ytick in ax.get_yticklabels():
ytick.set_size(5)
if(plot_legend):
ax.legend(patch_list,
patch_description_list,
fontsize=5,
loc='lower left')
return ax
def plotGenomicSegments(segments_list,
chrom,
start,
end,
ax = None):
    '''Function for plotting genomic segments in different colors
    :param segments_list: Iterator of bed-like segments
        (chrom, start, end, name, score, strand, start, end, color). The color
        field (R,G,B) determines the color used for plotting each segment.
    :type segments_list: iterator
:param chrom: Chromosome of the region to be plotted.
:type chrom: str
:param start: Start position of the region to be plotted.
:type start: str
:param end: End position of the region to be plotted.
:type end: str
:param ax: Axis used for plotting, defaults to None.
:type ax: :class:`matplotlib.axes._subplots.AxesSubplot`, optional
    :return: Dictionary with keys = names of segments, and values = the
        corresponding patches.
:rtype: dict
'''
ax = ax if ax is not None else plt.gca()
patches_dict = {}
for segment in segments_list:
segment_start = int(segment[1])
segment_end = int(segment[2])
color = tuple([ float(i)/256. for i in
str(segment[-1]).split(",") ]+[1])
segment_type = str(segment[3])
if(segment_type == "R"):
color = (1,1,1,1)
rect = Rectangle((segment_start, 0),
segment_end-segment_start,
1,
color=color)
ax.add_patch(rect)
patches_dict[segment_type] = rect
plt.xlim(int(start), int(end))
plt.ylim(0, 1)
plt.yticks([], [])
return patches_dict
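# Example usage (a sketch; assumes a bed-like list of chromatin-state segments
# whose last field is an "R,G,B" colour string, as described above):
#
#   segments = [
#       ["chr1", "1000000", "1200000", "Enh", ".", "+", "1000000", "1200000", "255,252,4"],
#       ["chr1", "1200000", "1500000", "R", ".", "+", "1200000", "1500000", "255,255,255"],
#   ]
#   fig, ax = plt.subplots(figsize=(8, 0.5))
#   patches = plotGenomicSegments(segments, "chr1", 1000000, 1500000, ax=ax)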
def plotCNVs(cnvs_bed,
chromosome,
start,
end,
ploidy=2,
cnv_threshold=0.7,
color_gain="g",
color_loss="r",
color_neutral="k",
ax=None):
'''Function for plotting CNV segments
:param cnvs_bed: :class:`pybedtools.BedTool` object containing CNVs with
following entries:
1. Chromosome,
2. Start Position,
3. End Position,
4. Deviation from ploidy,
5. True Copy Number)
:type cnvs_bed: :class:`pybedtools.BedTool`
:param chromosome: Chromosome for which to plot CNVs.
:type chromosome: str
:param start: Start position on chromosome.
:type start: int
:param end: End position on chromosome.
:type end: int
:param ploidy: Assumed ploidy of tumor, defaults to 2.
:type ploidy: int, optional
:param cnv_threshold: Minimal deviation from ploidy to be considered as a
CNV, defaults to 0.7.
:type cnv_threshold: float, optional
:param color_gain: Plot color of copy number gains, defaults to "g".
:type color_gain: str, optional
:param color_loss: Plot color of copy number losses, defaults to "r".
:type color_loss: str, optional
:param color_neutral: Plot color of copy number neutral regions, defaults to
"k".
:type color_neutral: str, optional
:param ax: Axis used for plotting.
:type ax: :class:`matplotlib.axes._subplots.AxesSubplot`, optional
:return: Nothing to be returned
:rtype: None
'''
# Use given axis for plotting
ax = ax if ax is not None else plt.gca()
for interval in cnvs_bed:
current_start = int(interval[1])
current_end = int(interval[2])
ploidy_dev = float(interval[3])
tcn = float(interval[4])
# Smooth tcn, if ploidy_dev is smaller than cnv_threshold
if(abs(ploidy_dev) < cnv_threshold):
tcn = ploidy
color = color_neutral
if(ploidy_dev >= cnv_threshold):
color=color_gain
elif(ploidy_dev <= -1.*cnv_threshold):
color = color_loss
if(abs(ploidy_dev) > cnv_threshold):
rect = Rectangle((current_start, tcn-.2),
current_end-current_start,
.4,
color=color,
edgecolor='none',
capstyle='butt',
linewidth=0)
ax.add_patch(rect)
else:
rect = Rectangle((current_start, tcn-.1),
current_end-current_start,
.2,
color=color,
edgecolor='none',
capstyle='butt',
linewidth=0)
ax.add_patch(rect)
# Plot thresholds
color_threshold=(189./255., 189./255., 189./255., 0.5)
if(ploidy == 2):
plt.plot([int(start), int(end)],
[1, 1],
color=color_threshold,
linestyle="--",
linewidth=.5)
plt.plot([int(start), int(end)],
[2, 2],
color=color_threshold,
linestyle="--",
linewidth=.5)
plt.plot([int(start), int(end)],
[3, 3],
color=color_threshold,
linestyle="--",
linewidth=.5)
plt.plot([int(start), int(end)],
[4, 4],
color=color_threshold,
linestyle="--",
linewidth=.5)
elif(ploidy == 4):
plt.plot([int(start), int(end)],
[1, 1],
color=color_threshold,
linestyle="--",
linewidth=.5)
plt.plot([int(start), int(end)],
[2, 2],
color=color_threshold,
linestyle="--",
linewidth=.5)
plt.plot([int(start), int(end)],
[3, 3],
color=color_threshold,
linestyle="--",
linewidth=.5)
plt.plot([int(start), int(end)],
[4, 4],
color=color_threshold,
linestyle="--",
linewidth=.5)
plt.plot([int(start), int(end)],
[5, 5],
color=color_threshold,
linestyle="--",
linewidth=.5)
plt.plot([int(start), int(end)],
[6, 6],
color=color_threshold,
linestyle="--",
linewidth=.5)
plt.xlim([int(start), int(end)])
if(ploidy == 2):
plt.ylim([0, 4.5])
plt.yticks([0, 1, 2, 3, 4], ["0", "1", "2", "3", "4"], size=6)
elif(ploidy == 4):
plt.ylim([0, 6.5])
plt.yticks([0, 2, 4, 6], ["0", "2", "4", "6"], size=6)
plt.xticks(rotation=45)
def plotCNVsHeat(cnvs_bed,
chromosome,
start,
end,
ploidy=2,
cnv_threshold=0.7,
cmap="bwr",
max_dev=None,
ax=None):
'''Function for plotting CNV segments as heatmap
:param cnvs_bed: :class:`pybedtools.BedTool` object containing CNVs with
following entries:
1. Chromosome,
2. Start Position,
3. End Position,
4. Deviation from ploidy,
5. True Copy Number)
:type cnvs_bed: :class:`pybedtools.BedTool`
:param chromosome: Chromosome for which to plot CNVs.
:type chromosome: str
:param start: Start position on chromosome.
:type start: int
:param end: End position on chromosome.
:type end: int
:param ploidy: Assumed ploidy of tumor, defaults to 2.
:type ploidy: int, optional
:param cnv_threshold: Minimal deviation from ploidy to be considered as a
CNV, defaults to 0.7.
:type cnv_threshold: float, optional
:param cmap: Colormap used for plotting CNVs, defaults to "bwr".
:type cmap: str, optional
:param max_dev: Maximal deviation from ploidy to plot, defaults to None.
:type max_dev: float, optional
:param ax: Axis used for plotting, defaults to None.
:type ax: :class:`matplotlib.axes._subplots.AxesSubplot`, optional
:return: Nothing to be returned.
:rtype: None
'''
# Use given axis for plotting
ax = ax if ax is not None else plt.gca()
colors = plt.cm.get_cmap(cmap)
if(max_dev is None):
max_dev = max([abs(float(i[3])) for i in cnvs_bed])
for interval in cnvs_bed:
current_start = int(interval[1])
current_end = int(interval[2])
ploidy_dev = float(interval[3])
tcn = float(interval[4])
        if(ploidy_dev < -1.*max_dev):
            ploidy_dev = -1.*max_dev
        elif(ploidy_dev > max_dev):
            ploidy_dev = max_dev
color = colors((ploidy_dev+max_dev)/(2*max_dev))
if(abs(ploidy_dev) < cnv_threshold):
color=colors(.5)
rect = Rectangle((current_start, .5),
current_end-current_start,
1,
color=color,
edgecolor='none',
capstyle='butt',
linewidth=0)
ax.add_patch(rect)
plt.xlim([int(start), int(end)])
plt.ylim([.5, 1.5])
plt.xticks([], [])
plt.yticks([], [])
def readACESeqAsBed(input_filename):
'''Function that reads CNVs from ACESeq ("*most_important*") files and
converts them to pybedtools.BedTool object
:param input_filename: Full path to ACESeq "most_important" file
:type input_filename: str
:return: :class:`pybedtools.BedTool` object containing CNVs from ACESeq
:rtype: :class:`pybedtools.BedTool`
'''
input_file = open(input_filename, "r")
cnv_bed_list = []
ploidy = None
for line in input_file:
if(line[:7] == "#ploidy"):
ploidy = float(line.rstrip().split(":")[1])
print(ploidy)
if(line[0] == "#" or line[:5] == "chrom"):
continue
split_line = line.rstrip().split("\t")
ploidy_dev = float(split_line[5])-ploidy
chrom = split_line[0]
if(chrom == "23"):
chrom="X"
elif(chrom == "24"):
chrom = "Y"
cnv_bed_list += [ [chrom,
split_line[1],
split_line[2],
str(ploidy_dev),
split_line[5],
"+"]
]
input_file.close()
return pybedtools.BedTool("\n".join(["\t".join(e) for e in
cnv_bed_list]),
from_string=True)
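# Example usage (a sketch; the ACESeq "*most_important*" file path and the
# chromosome length are hypothetical placeholders). The returned BedTool can be
# passed straight to plotCNVs or plotCNVsHeat:
#
#   cnvs = readACESeqAsBed("sample_most_important.txt")
#   cnvs_chr7 = cnvs.filter(lambda e: e.chrom == "7").saveas()
#   fig, ax = plt.subplots(figsize=(8, 1.5))
#   plotCNVs(cnvs_chr7, "7", 0, 159345973, ploidy=2, ax=ax)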
def plotChIPSignals(chip_signals,
r_chrom,
r_start,
r_end,
ax=None,
color="b",
offset=None,
merge=None):
    '''Function that plots bedGraph-like signals as bars.
    :param chip_signals: Iterator for which each element is a list-like
        object containing:
1. Chromosome
2. Start postion
3. End position
4. Value to be plotted as bar
:type chip_signals: iterator
:param r_chrom: Chromosome of region to be plotted.
:type r_chrom: str
:param r_start: Start position of region to be plotted.
:type r_start: int
:param r_end: End position of region to be plotted.
:type r_end: int
:param ax: Axis of plot
:type ax: :class:`matplotlib.axes._subplots.AxesSubplot`, optional
:param color: color of bars, defaults to "b".
:type color: str, optional
:param offset: Length of intervals, defaults to None.
:type offset: int, optional
    :param merge: Number of consecutive elements to be merged. If this value is
        not None, every `merge` elements are averaged and plotted as a single
        bar, defaults to None.
:return: Nothing to be returned.
:rtype: None
'''
ax = ax if ax is not None else plt.gca()
max_signal = 0
left = []
height = []
for signal in chip_signals:
start = int(signal[1])
end = int(signal[2])
value = float(signal[3])
if(value > max_signal):
max_signal = value
if(not offset is None):
end = start + offset
left += [start]
height += [value]
left_merged = []
height_merged = []
if(not merge is None):
heights = []
lefts = []
for i in range(len(left)):
if(i % merge == 0 and not (i == 0)):
left_merged += [lefts[0]]
lefts = []
height_merged += [np.mean(heights)]
heights = []
heights += [height[i]]
lefts += [left[i]]
if(not i % merge == 0):
left_merged += [lefts[0]]
lefts = []
height_merged += [np.mean(heights)]
heights = []
offset = merge*offset
left = left_merged
height = height_merged
plt.bar(left, height, offset, color = color, edgecolor = color)
plt.xlim(r_start, r_end)
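# Example usage (a sketch; assumes a bedGraph-like iterable of fixed-size bins):
#
#   signals = [
#       ["chr1", 1000000, 1000050, 3.2],
#       ["chr1", 1000050, 1000100, 5.7],
#       ["chr1", 1000100, 1000150, 1.1],
#   ]
#   fig, ax = plt.subplots(figsize=(8, 1))
#   plotChIPSignals(signals, "chr1", 1000000, 1000150, ax=ax, offset=50)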
def plotMethylationProfileHeat(methylation_bed,
chrom,
start,
end,
bin_size=1000,
ax = None):
'''Function for plotting methylation values as heatmap
:param methylation_bed: Methylation calls. Following fields must be
included: Chrom, Start, End, Methylated Cs, Unmethylated Cs.
:type methylation_bed: :class:`pybedtools.BedTool`
:param chrom: Chromosome of region to be plotted.
:type chrom: str
:param start: Start position of region to be plotted.
:type start: int
:param end: End position of region to be plotted.
:type end: int
:param bin_size: size of bin to average methylation values, defaults to
1000.
:type bin_size: int, optional
:param ax: Axis to be used for plotting, defaults to None.
:type ax: :class:`matplotlib.axes._subplots.AxesSubplot`, optional
:return: Nothing to be returned
:rtype: None
'''
ax = ax if ax is not None else plt.gca()
binned_meth_calls = [ [0, 0] for i in range(int(((end-start)/bin_size)+1)) ]
counter = 0
for element in methylation_bed:
# Determine bin
position = int(element[1])
if(position < start or position > end):
continue
n_meth = int(element[3])
n_unmeth = int(element[4])
current_bin = int((position-start)/bin_size)
counter += 1
binned_meth_calls[current_bin][0] += n_meth
binned_meth_calls[current_bin][1] += n_unmeth
binned_average_meth = [ float(i[0])/(float(i[0])+float(i[1]))
if (float(i[0])+float(i[1])) > 0
else "NA"
for i in binned_meth_calls ]
binned_average_meth_no_missing = []
n = len(binned_average_meth)
for i in range(n):
if(not binned_average_meth[i] == "NA"):
binned_average_meth_no_missing += [binned_average_meth[i]]
else:
meth_before = (binned_average_meth[i-1]
if not i == 0
else "NA")
meth_after = (binned_average_meth[i+1]
if not i == len(binned_average_meth)-1
else "NA")
average_list = [ j
for j
in [meth_before, meth_after]
if not j == "NA" ]
binned_average_meth_no_missing += [ (float(sum(average_list))/
float(len(average_list)))
if len(average_list) > 0
else 0. ]
binned_average_meth = binned_average_meth_no_missing
# Plot average methylation values per bin
# Define Colormap
cmap = cm.bwr
norm = matplotlib.colors.Normalize(vmin=0., vmax=1.)
m = matplotlib.cm.ScalarMappable(norm = norm, cmap = cmap)
for cbin in range(len(binned_average_meth)):
rect = Rectangle((start+cbin*bin_size, 0),
bin_size,
1,
color=m.to_rgba(binned_average_meth[cbin]))
ax.add_patch(rect)
plt.xlim([start, end])
plt.ylim([0, 1])
plt.xticks([], [])
plt.yticks([], [])
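# Example usage (a sketch; assumes WGBS-style methylation calls with counts of
# methylated and unmethylated cytosines per position; the file path is a
# hypothetical placeholder):
#
#   meth_calls = pybedtools.BedTool("sample_cpg_calls.bed")
#   fig, ax = plt.subplots(figsize=(8, 0.5))
#   plotMethylationProfileHeat(meth_calls, "chr1", 1000000, 1500000,
#                              bin_size=5000, ax=ax)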
def plotMethylationProfile(meth_calls,
chrom,
start,
end,
color="k",
ax=None):
'''Function that plots methylation values as dot plots.
:param meth_calls: Iterator containing list-like elements with the following
entries:
        1. Chromosome
        2. Start position
        3. End position
        4. Number of methylated cytosines
        5. Number of unmethylated cytosines
Or
        1. Chromosome
        2. Start position
        3. End position
        4. Beta value
    :type meth_calls: iterator
:param chrom: Chromosome of region to be plotted.
:type chrom: str
:param start: Start position of region to be plotted.
:type start: int
:param end: End position of region to be plotted.
:type end: int
:param color: Color of points representing methylation values, defaults to
"k".
:type color: str, optional
:param ax: Axis of plot, defaults to None.
:type ax: :class:`matplotlib.axes._subplots.AxesSubplot`, optional
:return: Nothing to be returned
:rtype: None
'''
ax = ax if ax is not None else plt.gca()
n_entries = len(meth_calls[0])
if(n_entries == 5):
plt.plot([ (float(m[1])+float(m[2]))/2. for m in meth_calls ],
[ float(m[3])/(float(m[3])+float(m[4]))
if not(float(m[3])+float(m[4]) == 0.)
else 0. for m in meth_calls],
color=color,
marker=".",
linestyle='None',
markersize=1,
alpha=.5)
elif(n_entries == 4):
plt.plot([ (float(m[1])+float(m[2]))/2. for m in meth_calls ],
                 [ float(m[3]) for m in meth_calls ],
color=color,
marker=".",
linestyle='None',
markersize=1,
alpha=.5)
plt.ylim([0, 1])
plt.xticks([], [])
plt.xlim([start, end])
def plotTX(chrom_r,
start_r,
end_r,
TX_pos,
direction="right",
color="k",
ax=None):
'''Function that plots a translocation event as a bar, showing the part
of the genome that is translocated.
:param chrom_r: Chromosome of the region to be plotted.
:type chrom_r: str
:param start_r: Start position of the region to be plotted.
:type start_r: int
:param end_r: End position of the region to be plotted.
:type end_r: int
:param TX_pos: Position of the translocation.
:type TX_pos: int
:param direction: Direction of the genomic part that is translocated. Either
of "left" (upstream), or "right" (downstream), defaults to "left".
:type direction: str, optional
:param color: Color of the bar representing the translocation, defaults to
"k".
:type color: str, optional
:param ax: Axis of plot, defaults to None.
:type ax: :class:`matplotlib.axes._subplots.AxesSubplot`, optional
:return: Nothing to be returned.
:rtype: None
'''
ax = ax if ax is not None else plt.gca()
TX_start = TX_pos
TX_end = end_r
if(direction == "left"):
TX_start = start_r
TX_end = TX_pos
rect = Rectangle((TX_start, .4),
TX_end-TX_start,
.2,
color=color,
capstyle='butt',
linewidth=0)
ax.add_patch(rect)
plt.xlim([start_r, end_r])
plt.ylim([0.3, 0.7])
def plotRegions(regions,
start,
end,
color="#cbebc4",
edgecolor=False,
alpha=1,
ax = None):
    '''Function that plots genomic regions as simple rectangles.
    :param regions: Iterator containing list-like elements with the following
entries:
1. Chromosome
2. Start position
3. End position
:type regions: iterator
:param start: Start position of the region to be plotted.
:type start: int
:param end: End position of the region to be plotted.
:type end: int
:param color: Color of the rectangles representing the regions to be
plotted, defaults to "#cbebc4".
:type color: str, optional
    :param edgecolor: Color of the region edge. If False, no edge is plotted,
        defaults to False.
    :type edgecolor: str or bool, optional
:param alpha: Alpha value of the rectangle, representing the region to be
plotted, defaults to 1.
:type alpha: float, optional.
:param ax: Axis of plot, defaults to None.
:type ax: :class:`matplotlib.axes._subplots.AxesSubplot`, optional
:return: Nothing to be returned
:rtype: None
'''
ax = ax if ax is not None else plt.gca()
c = 0
for region in regions:
if(not edgecolor):
current_color = color
rect = Rectangle([int(region[1]), -.75],
int(region[2])-int(region[1]),
1.5,
facecolor=current_color,
edgecolor='none',
alpha=alpha)
c += 1
else:
current_color = color
rect = Rectangle([int(region[1]), -.75],
int(region[2])-int(region[1]),
1.5,
facecolor=current_color,
edgecolor=edgecolor,
alpha=alpha)
c += 1
ax.add_patch(rect)
plt.xticks([], [])
plt.yticks([], [])
plt.xlim([start, end])
plt.ylim([-1, 1])
def plotMotifDirections(motifs_bed,
start,
end,
head_width=0.2,
head_length=1000,
overhang=0,
color_plus="#80b1d3",
color_minus="#fb8072",
ax=None):
'''Function that plots TF motifs as arrows, indicating their directionality.
:param motifs_bed: :class:`pybedtools.BedTool` object containing regions
of the TF sited to be plotted.
:type motifs_bed: :class:`pybedtools.BedTool`
:param start: Start position of the region to be plotted.
:type start: int
:param end: End position of the region to be plotted.
:type end: int
:param head_width: Width of the arrow head as proportion of the arrow,
defaults to 0.2
:type head_width: float, optional
:param head_length: Length of the arrow in bp (depends on the region that
is plotted), defaults to 1000.
:type head_length: int, optional
:param overhang: Fraction that the arrow is swept back (0 overhang means
triangular shape). Can be negative or greater than one. Defaults to 0.
:type overhang: float, optional
:param color_plus: Color of plus stranded TF regions, defaults to "#80b1d3".
:type color_plus: str, optional
    :param color_minus: Color of minus stranded TF regions, defaults to
        "#fb8072".
:type color_minus: str, optional
:param ax: Axis on which to plot contact map, defaults to None.
:type ax: :class:`matplotlib.axes._subplots.AxesSubplot`, optional
:return: Nothing to be returned.
:rtype: None
'''
ax = ax if ax is not None else plt.gca()
for motif in motifs_bed:
motif_start = int(motif[1])
motif_end = int(motif[2])
strand = str(motif[3])
arrow_start = motif_start
arrow_end = motif_end
color=color_plus
dx = head_length
if(strand == "-"):
arrow_start = motif_end
arrow_end = motif_start
color = color_minus
dx = -1.*head_length
plt.arrow(arrow_start,
.5,
dx,
0,
head_width=head_width,
head_length=head_length,
overhang=overhang,
head_starts_at_zero=False,
edgecolor="none",
facecolor=color,
length_includes_head=True)
plt.xlim([start, end])
plt.ylim([0.4, 0.6])
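# Example usage (a sketch; assumes a bed-like list of motif hits whose fourth
# field is the strand, as accessed in the loop above):
#
#   motifs = [
#       ["chr1", 1010000, 1010019, "+"],
#       ["chr1", 1250000, 1250019, "-"],
#   ]
#   fig, ax = plt.subplots(figsize=(8, 0.3))
#   plotMotifDirections(motifs, 1000000, 1500000, head_length=5000, ax=ax)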
def plotHiCContactMap(contact_map,
start,
end,
segment_size,
cmap="Greys",
vmin=None,
vmax=None,
location="top",
ax=None):
'''Function that plots HiC contact maps as pyramid plots
:param contact_map: Matrix that contains the intensity values of HiC
contacts.
:type contact_map: :class:`pandas.DataFrame`
:param start: Chromosomal start position of region to be plotted.
:type start: int
:param end: Chromosomal end position of region to be plotted.
:type end: int
:param segment_size: Size of the segments for which contacts were called.
:type segment_size: int
:param cmap: Name of the colormap to be used for plotting HiC intensities,
defaults to "Greys".
:type cmap: str, optional
:param vmin: Minimal value of intensity range to be plotted, defaults to
None
:type vmin: float, optional
:param vmax: Maximal value of intensity range to be plotted, defaults to
None.
:type vmax: float, optional
:param location: Either of "top" | "bottom". If location == "top", the
pyramid points upwards, else if location == "bottom" the pyramid points
downwards, defaults to top,
:type location: str, optional
:param ax: Axis on which to plot contact map, defaults to None.
:type ax: :class:`matplotlib.axes._subplots.AxesSubplot`, optional
:return: Nothing to be returned.
:rtype: None
'''
ax = ax if ax is not None else plt.gca()
    contact_map_index1 = start//segment_size
    contact_map_index2 = (end//segment_size)+1
sliced_contact_map = contact_map.iloc[contact_map_index1:contact_map_index2,
contact_map_index1:contact_map_index2]
if(vmin is None):
vmin = 0
if(vmax is None):
vmax = np.percentile(contact_map, 99.9)
colormap = plt.get_cmap(cmap)
for i in range(contact_map_index1, contact_map_index2):
y_range = (range(contact_map_index1+(i-contact_map_index1),
contact_map_index2)
if location == "top"
else range(contact_map_index1,
contact_map_index2-(contact_map_index2-i)))
for j in y_range:
# Define midpoint of rectangle
midpoint = (i*segment_size+(j*segment_size-i*segment_size)/2.,
(j*segment_size-i*segment_size)/2.)
vertices = [(midpoint[0]-segment_size/2., midpoint[1]),
(midpoint[0], midpoint[1]-segment_size/2.),
(midpoint[0]+segment_size/2., midpoint[1]),
(midpoint[0], midpoint[1]+segment_size/2.),
(midpoint[0]-segment_size/2., midpoint[1])
]
codes = [Path.MOVETO,
Path.LINETO,
Path.LINETO,
Path.LINETO,
Path.CLOSEPOLY,
]
path = Path(vertices, codes)
intensity_value = contact_map.iloc[i, j]
intensity_value = (intensity_value/vmax
if intensity_value <= vmax
else 1.)
facecolor = colormap(intensity_value)
patch = matplotlib.patches.PathPatch(path,
facecolor=facecolor,
edgecolor='none')
ax.add_patch(patch)
ax.set_xlim(start, end)
if(location == "top"):
ax.set_ylim(0, (end-start)/2.)
else:
ax.set_ylim(-1.*(end-start)/2., 0)
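# Example usage (a sketch; assumes a square pandas DataFrame of bin-by-bin
# contact intensities at a fixed resolution, loaded from a hypothetical file):
#
#   contacts = pnd.read_csv("chr1_contacts_50kb.csv", header=None)
#   fig, ax = plt.subplots(figsize=(6, 3))
#   plotHiCContactMap(contacts, 1000000, 4000000, 50000,
#                     cmap="Reds", location="top", ax=ax)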
def distanceEqualizer(genomic_segments,
start,
end,
direction="top_down",
color="k",
ax = None):
    '''Function that draws connectors mapping unevenly spaced genomic segments
    to equally spaced positions.
:param genomic_segments: List of segments for which distances shall be
equalized (each segment is of the form [<chrom>, <start>, <end>])
:type genomic_segments: list
:param start: Start position of the genomic region.
:type start: int
:param end: End position of the genomic region.
:type end: int
:param color: Color of lines equalizing distances, defaults to "k".
:type color: str, optional
:param direction: Direction of distance equalization (top_down | bottom_up),
defaults to "top_down".
:type direction: str, optional.
:param ax: Axis on which to plot, defaults to None.
:type ax: :class:`matplotlib.axes._subplots.AxesSubplot`, optional
:return: List of equalized region midpoints.
:rtype: list
'''
ax = ax if ax is not None else plt.gca()
# Calculate midpoints of original and distance equalized segments
n_segments = len(genomic_segments)
equalized_region_size = (end-start)
if(n_segments > 0):
equalized_region_size=(end-start)/n_segments
equalized_region_mid_points = []
for i in range(1, n_segments+1):
equalized_region_mid_points += [((start+
i*equalized_region_size)-
equalized_region_size/2)]
region_mid_points = []
for e in genomic_segments:
if(int(e[1]) < start):
region_mid_points += [start+(int(e[2])-start)/2]
elif(int(e[2]) > end):
region_mid_points += [int(e[1])+(end-int(e[1]))/2]
else:
region_mid_points += [int(e[1])+(int(e[2])-int(e[1]))/2]
for i in range(len(region_mid_points)):
region_mid_point = region_mid_points[i]
equalized_region_mid_point = equalized_region_mid_points[i]
codes = []
vertices = []
if(direction == "top_down"):
codes = [Path.MOVETO,
Path.LINETO,
Path.LINETO,
Path.LINETO]
vertices = [(region_mid_point, 1),
(region_mid_point, .8),
(equalized_region_mid_point, .2),
(equalized_region_mid_point, 0)]
else:
codes = [Path.MOVETO,
Path.LINETO,
Path.LINETO,
Path.LINETO]
vertices = [(region_mid_point, 0),
(region_mid_point, .2),
(equalized_region_mid_point, .8),
(equalized_region_mid_point, 1)]
path = Path(vertices, codes)
path_patch = PathPatch(path,
facecolor="none",
edgecolor=color,
linewidth=.5)
ax.add_patch(path_patch)
ax.axis("off")
plt.xlim([start, end])
plt.ylim([0, 1])
return equalized_region_mid_points
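# Example usage (a sketch): map three unevenly spaced segments onto evenly
# spaced midpoints, e.g. to connect a genomic track to an equalized boxplot row.
#
#   segments = [["chr1", 1050000, 1060000],
#               ["chr1", 1100000, 1110000],
#               ["chr1", 1400000, 1420000]]
#   fig, ax = plt.subplots(figsize=(8, 0.5))
#   mid_points = distanceEqualizer(segments, 1000000, 1500000, ax=ax)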
def plotCoordinates(chrom,
start,
end,
color="k",
ax = None,
upper=True,
loc_coordinates="up",
revert_coordinates=False,
rotation=0):
    '''Function that plots genomic coordinates in a linear fashion.
:param chrom: Chromosome of the region to be plotted.
:type chrom: str
:param start: Start position of the region to be plotted.
:type start: int
:param end: End position of the region to be plotted.
:type end: int
:param color: Color of the genomic scales elements, defaults to "k".
:type color: str, optional
:param ax: Axis of plot, defaults to None.
:type ax: :class:`matplotlib.axes._subplots.AxesSubplot`, optional
    :param upper: If True, use fewer (coarser) ticks; if False, use more
        (finer) ticks.
:type upper: bool, optional
:param loc_coordinates: Either of "up" | "down". If "up", plot ticks to
upper direction, else if "down", plot ticks to lower direction, defaults
to "up".
:type loc_coordinates: str, optional
:param revert_coordinates: If True, coordinates are reverted to decreasing
order. Else, coordinates stay in increasing order, defaults to False.
:type revert_coordinates: bool, optional
:param rotation: Rotational angle of coordinate strings, defaults to 0.
:type rotation: int, optional
:return: Nothing to be returned.
:rtype: None
'''
ax = ax if ax is not None else plt.gca()
tick_size = 10**math.ceil((np.log10((end-start)/10)))
if(not upper):
tick_size = 10**int((np.log10((end-start)/10)))
# Determine first tick position
first_tick = start+(tick_size-start%tick_size)
ticks = []
current_tick = first_tick
while(current_tick <= end):
ticks += [current_tick]
current_tick = current_tick + tick_size
scale = None
if(first_tick > 1000000):
scale = "Mb"
else:
scale="Kb"
digits_to_round = None
divisor = None
if(scale == "Mb"):
digits_to_round = int(6-np.log10(tick_size))
divisor = 1000000
else:
digits_to_round = int(5-np.log10(tick_size))
divisor = 100000
tick_labels = [ str(round(i/float(divisor), digits_to_round))+scale
for i in ticks ]
if(loc_coordinates == "up"):
plt.plot([start, end],
[0, 0],
linestyle="-",
color=color,
linewidth=1)
else:
plt.plot([start, end],
[0.3, 0.3],
linestyle="-",
color=color,
linewidth=1)
if(revert_coordinates):
ticks = [ start + end-i for i in ticks ]
ticks.reverse()
tick_labels.reverse()
print(tick_labels)
for i in range(len(ticks)):
if(loc_coordinates == "up"):
plt.plot([ticks[i], ticks[i]],
[0., .3],
linestyle="-",
color=color,
linewidth=1)
plt.text(ticks[i],
.4,
tick_labels[i],
horizontalalignment="center",
verticalalignment="bottom",
fontsize=5,
color=color,
rotation=rotation)
else:
plt.plot([ticks[i], ticks[i]],
[.3, .0],
linestyle="-",
color=color,
linewidth=1)
plt.text(ticks[i],
-.1,
tick_labels[i],
horizontalalignment="center",
fontsize=5,
color=color,
verticalalignment="top",
rotation=rotation)
plt.xlim([start, end])
plt.yticks([], [])
if(loc_coordinates == "up"):
plt.ylim([-.1, .8])
else:
plt.ylim([-1.5, .3])
plt.xticks([], [])
ax.spines["bottom"].set_visible(False)
ax.spines["top"].set_visible(False)
ax.spines["left"].set_visible(False)
ax.spines["right"].set_visible(False)
def plotLinksAsArcs(links_bed,
chrom_r,
start_r,
end_r,
lw=1,
color="k",
ax = None):
'''Function that plots links between genomic regions as arcs.
:param links_bed: Iterator, that contains bed-like structured lists with the
following elements:
1. Chromosome region1
2. Start region1
3. End region1
4. Chromosome region2
5. Start region2
6. End region2
:type links_bed: iterator
:param chrom_r: Chromosome of the region to be plotted.
:type chrom_r: str
:param start_r: Chromosomal start position of the region to be plotted.
:type start_r: int
    :param end_r: Chromosomal end position of the region to be plotted.
:type end_r: int
:param color: Color of the arc, defaults to "k".
:type color: str, optional.
:param ax: Axis where the plot is drawn, defaults to None.
:type ax: :class:`matplotlib.axes._subplots.AxesSubplot`, optional
:return: Nothing to be returned.
:rtype: None
'''
ax = ax if ax is not None else plt.gca()
max_dist = 0
for e in links_bed:
link_pos1 = int(e[1])+(int(e[2])-int(e[1]))/2
link_pos2 = int(e[4])+(int(e[5])-int(e[4]))/2
distance = abs(link_pos2-link_pos1)
if(distance > max_dist):
max_dist = distance
mid_point = link_pos1 + (link_pos2-link_pos1)/2
        if(link_pos2 < link_pos1):
mid_point = link_pos2 + (link_pos1-link_pos2)/2
vertices = [(link_pos1, 0),
(mid_point, distance),
(link_pos2, 0)]
codes = [Path.MOVETO,
Path.CURVE3,
Path.CURVE3]
path = Path(vertices,
codes)
patch = PathPatch(path,
facecolor = "None",
edgecolor = color,
lw = lw)
ax.add_patch(patch)
#ax.spines["bottom"].set_visible(False)
ax.spines["top"].set_visible(False)
ax.spines["left"].set_visible(False)
ax.spines["right"].set_visible(False)
plt.xticks([], [])
plt.yticks([], [])
plt.xlim([start_r, end_r])
plt.ylim([0, max_dist/2])
| 2.546875 | 3 |
toc/fsa/fsa.py | djrochford/toc | 0 | 4094 | <filename>toc/fsa/fsa.py<gh_stars>0
"""
File containing DFA and NFA public classes
"""
import collections.abc
from itertools import product, chain, combinations
from string import printable
from typing import (
AbstractSet,
Container,
FrozenSet,
Iterable,
List,
Mapping,
MutableMapping,
Optional,
Set,
Tuple,
Union,
cast
)
from .base import (
_Base,
_extract_states_alphabet,
_error_message,
_good_alphabet,
_check_input
)
State = str
Symbol = str
Regex = str
FsaTransitionFunction = Mapping[
Tuple[State, Symbol], Union[State, AbstractSet[State]]
]
class _FSA(_Base):
def __init__(
self,
*,
transition_function: FsaTransitionFunction,
start_state: State,
accept_states: AbstractSet[State]
):
super().__init__(
transition_function=transition_function, start_state=start_state
)
self._accept_states = accept_states
self._states, self._alphabet = _extract_states_alphabet(
self._transition_function.keys()
)
self._well_defined()
@property
def alphabet(self) -> FrozenSet[Symbol]:
return self._alphabet
@property
def accept_states(self) -> AbstractSet[State]:
return self._accept_states
def _well_defined(self) -> None:
super()._well_defined()
_good_alphabet(alphabet=self.alphabet, name="alphabet")
self._good_accept()
self._good_domain(self.alphabet)
def _good_accept(self) -> None:
bad_accept_states = self.accept_states - self.states
_error_message(
bad_set=bad_accept_states,
message_singular=("Accept state {} is not a member of the fsa's "
"state set."),
message_plural=("Accept states {} are not members of the fsa's "
"state set.")
)
def _good_range(self):
raise NotImplementedError
GnfaTransitionFunction = Mapping[Tuple[State, State], Regex]
MutableGnfaTF = MutableMapping[Tuple[State, State], Regex]
class _GNFA:
def __init__(
self,
transition_function: GnfaTransitionFunction,
body_states: Set[State],
start_state: State,
accept_state: State
):
self.transition_function = transition_function
self.body_states = body_states
self.start_state = start_state
self.accept_state = accept_state
self.states = (
self.body_states | {self.start_state} | {self.accept_state}
)
def reduce(self) -> "_GNFA":
"""
Output a GNFA equivalent to `self` with one less state in it.
"""
def union_main_scope(regex: Regex) -> bool:
paren_count = 0
for char in regex:
if char == '(':
paren_count += 1
elif char == ')':
paren_count -= 1
elif char == '|':
if paren_count == 0:
return True
return False
def regex_star(regex: Regex) -> Regex:
if regex in EMPTIES:
return '€'
if len(regex) == 1:
return regex + '*'
return f"({regex})*"
def regex_concat(regex1: Regex, regex2: Regex) -> Regex:
if regex1 == 'Ø' or regex2 == 'Ø':
return 'Ø'
if regex1 == '€':
return regex2
if regex2 == '€':
return regex1
if union_main_scope(regex1):
regex1 = f'({regex1})'
if union_main_scope(regex2):
regex2 = f'({regex2})'
return regex1 + regex2
def regex_union(regex1: Regex, regex2: Regex) -> Regex:
if regex1 == "Ø":
return regex2
if regex2 == "Ø":
return regex1
return f"{regex1}|{regex2}"
rip = self.body_states.pop()
r2 = self.transition_function[(rip, rip)]
reduced_tf = {}
for state1 in self.states - {self.accept_state, rip}:
r1 = self.transition_function[(state1, rip)]
for state2 in self.states - {self.start_state, rip}:
r3 = self.transition_function[(rip, state2)]
r4 = self.transition_function[(state1, state2)]
new_regex = regex_union(
regex_concat(regex_concat(r1, regex_star(r2)), r3),
r4
)
reduced_tf[(state1, state2)] = new_regex
return _GNFA(
reduced_tf,
self.body_states - {rip},
self.start_state,
self.accept_state
)
NfaTransitionFunction = Mapping[Tuple[State, Symbol], AbstractSet[State]]
MutableNfaTF = MutableMapping[Tuple[State, Symbol], Set[State]]
class NFA(_FSA):
"""
A nondeterministic finite automaton class. Takes three keyword arguments:
- `transition_function`: Mapping[Tuple[State, Symbol], AbstractSet[State]]
- `start_state`: State
- `accept_states`: AbstractSet[State]
(Where States are strings, and Symbols are one-char strings.)
    The transition function's keys implicitly define the nfa's state-set and
alphabet; the first elements of the tuples represent the nfa's states, and
the second elements are the symbols in the alphabet.
    The codomain of the transition function is the power-set of the nfa's state
set --- i.e., the values of the transition function dictionary should be
sets (or frozensets). The empty set is a valid value; in fact, you are
required to specify that the successor set for a given state-symbol pair is
the empty set, if it is.
You can define epsilon-moves by using the empty string in place of an
alphabet symbol in the transition function. Note that the empty string will
not be inferred to be a member of the alphabet (and hence the checks below
will work as you would expect).
The class will raise a ValueError exception on instantiation if any of the
following are true:
1. the start state is not a member of the set of states inferred from
the transition function;
2. the set of accept states is not a subset of the set of states
inferred from the transition function;
3. a member of the alphabet inferred from the transition function is
not a one-character string;
4. a member of the transition function's range is not a set;
5. the range of the transition function is not a subset of the power
set of states inferred from the transition function;
6. the transition function is missing cases -- i.e., it is not the case
that every pair of a state and a symbol is in the domain of the
transition function.
    The exception message will specify which of these six conditions
triggered the exception, and which states/symbols are the source of the
problem.
"""
def __or__(self, other: "NFA") -> "NFA":
"""
Let A be the language recognised by nfa1, and B be the language
recognized by nfa2. `nfa1 | nfa2` returns an nfa that recognizes A
union B. The cardinality of the state-set of nfa1 | nfa2 is the
cardinality of the state set of nfa1 plus the cardinality of the
state-set of nfa2 plus 1.
There is no problem with the input NFAs having different alphabets.
"""
new_self, new_other, union_tf = self._combine(other)
union_start_state = _get_new_state(new_self.states | new_other.states)
union_tf[(union_start_state, '')] = {
new_self.start_state, new_other.start_state
}
for symbol in new_self.alphabet | new_other.alphabet:
union_tf[(union_start_state, symbol)] = set()
union_accept_states = new_self.accept_states | new_other.accept_states
return NFA(
transition_function=union_tf,
start_state=union_start_state,
accept_states=union_accept_states
)
def __add__(self, other: "NFA") -> "NFA":
"""
Let A be the language recognised by nfa1, and B be the language
recognized by nfa2. `nfa1 + nfa2` returns an nfa that recognizes A
concat B -- i.e., the language consisting of the set of strings of the
form a concat b, where a is an element of A and b is an element of B.
Note that this `+` operation is not commutative.
"""
new_self, new_other, concat_tf = self._combine(other)
for state in new_self.accept_states:
if (state, '') in concat_tf:
concat_tf[(state, '')].add(new_other.start_state)
else:
concat_tf[(state, '')] = {new_other.start_state}
return NFA(
transition_function=concat_tf,
start_state=new_self.start_state,
accept_states=new_other.accept_states
)
def _combine(self, other: "NFA") -> Tuple["NFA", "NFA", MutableNfaTF]:
def prime(state: State):
return state + '`'
def copy(nfa: NFA) -> NFA:
copy_tf = {}
for state, symbol in nfa.transition_function.keys():
copy_tf[(prime(state), symbol)] = {
prime(x) for x in nfa.transition_function[(state, symbol)]
}
copy_start = prime(nfa.start_state)
copy_accept = {prime(x) for x in nfa.accept_states}
return NFA(
transition_function=copy_tf,
start_state=copy_start,
accept_states=copy_accept
)
overlap = self.states & other.states
while overlap:
other = copy(other)
overlap = self.states & other.states
def add_empty_transitions(
nfa1: NFA, nfa2: NFA
) -> Tuple[NfaTransitionFunction, NfaTransitionFunction]:
def add_one_way(nfa1: NFA, nfa2: NFA) -> NfaTransitionFunction:
new_tf = nfa1.transition_function
extra_symbols = nfa2.alphabet - nfa1.alphabet
if extra_symbols:
for pair in product(nfa1.states, extra_symbols):
new_tf[pair] = set()
return new_tf
return add_one_way(nfa1, nfa2), add_one_way(nfa2, nfa1)
self_tf, other_tf = add_empty_transitions(self, other)
new_self = NFA(
transition_function=self_tf,
start_state=self.start_state,
accept_states=self.accept_states
)
new_other = NFA(
transition_function=other_tf,
start_state=other.start_state,
accept_states=other.accept_states
)
combination_tf = {}
combination_tf.update(new_self.transition_function)
combination_tf.update(new_other.transition_function)
return new_self, new_other, combination_tf
def _good_range(self) -> None:
bad_range = {
x for x in self.transition_function.values()
if not isinstance(x, collections.abc.Set)
}
_error_message(
bad_set=bad_range,
message_singular=("Value {} in the range of the transition "
"function is not a set."),
message_plural=("Values {} in the range of the transition "
"function are not sets.")
)
transition_range: Set[Optional[AbstractSet[State]]] = set.union(
*self.transition_function.values()
)
_error_message(
bad_set=transition_range - self.states,
message_singular=("State {} in the range of the transition "
"function is not in the fsa's state set."),
message_plural=("States {} in the range of the transition "
"function are not in the fsa's state set.")
)
def _get_successors(
self, *, state_set: AbstractSet[State], symbol: Symbol
) -> FrozenSet[State]:
def get_successor(state: State, sym: Symbol) -> AbstractSet[State]:
self._transition_function = cast(
NfaTransitionFunction, self._transition_function
)
return self._transition_function.get((state, sym), frozenset())
empty: FrozenSet[State] = frozenset() # This avoids a mypy bug.
return empty.union(
*[frozenset(get_successor(state, symbol)) for state in state_set]
)
def _add_epsilons(self, state_set: AbstractSet[State]) -> FrozenSet[State]:
epsilon_neighbours = self._get_successors(
state_set=state_set, symbol=''
)
while epsilon_neighbours - state_set:
state_set = state_set | epsilon_neighbours
epsilon_neighbours = self._get_successors(
state_set=epsilon_neighbours, symbol=''
)
return frozenset(state_set)
def _transition(self, state_set: AbstractSet[State], symbol: Symbol):
return self._add_epsilons(self._get_successors(state_set=state_set, symbol=symbol))
def accepts(self, string: str) -> bool:
"""
Determines whether nfa accepts input string. Will raise a ValueError
        exception if the string contains symbols that aren't in the nfa's
alphabet.
"""
_check_input(string=string, alphabet=self.alphabet)
current_states = self._add_epsilons({self.start_state})
for symbol in string:
current_states = self._transition(current_states, symbol)
return not current_states & self.accept_states == set()
def determinize(self) -> "DFA":
"""Returns a DFA that recognizes the same same language as the NFA
instance.
WARNING: The set of DFA states has the cardinality of the power-set of
the set of NFA states. For related reasons, the time complexity of this
method is exponential in the number of states of the NFA. Don't
determinize big NFAs.
"""
# powerset code an itertools recipe, from
# https://docs.python.org/3/library/itertools.html#recipes
# (minor modification to make the return a set of frozensets).
def powerset(iterable: Iterable) -> Set[FrozenSet]:
s = list(iterable)
return {
frozenset(item) for item in chain.from_iterable(
combinations(s, r) for r in range(len(s)+1)
)
}
state_sets = powerset(self.states)
determinized_tf = {}
determinized_accept = set()
for (state_set, symbol) in product(state_sets, self._alphabet):
determinzed_state = _stringify(state_set)
determinized_tf[(determinzed_state, symbol)] = _stringify(
self._transition(state_set, symbol)
)
if set(state_set) & self.accept_states:
determinized_accept.add(determinzed_state)
determinized_start = _stringify(
self._add_epsilons({self._start_state})
)
return DFA(
transition_function=determinized_tf,
start_state=determinized_start,
accept_states=determinized_accept
)
def star(self) -> "NFA":
"""
        Let A be the language recognised by nfa. `nfa.star()` returns an nfa
that recognizes A* -- i.e., the set of all strings formed by
concatenating any number of members of A.
"""
star_start = _get_new_state(self.states)
        # Copy so that the original nfa's transition function is not mutated.
        star_tf = dict(self.transition_function)
        star_tf[(star_start, '')] = {self.start_state}
        for symbol in self.alphabet:
            star_tf[(star_start, symbol)] = set()
        for state in self.accept_states:
            # Preserve any existing epsilon-moves out of the accept state.
            existing = star_tf.get((state, ''), set())
            star_tf[(state, '')] = set(existing) | {self.start_state}
star_accepts = self.accept_states | {star_start}
return NFA(
transition_function=star_tf,
start_state=star_start,
accept_states=star_accepts
)
@staticmethod
def fit(
regex: Regex,
alphabet: AbstractSet[Symbol] = (
set(printable) - {'(', ')', '|', '*'}
)
) -> "NFA":
"""
Takes a regular expression and an alphabet (i.e., a set of
one-character strings) as input; returns an NFA that recognises the
language defined by that regular expression and that alphabet.
        The alphabet parameter is optional; its default value is
string.printable -- i.e., the set of "printable" characters, which
includes the standard ASCII letters and digits, and most common
punctuation and white space.
Actually, that's not quite right -- the default value is
        string.printable *minus* parentheses, the vertical bar and the star
        symbol, for reasons that I will explain presently.
As of now, the syntax of the regular expressions that this method takes
as input is very simple -- much simpler than the standard python
        regular expressions. All characters are interpreted as literals for
        symbols in the alphabet except for '(', ')', '|', '*', '•', '€' and
'Ø'. The parentheses, vertical bar and star mean what you'd expect
them to mean if you are familiar with regular expressions. '•'
(option-8 on a mac keyboard) means concatenation. You can leave
        concatenation implicit, as is usual; no need to write '•' explicitly
if you don't want to. But it gets used internally. '€' (option-shift-2)
is used to match the empty string (because it kind of looks like an
epsilon); there's no other way to match, for instance, {'', '0'} with
the current syntax. (Quick challenge: it's not totally obvious how to
match the empty string in normal python regex syntax either, though it
can be done; give it a go.) 'Ø' (option-shift-o) represents the empty
set; you can match to the empty language with it.
        For reasons related to the above, the characters '(', ')', '|', '*',
'•', '€' and 'Ø' cannot be symbols in the alphabet of the NFA. (My
apologies to speakers of Scandinavian languages for the last one; I am
very against English chauvinism, but your letter is so very close to
the empty-set symbol. If, by some miracle, there is someone who cares
about this, I will change the symbol for empty-set.)
In the absence of parentheses, the order of operations is: `*`, then
`•`, then `|`.
This method uses a version of Dijkstra's shunting yard algorithm to
parse the regex and build the NFA.
The method will raise a ValueError exception if any of the following
conditions hold:
        1. the alphabet contains any of the verboten characters -- i.e., `(`,
        `)`, `|`, `*`, `•`, `€` and `Ø`,
        2. the input regex string contains a character not in the alphabet,
        and not one of the above verboten characters,
        3. the input regex contains a binary operator followed by an
        operator, or
4. the input regex does not have properly matching parentheses.
"""
operator_to_operation = {
'|': NFA.__or__,
'•': NFA.__add__
}
_error_message(
bad_set=set(NOT_SYMBOLS) & alphabet,
message_singular="Alphabet cannot contain character {}.",
message_plural="Alphabet cannot contain characters {}."
)
def fit_empty(empty: Regex) -> NFA:
tf: NfaTransitionFunction = {
pair: set() for pair in product({'q1'}, alphabet)
}
accept_states = set() if empty == 'Ø' else {'q1'}
return NFA(
transition_function=tf,
start_state='q1',
accept_states=accept_states
)
def fit_symbol(symbol: Symbol) -> NFA:
tf: MutableNfaTF = {
pair: set() for pair in product({'q1', 'q2'}, alphabet)
}
tf[('q1', symbol)] = {'q2'}
return NFA(
transition_function=tf, start_state='q1', accept_states={'q2'}
)
machine_stack: List[NFA] = []
operator_stack = ['sentinel']
def binary_operate() -> None:
right_operand = machine_stack.pop()
left_operand = machine_stack.pop()
machine = operator_to_operation[operator_stack.pop()](
left_operand, right_operand
)
machine_stack.append(machine)
def compare(operator: Regex) -> int:
return (
OPERATORS.index(operator)
- OPERATORS.index(operator_stack[-1])
)
regex = _pre_process(regex, alphabet)
for char in regex:
if char in EMPTIES:
machine_stack.append(fit_empty(char))
elif char in alphabet:
machine_stack.append(fit_symbol(char))
elif char == '*':
machine_stack[-1] = machine_stack[-1].star()
elif char in OPERATORS:
if operator_stack[-1] in PARENTHE or compare(char) > 0:
operator_stack.append(char)
else:
while (
operator_stack[-1] not in PARENTHE
and compare(char) <= 0
):
binary_operate()
operator_stack.append(char)
elif char == '(':
operator_stack.append(char)
else:
while operator_stack[-1] != '(':
binary_operate()
operator_stack.pop()
while len(operator_stack) > 1:
binary_operate()
return machine_stack.pop()
OPERATORS = ['sentinel', '|', '•', '*']
PARENTHE = ['(', ')']
EMPTIES = ['€', 'Ø']
NOT_SYMBOLS = OPERATORS + PARENTHE + EMPTIES
def _pre_process(regex: Regex, alphabet: AbstractSet[Symbol]) -> Regex:
first_char = regex[0]
if first_char in OPERATORS:
raise ValueError(f"Regex cannot start with '{first_char}'.")
processed = ''
paren_count = 0
for char in regex:
if char in alphabet or char == '(':
if len(processed) > 0:
processed += (
'•' if processed[-1] not in {'(', '|'}
else ''
)
if char not in alphabet | set(NOT_SYMBOLS):
raise ValueError(
f"Regex contains character '{char}' that is not in "
"alphabet and not an accepted regex character."
)
if char in OPERATORS and processed[-1] in {'|', '•'}:
raise ValueError(
"Regex contains binary operator followed by an "
"operator; not cool."
)
if char == '(':
paren_count += 1
if char == ')':
paren_count -= 1
if paren_count < 0:
raise ValueError(
"Right parenthesis occurs in regex withour matching "
"left parenthesis."
)
processed += char
if paren_count > 0:
raise ValueError(
"Left parenthesis occurs in regex without matching right "
"parenthesis."
)
return processed
DfaTransitionFunction = Mapping[Tuple[State, Symbol], State]
class DFA(_FSA):
"""
A deterministic finite automaton class. Takes three keyword arguments:
- `transition_function`: Mapping[Tuple[State, Symbol], State]
- `start_state`: State
    - `accept_states`: AbstractSet[State]
(where States are strings and Symbols are one-char strings).
The keys of the `transition_function` implicitly define the dfa's state-set
and alphabet.
    The class will raise a ValueError exception on instantiation if any of the
following are true:
* the start state is not a member of the set of states inferred from the
transition function;
* the set of accept states is not a subset of the set of states inferred
from the transition function;
* the range of the transition function is not a subset of the set of
states inferred from the transition function;
* a member of the alphabet inferred from the transition function is not a
one-character string;
* the transition function is missing a case -- i.e., it is not the case
that every pair of a state and a symbol is in the domain of the
transition function.
    The exception message will specify which of the above conditions
triggered the exception, and which states/symbols are the source of the
problem.
"""
def __or__(self, other: "DFA") -> "DFA":
"""
Let A be the language recognised by dfa1, and B be the language
recognized by dfa2. `dfa1 | dfa2` returns a dfa that recognizes A union
B. The states of dfa1 | dfa2 are ordered pairs of states from dfa1 and
dfa2. There is no problem with the input DFAs having different
alphabets.
"""
union_alphabet = self.alphabet | other.alphabet
def maybe_add_state(
dfa1: DFA, dfa2: DFA
) -> Tuple[FrozenSet[State], DfaTransitionFunction]:
new_tf = dfa1.transition_function
new_states = dfa1.states
extra_symbols = dfa2.alphabet - dfa1.alphabet
if extra_symbols:
error_state = _get_new_state(dfa1.states)
new_states = dfa1.states | {error_state}
for symbol in union_alphabet:
new_tf[(error_state, symbol)] = error_state
for symbol in extra_symbols:
for state in dfa1.states:
new_tf[(state, symbol)] = error_state
return new_states, new_tf
self_states, self_tf = maybe_add_state(self, other)
other_states, other_tf = maybe_add_state(other, self)
state_pairs = product(self_states, other_states)
union_transition_function = {}
for (state1, state2), symbol in product(state_pairs, union_alphabet):
union_transition_function[(state1 + state2, symbol)] = (
self_tf[(state1, symbol)] + other_tf[(state2, symbol)]
)
union_start_state = self.start_state + other.start_state
union_accept_states = {
_stringify(item) for item in (
set(product(self.accept_states, other_states))
| set(product(self_states, other.accept_states))
)
}
return DFA(
transition_function=union_transition_function,
start_state=union_start_state,
accept_states=union_accept_states
)
def __add__(self, other: "DFA") -> "DFA":
"""
Let A be the language recognised by dfa1, B be the language recognised
by dfa2. `dfa1 + dfa2` returns a DFA that recognises the set of all
concatenations of strings in A with strings in B. This DFA operator is
parasitic on the NFA operator; it converts the input DFAs into NFAs,
uses the NFA '+', then converts the result back to a DFA. That makes
        for a relatively simple but, sadly, computationally expensive algorithm.
For that reason, I recommend you don't `+` dfas with large numbers of
states.
"""
return (self.non_determinize() + other.non_determinize()).determinize()
def _gnfize(self) -> _GNFA:
gnfa_tf: MutableGnfaTF = {}
for state1, symbol in self.transition_function.keys():
state2 = self.transition_function[(state1, symbol)]
if (state1, state2) in gnfa_tf.keys():
gnfa_tf[(state1, state2)] += '|' + symbol
else:
gnfa_tf[(state1, state2)] = symbol
gnfa_start = _get_new_state(self.states)
gnfa_accept = _get_new_state(self.states | {gnfa_start})
gnfa_tf[(gnfa_start, self.start_state)] = '€'
for state in self.accept_states:
gnfa_tf[(state, gnfa_accept)] = '€'
for state1, state2 in product(
self.states | {gnfa_start}, self.states | {gnfa_accept}
):
if (state1, state2) not in gnfa_tf:
gnfa_tf[(state1, state2)] = 'Ø'
return _GNFA(gnfa_tf, set(self.states), gnfa_start, gnfa_accept)
def _good_range(self) -> None:
transition_range = set(self.transition_function.values())
bad_range = transition_range - self.states
_error_message(
bad_set=bad_range,
message_singular=("State {} in the range of the transition "
"function is not in the fsa's state set."),
message_plural=("States {} in the range of the transition "
"function are not in the fsa's state set.")
)
def accepts(self, string: str) -> bool:
"""
`my_dfa.accepts("some string")` returns `True` if my_dfa accepts "some
string", and `False` otherwise. Will raise a ValueError exception is
the string contains symbols that aren't in the DFA's alphabet.
"""
_check_input(string=string, alphabet=self.alphabet)
current_state = self.start_state
for symbol in string:
current_state = self.transition_function[(current_state, symbol)]
return current_state in self.accept_states
def encode(self) -> Regex:
"""
Let A be the language accepted by dfa. `dfa.encode()` returns a regex
string that generates A. That regex string is liable to be much more
complicated than necessary; maybe I'll figure out how to improve on
average simplicity, eventually.
"""
gnfa = self._gnfize()
while len(gnfa.states) > 2:
gnfa = gnfa.reduce()
return gnfa.transition_function[(gnfa.start_state, gnfa.accept_state)]
def non_determinize(self) -> NFA:
"""
Convenience method that takes a DFA instance and returns an NFA
instance.
"""
nd_transition_function = {
key: {value} for key, value in self.transition_function.items()
}
return NFA(
transition_function=nd_transition_function,
start_state=self.start_state,
accept_states=self.accept_states
)
def _stringify(states: Iterable[State]) -> str:
if not isinstance(states, collections.abc.Sequence):
states = list(states)
states.sort()
return "".join(states)
def _get_new_state(state_set: Container) -> State:
counter = 1
new_state = 'new_state1'
while new_state in state_set:
counter += 1
new_state = "new_state" + str(counter)
return new_state
| 2.671875 | 3 |
Numbers/Roman Number Generator/tests.py | fossabot/IdeaBag2-Solutions | 10 | 4095 | <reponame>fossabot/IdeaBag2-Solutions<filename>Numbers/Roman Number Generator/tests.py
#!/usr/bin/env python3
import unittest
from roman_number_generator import arabic_to_roman
class Test(unittest.TestCase):
    def test_arabic_to_roman(self):
self.assertRaises(ValueError, arabic_to_roman, 4000)
self.assertEqual(arabic_to_roman(4), "IV")
self.assertEqual(arabic_to_roman(12), "XII")
self.assertEqual(arabic_to_roman(20), "XX")
if __name__ == "__main__":
unittest.main()
| 2.9375 | 3 |
modules/moduleBase.py | saintaardvark/glouton-satnogs-data-downloader | 0 | 4096 | from infrastructure.satnogClient import SatnogClient
import os
class ModuleBase:
def __init__(self, working_dir):
self.working_dir = working_dir
def runAfterDownload(self, file_name, full_path, observation):
raise NotImplementedError() | 1.960938 | 2 |
oregano_plugins/fusion/server.py | MrNaif2018/Oregano | 0 | 4097 | <reponame>MrNaif2018/Oregano
#!/usr/bin/env python3
#
# Oregano - a lightweight Ergon client
# CashFusion - an advanced coin anonymizer
#
# Copyright (C) 2020 <NAME>
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
"""
A basic server implementation for CashFusion. Does not natively offer SSL
support, however a server admin may run an SSL server proxy such as nginx for
that purpose.
"""
import secrets
import sys
import threading
import time
import traceback
from collections import defaultdict
import oregano.schnorr as schnorr
from oregano.address import Address
from oregano.util import PrintError, ServerError, TimeoutException
from . import fusion_pb2 as pb
from .comms import send_pb, recv_pb, ClientHandlerThread, GenericServer, get_current_genesis_hash
from .protocol import Protocol
from .util import (FusionError, sha256, calc_initial_hash, calc_round_hash, gen_keypair, tx_from_components,
rand_position)
from .validation import (check_playercommit, check_covert_component, validate_blame, ValidationError,
check_input_electrumx)
# Resistor "E series" values -- round numbers that are almost geometrically uniform
E6 = [1.0, 1.5, 2.2, 3.3, 4.7, 6.8]
E12 = [1.0, 1.2, 1.5, 1.8, 2.2, 2.7, 3.3, 3.9, 4.7, 5.6, 6.8, 8.2]
E24 = [1.0, 1.1, 1.2, 1.3, 1.5, 1.6, 1.8, 2.0, 2.2, 2.4, 2.7, 3.0, 3.3, 3.6, 3.9, 4.3, 4.7, 5.1, 5.6, 6.2, 6.8, 7.5, 8.2, 9.1]
# TODO - make these configurable
class Params:
num_components = 23
component_feerate = 1000 # sats/kB
max_excess_fee = 300000 # sats
tiers = [round(b*s) for b in [10000, 100000, 1000000, 10000000, 100000000] for s in E12]
# How many clients do we want before starting a fusion?
min_clients = 8
# If all clients submitted largest possible component (uncompressed p2pkh input), how many could we take until the result would exceed 100 kB standard tx size limitation?
max_clients = (100000 - 12) // (num_components * 173)
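    # (With num_components = 23 this evaluates to (100000 - 12) // (23 * 173)
    #  = 99988 // 3979 = 25 clients.)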
# Every round, clients leave ... How many clients do we need as an absolute minimum (for privacy)?
min_safe_clients = 6
# Choose the minimum excess fee based on dividing the overhead amongst players, in the smallest fusion
# (these overhead numbers assume op_return script size of 1 + 5 (lokad) + 33 (session hash) )
if min_safe_clients * num_components >= 2 * 0xfc:
# the smallest fusion could require 3-byte varint for both inputs and outputs lists
overhead = 62
elif min_safe_clients * num_components >= 0xfc:
# the smallest fusion could require 3-byte varint for either inputs or outputs lists
overhead = 60
else:
# the smallest fusion will use 1-byte varint for both inputs and outputs lists
overhead = 58
min_excess_fee = (overhead + min_safe_clients - 1) // min_safe_clients
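    # (Worked through with the defaults above: 6 * 23 = 138 components, which
    #  is below 0xfc = 252, so overhead = 58 and
    #  min_excess_fee = (58 + 6 - 1) // 6 = 10 sats.)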
# How many clients can share same tag on a given tier (if more try to join, reject)
max_tier_client_tags = 100
# For a given IP, how many players can they represent in the same fuse?
ip_max_simul_fuse = 3
# Guaranteed time to launch a fusion if the pool has stayed at or above min_clients for this long.
start_time_max = 1200
# Inter-fusion delay -- after starting any fusion, wait this long before starting the next one (unless hit max time or pool is full).
start_time_spacing = 120
# But don't start a fusion if it has only been above min_clients for a short time (unless pool is full).
start_time_min = 400
# whether to print a lot of logs
noisy = False
# How long covert connections are allowed to stay open without activity.
# note this needs to consider the maximum interval between messages:
# - how long from first connection to last possible Tor component submission?
# - how long from one round's component submission to the next round's component submission?
COVERT_CLIENT_TIMEOUT = 40
# used for non-cryptographic purposes
import random
rng = random.Random()
rng.seed(secrets.token_bytes(32))
def clientjob_send(client, msg, timeout = Protocol.STANDARD_TIMEOUT):
client.send(msg, timeout=timeout)
def clientjob_goodbye(client, text):
# a gentler goodbye than killing
if text is not None:
client.send_error(text)
raise client.Disconnect
class ClientThread(ClientHandlerThread):
"""Basic thread per connected client."""
def recv(self, *expected_msg_names, timeout=Protocol.STANDARD_TIMEOUT):
submsg, mtype = recv_pb(self.connection, pb.ClientMessage, *expected_msg_names, timeout=timeout)
return submsg
def send(self, submsg, timeout=Protocol.STANDARD_TIMEOUT):
send_pb(self.connection, pb.ServerMessage, submsg, timeout=timeout)
def send_error(self, msg):
self.send(pb.Error(message = msg), timeout=Protocol.STANDARD_TIMEOUT)
def error(self, msg):
self.send_error(msg)
raise FusionError(f'Rejected client: {msg}')
class ClientTag(bytes):
""" enhanced bytes object to represent a pool tag """
__slots__ = ()
def __new__(cls, ipstr, tagbytes, maxsimul):
ipb = ipstr.encode()
b = bytes([maxsimul, len(ipb)]) + ipb + tagbytes
return super().__new__(cls, b)
@property
def maxsimul(self):
return self[0]
class TagStatus:
__slots__ = ('pool', 'all_')
def __init__(self):
self.pool = 0
self.all_ = 0
class WaitingPool:
""" a waiting pool for a specific tier """
def __init__(self, fill_threshold, tag_max):
self.pool = set() # clients who will be put into fusion round if started at this tier
self.queue = list() # clients who are waiting due to tags being full
self.tags = defaultdict(TagStatus) # how are the various tags
self.fill_threshold = fill_threshold # minimum number of pool clients to trigger setting fill_time
self.fill_time = None # when did pool exceed fill_threshold
self.tag_max = tag_max # how many clients can share same tag (in pool and queue)
def check_add(self, client):
for t in client.tags:
ts = self.tags.get(t)
if ts is not None and ts.all_ >= self.tag_max:
return "too many clients with same tag"
def _add_pool(self, client):
self.pool.add(client)
for t in client.tags:
ts = self.tags[t]
ts.pool += 1
if len(self.pool) == self.fill_threshold:
self.fill_time = time.monotonic()
def add(self, client):
can_pool = True
for t in client.tags:
ts = self.tags[t]
ts.all_ += 1
if ts.pool >= t.maxsimul:
can_pool = False
if can_pool:
self._add_pool(client)
else:
self.queue.append(client)
return can_pool
def remove(self, client):
# make sure to call try_move_from_queue() after calling this
try:
self.pool.remove(client)
except KeyError:
in_pool = False
try:
self.queue.remove(client)
except ValueError:
return False
else:
in_pool = True
if len(self.pool) < self.fill_threshold:
self.fill_time = None
for t in client.tags:
ts = self.tags[t]
ts.all_ -= 1
if in_pool:
ts.pool -= 1
if ts.all_ == 0: # cleanup for no-longer-used tags
del self.tags[t]
return True
def try_move_from_queue(self):
# attempt to move clients from queue into pool
moved = []
for client in self.queue:
for t in client.tags:
ts = self.tags[t]
if ts.pool >= t.maxsimul:
break
else:
self._add_pool(client)
moved.append(client)
for client in moved:
self.queue.remove(client)
class FusionServer(GenericServer):
"""Server for clients waiting to start a fusion. New clients get a
ClientThread made for them, and they are put into the waiting pools.
Once a Fusion thread is started, the ClientThreads are passed over to
a FusionController to run the rounds."""
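    # Rough instantiation sketch (the host/port values are invented; the real
    # wiring comes from the plugin that embeds this server):
    #
    #   server = FusionServer(config, network, '0.0.0.0', 8787)
    #   server.start()     # same start()/stop() lifecycle as CovertServer below
    #   ...
    #   server.stop()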
def __init__(self, config, network, bindhost, port, upnp = None, announcehost = None, donation_address = None):
assert network
assert isinstance(donation_address, (Address, type(None)))
if not schnorr.has_fast_sign() or not schnorr.has_fast_verify():
raise RuntimeError("Fusion requires libsecp256k1")
super().__init__(bindhost, port, ClientThread, upnp = upnp)
self.config = config
self.network = network
self.announcehost = announcehost
self.donation_address = donation_address
self.waiting_pools = {t: WaitingPool(Params.min_clients, Params.max_tier_client_tags) for t in Params.tiers}
self.t_last_fuse = time.monotonic() # when the last fuse happened; as a placeholder, set this to startup time.
self.reset_timer()
def run(self):
try:
super().run()
finally:
self.waiting_pools.clear() # gc clean
def reset_timer(self, ):
""" Scan pools for the favoured fuse:
- Out of the pool(s) with the most number of players,
- Choose the pool with the earliest fill time;
- If no pools are filled then there is no favoured fuse.
(since fill time is a float, this will almost always be unique)
"""
with self.lock:
time_best = None
tier_best = None
size_best = 0
for t, pool in self.waiting_pools.items():
ft = pool.fill_time
if ft is None:
continue
size = len(pool.pool)
if size >= size_best:
if time_best is None or ft < time_best or size > size_best:
time_best = ft
tier_best = t
size_best = size
if time_best is None:
self.tier_best_starttime = None
else:
self.tier_best_starttime = max(time_best + Params.start_time_min, self.t_last_fuse + Params.start_time_spacing)
self.tier_best = tier_best
def start_fuse(self, tier):
""" Immediately launch Fusion at the selected tier. """
with self.lock:
chosen_clients = list(self.waiting_pools[tier].pool)
# Notify that we will start.
for c in chosen_clients:
c.start_ev.set()
# Remove those clients from all pools
for t, pool in self.waiting_pools.items():
for c in chosen_clients:
pool.remove(c)
pool.try_move_from_queue()
# Update timing info
self.t_last_fuse = time.monotonic()
self.reset_timer()
# Uncomment the following to: Remove from spawned clients list, so that the fusion can continue independently of waiting server.
# self.spawned_clients.difference_update(chosen_clients)
# Kick off the fusion.
rng.shuffle(chosen_clients)
            fusion = FusionController(self.network, tier, chosen_clients, self.bindhost, upnp = self.upnp, announcehost = self.announcehost)
fusion.start()
return len(chosen_clients)
def new_client_job(self, client):
client_ip = client.connection.socket.getpeername()[0]
msg = client.recv('clienthello')
if msg.version != Protocol.VERSION:
client.error("Mismatched protocol version, please upgrade")
if msg.genesis_hash:
if msg.genesis_hash != get_current_genesis_hash():
# For now, msg.genesis_hash is optional and we tolerate it
# missing. However, if the client declares the genesis_hash, we
# do indeed disallow them connecting if they are e.g. on testnet
# and we are mainnet, etc.
client.error("This server is on a different chain, please switch servers")
else:
client.print_error("👀 No genesis hash declared by client, we'll let them slide...")
if self.stopping:
return
donation_address = ''
if isinstance(self.donation_address, Address):
donation_address = self.donation_address.to_full_ui_string()
client.send(pb.ServerHello( num_components = Params.num_components,
component_feerate = Params.component_feerate,
min_excess_fee = Params.min_excess_fee,
max_excess_fee = Params.max_excess_fee,
tiers = Params.tiers,
donation_address = donation_address
))
# We allow a long timeout for clients to choose their pool.
msg = client.recv('joinpools', timeout=120)
if len(msg.tiers) == 0:
client.error("No tiers")
if len(msg.tags) > 5:
client.error("Too many tags")
# Event for signalling us that a pool started.
start_ev = threading.Event()
client.start_ev = start_ev
if client_ip.startswith('127.'):
# localhost is whitelisted to allow unlimited access
client.tags = []
else:
# Default tag: this IP cannot be present in too many fuses.
client.tags = [ClientTag(client_ip, b'', Params.ip_max_simul_fuse)]
for tag in msg.tags:
if len(tag.id) > 20:
client.error("Tag id too long")
if not (0 < tag.limit < 6):
client.error("Tag limit out of range")
ip = '' if tag.no_ip else client_ip
client.tags.append(ClientTag(ip, tag.id, tag.limit))
        try:
            mytierpools = {t: self.waiting_pools[t] for t in msg.tiers}
        except KeyError as e:
            if self.stopping:
                return
            # (The comprehension variable does not leak in Python 3, so report
            # the offending tier via the KeyError itself.)
            client.error(f"Invalid tier selected: {e.args[0]}")
try:
mytiers = list(mytierpools)
rng.shuffle(mytiers) # shuffle the adding order so that if filling more than one pool, we don't have bias towards any particular tier
with self.lock:
if self.stopping:
return
# add this client to waiting pools
for pool in mytierpools.values():
res = pool.check_add(client)
if res is not None:
client.error(res)
for t in mytiers:
pool = mytierpools[t]
pool.add(client)
if len(pool.pool) >= Params.max_clients:
# pool filled up to the maximum size, so start immediately
self.start_fuse(t)
return
# we have added to pools, which may have changed the favoured tier
self.reset_timer()
inftime = float('inf')
while True:
with self.lock:
if self.stopping or start_ev.is_set():
return
tnow = time.monotonic()
# scan through tiers and collect statuses, also check start times.
statuses = dict()
tfill_thresh = tnow - Params.start_time_max
for t, pool in mytierpools.items():
if client not in pool.pool:
continue
status = pb.TierStatusUpdate.TierStatus(players = len(pool.pool), min_players = Params.min_clients)
remtime = inftime
if pool.fill_time is not None:
# a non-favoured pool will start eventually
remtime = pool.fill_time - tfill_thresh
if t == self.tier_best:
# this is the favoured pool, can start at a special time
remtime = min(remtime, self.tier_best_starttime - tnow)
if remtime <= 0:
self.start_fuse(t)
return
elif remtime != inftime:
status.time_remaining = round(remtime)
statuses[t] = status
client.send(pb.TierStatusUpdate(statuses = statuses))
start_ev.wait(2)
except:
# Remove client from waiting pools on failure (on success, we are already removed; on stop we don't care.)
with self.lock:
for t, pool in mytierpools.items():
if pool.remove(client):
pool.try_move_from_queue()
if self.tier_best in mytierpools:
# we left from best pool, so it might not be best anymore.
self.reset_timer()
raise
class ResultsCollector:
# Collect submissions from different sources, with a deadline.
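    # Usage sketch: producers wrap their work in `with collector:` (so an
    # exception inside the block is counted as a failure) and call
    # collector.add(result) on success; the consumer calls
    # collector.gather(deadline=...) to collect whatever arrived in time.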
def __init__(self, num_results, done_on_fail = True):
self.num_results = int(num_results)
self.done_on_fail = bool(done_on_fail)
self.done_ev = threading.Event()
self.lock = threading.Lock()
self.results = []
self.fails = []
def __enter__(self, ):
return self
def __exit__(self, exc_type, exc_value, traceback):
if exc_type is not None:
self.fails.append(exc_value)
if self.done_on_fail:
self.done_ev.set()
elif len(self.fails) + len(getattr(self, 'results', ())) >= self.num_results:
self.done_ev.set()
def gather(self, *, deadline):
remtime = deadline - time.monotonic()
self.done_ev.wait(max(0., remtime))
with self.lock:
ret = self.results
del self.results
return ret
def add(self, result):
with self.lock:
try:
self.results.append(result)
except AttributeError:
return False
else:
if len(self.fails) + len(self.results) >= self.num_results:
self.done_ev.set()
return True
class FusionController(threading.Thread, PrintError):
""" This controls the Fusion rounds running from server side. """
def __init__(self, network, tier, clients, bindhost, upnp = None, announcehost = None):
super().__init__(name="FusionController")
self.network = network
self.tier = tier
self.clients = list(clients)
self.bindhost = bindhost
self.upnp = upnp
self.announcehost = announcehost
self.daemon = True
def sendall(self, msg, timeout = Protocol.STANDARD_TIMEOUT):
for client in self.clients:
client.addjob(clientjob_send, msg, timeout)
def check_client_count(self,):
live = [c for c in self.clients if not c.dead]
if len(live) < Params.min_safe_clients:
for c in live:
c.kill("too few remaining live players")
raise FusionError("too few remaining live players")
def run (self, ):
self.print_error(f'Starting fusion with {len(self.clients)} players at tier={self.tier}')
covert_server = CovertServer(self.bindhost, upnp = self.upnp)
try:
annhost = covert_server.host if self.announcehost is None else self.announcehost
annhost_b = annhost.encode('ascii')
annport = covert_server.port
covert_server.noisy = Params.noisy
covert_server.start()
self.print_error(f'Covert server started @ {covert_server.host}:{covert_server.port} (announcing as: {annhost_b}:{annport})')
begin_time = round(time.time())
self.sendall(pb.FusionBegin(tier = self.tier,
covert_domain = annhost_b,
covert_port = annport,
covert_ssl = False,
server_time = begin_time))
self.last_hash = calc_initial_hash(self.tier, annhost_b, annport, False, begin_time)
time.sleep(Protocol.WARMUP_TIME)
# repeatedly run rounds until successful or exception
while True:
covert_server.reset()
# Clean up dead clients
self.clients = [c for c in self.clients if not c.dead]
self.check_client_count()
if self.run_round(covert_server):
break
self.print_error('Ended successfully!')
except FusionError as e:
self.print_error(f"Ended with error: {e}")
except Exception as e:
self.print_error('Failed with exception!')
traceback.print_exc(file=sys.stderr)
for c in self.clients:
c.addjob(clientjob_goodbye, 'internal server error')
finally:
covert_server.stop()
for c in self.clients:
c.addjob(clientjob_goodbye, None)
self.clients = [] # gc
def kick_missing_clients(self, goodclients, reason = None):
baddies = set(self.clients).difference(goodclients)
for c in baddies:
c.kill(reason)
def run_round(self, covert_server):
covert_priv, covert_Upub, covert_Cpub = gen_keypair()
round_pubkey = covert_Cpub
# start to accept covert components
covert_server.start_components(round_pubkey, Params.component_feerate)
# generate blind nonces (slow!)
for c in self.clients:
c.blinds = [schnorr.BlindSigner() for _co in range(Params.num_components)]
lock = threading.Lock()
seen_salthashes = set()
# Send start message to players; record the time we did this
round_time = round(time.time())
collector = ResultsCollector(len(self.clients), done_on_fail = False)
def client_start(c, collector):
with collector:
c.send(pb.StartRound(round_pubkey = round_pubkey,
blind_nonce_points = [b.get_R() for b in c.blinds],
server_time = round_time
))
msg = c.recv('playercommit')
commit_messages = check_playercommit(msg, Params.min_excess_fee, Params.max_excess_fee, Params.num_components)
newhashes = set(m.salted_component_hash for m in commit_messages)
with lock:
expected_len = len(seen_salthashes) + len(newhashes)
seen_salthashes.update(newhashes)
if len(seen_salthashes) != expected_len:
c.error('duplicate component commitment')
if not collector.add((c, msg.initial_commitments, msg.excess_fee)):
c.error("late commitment")
# record for later
c.blind_sig_requests = msg.blind_sig_requests
c.random_number_commitment = msg.random_number_commitment
for client in self.clients:
client.addjob(client_start, collector)
# Record the time that we sent 'startround' message to players; this
# will form the basis of our covert timeline.
covert_T0 = time.monotonic()
self.print_error(f"startround sent at {time.time()}; accepting covert components")
# Await commitment messages then process results
results = collector.gather(deadline = covert_T0 + Protocol.TS_EXPECTING_COMMITMENTS)
# Filter clients who didn't manage to give a good commitment.
prev_client_count = len(self.clients)
self.clients = [c for c, _, _ in results]
self.check_client_count()
self.print_error(f"got commitments from {len(self.clients)} clients (dropped {prev_client_count - len(self.clients)})")
total_excess_fees = sum(f for _,_,f in results)
# Generate scrambled commitment list, but remember exactly where each commitment originated.
commitment_master_list = [(commit, ci, cj) for ci, (_, commitments, _) in enumerate(results) for cj,commit in enumerate(commitments)]
rng.shuffle(commitment_master_list)
all_commitments = tuple(commit for commit,ci,cj in commitment_master_list)
# Send blind signatures
for c in self.clients:
scalars = [b.sign(covert_priv, e) for b,e in zip(c.blinds, c.blind_sig_requests)]
c.addjob(clientjob_send, pb.BlindSigResponses(scalars = scalars))
del c.blinds, c.blind_sig_requests
del results, collector
# Sleep a bit before uploading commitments, as clients are doing this.
remtime = covert_T0 + Protocol.T_START_COMPS - time.monotonic()
if remtime > 0:
time.sleep(remtime)
# Upload the full commitment list; we're a bit generous with the timeout but that's OK.
self.sendall(pb.AllCommitments(initial_commitments = all_commitments),
timeout=Protocol.TS_EXPECTING_COVERT_SIGNATURES)
# Sleep until end of covert components phase
remtime = covert_T0 + Protocol.TS_EXPECTING_COVERT_COMPONENTS - time.monotonic()
assert remtime > 0, "timings set up incorrectly"
time.sleep(remtime)
component_master_list = list(covert_server.end_components().items())
self.print_error(f"ending covert component acceptance. {len(component_master_list)} received.")
# Sort the components & contribs list, then separate it out.
component_master_list.sort(key=lambda x:x[1][0])
all_components = [comp for comp, (sort_key, contrib) in component_master_list]
component_contribs = [contrib for comp, (sort_key, contrib) in component_master_list]
del component_master_list
# Do some preliminary checks to see whether we should just skip the
# signing phase and go directly to blame, or maybe even restart / end
# without sharing components.
skip_signatures = False
if len(all_components) != len(self.clients)*Params.num_components:
skip_signatures = True
self.print_error("problem detected: too few components submitted")
if total_excess_fees != sum(component_contribs):
skip_signatures = True
self.print_error("problem detected: excess fee mismatch")
self.last_hash = session_hash = calc_round_hash(self.last_hash, round_pubkey, round_time, all_commitments, all_components)
#TODO : Check the inputs and outputs to see if we even have reasonable
# privacy with what we have.
bad_components = set()
###
if skip_signatures:
self.print_error("skipping covert signature acceptance")
self.sendall(pb.ShareCovertComponents(components = all_components, skip_signatures = True))
else:
self.print_error("starting covert signature acceptance")
tx, input_indices = tx_from_components(all_components, session_hash)
sighashes = [sha256(sha256(bytes.fromhex(tx.serialize_preimage(i, 0x41, use_cache = True))))
for i in range(len(tx.inputs()))]
pubkeys = [bytes.fromhex(inp['pubkeys'][0]) for inp in tx.inputs()]
covert_server.start_signatures(sighashes,pubkeys)
self.sendall(pb.ShareCovertComponents(components = all_components, session_hash = session_hash))
# Sleep until end of covert signatures phase
remtime = covert_T0 + Protocol.TS_EXPECTING_COVERT_SIGNATURES - time.monotonic()
if remtime < 0:
# really shouldn't happen, we had plenty of time
raise FusionError("way too slow")
time.sleep(remtime)
signatures = list(covert_server.end_signatures())
missing_sigs = len([s for s in signatures if s is None])
###
self.print_error(f"ending covert signature acceptance. {missing_sigs} missing :{'(' if missing_sigs else ')'}")
# mark all missing-signature components as bad.
bad_inputs = set(i for i,sig in enumerate(signatures) if sig is None)
# further, search for duplicated inputs (through matching the prevout and claimed pubkey).
prevout_spenders = defaultdict(list)
for i, inp in enumerate(tx.inputs()):
prevout_spenders[f"{inp['prevout_hash']}:{inp['prevout_n']} {inp['pubkeys'][0]}"].append(i)
for prevout, spenders in prevout_spenders.items():
if len(spenders) == 1:
continue
self.print_error(f"multi-spend of f{prevout} detected")
# If exactly one of the inputs is signed, we don't punish him
# because he's the honest guy and all the other components were
# just imposters who didn't have private key. If more than one
# signed, then it's malicious behaviour!
if sum((signatures[i] is not None) for i in spenders) != 1:
bad_inputs.update(spenders)
if bad_inputs:
bad_components.update(input_indices[i] for i in bad_inputs)
else:
for i, (inp, sig) in enumerate(zip(tx.inputs(), signatures)):
inp['signatures'][0] = sig.hex() + '41'
assert tx.is_complete()
txid = tx.txid()
self.print_error("completed the transaction! " + txid)
try:
self.network.broadcast_transaction2(tx, timeout=3)
except ServerError as e:
nice_msg, = e.args
server_msg = e.server_msg
self.print_error(f"could not broadcast the transaction! {nice_msg}")
except TimeoutException:
self.print_error("timed out while trying to broadcast transaction! misconfigured?")
# This probably indicates misconfiguration since fusion server ought
# to have a good connection to the EC server. Report this back to clients
# as an 'internal server error'.
raise
else:
self.print_error("broadcast was successful!")
# Give our transaction a small head start in relaying, before sharing the
# signatures. This makes it slightly harder for one of the players to
# broadcast a malleated version by re-signing one of their inputs.
time.sleep(2)
self.sendall(pb.FusionResult(ok = True, txsignatures = signatures))
return True
self.sendall(pb.FusionResult(ok = False, bad_components = sorted(bad_components)))
###
self.print_error(f"entering blame phase. bad components: {bad_components}")
if len(self.clients) < 2:
# Sanity check for testing -- the proof sharing thing doesn't even make sense with one player.
for c in self.clients:
c.kill('blame yourself!')
return
# scan the commitment list and note where each client's commitments ended up
client_commit_indexes = [[None]*Params.num_components for _ in self.clients]
for i, (commit, ci, cj) in enumerate(commitment_master_list):
client_commit_indexes[ci][cj] = i
collector = ResultsCollector(len(self.clients), done_on_fail = False)
def client_get_proofs(client, collector):
with collector:
msg = client.recv('myproofslist')
seed = msg.random_number
if sha256(seed) != client.random_number_commitment:
client.error("seed did not match commitment")
proofs = msg.encrypted_proofs
if len(proofs) != Params.num_components:
client.error("wrong number of proofs")
if any(len(p) > 200 for p in proofs):
client.error("too-long proof") # they should only be 129 bytes long.
# generate the possible destinations list (all commitments, but leaving out the originating client's commitments).
myindex = self.clients.index(client)
possible_commitment_destinations = [(ci,cj) for commit, ci, cj in commitment_master_list if ci != myindex]
N = len(possible_commitment_destinations)
assert N == len(all_commitments) - Params.num_components
# calculate the randomly chosen destinations, same way as client did.
relays = []
for i, proof in enumerate(proofs):
dest_client_idx, dest_key_idx = possible_commitment_destinations[rand_position(seed, N, i)]
src_commitment_idx = client_commit_indexes[myindex][i]
relays.append((proof, src_commitment_idx, dest_client_idx, dest_key_idx))
if not collector.add((client, relays)):
client.error("late proofs")
for client in self.clients:
client.addjob(client_get_proofs, collector)
results = collector.gather(deadline = time.monotonic() + Protocol.STANDARD_TIMEOUT)
# Now, repackage the proofs according to destination.
proofs_to_relay = [list() for _ in self.clients]
for src_client, relays in results:
for proof, src_commitment_idx, dest_client_idx, dest_key_idx in relays:
proofs_to_relay[dest_client_idx].append((proof, src_commitment_idx, dest_key_idx, src_client))
live_clients = len(results)
collector = ResultsCollector(live_clients, done_on_fail = False)
def client_get_blames(client, myindex, proofs, collector):
with collector:
# an in-place sort by source commitment idx removes ordering correlations about which client sent which proof
proofs.sort(key = lambda x:x[1])
client.send(pb.TheirProofsList(proofs = [
dict(encrypted_proof=x, src_commitment_idx=y, dst_key_idx=z)
for x,y,z, _ in proofs]))
msg = client.recv('blames', timeout = Protocol.STANDARD_TIMEOUT + Protocol.BLAME_VERIFY_TIME)
# More than one blame per proof is malicious. Boot client
# immediately since client may be trying to DoS us by
# making us check many inputs against blockchain.
if len(msg.blames) > len(proofs):
client.error('too many blames')
if len(set(blame.which_proof for blame in msg.blames)) != len(msg.blames):
client.error('multiple blames point to same proof')
# Note, the rest of this function might run for a while if many
# checks against blockchain need to be done, perhaps even still
# running after run_round has exited. For this reason we try to
# not reference self.<variables> that may change.
for blame in msg.blames:
try:
encproof, src_commitment_idx, dest_key_idx, src_client = proofs[blame.which_proof]
except IndexError:
client.kill(f'bad proof index {blame.which_proof} / {len(proofs)}')
continue
src_commit_blob, src_commit_client_idx, _ = commitment_master_list[src_commitment_idx]
dest_commit_blob = all_commitments[client_commit_indexes[myindex][dest_key_idx]]
try:
ret = validate_blame(blame, encproof, src_commit_blob, dest_commit_blob, all_components, bad_components, Params.component_feerate)
except ValidationError as e:
self.print_error("got bad blame; clamed reason was: "+repr(blame.blame_reason))
client.kill(f'bad blame message: {e} (you claimed: {blame.blame_reason!r})')
continue
if isinstance(ret, str):
self.print_error(f"verified a bad proof (for {src_commitment_idx}): {ret}")
src_client.kill(f'bad proof (for {src_commitment_idx}): {ret}')
continue
if src_client.dead:
# If the blamed client is already dead, don't waste more time.
# Since nothing after this point can report back to the
                        # verifier, there is no privacy leak by the omission.
continue
assert ret, 'expecting input component'
outpoint = ret.prev_txid[::-1].hex() + ':' + str(ret.prev_index)
try:
check_input_electrumx(self.network, ret)
except ValidationError as e:
reason = f'{e.args[0]} ({outpoint})'
self.print_error(f"blaming[{src_commitment_idx}] for bad input: {reason}")
src_client.kill('you provided a bad input: ' + reason)
continue
except Exception as e:
self.print_error(f"player indicated bad input but checking failed with exception {repr(e)} ({outpoint})")
else:
self.print_error(f"player indicated bad input but it was fine ({outpoint})")
# At this point we could blame the originator, however
# blockchain checks are somewhat subjective. It would be
# appropriate to add some 'ban score' to the player.
# we aren't collecting any results, rather just marking that
# 'checking finished' so that if all blames are checked, we
# can start next round right away.
collector.add(None)
for idx, (client, proofs) in enumerate(zip(self.clients, proofs_to_relay)):
client.addjob(client_get_blames, idx, proofs, collector)
_ = collector.gather(deadline = time.monotonic() + Protocol.STANDARD_TIMEOUT + Protocol.BLAME_VERIFY_TIME * 2)
self.sendall(pb.RestartRound())
class CovertClientThread(ClientHandlerThread):
def recv(self, *expected_msg_names, timeout=None):
submsg, mtype = recv_pb(self.connection, pb.CovertMessage, *expected_msg_names, timeout=timeout)
return submsg, mtype
def send(self, submsg, timeout=None):
send_pb(self.connection, pb.CovertResponse, submsg, timeout=timeout)
def send_ok(self,):
self.send(pb.OK(), timeout=5)
def send_error(self, msg):
self.send(pb.Error(message = msg), timeout=5)
def error(self, msg):
self.send_error(msg)
raise FusionError(f'Rejected client: {msg}')
class CovertServer(GenericServer):
"""
Server for covert submissions. How it works:
- Launch the server at any time. By default, will bind to an ephemeral port.
- Before start of covert components phase, call start_components.
    - To signal the end of covert components phase, owner calls end_components, which returns a dict of {component: (sort_key, contrib)}, where contrib is (+- amount - fee).
- Before start of covert signatures phase, owner calls start_signatures.
- To signal the end of covert signatures phase, owner calls end_signatures, which returns a list of signatures (which will have None at positions of missing signatures).
- To reset the server for a new round, call .reset(); to kill all connections, call .stop().
"""
def __init__(self, bindhost, port=0, upnp = None):
super().__init__(bindhost, port, CovertClientThread, upnp = upnp)
self.round_pubkey = None
def start_components(self, round_pubkey, feerate):
self.components = dict()
self.feerate = feerate
self.round_pubkey = round_pubkey
for c in self.spawned_clients:
c.got_submit = False
def end_components(self):
with self.lock:
ret = self.components
del self.components
return ret
def start_signatures(self, sighashes, pubkeys):
num_inputs = len(sighashes)
assert num_inputs == len(pubkeys)
self.signatures = [None]*num_inputs
self.sighashes = sighashes
self.pubkeys = pubkeys
for c in self.spawned_clients:
c.got_submit = False
def end_signatures(self):
with self.lock:
ret = self.signatures
del self.signatures
return ret
def reset(self):
try:
del self.round_pubkey
del self.components
del self.feerate
except AttributeError:
pass
try:
del self.sighashes
del self.pubkeys
except AttributeError:
pass
def new_client_job(self, client):
client.got_submit = False
while True:
msg, mtype = client.recv('component', 'signature', 'ping', timeout = COVERT_CLIENT_TIMEOUT)
if mtype == 'ping':
continue
if client.got_submit:
# We got a second submission before a new phase started. As
# an anti-spam measure we only allow one submission per connection
# per phase.
client.error('multiple submission in same phase')
if mtype == 'component':
try:
round_pubkey = self.round_pubkey
feerate = self.feerate
_ = self.components
except AttributeError:
client.error('component submitted at wrong time')
sort_key, contrib = check_covert_component(msg, round_pubkey, feerate)
with self.lock:
try:
self.components[msg.component] = (sort_key, contrib)
except AttributeError:
client.error('component submitted at wrong time')
else:
assert mtype == 'signature'
try:
sighash = self.sighashes[msg.which_input]
pubkey = self.pubkeys[msg.which_input]
existing_sig = self.signatures[msg.which_input]
except AttributeError:
client.error('signature submitted at wrong time')
except IndexError:
raise ValidationError('which_input too high')
sig = msg.txsignature
if len(sig) != 64:
raise ValidationError('signature length is wrong')
# It might be we already have this signature. This is fine
# since it might be a resubmission after ack failed delivery,
# but we don't allow it to consume our CPU power.
if sig != existing_sig:
if not schnorr.verify(pubkey, sig, sighash):
raise ValidationError('bad transaction signature')
if existing_sig:
# We received a distinct valid signature. This is not
# allowed and we break the connection as a result.
# Note that we could have aborted earlier but this
# way third parties can't abuse us to find out the
# timing of a given input's signature submission.
raise ValidationError('conflicting valid signature')
with self.lock:
try:
self.signatures[msg.which_input] = sig
except AttributeError:
client.error('signature submitted at wrong time')
client.send_ok()
client.got_submit = True
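

# Illustrative sketch (not part of the original module): how a round owner might
# drive a CovertServer through one round, following the lifecycle described in the
# class docstring above. The `server` argument is assumed to be an already running
# CovertServer; the remaining arguments are hypothetical placeholders.
def _example_covert_round(server, example_round_pubkey, example_feerate,
                          example_sighashes, example_pubkeys):
    # Covert components phase: open submissions, let clients submit, then collect.
    server.start_components(example_round_pubkey, example_feerate)
    # ... wait for the covert components phase to elapse ...
    components = server.end_components()   # {component: (sort_key, contrib)}

    # Covert signatures phase: same pattern, indexed by input position.
    server.start_signatures(example_sighashes, example_pubkeys)
    # ... wait for the covert signatures phase to elapse ...
    signatures = server.end_signatures()    # list; None marks missing signatures

    # Ready the server for the next round.
    server.reset()
    return components, signatures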
| 1.4375 | 1 |
Excercici4Package/ex4.py | jtorrenth/CienciaDades | 0 | 4098 | <gh_stars>0
import matplotlib.pyplot as plt
def countvalues(dataframe, subject):
    # Filter and process the dataset
    economydf = filtrar(dataframe, "economy")
    # Print it
    printar(economydf, subject)
    # Now filter by the "infected" subject and keep it in another dataframe
    infectedf = filtrar(dataframe, "infected")
    # Compute the percentages
    percentvery = (infectedf['ppl_very'].sum() / infectedf['sample_size'].sum()) * 100
    percentnotatall = (infectedf['ppl_not_at_all'].sum() / infectedf['sample_size'].sum()) * 100
    # Print them
    print("percentatge very: {}%".format(percentvery))
    print("percentatge not_at_all: {}%".format(percentnotatall))
    grafic4('People_Very', 'People_Not_At_All', percentvery, percentnotatall, " % Persones", "Satisfacció", "% de persones preocupades o no per infected")


def printar(df, subject):
    # Print the values to the console
    print("Valors per subject {}".format(subject))
    pplvery = df['ppl_very'].sum()
    pplnot = df['ppl_not_at_all'].sum()
    print("Very: {}".format(pplvery))
    print("Not at All: {}".format(pplnot))
    # Finally, plot the chart
    # The chart window must be closed for execution to continue
    grafic4('People_Very', 'People_Not_At_All', pplvery, pplnot, "Persones", "Satisfacció", "Nombre de persones preocupades o no per l'economia")


def filtrar(dataframe, subject1):
    df = dataframe[dataframe['subject'].str.contains(subject1, case=False)].copy()
    # Add the values, scaled by the sample size, to two new columns
    df['ppl_very'] = df['very'] / 100 * df['sample_size']
    df['ppl_not_at_all'] = df['not_at_all'] / 100 * df['sample_size']
    return df


def grafic4(label1, label2, valor1, valor2, leyenday, leyendax, titulo):
    # Values for the x axis
    eje_x = [label1, label2]
    # Values for the y axis
    eje_y = [valor1, valor2]
    # Draw the bar chart
    plt.bar(eje_x, eje_y)
    # Y-axis label
    plt.ylabel(leyenday)
    # X-axis label
    plt.xlabel(leyendax)
    # Chart title
    plt.title(titulo)
    # Show the chart
    plt.show()


# Function for exercise 4.4
def grades(df):
    df['538 Grade'] = df['538 Grade'].str[0]
    print(df.groupby('538 Grade').size())
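

# Illustrative usage sketch (not part of the original script). The expected column
# layout ('subject', 'very', 'not_at_all', 'sample_size', '538 Grade') is inferred
# from the functions above; the values below are hypothetical. Note that grafic4()
# calls plt.show(), so each chart window must be closed for execution to continue.
if __name__ == '__main__':
    import pandas as pd

    example_poll_df = pd.DataFrame({
        'subject': ['economy', 'infected', 'economy', 'infected'],
        'very': [40.0, 55.0, 35.0, 60.0],
        'not_at_all': [10.0, 5.0, 15.0, 4.0],
        'sample_size': [1000, 1500, 800, 1200],
    })
    countvalues(example_poll_df, 'economy')

    example_grades_df = pd.DataFrame({'538 Grade': ['A+', 'B', 'A-', 'C+', 'B']})
    grades(example_grades_df)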
| 3.078125 | 3 |
build/rules.bzl | filmil/bazel-ebook | 9 | 4099 | <gh_stars>1-10
# Copyright (C) 2020 Google Inc.
#
# This file has been licensed under Apache 2.0 license. Please see the LICENSE
# file at the root of the repository.
# Build rules for building ebooks.
# This is the container
CONTAINER = "filipfilmar/ebook-buildenv:1.1"
# Use this for quick local runs.
#CONTAINER = "ebook-buildenv:local"
EbookInfo = provider(fields=["figures", "markdowns"])
# Returns the docker_run script invocation command based on the
# script path and its reference directory.
#
# Params:
# script_path: (string) The full path to the script to invoke
# dir_reference: (string) The path to a file used for figuring out
# the reference directories (build root and repo root).
def _script_cmd(script_path, dir_reference):
return """\
{script} \
--container={container} \
--dir-reference={dir_reference}""".format(
script=script_path,
container=CONTAINER,
dir_reference=dir_reference,
)
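
# For illustration only (hypothetical paths, not part of the original file): a call
# such as _script_cmd("build/docker_run", "book/index.md") expands to roughly the
# following shell invocation:
#
#   build/docker_run \
#     --container=filipfilmar/ebook-buildenv:1.1 \
#     --dir-reference=book/index.md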
def _drawtiming_png_impl(ctx):
cmd = "drawtiming"
docker_run = ctx.executable._script
figures = []
for target in ctx.attr.srcs:
for src in target.files.to_list():
in_file = src
out_file = ctx.actions.declare_file(in_file.basename + ".png")
figures += [out_file]
script_cmd = _script_cmd(docker_run.path, in_file.path)
ctx.actions.run_shell(
progress_message = "timing diagram to PNG with {1}: {0}".format(in_file.short_path, cmd),
inputs = [in_file],
outputs = [out_file],
tools = [docker_run],
command = """\
{script} \
{cmd} --output "{out_file}" "{in_file}"
""".format(
cmd=cmd,
out_file=out_file.path,
in_file=in_file.path,
script=script_cmd),
)
deps = []
for target in ctx.attr.deps:
ebook_provider = target[EbookInfo]
if not ebook_provider:
continue
deps += ebook_provider.figures
runfiles = ctx.runfiles(files = figures)
return [
EbookInfo(figures=figures+deps, markdowns=[]),
DefaultInfo(files=depset(figures+deps), runfiles=runfiles),
]
drawtiming_png = rule(implementation = _drawtiming_png_impl,
attrs = {
"srcs": attr.label_list(
allow_files = [".t"],
doc = "The file to compile",
),
"deps": attr.label_list(
doc = "The dependencies, any targets should be allowed",
),
"output": attr.output(doc="The generated file"),
"_script": attr.label(
default="//build:docker_run",
executable=True,
cfg="host"),
},
doc = "Transform a timing diagram file into png using drawtiming",
)
def _generalized_graphviz_rule_impl(ctx, cmd):
docker_run = ctx.executable._script
figures = []
for target in ctx.attr.srcs:
for src in target.files.to_list():
in_file = src
out_file = ctx.actions.declare_file(in_file.basename + ".png")
figures += [out_file]
script_cmd = _script_cmd(docker_run.path, in_file.path)
ctx.actions.run_shell(
progress_message = "graphviz to PNG with {1}: {0}".format(in_file.short_path, cmd),
inputs = [in_file],
outputs = [out_file],
tools = [docker_run],
command = """\
{script} \
{cmd} -Tpng -o "{out_file}" "{in_file}"
""".format(
cmd=cmd,
out_file=out_file.path,
in_file=in_file.path,
script=script_cmd),
)
deps = []
for target in ctx.attr.deps:
ebook_provider = target[EbookInfo]
if not ebook_provider:
continue
deps += ebook_provider.figures
runfiles = ctx.runfiles(files = figures)
return [
EbookInfo(figures=figures+deps, markdowns=[]),
DefaultInfo(files=depset(figures+deps), runfiles=runfiles),
]
def _neato_png_impl(ctx):
return _generalized_graphviz_rule_impl(ctx, "neato")
neato_png = rule(implementation = _neato_png_impl,
attrs = {
"srcs": attr.label_list(
allow_files = [".dot"],
doc = "The file to compile",
),
"deps": attr.label_list(
doc = "The dependencies, any targets should be allowed",
),
"output": attr.output(doc="The generated file"),
"_script": attr.label(
default="//build:docker_run",
executable=True,
cfg="host"),
},
doc = "Transform a graphviz dot file into png using neato",
)
def _dot_png_impl(ctx):
return _generalized_graphviz_rule_impl(ctx, "dot")
dot_png = rule(implementation = _dot_png_impl,
attrs = {
"srcs": attr.label_list(
allow_files = [".dot"],
doc = "The file to compile",
),
"deps": attr.label_list(
doc = "The dependencies, any targets should be allowed",
),
"output": attr.output(doc="The generated file"),
"_script": attr.label(
default="//build:docker_run",
executable=True,
cfg="host"),
},
doc = "Transform a graphviz dot file into png using dot",
)
def _asymptote_impl(ctx):
asycc = ctx.executable._script
figures = []
for target in ctx.attr.srcs:
for src in target.files.to_list():
in_file = src
out_file = ctx.actions.declare_file(in_file.basename + ".png")
figures += [out_file]
script_cmd = _script_cmd(asycc.path, in_file.path)
ctx.actions.run_shell(
progress_message = "ASY to PNG: {0}".format(in_file.short_path),
inputs = [in_file],
outputs = [out_file],
tools = [asycc],
command = """\
{script} \
asy -render 5 -f png -o "{out_file}" "{in_file}"
""".format(
out_file=out_file.path, in_file=in_file.path, script=script_cmd),
)
deps = []
for target in ctx.attr.deps:
ebook_provider = target[EbookInfo]
if not ebook_provider:
continue
deps += ebook_provider.figures
runfiles = ctx.runfiles(files=figures+deps)
for dep in ctx.attr.deps:
runfiles = runfiles.merge(dep[DefaultInfo].data_runfiles)
return [
EbookInfo(figures=figures+deps, markdowns=[]),
DefaultInfo(files=depset(figures+deps), runfiles=runfiles),
]
asymptote = rule(implementation = _asymptote_impl,
attrs = {
"srcs": attr.label_list(
allow_files = [".asy"],
doc = "The file to compile",
),
"deps": attr.label_list(
doc = "The dependencies, any targets should be allowed",
),
"output": attr.output(doc="The generated file"),
"_script": attr.label(
default="//build:docker_run",
executable=True,
cfg="host"),
},
doc = "Transform an asymptote file into png",
)
def _copy_file_to_workdir_renamed(ctx, src):
src_copy = ctx.actions.declare_file("{}_{}".format(ctx.label.name, src.short_path))
ctx.actions.run_shell(
progress_message = "Copying {} to {}".format(src.short_path, src_copy.short_path),
outputs = [src_copy],
inputs = [src],
command="cp {} {}".format(src.path, src_copy.path),
)
return src_copy
def _copy_file_to_workdir(ctx, src):
src_copy = ctx.actions.declare_file(src.basename)
ctx.actions.run_shell(
progress_message = "Copying {}".format(src.short_path),
outputs = [src_copy],
inputs = [src],
command="cp {} {}".format(src.path, src_copy.path),
)
return src_copy
def _markdown_lib_impl(ctx):
markdowns = []
for target in ctx.attr.srcs:
for src in target.files.to_list():
markdowns += [_copy_file_to_workdir(ctx, src)]
figures = []
for target in ctx.attr.deps:
provider = target[EbookInfo]
figures += (provider.figures or [])
markdowns += (provider.markdowns or [])
runfiles = ctx.runfiles(files=figures+markdowns)
for dep in ctx.attr.deps:
runfiles = runfiles.merge(dep[DefaultInfo].data_runfiles)
return [
EbookInfo(figures=figures, markdowns=markdowns),
DefaultInfo(
files=depset(figures+markdowns),
runfiles=runfiles,
),
]
markdown_lib = rule(
implementation = _markdown_lib_impl,
doc = "Declares a set of markdown files",
attrs = {
"srcs": attr.label_list(
allow_files = [".md"],
doc = "The markdown source files",
),
"deps": attr.label_list(
doc = "The file to compile",
providers = [EbookInfo],
),
},
)
def _ebook_epub_impl(ctx):
name = ctx.label.name
# This is duplicated in _ebook_pdf_impl.
# steps
# run htex on all *md, gives book.htex
markdowns = []
figures = []
for dep in ctx.attr.deps:
provider = dep[EbookInfo]
markdowns += provider.markdowns
figures += provider.figures
dir_reference = markdowns[0]
htex_file = ctx.actions.declare_file("{}.htex".format(name))
markdowns_paths = [file.path for file in markdowns]
markdowns_paths_stripped = _strip_reference_dir_from_files(dir_reference, markdowns)
script = ctx.executable._script
script_cmd = _script_cmd(script.path, markdowns_paths[0])
ctx.actions.run_shell(
progress_message = "Building equation environments for: {}".format(name),
inputs = markdowns,
outputs = [htex_file],
tools = [script],
command = """\
{script} \
pandoc -s --gladtex -o {target} {sources} \
""".format(
script=script_cmd,
target=htex_file.path,
sources=" ".join(markdowns_paths))
)
# run gladtex on the resulting htex to obtain html and output directory with figures.
outdir = ctx.actions.declare_directory("{}.eqn".format(name))
html_file = ctx.actions.declare_file("{}.html".format(name))
ctx.actions.run_shell(
progress_message = "Extracting equations for: {}".format(name),
inputs = [htex_file],
outputs = [outdir, html_file],
tools = [script],
command = """\
{script} --cd-to-dir-reference \
gladtex -r 200 -d {outdir} {htex_file} \
""".format(
script=script_cmd,
outdir=_strip_reference_dir(dir_reference, outdir.path),
htex_file=_strip_reference_dir(dir_reference, htex_file.path),
)
)
outdir_tar = ctx.actions.declare_file("{}.tar".format(outdir.basename))
tar_command = "(cd {base} ; tar cf {archive} {dir})".format(
base=outdir_tar.dirname,
archive=outdir_tar.basename,
dir=outdir.basename)
ctx.actions.run_shell(
progress_message = "Archiving equations: {}".format(outdir_tar.short_path),
inputs = [outdir],
outputs = [outdir_tar],
command = tar_command,
)
# run htexepub to obtain book.epub.
# This is gonna be fun!
epub_metadata = ctx.attr.metadata_xml.files.to_list()[0]
epub_metadata = _copy_file_to_workdir_renamed(ctx, epub_metadata)
title_yaml = ctx.attr.title_yaml.files.to_list()[0]
title_yaml = _copy_file_to_workdir_renamed(ctx, title_yaml)
ebook_epub = ctx.actions.declare_file("{}.epub".format(name))
inputs = [epub_metadata, title_yaml, html_file, outdir, outdir_tar] + markdowns + figures
ctx.actions.run_shell(
progress_message = "Building EPUB for: {}".format(name),
inputs = inputs,
tools = [script],
outputs = [ebook_epub],
command = """\
{script} --cd-to-dir-reference \
pandoc --epub-metadata={epub_metadata} \
-f html -t epub3 -o {ebook_epub} {html_file} \
""".format(
script=script_cmd,
epub_metadata=_strip_reference_dir(dir_reference, epub_metadata.path),
ebook_epub=_strip_reference_dir(dir_reference, ebook_epub.path),
html_file=_strip_reference_dir(dir_reference, html_file.path),
))
runfiles = ctx.runfiles(files=[ebook_epub])
for dep in ctx.attr.deps:
runfiles = runfiles.merge(dep[DefaultInfo].data_runfiles)
return [
dep[EbookInfo],
DefaultInfo(
files=depset([ebook_epub, outdir, outdir_tar]),
runfiles=runfiles,
)
]
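
# For illustration only (hypothetical file names, not part of the original file):
# inside the build container, the EPUB pipeline implemented above amounts to roughly
# these three commands:
#
#   pandoc -s --gladtex -o book.htex ch01.md ch02.md
#   gladtex -r 200 -d book.eqn book.htex    # emits book.html plus rendered equations in book.eqn/
#   pandoc --epub-metadata=epub-metadata.xml -f html -t epub3 -o book.epub book.html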
ebook_epub = rule(
implementation = _ebook_epub_impl,
attrs = {
"deps": attr.label_list(
doc = "All the targets you need to make this book work.",
providers = [EbookInfo],
),
"title_yaml": attr.label(
allow_files = True,
doc = "The title.yaml file to use for this book",
),
"metadata_xml": attr.label(
allow_files = True,
doc = "The epub-metadata.xml file to use for this book",
),
"_script": attr.label(
default="//build:docker_run",
executable=True,
cfg="host"),
},
doc = "Generate an ebook in EPUB format"
)
def _strip_reference_dir(reference_dir, path):
return path.replace(reference_dir.dirname+"/", "")
def _strip_reference_dir_from_files(reference_dir, files):
return [ _strip_reference_dir(reference_dir, file.path) for file in files]
def _ebook_pdf_impl(ctx):
name = ctx.label.name
# steps
# run htex on all *md, gives book.htex
markdowns = []
figures = []
for dep in ctx.attr.deps:
provider = dep[EbookInfo]
markdowns += provider.markdowns
figures += provider.figures
dir_reference = markdowns[0]
# Fixed up paths -- relative to the directory dir_reference, not the
# directory where the build happens! This is needed because we can not control
# figure inclusion.
markdowns_paths = _strip_reference_dir_from_files(dir_reference, markdowns)
script = ctx.executable._script
script_cmd = _script_cmd(script.path, dir_reference.path)
# run htexepub to obtain book.epub.
# This is gonna be fun!
epub_metadata = ctx.attr.metadata_xml.files.to_list()[0]
epub_metadata = _copy_file_to_workdir(ctx, epub_metadata)
title_yaml = ctx.attr.title_yaml.files.to_list()[0]
title_yaml = _copy_file_to_workdir(ctx, title_yaml)
ebook_pdf = ctx.actions.declare_file("{}.pdf".format(name))
inputs = [epub_metadata, title_yaml] + markdowns + figures
ctx.actions.run_shell(
progress_message = "Building PDF for: {}".format(name),
inputs = inputs,
tools = [script],
outputs = [ebook_pdf],
command = """\
{script} --cd-to-dir-reference \
pandoc --epub-metadata={epub_metadata} \
--mathml -o {ebook_pdf} {markdowns} \
""".format(
script=script_cmd,
epub_metadata=_strip_reference_dir(dir_reference, epub_metadata.path),
ebook_pdf=_strip_reference_dir(dir_reference, ebook_pdf.path),
markdowns=" ".join(markdowns_paths),
))
runfiles = ctx.runfiles(files=[ebook_pdf])
for dep in ctx.attr.deps:
runfiles = runfiles.merge(dep[DefaultInfo].data_runfiles)
return [
DefaultInfo(
files=depset([ebook_pdf]),
runfiles=runfiles,
)
]
ebook_pdf = rule(
implementation = _ebook_pdf_impl,
attrs = {
"deps": attr.label_list(
doc = "All the targets you need to make this book work.",
providers = [EbookInfo],
),
"title_yaml": attr.label(
allow_files = True,
doc = "The title.yaml file to use for this book",
),
"metadata_xml": attr.label(
allow_files = True,
doc = "The epub-metadata.xml file to use for this book",
),
"_script": attr.label(
default="//build:docker_run",
executable=True,
cfg="host"),
},
doc = "Generate an ebook in PDF format"
)
def _ebook_kindle_impl(ctx):
mobi_file = ctx.actions.declare_file("{}.mobi".format(ctx.label.name))
# First provider is EbookInfo, second is DefaultInfo.
(ebook_info, default_info) = _ebook_epub_impl(ctx)
# There can be only one such file
outputs = default_info.files.to_list()
epub_file = outputs[0]
equation_outdir = outputs[1]
equation_outdir_tar = outputs[2]
captured_output = ctx.actions.declare_file(
"{}.untar-out".format(ctx.label.name))
# untar the equation dir
# Maybe this is not needed.
tar_command = "(cd {base} ; tar xvf {archive}) > {output}".format(
base=equation_outdir_tar.dirname,
archive=equation_outdir_tar.basename,
output=captured_output.path)
ctx.actions.run_shell(
progress_message = "Unarchiving equations: {}".format(equation_outdir_tar.short_path),
inputs = [equation_outdir_tar],
outputs = [captured_output],
command = tar_command,
)
dir_reference = epub_file
script = ctx.executable._script
name = ctx.label.name
script_cmd = _script_cmd(script.path, epub_file.path)
ctx.actions.run_shell(
progress_message = "Building MOBI for: {}".format(name),
inputs = [epub_file, equation_outdir],
tools = [script],
outputs = [mobi_file],
command = """\
{script} --cd-to-dir-reference \
ebook-convert {epub_file} {mobi_file} \
""".format(
script=script_cmd,
epub_file=_strip_reference_dir(dir_reference, epub_file.path),
mobi_file=_strip_reference_dir(dir_reference, mobi_file.path),
))
runfiles = ctx.runfiles(files=[mobi_file])
for dep in ctx.attr.deps:
runfiles = runfiles.merge(dep[DefaultInfo].data_runfiles)
return [
DefaultInfo(
files=depset([mobi_file, captured_output]),
runfiles=runfiles,
)
]
ebook_kindle = rule(
implementation = _ebook_kindle_impl,
attrs = {
"deps": attr.label_list(
doc = "All the targets you need to make this book work.",
providers = [EbookInfo],
),
"title_yaml": attr.label(
allow_files = True,
doc = "The title.yaml file to use for this book",
),
"metadata_xml": attr.label(
allow_files = True,
doc = "The epub-metadata.xml file to use for this book",
),
"_script": attr.label(
default="//build:docker_run",
executable=True,
cfg="host"),
},
doc = "Generate an ebook in the Kindle's MOBI format"
)
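

# Illustrative macro (not part of the original file): one way the rules above could
# be wired together from a BUILD file. Target names, source globs and metadata file
# names are hypothetical.
def example_ebook_targets(name):
    """Declares figure, markdown and ebook targets for a hypothetical book."""
    asymptote(
        name = name + "_figures",
        srcs = native.glob(["figures/*.asy"]),
    )
    markdown_lib(
        name = name + "_md",
        srcs = native.glob(["*.md"]),
        deps = [":" + name + "_figures"],
    )
    ebook_epub(
        name = name + "_epub",
        deps = [":" + name + "_md"],
        title_yaml = "title.yaml",
        metadata_xml = "epub-metadata.xml",
    )
    ebook_pdf(
        name = name + "_pdf",
        deps = [":" + name + "_md"],
        title_yaml = "title.yaml",
        metadata_xml = "epub-metadata.xml",
    )
    ebook_kindle(
        name = name + "_mobi",
        deps = [":" + name + "_md"],
        title_yaml = "title.yaml",
        metadata_xml = "epub-metadata.xml",
    )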
| 2.484375 | 2 |