# File: common/client.py (repo: yangtao4389/default_django2.0, license: Apache-2.0)
import urllib.parse
def get_client_ip(request):
"""
:param request:
    :return: the IP address of the visiting client
"""
x_forwarded_for = request.META.get('HTTP_X_FORWARDED_FOR')
if x_forwarded_for:
ip = x_forwarded_for.split(',')[0]
else:
ip = request.META.get('REMOTE_ADDR')
return ip
def get_client_previous_url(request):
"""
:param request:
    :return: the previous URL the client came from (HTTP referer)
"""
try:
url = request.META.get('HTTP_REFERER')
except:
url = None
return url
def get_client_current_path(request):
"""
:param request:
    :return: the request path without query parameters
"""
return request.path
def get_client_current_full_path(request):
"""
:param request:
    :return: the full request path including query parameters
"""
return request.get_full_path()
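# Illustrative sketch (added for documentation only; the view name `_example_view`
# is a hypothetical name, not part of the original module). It shows how the
# request helpers above are typically combined inside a Django view.
def _example_view(request):
    from django.http import JsonResponse
    return JsonResponse({
        'ip': get_client_ip(request),
        'referer': get_client_previous_url(request),
        'path': get_client_current_path(request),
        'full_path': get_client_current_full_path(request),
    })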
def url_query_replace(url, name, value):
"""
    Replace (or add) a single query parameter in a URL.
"""
parse_obj = urllib.parse.urlparse(url)
parse_query = parse_obj.query
parse_query_dict = dict(urllib.parse.parse_qsl(parse_query))
if name in parse_query_dict.keys():
        # If found, delete every existing parameter with this name, then set the new value.
del parse_query_dict[name]
parse_query_dict[name] = value
pure_url = url.replace('?'+parse_query, '')
ret = pure_url + "?" + urllib.parse.urlencode(parse_query_dict)
return ret
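# Illustrative sketch (added for documentation only; the helper name and the
# example URL are assumptions, not part of the original module): it shows how
# `url_query_replace` swaps the value of one query parameter and keeps the rest.
def _example_url_query_replace():
    url = "http://example.com/list?page=2&size=10"
    # Replaces page=2 with page=5: "http://example.com/list?size=10&page=5"
    return url_query_replace(url, "page", 5)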
def get_client_query_str(request):
try:
return request.META['QUERY_STRING']
except:
return None
def get_client_HTTP_USER_AGENT(request):
try:
return request.META['HTTP_USER_AGENT']
except:
return None
def decode_url(url):
'''
    Percent-encode the URL (note: despite the name, this quotes/encodes rather than decodes).
:param url:
:return:
'''
return urllib.parse.quote(url)
def decode_url_query(url):
'''
    Percent-encode only the query-string portion of the URL.
:param url:
:return:
'''
parse_obj = urllib.parse.urlparse(url)
parse_query = parse_obj.query
pure_url = url.replace('?' + parse_query, '')
ret = pure_url + "?" + urllib.parse.quote_plus(parse_query)
return ret
def get_post_body(request):
'''
    Whether the request is a GET or a POST, the raw data originally arrives in
    request.body; Django merely wraps the parsed parameters as request.GET / request.POST.
:param request:
:return:
'''
try:
return request.body
except:
        pass
# File: tests/api/views_test.py (repo: IslomSobirov/elasticsearch, license: MIT)
import pytest
from aiohttp import web
import settings
from utils.helpers import getFromElastic, QuestionIndex, ChoiceIndex
from api.models import Question
async def test_get_question(cli, route_path, question_factory):
question = await question_factory.get()
resp = await cli.get(route_path('question_obj', str(question.id)))
resp_json = await resp.json()
print(resp_json)
assert (question.id == resp_json.get('id')) is True
async def test_get_questions(cli, route_path, question_factory):
for _ in range(settings.API_ITEMS_PER_PAGE):
await question_factory.get()
resp = await cli.get(route_path('questions'))
resp_json = await resp.json()
print(resp_json)
assert (len(resp_json) == settings.API_ITEMS_PER_PAGE) is True
async def test_post_question(cli, route_path, question_factory):
data = question_factory.initial_data()
resp = await cli.post(route_path('questions'), json=data)
resp_json = await resp.json()
print(resp_json)
assert (resp_json.get('id') > 0) is True
elastic_index_data = await getFromElastic(QuestionIndex, resp_json.get('id'))
print(elastic_index_data)
assert elastic_index_data.get('found') is True
async def test_delete_question(cli, route_path, question):
resp = await cli.delete(route_path('question_obj', str(question.id)))
elastic_index_data = await getFromElastic(QuestionIndex, question.id)
assert elastic_index_data.get('found') is False
assert (resp.status == web.HTTPAccepted.status_code) is True
with pytest.raises(Question.DoesNotExist):
await Question.get_by(question.id)
async def test_get_choices(cli, route_path, choice_factory):
for _ in range(settings.API_ITEMS_PER_PAGE):
await choice_factory.get()
resp = await cli.get(route_path('choices'))
resp_json = await resp.json()
print(resp_json)
assert (len(resp_json) == settings.API_ITEMS_PER_PAGE) is True
async def test_get_choices_by_question_id(cli, route_path, question_factory,
choice_factory):
for _ in range(settings.API_ITEMS_PER_PAGE):
question = await question_factory.get()
choices_count = int(settings.API_ITEMS_PER_PAGE - 1)
if choices_count <= 0:
choices_count = 1
for _ in range(choices_count):
await choice_factory.get(question)
choice_uri = route_path(f'choices')
resp = await cli.get(f'{choice_uri}?question_id={question.id}')
resp_json = await resp.json()
print(resp_json)
assert (len(resp_json) == choices_count) is True
async def test_post_choice(cli, route_path, question, choice_factory):
data = choice_factory.initial_data(question.id)
resp = await cli.post(route_path('choices'), json=data)
resp_json = await resp.json()
print(resp_json)
assert (resp_json.get('id') > 0) is True
elastic_index_data = await getFromElastic(ChoiceIndex, resp_json.get('id'))
print(elastic_index_data)
    assert elastic_index_data.get('found') is True
# File: model_data_util/create_tt_data/cnn_build_rule.py (repo: yuqil725/model_data_util, license: MIT)
"""
# Convolutional Layer:
## Rule 1: No Convolutional Layer After the First Dense Layer
## Rule 2: At Least Half of the Layers Are Convolutional
## Rule 3: A Convolutional Layer Is Always the First Layer
# Maxpooling Layer:
## Rule 4: No Maxpooling Layer After the First Dense Layer
## Rule 5: No Two Maxpooling Layers Next to Each Other
## Rule 6: Maxpooling Layers Only Follow Convolutional Layers
# Dropout Layer:
## Rule 7: No Two Dropout Layers Next to Each Other
## Rule 8: No Dropout Rate Higher Than 0.5
## Rule 9: Maximum of Two Dropout Layers
# Flatten Layer and AveragePooling Layer:
## Rule 10: Every CNN Ends with One Flatten or AveragePooling Layer Followed by at Most Two Dense Layers
A CNN is divided into four parts:
1. convolutional part: composed of Conv2D, MaxPooling2D and Dropout layers
2. flatten part: composed of a Flatten layer
3. dense part: composed of Dense and Dropout layers
4. output part: composed of a Dense layer with softmax activation
"""
import random
import numpy as np
class CnnRules:
def __init__(self, max_layers=64):
self.first_dense_occurred = False # Rule: No Convolutional Layer After the First Dense Layer
self.max_dropout = 2
self.remain_dropout = 2
self.initial_layer = ["Conv2D"]
self.max_layers = max_layers # the total max layers
self.layer_order = []
self.generatePartAll()
def generatePartLayers(self):
"""
:return: the number of layers per parts
"""
part_layers_num = {}
part_layers_num["output"] = 1
part_layers_num["flatten"] = 1
part_layers_num["dense"] = random.choice(range(3))
remain_max_layers = self.max_layers - np.sum(list(part_layers_num.values()))
part_layers_num["conv"] = random.choice(range(3, remain_max_layers))
return part_layers_num
def generatePartConv(self):
"""
Generate the layer_order of part_conv
The available layers and their probabilities to occur are hard coded
"""
def nextAvailableLayer(l_name):
if l_name == "Conv2D":
next_available_layer_weights = {"Conv2D": random.uniform(0.5, 1)}
next_available_layer_weights["MaxPooling2D"] = (1 - next_available_layer_weights[
"Conv2D"]) * random.uniform(0.8, 1)
next_available_layer_weights["Dropout"] = (1 - np.sum(list(next_available_layer_weights.values())))
l = list(next_available_layer_weights.keys())
l_w = list(next_available_layer_weights.values())
next_layer = random.choices(l, weights=l_w)[0]
elif l_name == "MaxPooling2D":
# Rule: No two Maxpooling Layers Next to Each Other
next_available_layer_weights = {"Conv2D": random.uniform(0.9, 1)}
next_available_layer_weights["Dropout"] = (1 - np.sum(list(next_available_layer_weights.values())))
l = list(next_available_layer_weights.keys())
l_w = list(next_available_layer_weights.values())
next_layer = random.choices(l, weights=l_w)[0]
elif l_name == "Dropout":
# Rule: the layer after dropout should only be Conv2D
next_layer = "Conv2D"
else:
print("Error: received unsupported layer name: %s" % l_name)
return next_layer
self.layer_order = self.initial_layer.copy()
for _ in range(len(self.layer_order), self.part_layers_num["conv"]):
self.layer_order.append(nextAvailableLayer(self.layer_order[-1]))
def generatePartFlatten(self):
"""
Generate the layer_order of part_flatten
The available layers and their probabilities to occur are hard coded
"""
# self.layer_order.append(random.choice(["Flatten", "AveragePooling2D"]))
self.layer_order.append("Flatten")
def generatePartDense(self):
"""
Generate the layer order of part_dense
"""
if self.part_layers_num["dense"] == 1:
self.layer_order.append(random.choice(["Dense", "Dropout"]))
if self.part_layers_num["dense"] == 2:
self.layer_order.append("Dense")
self.layer_order.append("Dropout")
def generatePartOutput(self):
"""
Generate the layer order of part_output
"""
self.layer_order.append("Dense")
def generatePartAll(self):
"""
Run all generatePartXXX functions
"""
self.part_layers_num = self.generatePartLayers()
self.generatePartConv()
self.generatePartFlatten()
self.generatePartDense()
self.generatePartOutput()
return self.layer_order
if __name__ == "__main__":
cnn_rules = CnnRules()
    print(cnn_rules.generatePartAll())
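    # Illustrative note (added for documentation): the printed ordering is random,
    # but under the rules above a typical result might look like, e.g.
    # ['Conv2D', 'Conv2D', 'MaxPooling2D', 'Conv2D', 'Dropout', 'Conv2D',
    #  'Flatten', 'Dense', 'Dropout', 'Dense']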
# File: jax/_src/lax/linalg.py (repo: SCiarella/jax, license: Apache-2.0)
# coding=utf-8
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import functools
from functools import partial
import numpy as np
from jax._src.numpy import lax_numpy as jnp
from jax._src.numpy.vectorize import vectorize
from jax._src import ad_util
from jax._src import api
from jax import lax
from jax._src import dtypes
from jax.interpreters import xla
from jax.interpreters import ad
from jax.interpreters import batching
from jax._src.util import prod
from jax.core import Primitive, ShapedArray, raise_to_shaped
from jax._src.lax.lax import (
standard_primitive, standard_unop, naryop_dtype_rule, _float, _complex,
_input_dtype, _broadcasting_select)
from jax._src.lax import lax as lax_internal
from jax._src.lib import lapack
from jax._src.lib import cuda_linalg
from jax._src.lib import cusolver
from jax._src.lib import cusparse
from jax._src.lib import rocsolver
from jax._src.lib import xla_client
from jax._src.lib import xla_bridge as xb
from jax._src.lib import version as jaxlib_version
xops = xla_client.ops
# traceables
def cholesky(x, symmetrize_input: bool = True):
"""Cholesky decomposition.
Computes the Cholesky decomposition
.. math::
A = L . L^H
of square matrices, :math:`A`, such that :math:`L`
is lower triangular. The matrices of :math:`A` must be positive-definite and
either Hermitian, if complex, or symmetric, if real.
Args:
x: A batch of square Hermitian (symmetric if real) positive-definite
matrices with shape ``[..., n, n]``.
symmetrize_input: If ``True``, the matrix is symmetrized before Cholesky
decomposition by computing :math:`\\frac{1}{2}(x + x^H)`. If ``False``,
only the lower triangle of ``x`` is used; the upper triangle is ignored
and not accessed.
Returns:
The Cholesky decomposition as a matrix with the same dtype as ``x`` and
shape ``[..., n, n]``. If Cholesky decomposition fails, returns a matrix
full of NaNs. The behavior on failure may change in the future.
"""
if symmetrize_input:
x = symmetrize(x)
return jnp.tril(cholesky_p.bind(x))
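# Illustrative usage sketch (added for documentation only; the helper name and
# the example matrix are assumptions, not part of the original module).
def _cholesky_usage_sketch():
  a = jnp.array([[4., 12., -16.], [12., 37., -43.], [-16., -43., 98.]])
  l = cholesky(a)  # lower-triangular factor; l @ l.T reconstructs a up to numerics
  return l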
def eig(x, compute_left_eigenvectors=True, compute_right_eigenvectors=True):
"""Eigendecomposition of a general matrix.
Nonsymmetric eigendecomposition is at present only implemented on CPU.
"""
return eig_p.bind(x, compute_left_eigenvectors=compute_left_eigenvectors,
compute_right_eigenvectors=compute_right_eigenvectors)
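# Illustrative usage sketch (added for documentation only; names and values are
# assumptions, not part of the original module). Note that this decomposition is
# only implemented on the CPU backend.
def _eig_usage_sketch():
  a = jnp.array([[0., 1.], [-2., -3.]])
  w, vl, vr = eig(a)  # complex eigenvalues plus left and right eigenvectors
  return w, vl, vr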
def eigh(x, lower: bool = True, symmetrize_input: bool = True):
"""Eigendecomposition of a Hermitian matrix.
Computes the eigenvectors and eigenvalues of a complex Hermitian or real
symmetric square matrix.
Args:
x: A batch of square complex Hermitian or real symmetric matrices with shape
``[..., n, n]``.
lower: If ``symmetrize_input`` is ``False``, describes which triangle of the
input matrix to use. If ``symmetrize_input`` is ``False``, only the
triangle given by ``lower`` is accessed; the other triangle is ignored and
not accessed.
symmetrize_input: If ``True``, the matrix is symmetrized before the
eigendecomposition by computing :math:`\\frac{1}{2}(x + x^H)`.
Returns:
    A tuple ``(v, w)``.
    ``v`` is an array with the same dtype as ``x`` such that ``v[..., :, i]`` is
    the eigenvector corresponding to the eigenvalue ``w[..., i]``.
    ``w`` is an array with the same dtype as ``x`` (or its real counterpart if
    complex) with shape ``[..., n]`` containing the eigenvalues of ``x``.
"""
if symmetrize_input:
x = symmetrize(x)
v, w = eigh_p.bind(x, lower=lower)
return v, w
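# Illustrative usage sketch (added for documentation only; names and values are
# assumptions, not part of the original module).
def _eigh_usage_sketch():
  a = jnp.array([[2., 1.], [1., 2.]])
  v, w = eigh(a)  # columns of v are eigenvectors; w holds the eigenvalues (1. and 3.)
  return v, w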
def lu_pivots_to_permutation(pivots, permutation_size: int):
"""Converts the pivots (row swaps) returned by LU to a permutation.
We build a permutation rather than applying `pivots` directly to the rows
of a matrix because lax loops aren't differentiable.
Args:
pivots: an int32 array of shape (..., k) of row swaps to perform
permutation_size: the size of the output permutation. Has to be >= k.
Returns:
An int32 array of shape (..., permutation_size).
"""
permutation = lu_pivots_to_permutation_p.bind(
pivots, permutation_size=int(permutation_size))
return permutation
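# Illustrative usage sketch (added for documentation only; the pivot values are
# assumptions, not part of the original module).
def _lu_pivots_to_permutation_usage_sketch():
  pivots = jnp.array([2, 2, 2], dtype=jnp.int32)  # row swaps from an LU factorization
  return lu_pivots_to_permutation(pivots, 4)      # permutation [2, 0, 1, 3]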
def lu(x):
"""LU decomposition with partial pivoting.
Computes the matrix decomposition:
.. math::
P.A = L.U
where :math:`P` is a permutation of the rows of :math:`A`, :math:`L` is a
lower-triangular matrix with unit-diagonal elements, and :math:`U` is an
upper-triangular matrix.
Args:
x: A batch of matrices with shape ``[..., m, n]``.
Returns:
A tuple ``(lu, pivots, permutation)``.
``lu`` is a batch of matrices with the same shape and dtype as ``x``
containing the :math:`L` matrix in its lower triangle and the :math:`U`
matrix in its upper triangle. The (unit) diagonal elements of :math:`L` are
not represented explicitly.
``pivots`` is an int32 array with shape ``[..., min(m, n)]`` representing a
sequence of row swaps that should be performed on :math:`A`.
``permutation`` is an alternative representation of the sequence of row
swaps as a permutation, represented as an int32 array with shape
``[..., m]``.
"""
lu, pivots, permutation = lu_p.bind(x)
return lu, pivots, permutation
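# Illustrative usage sketch (added for documentation only; names and values are
# assumptions, not part of the original module).
def _lu_usage_sketch():
  a = jnp.array([[1., 2.], [3., 4.]])
  lu_mat, pivots, perm = lu(a)
  # L (unit diagonal) lives in the strict lower triangle, U in the upper triangle,
  # and a[perm] equals l @ u up to numerics.
  l = jnp.tril(lu_mat, -1) + jnp.eye(2, dtype=a.dtype)
  u = jnp.triu(lu_mat)
  return l, u, perm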
def qr(x, full_matrices: bool = True):
"""QR decomposition.
Computes the QR decomposition
.. math::
A = Q . R
of matrices :math:`A`, such that :math:`Q` is a unitary (orthogonal) matrix,
and :math:`R` is an upper-triangular matrix.
Args:
x: A batch of matrices with shape ``[..., m, n]``.
full_matrices: Determines if full or reduced matrices are returned; see
below.
Returns:
A pair of arrays ``(q, r)``.
Array ``q`` is a unitary (orthogonal) matrix,
with shape ``[..., m, m]`` if ``full_matrices=True``, or
``[..., m, min(m, n)]`` if ``full_matrices=False``.
Array ``r`` is an upper-triangular matrix with shape ``[..., m, n]`` if
``full_matrices=True``, or ``[..., min(m, n), n]`` if
``full_matrices=False``.
"""
q, r = qr_p.bind(x, full_matrices=full_matrices)
return q, r
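# Illustrative usage sketch (added for documentation only; names and values are
# assumptions, not part of the original module).
def _qr_usage_sketch():
  a = jnp.arange(6.).reshape(3, 2)
  q, r = qr(a, full_matrices=False)  # q: (3, 2) with orthonormal columns, r: (2, 2) upper triangular
  return q, r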
def svd(x, full_matrices=True, compute_uv=True):
"""Singular value decomposition.
Returns the singular values if compute_uv is False, otherwise returns a triple
containing the left singular vectors, the singular values and the adjoint of
the right singular vectors.
"""
result = svd_p.bind(x, full_matrices=full_matrices, compute_uv=compute_uv)
if compute_uv:
s, u, v = result
return u, s, v
else:
s, = result
return s
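# Illustrative usage sketch (added for documentation only; names and values are
# assumptions, not part of the original module).
def _svd_usage_sketch():
  a = jnp.arange(6.).reshape(2, 3)
  u, s, vt = svd(a, full_matrices=False)  # u: (2, 2), s: (2,), vt: (2, 3)
  s_only = svd(a, compute_uv=False)       # singular values alone
  return u, s, vt, s_only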
def triangular_solve(a, b, left_side: bool = False, lower: bool = False,
transpose_a: bool = False, conjugate_a: bool = False,
unit_diagonal: bool = False):
r"""Triangular solve.
Solves either the matrix equation
.. math::
\mathit{op}(A) . X = B
if ``left_side`` is ``True`` or
.. math::
X . \mathit{op}(A) = B
if ``left_side`` is ``False``.
``A`` must be a lower or upper triangular square matrix, and where
:math:`\mathit{op}(A)` may either transpose :math:`A` if ``transpose_a``
is ``True`` and/or take its complex conjugate if ``conjugate_a`` is ``True``.
Args:
a: A batch of matrices with shape ``[..., m, m]``.
b: A batch of matrices with shape ``[..., m, n]`` if ``left_side`` is
``True`` or shape ``[..., n, m]`` otherwise.
left_side: describes which of the two matrix equations to solve; see above.
lower: describes which triangle of ``a`` should be used. The other triangle
is ignored.
transpose_a: if ``True``, the value of ``a`` is transposed.
conjugate_a: if ``True``, the complex conjugate of ``a`` is used in the
solve. Has no effect if ``a`` is real.
unit_diagonal: if ``True``, the diagonal of ``a`` is assumed to be unit
(all 1s) and not accessed.
Returns:
A batch of matrices the same shape and dtype as ``b``.
"""
conjugate_a = conjugate_a and jnp.issubdtype(lax.dtype(a), jnp.complexfloating)
singleton = jnp.ndim(b) == jnp.ndim(a) - 1
if singleton:
b = jnp.expand_dims(b, -1 if left_side else -2)
out = triangular_solve_p.bind(
a, b, left_side=left_side, lower=lower, transpose_a=transpose_a,
conjugate_a=conjugate_a, unit_diagonal=unit_diagonal)
if singleton:
out = out[..., 0] if left_side else out[..., 0, :]
return out
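# Illustrative usage sketch (added for documentation only; names and values are
# assumptions, not part of the original module).
def _triangular_solve_usage_sketch():
  a = jnp.array([[2., 0.], [1., 3.]])  # lower-triangular coefficient matrix
  b = jnp.array([[2.], [7.]])
  # Solves a @ x = b using only the lower triangle of a; x is approximately [[1.], [2.]].
  return triangular_solve(a, b, left_side=True, lower=True)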
# utilities
@partial(vectorize, signature='(n,m),(m)->(n)')
def _matvec_multiply(a, b):
return lax.dot(a, b, precision=lax.Precision.HIGHEST)
def _check_solve_shapes(a, b):
if not (a.ndim >= 2 and b.ndim in [a.ndim, a.ndim - 1] and
a.shape[-1] == a.shape[-2] == b.shape[a.ndim - 2]):
raise ValueError(
"The arguments to solve must have shapes a=[..., m, m] and "
f"b=[..., m, k] or b=[..., m]; got a={a.shape} and b={b.shape}")
def _solve(a, b):
_check_solve_shapes(a, b)
# Broadcast leading dimensions of b to the shape of a, as is required by
# custom_linear_solve.
out_shape = tuple(d_a if d_b == 1 else d_b
for d_a, d_b in zip(a.shape[:-1] + (1,), b.shape))
b = jnp.broadcast_to(b, out_shape)
# With custom_linear_solve, we can reuse the same factorization when
# computing sensitivities. This is considerably faster.
lu_, _, permutation = lu(lax.stop_gradient(a))
custom_solve = partial(
lax.custom_linear_solve,
lambda x: _matvec_multiply(a, x),
solve=lambda _, x: lu_solve(lu_, permutation, x, trans=0),
transpose_solve=lambda _, x: lu_solve(lu_, permutation, x, trans=1))
if a.ndim == b.ndim + 1:
# b.shape == [..., m]
return custom_solve(b)
else:
# b.shape == [..., m, k]
return api.vmap(custom_solve, b.ndim - 1, max(a.ndim, b.ndim) - 1)(b)
def _T(x): return jnp.swapaxes(x, -1, -2)
def _H(x): return jnp.conj(_T(x))
def symmetrize(x): return (x + _H(x)) / 2
def _unpack_tuple(f, n):
def g(c, *args, **kwargs):
t = f(c, *args, **kwargs)
return (xops.GetTupleElement(t, i) for i in range(n))
return g
# primitives
_cpu_lapack_types = {np.dtype(np.float32), np.dtype(np.float64),
np.dtype(np.complex64), np.dtype(np.complex128)}
# Cholesky decomposition
def cholesky_jvp_rule(primals, tangents):
x, = primals
sigma_dot, = tangents
L = jnp.tril(cholesky_p.bind(x))
# Forward-mode rule from https://arxiv.org/pdf/1602.07527.pdf
def phi(X):
l = jnp.tril(X)
return l / (jnp._constant_like(X, 1) + jnp.eye(X.shape[-1], dtype=X.dtype))
tmp = triangular_solve(L, sigma_dot, left_side=False, transpose_a=True,
conjugate_a=True, lower=True)
L_dot = lax.batch_matmul(L, phi(triangular_solve(
L, tmp, left_side=True, transpose_a=False, lower=True)),
precision=lax.Precision.HIGHEST)
return L, L_dot
def cholesky_batching_rule(batched_args, batch_dims):
x, = batched_args
bd, = batch_dims
x = batching.moveaxis(x, bd, 0)
return cholesky(x), 0
cholesky_p = standard_unop(_float | _complex, 'cholesky')
ad.primitive_jvps[cholesky_p] = cholesky_jvp_rule
batching.primitive_batchers[cholesky_p] = cholesky_batching_rule
def _nan_like(c, operand):
shape = c.get_shape(operand)
dtype = shape.element_type()
if jnp.issubdtype(dtype, np.complexfloating):
nan = xb.constant(c, np.array(np.nan * (1. + 1j), dtype=dtype))
else:
nan = xb.constant(c, np.array(np.nan, dtype=dtype))
return xops.Broadcast(nan, shape.dimensions())
def _cholesky_cpu_gpu_translation_rule(potrf_impl, c, operand):
shape = c.get_shape(operand)
batch_dims = shape.dimensions()[:-2]
result, info = potrf_impl(c, operand, lower=True)
ok = xops.Eq(info, xops.ConstantLiteral(c, np.array(0, np.int32)))
return _broadcasting_select(c,
xops.Reshape(ok, batch_dims + (1, 1)), result,
_nan_like(c, result))
xla.backend_specific_translations['cpu'][cholesky_p] = partial(
_cholesky_cpu_gpu_translation_rule, lapack.potrf)
if cusolver is not None:
xla.backend_specific_translations['gpu'][cholesky_p] = partial(
_cholesky_cpu_gpu_translation_rule, cusolver.potrf)
if rocsolver is not None:
xla.backend_specific_translations['gpu'][cholesky_p] = partial(
_cholesky_cpu_gpu_translation_rule, rocsolver.potrf)
# Asymmetric eigendecomposition
def eig_impl(operand, *, compute_left_eigenvectors, compute_right_eigenvectors):
return (
xla.apply_primitive(eig_p, operand,
compute_left_eigenvectors=compute_left_eigenvectors,
compute_right_eigenvectors=compute_right_eigenvectors))
def eig_translation_rule(c, operand, *, compute_left_eigenvectors,
compute_right_eigenvectors):
raise NotImplementedError(
"Nonsymmetric eigendecomposition is only implemented on the CPU backend")
def eig_abstract_eval(operand, *, compute_left_eigenvectors,
compute_right_eigenvectors):
if isinstance(operand, ShapedArray):
if operand.ndim < 2 or operand.shape[-2] != operand.shape[-1]:
raise ValueError("Argument to nonsymmetric eigendecomposition must have "
"shape [..., n, n], got shape {}".format(operand.shape))
batch_dims = operand.shape[:-2]
n = operand.shape[-1]
dtype = np.complex64 if dtypes.finfo(operand.dtype).bits == 32 else np.complex128
dtype = dtypes.canonicalize_dtype(dtype)
vl = vr = operand.update(shape=batch_dims + (n, n), dtype=dtype)
w = operand.update(shape=batch_dims + (n,), dtype=dtype)
else:
raise NotImplementedError
output = [w]
if compute_left_eigenvectors:
output.append(vl)
if compute_right_eigenvectors:
output.append(vr)
return tuple(output)
_cpu_geev = lapack.geev
def eig_cpu_translation_rule(c, operand, *, compute_left_eigenvectors,
compute_right_eigenvectors):
shape = c.get_shape(operand)
batch_dims = shape.dimensions()[:-2]
w, vl, vr, info = _cpu_geev(c, operand, jobvl=compute_left_eigenvectors,
jobvr=compute_right_eigenvectors)
ok = xops.Eq(info, xops.ConstantLiteral(c, np.array(0, np.int32)))
w = _broadcasting_select(c, xops.Reshape(ok, batch_dims + (1,)), w,
_nan_like(c, w))
output = [w]
if compute_left_eigenvectors:
vl = _broadcasting_select(c, xops.Reshape(ok, batch_dims + (1, 1)), vl,
_nan_like(c, vl))
output.append(vl)
if compute_right_eigenvectors:
vr = _broadcasting_select(c, xops.Reshape(ok, batch_dims + (1, 1)), vr,
_nan_like(c, vr))
output.append(vr)
return xops.Tuple(c, output)
def eig_batching_rule(batched_args, batch_dims, *, compute_left_eigenvectors,
compute_right_eigenvectors):
x, = batched_args
bd, = batch_dims
x = batching.moveaxis(x, bd, 0)
return (eig_p.bind(x, compute_left_eigenvectors=compute_left_eigenvectors,
compute_right_eigenvectors=compute_right_eigenvectors),
(0,) * (1 + compute_left_eigenvectors + compute_right_eigenvectors))
def eig_jvp_rule(primals, tangents, *, compute_left_eigenvectors,
compute_right_eigenvectors):
if compute_left_eigenvectors or compute_right_eigenvectors:
raise NotImplementedError(
'The derivatives of eigenvectors are not implemented, only '
'eigenvalues. See '
'https://github.com/google/jax/issues/2748 for discussion.')
# Formula for derivative of eigenvalues w.r.t. a is eqn 4.60 in
# https://arxiv.org/abs/1701.00392
a, = primals
da, = tangents
l, v = eig(a, compute_left_eigenvectors=False)
return [l], [jnp.sum(_solve(v, da.astype(v.dtype)) * _T(v), -1)]
eig_p = Primitive('eig')
eig_p.multiple_results = True
eig_p.def_impl(eig_impl)
eig_p.def_abstract_eval(eig_abstract_eval)
xla.translations[eig_p] = eig_translation_rule
xla.backend_specific_translations['cpu'][eig_p] = eig_cpu_translation_rule
batching.primitive_batchers[eig_p] = eig_batching_rule
ad.primitive_jvps[eig_p] = eig_jvp_rule
# Symmetric/Hermitian eigendecomposition
def eigh_impl(operand, lower):
v, w = xla.apply_primitive(eigh_p, operand, lower=lower)
return v, w
def eigh_translation_rule(c, operand, lower):
shape = c.get_shape(operand)
dims = shape.dimensions()
if dims[-1] == 0:
return xops.Tuple(c, [operand, xops.Real(xops.Reshape(operand, dims[:-1]))])
return xops.Tuple(c, xops.Eigh(operand, lower=lower))
def eigh_abstract_eval(operand, lower):
if isinstance(operand, ShapedArray):
if operand.ndim < 2 or operand.shape[-2] != operand.shape[-1]:
raise ValueError(
"Argument to symmetric eigendecomposition must have shape [..., n, n],"
"got shape {}".format(operand.shape))
batch_dims = operand.shape[:-2]
n = operand.shape[-1]
v = operand.update(shape=batch_dims + (n, n))
w = operand.update(shape=batch_dims + (n,),
dtype=lax_internal._complex_basetype(operand.dtype))
else:
v, w = operand, operand
return v, w
def _eigh_cpu_gpu_translation_rule(syevd_impl, c, operand, lower):
shape = c.get_shape(operand)
batch_dims = shape.dimensions()[:-2]
v, w, info = syevd_impl(c, operand, lower=lower)
ok = xops.Eq(info, xops.ConstantLiteral(c, np.array(0, np.int32)))
v = _broadcasting_select(c, xops.Reshape(ok, batch_dims + (1, 1)), v,
_nan_like(c, v))
w = _broadcasting_select(c, xops.Reshape(ok, batch_dims + (1,)), w,
_nan_like(c, w))
return xops.Tuple(c, [v, w])
def eigh_jvp_rule(primals, tangents, lower):
# Derivative for eigh in the simplest case of distinct eigenvalues.
  # This is classic nondegenerate perturbation theory, but also see
# https://people.maths.ox.ac.uk/gilesm/files/NA-08-01.pdf
# The general solution treating the case of degenerate eigenvalues is
# considerably more complicated. Ambitious readers may refer to the general
# methods below or refer to degenerate perturbation theory in physics.
# https://www.win.tue.nl/analysis/reports/rana06-33.pdf and
# https://people.orie.cornell.edu/aslewis/publications/99-clarke.pdf
a, = primals
a_dot, = tangents
v, w_real = eigh_p.bind(symmetrize(a), lower=lower)
# for complex numbers we need eigenvalues to be full dtype of v, a:
w = w_real.astype(a.dtype)
eye_n = jnp.eye(a.shape[-1], dtype=a.dtype)
# carefully build reciprocal delta-eigenvalue matrix, avoiding NaNs.
Fmat = jnp.reciprocal(eye_n + w[..., jnp.newaxis, :] - w[..., jnp.newaxis]) - eye_n
# eigh impl doesn't support batch dims, but future-proof the grad.
dot = partial(lax.dot if a.ndim == 2 else lax.batch_matmul,
precision=lax.Precision.HIGHEST)
vdag_adot_v = dot(dot(_H(v), a_dot), v)
dv = dot(v, jnp.multiply(Fmat, vdag_adot_v))
dw = jnp.real(jnp.diagonal(vdag_adot_v, axis1=-2, axis2=-1))
return (v, w_real), (dv, dw)
def eigh_batching_rule(batched_args, batch_dims, lower):
x, = batched_args
bd, = batch_dims
x = batching.moveaxis(x, bd, 0)
return eigh_p.bind(x, lower=lower), (0, 0)
eigh_p = Primitive('eigh')
eigh_p.multiple_results = True
eigh_p.def_impl(eigh_impl)
eigh_p.def_abstract_eval(eigh_abstract_eval)
xla.translations[eigh_p] = eigh_translation_rule
ad.primitive_jvps[eigh_p] = eigh_jvp_rule
batching.primitive_batchers[eigh_p] = eigh_batching_rule
_cpu_syevd = lapack.syevd
xla.backend_specific_translations['cpu'][eigh_p] = partial(
_eigh_cpu_gpu_translation_rule, _cpu_syevd)
if cusolver is not None:
xla.backend_specific_translations['gpu'][eigh_p] = partial(
_eigh_cpu_gpu_translation_rule, cusolver.syevd)
if rocsolver is not None:
xla.backend_specific_translations['gpu'][eigh_p] = partial(
_eigh_cpu_gpu_translation_rule, rocsolver.syevd)
triangular_solve_dtype_rule = partial(
naryop_dtype_rule, _input_dtype, (_float | _complex, _float | _complex),
'triangular_solve')
def triangular_solve_shape_rule(a, b, left_side=False, **unused_kwargs):
if a.ndim < 2:
msg = "triangular_solve requires a.ndim to be at least 2, got {}."
raise TypeError(msg.format(a.ndim))
if b.ndim < 2:
msg = "triangular_solve requires b.ndim to be at least 2, got {}."
raise TypeError(msg.format(b.ndim))
if a.shape[-1] != a.shape[-2]:
msg = ("triangular_solve requires the last two dimensions of a to be equal "
"in size, got a.shape of {}.")
raise TypeError(msg.format(a.shape))
if a.shape[:-2] != b.shape[:-2]:
msg = ("triangular_solve requires both arguments to have the same number "
"of dimensions and equal batch dimensions, got {} and {}.")
raise TypeError(msg.format(a.shape, b.shape))
common_dim = -2 if left_side else -1
if a.shape[-1] != b.shape[common_dim]:
msg = "Incompatible shapes for arguments to triangular_solve: {} and {}."
raise TypeError(msg.format(a.shape, b.shape))
return b.shape
def triangular_solve_jvp_rule_a(
g_a, ans, a, b, left_side, lower, transpose_a, conjugate_a, unit_diagonal):
m, n = b.shape[-2:]
k = 1 if unit_diagonal else 0
g_a = jnp.tril(g_a, k=-k) if lower else jnp.triu(g_a, k=k)
g_a = lax.neg(g_a)
g_a = jnp.swapaxes(g_a, -1, -2) if transpose_a else g_a
g_a = jnp.conj(g_a) if conjugate_a else g_a
dot = partial(lax.dot if g_a.ndim == 2 else lax.batch_matmul,
precision=lax.Precision.HIGHEST)
def a_inverse(rhs):
return triangular_solve(a, rhs, left_side, lower, transpose_a, conjugate_a,
unit_diagonal)
  # triangular_solve is about the same cost as matrix multiplication (~n^2 FLOPs
# for matrix/vector inputs). Order these operations in whichever order is
# cheaper.
if left_side:
assert g_a.shape[-2:] == a.shape[-2:] == (m, m) and ans.shape[-2:] == (m, n)
if m > n:
return a_inverse(dot(g_a, ans)) # A^{-1} (∂A X)
else:
return dot(a_inverse(g_a), ans) # (A^{-1} ∂A) X
else:
assert g_a.shape[-2:] == a.shape[-2:] == (n, n) and ans.shape[-2:] == (m, n)
if m < n:
return a_inverse(dot(ans, g_a)) # (X ∂A) A^{-1}
else:
return dot(ans, a_inverse(g_a)) # X (∂A A^{-1})
def triangular_solve_transpose_rule(
cotangent, a, b, left_side, lower, transpose_a, conjugate_a,
unit_diagonal):
# Triangular solve is nonlinear in its first argument and linear in its second
# argument, analogous to `div` but swapped.
assert not ad.is_undefined_primal(a) and ad.is_undefined_primal(b)
if type(cotangent) is ad_util.Zero:
cotangent_b = ad_util.Zero(b.aval)
else:
cotangent_b = triangular_solve(a, cotangent, left_side, lower,
not transpose_a, conjugate_a, unit_diagonal)
return [None, cotangent_b]
def triangular_solve_batching_rule(batched_args, batch_dims, left_side,
lower, transpose_a, conjugate_a,
unit_diagonal):
x, y = batched_args
bx, by = batch_dims
if bx is batching.not_mapped:
if left_side:
y = batching.moveaxis(y, by, -1)
y_flat = y.reshape(y.shape[:-2] + (y.shape[-2] * y.shape[-1],))
bdim_out = y.ndim - 1
else:
y = batching.moveaxis(y, by, -2)
y_flat = y.reshape(y.shape[:-3] + (y.shape[-3] * y.shape[-2], y.shape[-1]))
bdim_out = y.ndim - 2
out_flat = triangular_solve(
x, y_flat, left_side=left_side, lower=lower,
transpose_a=transpose_a, conjugate_a=conjugate_a,
unit_diagonal=unit_diagonal)
return out_flat.reshape(y.shape), bdim_out
else:
size = next(t.shape[i] for t, i in zip(batched_args, batch_dims)
if i is not None)
x = batching.bdim_at_front(x, bx, size)
y = batching.bdim_at_front(y, by, size)
return triangular_solve(x, y, left_side=left_side, lower=lower,
transpose_a=transpose_a, conjugate_a=conjugate_a,
unit_diagonal=unit_diagonal), 0
def _triangular_solve_translation_rule(
ctx, avals_in, avals_out, a, b, *, left_side, lower, transpose_a,
conjugate_a, unit_diagonal):
if conjugate_a and not transpose_a:
a = xops.Conj(a)
conjugate_a = False
if not transpose_a:
transpose = xops.TriangularSolveOptions_Transpose.NO_TRANSPOSE
else:
transpose = (xops.TriangularSolveOptions_Transpose.ADJOINT if conjugate_a
else xops.TriangularSolveOptions_Transpose.TRANSPOSE)
return [
xops.TriangularSolve(a, b, left_side, lower, unit_diagonal, transpose)]
triangular_solve_p = standard_primitive(
triangular_solve_shape_rule, triangular_solve_dtype_rule,
'triangular_solve', translation_rule=_triangular_solve_translation_rule)
ad.defjvp2(triangular_solve_p,
triangular_solve_jvp_rule_a,
lambda g_b, _, a, b, **kws: triangular_solve(a, g_b, **kws))
ad.primitive_transposes[triangular_solve_p] = triangular_solve_transpose_rule
batching.primitive_batchers[triangular_solve_p] = triangular_solve_batching_rule
def _triangular_solve_cpu_translation_rule(
c, a, b, left_side, lower, transpose_a, conjugate_a, unit_diagonal):
shape = c.get_shape(a)
dtype = shape.element_type().type
if conjugate_a and not transpose_a:
a = xops.Conj(a)
conjugate_a = False
if len(shape.dimensions()) == 2 and np.dtype(dtype) in _cpu_lapack_types:
return lapack.jax_trsm(
c, xb.constant(c, np.array(1, dtype=dtype)),
a, b, left_side, lower, transpose_a, conjugate_a, unit_diagonal)
else:
# Fall back to the HLO implementation for unsupported types or batching.
# TODO: Consider swapping XLA for LAPACK in batched case
if not transpose_a:
transpose = xops.TriangularSolveOptions_Transpose.NO_TRANSPOSE
else:
transpose = (xops.TriangularSolveOptions_Transpose.ADJOINT if conjugate_a
else xops.TriangularSolveOptions_Transpose.TRANSPOSE)
return xops.TriangularSolve(a, b, left_side, lower, unit_diagonal, transpose)
xla.backend_specific_translations['cpu'][triangular_solve_p] = \
_triangular_solve_cpu_translation_rule
def _triangular_solve_gpu_translation_rule(trsm_impl,
c, a, b, left_side, lower, transpose_a, conjugate_a, unit_diagonal):
shape = c.get_shape(a)
dims = shape.dimensions()
m, n = dims[-2:]
batch = prod(dims[:-2])
if conjugate_a and not transpose_a:
a = xops.Conj(a)
conjugate_a = False
if batch > 1 and m <= 256 and n <= 256:
return trsm_impl(
c, a, b, left_side, lower, transpose_a,
conjugate_a, unit_diagonal)
else:
# Use the XLA implementation for unbatched triangular_solve.
if not transpose_a:
transpose = xops.TriangularSolveOptions_Transpose.NO_TRANSPOSE
else:
transpose = (xops.TriangularSolveOptions_Transpose.ADJOINT if conjugate_a
else xops.TriangularSolveOptions_Transpose.TRANSPOSE)
return xops.TriangularSolve(a, b, left_side, lower, unit_diagonal,
transpose)
if cusolver is not None:
xla.backend_specific_translations['gpu'][triangular_solve_p] = \
partial(_triangular_solve_gpu_translation_rule, cusolver.trsm)
if rocsolver is not None:
xla.backend_specific_translations['gpu'][triangular_solve_p] = \
partial(_triangular_solve_gpu_translation_rule, rocsolver.trsm)
# Support operation for LU decomposition: Transformation of the pivots returned
# by LU decomposition into permutations.
# Define this outside lu_pivots_to_permutation to ensure fori_loop cache hits
def _lu_pivots_body_fn(i, permutation_and_swaps):
permutation, swaps = permutation_and_swaps
batch_dims = swaps.shape[:-1]
j = swaps[..., i]
iotas = jnp.ix_(*(lax.iota(jnp.int32, b) for b in batch_dims))
x = permutation[..., i]
y = permutation[iotas + (j,)]
permutation = permutation.at[..., i].set(y)
return permutation.at[iotas + (j,)].set(x), swaps
@partial(api.jit, static_argnums=(1,))
def _generic_lu_pivots_to_permutation(swaps, m):
"""Converts the pivots (row swaps) returned by LU to a permutation.
We build a permutation rather than applying `swaps` directly to the rows
of a matrix because lax loops aren't differentiable.
Args:
swaps: an array of shape (..., k) of row swaps to perform
m: the size of the output permutation. m should be >= k.
Returns:
An int32 array of shape (..., m).
"""
assert len(swaps.shape) >= 1
batch_dims = swaps.shape[:-1]
k = swaps.shape[-1]
permutation = lax.broadcasted_iota(jnp.int32, batch_dims + (m,),
len(batch_dims))
if m == 0:
return permutation
result, _ = lax.fori_loop(np.array(0, np.int32), np.array(k, np.int32),
_lu_pivots_body_fn, (permutation, swaps))
return result
def _lu_pivots_to_permutation_abstract_eval(pivots, *, permutation_size):
pivots = raise_to_shaped(pivots)
if isinstance(pivots, ShapedArray):
if pivots.ndim < 1 or pivots.dtype != np.dtype(np.int32):
raise ValueError(
'Argument to lu_pivots_to_permutation must have rank >= 1 and dtype '
'int32. Got shape={} and dtype={}'.format(pivots.shape, pivots.dtype))
if permutation_size < pivots.shape[-1]:
raise ValueError(
'Output permutation size {} has to exceed the trailing dimension of '
'the pivots. Got shape {}'.format(permutation_size, pivots.shape))
batch_dims = pivots.shape[:-1]
permutations = pivots.update(shape=batch_dims + (permutation_size,))
else:
permutations = pivots
return permutations
def _lu_pivots_to_permutation_batching_rule(batched_args, batch_dims, *,
permutation_size):
x, = batched_args
bd, = batch_dims
x = batching.moveaxis(x, bd, 0)
return lu_pivots_to_permutation_p.bind(
x, permutation_size=permutation_size), 0
def _lu_pivots_to_permutation_translation_rule(c, pivots, *, permutation_size):
lowered_fun = xla.lower_fun(
lambda x: _generic_lu_pivots_to_permutation(x, permutation_size),
multiple_results=False)
return lowered_fun(c, pivots)
lu_pivots_to_permutation_p = Primitive('lu_pivots_to_permutation')
lu_pivots_to_permutation_p.multiple_results = False
lu_pivots_to_permutation_p.def_impl(
partial(xla.apply_primitive, lu_pivots_to_permutation_p))
lu_pivots_to_permutation_p.def_abstract_eval(
_lu_pivots_to_permutation_abstract_eval)
batching.primitive_batchers[lu_pivots_to_permutation_p] = (
_lu_pivots_to_permutation_batching_rule)
xla.translations[lu_pivots_to_permutation_p] = (
_lu_pivots_to_permutation_translation_rule)
if cuda_linalg:
xla.backend_specific_translations['gpu'][lu_pivots_to_permutation_p] = (
cuda_linalg.lu_pivots_to_permutation)
# LU decomposition
# Computes a pivoted LU decomposition such that
# PA = LU
# In the style of LAPACK, LU are stored in the same matrix.
def _lu_unblocked(a):
"""Unblocked LU decomposition, as a rolled loop."""
m, n = a.shape
def body(k, state):
pivot, perm, a = state
m_idx = jnp.arange(m)
n_idx = jnp.arange(n)
if jnp.issubdtype(a.dtype, jnp.complexfloating):
t = a[:, k]
magnitude = jnp.abs(jnp.real(t)) + jnp.abs(jnp.imag(t))
else:
magnitude = jnp.abs(a[:, k])
i = jnp.argmax(jnp.where(m_idx >= k, magnitude, -jnp.inf))
pivot = pivot.at[k].set(i)
a = a.at[[k, i],].set(a[[i, k],])
perm = perm.at[[i, k],].set(perm[[k, i],])
# a[k+1:, k] /= a[k, k], adapted for loop-invariant shapes
x = a[k, k]
a = a.at[:, k].set(jnp.where(m_idx > k, a[:, k] / x, a[:, k]))
# a[k+1:, k+1:] -= jnp.outer(a[k+1:, k], a[k, k+1:])
a = a - jnp.where((m_idx[:, None] > k) & (n_idx > k),
jnp.outer(a[:, k], a[k, :]), jnp.array(0, dtype=a.dtype))
return pivot, perm, a
pivot = jnp.zeros((min(m, n),), dtype=jnp.int32)
perm = jnp.arange(m, dtype=jnp.int32)
if m == 0 and n == 0:
# If the array is empty, the loop body never executes but tracing it to a
# jaxpr fails because the indexing cannot succeed.
return (pivot, perm, a)
return lax.fori_loop(0, min(m, n), body, (pivot, perm, a))
def _lu_blocked(a, block_size=128):
"""Blocked LU decomposition, as an unrolled loop."""
m, n = a.shape
r = min(m, n)
pivot = jnp.zeros((r,), dtype=jnp.int32)
perm = jnp.arange(m, dtype=jnp.int32)
for k in range(0, r, block_size):
b = min(r - k, block_size)
block_pivot, block_perm, lu_block = _lu_unblocked(a[k:, k:k+b])
pivot = pivot.at[k:k+b].set(block_pivot + k)
perm = perm.at[k:].set(perm[block_perm + k])
a = a.at[k:, :].set(a[block_perm + k, :])
a = a.at[k:, k:k+b].set(lu_block)
if k + b < n:
a = a.at[k:k+b, k+b:].set(
triangular_solve(a[k:k+b, k:k+b], a[k:k+b, k+b:], left_side=True,
lower=True, unit_diagonal=True))
a = a.at[k+b:, k+b:].add(-lax.dot(a[k+b:, k:k+b], a[k:k+b, k+b:],
precision=lax.Precision.HIGHEST))
return a, pivot, perm
def _lu_python(x):
"""Default LU decomposition in Python, where no better version exists."""
m, n = x.shape[-2:]
batch_dims = x.shape[:-2]
if len(batch_dims) > 0:
batch_size = np.prod(batch_dims, dtype=np.int64)
lu, pivot, perm = api.vmap(_lu_blocked)(lax.reshape(x, (batch_size, m, n)))
lu = lax.reshape(lu, batch_dims + (m, n))
pivot = lax.reshape(pivot, batch_dims + (min(m, n),))
perm = lax.reshape(perm, batch_dims + (m,))
else:
lu, pivot, perm = _lu_blocked(x)
return lu, pivot, perm
def _lu_impl(operand):
lu, pivot, perm = xla.apply_primitive(lu_p, operand)
return lu, pivot, perm
def _lu_abstract_eval(operand):
operand = raise_to_shaped(operand)
if isinstance(operand, ShapedArray):
if operand.ndim < 2:
raise ValueError("Argument to LU decomposition must have ndims >= 2")
batch_dims = operand.shape[:-2]
m = operand.shape[-2]
n = operand.shape[-1]
pivot = operand.update(shape=batch_dims + (min(m, n),), dtype=jnp.int32)
perm = operand.update(shape=batch_dims + (m,), dtype=jnp.int32)
else:
pivot = operand
perm = operand
return operand, pivot, perm
def _lu_jvp_rule(primals, tangents):
a, = primals
a_dot, = tangents
lu, pivots, permutation = lu_p.bind(a)
a_shape = jnp.shape(a)
m, n = a_shape[-2:]
dtype = lax.dtype(a)
k = min(m, n)
batch_dims = a_shape[:-2]
iotas = jnp.ix_(*(lax.iota(jnp.int32, b) for b in batch_dims + (1,)))
x = a_dot[iotas[:-1] + (permutation, slice(None))]
# Differentiation of Matrix Functionals Using Triangular Factorization
# F. R. De Hoog, R. S. Anderssen, and M. A. Lukas
#
# LU = A
# ==> L'U + LU' = A'
# ==> inv(L) . L' + U' . inv(U) = inv(L) A' inv(U)
# ==> L' = L . tril(inv(L) . A' . inv(U), -1)
# U' = triu(inv(L) . A' . inv(U)) . U
ndims = len(a_shape)
l_padding = [(0, 0, 0)] * ndims
l_padding[-1] = (0, m - k, 0)
zero = jnp._constant_like(lu, 0)
l = lax.pad(jnp.tril(lu[..., :, :k], -1), zero, l_padding)
l = l + jnp.eye(m, m, dtype=dtype)
u_eye = lax.pad(jnp.eye(n - k, n - k, dtype=dtype), zero,
((k, 0, 0), (k, 0, 0)))
u_padding = [(0, 0, 0)] * ndims
u_padding[-2] = (0, n - k, 0)
u = lax.pad(jnp.triu(lu[..., :k, :]), zero, u_padding) + u_eye
la = triangular_solve(l, x, left_side=True, transpose_a=False, lower=True,
unit_diagonal=True)
lau = triangular_solve(u, la, left_side=False, transpose_a=False,
lower=False)
l_dot = jnp.matmul(l, jnp.tril(lau, -1))
u_dot = jnp.matmul(jnp.triu(lau), u)
lu_dot = l_dot + u_dot
return (lu, pivots, permutation), (lu_dot, ad_util.Zero.from_value(pivots),
ad_util.Zero.from_value(permutation))
def _lu_batching_rule(batched_args, batch_dims):
x, = batched_args
bd, = batch_dims
x = batching.moveaxis(x, bd, 0)
return lu_p.bind(x), (0, 0, 0)
def _lu_cpu_gpu_translation_rule(getrf_impl, c, operand, backend):
shape = c.get_shape(operand)
batch_dims = shape.dimensions()[:-2]
m = shape.dimensions()[-2]
lu, pivot, info = getrf_impl(c, operand)
# Subtract 1 from the pivot to get 0-based indices.
pivot = xops.Sub(pivot, xops.ConstantLiteral(c, np.array(1, np.int32)))
ok = xops.Ge(info, xops.ConstantLiteral(c, np.array(0, np.int32)))
lu = _broadcasting_select(c, xops.Reshape(ok, batch_dims + (1, 1)), lu,
_nan_like(c, lu))
perm = xla.lower_fun(lambda x: lu_pivots_to_permutation(x, m),
multiple_results=False, backend=backend)(c, pivot)
return xops.Tuple(c, [lu, pivot, perm])
def _lu_tpu_translation_rule(c, operand):
if hasattr(xops, "LU"):
lu, pivot, perm = xops.LU(operand)
return xops.Tuple(c, [lu, pivot, perm])
else:
return xla.lower_fun(_lu_python, multiple_results=True)(c, operand)
lu_p = Primitive('lu')
lu_p.multiple_results = True
lu_p.def_impl(_lu_impl)
lu_p.def_abstract_eval(_lu_abstract_eval)
xla.translations[lu_p] = xla.lower_fun(_lu_python, multiple_results=True)
ad.primitive_jvps[lu_p] = _lu_jvp_rule
batching.primitive_batchers[lu_p] = _lu_batching_rule
xla.backend_specific_translations['cpu'][lu_p] = partial(
_lu_cpu_gpu_translation_rule, lapack.getrf, backend='cpu')
if cusolver is not None:
xla.backend_specific_translations['gpu'][lu_p] = partial(
_lu_cpu_gpu_translation_rule, cusolver.getrf, backend='gpu')
if rocsolver is not None:
xla.backend_specific_translations['gpu'][lu_p] = partial(
_lu_cpu_gpu_translation_rule, rocsolver.getrf, backend='gpu')
xla.backend_specific_translations['tpu'][lu_p] = _lu_tpu_translation_rule
@partial(vectorize, excluded={3}, signature='(n,n),(n),(n,k)->(n,k)')
def _lu_solve_core(lu, permutation, b, trans):
m = lu.shape[0]
x = jnp.reshape(b, (m, np.prod(b.shape[1:])))
if trans == 0:
x = x[permutation, :]
x = triangular_solve(lu, x, left_side=True, lower=True, unit_diagonal=True)
x = triangular_solve(lu, x, left_side=True, lower=False)
elif trans == 1 or trans == 2:
conj = trans == 2
x = triangular_solve(lu, x, left_side=True, lower=False, transpose_a=True,
conjugate_a=conj)
x = triangular_solve(lu, x, left_side=True, lower=True, unit_diagonal=True,
transpose_a=True, conjugate_a=conj)
x = x[jnp.argsort(permutation), :]
else:
raise ValueError("'trans' value must be 0, 1, or 2, got {}".format(trans))
return lax.reshape(x, b.shape)
@partial(api.jit, static_argnums=(3,))
def _lu_solve(lu, permutation, b, trans):
if len(lu.shape) < 2 or lu.shape[-1] != lu.shape[-2]:
raise ValueError("last two dimensions of LU decomposition must be equal, "
"got shape {}".format(lu.shape))
if len(b.shape) < 1:
raise ValueError("b matrix must have rank >= 1, got shape {}"
.format(b.shape))
# Broadcasting follows NumPy's convention for linalg.solve: the RHS is
# treated as a (batched) vector if the number of dimensions differ by 1.
# Otherwise, broadcasting rules apply.
rhs_vector = lu.ndim == b.ndim + 1
if rhs_vector:
if b.shape[-1] != lu.shape[-1]:
raise ValueError("When LU decomposition matrix and b have the same "
"number of dimensions, last axis of LU decomposition "
"matrix (shape {}) and b array (shape {}) must match"
.format(lu.shape, b.shape))
b = b[..., jnp.newaxis]
else:
if b.shape[-2] != lu.shape[-1]:
raise ValueError("When LU decomposition matrix and b different "
"numbers of dimensions, last axis of LU decomposition "
"matrix (shape {}) and second to last axis of b array "
"(shape {}) must match"
.format(lu.shape, b.shape))
x = _lu_solve_core(lu, permutation, b, trans)
return x[..., 0] if rhs_vector else x
def lu_solve(lu, permutation, b, trans=0):
"""LU solve with broadcasting."""
return _lu_solve(lu, permutation, b, trans)
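# Illustrative usage sketch (added for documentation only; names and values are
# assumptions, not part of the original module).
def _lu_solve_usage_sketch():
  a = jnp.array([[3., 1.], [1., 2.]])
  b = jnp.array([9., 8.])
  lu_mat, _, perm = lu(a)
  return lu_solve(lu_mat, perm, b)  # solves a @ x = b; x is approximately [2., 3.]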
# QR decomposition
def qr_impl(operand, full_matrices):
q, r = xla.apply_primitive(qr_p, operand, full_matrices=full_matrices)
return q, r
def qr_translation_rule(c, operand, full_matrices):
return xops.Tuple(c, xops.QR(operand, full_matrices))
def qr_abstract_eval(operand, full_matrices):
if isinstance(operand, ShapedArray):
if operand.ndim < 2:
raise ValueError("Argument to QR decomposition must have ndims >= 2")
batch_dims = operand.shape[:-2]
m = operand.shape[-2]
n = operand.shape[-1]
k = m if full_matrices else min(m, n)
q = operand.update(shape=batch_dims + (m, k))
r = operand.update(shape=batch_dims + (k, n))
else:
q = operand
r = operand
return q, r
def qr_jvp_rule(primals, tangents, full_matrices):
# See j-towns.github.io/papers/qr-derivative.pdf for a terse derivation.
x, = primals
dx, = tangents
q, r = qr_p.bind(x, full_matrices=False)
*_, m, n = x.shape
if full_matrices or m < n:
raise NotImplementedError(
"Unimplemented case of QR decomposition derivative")
dx_rinv = triangular_solve(r, dx) # Right side solve by default
qt_dx_rinv = jnp.matmul(_H(q), dx_rinv)
qt_dx_rinv_lower = jnp.tril(qt_dx_rinv, -1)
do = qt_dx_rinv_lower - _H(qt_dx_rinv_lower) # This is skew-symmetric
# The following correction is necessary for complex inputs
do = do + jnp.eye(n, dtype=do.dtype) * (qt_dx_rinv - jnp.real(qt_dx_rinv))
dq = jnp.matmul(q, do - qt_dx_rinv) + dx_rinv
dr = jnp.matmul(qt_dx_rinv - do, r)
return (q, r), (dq, dr)
def qr_batching_rule(batched_args, batch_dims, full_matrices):
x, = batched_args
bd, = batch_dims
x = batching.moveaxis(x, bd, 0)
return qr_p.bind(x, full_matrices=full_matrices), (0, 0)
def _qr_cpu_gpu_translation_rule(geqrf_impl, orgqr_impl, c, operand,
full_matrices):
shape = c.get_shape(operand)
dims = shape.dimensions()
m, n = dims[-2:]
batch_dims = dims[:-2]
r, tau, info_geqrf = geqrf_impl(c, operand)
if m < n:
q = xops.Slice(r, [0] * len(dims), list(batch_dims) + [m, m],
[1] * len(dims))
q, info_orgqr = orgqr_impl(c, q, tau)
elif not full_matrices:
q, info_orgqr = orgqr_impl(c, r, tau)
r = xops.Slice(r, [0] * len(dims), list(batch_dims) + [n, n],
[1] * len(dims))
else:
padding_config = [(0, 0, 0)] * len(dims)
padding_config[-1] = (0, m - n, 0)
q = xops.Pad(r, xops.Constant(c, np.array(0, dtype=shape.element_type())),
xla_client.make_padding_config(padding_config))
q, info_orgqr = orgqr_impl(c, q, tau)
if info_geqrf is not None:
ok = xops.And(
xops.Eq(info_geqrf, xops.ConstantLiteral(c, np.array(0, np.int32))),
xops.Eq(info_orgqr, xops.ConstantLiteral(c, np.array(0, np.int32))))
q = _broadcasting_select(c, xops.Reshape(ok, batch_dims + (1, 1)), q,
_nan_like(c, q))
r = _broadcasting_select(c, xops.Reshape(ok, batch_dims + (1, 1)), r,
_nan_like(c, r))
else:
pass # rocsolver does not return info
r = xla.lower_fun(jnp.triu, multiple_results=False)(c, r)
return xops.Tuple(c, [q, r])
qr_p = Primitive('qr')
qr_p.multiple_results = True
qr_p.def_impl(qr_impl)
qr_p.def_abstract_eval(qr_abstract_eval)
xla.translations[qr_p] = qr_translation_rule
ad.primitive_jvps[qr_p] = qr_jvp_rule
batching.primitive_batchers[qr_p] = qr_batching_rule
xla.backend_specific_translations['cpu'][qr_p] = partial(
_qr_cpu_gpu_translation_rule, lapack.geqrf, lapack.orgqr)
if cusolver is not None:
xla.backend_specific_translations['gpu'][qr_p] = partial(
_qr_cpu_gpu_translation_rule, cusolver.geqrf, cusolver.orgqr)
if rocsolver is not None:
xla.backend_specific_translations['gpu'][qr_p] = partial(
_qr_cpu_gpu_translation_rule, rocsolver.geqrf, rocsolver.orgqr)
# Singular value decomposition
def svd_impl(operand, full_matrices, compute_uv):
return xla.apply_primitive(svd_p, operand, full_matrices=full_matrices,
compute_uv=compute_uv)
def svd_translation_rule(c, operand, full_matrices, compute_uv):
shape = c.get_shape(operand).dimensions()
m, n = shape[-2:]
if m == 0 or n == 0:
return xla.lower_fun(_empty_svd, multiple_results=True)(
c, operand, full_matrices=full_matrices, compute_uv=compute_uv)
u, s, v = xops.SVD(operand)
permutation = list(range(len(shape)))
permutation[-1], permutation[-2] = permutation[-2], permutation[-1]
vt = xops.Transpose(v, permutation)
if not full_matrices and m != n:
u = xops.SliceInDim(u, 0, min(m, n), stride=1, dimno=len(shape) - 1)
vt = xops.SliceInDim(vt, 0, min(m, n), stride=1, dimno=len(shape) - 2)
if not compute_uv:
return xops.Tuple(c, [s])
else:
return xops.Tuple(c, [s, u, vt])
def svd_abstract_eval(operand, full_matrices, compute_uv):
if isinstance(operand, ShapedArray):
if operand.ndim < 2:
raise ValueError("Argument to singular value decomposition must have ndims >= 2")
batch_dims = operand.shape[:-2]
m = operand.shape[-2]
n = operand.shape[-1]
s = operand.update(shape=batch_dims + (min(m, n),),
dtype=lax_internal._complex_basetype(operand.dtype))
if compute_uv:
u = operand.update(shape=batch_dims + (m, m if full_matrices else min(m, n)))
vt = operand.update(shape=batch_dims + (n if full_matrices else min(m, n), n))
return s, u, vt
else:
return s,
else:
raise NotImplementedError
def svd_jvp_rule(primals, tangents, full_matrices, compute_uv):
A, = primals
dA, = tangents
s, U, Vt = svd_p.bind(A, full_matrices=False, compute_uv=True)
if compute_uv and full_matrices:
# TODO: implement full matrices case, documented here: https://people.maths.ox.ac.uk/gilesm/files/NA-08-01.pdf
raise NotImplementedError(
"Singular value decomposition JVP not implemented for full matrices")
Ut, V = _H(U), _H(Vt)
s_dim = s[..., None, :]
dS = jnp.matmul(jnp.matmul(Ut, dA), V)
ds = jnp.real(jnp.diagonal(dS, 0, -2, -1))
if not compute_uv:
return (s,), (ds,)
s_diffs = (s_dim + _T(s_dim)) * (s_dim - _T(s_dim))
s_diffs_zeros = jnp.eye(s.shape[-1], dtype=s.dtype) # jnp.ones((), dtype=A.dtype) * (s_diffs == 0.) # is 1. where s_diffs is 0. and is 0. everywhere else
F = 1 / (s_diffs + s_diffs_zeros) - s_diffs_zeros
dSS = s_dim * dS # dS.dot(jnp.diag(s))
SdS = _T(s_dim) * dS # jnp.diag(s).dot(dS)
s_zeros = jnp.ones((), dtype=A.dtype) * (s == 0.)
s_inv = 1 / (s + s_zeros) - s_zeros
s_inv_mat = jnp.vectorize(jnp.diag, signature='(k)->(k,k)')(s_inv)
dUdV_diag = .5 * (dS - _H(dS)) * s_inv_mat
dU = jnp.matmul(U, F * (dSS + _H(dSS)) + dUdV_diag)
dV = jnp.matmul(V, F * (SdS + _H(SdS)))
m, n = A.shape[-2:]
if m > n:
dU = dU + jnp.matmul(jnp.eye(m, dtype=A.dtype) - jnp.matmul(U, Ut), jnp.matmul(dA, V)) / s_dim
if n > m:
dV = dV + jnp.matmul(jnp.eye(n, dtype=A.dtype) - jnp.matmul(V, Vt), jnp.matmul(_H(dA), U)) / s_dim
return (s, U, Vt), (ds, dU, _H(dV))
def _empty_svd(a, *, full_matrices, compute_uv):
batch_shape = a.shape[:-2]
m, n = a.shape[-2:]
s = jnp.empty(batch_shape + (0,), dtype=lax_internal._complex_basetype(a.dtype))
if not compute_uv:
return (s,)
if full_matrices:
size = max(m, n)
u = jnp.broadcast_to(jnp.eye(size, dtype=a.dtype), batch_shape + (size, size))
else:
u = jnp.empty(batch_shape + (m, n), dtype=a.dtype)
v = jnp.empty(batch_shape + (0, 0), dtype=a.dtype)
if m < n:
u, v = v, u
return s, u, v
def _svd_cpu_gpu_translation_rule(gesvd_impl, c, operand, full_matrices, compute_uv):
shape = c.get_shape(operand).dimensions()
m, n = shape[-2:]
batch_dims = shape[:-2]
if m == 0 or n == 0:
return xla.lower_fun(_empty_svd, multiple_results=True)(
c, operand, full_matrices=full_matrices, compute_uv=compute_uv)
s, u, vt, info = gesvd_impl(c, operand,
full_matrices=full_matrices,
compute_uv=compute_uv)
ok = xops.Eq(info, xops.ConstantLiteral(c, np.array(0, np.int32)))
s = _broadcasting_select(c, xops.Reshape(ok, batch_dims + (1,)), s,
_nan_like(c, s))
result = [s]
if compute_uv:
u = _broadcasting_select(c, xops.Reshape(ok, batch_dims + (1, 1)), u,
_nan_like(c, u))
vt = _broadcasting_select(c, xops.Reshape(ok, batch_dims + (1, 1)), vt,
_nan_like(c, vt))
result += [u, vt]
return xops.Tuple(c, result)
def svd_batching_rule(batched_args, batch_dims, full_matrices, compute_uv):
x, = batched_args
bd, = batch_dims
x = batching.moveaxis(x, bd, 0)
outs = svd_p.bind(x, full_matrices=full_matrices, compute_uv=compute_uv)
if compute_uv:
return outs, (0, 0, 0)
else:
return outs, (0,)
svd_p = Primitive('svd')
svd_p.multiple_results = True
svd_p.def_impl(svd_impl)
svd_p.def_abstract_eval(svd_abstract_eval)
ad.primitive_jvps[svd_p] = svd_jvp_rule
batching.primitive_batchers[svd_p] = svd_batching_rule
xla.translations[svd_p] = svd_translation_rule
xla.backend_specific_translations['cpu'][svd_p] = partial(
_svd_cpu_gpu_translation_rule, lapack.gesdd)
if cusolver is not None:
xla.backend_specific_translations['gpu'][svd_p] = partial(
_svd_cpu_gpu_translation_rule, cusolver.gesvd)
if rocsolver is not None:
xla.backend_specific_translations['gpu'][svd_p] = partial(
_svd_cpu_gpu_translation_rule, rocsolver.gesvd)
tridiagonal_solve_p = Primitive('tridiagonal_solve')
tridiagonal_solve_p.multiple_results = False
tridiagonal_solve_p.def_impl(
functools.partial(xla.apply_primitive, tridiagonal_solve_p))
tridiagonal_solve_p.def_abstract_eval(lambda dl, d, du, b, *, m, n, ldb, t: b)
# TODO(tomhennigan): Consider AD rules using lax.custom_linear_solve?
if cusparse is not None and hasattr(cusparse, "gtsv2"):
xla.backend_specific_translations['gpu'][tridiagonal_solve_p] = cusparse.gtsv2
def _tridiagonal_solve_translation_rule(c, dl, d, du, b, *, m, n, ldb, t):
del m, n, ldb, t
lowered_fun = xla.lower_fun(_tridiagonal_solve_jax, multiple_results=False)
return lowered_fun(c, dl, d, du, b)
xla.translations[tridiagonal_solve_p] = _tridiagonal_solve_translation_rule
def _tridiagonal_solve_jax(dl, d, du, b):
"""Pure JAX implementation of `tridiagonal_solve`."""
prepend_zero = lambda x: jnp.append(jnp.zeros([1], dtype=x.dtype), x[:-1])
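  # This is the Thomas algorithm expressed with lax.scan: fwd1 produces the
  # rescaled upper-diagonal coefficients, fwd2 the transformed right-hand side,
  # and bwd1 back-substitutes over the reversed arrays; each scan step only
  # needs the carry from the previous row.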
fwd1 = lambda tu_, x: x[1] / (x[0] - x[2] * tu_)
fwd2 = lambda b_, x: (x[0] - x[3] * b_) / (x[1] - x[3] * x[2])
bwd1 = lambda x_, x: x[0] - x[1] * x_
double = lambda f, args: (f(*args), f(*args))
# Forward pass.
_, tu_ = lax.scan(lambda tu_, x: double(fwd1, (tu_, x)),
du[0] / d[0],
(d, du, dl),
unroll=32)
_, b_ = lax.scan(lambda b_, x: double(fwd2, (b_, x)),
b[0] / d[0],
(b, d, prepend_zero(tu_), dl),
unroll=32)
# Backsubstitution.
_, x_ = lax.scan(lambda x_, x: double(bwd1, (x_, x)),
b_[-1],
(b_[::-1], tu_[::-1]),
unroll=32)
return x_[::-1]
def tridiagonal_solve(dl, d, du, b):
r"""Computes the solution of a tridiagonal linear system.
This function computes the solution of a tridiagonal linear system::
.. math::
A . X = B
Args:
dl: The lower diagonal of A: ``dl[i] := A[i, i-1]`` for i in ``[0,m)``.
Note that ``dl[0] = 0``.
    d: The middle diagonal of A: ``d[i] := A[i, i]`` for i in ``[0,m)``.
du: The upper diagonal of A: ``du[i] := A[i, i+1]`` for i in ``[0,m)``.
      Note that ``du[m - 1] = 0``.
b: Right hand side matrix.
Returns:
Solution ``X`` of tridiagonal system.
"""
if dl.ndim != 1 or d.ndim != 1 or du.ndim != 1:
raise ValueError('dl, d and du must be vectors')
if dl.shape != d.shape or d.shape != du.shape:
raise ValueError(
f'dl={dl.shape}, d={d.shape} and du={du.shape} must all be `[m]`')
if b.ndim != 2:
raise ValueError(f'b={b.shape} must be a matrix')
m, = dl.shape
if m < 3:
raise ValueError(f'm ({m}) must be >= 3')
ldb, n = b.shape
if ldb < max(1, m):
raise ValueError(f'Leading dimension of b={ldb} must be ≥ max(1, {m})')
if dl.dtype != d.dtype or d.dtype != du.dtype or du.dtype != b.dtype:
raise ValueError(f'dl={dl.dtype}, d={d.dtype}, du={du.dtype} and '
                     f'b={b.dtype} must be the same dtype.')
t = dl.dtype
if t not in (np.float32, np.float64):
raise ValueError(f'Only f32/f64 are supported, got {t}')
return tridiagonal_solve_p.bind(dl, d, du, b, m=m, n=n, ldb=ldb, t=t)
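# A minimal usage sketch (not part of the original module and never invoked
# here): building the three diagonals of a small 4x4 system and solving it with
# `tridiagonal_solve` above. The helper name and values are hypothetical.
def _tridiagonal_solve_example():
  dl = jnp.array([0., 1., 1., 1.], dtype=np.float32)  # dl[0] must be 0
  d = jnp.array([2., 2., 2., 2.], dtype=np.float32)   # main diagonal
  du = jnp.array([1., 1., 1., 0.], dtype=np.float32)  # du[m - 1] must be 0
  b = jnp.ones((4, 1), dtype=np.float32)              # right-hand side, shape [m, n]
  return tridiagonal_solve(dl, d, du, b)              # X such that A @ X = b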
# Schur Decomposition
def schur(x,
compute_schur_vectors=True,
sort_eig_vals=False,
select_callable=None):
return schur_p.bind(
x,
compute_schur_vectors=compute_schur_vectors,
sort_eig_vals=sort_eig_vals,
select_callable=select_callable)
def _schur_impl(operand, *, compute_schur_vectors, sort_eig_vals,
select_callable):
return xla.apply_primitive(
schur_p,
operand,
compute_schur_vectors=compute_schur_vectors,
sort_eig_vals=sort_eig_vals,
select_callable=select_callable)
def _schur_translation_rule(c, operand, *, compute_schur_vectors,
sort_eig_vals):
raise NotImplementedError(
"Schur decomposition is only implemented on the CPU backend.")
def _schur_abstract_eval(operand, *, compute_schur_vectors, sort_eig_vals,
select_callable):
if operand.ndim < 2 or operand.shape[-2] != operand.shape[-1]:
raise ValueError("Argument to Schur decomposition must have "
"shape [..., n, n], got shape {}".format(operand.shape))
batch_dims = operand.shape[:-2]
n = operand.shape[-1]
dtype = operand.dtype
dtype = dtypes.canonicalize_dtype(dtype)
T = operand.update(shape=batch_dims + (n, n), dtype=dtype)
vs = operand.update(shape=batch_dims + (n, n), dtype=dtype)
return (T, vs) if compute_schur_vectors else (T,)
def _schur_cpu_translation_rule(c, operand, *, compute_schur_vectors,
sort_eig_vals, select_callable):
shape = c.get_shape(operand)
batch_dims = shape.dimensions()[:-2]
if jaxlib_version < (0, 1, 72):
raise NotImplementedError(
"The Schur primitive is only implemented for jaxlib versions >= 0.1.72"
)
_cpu_gees = lapack.gees
if sort_eig_vals:
T, vs, sdim, info = _cpu_gees(
c,
operand,
jobvs=compute_schur_vectors,
sort=sort_eig_vals,
select=select_callable)
else:
T, vs, info = _cpu_gees(
c,
operand,
jobvs=compute_schur_vectors,
sort=sort_eig_vals,
select=select_callable)
ok = xops.Eq(info, xops.ConstantLiteral(c, np.array(0, np.int32)))
T = _broadcasting_select(c, xops.Reshape(ok, batch_dims + (1, 1)), T,
_nan_like(c, T))
output = [T]
if compute_schur_vectors:
vs = _broadcasting_select(c, xops.Reshape(ok, batch_dims + (1, 1)), vs,
_nan_like(c, vs))
output.append(vs)
return xops.Tuple(c, output)
def _schur_batching_rule(batched_args, batch_dims, *, compute_schur_vectors,
sort_eig_vals, select_callable):
x, = batched_args
bd, = batch_dims
x = batching.moveaxis(x, bd, 0)
return schur_p.bind(
x,
compute_schur_vectors=compute_schur_vectors,
sort_eig_vals=sort_eig_vals,
select_callable=select_callable), (0,) * (1 + compute_schur_vectors)
def _schur_jvp_rule(primals, tangents, *, compute_schur_vectors, sort_eig_vals):
raise NotImplementedError(
'The differentiation rules for the Schur factorization have not been implemented.'
)
schur_p = Primitive('schur')
schur_p.multiple_results = True
schur_p.def_impl(_schur_impl)
schur_p.def_abstract_eval(_schur_abstract_eval)
xla.translations[schur_p] = _schur_translation_rule
xla.backend_specific_translations['cpu'][schur_p] = _schur_cpu_translation_rule
batching.primitive_batchers[schur_p] = _schur_batching_rule
ad.primitive_jvps[schur_p] = _schur_jvp_rule
| 36.430582 | 157 | 0.665443 |
4a20651d5a76b755bafd113e7a42c61b581005a2 | 954 | gyp | Python | src/pkg/mbedtls/mbedtls_static.gyp | dartino/fletch | aa7aba8473f405dd49b9c81b0faeeebfa6e94fc8 | [
"BSD-3-Clause"
] | 144 | 2016-01-29T00:14:04.000Z | 2021-02-20T09:36:11.000Z | src/pkg/mbedtls/mbedtls_static.gyp | akashfoss/sdk | aa7aba8473f405dd49b9c81b0faeeebfa6e94fc8 | [
"BSD-3-Clause"
] | 241 | 2016-01-27T15:37:56.000Z | 2016-09-09T07:34:07.000Z | src/pkg/mbedtls/mbedtls_static.gyp | akashfoss/sdk | aa7aba8473f405dd49b9c81b0faeeebfa6e94fc8 | [
"BSD-3-Clause"
] | 30 | 2016-02-23T18:14:54.000Z | 2020-10-18T13:49:34.000Z | # Copyright (c) 2016, the Dartino project authors. Please see the AUTHORS file
# for details. All rights reserved. Use of this source code is governed by a
# BSD-style license that can be found in the LICENSE.md file.
{
'targets': [
{
'target_name': 'mbedtls',
'type': 'static_library',
'standalone_static_library': 1,
'includes': [
'mbedtls_sources.gypi',
],
'cflags_c!': [
'-fvisibility=hidden',
],
'xcode_settings': {
'OTHER_CFLAGS': [
'-std=c99', # clang on mac does not like inline unless we explicitly use c99.
],
},
'defines': [
'MBEDTLS_CONFIG_FILE=<mbedtls_config.h>',
],
'sources': [
'bindings.c',
],
'include_dirs': [
'.',
],
'conditions': [
['OS=="linux"', {
'cflags': [
'-fomit-frame-pointer',
],
}],
],
},
],
}
| 23.268293 | 87 | 0.503145 |
4a20656e14131d07e791e6769d341e88a054878a | 2,926 | py | Python | pylith/materials/RheologyPoroelasticity.py | cehanagan/pylith | cf5c1c34040460a82f79b6eb54df894ed1b1ee93 | [
"MIT"
] | 93 | 2015-01-08T16:41:22.000Z | 2022-02-25T13:40:02.000Z | pylith/materials/RheologyPoroelasticity.py | sloppyjuicy/pylith | ac2c1587f87e45c948638b19560813d4d5b6a9e3 | [
"MIT"
] | 277 | 2015-02-20T16:27:35.000Z | 2022-03-30T21:13:09.000Z | pylith/materials/RheologyPoroelasticity.py | sloppyjuicy/pylith | ac2c1587f87e45c948638b19560813d4d5b6a9e3 | [
"MIT"
] | 71 | 2015-03-24T12:11:08.000Z | 2022-03-03T04:26:02.000Z | # ----------------------------------------------------------------------
#
# Brad T. Aagaard, U.S. Geological Survey
# Charles A. Williams, GNS Science
# Matthew G. Knepley, University at Buffalo
#
# This code was developed as part of the Computational Infrastructure
# for Geodynamics (http://geodynamics.org).
#
# Copyright (c) 2010-2021 University of California, Davis
#
# See LICENSE.md for license information.
#
# ----------------------------------------------------------------------
#
# @file pylith/materials/RheologyPoroelasticity.py
#
# @brief Python material for isotropic, linearly poroelastic, plane
# strain material.
#
# Factory: poroelasticity_rheology
from pylith.utils.PetscComponent import PetscComponent
from .materials import RheologyPoroelasticity as ModuleRheology
class RheologyPoroelasticity(PetscComponent, ModuleRheology):
"""Python object for bulk rheology of a poroelastic material.
FACTORY: poroelasticity_rheology
"""
import pythia.pyre.inventory
from pylith.topology.Subfield import subfieldFactory
from pylith.utils.EmptyBin import EmptyBin
auxiliarySubfields = pythia.pyre.inventory.facilityArray(
"auxiliary_subfields", itemFactory=subfieldFactory, factory=EmptyBin)
auxiliarySubfields.meta['tip'] = "Discretization information for physical properties and state variables."
# PUBLIC METHODS /////////////////////////////////////////////////////
def __init__(self, name):
"""Constructor.
"""
PetscComponent.__init__(self, name, facility="rheologyporoelasticity")
return
def preinitialize(self, problem):
from pylith.mpi.Communicator import mpi_comm_world
comm = mpi_comm_world()
if 0 == comm.rank:
self._info.log("Performing minimal initialization of poroelasticity rheology '%s'." %
self.aliases[-1])
self._createModuleObj()
return
def addAuxiliarySubfields(self, material, problem):
for subfield in self.auxiliarySubfields.components():
fieldName = subfield.aliases[-1]
descriptor = subfield.getTraitDescriptor("quadrature_order")
if hasattr(descriptor.locator, "source") and descriptor.locator.source == "default":
quadOrder = problem.defaults.quadOrder
else:
quadOrder = subfield.quadOrder
material.setAuxiliarySubfieldDiscretization(fieldName, subfield.basisOrder, quadOrder, subfield.dimension,
subfield.cellBasis, subfield.feSpace, subfield.isBasisContinuous)
return
# PRIVATE METHODS ////////////////////////////////////////////////////
def _createModuleObj(self):
"""Call constructor for module object for access to C++ object.
"""
raise NotImplementedError("Implement in derived class.")
# End of file
| 36.123457 | 121 | 0.633971 |
4a2065765f9d2d281d64bc5a3b2dfa43177e9cf8 | 3,709 | py | Python | mockredis/tests/test_list.py | optimizely/mockredis | 0f783f70e8ffcf2a65a0f41b691ac3f8135e82bc | [
"Apache-2.0"
] | null | null | null | mockredis/tests/test_list.py | optimizely/mockredis | 0f783f70e8ffcf2a65a0f41b691ac3f8135e82bc | [
"Apache-2.0"
] | null | null | null | mockredis/tests/test_list.py | optimizely/mockredis | 0f783f70e8ffcf2a65a0f41b691ac3f8135e82bc | [
"Apache-2.0"
] | 1 | 2021-02-14T12:04:33.000Z | 2021-02-14T12:04:33.000Z | from unittest import TestCase
from mockredis.redis import MockRedis
class TestList(TestCase):
"""
Tests for MockRedis list operations
"""
def setUp(self):
self.redis = MockRedis()
self.redis.flushdb()
def test_initially_empty(self):
"""
List is created empty.
"""
self.assertEqual(0, len(self.redis.redis['test_list']))
def test_llen(self):
self.assertEquals(0, self.redis.llen('test_list'))
self.redis.redis['test_list'] = ['val1', 'val2']
self.assertEquals(2, self.redis.llen('test_list'))
self.redis.redis['test_list'].pop(0)
self.assertEquals(1, self.redis.llen('test_list'))
self.redis.redis['test_list'].pop(0)
self.assertEquals(0, self.redis.llen('test_list'))
def test_lpop(self):
self.redis.redis['test_list'] = ['val1', 'val2']
self.assertEquals('val1', self.redis.lpop('test_list'))
self.assertEquals(1, len(self.redis.redis['test_list']))
self.assertEquals('val2', self.redis.lpop('test_list'))
self.assertEquals(0, len(self.redis.redis['test_list']))
self.assertIsNone(self.redis.lpop('test_list'))
def test_lpush(self):
"""
Insertion maintains order but not uniqueness.
"""
# lpush two values
self.redis.lpush('test_list', 'val1')
self.redis.lpush('test_list', 'val2')
# validate insertion
self.assertEquals(2, len(self.redis.redis['test_list']))
self.assertEquals('list', self.redis.type('test_list'))
self.assertEquals('val2', self.redis.redis['test_list'][0])
self.assertEquals('val1', self.redis.redis['test_list'][1])
# insert two more values with one repeated
self.redis.lpush('test_list', 'val1', 'val3')
# validate the update
self.assertEquals(4, len(self.redis.redis['test_list']))
self.assertEquals('list', self.redis.type('test_list'))
self.assertEquals('val3', self.redis.redis['test_list'][0])
self.assertEquals('val1', self.redis.redis['test_list'][1])
self.assertEquals('val2', self.redis.redis['test_list'][2])
self.assertEquals('val1', self.redis.redis['test_list'][3])
def test_rpop(self):
self.redis.redis['test_list'] = ['val1', 'val2']
self.assertEquals('val2', self.redis.rpop('test_list'))
self.assertEquals(1, len(self.redis.redis['test_list']))
self.assertEquals('val1', self.redis.rpop('test_list'))
self.assertEquals(0, len(self.redis.redis['test_list']))
self.assertIsNone(self.redis.lpop('test_list'))
def test_rpush(self):
"""
Insertion maintains order but not uniqueness.
"""
# lpush two values
self.redis.rpush('test_list', 'val1')
self.redis.rpush('test_list', 'val2')
# validate insertion
self.assertEquals(2, len(self.redis.redis['test_list']))
self.assertEquals('list', self.redis.type('test_list'))
self.assertEquals('val1', self.redis.redis['test_list'][0])
self.assertEquals('val2', self.redis.redis['test_list'][1])
# insert two more values with one repeated
self.redis.rpush('test_list', 'val1', 'val3')
# validate the update
self.assertEquals(4, len(self.redis.redis['test_list']))
self.assertEquals('list', self.redis.type('test_list'))
self.assertEquals('val1', self.redis.redis['test_list'][0])
self.assertEquals('val2', self.redis.redis['test_list'][1])
self.assertEquals('val1', self.redis.redis['test_list'][2])
self.assertEquals('val3', self.redis.redis['test_list'][3])
| 39.457447 | 67 | 0.626854 |
4a20663196b45eb7269e894a64509fad42443ed9 | 1,849 | py | Python | openpyxl/chart/tests/test_label.py | hfutxqd/openpyxl | 50d6e37e0592aac63bc1ffeaf7b13e3b863bb066 | [
"MIT"
] | null | null | null | openpyxl/chart/tests/test_label.py | hfutxqd/openpyxl | 50d6e37e0592aac63bc1ffeaf7b13e3b863bb066 | [
"MIT"
] | null | null | null | openpyxl/chart/tests/test_label.py | hfutxqd/openpyxl | 50d6e37e0592aac63bc1ffeaf7b13e3b863bb066 | [
"MIT"
] | null | null | null | # Copyright (c) 2010-2019 openpyxl
import pytest
from openpyxl.xml.functions import tostring, fromstring
from openpyxl.tests.helper import compare_xml
@pytest.fixture
def DataLabelList():
from ..label import DataLabelList
return DataLabelList
class TestDataLabeList:
def test_ctor(self, DataLabelList):
labels = DataLabelList(numFmt="0.0%")
xml = tostring(labels.to_tree())
expected = """
<dLbls>
<numFmt formatCode="0.0%" />
</dLbls>
"""
diff = compare_xml(xml, expected)
assert diff is None, diff
def test_from_xml(self, DataLabelList):
src = """
<dLbls>
<showLegendKey val="0"/>
<showVal val="0"/>
<showCatName val="0"/>
<showSerName val="0"/>
<showPercent val="0"/>
<showBubbleSize val="0"/>
</dLbls>
"""
node = fromstring(src)
dl = DataLabelList.from_tree(node)
assert dl.showLegendKey is False
assert dl.showVal is False
assert dl.showCatName is False
assert dl.showSerName is False
assert dl.showPercent is False
assert dl.showBubbleSize is False
@pytest.fixture
def DataLabel():
from ..label import DataLabel
return DataLabel
class TestDataLabel:
def test_ctor(self, DataLabel):
label = DataLabel()
xml = tostring(label.to_tree())
expected = """
<dLbl>
<idx val="0"></idx>
</dLbl>
"""
diff = compare_xml(xml, expected)
assert diff is None, diff
def test_from_xml(self, DataLabel):
src = """
<dLbl>
<idx val="6"></idx>
</dLbl>
"""
node = fromstring(src)
label = DataLabel.from_tree(node)
assert label == DataLabel(idx=6)
| 23.1125 | 55 | 0.572201 |
4a20667ce789ef9820d4dcf4abb7acb3071558bb | 399 | py | Python | educationsite/app1/migrations/0008_auto_20200909_1807.py | shreytrivedi002/new_ed_site_giglecos | 9d49fc29dfab7cb3ca6c1cb9334c655c48ab7598 | [
"MIT"
] | 2 | 2020-09-23T17:42:40.000Z | 2020-11-08T19:28:16.000Z | educationsite/app1/migrations/0008_auto_20200909_1807.py | shreytrivedi002/new_ed_site_giglecos | 9d49fc29dfab7cb3ca6c1cb9334c655c48ab7598 | [
"MIT"
] | null | null | null | educationsite/app1/migrations/0008_auto_20200909_1807.py | shreytrivedi002/new_ed_site_giglecos | 9d49fc29dfab7cb3ca6c1cb9334c655c48ab7598 | [
"MIT"
] | null | null | null | # Generated by Django 3.1 on 2020-09-09 12:37
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('app1', '0007_certificateissued'),
]
operations = [
migrations.AlterField(
model_name='certificateissued',
name='dateIssued',
field=models.DateTimeField(auto_now=True),
),
]
| 21 | 54 | 0.614035 |
4a2066e60af288a2dbdab437c08744e5824be66e | 4,686 | py | Python | src/k8s-extension/azext_k8s_extension/vendored_sdks/v2020_07_01_preview/_source_control_configuration_client.py | haroonf/azure-cli-extensions | 61c044d34c224372f186934fa7c9313f1cd3a525 | [
"MIT"
] | 1 | 2022-01-24T08:54:57.000Z | 2022-01-24T08:54:57.000Z | src/k8s-extension/azext_k8s_extension/vendored_sdks/v2020_07_01_preview/_source_control_configuration_client.py | haroonf/azure-cli-extensions | 61c044d34c224372f186934fa7c9313f1cd3a525 | [
"MIT"
] | 9 | 2022-03-25T19:35:49.000Z | 2022-03-31T06:09:47.000Z | src/k8s-extension/azext_k8s_extension/vendored_sdks/v2020_07_01_preview/_source_control_configuration_client.py | haroonf/azure-cli-extensions | 61c044d34c224372f186934fa7c9313f1cd3a525 | [
"MIT"
] | 1 | 2022-03-10T22:13:02.000Z | 2022-03-10T22:13:02.000Z | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from copy import deepcopy
from typing import Any, Optional, TYPE_CHECKING
from azure.core.rest import HttpRequest, HttpResponse
from azure.mgmt.core import ARMPipelineClient
from msrest import Deserializer, Serializer
from . import models
from ._configuration import SourceControlConfigurationClientConfiguration
from .operations import ExtensionsOperations, Operations, SourceControlConfigurationsOperations
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from azure.core.credentials import TokenCredential
class SourceControlConfigurationClient:
"""KubernetesConfiguration Client.
:ivar source_control_configurations: SourceControlConfigurationsOperations operations
:vartype source_control_configurations:
azure.mgmt.kubernetesconfiguration.v2020_07_01_preview.operations.SourceControlConfigurationsOperations
:ivar operations: Operations operations
:vartype operations:
azure.mgmt.kubernetesconfiguration.v2020_07_01_preview.operations.Operations
:ivar extensions: ExtensionsOperations operations
:vartype extensions:
azure.mgmt.kubernetesconfiguration.v2020_07_01_preview.operations.ExtensionsOperations
:param credential: Credential needed for the client to connect to Azure.
:type credential: ~azure.core.credentials.TokenCredential
:param subscription_id: The Azure subscription ID. This is a GUID-formatted string (e.g.
00000000-0000-0000-0000-000000000000).
:type subscription_id: str
:param base_url: Service URL. Default value is 'https://management.azure.com'.
:type base_url: str
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
"""
def __init__(
self,
credential: "TokenCredential",
subscription_id: str,
base_url: str = "https://management.azure.com",
**kwargs: Any
) -> None:
self._config = SourceControlConfigurationClientConfiguration(credential=credential, subscription_id=subscription_id, **kwargs)
self._client = ARMPipelineClient(base_url=base_url, config=self._config, **kwargs)
client_models = {k: v for k, v in models.__dict__.items() if isinstance(v, type)}
self._serialize = Serializer(client_models)
self._deserialize = Deserializer(client_models)
self._serialize.client_side_validation = False
self.source_control_configurations = SourceControlConfigurationsOperations(self._client, self._config, self._serialize, self._deserialize)
self.operations = Operations(self._client, self._config, self._serialize, self._deserialize)
self.extensions = ExtensionsOperations(self._client, self._config, self._serialize, self._deserialize)
def _send_request(
self,
request, # type: HttpRequest
**kwargs: Any
) -> HttpResponse:
"""Runs the network request through the client's chained policies.
>>> from azure.core.rest import HttpRequest
>>> request = HttpRequest("GET", "https://www.example.org/")
<HttpRequest [GET], url: 'https://www.example.org/'>
>>> response = client._send_request(request)
<HttpResponse: 200 OK>
For more information on this code flow, see https://aka.ms/azsdk/python/protocol/quickstart
:param request: The network request you want to make. Required.
:type request: ~azure.core.rest.HttpRequest
:keyword bool stream: Whether the response payload will be streamed. Defaults to False.
:return: The response of your network call. Does not do error handling on your response.
:rtype: ~azure.core.rest.HttpResponse
"""
request_copy = deepcopy(request)
request_copy.url = self._client.format_url(request_copy.url)
return self._client.send_request(request_copy, **kwargs)
def close(self):
# type: () -> None
self._client.close()
def __enter__(self):
# type: () -> SourceControlConfigurationClient
self._client.__enter__()
return self
def __exit__(self, *exc_details):
# type: (Any) -> None
self._client.__exit__(*exc_details)
| 45.057692 | 146 | 0.708493 |
4a20679a0e27fdaa0ee0be6366716c9214644f13 | 1,189 | py | Python | examples/pylab_examples/system_monitor.py | pierre-haessig/matplotlib | 0d945044ca3fbf98cad55912584ef80911f330c6 | [
"MIT",
"PSF-2.0",
"BSD-3-Clause"
] | 3 | 2015-11-16T07:22:28.000Z | 2016-11-11T17:55:14.000Z | examples/pylab_examples/system_monitor.py | pierre-haessig/matplotlib | 0d945044ca3fbf98cad55912584ef80911f330c6 | [
"MIT",
"PSF-2.0",
"BSD-3-Clause"
] | null | null | null | examples/pylab_examples/system_monitor.py | pierre-haessig/matplotlib | 0d945044ca3fbf98cad55912584ef80911f330c6 | [
"MIT",
"PSF-2.0",
"BSD-3-Clause"
] | 3 | 2017-05-31T01:42:22.000Z | 2020-06-23T13:57:49.000Z | #!/usr/bin/env python
# -*- noplot -*-
import time
from pylab import *
def get_memory():
"Simulate a function that returns system memory"
return 100*(0.5+0.5*sin(0.5*pi*time.time()))
def get_cpu():
"Simulate a function that returns cpu usage"
return 100*(0.5+0.5*sin(0.2*pi*(time.time()-0.25)))
def get_net():
"Simulate a function that returns network bandwidth"
return 100*(0.5+0.5*sin(0.7*pi*(time.time()-0.1)))
def get_stats():
return get_memory(), get_cpu(), get_net()
# turn interactive mode on for dynamic updates. If you aren't in
# interactive mode, you'll need to use a GUI event handler/timer.
ion()
fig = figure(1)
ax = subplot(111)
ind = arange(1,4)
pm, pc, pn = bar(ind, get_stats())
centers = ind + 0.5*pm.get_width()
pm.set_facecolor('r')
pc.set_facecolor('g')
pn.set_facecolor('b')
ax.set_xlim([0.5,4])
ax.set_xticks(centers)
ax.set_ylim([0,100])
ax.set_xticklabels(['Memory', 'CPU', 'Bandwidth'])
ax.set_ylabel('Percent usage')
ax.set_title('System Monitor')
for i in range(200): # run for a little while
m,c,n = get_stats()
pm.set_height(m)
pc.set_height(c)
pn.set_height(n)
ax.set_ylim([0,100])
draw()
| 22.018519 | 65 | 0.662742 |
4a2067aef0caa8c6e5471d82a501395530918b0f | 3,744 | py | Python | scripts/analysis/gather_black_holes.py | brittonsmith/yt_p3bh | 52dd594fb3ded4a88b2cb43ec18ab1c81e617baa | [
"BSD-3-Clause-Clear"
] | 1 | 2017-11-07T01:49:22.000Z | 2017-11-07T01:49:22.000Z | scripts/analysis/gather_black_holes.py | brittonsmith/yt_p3bh | 52dd594fb3ded4a88b2cb43ec18ab1c81e617baa | [
"BSD-3-Clause-Clear"
] | null | null | null | scripts/analysis/gather_black_holes.py | brittonsmith/yt_p3bh | 52dd594fb3ded4a88b2cb43ec18ab1c81e617baa | [
"BSD-3-Clause-Clear"
] | null | null | null | """
Gather Pop III star particles and fields necessary for computing Bondi-Hoyle accretion rates.
Usage: python gather_black_holes.py <simulation filename>
Example: python gather_black_holes.py rs_normal_bg1.h5
"""
from collections import defaultdict
import numpy as np
import os
import sys
import yt
yt.enable_parallelism()
from yt.funcs import \
ensure_dir
def find_stars(ds, filename, min_level=4):
fields=["particle_mass", "particle_index", "particle_type",
"particle_position_x", "particle_position_y", "particle_position_z",
"particle_velocity_x", "particle_velocity_y", "particle_velocity_z",
"creation_time", "metallicity_fraction"]
bfields = ["density", "temperature", "sound_speed",
"velocity_x", "velocity_y", "velocity_z", "dx"]
data = defaultdict(list)
Zcr = ds.parameters['PopIIIMetalCriticalFraction']
ns = 0
if yt.is_root():
pbar = yt.get_pbar("Reading grids", ds.index.grids.size, parallel=True)
for i, grid in enumerate(ds.index.grids):
if ds.index.grid_levels[i][0] >= min_level:
ct = grid["creation_time"]
stars = (ct > 0)
if not stars.any():
grid.clear_data()
continue
Zfr = grid["metallicity_fraction"]
stars &= (Zfr < Zcr)
if not stars.any():
grid.clear_data()
continue
pt = grid["particle_type"]
stars &= ((pt == 1) | (pt == 5))
if not stars.any():
grid.clear_data()
continue
# mass is multiplied by 1e-20 when main-sequence lifetime is over
mass = grid["particle_mass"].to("Msun")
mass[mass < 1e-9] *= 1e20
stars &= (((mass >= 25) & (mass <= 140)) | (mass >= 260))
if stars.any():
ns += stars.sum()
for field in fields:
data[field].append(grid[field][stars])
grid.clear_data()
if yt.is_root():
pbar.update(i)
if yt.is_root():
pbar.finish()
ndata = {}
if len(data["particle_mass"]) > 0:
for field in fields:
a = ds.arr(np.empty(ns), data[field][0].units)
ip = 0
for chunk in data[field]:
a[ip:ip+chunk.size] = chunk
ip += chunk.size
ndata[field] = a
yt.mylog.info("Getting %d point field values for %s." % (ndata["particle_mass"].size, str(ds)))
p = ds.arr([ndata["particle_position_%s" % ax] for ax in "xyz"])
p = np.rollaxis(p, 1)
bdatal = ds.find_field_values_at_points(bfields, p)
bdata = dict((field, bdatal[i]) for i, field in enumerate(bfields))
ndata.update(bdata)
con_args = ["center", "left_edge", "right_edge"]
extra_attrs = dict((field, getattr(ds, "domain_%s" % field))
for field in con_args)
extra_attrs["con_args"] = con_args
extra_attrs["data_type"] = "yt_data_container"
extra_attrs["container_type"] = "region"
extra_attrs["dimensionality"] = 3
ftypes = dict((field, "star") for field in fields + bfields)
yt.save_as_dataset(ds, filename, ndata,
field_types=ftypes, extra_attrs=extra_attrs)
if __name__ == "__main__":
data_dir = "black_holes_bh_new"
ensure_dir(data_dir)
es = yt.load(sys.argv[1])
fns = es.data["filename"].astype(str)
for fn in yt.parallel_objects(fns):
if not os.path.exists(fn):
continue
ds = yt.load(fn)
star_file = os.path.join(data_dir, "%s.h5" % ds)
if not os.path.exists(star_file):
find_stars(ds, star_file)
| 34.666667 | 103 | 0.576656 |
4a2067c2cbbb3bbec2e5393f15aad872af82bd09 | 4,283 | py | Python | src/data/DataLoader.py | austinsimeone/handwritten-text-recognition | 634f73e041d27196d6fbe1f950bd8af520ff872a | [
"MIT"
] | null | null | null | src/data/DataLoader.py | austinsimeone/handwritten-text-recognition | 634f73e041d27196d6fbe1f950bd8af520ff872a | [
"MIT"
] | null | null | null | src/data/DataLoader.py | austinsimeone/handwritten-text-recognition | 634f73e041d27196d6fbe1f950bd8af520ff872a | [
"MIT"
] | null | null | null | from __future__ import division
from __future__ import print_function
import numpy as np
from data import preproc as pp
import pandas as pd
import os
class Sample:
"single sample from the dataset"
def __init__(self, gtText, filePath):
self.gtText = gtText
self.filePath = filePath
class Batch:
"batch containing images and ground truth texts"
def __init__(self, gtTexts, imgs):
self.imgs = np.stack(imgs, axis=0)
self.gtTexts = gtTexts
class DataLoader:
"loads data which corresponds to IAM format"
def __init__(self, filePath, batchSize, imgSize, maxTextLen,train = True):
"loader for dataset at given location, preprocess images and text according to parameters"
#make the end of the filepathlist contain the / so that we can add the file name to the end of it
#will me augment the data in anyway?
self.dataAugmentation = False
#where does the index start - should always be 0
self.currIdx = 0
#self selected batch size
self.batchSize = batchSize
#X & Y coordinates of the png
self.imgSize = imgSize
#empty list of images to fill with the samples
self.samples = []
self.filePath = filePath
self.maxTextLen = maxTextLen
self.partitionNames = ['trainSample','validationSample']
self.train = train
df = pd.read_csv('/home/austin/Documents/Github/SimpleHTR/words_csv/2020-06-03 11:39:42.000901.csv')
chars = set()
for index, row in df.iterrows():
# filename: part1-part2-part3 --> part1/part1-part2/part1-part2-part3.png
fileName = row['file_name']
# GT text are columns starting at 9
gtText = row['truth']
chars = chars.union(set(list(gtText)))
# put sample into list
self.samples.append(Sample(gtText, fileName))
# split into training and validation set: 95% - 5%
splitIdx = int(0.95 * len(self.samples))
trainSamples = self.samples[:splitIdx]
validationSamples = self.samples[splitIdx:]
# put words into lists
trainWords = [x.gtText for x in trainSamples]
validationWords = [x.gtText for x in validationSamples]
self.img_partitions = [trainSamples,validationSamples]
self.word_partitions = [trainWords,validationWords]
# number of randomly chosen samples per epoch for training
self.numTrainSamplesPerEpoch = 25000
# list of all chars in dataset
self.charList = sorted(list(chars))
self.train_steps = int(np.ceil(len(self.word_partitions[0]) / self.batchSize))
self.valid_steps = int(np.ceil(len(self.word_partitions[1]) / self.batchSize))
def truncateLabel(self, text):
# ctc_loss can't compute loss if it cannot find a mapping between text label and input
# labels. Repeat letters cost double because of the blank symbol needing to be inserted.
# If a too-long label is provided, ctc_loss returns an infinite gradient
cost = 0
for i in range(len(text)):
if i != 0 and text[i] == text[i-1]:
cost += 2
else:
cost += 1
if cost > self.maxTextLen:
return text[:i]
return text
def getIteratorInfo(self):
"current batch index and overall number of batches"
return (self.currIdx // self.batchSize + 1, len(self.samples) // self.batchSize)
def hasNext(self):
"iterator"
return self.currIdx + self.batchSize <= len(self.samples)
def getNext(self):
"iterator"
if self.train == True:
j = 0
else:
j = 1
batchRange = range(self.currIdx, self.currIdx + self.batchSize)
gtTexts = [self.img_partitions[j][i].gtText for i in batchRange]
imgs = [pp.preprocess(os.path.join(self.filePath,self.img_partitions[j][i].filePath),self.imgSize) for i in batchRange]
self.currIdx += self.batchSize
return Batch(gtTexts, imgs)
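# Hedged usage sketch (not part of the original file and never called; the path
# and sizes below are made up, and the constructor also reads its hard-coded
# CSV of labels): how DataLoader is typically driven during training.
def _example_training_loop():
    loader = DataLoader(filePath='/path/to/word/images', batchSize=50,
                        imgSize=(128, 32), maxTextLen=32, train=True)
    while loader.hasNext():
        batch = loader.getNext()          # preprocessed images + ground truth
        _ = (batch.imgs, batch.gtTexts)   # feed these into the model here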
| 36.29661 | 128 | 0.608219 |
4a2067dbde4c48be365d48d7d6c83c39f1bbb5e8 | 1,283 | py | Python | refbuka.py | gumblex/refine-buka | 815afca8b0c0f2cea6b91ce095c815b157e0b165 | [
"MIT"
] | 49 | 2015-01-06T15:50:49.000Z | 2020-12-06T05:39:12.000Z | refbuka.py | gumblex/refine-buka | 815afca8b0c0f2cea6b91ce095c815b157e0b165 | [
"MIT"
] | null | null | null | refbuka.py | gumblex/refine-buka | 815afca8b0c0f2cea6b91ce095c815b157e0b165 | [
"MIT"
] | 13 | 2015-01-19T06:21:21.000Z | 2021-08-03T08:22:15.000Z | import re
import struct
import os
import sys
import time
import json
USAGE = """
python refine.py INPUT_DIR OUTPUT_DIR
"""
def refine(file_name,output_dir):
if not os.path.isdir(output_dir):
print(USAGE)
raise IOError('output dir `%s` not found' %output_dir)
with open(file_name, 'rb') as f:
buff = f.read(10000)
toc = re.findall(r'\x00([\x00-\xff]{8})[-_a-zA-Z0-9]*(\d{4}\.jpg)',buff)
for index in toc:
pos, size = struct.unpack('<II', index[0])
img = open(os.path.join(output_dir,index[1]),'wb')
f.seek(pos)
data = f.read(size)
img.write(data)
img.close()
def extract_dir(input_dir, output_dir):
buka_files_name = [ f for f in os.listdir(input_dir) if f.endswith('.buka') ]
for buka_fn in buka_files_name:
image_dir_path = os.path.join(output_dir,buka_fn.replace('.buka', ''))
if not os.path.exists(image_dir_path):
os.mkdir(image_dir_path)
refine(os.path.join(input_dir,buka_fn), image_dir_path)
if __name__ == '__main__':
if len(sys.argv) != 3:
print(USAGE)
else:
        print(time.strftime('%H:%M:%S'))
        extract_dir(sys.argv[1], sys.argv[2])
        print(time.strftime('%H:%M:%S'))
| 30.547619 | 81 | 0.597818 |
4a206836acc2080c7ec97e4e97e35cc7968730e5 | 9,517 | py | Python | detecting-invisible-people/tools/convert_sort_output_to_topk_format.py | lv1turtle/Occlusion-object-tracking | bda349332ce904f5f08b694ea25e3e79abc997bc | [
"MIT"
] | 26 | 2021-10-30T15:08:56.000Z | 2022-03-31T14:10:13.000Z | detecting-invisible-people/tools/convert_sort_output_to_topk_format.py | lv1turtle/Occlusion-object-tracking | bda349332ce904f5f08b694ea25e3e79abc997bc | [
"MIT"
] | null | null | null | detecting-invisible-people/tools/convert_sort_output_to_topk_format.py | lv1turtle/Occlusion-object-tracking | bda349332ce904f5f08b694ea25e3e79abc997bc | [
"MIT"
] | 4 | 2021-10-30T02:13:29.000Z | 2022-03-24T14:54:16.000Z | import json
import argparse
import glob
import cv2
import os
import numpy as np
import time
from PIL import Image
from scipy.stats import norm, chi2
COLORMAP = {i: (np.random.randint(0, 255),
np.random.randint(0, 255),
np.random.randint(0, 255))
for i in range(1,1000)}
def plot_cov_ellipse(cov, nstd=None, q=None):
"""
Parameters
----------
cov : (2, 2) array
Covariance matrix.
q : float, optional
Confidence level, should be in (0, 1)
nsig : int, optional
Confidence level in unit of standard deviations.
E.g. 1 stands for 68.3% and 2 stands for 95.4%.
Returns
-------
width, height, rotation :
The lengths of two axises and the rotation angle in degree
for the ellipse.
"""
nsig = nstd
if q is not None:
q = np.asarray(q)
elif nsig is not None:
q = 2 * norm.cdf(nsig) - 1
else:
raise ValueError('One of `q` and `nsig` should be specified.')
r2 = chi2.ppf(q, 2)
val, vec = np.linalg.eigh(cov)
width, height = 2 * np.sqrt(val[:, None] * r2)
rotation = np.degrees(np.arctan2(*vec[::-1, 0]))
return height, width, rotation
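# Hedged sketch (not part of the original script and never called; the numbers
# are made up): feeding a 2x2 covariance through plot_cov_ellipse above to get
# the ellipse axis lengths and rotation used later for sampling box centers.
def _example_cov_to_ellipse():
    cov = np.array([[4.0, 1.0],
                    [1.0, 2.0]])
    # nstd=1 maps to q = 2*norm.cdf(1) - 1, i.e. roughly a 68.3% confidence ellipse.
    height, width, rotation = plot_cov_ellipse(cov, nstd=1)
    return height, width, rotation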
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--input", type=str, required=True)
parser.add_argument("--split", type=str,
help="can be one of 'sort' or 'gt'", required=True)
parser.add_argument("--interp", type=int, required=True)
parser.add_argument("--output-json", type=str, required=True)
parser.add_argument("--thresh", action="store_true")
parser.add_argument("--k", type=int,
help="how many samples to draw from the covariance")
parser.add_argument("--debug", action="store_true")
parser.add_argument("--truncate", action="store_true")
args = parser.parse_args()
# ref = json.load(open('/data/all/coco/annotations/instances_val2017.json'))
# tracks = sorted(glob.glob('../../../ECCV/deep_sort/results/outputminconf1e4MOT17/*FRCNN.*'))
#tracks = sorted(glob.glob('/data/tkhurana/MOT17/train/*FRCNN/gt_interp/gt_interp.txt'))
# tracks = sorted(glob.glob('/data/tkhurana/MOT17/train/*FRCNN/gt/gt_oracle_links_mot17det_trainall_cls127_occl05_interp.txt'))
tracks = sorted(glob.glob(args.input))
print(args.input)
# tracks = sorted(glob.glob('/data2/tkhurana/DIP_EVAL/train/*/det/detCTrack.txt'))
# data = sorted(glob.glob('/data/tkhurana/MOT17/train/*DPM*/img1/*.*'))
data = sorted(glob.glob('/data2/tkhurana/DIP_EVAL/train/*/img1/*.*'))
# basepath = '/data/tkhurana/MOT17/train/'
basepath = '/data2/tkhurana/DIP_EVAL/train/'
image_to_id = {}
frame_to_img_id = {}
for i, d in enumerate(data):
# if '01_' not in d and '02_' not in d and '03_' not in d:
# continue
# if '-02-' not in d and '-04-' not in d and '-09-' not in d:
# continue
if "Huaqiangbei" in d:
continue
img = Image.open(d)
shape = img.size
frame = int(d.split('/')[-1][:-4])
seq = d.split('/')[5]
depth_map = np.load(d.replace("SDP", "FRCNN").replace("DPM", "FRCNN").replace('/img1/', '/img1Depth/')[:-4] + '.npy')
img_name = d[len(basepath):]
if seq not in frame_to_img_id:
frame_to_img_id[seq] = []
image_to_id[img_name] = i
frame_to_img_id[seq].append({'frame_id': frame, 'id': i, 'image': img_name, 'shape': shape, 'depth_map': depth_map})
result = []
widths, heights, box_heights = [], [], []
print("done")
for tr in tracks:
# print("1")
# if '-02-' not in tr and '-04-' not in tr and '-09-' not in tr:
# continue
# if '-05-' not in tr and '-10-' not in tr and '-11-' not in tr and '-13-' not in tr:
# continue
if "Huaqiangbei" in tr:
continue
# if '01_' not in tr and '02_' not in tr and '03_' not in tr:
# continue
if 'eval' in tr:
continue
print(args.interp, tr)
if args.interp == 1 and "_interp.txt" not in tr:
continue
if args.split == 'sort':
seq = tr.split('/')[-1][:-4]
elif args.split == 'gt':
seq = tr.split('/')[5]
if args.interp == 1:
seq = seq[:seq.rfind('_')]
print(seq)
# seq = tr.split('/')[5]
lines = open(tr).readlines()
for dssd, line in enumerate(lines):
# print("Doing", dssd, len(lines))
start = time.time()
i = 0
fields = line.strip().split(',')[:7]
covariance = line.strip().split(',')[-4:]
frame_id, track_id, x0, y0, w, h, score = fields
xx, xz, zx, zz = covariance
frame_id = int(frame_id)
# if frame_id not in range(1,3500,15):
correct_frame = [f for f in frame_to_img_id[seq] if f['frame_id'] == frame_id][0]
# continue
track_id = int(track_id)
x0 = float(x0)
y0 = float(y0)
w = float(w)
h = float(h)
score = float(score)
depth = score
depth_map = correct_frame["depth_map"]
x1 = x0 + w
y1 = y0 + h
topk = []
topk.append([x0, y0, w, h])
# use correct_frame instead of image
z0 = int(depth * correct_frame["shape"][1])
center_coordinates = (int((x0 + x1) / 2), z0)
cov = np.array([[xx, xz], [zx, zz]], dtype='float')
if float(xx) < 0 or float(zz) < 0:
continue
# print(cov)
width, height, angle = plot_cov_ellipse(cov, nstd=1)
# print(width, height)
if width <= 0 or height <= 0:
continue
width = int(width/2)
height = int(correct_frame["shape"][1]*height/2)
widths.append(width)
heights.append(height)
box_heights.append(h)
k = 1
#print("time 1", time.time() - start)
start = time.time()
while k <= args.k - 1:
i += 1
if i == 500:
break
pointx, pointz = np.random.normal(center_coordinates, [width, height])
pointx, pointz = np.round([pointx, pointz], 2)
# print("pointx pointz", pointx, pointz)
if pointx < 0 or pointz < 0:
continue
scalex = correct_frame["shape"][0] / depth_map.shape[1]
scaley = correct_frame["shape"][1] / depth_map.shape[0]
pointx_ = int(pointx / scalex)
y0_ = int(y0 / scaley)
w_ = int(w / scalex)
h_ = int(h / scaley)
pointx_ = min(pointx_, depth_map.shape[1]-1)
apparent_depth = correct_frame["shape"][1] * np.mean(depth_map[y0_:y0_+h_, pointx_ - int(w_ / 2):pointx_ + int(w_ / 2)])
if pointz < apparent_depth and args.truncate: # pointz
topk.append([pointx - int(w / 2), y0, w, h])
k += 1
elif not args.truncate:
topk.append([pointx - int(w / 2), y0, w, h])
k += 1
# apparent_depth = correct_frame["shape"][1] * depth_map[y0_, pointx_]
# if pointz < apparent_depth and args.truncate: # pointz
# topk.append([pointx, y0, w, h])
# k += 1
# elif not args.truncate:
# topk.append([pointx, y0, w, h])
# k += 1
while len(topk) < args.k:
topk.append([x0, y0, w, h])
# print("time 2", time.time() - start)
# print(i)
if args.debug:
path = os.path.join(basepath, correct_frame["image"])
impath = os.path.join("/data/tkhurana/MOT17/topk_truncate_{}_debug".format(args.truncate), correct_frame["image"])
if os.path.exists(impath):
debugimage = cv2.imread(impath)
else:
debugimage = cv2.imread(path)
for box in topk:
cv2.rectangle(debugimage, (int(box[0]), int(box[1])), (int(box[0] + box[2]), int(box[1] + box[3])), COLORMAP[track_id % 1000], 2)
if not os.path.exists(os.path.dirname(impath)):
os.makedirs(os.path.dirname(impath))
cv2.imwrite(impath, debugimage)
if score <= 0.0 and args.thresh:
# print("skipping")
continue
if not args.thresh:
score = 1.0
# else:
# score = 1.0
vis = float(line.strip().split(',')[-2])
# print(score)
# print(frame_id)
correct_frame_id = correct_frame["id"]
result.append({'image_id': correct_frame_id, 'category_id': 1, 'bbox': topk, 'score': score})
# print(len(result))
widths = np.array(widths)
heights = np.array(heights)
print("mean median widths", np.mean(widths), np.median(widths))
print("mean median heights", np.mean(heights), np.median(heights))
print("mean median boxheights", np.mean(box_heights), np.median(box_heights))
with open(args.output_json, 'w') as f:
json.dump(result, f)
| 37.616601 | 149 | 0.524955 |
4a206981c3c9a541b2ca41b8f6e46f39e99fdd69 | 1,327 | py | Python | script/scaffold/templates/config_flow_discovery/integration/__init__.py | domwillcode/home-assistant | f170c80bea70c939c098b5c88320a1c789858958 | [
"Apache-2.0"
] | 23 | 2017-11-15T21:03:53.000Z | 2021-03-29T21:33:48.000Z | script/scaffold/templates/config_flow_discovery/integration/__init__.py | domwillcode/home-assistant | f170c80bea70c939c098b5c88320a1c789858958 | [
"Apache-2.0"
] | 56 | 2020-08-03T07:30:54.000Z | 2022-03-31T06:02:04.000Z | script/scaffold/templates/config_flow_discovery/integration/__init__.py | klauern/home-assistant-core | c18ba6aec0627e6afb6442c678edb5ff2bb17db6 | [
"Apache-2.0"
] | 10 | 2018-01-01T00:12:51.000Z | 2021-12-21T23:08:05.000Z | """The NEW_NAME integration."""
import asyncio
import voluptuous as vol
from homeassistant.config_entries import ConfigEntry
from homeassistant.core import HomeAssistant
from .const import DOMAIN
CONFIG_SCHEMA = vol.Schema({DOMAIN: vol.Schema({})}, extra=vol.ALLOW_EXTRA)
# TODO List the platforms that you want to support.
# For your initial PR, limit it to 1 platform.
PLATFORMS = ["light"]
async def async_setup(hass: HomeAssistant, config: dict):
"""Set up the NEW_NAME component."""
return True
async def async_setup_entry(hass: HomeAssistant, entry: ConfigEntry):
"""Set up NEW_NAME from a config entry."""
# TODO Store an API object for your platforms to access
# hass.data[DOMAIN][entry.entry_id] = MyApi(...)
for component in PLATFORMS:
hass.async_create_task(
hass.config_entries.async_forward_entry_setup(entry, component)
)
return True
async def async_unload_entry(hass: HomeAssistant, entry: ConfigEntry):
"""Unload a config entry."""
unload_ok = all(
await asyncio.gather(
*[
hass.config_entries.async_forward_entry_unload(entry, component)
for component in PLATFORMS
]
)
)
if unload_ok:
hass.data[DOMAIN].pop(entry.entry_id)
return unload_ok
| 26.54 | 80 | 0.68425 |
4a206a00bdba03d17dd833941e5acbf3d677937c | 68,782 | py | Python | python/ray/services.py | brechtmann/ray | 0c76ebd676f794847ea990aecced22b88717d09e | [
"Apache-2.0"
] | null | null | null | python/ray/services.py | brechtmann/ray | 0c76ebd676f794847ea990aecced22b88717d09e | [
"Apache-2.0"
] | null | null | null | python/ray/services.py | brechtmann/ray | 0c76ebd676f794847ea990aecced22b88717d09e | [
"Apache-2.0"
] | null | null | null | import collections
import errno
import json
import logging
import multiprocessing
import os
import random
import socket
import subprocess
import sys
import time
import redis
import colorama
# Ray modules
import ray
import ray.ray_constants as ray_constants
import psutil
resource = None
if sys.platform != "win32":
import resource
# True if processes are run in the valgrind profiler.
RUN_RAYLET_PROFILER = False
RUN_PLASMA_STORE_PROFILER = False
# Location of the redis server and module.
RAY_HOME = os.path.join(os.path.dirname(__file__), "../..")
REDIS_EXECUTABLE = os.path.join(
os.path.abspath(os.path.dirname(__file__)),
"core/src/ray/thirdparty/redis/src/redis-server")
REDIS_MODULE = os.path.join(
os.path.abspath(os.path.dirname(__file__)),
"core/src/ray/gcs/redis_module/libray_redis_module.so")
# Location of the credis server and modules.
# credis will be enabled if the environment variable RAY_USE_NEW_GCS is set.
CREDIS_EXECUTABLE = os.path.join(
os.path.abspath(os.path.dirname(__file__)),
"core/src/credis/redis/src/redis-server")
CREDIS_MASTER_MODULE = os.path.join(
os.path.abspath(os.path.dirname(__file__)),
"core/src/credis/build/src/libmaster.so")
CREDIS_MEMBER_MODULE = os.path.join(
os.path.abspath(os.path.dirname(__file__)),
"core/src/credis/build/src/libmember.so")
# Location of the plasma object store executable.
PLASMA_STORE_EXECUTABLE = os.path.join(
os.path.abspath(os.path.dirname(__file__)),
"core/src/plasma/plasma_store_server")
# Location of the raylet executables.
RAYLET_MONITOR_EXECUTABLE = os.path.join(
os.path.abspath(os.path.dirname(__file__)),
"core/src/ray/raylet/raylet_monitor")
RAYLET_EXECUTABLE = os.path.join(
os.path.abspath(os.path.dirname(__file__)), "core/src/ray/raylet/raylet")
GCS_SERVER_EXECUTABLE = os.path.join(
os.path.abspath(os.path.dirname(__file__)), "core/src/ray/gcs/gcs_server")
DEFAULT_JAVA_WORKER_CLASSPATH = [
os.path.join(
os.path.abspath(os.path.dirname(__file__)), "../../../build/java/*"),
]
# Logger for this module. It should be configured at the entry point
# into the program using Ray. Ray provides a default configuration at
# entry/init points.
logger = logging.getLogger(__name__)
ProcessInfo = collections.namedtuple("ProcessInfo", [
"process", "stdout_file", "stderr_file", "use_valgrind", "use_gdb",
"use_valgrind_profiler", "use_perftools_profiler", "use_tmux"
])
def address(ip_address, port):
return ip_address + ":" + str(port)
def new_port():
return random.randint(10000, 65535)
def include_java_from_redis(redis_client):
"""This is used for query include_java bool from redis.
Args:
redis_client (StrictRedis): The redis client to GCS.
Returns:
True if this cluster backend enables Java worker.
"""
return redis_client.get("INCLUDE_JAVA") == b"1"
def find_redis_address_or_die():
pids = psutil.pids()
redis_addresses = set()
for pid in pids:
try:
proc = psutil.Process(pid)
# HACK: Workaround for UNIX idiosyncrasy
# Normally, cmdline() is supposed to return the argument list.
# But it in some cases (such as when setproctitle is called),
# an arbitrary string resembling a command-line is stored in
# the first argument.
# Explanation: https://unix.stackexchange.com/a/432681
# More info: https://github.com/giampaolo/psutil/issues/1179
for arglist in proc.cmdline():
# Given we're merely seeking --redis-address, we just split
# every argument on spaces for now.
for arg in arglist.split(" "):
# TODO(ekl): Find a robust solution for locating Redis.
if arg.startswith("--redis-address="):
addr = arg.split("=")[1]
redis_addresses.add(addr)
except psutil.AccessDenied:
pass
except psutil.NoSuchProcess:
pass
if len(redis_addresses) > 1:
raise ConnectionError(
"Found multiple active Ray instances: {}. ".format(redis_addresses)
+ "Please specify the one to connect to by setting `address`.")
elif not redis_addresses:
raise ConnectionError(
"Could not find any running Ray instance. "
"Please specify the one to connect to by setting `address`.")
return redis_addresses.pop()
def get_address_info_from_redis_helper(redis_address,
node_ip_address,
redis_password=None):
redis_ip_address, redis_port = redis_address.split(":")
# For this command to work, some other client (on the same machine as
# Redis) must have run "CONFIG SET protected-mode no".
redis_client = create_redis_client(redis_address, password=redis_password)
client_table = ray.state._parse_client_table(redis_client)
if len(client_table) == 0:
raise RuntimeError(
"Redis has started but no raylets have registered yet.")
relevant_client = None
for client_info in client_table:
client_node_ip_address = client_info["NodeManagerAddress"]
if (client_node_ip_address == node_ip_address
or (client_node_ip_address == "127.0.0.1"
and redis_ip_address == get_node_ip_address())):
relevant_client = client_info
break
if relevant_client is None:
raise RuntimeError(
"Redis has started but no raylets have registered yet.")
return {
"object_store_address": relevant_client["ObjectStoreSocketName"],
"raylet_socket_name": relevant_client["RayletSocketName"],
"node_manager_port": relevant_client["NodeManagerPort"]
}
def get_address_info_from_redis(redis_address,
node_ip_address,
num_retries=5,
redis_password=None):
counter = 0
while True:
try:
return get_address_info_from_redis_helper(
redis_address, node_ip_address, redis_password=redis_password)
except Exception:
if counter == num_retries:
raise
# Some of the information may not be in Redis yet, so wait a little
# bit.
logger.warning(
"Some processes that the driver needs to connect to have "
"not registered with Redis, so retrying. Have you run "
"'ray start' on this node?")
time.sleep(1)
counter += 1
def get_webui_url_from_redis(redis_client):
webui_url = redis_client.hmget("webui", "url")[0]
return ray.utils.decode(webui_url) if webui_url is not None else None
def remaining_processes_alive():
"""See if the remaining processes are alive or not.
Note that this ignores processes that have been explicitly killed,
e.g., via a command like node.kill_raylet().
Returns:
True if the remaining processes started by ray.init() are alive and
False otherwise.
Raises:
Exception: An exception is raised if the processes were not started by
ray.init().
"""
if ray.worker._global_node is None:
raise RuntimeError("This process is not in a position to determine "
"whether all processes are alive or not.")
return ray.worker._global_node.remaining_processes_alive()
def validate_redis_address(address, redis_address):
"""Validates redis address parameter and splits it into host/ip components.
We temporarily support both 'address' and 'redis_address', so both are
handled here.
Returns:
redis_address: string containing the full <host:port> address.
redis_ip: string representing the host portion of the address.
redis_port: integer representing the port portion of the address.
Raises:
ValueError: if both address and redis_address were specified or the
address was malformed.
"""
if redis_address == "auto":
raise ValueError("auto address resolution not supported for "
"redis_address parameter. Please use address.")
if address:
if redis_address:
raise ValueError(
"Both address and redis_address specified. Use only address.")
if address == "auto":
address = find_redis_address_or_die()
redis_address = address
redis_address = address_to_ip(redis_address)
redis_address_parts = redis_address.split(":")
if len(redis_address_parts) != 2:
raise ValueError("Malformed address. Expected '<host>:<port>'.")
redis_ip = redis_address_parts[0]
try:
redis_port = int(redis_address_parts[1])
except ValueError:
raise ValueError("Malformed address port. Must be an integer.")
if redis_port < 1024 or redis_port > 65535:
raise ValueError("Invalid address port. Must "
"be between 1024 and 65535.")
return redis_address, redis_ip, redis_port
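# Hedged sketch (illustrative only and never called; the address is made up):
# validate_redis_address accepts either `address` or the legacy `redis_address`
# argument and returns the normalized "<host>:<port>" string plus its parts.
def _example_validate_redis_address():
    full, ip, port = validate_redis_address(
        address="192.168.1.10:6379", redis_address=None)
    # full == "192.168.1.10:6379", ip == "192.168.1.10", port == 6379
    return full, ip, port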
def address_to_ip(address):
"""Convert a hostname to a numerical IP addresses in an address.
This should be a no-op if address already contains an actual numerical IP
address.
Args:
address: This can be either a string containing a hostname (or an IP
address) and a port or it can be just an IP address.
Returns:
The same address but with the hostname replaced by a numerical IP
address.
"""
address_parts = address.split(":")
ip_address = socket.gethostbyname(address_parts[0])
# Make sure localhost isn't resolved to the loopback ip
if ip_address == "127.0.0.1":
ip_address = get_node_ip_address()
return ":".join([ip_address] + address_parts[1:])
def get_node_ip_address(address="8.8.8.8:53"):
"""Determine the IP address of the local node.
Args:
address (str): The IP address and port of any known live service on the
network you care about.
Returns:
The IP address of the current node.
"""
ip_address, port = address.split(":")
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
try:
# This command will raise an exception if there is no internet
# connection.
s.connect((ip_address, int(port)))
node_ip_address = s.getsockname()[0]
except OSError as e:
node_ip_address = "127.0.0.1"
# [Errno 101] Network is unreachable
if e.errno == errno.ENETUNREACH:
try:
# try get node ip address from host name
host_name = socket.getfqdn(socket.gethostname())
node_ip_address = socket.gethostbyname(host_name)
except Exception:
pass
finally:
s.close()
return node_ip_address
def create_redis_client(redis_address, password=None):
"""Create a Redis client.
    Args:
        redis_address (str): The IP address and port of the Redis server.
        password (str): The password of the Redis server.
Returns:
A Redis client.
"""
redis_ip_address, redis_port = redis_address.split(":")
# For this command to work, some other client (on the same machine
# as Redis) must have run "CONFIG SET protected-mode no".
return redis.StrictRedis(
host=redis_ip_address, port=int(redis_port), password=password)
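# Hedged sketch (not part of the original module and never called; the address
# is made up): the client returned by create_redis_client is a regular
# redis.StrictRedis handle, so a simple liveness check looks like this.
def _example_redis_ping():
    client = create_redis_client("192.168.1.10:6379", password=None)
    return client.ping()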
def start_ray_process(command,
process_type,
fate_share,
env_updates=None,
cwd=None,
use_valgrind=False,
use_gdb=False,
use_valgrind_profiler=False,
use_perftools_profiler=False,
use_tmux=False,
stdout_file=None,
stderr_file=None,
pipe_stdin=False):
"""Start one of the Ray processes.
TODO(rkn): We need to figure out how these commands interact. For example,
it may only make sense to start a process in gdb if we also start it in
tmux. Similarly, certain combinations probably don't make sense, like
simultaneously running the process in valgrind and the profiler.
Args:
command (List[str]): The command to use to start the Ray process.
process_type (str): The type of the process that is being started
(e.g., "raylet").
fate_share: If true, the child will be killed if its parent (us) dies.
True must only be passed after detection of this functionality.
env_updates (dict): A dictionary of additional environment variables to
run the command with (in addition to the caller's environment
variables).
cwd (str): The directory to run the process in.
use_valgrind (bool): True if we should start the process in valgrind.
use_gdb (bool): True if we should start the process in gdb.
use_valgrind_profiler (bool): True if we should start the process in
the valgrind profiler.
use_perftools_profiler (bool): True if we should profile the process
using perftools.
use_tmux (bool): True if we should start the process in tmux.
stdout_file: A file handle opened for writing to redirect stdout to. If
no redirection should happen, then this should be None.
stderr_file: A file handle opened for writing to redirect stderr to. If
no redirection should happen, then this should be None.
pipe_stdin: If true, subprocess.PIPE will be passed to the process as
stdin.
Returns:
Information about the process that was started including a handle to
the process that was started.
"""
# Detect which flags are set through environment variables.
valgrind_env_var = "RAY_{}_VALGRIND".format(process_type.upper())
if os.environ.get(valgrind_env_var) == "1":
logger.info("Detected environment variable '%s'.", valgrind_env_var)
use_valgrind = True
valgrind_profiler_env_var = "RAY_{}_VALGRIND_PROFILER".format(
process_type.upper())
if os.environ.get(valgrind_profiler_env_var) == "1":
logger.info("Detected environment variable '%s'.",
valgrind_profiler_env_var)
use_valgrind_profiler = True
perftools_profiler_env_var = "RAY_{}_PERFTOOLS_PROFILER".format(
process_type.upper())
if os.environ.get(perftools_profiler_env_var) == "1":
logger.info("Detected environment variable '%s'.",
perftools_profiler_env_var)
use_perftools_profiler = True
tmux_env_var = "RAY_{}_TMUX".format(process_type.upper())
if os.environ.get(tmux_env_var) == "1":
logger.info("Detected environment variable '%s'.", tmux_env_var)
use_tmux = True
gdb_env_var = "RAY_{}_GDB".format(process_type.upper())
if os.environ.get(gdb_env_var) == "1":
logger.info("Detected environment variable '%s'.", gdb_env_var)
use_gdb = True
if sum(
[use_gdb, use_valgrind, use_valgrind_profiler, use_perftools_profiler
]) > 1:
raise ValueError(
"At most one of the 'use_gdb', 'use_valgrind', "
"'use_valgrind_profiler', and 'use_perftools_profiler' flags can "
"be used at a time.")
if env_updates is None:
env_updates = {}
if not isinstance(env_updates, dict):
raise ValueError("The 'env_updates' argument must be a dictionary.")
modified_env = os.environ.copy()
modified_env.update(env_updates)
if use_gdb:
if not use_tmux:
raise ValueError(
"If 'use_gdb' is true, then 'use_tmux' must be true as well.")
# TODO(suquark): Any better temp file creation here?
gdb_init_path = os.path.join(
ray.utils.get_ray_temp_dir(), "gdb_init_{}_{}".format(
process_type, time.time()))
ray_process_path = command[0]
ray_process_args = command[1:]
run_args = " ".join(["'{}'".format(arg) for arg in ray_process_args])
with open(gdb_init_path, "w") as gdb_init_file:
gdb_init_file.write("run {}".format(run_args))
command = ["gdb", ray_process_path, "-x", gdb_init_path]
if use_valgrind:
command = [
"valgrind", "--track-origins=yes", "--leak-check=full",
"--show-leak-kinds=all", "--leak-check-heuristics=stdstring",
"--error-exitcode=1"
] + command
if use_valgrind_profiler:
command = ["valgrind", "--tool=callgrind"] + command
if use_perftools_profiler:
modified_env["LD_PRELOAD"] = os.environ["PERFTOOLS_PATH"]
modified_env["CPUPROFILE"] = os.environ["PERFTOOLS_LOGFILE"]
if use_tmux:
# The command has to be created exactly as below to ensure that it
# works on all versions of tmux. (Tested with tmux 1.8-5, travis'
# version, and tmux 2.1)
command = ["tmux", "new-session", "-d", "{}".format(" ".join(command))]
if fate_share:
assert ray.utils.detect_fate_sharing_support(), (
"kernel-level fate-sharing must only be specified if "
"detect_fate_sharing_support() has returned True")
def preexec_fn():
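        # Runs in the child between fork() and exec(): block SIGINT so an
        # interrupt delivered to the parent's process group does not also kill
        # the child, and opt into kernel-level fate sharing on Linux when
        # requested.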
import signal
signal.pthread_sigmask(signal.SIG_BLOCK, {signal.SIGINT})
if fate_share and sys.platform.startswith("linux"):
ray.utils.set_kill_on_parent_death_linux()
process = subprocess.Popen(
command,
env=modified_env,
cwd=cwd,
stdout=stdout_file,
stderr=stderr_file,
stdin=subprocess.PIPE if pipe_stdin else None,
preexec_fn=preexec_fn if sys.platform != "win32" else None)
if fate_share and sys.platform == "win32":
ray.utils.set_kill_child_on_death_win32(process)
return ProcessInfo(
process=process,
stdout_file=stdout_file.name if stdout_file is not None else None,
stderr_file=stderr_file.name if stderr_file is not None else None,
use_valgrind=use_valgrind,
use_gdb=use_gdb,
use_valgrind_profiler=use_valgrind_profiler,
use_perftools_profiler=use_perftools_profiler,
use_tmux=use_tmux)
def wait_for_redis_to_start(redis_ip_address,
redis_port,
password=None,
num_retries=5):
"""Wait for a Redis server to be available.
This is accomplished by creating a Redis client and sending a random
command to the server until the command gets through.
Args:
redis_ip_address (str): The IP address of the redis server.
redis_port (int): The port of the redis server.
password (str): The password of the redis server.
num_retries (int): The number of times to try connecting with redis.
The client will sleep for one second between attempts.
Raises:
Exception: An exception is raised if we could not connect with Redis.
"""
redis_client = redis.StrictRedis(
host=redis_ip_address, port=redis_port, password=password)
# Wait for the Redis server to start.
counter = 0
while counter < num_retries:
try:
# Run some random command and see if it worked.
logger.debug(
"Waiting for redis server at {}:{} to respond...".format(
redis_ip_address, redis_port))
redis_client.client_list()
except redis.ConnectionError:
# Wait a little bit.
time.sleep(1)
logger.info("Failed to connect to the redis server, retrying.")
counter += 1
else:
break
if counter == num_retries:
raise RuntimeError("Unable to connect to Redis. If the Redis instance "
"is on a different machine, check that your "
"firewall is configured properly.")
def _compute_version_info():
"""Compute the versions of Python, and Ray.
Returns:
A tuple containing the version information.
"""
ray_version = ray.__version__
python_version = ".".join(map(str, sys.version_info[:3]))
return ray_version, python_version
def _put_version_info_in_redis(redis_client):
"""Store version information in Redis.
This will be used to detect if workers or drivers are started using
different versions of Python, or Ray.
Args:
redis_client: A client for the primary Redis shard.
"""
redis_client.set("VERSION_INFO", json.dumps(_compute_version_info()))
def check_version_info(redis_client):
"""Check if various version info of this process is correct.
This will be used to detect if workers or drivers are started using
different versions of Python, or Ray. If the version
information is not present in Redis, then no check is done.
Args:
redis_client: A client for the primary Redis shard.
Raises:
Exception: An exception is raised if there is a version mismatch.
"""
redis_reply = redis_client.get("VERSION_INFO")
# Don't do the check if there is no version information in Redis. This
# is to make it easier to do things like start the processes by hand.
if redis_reply is None:
return
true_version_info = tuple(json.loads(ray.utils.decode(redis_reply)))
version_info = _compute_version_info()
if version_info != true_version_info:
node_ip_address = get_node_ip_address()
error_message = ("Version mismatch: The cluster was started with:\n"
" Ray: " + true_version_info[0] + "\n"
" Python: " + true_version_info[1] + "\n"
"This process on node " + node_ip_address +
" was started with:" + "\n"
" Ray: " + version_info[0] + "\n"
" Python: " + version_info[1] + "\n")
if version_info[:2] != true_version_info[:2]:
raise RuntimeError(error_message)
else:
logger.warning(error_message)
def start_reaper(fate_share=None):
"""Start the reaper process.
This is a lightweight process that simply
waits for its parent process to die and then terminates its own
process group. This allows us to ensure that ray processes are always
terminated properly so long as that process itself isn't SIGKILLed.
Returns:
ProcessInfo for the process that was started.
"""
# Make ourselves a process group leader so that the reaper can clean
# up other ray processes without killing the process group of the
# process that started us.
try:
os.setpgrp()
except (AttributeError, OSError) as e:
errcode = e.errno if isinstance(e, OSError) else None
if errcode == errno.EPERM and os.getpgrp() == os.getpid():
# Nothing to do; we're already a session leader.
pass
else:
logger.warning("setpgrp failed, processes may not be "
"cleaned up properly: {}.".format(e))
# Don't start the reaper in this case as it could result in killing
# other user processes.
return None
reaper_filepath = os.path.join(
os.path.dirname(os.path.abspath(__file__)), "ray_process_reaper.py")
command = [sys.executable, "-u", reaper_filepath]
process_info = start_ray_process(
command,
ray_constants.PROCESS_TYPE_REAPER,
pipe_stdin=True,
fate_share=fate_share)
return process_info
def start_redis(node_ip_address,
redirect_files,
resource_spec,
port=None,
redis_shard_ports=None,
num_redis_shards=1,
redis_max_clients=None,
redirect_worker_output=False,
password=None,
use_credis=None,
include_java=False,
fate_share=None):
"""Start the Redis global state store.
Args:
node_ip_address: The IP address of the current node. This is only used
for recording the log filenames in Redis.
redirect_files: The list of (stdout, stderr) file pairs.
resource_spec (ResourceSpec): Resources for the node.
port (int): If provided, the primary Redis shard will be started on
this port.
redis_shard_ports: A list of the ports to use for the non-primary Redis
shards.
num_redis_shards (int): If provided, the number of Redis shards to
start, in addition to the primary one. The default value is one
shard.
redis_max_clients: If this is provided, Ray will attempt to configure
Redis with this maxclients number.
redirect_worker_output (bool): True if worker output should be
redirected to a file and false otherwise. Workers will have access
to this value when they start up.
password (str): Prevents external clients without the password
from connecting to Redis if provided.
use_credis: If True, additionally load the chain-replicated libraries
into the redis servers. Defaults to None, which means its value is
set by the presence of "RAY_USE_NEW_GCS" in os.environ.
include_java (bool): If True, the raylet backend can also support
Java worker.
Returns:
A tuple of the address for the primary Redis shard, a list of
addresses for the remaining shards, and the processes that were
started.
"""
if len(redirect_files) != 1 + num_redis_shards:
raise ValueError("The number of redirect file pairs should be equal "
"to the number of redis shards (including the "
"primary shard) we will start.")
if redis_shard_ports is None:
redis_shard_ports = num_redis_shards * [None]
elif len(redis_shard_ports) != num_redis_shards:
raise RuntimeError("The number of Redis shard ports does not match "
"the number of Redis shards.")
processes = []
if use_credis is None:
use_credis = ("RAY_USE_NEW_GCS" in os.environ)
if use_credis:
if password is not None:
# TODO(pschafhalter) remove this once credis supports
# authenticating Redis ports
raise ValueError("Setting the `redis_password` argument is not "
"supported in credis. To run Ray with "
"password-protected Redis ports, ensure that "
"the environment variable `RAY_USE_NEW_GCS=off`.")
assert num_redis_shards == 1, (
"For now, RAY_USE_NEW_GCS supports 1 shard, and credis "
"supports 1-node chain for that shard only.")
if use_credis:
redis_executable = CREDIS_EXECUTABLE
# TODO(suquark): We need credis here because some symbols need to be
# imported from credis dynamically through dlopen when Ray is built
# with RAY_USE_NEW_GCS=on. We should remove them later for the primary
# shard.
# See src/ray/gcs/redis_module/ray_redis_module.cc
redis_modules = [CREDIS_MASTER_MODULE, REDIS_MODULE]
else:
redis_executable = REDIS_EXECUTABLE
redis_modules = [REDIS_MODULE]
redis_stdout_file, redis_stderr_file = redirect_files[0]
# Start the primary Redis shard.
port, p = _start_redis_instance(
redis_executable,
modules=redis_modules,
port=port,
password=password,
redis_max_clients=redis_max_clients,
# Below we use None to indicate no limit on the memory of the
# primary Redis shard.
redis_max_memory=None,
stdout_file=redis_stdout_file,
stderr_file=redis_stderr_file,
fate_share=fate_share)
processes.append(p)
redis_address = address(node_ip_address, port)
# Register the number of Redis shards in the primary shard, so that clients
# know how many redis shards to expect under RedisShards.
primary_redis_client = redis.StrictRedis(
host=node_ip_address, port=port, password=password)
primary_redis_client.set("NumRedisShards", str(num_redis_shards))
# Put the redirect_worker_output bool in the Redis shard so that workers
# can access it and know whether or not to redirect their output.
primary_redis_client.set("RedirectOutput", 1
if redirect_worker_output else 0)
# put the include_java bool to primary redis-server, so that other nodes
# can access it and know whether or not to enable cross-languages.
primary_redis_client.set("INCLUDE_JAVA", 1 if include_java else 0)
# Init job counter to GCS.
primary_redis_client.set("JobCounter", 0)
# Store version information in the primary Redis shard.
_put_version_info_in_redis(primary_redis_client)
# Calculate the redis memory.
assert resource_spec.resolved()
redis_max_memory = resource_spec.redis_max_memory
# Start other Redis shards. Each Redis shard logs to a separate file,
# prefixed by "redis-<shard number>".
redis_shards = []
for i in range(num_redis_shards):
redis_stdout_file, redis_stderr_file = redirect_files[i + 1]
if use_credis:
redis_executable = CREDIS_EXECUTABLE
# It is important to load the credis module BEFORE the ray module,
# as the latter contains an extern declaration that the former
# supplies.
redis_modules = [CREDIS_MEMBER_MODULE, REDIS_MODULE]
else:
redis_executable = REDIS_EXECUTABLE
redis_modules = [REDIS_MODULE]
redis_shard_port, p = _start_redis_instance(
redis_executable,
modules=redis_modules,
port=redis_shard_ports[i],
password=password,
redis_max_clients=redis_max_clients,
redis_max_memory=redis_max_memory,
stdout_file=redis_stdout_file,
stderr_file=redis_stderr_file,
fate_share=fate_share)
processes.append(p)
shard_address = address(node_ip_address, redis_shard_port)
redis_shards.append(shard_address)
# Store redis shard information in the primary redis shard.
primary_redis_client.rpush("RedisShards", shard_address)
if use_credis:
# Configure the chain state. The way it is intended to work is
# the following:
#
# PRIMARY_SHARD
#
# SHARD_1 (master replica) -> SHARD_1 (member replica)
# -> SHARD_1 (member replica)
#
# SHARD_2 (master replica) -> SHARD_2 (member replica)
# -> SHARD_2 (member replica)
# ...
#
#
# If we have credis members in future, their modules should be:
# [CREDIS_MEMBER_MODULE, REDIS_MODULE], and they will be initialized by
# execute_command("MEMBER.CONNECT_TO_MASTER", node_ip_address, port)
#
# Currently we have num_redis_shards == 1, so only one chain will be
# created, and the chain only contains master.
# TODO(suquark): Currently, this is not correct because we are
# using the master replica as the primary shard. This should be
# fixed later. I had tried to fix it but failed because of heartbeat
# issues.
primary_client = redis.StrictRedis(
host=node_ip_address, port=port, password=password)
shard_client = redis.StrictRedis(
host=node_ip_address, port=redis_shard_port, password=password)
primary_client.execute_command("MASTER.ADD", node_ip_address,
redis_shard_port)
shard_client.execute_command("MEMBER.CONNECT_TO_MASTER",
node_ip_address, port)
return redis_address, redis_shards, processes
def _start_redis_instance(executable,
modules,
port=None,
redis_max_clients=None,
num_retries=20,
stdout_file=None,
stderr_file=None,
password=None,
redis_max_memory=None,
fate_share=None):
"""Start a single Redis server.
Notes:
If "port" is not None, then we will only use this port and try
only once. Otherwise, random ports will be used and the maximum
retries count is "num_retries".
Args:
executable (str): Full path of the redis-server executable.
modules (list of str): A list of pathnames, pointing to the redis
module(s) that will be loaded in this redis server.
port (int): If provided, start a Redis server with this port.
redis_max_clients: If this is provided, Ray will attempt to configure
Redis with this maxclients number.
num_retries (int): The number of times to attempt to start Redis. If a
port is provided, this defaults to 1.
stdout_file: A file handle opened for writing to redirect stdout to. If
no redirection should happen, then this should be None.
stderr_file: A file handle opened for writing to redirect stderr to. If
no redirection should happen, then this should be None.
password (str): Prevents external clients without the password
from connecting to Redis if provided.
redis_max_memory: The max amount of memory (in bytes) to allow redis
to use, or None for no limit. Once the limit is exceeded, redis
will start LRU eviction of entries.
Returns:
A tuple of the port used by Redis and ProcessInfo for the process that
was started. If a port is passed in, then the returned port value
is the same.
Raises:
Exception: An exception is raised if Redis could not be started.
"""
assert os.path.isfile(executable)
for module in modules:
assert os.path.isfile(module)
counter = 0
if port is not None:
# If a port is specified, then try only once to connect.
# This ensures that we will use the given port.
num_retries = 1
else:
port = new_port()
load_module_args = []
for module in modules:
load_module_args += ["--loadmodule", module]
while counter < num_retries:
if counter > 0:
logger.warning("Redis failed to start, retrying now.")
# Construct the command to start the Redis server.
command = [executable]
if password:
if " " in password:
raise ValueError("Spaces not permitted in redis password.")
command += ["--requirepass", password]
command += (
["--port", str(port), "--loglevel", "warning"] + load_module_args)
process_info = start_ray_process(
command,
ray_constants.PROCESS_TYPE_REDIS_SERVER,
stdout_file=stdout_file,
stderr_file=stderr_file,
fate_share=fate_share)
time.sleep(0.1)
        # Check if Redis successfully started (or at least that the executable
        # did not exit within 0.1 seconds).
if process_info.process.poll() is None:
break
port = new_port()
counter += 1
if counter == num_retries:
raise RuntimeError("Couldn't start Redis. "
"Check log files: {} {}".format(
stdout_file.name, stderr_file.name))
# Create a Redis client just for configuring Redis.
redis_client = redis.StrictRedis(
host="127.0.0.1", port=port, password=password)
# Wait for the Redis server to start.
wait_for_redis_to_start("127.0.0.1", port, password=password)
# Configure Redis to generate keyspace notifications. TODO(rkn): Change
# this to only generate notifications for the export keys.
redis_client.config_set("notify-keyspace-events", "Kl")
# Configure Redis to not run in protected mode so that processes on other
# hosts can connect to it. TODO(rkn): Do this in a more secure way.
redis_client.config_set("protected-mode", "no")
# Discard old task and object metadata.
if redis_max_memory is not None:
redis_client.config_set("maxmemory", str(redis_max_memory))
redis_client.config_set("maxmemory-policy", "allkeys-lru")
redis_client.config_set("maxmemory-samples", "10")
logger.debug("Starting Redis shard with {} GB max memory.".format(
round(redis_max_memory / 1e9, 2)))
# If redis_max_clients is provided, attempt to raise the number of maximum
# number of Redis clients.
if redis_max_clients is not None:
redis_client.config_set("maxclients", str(redis_max_clients))
elif resource is not None:
# If redis_max_clients is not provided, determine the current ulimit.
# We will use this to attempt to raise the maximum number of Redis
# clients.
current_max_clients = int(
redis_client.config_get("maxclients")["maxclients"])
# The below command should be the same as doing ulimit -n.
ulimit_n = resource.getrlimit(resource.RLIMIT_NOFILE)[0]
# The quantity redis_client_buffer appears to be the required buffer
# between the maximum number of redis clients and ulimit -n. That is,
# if ulimit -n returns 10000, then we can set maxclients to
# 10000 - redis_client_buffer.
redis_client_buffer = 32
if current_max_clients < ulimit_n - redis_client_buffer:
redis_client.config_set("maxclients",
ulimit_n - redis_client_buffer)
# Increase the hard and soft limits for the redis client pubsub buffer to
# 128MB. This is a hack to make it less likely for pubsub messages to be
# dropped and for pubsub connections to therefore be killed.
cur_config = (redis_client.config_get("client-output-buffer-limit")[
"client-output-buffer-limit"])
cur_config_list = cur_config.split()
assert len(cur_config_list) == 12
cur_config_list[8:] = ["pubsub", "134217728", "134217728", "60"]
redis_client.config_set("client-output-buffer-limit",
" ".join(cur_config_list))
# Put a time stamp in Redis to indicate when it was started.
redis_client.set("redis_start_time", time.time())
return port, process_info
def start_log_monitor(redis_address,
logs_dir,
stdout_file=None,
stderr_file=None,
redis_password=None,
fate_share=None):
"""Start a log monitor process.
Args:
redis_address (str): The address of the Redis instance.
logs_dir (str): The directory of logging files.
stdout_file: A file handle opened for writing to redirect stdout to. If
no redirection should happen, then this should be None.
stderr_file: A file handle opened for writing to redirect stderr to. If
no redirection should happen, then this should be None.
redis_password (str): The password of the redis server.
Returns:
ProcessInfo for the process that was started.
"""
log_monitor_filepath = os.path.join(
os.path.dirname(os.path.abspath(__file__)), "log_monitor.py")
command = [
sys.executable, "-u", log_monitor_filepath,
"--redis-address={}".format(redis_address),
"--logs-dir={}".format(logs_dir)
]
if redis_password:
command += ["--redis-password", redis_password]
process_info = start_ray_process(
command,
ray_constants.PROCESS_TYPE_LOG_MONITOR,
stdout_file=stdout_file,
stderr_file=stderr_file,
fate_share=fate_share)
return process_info
def start_reporter(redis_address,
stdout_file=None,
stderr_file=None,
redis_password=None,
fate_share=None):
"""Start a reporter process.
Args:
redis_address (str): The address of the Redis instance.
stdout_file: A file handle opened for writing to redirect stdout to. If
no redirection should happen, then this should be None.
stderr_file: A file handle opened for writing to redirect stderr to. If
no redirection should happen, then this should be None.
redis_password (str): The password of the redis server.
Returns:
ProcessInfo for the process that was started.
"""
reporter_filepath = os.path.join(
os.path.dirname(os.path.abspath(__file__)), "reporter.py")
command = [
sys.executable, "-u", reporter_filepath,
"--redis-address={}".format(redis_address)
]
if redis_password:
command += ["--redis-password", redis_password]
process_info = start_ray_process(
command,
ray_constants.PROCESS_TYPE_REPORTER,
stdout_file=stdout_file,
stderr_file=stderr_file,
fate_share=fate_share)
return process_info
def start_dashboard(require_webui,
host,
redis_address,
temp_dir,
stdout_file=None,
stderr_file=None,
redis_password=None,
fate_share=None):
"""Start a dashboard process.
Args:
require_webui (bool): If true, this will raise an exception if we fail
to start the webui. Otherwise it will print a warning if we fail
to start the webui.
host (str): The host to bind the dashboard web server to.
redis_address (str): The address of the Redis instance.
temp_dir (str): The temporary directory used for log files and
information for this Ray session.
stdout_file: A file handle opened for writing to redirect stdout to. If
no redirection should happen, then this should be None.
stderr_file: A file handle opened for writing to redirect stderr to. If
no redirection should happen, then this should be None.
redis_password (str): The password of the redis server.
    Returns:
        A tuple of the dashboard URL (or None if the dashboard could not be
        started) and the ProcessInfo for the dashboard process (or None).
    """
port = 8265 # Note: list(map(ord, "RAY")) == [82, 65, 89]
while True:
try:
port_test_socket = socket.socket()
port_test_socket.bind(("127.0.0.1", port))
port_test_socket.close()
break
except socket.error:
port += 1
dashboard_filepath = os.path.join(
os.path.dirname(os.path.abspath(__file__)), "dashboard/dashboard.py")
command = [
sys.executable,
"-u",
dashboard_filepath,
"--host={}".format(host),
"--port={}".format(port),
"--redis-address={}".format(redis_address),
"--temp-dir={}".format(temp_dir),
]
if redis_password:
command += ["--redis-password", redis_password]
webui_dependencies_present = True
try:
import aiohttp # noqa: F401
import grpc # noqa: F401
except ImportError:
webui_dependencies_present = False
warning_message = (
"Failed to start the dashboard. The dashboard requires Python 3 "
"as well as 'pip install aiohttp psutil setproctitle grpcio'.")
if require_webui:
raise ImportError(warning_message)
else:
logger.warning(warning_message)
if webui_dependencies_present:
process_info = start_ray_process(
command,
ray_constants.PROCESS_TYPE_DASHBOARD,
stdout_file=stdout_file,
stderr_file=stderr_file,
fate_share=fate_share)
dashboard_url = "{}:{}".format(
host if host != "0.0.0.0" else get_node_ip_address(), port)
logger.info("View the Ray dashboard at {}{}{}{}{}".format(
colorama.Style.BRIGHT, colorama.Fore.GREEN, dashboard_url,
colorama.Fore.RESET, colorama.Style.NORMAL))
return dashboard_url, process_info
else:
return None, None
def start_gcs_server(redis_address,
stdout_file=None,
stderr_file=None,
redis_password=None,
config=None,
fate_share=None):
"""Start a gcs server.
Args:
redis_address (str): The address that the Redis server is listening on.
stdout_file: A file handle opened for writing to redirect stdout to. If
no redirection should happen, then this should be None.
stderr_file: A file handle opened for writing to redirect stderr to. If
no redirection should happen, then this should be None.
redis_password (str): The password of the redis server.
config (dict|None): Optional configuration that will
override defaults in RayConfig.
Returns:
ProcessInfo for the process that was started.
"""
gcs_ip_address, gcs_port = redis_address.split(":")
redis_password = redis_password or ""
config = config or {}
config_str = ",".join(["{},{}".format(*kv) for kv in config.items()])
command = [
GCS_SERVER_EXECUTABLE,
"--redis_address={}".format(gcs_ip_address),
"--redis_port={}".format(gcs_port),
"--config_list={}".format(config_str),
]
if redis_password:
command += ["--redis_password={}".format(redis_password)]
process_info = start_ray_process(
command,
ray_constants.PROCESS_TYPE_GCS_SERVER,
stdout_file=stdout_file,
stderr_file=stderr_file,
fate_share=fate_share)
return process_info
def start_raylet(redis_address,
node_ip_address,
node_manager_port,
raylet_name,
plasma_store_name,
worker_path,
temp_dir,
session_dir,
resource_spec,
object_manager_port=None,
redis_password=None,
use_valgrind=False,
use_profiler=False,
stdout_file=None,
stderr_file=None,
config=None,
include_java=False,
java_worker_options=None,
load_code_from_local=False,
fate_share=None):
"""Start a raylet, which is a combined local scheduler and object manager.
Args:
redis_address (str): The address of the primary Redis server.
node_ip_address (str): The IP address of this node.
node_manager_port(int): The port to use for the node manager. This must
not be 0.
raylet_name (str): The name of the raylet socket to create.
plasma_store_name (str): The name of the plasma store socket to connect
to.
worker_path (str): The path of the Python file that new worker
processes will execute.
temp_dir (str): The path of the temporary directory Ray will use.
session_dir (str): The path of this session.
resource_spec (ResourceSpec): Resources for this raylet.
object_manager_port: The port to use for the object manager. If this is
None, then the object manager will choose its own port.
redis_password: The password to use when connecting to Redis.
use_valgrind (bool): True if the raylet should be started inside
of valgrind. If this is True, use_profiler must be False.
use_profiler (bool): True if the raylet should be started inside
a profiler. If this is True, use_valgrind must be False.
stdout_file: A file handle opened for writing to redirect stdout to. If
no redirection should happen, then this should be None.
stderr_file: A file handle opened for writing to redirect stderr to. If
no redirection should happen, then this should be None.
config (dict|None): Optional Raylet configuration that will
override defaults in RayConfig.
include_java (bool): If True, the raylet backend can also support
Java worker.
java_worker_options (list): The command options for Java worker.
Returns:
ProcessInfo for the process that was started.
"""
# The caller must provide a node manager port so that we can correctly
# populate the command to start a worker.
assert node_manager_port is not None and node_manager_port != 0
config = config or {}
config_str = ",".join(["{},{}".format(*kv) for kv in config.items()])
if use_valgrind and use_profiler:
raise ValueError("Cannot use valgrind and profiler at the same time.")
assert resource_spec.resolved()
num_initial_workers = resource_spec.num_cpus
static_resources = resource_spec.to_resource_dict()
# Limit the number of workers that can be started in parallel by the
# raylet. However, make sure it is at least 1.
num_cpus_static = static_resources.get("CPU", 0)
maximum_startup_concurrency = max(
1, min(multiprocessing.cpu_count(), num_cpus_static))
# Format the resource argument in a form like 'CPU,1.0,GPU,0,Custom,3'.
resource_argument = ",".join(
["{},{}".format(*kv) for kv in static_resources.items()])
gcs_ip_address, gcs_port = redis_address.split(":")
if include_java is True:
default_cp = os.pathsep.join(DEFAULT_JAVA_WORKER_CLASSPATH)
java_worker_command = build_java_worker_command(
json.loads(java_worker_options)
if java_worker_options else ["-classpath", default_cp],
redis_address,
node_manager_port,
plasma_store_name,
raylet_name,
redis_password,
session_dir,
)
else:
java_worker_command = []
# Create the command that the Raylet will use to start workers.
start_worker_command = [
sys.executable, worker_path,
"--node-ip-address={}".format(node_ip_address),
"--node-manager-port={}".format(node_manager_port),
"--object-store-name={}".format(plasma_store_name),
"--raylet-name={}".format(raylet_name),
"--redis-address={}".format(redis_address),
"--config-list={}".format(config_str), "--temp-dir={}".format(temp_dir)
]
if redis_password:
start_worker_command += ["--redis-password={}".format(redis_password)]
# If the object manager port is None, then use 0 to cause the object
# manager to choose its own port.
if object_manager_port is None:
object_manager_port = 0
if load_code_from_local:
start_worker_command += ["--load-code-from-local"]
command = [
RAYLET_EXECUTABLE,
"--raylet_socket_name={}".format(raylet_name),
"--store_socket_name={}".format(plasma_store_name),
"--object_manager_port={}".format(object_manager_port),
"--node_manager_port={}".format(node_manager_port),
"--node_ip_address={}".format(node_ip_address),
"--redis_address={}".format(gcs_ip_address),
"--redis_port={}".format(gcs_port),
"--num_initial_workers={}".format(num_initial_workers),
"--maximum_startup_concurrency={}".format(maximum_startup_concurrency),
"--static_resource_list={}".format(resource_argument),
"--config_list={}".format(config_str),
"--python_worker_command={}".format(
subprocess.list2cmdline(start_worker_command)),
"--java_worker_command={}".format(
subprocess.list2cmdline(java_worker_command)),
"--redis_password={}".format(redis_password or ""),
"--temp_dir={}".format(temp_dir),
"--session_dir={}".format(session_dir),
]
process_info = start_ray_process(
command,
ray_constants.PROCESS_TYPE_RAYLET,
use_valgrind=use_valgrind,
use_gdb=False,
use_valgrind_profiler=use_profiler,
use_perftools_profiler=("RAYLET_PERFTOOLS_PATH" in os.environ),
stdout_file=stdout_file,
stderr_file=stderr_file,
fate_share=fate_share)
return process_info
def get_ray_jars_dir():
"""Return a directory where all ray-related jars and
their dependencies locate."""
current_dir = os.path.abspath(os.path.dirname(__file__))
jars_dir = os.path.abspath(os.path.join(current_dir, "jars"))
if not os.path.exists(jars_dir):
raise RuntimeError("Ray jars is not packaged into ray. "
"Please build ray with java enabled "
"(set env var RAY_INSTALL_JAVA=1)")
    return jars_dir
def build_java_worker_command(
java_worker_options,
redis_address,
node_manager_port,
plasma_store_name,
raylet_name,
redis_password,
session_dir,
):
"""This method assembles the command used to start a Java worker.
Args:
java_worker_options (list): The command options for Java worker.
redis_address (str): Redis address of GCS.
plasma_store_name (str): The name of the plasma store socket to connect
to.
raylet_name (str): The name of the raylet socket to create.
redis_password (str): The password of connect to redis.
session_dir (str): The path of this session.
Returns:
The command string for starting Java worker.
"""
pairs = []
if redis_address is not None:
pairs.append(("ray.redis.address", redis_address))
pairs.append(("ray.raylet.node-manager-port", node_manager_port))
if plasma_store_name is not None:
pairs.append(("ray.object-store.socket-name", plasma_store_name))
if raylet_name is not None:
pairs.append(("ray.raylet.socket-name", raylet_name))
if redis_password is not None:
pairs.append(("ray.redis.password", redis_password))
pairs.append(("ray.home", RAY_HOME))
pairs.append(("ray.log-dir", os.path.join(session_dir, "logs")))
pairs.append(("ray.session-dir", session_dir))
command = ["java"] + ["-D{}={}".format(*pair) for pair in pairs]
command += ["RAY_WORKER_RAYLET_CONFIG_PLACEHOLDER"]
# Add ray jars path to java classpath
ray_jars = os.path.join(get_ray_jars_dir(), "*")
if java_worker_options is None:
options = []
else:
assert isinstance(java_worker_options, (tuple, list))
options = list(java_worker_options)
cp_index = -1
for i in range(len(options)):
option = options[i]
if option == "-cp" or option == "-classpath":
cp_index = i + 1
break
if cp_index != -1:
options[cp_index] = options[cp_index] + os.pathsep + ray_jars
else:
options = ["-cp", ray_jars] + options
# Put `java_worker_options` in the last, so it can overwrite the
# above options.
command += options
command += ["RAY_WORKER_DYNAMIC_OPTION_PLACEHOLDER_0"]
command += ["org.ray.runtime.runner.worker.DefaultWorker"]
return command
def determine_plasma_store_config(object_store_memory,
plasma_directory=None,
huge_pages=False):
"""Figure out how to configure the plasma object store.
This will determine which directory to use for the plasma store. On Linux,
we will try to use /dev/shm unless the shared memory file system is too
small, in which case we will fall back to /tmp. If any of the object store
memory or plasma directory parameters are specified by the user, then those
values will be preserved.
Args:
        object_store_memory (int): The object store memory to use.
plasma_directory (str): The user-specified plasma directory parameter.
huge_pages (bool): The user-specified huge pages parameter.
Returns:
The plasma directory to use. If it is specified by the user, then that
value will be preserved.
"""
system_memory = ray.utils.get_system_memory()
# Determine which directory to use. By default, use /tmp on MacOS and
# /dev/shm on Linux, unless the shared-memory file system is too small,
# in which case we default to /tmp on Linux.
if plasma_directory is None:
if sys.platform == "linux" or sys.platform == "linux2":
shm_avail = ray.utils.get_shared_memory_bytes()
# Compare the requested memory size to the memory available in
# /dev/shm.
if shm_avail > object_store_memory:
plasma_directory = "/dev/shm"
else:
plasma_directory = ray.utils.get_user_temp_dir()
logger.warning(
"WARNING: The object store is using {} instead of "
"/dev/shm because /dev/shm has only {} bytes available. "
"This may slow down performance! You may be able to free "
"up space by deleting files in /dev/shm or terminating "
"any running plasma_store_server processes. If you are "
"inside a Docker container, you may need to pass an "
"argument with the flag '--shm-size' to 'docker run'.".
format(ray.utils.get_user_temp_dir(), shm_avail))
else:
plasma_directory = ray.utils.get_user_temp_dir()
# Do some sanity checks.
if object_store_memory > system_memory:
raise ValueError(
"The requested object store memory size is greater "
"than the total available memory.")
else:
plasma_directory = os.path.abspath(plasma_directory)
logger.warning("WARNING: object_store_memory is not verified when "
"plasma_directory is set.")
if not os.path.isdir(plasma_directory):
raise ValueError(
"The file {} does not exist or is not a directory.".format(
plasma_directory))
return plasma_directory
def _start_plasma_store(plasma_store_memory,
use_valgrind=False,
use_profiler=False,
stdout_file=None,
stderr_file=None,
plasma_directory=None,
huge_pages=False,
socket_name=None,
fate_share=None):
"""Start a plasma store process.
Args:
plasma_store_memory (int): The amount of memory in bytes to start the
plasma store with.
use_valgrind (bool): True if the plasma store should be started inside
of valgrind. If this is True, use_profiler must be False.
use_profiler (bool): True if the plasma store should be started inside
a profiler. If this is True, use_valgrind must be False.
stdout_file: A file handle opened for writing to redirect stdout to. If
no redirection should happen, then this should be None.
stderr_file: A file handle opened for writing to redirect stderr to. If
no redirection should happen, then this should be None.
plasma_directory: A directory where the Plasma memory mapped files will
be created.
huge_pages: a boolean flag indicating whether to start the
Object Store with hugetlbfs support. Requires plasma_directory.
socket_name (str): If provided, it will specify the socket
name used by the plasma store.
    Return:
        ProcessInfo for the plasma store process that was started.
    """
if use_valgrind and use_profiler:
raise ValueError("Cannot use valgrind and profiler at the same time.")
if huge_pages and not (sys.platform == "linux"
or sys.platform == "linux2"):
raise ValueError("The huge_pages argument is only supported on "
"Linux.")
if huge_pages and plasma_directory is None:
raise ValueError("If huge_pages is True, then the "
"plasma_directory argument must be provided.")
if not isinstance(plasma_store_memory, int):
plasma_store_memory = int(plasma_store_memory)
command = [
PLASMA_STORE_EXECUTABLE, "-s", socket_name, "-m",
str(plasma_store_memory)
]
if plasma_directory is not None:
command += ["-d", plasma_directory]
if huge_pages:
command += ["-h"]
process_info = start_ray_process(
command,
ray_constants.PROCESS_TYPE_PLASMA_STORE,
use_valgrind=use_valgrind,
use_valgrind_profiler=use_profiler,
stdout_file=stdout_file,
stderr_file=stderr_file,
fate_share=fate_share)
return process_info
def start_plasma_store(resource_spec,
stdout_file=None,
stderr_file=None,
plasma_directory=None,
huge_pages=False,
plasma_store_socket_name=None,
fate_share=None):
"""This method starts an object store process.
Args:
resource_spec (ResourceSpec): Resources for the node.
stdout_file: A file handle opened for writing to redirect stdout
to. If no redirection should happen, then this should be None.
stderr_file: A file handle opened for writing to redirect stderr
to. If no redirection should happen, then this should be None.
plasma_directory: A directory where the Plasma memory mapped files will
be created.
huge_pages: Boolean flag indicating whether to start the Object
Store with hugetlbfs support. Requires plasma_directory.
Returns:
ProcessInfo for the process that was started.
"""
assert resource_spec.resolved()
object_store_memory = resource_spec.object_store_memory
plasma_directory = determine_plasma_store_config(
object_store_memory, plasma_directory, huge_pages)
if object_store_memory < ray_constants.OBJECT_STORE_MINIMUM_MEMORY_BYTES:
raise ValueError("Attempting to cap object store memory usage at {} "
"bytes, but the minimum allowed is {} bytes.".format(
object_store_memory,
ray_constants.OBJECT_STORE_MINIMUM_MEMORY_BYTES))
# Print the object store memory using two decimal places.
object_store_memory_str = (object_store_memory / 10**7) / 10**2
logger.debug("Starting the Plasma object store with {} GB memory "
"using {}.".format(
round(object_store_memory_str, 2), plasma_directory))
# Start the Plasma store.
process_info = _start_plasma_store(
object_store_memory,
use_profiler=RUN_PLASMA_STORE_PROFILER,
stdout_file=stdout_file,
stderr_file=stderr_file,
plasma_directory=plasma_directory,
huge_pages=huge_pages,
socket_name=plasma_store_socket_name,
fate_share=fate_share)
return process_info
def start_worker(node_ip_address,
object_store_name,
raylet_name,
redis_address,
worker_path,
temp_dir,
stdout_file=None,
stderr_file=None,
fate_share=None):
"""This method starts a worker process.
Args:
node_ip_address (str): The IP address of the node that this worker is
running on.
object_store_name (str): The socket name of the object store.
raylet_name (str): The socket name of the raylet server.
redis_address (str): The address that the Redis server is listening on.
worker_path (str): The path of the source code which the worker process
will run.
temp_dir (str): The path of the temp dir.
stdout_file: A file handle opened for writing to redirect stdout to. If
no redirection should happen, then this should be None.
stderr_file: A file handle opened for writing to redirect stderr to. If
no redirection should happen, then this should be None.
Returns:
ProcessInfo for the process that was started.
"""
command = [
sys.executable, "-u", worker_path,
"--node-ip-address=" + node_ip_address,
"--object-store-name=" + object_store_name,
"--raylet-name=" + raylet_name,
"--redis-address=" + str(redis_address), "--temp-dir=" + temp_dir
]
process_info = start_ray_process(
command,
ray_constants.PROCESS_TYPE_WORKER,
stdout_file=stdout_file,
stderr_file=stderr_file,
fate_share=fate_share)
return process_info
def start_monitor(redis_address,
stdout_file=None,
stderr_file=None,
autoscaling_config=None,
redis_password=None,
fate_share=None):
"""Run a process to monitor the other processes.
Args:
redis_address (str): The address that the Redis server is listening on.
stdout_file: A file handle opened for writing to redirect stdout to. If
no redirection should happen, then this should be None.
stderr_file: A file handle opened for writing to redirect stderr to. If
no redirection should happen, then this should be None.
autoscaling_config: path to autoscaling config file.
redis_password (str): The password of the redis server.
Returns:
ProcessInfo for the process that was started.
"""
monitor_path = os.path.join(
os.path.dirname(os.path.abspath(__file__)), "monitor.py")
command = [
sys.executable, "-u", monitor_path,
"--redis-address=" + str(redis_address)
]
if autoscaling_config:
command.append("--autoscaling-config=" + str(autoscaling_config))
if redis_password:
command.append("--redis-password=" + redis_password)
process_info = start_ray_process(
command,
ray_constants.PROCESS_TYPE_MONITOR,
stdout_file=stdout_file,
stderr_file=stderr_file,
fate_share=fate_share)
return process_info
def start_raylet_monitor(redis_address,
stdout_file=None,
stderr_file=None,
redis_password=None,
config=None,
fate_share=None):
"""Run a process to monitor the other processes.
Args:
redis_address (str): The address that the Redis server is listening on.
stdout_file: A file handle opened for writing to redirect stdout to. If
no redirection should happen, then this should be None.
stderr_file: A file handle opened for writing to redirect stderr to. If
no redirection should happen, then this should be None.
redis_password (str): The password of the redis server.
config (dict|None): Optional configuration that will
override defaults in RayConfig.
Returns:
ProcessInfo for the process that was started.
"""
gcs_ip_address, gcs_port = redis_address.split(":")
redis_password = redis_password or ""
config = config or {}
config_str = ",".join(["{},{}".format(*kv) for kv in config.items()])
command = [
RAYLET_MONITOR_EXECUTABLE,
"--redis_address={}".format(gcs_ip_address),
"--redis_port={}".format(gcs_port),
"--config_list={}".format(config_str),
]
if redis_password:
command += ["--redis_password={}".format(redis_password)]
process_info = start_ray_process(
command,
ray_constants.PROCESS_TYPE_RAYLET_MONITOR,
stdout_file=stdout_file,
stderr_file=stderr_file,
fate_share=fate_share)
return process_info
| 40.246928 | 79 | 0.634919 |
4a206b6d7843f9c63f6faa412967bc7cb37f123f | 4,186 | py | Python | _lambda/ask_sdk_model/interfaces/display/text_content.py | desarroyo/alexa-skill-mi-abecedario | 71fb9dc5a9ce2aeb7e336474d5162053e3af0369 | ["MIT"] | null | null | null | _lambda/ask_sdk_model/interfaces/display/text_content.py | desarroyo/alexa-skill-mi-abecedario | 71fb9dc5a9ce2aeb7e336474d5162053e3af0369 | ["MIT"] | null | null | null | _lambda/ask_sdk_model/interfaces/display/text_content.py | desarroyo/alexa-skill-mi-abecedario | 71fb9dc5a9ce2aeb7e336474d5162053e3af0369 | ["MIT"] | null | null | null |
# coding: utf-8
#
# Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file
# except in compliance with the License. A copy of the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for
# the specific language governing permissions and limitations under the License.
#
import pprint
import re # noqa: F401
import six
import typing
from enum import Enum
if typing.TYPE_CHECKING:
from typing import Dict, List, Optional
from datetime import datetime
from ask_sdk_model.interfaces.display.text_field import TextField
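# Illustrative use of the model defined below (the TextField arguments are an
# assumption about ask_sdk_model and may differ):
#   content = TextContent(primary_text=TextField(text="Hello"))
#   content.to_dict()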
class TextContent(object):
"""
:param primary_text:
:type primary_text: (optional) ask_sdk_model.interfaces.display.text_field.TextField
:param secondary_text:
:type secondary_text: (optional) ask_sdk_model.interfaces.display.text_field.TextField
:param tertiary_text:
:type tertiary_text: (optional) ask_sdk_model.interfaces.display.text_field.TextField
"""
deserialized_types = {
'primary_text': 'ask_sdk_model.interfaces.display.text_field.TextField',
'secondary_text': 'ask_sdk_model.interfaces.display.text_field.TextField',
'tertiary_text': 'ask_sdk_model.interfaces.display.text_field.TextField'
}
attribute_map = {
'primary_text': 'primaryText',
'secondary_text': 'secondaryText',
'tertiary_text': 'tertiaryText'
}
def __init__(self, primary_text=None, secondary_text=None, tertiary_text=None):
# type: (Optional[TextField], Optional[TextField], Optional[TextField]) -> None
"""
:param primary_text:
:type primary_text: (optional) ask_sdk_model.interfaces.display.text_field.TextField
:param secondary_text:
:type secondary_text: (optional) ask_sdk_model.interfaces.display.text_field.TextField
:param tertiary_text:
:type tertiary_text: (optional) ask_sdk_model.interfaces.display.text_field.TextField
"""
self.__discriminator_value = None
self.primary_text = primary_text
self.secondary_text = secondary_text
self.tertiary_text = tertiary_text
def to_dict(self):
# type: () -> Dict[str, object]
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.deserialized_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else
x.value if isinstance(x, Enum) else x,
value
))
elif isinstance(value, Enum):
result[attr] = value.value
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else
(item[0], item[1].value)
if isinstance(item[1], Enum) else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
# type: () -> str
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
# type: () -> str
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
# type: (object) -> bool
"""Returns true if both objects are equal"""
if not isinstance(other, TextContent):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
# type: (object) -> bool
"""Returns true if both objects are not equal"""
return not self == other
| 34.595041 | 96 | 0.626374 |
4a206c70c96a29ca3840a9f096ba8676bd888a42 | 2,319 | py | Python | kafka-gpb-tester/src/producer.py | BroadbandForum/obbaa-vomci | 949ad64e1b0521a892f223b7cf97e4e792b2ea1f | ["Apache-2.0"] | null | null | null | kafka-gpb-tester/src/producer.py | BroadbandForum/obbaa-vomci | 949ad64e1b0521a892f223b7cf97e4e792b2ea1f | ["Apache-2.0"] | null | null | null | kafka-gpb-tester/src/producer.py | BroadbandForum/obbaa-vomci | 949ad64e1b0521a892f223b7cf97e4e792b2ea1f | ["Apache-2.0"] | null | null | null |
#!/usr/bin/env python
#
#Copyright 2021 Broadband Forum
#
#Licensed under the Apache License, Version 2.0 (the "License");
#you may not use this file except in compliance with the License.
#You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#Unless required by applicable law or agreed to in writing, software
#distributed under the License is distributed on an "AS IS" BASIS,
#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#See the License for the specific language governing permissions and
#limitations under the License.
#
# vOMCI test producer based on the examples in
# https://docs.confluent.io/clients-confluent-kafka-python/current/overview.html
#
# Created by Andre Brizido on 15/06/2021
from tr451_vomci_nbi_message_pb2 import Msg
from tr451_vomci_nbi_message_pb2 import Error
from confluent_kafka import Producer
import socket
conf = {'bootstrap.servers': "kafka:9092",
'client.id': socket.gethostname()}
VOMCI_TOPIC_NAME="vomci1-response"
VPROXY_TOPIC_NAME="vomci-proxy-response"
producer = Producer(conf)
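# Note: produce() is asynchronous; flush() blocks until queued messages are
# delivered (or fail), and poll() services delivery callbacks such as acked().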
def acked(err, msg):
if err is not None:
print("Failed to deliver message: %s: %s" % (str(msg), str(err)))
else:
print("Message produced: %s" % (str(msg)))
vomci_msg = Msg()
vomci_error = Error()
proxy_msg = Msg()
proxy_error = Error()
#the field names are the same as the .proto file
vomci_msg.header.msg_id="1"
vomci_msg.header.sender_name="vomci1"
vomci_msg.header.recipient_name="vOLTMF"
vomci_msg.header.object_type=vomci_msg.header.VOMCI_FUNCTION
vomci_msg.header.object_name="vomci1"
vomci_msg.body.response.rpc_resp.status_resp.status_code=0
proxy_msg.header.msg_id="2"
proxy_msg.header.sender_name="vomci-proxy"
proxy_msg.header.recipient_name="vOLTMF"
proxy_msg.header.object_type=proxy_msg.header.VOMCI_PROXY
proxy_msg.header.object_name="vomci-proxy"
proxy_msg.body.response.rpc_resp.status_resp.status_code=0
producer.produce(VOMCI_TOPIC_NAME, key="key", value=bytes(vomci_msg.SerializeToString()), callback=acked)
producer.flush()
producer.produce(VPROXY_TOPIC_NAME, key="key", value=bytes(proxy_msg.SerializeToString()), callback=acked)
# Wait up to 1 second for events. Callbacks will be invoked during
# this method call if the message is acknowledged.
producer.poll(1)
| 33.128571 | 106 | 0.77749 |
4a206cde551ab32002750e9d4d559812c422b51f | 1,985 | py | Python | other/physical_input2.py | xandao6/clockworks-flyff-bots | 94f8d74093dd4044f6a6b9f3d2642ee851dd4d21 | ["MIT"] | null | null | null | other/physical_input2.py | xandao6/clockworks-flyff-bots | 94f8d74093dd4044f6a6b9f3d2642ee851dd4d21 | ["MIT"] | null | null | null | other/physical_input2.py | xandao6/clockworks-flyff-bots | 94f8d74093dd4044f6a6b9f3d2642ee851dd4d21 | ["MIT"] | null | null | null |
import ctypes
import time
# Bunch of stuff so that the script can send keystrokes to game #
SendInput = ctypes.windll.user32.SendInput
# C struct redefinitions
PUL = ctypes.POINTER(ctypes.c_ulong)
class KeyBdInput(ctypes.Structure):
_fields_ = [("wVk", ctypes.c_ushort),
("wScan", ctypes.c_ushort),
("dwFlags", ctypes.c_ulong),
("time", ctypes.c_ulong),
("dwExtraInfo", PUL)]
class HardwareInput(ctypes.Structure):
_fields_ = [("uMsg", ctypes.c_ulong),
("wParamL", ctypes.c_short),
("wParamH", ctypes.c_ushort)]
class MouseInput(ctypes.Structure):
_fields_ = [("dx", ctypes.c_long),
("dy", ctypes.c_long),
("mouseData", ctypes.c_ulong),
("dwFlags", ctypes.c_ulong),
("time", ctypes.c_ulong),
("dwExtraInfo", PUL)]
class Input_I(ctypes.Union):
_fields_ = [("ki", KeyBdInput),
("mi", MouseInput),
("hi", HardwareInput)]
class Input(ctypes.Structure):
_fields_ = [("type", ctypes.c_ulong),
("ii", Input_I)]
# Actuals Functions
def PressKey(hexKeyCode):
extra = ctypes.c_ulong(0)
ii_ = Input_I()
ii_.ki = KeyBdInput(0, hexKeyCode, 0x0008, 0, ctypes.pointer(extra))
x = Input(ctypes.c_ulong(1), ii_)
ctypes.windll.user32.SendInput(1, ctypes.pointer(x), ctypes.sizeof(x))
def ReleaseKey(hexKeyCode):
extra = ctypes.c_ulong(0)
ii_ = Input_I()
ii_.ki = KeyBdInput(0, hexKeyCode, 0x0008 | 0x0002,
0, ctypes.pointer(extra))
x = Input(ctypes.c_ulong(1), ii_)
ctypes.windll.user32.SendInput(1, ctypes.pointer(x), ctypes.sizeof(x))
# http://www.flint.jp/misc/?q=dik&lang=en
def KeyPress():
time.sleep(3)
print('ready')
PressKey(0x11)
time.sleep(3)
ReleaseKey(0x11)
KeyPress()
| 26.824324 | 75 | 0.571285 |
4a206d1d69701e5c0e25264f069989adfb874788 | 5,625 | py | Python | examples/DeepWisdom/Auto_Tabular/model_lib/dnn_n.py | zichuan-scott-xu/automl-workflow | d108e55da943775953b9f1801311a86ac07e58a0 | [
"Apache-2.0"
] | 3 | 2020-12-15T02:40:43.000Z | 2021-01-14T02:32:13.000Z | examples/DeepWisdom/Auto_Tabular/model_lib/dnn_n.py | zichuan-scott-xu/automl-workflow | d108e55da943775953b9f1801311a86ac07e58a0 | [
"Apache-2.0"
] | null | null | null | examples/DeepWisdom/Auto_Tabular/model_lib/dnn_n.py | zichuan-scott-xu/automl-workflow | d108e55da943775953b9f1801311a86ac07e58a0 | [
"Apache-2.0"
] | 4 | 2021-01-07T05:41:38.000Z | 2021-04-07T08:02:22.000Z | import numpy as np
import tensorflow as tf
from CONSTANT import *
from utils.data_utils import ohe2cat
from .meta_model import MetaModel
from sklearn.metrics import roc_auc_score
class DnnModel(MetaModel):
def __init__(self):
super(DnnModel, self).__init__()
#clear_session()
self.max_length = None
self.mean = None
self.std = None
self._model = None
self.is_init = False
self.name = 'dnn'
self.type = 'nn'
self.patience = 10
self.not_gain_threhlod = 3
def init_model(self,
num_classes,
**kwargs):
self.num_classes = num_classes
model_fn = self.model_fn
self._model = tf.estimator.Estimator(model_fn=model_fn)
self.is_init = True
def epoch_train(self, dataloader, epoch):
dataset_train = dataloader['train']
train_input_fn = lambda: self.input_function(dataset_train, 'train')
self._model.train(input_fn=train_input_fn, steps=100)
def epoch_valid(self, dataloader):
dataset_valid, label = dataloader['valid']
valid_input_fn = lambda: self.input_function(dataset_valid, 'valid')
valid_results = self._model.predict(input_fn=valid_input_fn)
preds = [x['probabilities'] for x in valid_results]
preds = np.array(preds)
valid_auc = roc_auc_score(label, preds)
return valid_auc
def predict(self, dataloader, batch_size=32):
dataset_test = dataloader['test']
valid_input_fn = lambda: self.input_function(dataset_test, 'test')
test_results = self._model.predict(input_fn=valid_input_fn)
preds = [x['probabilities'] for x in test_results]
preds = np.array(preds)
return preds
def model_fn(self, features, labels, mode):
is_training = False
keep_prob = 1
if mode == tf.estimator.ModeKeys.TRAIN:
is_training = True
keep_prob = 0.8
input_layer = features
# Replace missing values by 0
mask = tf.is_nan(input_layer)
input_layer = tf.where(mask, tf.zeros_like(input_layer), input_layer)
# Sum over time axis
input_layer = tf.reduce_sum(input_layer, axis=1)
mask = tf.reduce_sum(1 - tf.cast(mask, tf.float32), axis=1)
# Flatten
input_layer = tf.layers.flatten(input_layer)
mask = tf.layers.flatten(mask)
f = input_layer.get_shape().as_list()[1] # tf.shape(input_layer)[1]
# Build network
x = tf.layers.batch_normalization(input_layer, training=is_training)
x = tf.nn.dropout(x, keep_prob)
x_skip = self.fc(x, 256, is_training)
x = self.fc(x_skip, 256, is_training)
x = tf.nn.dropout(x, keep_prob)
x = self.fc(x, 256, is_training) + x_skip
x_mid = self.fc(x, f, is_training)
x = self.fc(x_mid, 256, is_training)
for i in range(3):
x = self.fc(x, 256, is_training)
logits = tf.layers.dense(inputs=x, units=self.num_classes)
sigmoid_tensor = tf.nn.sigmoid(logits, name="sigmoid_tensor")
predictions = {
"classes": sigmoid_tensor > 0.5,
"probabilities": sigmoid_tensor
}
if mode == tf.estimator.ModeKeys.PREDICT:
return tf.estimator.EstimatorSpec(mode=mode, predictions=predictions)
loss_labels = tf.reduce_sum(sigmoid_cross_entropy_with_logits(labels=labels, logits=logits))
loss_reconst = tf.reduce_sum(mask * tf.abs(tf.subtract(input_layer, x_mid)))
loss = loss_labels + loss_reconst
# Configure the Training Op (for TRAIN mode)
if mode == tf.estimator.ModeKeys.TRAIN:
optimizer = tf.train.AdamOptimizer()
update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
with tf.control_dependencies(update_ops):
train_op = optimizer.minimize(
loss=loss,
global_step=tf.train.get_global_step())
return tf.estimator.EstimatorSpec(mode=mode, loss=loss, train_op=train_op)
assert mode == tf.estimator.ModeKeys.EVAL
eval_metric_ops = {
"accuracy": tf.metrics.accuracy(
labels=labels, predictions=predictions["classes"])}
return tf.estimator.EstimatorSpec(
mode=mode, loss=loss, eval_metric_ops=eval_metric_ops)
def fc(self, x, out_dim, is_training):
x = tf.layers.dense(inputs=x, units=out_dim)
x = tf.layers.batch_normalization(x, training=is_training)
x = tf.nn.relu(x)
return x
def input_function(self, dataset, mode):
if mode == 'train':
dataset = dataset.shuffle(buffer_size=100)
dataset = dataset.repeat()
dataset = dataset.batch(batch_size=128)
iterator_name = 'iterator_' + mode
if not hasattr(self, iterator_name):
self.iterator = dataset.make_one_shot_iterator()
iterator = dataset.make_one_shot_iterator()
if mode == 'train':
example, labels = iterator.get_next()
return example, labels
if mode == 'valid' or mode == 'test':
example = iterator.get_next()
return example
def sigmoid_cross_entropy_with_logits(labels, logits):
labels = tf.cast(labels, dtype=tf.float32)
relu_logits = tf.nn.relu(logits)
exp_logits = tf.exp(- tf.abs(logits))
sigmoid_logits = tf.log(1 + exp_logits)
element_wise_xent = relu_logits - labels * logits + sigmoid_logits
return tf.reduce_sum(element_wise_xent) | 35.828025 | 100 | 0.630933 |
4a206dd80d886558d2bd563face0e167d590e0bc | 1,153 | py | Python | cngi/vis/visjoin.py | FedeMPouzols/cngi_prototype | 421a99c460f4092b79120f5bec122de7ce9b8b96 | [
"Apache-2.0"
] | null | null | null | cngi/vis/visjoin.py | FedeMPouzols/cngi_prototype | 421a99c460f4092b79120f5bec122de7ce9b8b96 | [
"Apache-2.0"
] | null | null | null | cngi/vis/visjoin.py | FedeMPouzols/cngi_prototype | 421a99c460f4092b79120f5bec122de7ce9b8b96 | [
"Apache-2.0"
] | null | null | null | # Copyright 2019 AUI, Inc. Washington DC, USA
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
this module will be included in the api
"""
###############################################
def visjoin(zarr1, zarr2, outdir=None):
"""
.. todo::
This function is not yet implemented
    Join together two visibility zarr directories
Parameters
----------
zarr1 : str
first zarr directory to join
zarr2 : str
second zarr directory to join
outdir : str
output directory of joined data. Default None will overwrite first directory
Returns
-------
"""
return {}
| 28.825 | 84 | 0.647875 |
4a206e8196d8db2270cb75c7990e5a6af0e7f850 | 5,239 | py | Python | client/totp_enrollment/__init__.py | buzzdeee/king-phisher-plugins | 32171113cbf4a3f08299f15b9bd1d24ae4e69b45 | ["BSD-3-Clause"] | 50 | 2016-05-03T00:35:58.000Z | 2020-04-07T09:36:31.000Z | client/totp_enrollment/__init__.py | rrosajp/king-phisher-plugins | b16c9f55a45d653cc69ba822f08a6100e03df3d4 | ["BSD-3-Clause"] | 34 | 2016-05-17T23:34:13.000Z | 2020-01-29T23:15:40.000Z | client/totp_enrollment/__init__.py | rrosajp/king-phisher-plugins | b16c9f55a45d653cc69ba822f08a6100e03df3d4 | ["BSD-3-Clause"] | 48 | 2016-05-11T15:17:34.000Z | 2020-03-14T23:11:22.000Z |
import functools
import io
import os
import king_phisher.client.plugins as plugins
import king_phisher.client.gui_utilities as gui_utilities
from gi.repository import Gtk
from gi.repository import GdkPixbuf
import pyotp
try:
import qrcode
except ImportError:
has_qrcode = False
else:
has_qrcode = True
try:
import PIL
except ImportError:
has_pillow = False
else:
has_pillow = True
relpath = functools.partial(os.path.join, os.path.dirname(os.path.realpath(__file__)))
gtk_builder_file = relpath('totp_enrollment.ui')
user_gql_query = relpath('user_query.graphql')
class Plugin(plugins.ClientPlugin):
authors = ['Spencer McIntyre']
classifiers = ['Plugin :: Client :: Tool']
title = 'TOTP Self Enrollment'
description = """
This plugin allows users to manage the two factor authentication settings
on their account. This includes setting a new and removing an existing TOTP
secret. The two factor authentication used by King Phisher is compatible
with free mobile applications such as Google Authenticator.
"""
homepage = 'https://github.com/securestate/king-phisher-plugins'
req_min_version = '1.10.0'
req_packages = {
'qrcode': has_qrcode,
'pillow': has_pillow
}
version = '1.1.2'
def initialize(self):
if not os.access(gtk_builder_file, os.R_OK):
gui_utilities.show_dialog_error(
'Plugin Error',
self.application.get_active_window(),
"The GTK Builder data file ({0}) is not available.".format(os.path.basename(gtk_builder_file))
)
return False
self.menu_items = {}
self.add_submenu('Tools > TOTP Self Enrollment')
self.menu_items['setup'] = self.add_menu_item('Tools > TOTP Self Enrollment > Setup', self.enrollment_setup)
self.menu_items['remove'] = self.add_menu_item('Tools > TOTP Self Enrollment > Remove', self.enrollment_remove)
return True
def check_totp(self, _, window, entry, new_otp, this_user):
if not new_otp.verify(entry.get_text().strip()):
gui_utilities.show_dialog_warning(
'Incorrect TOTP',
self.application.get_active_window(),
'The specified TOTP code is invalid. Make sure your time\n'\
+ 'is correct, rescan the QR code and try again.'
)
return
self.application.rpc.remote_table_row_set('users', this_user['id'], {'otp_secret': new_otp.secret})
gui_utilities.show_dialog_info(
'TOTP Enrollment',
self.application.get_active_window(),
'Successfully set the TOTP secret. Your account is now enrolled\n'\
+ 'in two factor authentication. You will be prompted to enter the\n'
+ 'value the next time you login.'
)
window.destroy()
def enrollment_remove(self, _):
rpc = self.application.rpc
this_user = rpc.graphql_file(user_gql_query, {'name': rpc.username})['db']['user']
if this_user['otpSecret'] is None:
gui_utilities.show_dialog_info(
'Not Enrolled',
self.application.get_active_window(),
'This account is not currently enrolled in two factor\n'\
+ 'authentication. There are no changes to make.'
)
return
remove = gui_utilities.show_dialog_yes_no(
'Already Enrolled',
self.application.get_active_window(),
'Are you sure you want to unenroll in TOTP? This will remove\n'\
+ 'two factor authentication on your account.'
)
if not remove:
return
rpc.remote_table_row_set('users', this_user['id'], {'otp_secret': None})
gui_utilities.show_dialog_info(
'TOTP Unenrollment',
self.application.get_active_window(),
'Successfully removed the TOTP secret. Your account is now unenrolled\n'\
+ 'in two factor authentication. You will no longer be prompted to enter\n'\
+ 'the value when you login.'
)
def enrollment_setup(self, _):
rpc = self.application.rpc
this_user = rpc.graphql_file(user_gql_query, {'name': rpc.username})['db']['user']
if this_user['otpSecret'] is not None:
reset = gui_utilities.show_dialog_yes_no(
'Already Enrolled',
self.application.get_active_window(),
'This account is already enrolled in TOTP,\nreset the existing TOTP token?'
)
if not reset:
return
new_otp = pyotp.TOTP(pyotp.random_base32())
provisioning_uri = rpc.username + '@' + self.application.config['server'].split(':', 1)[0]
provisioning_uri = new_otp.provisioning_uri(provisioning_uri) + '&issuer=King%20Phisher'
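		# Illustrative note (added comment): the resulting URI has the form
		# otpauth://totp/<user@server>?secret=<BASE32SECRET>&issuer=King%20Phisher,
		# which is what the QR code generated below encodes for authenticator apps.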
bytes_io = io.BytesIO()
qrcode_ = qrcode.make(provisioning_uri).get_image()
qrcode_.save(bytes_io, 'PNG')
pixbuf_loader = GdkPixbuf.PixbufLoader.new()
pixbuf_loader.write(bytes_io.getvalue())
pixbuf_loader.close()
pixbuf = pixbuf_loader.get_pixbuf()
self.logger.debug('loading gtk builder file from: ' + gtk_builder_file)
builder = Gtk.Builder()
builder.add_from_file(gtk_builder_file)
window = builder.get_object('TOTPEnrollment.window')
window.set_transient_for(self.application.get_active_window())
self.application.add_window(window)
image = builder.get_object('TOTPEnrollment.image_qrcode')
image.set_from_pixbuf(pixbuf)
button_check = builder.get_object('TOTPEnrollment.button_check')
entry_totp = builder.get_object('TOTPEnrollment.entry_totp')
button_check.connect('clicked', self.check_totp, window, entry_totp, new_otp, this_user)
entry_totp.connect('activate', self.check_totp, window, entry_totp, new_otp, this_user)
window.show_all()
| 35.639456 | 113 | 0.743653 |
4a206ea658de2f6db4da0fa6a37b72c3ea2a5141 | 2,156 | py | Python | raining_simulation/raindrops.py | Rajeshree73/Python-Graphics-Projects | 6e983162559392eac23e943f861b9920c006d6c6 | [
"MIT"
] | null | null | null | raining_simulation/raindrops.py | Rajeshree73/Python-Graphics-Projects | 6e983162559392eac23e943f861b9920c006d6c6 | [
"MIT"
] | null | null | null | raining_simulation/raindrops.py | Rajeshree73/Python-Graphics-Projects | 6e983162559392eac23e943f861b9920c006d6c6 | [
"MIT"
] | null | null | null | from setup import *
class Raindrop:
def __init__(self, r, v_x, v_y ,x, y):
self.r = r
self.v_init = [v_x, v_y]
self.x = x
self.y = y
self.g = 1
self.ground_height = 10
self.ground_level = int(SCREENSIZE[1]-self.ground_height)
self.grounded = False
def update(self):
self.collision()
self.x += self.v_init[0]
self.y += self.v_init[1]
self.v_init[1] += self.g
if self.grounded:
self.v_init = [0,0]
def collision(self):
if self.y >= self.ground_level:
self.v_init[1] = -0.9*self.v_init[1]
self.grounded = True
class MainDrop(Raindrop):
def __init__(self, r, colour):
self.r = r
self.colour = colour
self.sub_drops = None
self.busted = None
self.sub_drops_num = 100
v_x, v_y = 0.0, 10.0*random.random()
x, y = random.randint(0, SCREENSIZE[0]), 0.0
super(MainDrop, self).__init__(r, v_x, v_y ,x, y)
pg.mixer.music.load('drop.wav')
def draw(self):
self.burst()
if self.busted:
if False not in [self.sub_drops[i].grounded for i in range(self.sub_drops_num)] and self.grounded:
self.resume()
if self.busted:
[self.sub_drops[i].update() for i in range(self.sub_drops_num)]
[self.sub_drops[i].draw() for i in range(self.sub_drops_num)]
else:
pg.draw.circle(DISPSURFACE, self.colour, (int(self.x), int(self.y)), self.r)
def burst(self):
if self.grounded and not self.busted:
self.sub_drops = [SubDrop(self.colour,self.x, self.y) for i in range(self.sub_drops_num)]
self.busted = True
pg.mixer.music.play()
def resume(self):
self.v_init = [0.0, 10.0*random.random()]
self.x, self.y = random.randint(0, SCREENSIZE[0]), 0.0
self.busted = False
self.grounded = False
self.sub_drops = None
class SubDrop(Raindrop):
def __init__(self, colour, x, y):
self.colour = colour
v_x, v_y = 10.0*(1-2*random.random()), -10.0*random.random()
r = 1
super(SubDrop, self).__init__(r, v_x, v_y ,x, y)
def draw(self):
pg.draw.circle(DISPSURFACE, self.colour, (int(self.x), int(self.y)), self.r)
# class Rain:
# def __init__(self):
# self.drop = MainDrop(10, WHITE)
# self.sub_drops = None
#
# def updte(self):
# self.drop.updte()
| 22.226804 | 101 | 0.655844 |
4a206eb0e05261e14961a2028c2724f3cc95eb90 | 817 | py | Python | tests/basics/string_find.py | learnforpractice/micropython-cpp | 004bc8382f74899e7b876cc29bfa6a9cc976ba10 | [
"MIT"
] | 692 | 2016-12-19T23:25:35.000Z | 2022-03-31T14:20:48.000Z | tests/basics/string_find.py | learnforpractice/micropython-cpp | 004bc8382f74899e7b876cc29bfa6a9cc976ba10 | [
"MIT"
] | 259 | 2016-12-25T06:38:22.000Z | 2022-03-13T16:38:36.000Z | tests/basics/string_find.py | learnforpractice/micropython-cpp | 004bc8382f74899e7b876cc29bfa6a9cc976ba10 | [
"MIT"
] | 228 | 2016-12-19T05:03:30.000Z | 2022-03-22T18:13:00.000Z | print("hello world".find("ll"))
print("hello world".find("ll", None))
print("hello world".find("ll", 1))
print("hello world".find("ll", 1, None))
print("hello world".find("ll", None, None))
print("hello world".find("ll", 1, -1))
print("hello world".find("ll", 1, 1))
print("hello world".find("ll", 1, 2))
print("hello world".find("ll", 1, 3))
print("hello world".find("ll", 1, 4))
print("hello world".find("ll", 1, 5))
print("hello world".find("ll", -100))
print("0000".find('0'))
print("0000".find('0', 0))
print("0000".find('0', 1))
print("0000".find('0', 2))
print("0000".find('0', 3))
print("0000".find('0', 4))
print("0000".find('0', 5))
print("0000".find('-1', 3))
print("0000".find('1', 3))
print("0000".find('1', 4))
print("0000".find('1', 5))
try:
'abc'.find(1)
except TypeError:
print('TypeError')
| 28.172414 | 43 | 0.598531 |
4a207055fcc05d82971679da2e9e1a5fe3838a66 | 999 | py | Python | at/server/commands/at.py | elagheb/at_commands | 51f1fa553b651b639aa3d1e1b3ac4ff07322f7a0 | ["BSD-2-Clause"] | null | null | null | at/server/commands/at.py | elagheb/at_commands | 51f1fa553b651b639aa3d1e1b3ac4ff07322f7a0 | ["BSD-2-Clause"] | null | null | null | at/server/commands/at.py | elagheb/at_commands | 51f1fa553b651b639aa3d1e1b3ac4ff07322f7a0 | ["BSD-2-Clause"] | null | null | null |
import abc
import logging
from .common.exceptions import ATCommandException
logger = logging.getLogger()
logger.addHandler(logging.NullHandler())
class AtCommand(metaclass=abc.ABCMeta):
COMMAND = 'command'
def __init__(self, serial_target, timeout):
self.serial_target = serial_target
self.timeout = timeout
def read(self, read_char='?', message='Error while running command'):
state = self.serial_target.run(command=self.COMMAND + read_char,
timeout=self.timeout,
exception=ATCommandException,
message=message)
return self.parse_output(state)
@abc.abstractmethod
def parse_error(self):
raise NotImplementedError
@abc.abstractmethod
def parse_output(self, result):
match = self.COMMAND[2:] + ':'
for value in result:
if match in value:
return value.split(':')[-1] | 31.21875 | 73 | 0.606607 |
4a207081326e6e74274fac0681e55f5bd1a923c5 | 2,618 | bzl | Python | rules/app.bzl | tinyspeck/rules_ios | 01c61778b24965510fe46a35f2e8492cf160d22c | ["Apache-2.0"] | 146 | 2020-04-18T19:36:32.000Z | 2022-03-27T09:59:27.000Z | rules/app.bzl | bazel-ios/rules_ios | 3807b2011db427e9dbd89b063528da48c9d13ac3 | ["Apache-2.0"] | 225 | 2020-04-17T22:23:14.000Z | 2022-03-31T16:40:55.000Z | rules/app.bzl | tinyspeck/rules_ios | 01c61778b24965510fe46a35f2e8492cf160d22c | ["Apache-2.0"] | 42 | 2020-05-01T20:55:13.000Z | 2022-03-30T10:04:22.000Z |
load("@build_bazel_rules_apple//apple:ios.bzl", rules_apple_ios_application = "ios_application")
load("//rules:library.bzl", "apple_library")
load("//rules:plists.bzl", "info_plists_by_setting")
# We need to try and partition out arguments for obj_library / swift_library
# from ios_application since this creates source file libs internally.
#
# The docs for ios_application are at rules_apple
# https://github.com/bazelbuild/rules_apple/blob/master/doc/rules-ios.md#ios_application
# - Perhaps we can just remove this wrapper longer term.
_IOS_APPLICATION_KWARGS = [
"bundle_id",
"infoplists",
"env",
"minimum_os_version",
"test_host",
"families",
"entitlements",
"entitlements_validation",
"extensions",
"visibility",
"launch_storyboard",
"provisioning_profile",
"resources",
"app_icons",
"tags",
"strings",
"alternate_icons",
"settings_bundle",
"minimum_deployment_os_version",
"ipa_post_processor",
"include_symbols_in_bundle",
]
def ios_application(name, apple_library = apple_library, infoplists_by_build_setting = {}, **kwargs):
"""
Builds and packages an iOS application.
Args:
name: The name of the iOS application.
apple_library: The macro used to package sources into a library.
infoplists_by_build_setting: A dictionary of infoplists grouped by bazel build setting.
Each value is applied if the respective bazel build setting
is resolved during the analysis phase.
If '//conditions:default' is not set the value in 'infoplists'
is set as default.
**kwargs: Arguments passed to the apple_library and ios_application rules as appropriate.
"""
application_kwargs = {arg: kwargs.pop(arg) for arg in _IOS_APPLICATION_KWARGS if arg in kwargs}
library = apple_library(name = name, namespace_is_module_name = False, platforms = {"ios": application_kwargs.get("minimum_os_version")}, **kwargs)
application_kwargs["launch_storyboard"] = application_kwargs.pop("launch_storyboard", library.launch_screen_storyboard_name)
application_kwargs["families"] = application_kwargs.pop("families", ["iphone", "ipad"])
rules_apple_ios_application(
name = name,
deps = library.lib_names,
infoplists = info_plists_by_setting(name = name, infoplists_by_build_setting = infoplists_by_build_setting, default_infoplists = application_kwargs.pop("infoplists", [])),
**application_kwargs
)
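# Illustrative usage sketch (hypothetical target and file names, not part of
# this file): a BUILD file could invoke the macro above roughly as follows.
#
#   ios_application(
#       name = "ExampleApp",
#       bundle_id = "com.example.app",
#       minimum_os_version = "12.0",
#       infoplists = ["App/Info.plist"],
#       infoplists_by_build_setting = {
#           "//config:beta": ["App/Info-beta.plist"],
#           "//conditions:default": ["App/Info.plist"],
#       },
#       srcs = glob(["App/Sources/**/*.swift"]),
#   )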
| 40.90625 | 179 | 0.690985 |
4a2070decaa1ec0d044cfcb6a999ed79dbe64007 | 4,387 | py | Python | src/detection/config/R50_FPN_DCN_test_static.py | inzva/Traffic-Anomaly-Detection | 09bbfd55f5d375832afcc1994d3b549082830594 | ["MIT"] | 4 | 2020-05-31T13:19:58.000Z | 2021-03-11T18:51:06.000Z | src/detection/config/R50_FPN_DCN_test_static.py | inzva/Traffic-Anomaly-Detection | 09bbfd55f5d375832afcc1994d3b549082830594 | ["MIT"] | null | null | null | src/detection/config/R50_FPN_DCN_test_static.py | inzva/Traffic-Anomaly-Detection | 09bbfd55f5d375832afcc1994d3b549082830594 | ["MIT"] | 3 | 2020-05-31T13:19:58.000Z | 2021-03-16T08:43:23.000Z |
# model settings
model = dict(
type='FasterRCNN',
pretrained='modelzoo://resnet50',
backbone=dict(
type='ResNet',
depth=50,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
style='pytorch',
dcn=dict(
modulated=False,
deformable_groups=1,
fallback_on_stride=False),
stage_with_dcn=(False, 'last', 'last', 'all')),
neck=dict(
type='FPN',
in_channels=[256, 512, 1024, 2048],
out_channels=256,
num_outs=5),
rpn_head=dict(
type='RPNHead',
in_channels=256,
feat_channels=256,
anchor_scales=[8],
anchor_ratios=[0.5, 1.0, 2.0],
anchor_strides=[4, 8, 16, 32, 64],
target_means=[.0, .0, .0, .0],
target_stds=[1.0, 1.0, 1.0, 1.0],
use_sigmoid_cls=False),
bbox_roi_extractor=dict(
type='SingleRoIExtractor',
roi_layer=dict(type='RoIAlign', out_size=7, sample_num=2),
out_channels=256,
featmap_strides=[4, 8, 16, 32]),
bbox_head=dict(
type='SharedFCBBoxHead',
num_fcs=2,
in_channels=256,
fc_out_channels=1024,
roi_feat_size=7,
num_classes=81,
target_means=[0., 0., 0., 0.],
target_stds=[0.1, 0.1, 0.2, 0.2],
reg_class_agnostic=False))
# model training and testing settings
train_cfg = dict(
rpn=dict(
assigner=dict(
type='MaxIoUAssigner',
pos_iou_thr=0.7,
neg_iou_thr=0.3,
min_pos_iou=0.3,
ignore_iof_thr=-1),
sampler=dict(
type='RandomSampler',
num=256,
pos_fraction=0.5,
neg_pos_ub=-1,
add_gt_as_proposals=False),
allowed_border=0,
pos_weight=-1,
smoothl1_beta=1 / 9.0,
debug=False),
rcnn=dict(
assigner=dict(
type='MaxIoUAssigner',
pos_iou_thr=0.5,
neg_iou_thr=0.5,
min_pos_iou=0.5,
ignore_iof_thr=-1),
sampler=dict(
type='RandomSampler',
num=512,
pos_fraction=0.25,
neg_pos_ub=-1,
add_gt_as_proposals=True),
pos_weight=-1,
debug=False))
test_cfg = dict(
rpn=dict(
nms_across_levels=False,
nms_pre=2000,
nms_post=2000,
max_num=2000,
nms_thr=0.9,
min_bbox_size=0),
rcnn=dict(
score_thr=0.05, nms=dict(type='nms', iou_thr=0.9), max_per_img=100)
# soft-nms is also supported for rcnn testing
# e.g., nms=dict(type='soft_nms', iou_thr=0.5, min_score=0.05)
)
# dataset settings
dataset_type = 'AnomalyDataset'
data_root = 'data/AIC_Track3/'
img_norm_cfg = dict(
mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
data = dict(
imgs_per_gpu=2,
workers_per_gpu=2,
train=dict(
type=dataset_type,
ann_file=data_root + 'train.json',
img_prefix=data_root + 'Images/',
img_scale=[(1600,800),(2400,1200)],
img_norm_cfg=img_norm_cfg,
size_divisor=32,
flip_ratio=0.5,
with_mask=False,
with_crowd=True,
with_label=True),
test=dict(
type=dataset_type,
ann_file=data_root + 'test_data_processed.json',
img_prefix=data_root + 'processed_images/',
img_scale=[(1620,800),(2430,1200)],
img_norm_cfg=img_norm_cfg,
size_divisor=32,
flip_ratio=0,
with_mask=False,
with_label=False,
test_mode=True))
# optimizer
optimizer = dict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.0001)
optimizer_config = dict(grad_clip=dict(max_norm=35, norm_type=2))
# learning policy
lr_config = dict(
policy='step',
warmup='linear',
warmup_iters=500,
warmup_ratio=1.0 / 3,
step=[16, 22])
checkpoint_config = dict(interval=1)
# yapf:disable
log_config = dict(
interval=50,
hooks=[
dict(type='TextLoggerHook'),
# dict(type='TensorboardLoggerHook')
])
# yapf:enable
# runtime settings
total_epochs = 25
dist_params = dict(backend='nccl')
log_level = 'INFO'
work_dir = './work_dirs/faster_rcnn_dconv_c3-c5_r50_fpn_1x'
load_from = 'mmdetection/faster_rcnn_dconv_c3-c5_r50_fpn_1x_20190125-1b768045.pth'
resume_from = None
workflow = [('train', 1)]
| 28.861842 | 82 | 0.585138 |
4a20717d05bbba47c9686dc8ff863417b2a5e45b | 1,528 | py | Python | benchmarks/eq_design_bench.py | jatinchowdhury18/AudioDSPy | 96ce0e223877cb5515f14da4f6d2ce8282d81f3c | ["MIT"] | 37 | 2019-11-06T07:47:40.000Z | 2022-03-07T10:50:45.000Z | benchmarks/eq_design_bench.py | jatinchowdhury18/AudioDSPy | 96ce0e223877cb5515f14da4f6d2ce8282d81f3c | ["MIT"] | 3 | 2019-10-22T03:24:09.000Z | 2022-01-30T00:45:21.000Z | benchmarks/eq_design_bench.py | jatinchowdhury18/AudioDSPy | 96ce0e223877cb5515f14da4f6d2ce8282d81f3c | ["MIT"] | 3 | 2020-09-09T20:18:37.000Z | 2022-03-17T00:48:05.000Z |
import audio_dspy as adsp
import numpy as np
_num_ = 1000
_fc_ = 1000
_Q_ = 0.7071
_fs_ = 44100
_gain_ = 2
_N_ = 16
class EQTimeSuite:
"""
    Benchmarking Suite for EQ design functions
"""
def time_bell_filter(self):
for _ in range(_num_):
adsp.design_bell(_fc_, _Q_, _gain_, _fs_)
def time_lowshelf_filter(self):
for _ in range(_num_):
adsp.design_lowshelf(_fc_, _Q_, _gain_, _fs_)
def time_highshelf_filter(self):
for _ in range(_num_):
adsp.design_highshelf(_fc_, _Q_, _gain_, _fs_)
def time_notch_filter(self):
for _ in range(_num_):
adsp.design_notch(_fc_, _Q_, _fs_)
def time_LPF1_filter(self):
for _ in range(_num_):
adsp.design_LPF1(_fc_, _fs_)
def time_LPF2_filter(self):
for _ in range(_num_):
adsp.design_LPF2(_fc_, _Q_, _fs_)
def time_LPFN_filter(self):
for _ in range(_num_):
adsp.design_LPFN(_fc_, _Q_, _N_, _fs_)
def time_HPF1_filter(self):
for _ in range(_num_):
adsp.design_HPF1(_fc_, _fs_)
def time_HPF2_filter(self):
for _ in range(_num_):
adsp.design_HPF2(_fc_, _Q_, _fs_)
def time_HPFN_filter(self):
for _ in range(_num_):
adsp.design_HPFN(_fc_, _Q_, _N_, _fs_)
def time_bilinear_biquad(self):
for _ in range(_num_):
adsp.bilinear_biquad(np.array([1, 1, 1]), np.array(
[1, 0, 0]), _fs_, matchPole=True)
| 25.04918 | 63 | 0.603403 |
4a20736e8234ec27d65f4948991798e305f4a761 | 1,453 | py | Python | ba3e-db-graph-kmers/db_graph_kmers.py | kjco/bioinformatics-algorithms | 3c466157b89c1cbd54749563e39d86a307d7a3f3 | ["MIT"] | null | null | null | ba3e-db-graph-kmers/db_graph_kmers.py | kjco/bioinformatics-algorithms | 3c466157b89c1cbd54749563e39d86a307d7a3f3 | ["MIT"] | null | null | null | ba3e-db-graph-kmers/db_graph_kmers.py | kjco/bioinformatics-algorithms | 3c466157b89c1cbd54749563e39d86a307d7a3f3 | ["MIT"] | null | null | null |
# Programming solution for:
# Construct the De Bruijn Graph of a Collection of k-mers
# http://rosalind.info/problems/ba3e/
#
# Given an arbitrary collection of k-mers Patterns (where some k-mers may
# appear multiple times), we define CompositionGraph(Patterns) as a graph with
# |Patterns| isolated edges. Every edge is labeled by a k-mer from Patterns,
# and the starting and ending nodes of an edge are labeled by the prefix and
# suffix of the k-mer labeling that edge. We then define the de Bruijn graph of
# Patterns, denoted DeBruijn(Patterns), by gluing identically labeled nodes in
# CompositionGraph(Patterns), which yields the following algorithm.
#
# **De Bruijn Graph from k-mers Problem**
#
# Construct the de Bruijn graph from a collection of k-mers.
# - Given: A collection of k-mers Patterns.
# - Return: The de Bruijn graph DeBruijn(Patterns), in the form of an adjacency
# list.
# Sample input:
# pattern_list = ['GAGG','GGGG','GGGA','CAGG','AGGG','GGAG']
pattern_list = [line.strip() for line in open('dataset_54_7.txt','r')]
d = dict()
for kmer in pattern_list:
prefix = kmer[0:len(kmer)-1]
suffix = kmer[1:len(kmer)]
if prefix not in d:
d[prefix] = [suffix]
else:
d[prefix].append(suffix)
for key in sorted(d.iterkeys()):
print "%s -> %s" % (key, ','.join(sorted(list(set(d[key])))))
# list(set(my_list)) removes duplicates in list
| 39.27027 | 82 | 0.679284 |
4a2074a6d1520b5c650567f40249b338adafbf07 | 141 | py | Python | matfiz_tasks/Topic25_Sockets/T25.19_client.py | andriidem308/python_practice | 85a0ebd6ecbecf63eaba170c8279f0a88600237a | ["MIT"] | 2 | 2020-01-27T11:58:54.000Z | 2020-03-30T10:54:08.000Z | matfiz_tasks/Topic25_Sockets/T25.19_client.py | andriidem308/python_practice | 85a0ebd6ecbecf63eaba170c8279f0a88600237a | ["MIT"] | null | null | null | matfiz_tasks/Topic25_Sockets/T25.19_client.py | andriidem308/python_practice | 85a0ebd6ecbecf63eaba170c8279f0a88600237a | ["MIT"] | null | null | null |
import socket
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
host, port = socket.gethostname(), 2519
sock.connect((host, port))
| 17.625 | 56 | 0.751773 |
4a20757b2391afc80bd5bd03017f6070e04e282a | 558 | py | Python | var/spack/repos/builtin/packages/py-hacking/package.py | jeanbez/spack | f4e51ce8f366c85bf5aa0eafe078677b42dae1ba | ["ECL-2.0", "Apache-2.0", "MIT-0", "MIT"] | null | null | null | var/spack/repos/builtin/packages/py-hacking/package.py | jeanbez/spack | f4e51ce8f366c85bf5aa0eafe078677b42dae1ba | ["ECL-2.0", "Apache-2.0", "MIT-0", "MIT"] | 8 | 2021-11-09T20:28:40.000Z | 2022-03-15T03:26:33.000Z | var/spack/repos/builtin/packages/py-hacking/package.py | jeanbez/spack | f4e51ce8f366c85bf5aa0eafe078677b42dae1ba | ["ECL-2.0", "Apache-2.0", "MIT-0", "MIT"] | 2 | 2019-02-08T20:37:20.000Z | 2019-03-31T15:19:26.000Z |
# Copyright 2013-2022 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack.package import *
class PyHacking(PythonPackage):
"""OpenStack Hacking Guideline Enforcement."""
homepage = "https://docs.openstack.org/hacking/latest/"
pypi = "hacking/hacking-1.1.0.tar.gz"
version('1.1.0', sha256='23a306f3a1070a4469a603886ba709780f02ae7e0f1fc7061e5c6fb203828fee')
depends_on('py-setuptools', type='build')
| 31 | 95 | 0.749104 |
4a2075fa4be64cffb5448fd5b54479f0ec92e9dc | 2,484 | py | Python | src/controller/WEAP/ServeWaMDaMDataToWEAP/ServeWEAPData.py | WamdamProject/WaMDaM_Wizard | f8f5a830464f3c8f45e4eb0557833eefb267d7b2 | ["BSD-3-Clause"] | null | null | null | src/controller/WEAP/ServeWaMDaMDataToWEAP/ServeWEAPData.py | WamdamProject/WaMDaM_Wizard | f8f5a830464f3c8f45e4eb0557833eefb267d7b2 | ["BSD-3-Clause"] | 3 | 2018-11-17T05:49:18.000Z | 2020-12-31T15:57:14.000Z | src/controller/WEAP/ServeWaMDaMDataToWEAP/ServeWEAPData.py | WamdamProject/WaMDaM_Wizard | f8f5a830464f3c8f45e4eb0557833eefb267d7b2 | ["BSD-3-Clause"] | null | null | null |
import win32com.client
from controller.ConnectDB_ParseExcel import DB_Setup
from controller.ConnectDB_ParseExcel import SqlAlchemy as sq
from sqlalchemy.orm import aliased
# 1. Connect to WEAP
# Function to create the connection with WEAP
class WEAP_export(object):
WEAP = None
def __init__(self):
self.ConnectWEAP()
self.setup = DB_Setup()
self.session = self.setup.get_session()
self.excel_pointer = None
def ConnectWEAP(self):
self.WEAP=win32com.client.Dispatch("WEAP.WEAPApplication")
# make this dynamic active area
self.WEAP.ActiveArea = "BearRiverFeb2017_V10.9"
ActiveArea=self.WEAP.ActiveArea.Name
Scenario=self.WEAP.ActiveScenario.Name
WEAPAreasDirectory= self.WEAP.AreasDirectory
print ActiveArea
print Scenario
print WEAPAreasDirectory
SourceName=self.WEAP.ActiveArea.Name
# 2. Extract the WEAP Network
def QueryWaMDaMDataForWEAP(self):
# call the function wich will run the query and write its output to excel
# Time Series
from controller.WEAP.ServeWaMDaMDataToWEAP.QueryTimeSeries import TimeSeries_query, Timeseries_csv_file
df_TimeSeries = TimeSeries_query(self.session)
csv_file_name_timeseries = Timeseries_csv_file(df_TimeSeries)
# Multi Columns
from controller.WEAP.ServeWaMDaMDataToWEAP.QueryMultiAttributes import MultiAttributes_query, MultiAttributes_csv_file
df_MultiColumns = MultiAttributes_query(self.session)
csv_file_path_or_value_multi = MultiAttributes_csv_file(df_MultiColumns)
# Seasonal
from controller.WEAP.ServeWaMDaMDataToWEAP.QuerySeasonal import Seasonal_query, Seasonal_csv_file
df_Seasonal = Seasonal_query(self.session)
csv_file_path_or_value_seasonal = Seasonal_csv_file(df_Seasonal)
# Metadata for all the files together (pass all these to the function
from controller.WEAP.ServeWaMDaMDataToWEAP.ExportWEAP_Inputfile import WriteMetadataFile
WriteMetadataFile(df_TimeSeries, df_MultiColumns, df_Seasonal,
csv_file_name_timeseries, csv_file_path_or_value_seasonal,
csv_file_path_or_value_multi)
WriteMetadataFile(self)
if __name__ == '__main__':
weap_export = WEAP_export()
weap_export.QueryWaMDaMDataForWEAP()
    # so here I want to run the Extract_Network function and get back its results
| 32.25974 | 126 | 0.732287 |
4a207663502acae1032c8425c88e538e39596bea | 929 | py | Python | test/test_contact_relations_tags.py | Pluxbox/radiomanager-python-client | a25450c079110fb12d8e5b00f8b96c2619ed6172 | ["MIT"] | null | null | null | test/test_contact_relations_tags.py | Pluxbox/radiomanager-python-client | a25450c079110fb12d8e5b00f8b96c2619ed6172 | ["MIT"] | 1 | 2018-09-05T08:51:24.000Z | 2018-09-06T14:56:30.000Z | test/test_contact_relations_tags.py | Pluxbox/radiomanager-python-client | a25450c079110fb12d8e5b00f8b96c2619ed6172 | ["MIT"] | null | null | null |
# coding: utf-8
"""
RadioManager
RadioManager # noqa: E501
OpenAPI spec version: 2.0
Contact: [email protected]
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import unittest
import radiomanager_sdk
from radiomanager_sdk.models.contact_relations_tags import ContactRelationsTags # noqa: E501
from radiomanager_sdk.rest import ApiException
class TestContactRelationsTags(unittest.TestCase):
"""ContactRelationsTags unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def testContactRelationsTags(self):
"""Test ContactRelationsTags"""
# FIXME: construct object with mandatory attributes with example values
# model = radiomanager_sdk.models.contact_relations_tags.ContactRelationsTags() # noqa: E501
pass
if __name__ == '__main__':
unittest.main()
| 22.658537 | 101 | 0.723358 |
4a2076ec4a10a6f13db74762f8d739a81c65a9a6 | 4,370 | py | Python | docker/utils/socket.py | kennylajara/docker-py | a48a5a9647761406d66e8271f19fab7fa0c5f582 | ["Apache-2.0"] | 5,611 | 2015-01-02T16:46:16.000Z | 2022-03-31T21:49:58.000Z | docker/utils/socket.py | sdrees/docker-py | 8595cca8186b5d53c04ef71a1a3db86b7c53b012 | ["Apache-2.0"] | 2,176 | 2015-01-01T00:57:56.000Z | 2022-03-31T13:21:54.000Z | docker/utils/socket.py | sdrees/docker-py | 8595cca8186b5d53c04ef71a1a3db86b7c53b012 | ["Apache-2.0"] | 1,774 | 2015-01-05T12:49:03.000Z | 2022-03-29T13:27:47.000Z |
import errno
import os
import select
import socket as pysocket
import struct
try:
from ..transport import NpipeSocket
except ImportError:
NpipeSocket = type(None)
STDOUT = 1
STDERR = 2
class SocketError(Exception):
pass
def read(socket, n=4096):
"""
Reads at most n bytes from socket
"""
recoverable_errors = (errno.EINTR, errno.EDEADLK, errno.EWOULDBLOCK)
if not isinstance(socket, NpipeSocket):
select.select([socket], [], [])
try:
if hasattr(socket, 'recv'):
return socket.recv(n)
if isinstance(socket, getattr(pysocket, 'SocketIO')):
return socket.read(n)
return os.read(socket.fileno(), n)
except OSError as e:
if e.errno not in recoverable_errors:
raise
def read_exactly(socket, n):
"""
Reads exactly n bytes from socket
Raises SocketError if there isn't enough data
"""
data = bytes()
while len(data) < n:
next_data = read(socket, n - len(data))
if not next_data:
raise SocketError("Unexpected EOF")
data += next_data
return data
def next_frame_header(socket):
"""
Returns the stream and size of the next frame of data waiting to be read
from socket, according to the protocol defined here:
https://docs.docker.com/engine/api/v1.24/#attach-to-a-container
"""
try:
data = read_exactly(socket, 8)
except SocketError:
return (-1, -1)
stream, actual = struct.unpack('>BxxxL', data)
return (stream, actual)
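# Added note (not part of the original module): the 8-byte header unpacked above
# is one stream byte (1 = stdout, 2 = stderr), three padding bytes and a
# big-endian 32-bit payload length, e.g.
#   struct.pack('>BxxxL', STDERR, 11) == b'\x02\x00\x00\x00\x00\x00\x00\x0b'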
def frames_iter(socket, tty):
"""
Return a generator of frames read from socket. A frame is a tuple where
the first item is the stream number and the second item is a chunk of data.
If the tty setting is enabled, the streams are multiplexed into the stdout
stream.
"""
if tty:
return ((STDOUT, frame) for frame in frames_iter_tty(socket))
else:
return frames_iter_no_tty(socket)
def frames_iter_no_tty(socket):
"""
Returns a generator of data read from the socket when the tty setting is
not enabled.
"""
while True:
(stream, n) = next_frame_header(socket)
if n < 0:
break
while n > 0:
result = read(socket, n)
if result is None:
continue
data_length = len(result)
if data_length == 0:
# We have reached EOF
return
n -= data_length
yield (stream, result)
def frames_iter_tty(socket):
"""
Return a generator of data read from the socket when the tty setting is
enabled.
"""
while True:
result = read(socket)
if len(result) == 0:
# We have reached EOF
return
yield result
def consume_socket_output(frames, demux=False):
"""
Iterate through frames read from the socket and return the result.
Args:
demux (bool):
If False, stdout and stderr are multiplexed, and the result is the
concatenation of all the frames. If True, the streams are
demultiplexed, and the result is a 2-tuple where each item is the
concatenation of frames belonging to the same stream.
"""
if demux is False:
# If the streams are multiplexed, the generator returns strings, that
# we just need to concatenate.
return bytes().join(frames)
# If the streams are demultiplexed, the generator yields tuples
# (stdout, stderr)
out = [None, None]
for frame in frames:
# It is guaranteed that for each frame, one and only one stream
# is not None.
assert frame != (None, None)
if frame[0] is not None:
if out[0] is None:
out[0] = frame[0]
else:
out[0] += frame[0]
else:
if out[1] is None:
out[1] = frame[1]
else:
out[1] += frame[1]
return tuple(out)
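# Illustrative sketch (not part of the original module): these helpers are
# typically chained along the lines of
#   gen = frames_iter(sock, tty=False)
#   gen = (demux_adaptor(*frame) for frame in gen)
#   stdout, stderr = consume_socket_output(gen, demux=True)
# where `sock` is an attached container socket (an assumption for this example).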
def demux_adaptor(stream_id, data):
"""
Utility to demultiplex stdout and stderr when reading frames from the
socket.
"""
if stream_id == STDOUT:
return (data, None)
elif stream_id == STDERR:
return (None, data)
else:
raise ValueError(f'{stream_id} is not a valid stream')
| 26.011905 | 79 | 0.596568 |
4a20783f754a1254fc51cec6c9dea2bb35c7ad12 | 213 | py | Python | gaphor/SysML/blocks/tests/test_proxy_port.py | Texopolis/gaphor | 3b190620075fd413258af1e7a007b4b2167a7564 | ["Apache-2.0"] | 867 | 2018-01-09T00:19:09.000Z | 2022-03-31T02:49:23.000Z | gaphor/SysML/blocks/tests/test_proxy_port.py | burakozturk16/gaphor | 86267a5200ac4439626d35d306dbb376c3800107 | ["Apache-2.0"] | 790 | 2018-01-13T23:47:07.000Z | 2022-03-31T16:04:27.000Z | gaphor/SysML/blocks/tests/test_proxy_port.py | burakozturk16/gaphor | 86267a5200ac4439626d35d306dbb376c3800107 | ["Apache-2.0"] | 117 | 2018-01-09T02:24:49.000Z | 2022-03-23T08:07:42.000Z |
from gaphas import Item
from gaphor.SysML.blocks.proxyport import ProxyPortItem
def test_proxy_port_item_conforms_to_item_protocol(diagram):
item = ProxyPortItem(diagram)
assert isinstance(item, Item)
| 21.3 | 60 | 0.812207 |
4a207953e7f6161c6d8c690e1991e2869676585e | 64,985 | py | Python | cinder/tests/unit/volume/test_connection.py | alexisries/openstack-cinder | 7cc6e45c5ddb8bf771bdb01b867628e41761ae11 | ["Apache-2.0"] | 1 | 2018-10-23T17:00:53.000Z | 2018-10-23T17:00:53.000Z | cinder/tests/unit/volume/test_connection.py | vexata/cinder | 7b84c0842b685de7ee012acec40fb4064edde5e9 | ["Apache-2.0"] | null | null | null | cinder/tests/unit/volume/test_connection.py | vexata/cinder | 7b84c0842b685de7ee012acec40fb4064edde5e9 | ["Apache-2.0"] | null | null | null |
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Tests for Volume connection test cases."""
import ddt
import mock
from cinder import context
from cinder import db
from cinder import exception
from cinder.message import message_field
from cinder import objects
from cinder.objects import fields
from cinder.tests import fake_driver
from cinder.tests.unit import fake_constants as fake
from cinder.tests.unit import fake_volume
from cinder.tests.unit import utils as tests_utils
from cinder.tests.unit import volume as base
import cinder.volume
import cinder.volume.targets
import cinder.volume.targets.iscsi
@ddt.ddt
class DiscardFlagTestCase(base.BaseVolumeTestCase):
def setUp(self):
super(DiscardFlagTestCase, self).setUp()
self.volume.driver = mock.MagicMock()
@ddt.data(dict(config_discard_flag=True,
driver_discard_flag=None,
expected_flag=True),
dict(config_discard_flag=False,
driver_discard_flag=None,
expected_flag=None),
dict(config_discard_flag=True,
driver_discard_flag=True,
expected_flag=True),
dict(config_discard_flag=False,
driver_discard_flag=True,
expected_flag=True),
dict(config_discard_flag=False,
driver_discard_flag=False,
expected_flag=False),
dict(config_discard_flag=None,
driver_discard_flag=True,
expected_flag=True),
dict(config_discard_flag=None,
driver_discard_flag=False,
expected_flag=False))
@ddt.unpack
def test_initialize_connection_discard_flag(self,
config_discard_flag,
driver_discard_flag,
expected_flag):
self.volume.driver.create_export.return_value = None
connector = {'ip': 'IP', 'initiator': 'INITIATOR'}
conn_info = {
'driver_volume_type': 'iscsi',
'data': {'access_mode': 'rw',
'encrypted': False}
}
if driver_discard_flag is not None:
conn_info['data']['discard'] = driver_discard_flag
self.volume.driver.initialize_connection.return_value = conn_info
def _safe_get(key):
            if key == 'report_discard_supported':
return config_discard_flag
else:
return None
self.volume.driver.configuration.safe_get.side_effect = _safe_get
with mock.patch.object(objects, 'Volume') as mock_vol:
volume = tests_utils.create_volume(self.context)
volume.volume_type_id = None
mock_vol.get_by_id.return_value = volume
conn_info = self.volume.initialize_connection(self.context,
volume,
connector)
self.assertEqual(expected_flag, conn_info['data'].get('discard'))
class VolumeConnectionTestCase(base.BaseVolumeTestCase):
@mock.patch.object(cinder.volume.targets.iscsi.ISCSITarget,
'_get_target_chap_auth')
@mock.patch.object(db, 'volume_admin_metadata_get')
@mock.patch.object(db.sqlalchemy.api, 'volume_get')
@mock.patch.object(db, 'volume_update')
def test_initialize_connection_fetchqos(self,
_mock_volume_update,
_mock_volume_get,
_mock_volume_admin_metadata_get,
mock_get_target):
"""Make sure initialize_connection returns correct information."""
_fake_admin_meta = [{'key': 'fake-key', 'value': 'fake-value'}]
_fake_volume = {'volume_type_id': fake.VOLUME_TYPE_ID,
'name': 'fake_name',
'host': 'fake_host',
'id': fake.VOLUME_ID,
'volume_admin_metadata': _fake_admin_meta}
fake_volume_obj = fake_volume.fake_volume_obj(self.context,
**_fake_volume)
_mock_volume_get.return_value = _fake_volume
_mock_volume_update.return_value = _fake_volume
_mock_volume_admin_metadata_get.return_value = {
'fake-key': 'fake-value'}
connector = {'ip': 'IP', 'initiator': 'INITIATOR'}
qos_values = {'consumer': 'front-end',
'specs': {
'key1': 'value1',
'key2': 'value2'}
}
with mock.patch.object(cinder.volume.volume_types,
'get_volume_type_qos_specs') as type_qos, \
mock.patch.object(cinder.tests.fake_driver.FakeLoggingVolumeDriver,
'initialize_connection') as driver_init:
type_qos.return_value = dict(qos_specs=qos_values)
driver_init.return_value = {'data': {}}
mock_get_target.return_value = None
qos_specs_expected = {'key1': 'value1',
'key2': 'value2'}
# initialize_connection() passes qos_specs that is designated to
# be consumed by front-end or both front-end and back-end
conn_info = self.volume.initialize_connection(
self.context, fake_volume_obj, connector,)
self.assertDictEqual(qos_specs_expected,
conn_info['data']['qos_specs'])
qos_values.update({'consumer': 'both'})
conn_info = self.volume.initialize_connection(
self.context, fake_volume_obj, connector)
self.assertDictEqual(qos_specs_expected,
conn_info['data']['qos_specs'])
# initialize_connection() skips qos_specs that is designated to be
# consumed by back-end only
qos_values.update({'consumer': 'back-end'})
type_qos.return_value = dict(qos_specs=qos_values)
conn_info = self.volume.initialize_connection(
self.context, fake_volume_obj, connector)
self.assertIsNone(conn_info['data']['qos_specs'])
@mock.patch.object(cinder.volume.targets.iscsi.ISCSITarget,
'_get_target_chap_auth')
@mock.patch.object(db, 'volume_admin_metadata_get')
@mock.patch.object(db.sqlalchemy.api, 'volume_get')
@mock.patch.object(db, 'volume_update')
def test_initialize_connection_qos_per_gb(self,
_mock_volume_update,
_mock_volume_get,
_mock_volume_admin_metadata_get,
mock_get_target):
"""QoS test with no minimum value."""
_fake_admin_meta = [{'key': 'fake-key', 'value': 'fake-value'}]
_fake_volume = {'size': 3,
'volume_type_id': fake.VOLUME_TYPE_ID,
'name': 'fake_name',
'host': 'fake_host',
'id': fake.VOLUME_ID,
'volume_admin_metadata': _fake_admin_meta}
fake_volume_obj = fake_volume.fake_volume_obj(self.context,
**_fake_volume)
_mock_volume_get.return_value = _fake_volume
_mock_volume_update.return_value = _fake_volume
_mock_volume_admin_metadata_get.return_value = {
'fake-key': 'fake-value'}
connector = {'ip': 'IP', 'initiator': 'INITIATOR'}
qos_values = {'consumer': 'front-end',
'specs': {
'write_iops_sec_per_gb': 5,
'read_iops_sec_per_gb': 7700,
'total_iops_sec_per_gb': 300000,
'read_bytes_sec_per_gb': 10,
'write_bytes_sec_per_gb': 40,
'total_bytes_sec_per_gb': 1048576}
}
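        # Added note: the fake volume above is 3 GB, so each *_per_gb spec is
        # expected to be scaled by the size (e.g. 5 write IOPS/GB * 3 GB = 15).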
with mock.patch.object(cinder.volume.volume_types,
'get_volume_type_qos_specs') as type_qos, \
mock.patch.object(cinder.tests.fake_driver.FakeLoggingVolumeDriver,
'initialize_connection') as driver_init:
type_qos.return_value = dict(qos_specs=qos_values)
driver_init.return_value = {'data': {}}
mock_get_target.return_value = None
qos_specs_expected = {'write_iops_sec': 15,
'read_iops_sec': 23100,
'total_iops_sec': 900000,
'read_bytes_sec': 30,
'write_bytes_sec': 120,
'total_bytes_sec': 3145728}
# initialize_connection() passes qos_specs that is designated to
# be consumed by front-end or both front-end and back-end
conn_info = self.volume.initialize_connection(
self.context, fake_volume_obj, connector,)
self.assertDictEqual(qos_specs_expected,
conn_info['data']['qos_specs'])
qos_values.update({'consumer': 'both'})
conn_info = self.volume.initialize_connection(
self.context, fake_volume_obj, connector)
self.assertDictEqual(qos_specs_expected,
conn_info['data']['qos_specs'])
@mock.patch.object(cinder.volume.targets.iscsi.ISCSITarget,
'_get_target_chap_auth')
@mock.patch.object(db, 'volume_admin_metadata_get')
@mock.patch.object(db.sqlalchemy.api, 'volume_get')
@mock.patch.object(db, 'volume_update')
def test_initialize_connection_qos_per_gb_with_min_small(
self, _mock_volume_update, _mock_volume_get,
_mock_volume_admin_metadata_get, mock_get_target):
"""QoS test when volume size results in using minimum."""
_fake_admin_meta = [{'key': 'fake-key', 'value': 'fake-value'}]
_fake_volume = {'size': 1,
'volume_type_id': fake.VOLUME_TYPE_ID,
'name': 'fake_name',
'host': 'fake_host',
'id': fake.VOLUME_ID,
'volume_admin_metadata': _fake_admin_meta}
fake_volume_obj = fake_volume.fake_volume_obj(self.context,
**_fake_volume)
_mock_volume_get.return_value = _fake_volume
_mock_volume_update.return_value = _fake_volume
_mock_volume_admin_metadata_get.return_value = {
'fake-key': 'fake-value'}
connector = {'ip': 'IP', 'initiator': 'INITIATOR'}
qos_values = {'consumer': 'front-end',
'specs': {
'write_iops_sec_per_gb_min': 15,
'write_iops_sec_per_gb': 5,
'read_iops_sec_per_gb_min': 23100,
'read_iops_sec_per_gb': 7700,
'total_iops_sec_per_gb_min': 900000,
'total_iops_sec_per_gb': 300000,
'read_bytes_sec_per_gb_min': 30,
'read_bytes_sec_per_gb': 10,
'write_bytes_sec_per_gb_min': 120,
'write_bytes_sec_per_gb': 40,
'total_bytes_sec_per_gb_min': 3145728,
'total_bytes_sec_per_gb': 1048576}
}
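        # Added note: with a 1 GB volume the scaled per-GB values (e.g. 5 * 1 = 5)
        # fall below the *_per_gb_min floors, so the minimum values are expected.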
with mock.patch.object(cinder.volume.volume_types,
'get_volume_type_qos_specs') as type_qos, \
mock.patch.object(cinder.tests.fake_driver.FakeLoggingVolumeDriver,
'initialize_connection') as driver_init:
type_qos.return_value = dict(qos_specs=qos_values)
driver_init.return_value = {'data': {}}
mock_get_target.return_value = None
qos_specs_expected = {'write_iops_sec': 15,
'read_iops_sec': 23100,
'total_iops_sec': 900000,
'read_bytes_sec': 30,
'write_bytes_sec': 120,
'total_bytes_sec': 3145728}
# initialize_connection() passes qos_specs that is designated to
# be consumed by front-end or both front-end and back-end
conn_info = self.volume.initialize_connection(
self.context, fake_volume_obj, connector,)
self.assertDictEqual(qos_specs_expected,
conn_info['data']['qos_specs'])
qos_values.update({'consumer': 'both'})
conn_info = self.volume.initialize_connection(
self.context, fake_volume_obj, connector)
self.assertDictEqual(qos_specs_expected,
conn_info['data']['qos_specs'])
@mock.patch.object(cinder.volume.targets.iscsi.ISCSITarget,
'_get_target_chap_auth')
@mock.patch.object(db, 'volume_admin_metadata_get')
@mock.patch.object(db.sqlalchemy.api, 'volume_get')
@mock.patch.object(db, 'volume_update')
def test_initialize_connection_qos_per_gb_with_min_large(
self, _mock_volume_update, _mock_volume_get,
_mock_volume_admin_metadata_get, mock_get_target):
"""QoS test when volume size results in using per-gb values."""
_fake_admin_meta = [{'key': 'fake-key', 'value': 'fake-value'}]
_fake_volume = {'size': 100,
'volume_type_id': fake.VOLUME_TYPE_ID,
'name': 'fake_name',
'host': 'fake_host',
'id': fake.VOLUME_ID,
'volume_admin_metadata': _fake_admin_meta}
fake_volume_obj = fake_volume.fake_volume_obj(self.context,
**_fake_volume)
_mock_volume_get.return_value = _fake_volume
_mock_volume_update.return_value = _fake_volume
_mock_volume_admin_metadata_get.return_value = {
'fake-key': 'fake-value'}
connector = {'ip': 'IP', 'initiator': 'INITIATOR'}
qos_values = {'consumer': 'front-end',
'specs': {
'write_iops_sec_per_gb_min': 15,
'write_iops_sec_per_gb': 5,
'read_iops_sec_per_gb_min': 23100,
'read_iops_sec_per_gb': 7700,
'total_iops_sec_per_gb_min': 900000,
'total_iops_sec_per_gb': 300000,
'read_bytes_sec_per_gb_min': 30,
'read_bytes_sec_per_gb': 10,
'write_bytes_sec_per_gb_min': 120,
'write_bytes_sec_per_gb': 40,
'total_bytes_sec_per_gb_min': 3145728,
'total_bytes_sec_per_gb': 1048576}
}
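        # Added note: with a 100 GB volume the scaled values (e.g. 5 * 100 = 500)
        # exceed the *_per_gb_min floors, so plain per-GB scaling is expected.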
with mock.patch.object(cinder.volume.volume_types,
'get_volume_type_qos_specs') as type_qos, \
mock.patch.object(cinder.tests.fake_driver.FakeLoggingVolumeDriver,
'initialize_connection') as driver_init:
type_qos.return_value = dict(qos_specs=qos_values)
driver_init.return_value = {'data': {}}
mock_get_target.return_value = None
qos_specs_expected = {'write_iops_sec': 500,
'read_iops_sec': 770000,
'total_iops_sec': 30000000,
'read_bytes_sec': 1000,
'write_bytes_sec': 4000,
'total_bytes_sec': 104857600}
# initialize_connection() passes qos_specs that is designated to
# be consumed by front-end or both front-end and back-end
conn_info = self.volume.initialize_connection(
self.context, fake_volume_obj, connector,)
self.assertDictEqual(qos_specs_expected,
conn_info['data']['qos_specs'])
qos_values.update({'consumer': 'both'})
conn_info = self.volume.initialize_connection(
self.context, fake_volume_obj, connector)
self.assertDictEqual(qos_specs_expected,
conn_info['data']['qos_specs'])
@mock.patch.object(fake_driver.FakeLoggingVolumeDriver, 'create_export')
def test_initialize_connection_export_failure(self,
_mock_create_export):
"""Test exception path for create_export failure."""
volume = tests_utils.create_volume(
self.context, admin_metadata={'fake-key': 'fake-value'},
volume_type_id=fake.VOLUME_TYPE_ID, **self.volume_params)
_mock_create_export.side_effect = exception.CinderException
connector = {'ip': 'IP', 'initiator': 'INITIATOR'}
self.assertRaises(exception.VolumeBackendAPIException,
self.volume.initialize_connection,
self.context, volume, connector)
def test_initialize_connection_maintenance(self):
"""Test initialize connection in maintenance."""
test_meta1 = {'fake_key1': 'fake_value1', 'fake_key2': 'fake_value2'}
volume = tests_utils.create_volume(self.context, metadata=test_meta1,
**self.volume_params)
volume['status'] = 'maintenance'
volume_api = cinder.volume.api.API()
self.assertRaises(exception.InvalidVolume,
volume_api.initialize_connection,
self.context,
volume,
None)
@ddt.ddt
class VolumeAttachDetachTestCase(base.BaseVolumeTestCase):
def setUp(self):
super(VolumeAttachDetachTestCase, self).setUp()
self.patch('cinder.volume.utils.clear_volume', autospec=True)
self.user_context = context.RequestContext(user_id=fake.USER_ID,
project_id=fake.PROJECT_ID)
@ddt.data(False, True)
def test_run_attach_detach_volume_for_instance(self, volume_object):
"""Make sure volume can be attached and detached from instance."""
mountpoint = "/dev/sdf"
# attach volume to the instance then to detach
instance_uuid = '12345678-1234-5678-1234-567812345678'
volume = tests_utils.create_volume(self.user_context,
**self.volume_params)
with volume.obj_as_admin():
volume.admin_metadata['readonly'] = True
volume.save()
volume_id = volume.id
self.volume.create_volume(self.user_context,
volume=volume)
volume_passed = volume if volume_object else None
attachment = self.volume.attach_volume(self.user_context,
volume_id,
instance_uuid, None,
mountpoint, 'ro',
volume=volume_passed)
attachment2 = self.volume.attach_volume(self.user_context,
volume_id,
instance_uuid, None,
mountpoint, 'ro',
volume=volume_passed)
self.assertEqual(attachment.id, attachment2.id)
vol = objects.Volume.get_by_id(self.context, volume_id)
self.assertEqual("in-use", vol.status)
self.assertEqual(fields.VolumeAttachStatus.ATTACHED,
attachment.attach_status)
self.assertEqual(mountpoint, attachment.mountpoint)
self.assertEqual(instance_uuid, attachment.instance_uuid)
self.assertIsNone(attachment.attached_host)
admin_metadata = vol.volume_admin_metadata
self.assertEqual(2, len(admin_metadata))
expected = dict(readonly='True', attached_mode='ro')
ret = {}
for item in admin_metadata:
ret.update({item['key']: item['value']})
self.assertDictEqual(expected, ret)
connector = {'initiator': 'iqn.2012-07.org.fake:01'}
volume = volume if volume_object else vol
conn_info = self.volume.initialize_connection(self.context,
volume, connector)
self.assertEqual('ro', conn_info['data']['access_mode'])
self.assertRaises(exception.VolumeAttached,
self.volume.delete_volume,
self.context,
volume=volume)
self.volume.detach_volume(self.context, volume_id,
attachment.id,
volume=volume_passed)
vol = objects.Volume.get_by_id(self.context, volume_id)
self.assertEqual('available', vol.status)
self.volume.delete_volume(self.context, volume)
self.assertRaises(exception.VolumeNotFound,
db.volume_get,
self.context,
volume_id)
@mock.patch('cinder.volume.manager.LOG', mock.Mock())
def test_initialize_connection(self):
volume = mock.Mock(save=mock.Mock(side_effect=Exception))
with mock.patch.object(self.volume, 'driver') as driver_mock:
self.assertRaises(exception.ExportFailure,
self.volume.initialize_connection, self.context,
volume, mock.Mock())
driver_mock.remove_export.assert_called_once_with(mock.ANY, volume)
def test_run_attach_detach_2volumes_for_instance(self):
"""Make sure volume can be attached and detached from instance."""
# attach first volume to the instance
mountpoint1 = "/dev/vdc"
instance_uuid = '12345678-1234-5678-1234-567812345678'
volume1 = tests_utils.create_volume(
self.context, admin_metadata={'readonly': 'True'},
**self.volume_params)
volume1_id = volume1['id']
self.volume.create_volume(self.context, volume1)
attachment = self.volume.attach_volume(self.context, volume1_id,
instance_uuid, None,
mountpoint1, 'ro')
vol1 = db.volume_get(context.get_admin_context(), volume1_id)
self.assertEqual("in-use", vol1['status'])
self.assertEqual('attached', attachment['attach_status'])
self.assertEqual(mountpoint1, attachment['mountpoint'])
self.assertEqual(instance_uuid, attachment['instance_uuid'])
self.assertIsNone(attachment['attached_host'])
admin_metadata = vol1['volume_admin_metadata']
self.assertEqual(2, len(admin_metadata))
expected = dict(readonly='True', attached_mode='ro')
ret = {}
for item in admin_metadata:
ret.update({item['key']: item['value']})
self.assertDictEqual(expected, ret)
connector = {'initiator': 'iqn.2012-07.org.fake:01'}
conn_info = self.volume.initialize_connection(self.context,
volume1, connector)
self.assertEqual('ro', conn_info['data']['access_mode'])
self.assertRaises(exception.VolumeAttached,
self.volume.delete_volume,
self.context,
volume1)
# attach 2nd volume to the instance
mountpoint2 = "/dev/vdd"
volume2 = tests_utils.create_volume(
self.context, admin_metadata={'readonly': 'False'},
**self.volume_params)
volume2_id = volume2['id']
self.volume.create_volume(self.context, volume2)
attachment2 = self.volume.attach_volume(self.context, volume2_id,
instance_uuid, None,
mountpoint2, 'rw')
vol2 = db.volume_get(context.get_admin_context(), volume2_id)
self.assertEqual("in-use", vol2['status'])
self.assertEqual('attached', attachment2['attach_status'])
self.assertEqual(mountpoint2, attachment2['mountpoint'])
self.assertEqual(instance_uuid, attachment2['instance_uuid'])
self.assertIsNone(attachment2['attached_host'])
admin_metadata = vol2['volume_admin_metadata']
self.assertEqual(2, len(admin_metadata))
expected = dict(readonly='False', attached_mode='rw')
ret = {}
for item in admin_metadata:
ret.update({item['key']: item['value']})
self.assertDictEqual(expected, ret)
connector = {'initiator': 'iqn.2012-07.org.fake:02'}
conn_info = self.volume.initialize_connection(self.context,
volume2, connector)
self.assertEqual('rw', conn_info['data']['access_mode'])
self.assertRaises(exception.VolumeAttached,
self.volume.delete_volume,
self.context,
volume2)
# detach first volume and then 2nd volume
self.volume.detach_volume(self.context, volume1_id, attachment['id'])
vol1 = db.volume_get(self.context, volume1_id)
self.assertEqual('available', vol1['status'])
self.volume.delete_volume(self.context, volume1)
self.assertRaises(exception.VolumeNotFound,
db.volume_get,
self.context,
volume1_id)
self.volume.detach_volume(self.context, volume2_id, attachment2['id'])
vol2 = db.volume_get(self.context, volume2_id)
self.assertEqual('available', vol2['status'])
self.volume.delete_volume(self.context, volume2)
self.assertRaises(exception.VolumeNotFound,
db.volume_get,
self.context,
volume2_id)
def test_detach_invalid_attachment_id(self):
"""Make sure if the attachment id isn't found we raise."""
attachment_id = "notfoundid"
volume = tests_utils.create_volume(self.context,
admin_metadata={'readonly': 'True'},
multiattach=False,
**self.volume_params)
self.volume.detach_volume(self.context, volume['id'],
attachment_id)
volume = db.volume_get(self.context, volume['id'])
self.assertEqual('available', volume['status'])
instance_uuid = '12345678-1234-5678-1234-567812345678'
attached_host = 'fake_host'
mountpoint = '/dev/fake'
tests_utils.attach_volume(self.context, volume['id'],
instance_uuid, attached_host,
mountpoint)
self.volume.detach_volume(self.context, volume['id'],
attachment_id)
volume = db.volume_get(self.context, volume['id'])
self.assertEqual('in-use', volume['status'])
def test_detach_no_attachments(self):
self.volume_params['status'] = 'detaching'
volume = tests_utils.create_volume(self.context,
admin_metadata={'readonly': 'True'},
multiattach=False,
**self.volume_params)
self.volume.detach_volume(self.context, volume['id'])
volume = db.volume_get(self.context, volume['id'])
self.assertEqual('available', volume['status'])
def test_run_attach_detach_volume_for_instance_no_attachment_id(self):
"""Make sure volume can be attached and detached from instance."""
mountpoint = "/dev/sdf"
# attach volume to the instance then to detach
instance_uuid = '12345678-1234-5678-1234-567812345678'
instance_uuid_2 = '12345678-4321-8765-4321-567812345678'
volume = tests_utils.create_volume(self.context,
admin_metadata={'readonly': 'True'},
multiattach=True,
**self.volume_params)
volume_id = volume['id']
self.volume.create_volume(self.context, volume)
attachment = self.volume.attach_volume(self.context, volume_id,
instance_uuid, None,
mountpoint, 'ro')
vol = db.volume_get(context.get_admin_context(), volume_id)
self.assertEqual('in-use', vol['status'])
self.assertEqual(fields.VolumeAttachStatus.ATTACHED,
attachment['attach_status'])
self.assertEqual(mountpoint, attachment['mountpoint'])
self.assertEqual(instance_uuid, attachment['instance_uuid'])
self.assertIsNone(attachment['attached_host'])
admin_metadata = vol['volume_admin_metadata']
self.assertEqual(2, len(admin_metadata))
expected = dict(readonly='True', attached_mode='ro')
ret = {}
for item in admin_metadata:
ret.update({item['key']: item['value']})
self.assertDictEqual(expected, ret)
attachment2 = self.volume.attach_volume(self.context, volume_id,
instance_uuid_2, None,
mountpoint, 'ro')
connector = {'initiator': 'iqn.2012-07.org.fake:01'}
conn_info = self.volume.initialize_connection(self.context,
volume, connector)
self.assertEqual('ro', conn_info['data']['access_mode'])
self.assertRaises(exception.VolumeAttached,
self.volume.delete_volume,
self.context,
volume)
self.assertRaises(exception.InvalidVolume,
self.volume.detach_volume,
self.context, volume_id)
self.volume.detach_volume(self.context, volume_id, attachment['id'])
vol = db.volume_get(self.context, volume_id)
self.assertEqual('in-use', vol['status'])
self.volume.detach_volume(self.context, volume_id, attachment2['id'])
vol = db.volume_get(self.context, volume_id)
self.assertEqual('available', vol['status'])
attachment = self.volume.attach_volume(self.context, volume_id,
instance_uuid, None,
mountpoint, 'ro')
vol = db.volume_get(self.context, volume_id)
self.assertEqual('in-use', vol['status'])
self.volume.detach_volume(self.context, volume_id)
vol = db.volume_get(self.context, volume_id)
self.assertEqual('available', vol['status'])
self.volume.delete_volume(self.context, volume)
self.assertRaises(exception.VolumeNotFound,
db.volume_get,
self.context,
volume_id)
def test_run_attach_detach_multiattach_volume_for_instances(self):
"""Make sure volume can be attached to multiple instances."""
mountpoint = "/dev/sdf"
        # attach the volume to the instance, then detach it
instance_uuid = '12345678-1234-5678-1234-567812345678'
volume = tests_utils.create_volume(self.context,
admin_metadata={'readonly': 'True'},
multiattach=True,
**self.volume_params)
volume_id = volume['id']
self.volume.create_volume(self.context, volume)
attachment = self.volume.attach_volume(self.context, volume_id,
instance_uuid, None,
mountpoint, 'ro')
vol = db.volume_get(context.get_admin_context(), volume_id)
self.assertEqual('in-use', vol['status'])
self.assertTrue(vol['multiattach'])
self.assertEqual(fields.VolumeAttachStatus.ATTACHED,
attachment['attach_status'])
self.assertEqual(mountpoint, attachment['mountpoint'])
self.assertEqual(instance_uuid, attachment['instance_uuid'])
self.assertIsNone(attachment['attached_host'])
admin_metadata = vol['volume_admin_metadata']
self.assertEqual(2, len(admin_metadata))
expected = dict(readonly='True', attached_mode='ro')
ret = {}
for item in admin_metadata:
ret.update({item['key']: item['value']})
self.assertDictEqual(expected, ret)
connector = {'initiator': 'iqn.2012-07.org.fake:01'}
conn_info = self.volume.initialize_connection(self.context,
volume, connector)
self.assertEqual('ro', conn_info['data']['access_mode'])
instance2_uuid = '12345678-1234-5678-1234-567812345000'
mountpoint2 = "/dev/sdx"
attachment2 = self.volume.attach_volume(self.context, volume_id,
instance2_uuid, None,
mountpoint2, 'ro')
vol = db.volume_get(context.get_admin_context(), volume_id)
self.assertEqual('in-use', vol['status'])
self.assertTrue(vol['multiattach'])
self.assertEqual(fields.VolumeAttachStatus.ATTACHED,
attachment2['attach_status'])
self.assertEqual(mountpoint2, attachment2['mountpoint'])
self.assertEqual(instance2_uuid, attachment2['instance_uuid'])
self.assertIsNone(attachment2['attached_host'])
self.assertNotEqual(attachment, attachment2)
self.assertRaises(exception.VolumeAttached,
self.volume.delete_volume,
self.context,
volume)
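        # Detaching only one of the two attachments must leave the
        # multiattach volume 'in-use'.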
self.volume.detach_volume(self.context, volume_id, attachment['id'])
vol = db.volume_get(self.context, volume_id)
self.assertEqual('in-use', vol['status'])
self.assertRaises(exception.VolumeAttached,
self.volume.delete_volume,
self.context,
volume)
self.volume.detach_volume(self.context, volume_id, attachment2['id'])
vol = db.volume_get(self.context, volume_id)
self.assertEqual('available', vol['status'])
self.volume.delete_volume(self.context, volume)
self.assertRaises(exception.VolumeNotFound,
db.volume_get,
self.context,
volume_id)
def test_run_attach_twice_multiattach_volume_for_instances(self):
"""Make sure volume can be attached to multiple instances."""
mountpoint = "/dev/sdf"
        # attach the volume to the instance, then detach it
instance_uuid = '12345678-1234-5678-1234-567812345699'
volume = tests_utils.create_volume(self.context,
admin_metadata={'readonly': 'True'},
multiattach=True,
**self.volume_params)
volume_id = volume['id']
self.volume.create_volume(self.context, volume)
attachment = self.volume.attach_volume(self.context, volume_id,
instance_uuid, None,
mountpoint, 'ro')
vol = db.volume_get(context.get_admin_context(), volume_id)
self.assertEqual('in-use', vol['status'])
self.assertTrue(vol['multiattach'])
self.assertEqual(fields.VolumeAttachStatus.ATTACHED,
attachment['attach_status'])
self.assertEqual(mountpoint, attachment['mountpoint'])
self.assertEqual(instance_uuid, attachment['instance_uuid'])
self.assertIsNone(attachment['attached_host'])
admin_metadata = vol['volume_admin_metadata']
self.assertEqual(2, len(admin_metadata))
expected = dict(readonly='True', attached_mode='ro')
ret = {}
for item in admin_metadata:
ret.update({item['key']: item['value']})
self.assertDictEqual(expected, ret)
connector = {'initiator': 'iqn.2012-07.org.fake:01'}
conn_info = self.volume.initialize_connection(self.context,
volume, connector)
self.assertEqual('ro', conn_info['data']['access_mode'])
mountpoint2 = "/dev/sdx"
attachment2 = self.volume.attach_volume(self.context, volume_id,
instance_uuid, None,
mountpoint2, 'ro')
vol = db.volume_get(context.get_admin_context(), volume_id)
self.assertEqual('in-use', vol['status'])
self.assertTrue(vol['multiattach'])
        self.assertEqual(fields.VolumeAttachStatus.ATTACHED,
                         attachment2['attach_status'])
self.assertEqual(mountpoint, attachment2['mountpoint'])
self.assertEqual(instance_uuid, attachment2['instance_uuid'])
self.assertIsNone(attachment2['attached_host'])
self.assertRaises(exception.VolumeAttached,
self.volume.delete_volume,
self.context,
volume)
def test_attach_detach_not_multiattach_volume_for_instances(self):
"""Make sure volume can't be attached to more than one instance."""
mountpoint = "/dev/sdf"
        # attach the volume to the instance, then detach it
instance_uuid = '12345678-1234-5678-1234-567812345678'
volume = tests_utils.create_volume(self.context,
admin_metadata={'readonly': 'True'},
multiattach=False,
**self.volume_params)
volume_id = volume['id']
self.volume.create_volume(self.context, volume)
attachment = self.volume.attach_volume(self.context, volume_id,
instance_uuid, None,
mountpoint, 'ro')
vol = db.volume_get(context.get_admin_context(), volume_id)
self.assertEqual('in-use', vol['status'])
self.assertFalse(vol['multiattach'])
self.assertEqual(fields.VolumeAttachStatus.ATTACHED,
attachment['attach_status'])
self.assertEqual(mountpoint, attachment['mountpoint'])
self.assertEqual(instance_uuid, attachment['instance_uuid'])
self.assertIsNone(attachment['attached_host'])
admin_metadata = vol['volume_admin_metadata']
self.assertEqual(2, len(admin_metadata))
expected = dict(readonly='True', attached_mode='ro')
ret = {}
for item in admin_metadata:
ret.update({item['key']: item['value']})
self.assertDictEqual(expected, ret)
connector = {'initiator': 'iqn.2012-07.org.fake:01'}
conn_info = self.volume.initialize_connection(self.context,
volume, connector)
self.assertEqual('ro', conn_info['data']['access_mode'])
instance2_uuid = '12345678-1234-5678-1234-567812345000'
mountpoint2 = "/dev/sdx"
self.assertRaises(exception.InvalidVolume,
self.volume.attach_volume,
self.context,
volume_id,
instance2_uuid,
None,
mountpoint2, 'ro')
self.assertRaises(exception.VolumeAttached,
self.volume.delete_volume,
self.context,
volume)
self.volume.detach_volume(self.context, volume_id, attachment['id'])
vol = db.volume_get(self.context, volume_id)
self.assertEqual('available', vol['status'])
self.volume.delete_volume(self.context, volume)
self.assertRaises(exception.VolumeNotFound,
db.volume_get,
self.context,
volume_id)
def test_run_attach_detach_volume_for_host(self):
"""Make sure volume can be attached and detached from host."""
mountpoint = "/dev/sdf"
volume = tests_utils.create_volume(
self.context,
admin_metadata={'readonly': 'False'},
**self.volume_params)
volume_id = volume['id']
self.volume.create_volume(self.context, volume)
attachment = self.volume.attach_volume(self.context, volume_id, None,
'fake_host', mountpoint, 'rw')
vol = db.volume_get(context.get_admin_context(), volume_id)
self.assertEqual('in-use', vol['status'])
self.assertEqual(fields.VolumeAttachStatus.ATTACHED,
attachment['attach_status'])
self.assertEqual(mountpoint, attachment['mountpoint'])
self.assertIsNone(attachment['instance_uuid'])
# sanitized, conforms to RFC-952 and RFC-1123 specs.
self.assertEqual('fake-host', attachment['attached_host'])
admin_metadata = vol['volume_admin_metadata']
self.assertEqual(2, len(admin_metadata))
expected = dict(readonly='False', attached_mode='rw')
ret = {}
for item in admin_metadata:
ret.update({item['key']: item['value']})
self.assertDictEqual(expected, ret)
connector = {'initiator': 'iqn.2012-07.org.fake:01'}
conn_info = self.volume.initialize_connection(self.context,
volume, connector)
self.assertEqual('rw', conn_info['data']['access_mode'])
self.assertRaises(exception.VolumeAttached,
self.volume.delete_volume,
self.context,
volume)
self.volume.detach_volume(self.context, volume_id, attachment['id'])
vol = db.volume_get(self.context, volume_id)
self.assertEqual("available", vol['status'])
self.volume.delete_volume(self.context, volume)
self.assertRaises(exception.VolumeNotFound,
db.volume_get,
self.context,
volume_id)
def test_run_attach_detach_multiattach_volume_for_hosts(self):
"""Make sure volume can be attached and detached from hosts."""
mountpoint = "/dev/sdf"
volume = tests_utils.create_volume(
self.context,
admin_metadata={'readonly': 'False'},
multiattach=True,
**self.volume_params)
volume_id = volume['id']
self.volume.create_volume(self.context, volume)
attachment = self.volume.attach_volume(self.context, volume_id, None,
'fake_host', mountpoint, 'rw')
vol = db.volume_get(context.get_admin_context(), volume_id)
self.assertEqual('in-use', vol['status'])
self.assertTrue(vol['multiattach'])
self.assertEqual(fields.VolumeAttachStatus.ATTACHED,
attachment['attach_status'])
self.assertEqual(mountpoint, attachment['mountpoint'])
self.assertIsNone(attachment['instance_uuid'])
# sanitized, conforms to RFC-952 and RFC-1123 specs.
self.assertEqual('fake-host', attachment['attached_host'])
admin_metadata = vol['volume_admin_metadata']
self.assertEqual(2, len(admin_metadata))
expected = dict(readonly='False', attached_mode='rw')
ret = {}
for item in admin_metadata:
ret.update({item['key']: item['value']})
self.assertDictEqual(expected, ret)
connector = {'initiator': 'iqn.2012-07.org.fake:01'}
conn_info = self.volume.initialize_connection(self.context,
volume, connector)
self.assertEqual('rw', conn_info['data']['access_mode'])
mountpoint2 = "/dev/sdx"
attachment2 = self.volume.attach_volume(self.context, volume_id, None,
'fake_host2', mountpoint2,
'rw')
vol = db.volume_get(context.get_admin_context(), volume_id)
self.assertEqual('in-use', vol['status'])
self.assertEqual(fields.VolumeAttachStatus.ATTACHED,
attachment2['attach_status'])
self.assertEqual(mountpoint2, attachment2['mountpoint'])
self.assertIsNone(attachment2['instance_uuid'])
# sanitized, conforms to RFC-952 and RFC-1123 specs.
self.assertEqual('fake-host2', attachment2['attached_host'])
self.assertRaises(exception.VolumeAttached,
self.volume.delete_volume,
self.context,
volume)
self.volume.detach_volume(self.context, volume_id, attachment['id'])
vol = db.volume_get(self.context, volume_id)
self.assertEqual("in-use", vol['status'])
self.volume.detach_volume(self.context, volume_id, attachment2['id'])
vol = db.volume_get(self.context, volume_id)
self.assertEqual("available", vol['status'])
self.volume.delete_volume(self.context, volume)
self.assertRaises(exception.VolumeNotFound,
db.volume_get,
self.context,
volume_id)
def test_run_attach_twice_multiattach_volume_for_hosts(self):
"""Make sure volume can be attached and detached from hosts."""
mountpoint = "/dev/sdf"
volume = tests_utils.create_volume(
self.context,
admin_metadata={'readonly': 'False'},
multiattach=True,
**self.volume_params)
volume_id = volume['id']
self.volume.create_volume(self.context, volume)
attachment = self.volume.attach_volume(self.context, volume_id, None,
'fake_host', mountpoint, 'rw')
vol = db.volume_get(context.get_admin_context(), volume_id)
self.assertEqual('in-use', vol['status'])
self.assertTrue(vol['multiattach'])
self.assertEqual(fields.VolumeAttachStatus.ATTACHED,
attachment['attach_status'])
self.assertEqual(mountpoint, attachment['mountpoint'])
self.assertIsNone(attachment['instance_uuid'])
# sanitized, conforms to RFC-952 and RFC-1123 specs.
self.assertEqual('fake-host', attachment['attached_host'])
admin_metadata = vol['volume_admin_metadata']
self.assertEqual(2, len(admin_metadata))
expected = dict(readonly='False', attached_mode='rw')
ret = {}
for item in admin_metadata:
ret.update({item['key']: item['value']})
self.assertDictEqual(expected, ret)
connector = {'initiator': 'iqn.2012-07.org.fake:01'}
conn_info = self.volume.initialize_connection(self.context,
volume, connector)
self.assertEqual('rw', conn_info['data']['access_mode'])
mountpoint2 = "/dev/sdx"
attachment2 = self.volume.attach_volume(self.context, volume_id, None,
'fake_host', mountpoint2,
'rw')
vol = db.volume_get(context.get_admin_context(), volume_id)
self.assertEqual('in-use', vol['status'])
        self.assertEqual(fields.VolumeAttachStatus.ATTACHED,
                         attachment2['attach_status'])
self.assertEqual(mountpoint, attachment2['mountpoint'])
self.assertIsNone(attachment2['instance_uuid'])
self.assertRaises(exception.VolumeAttached,
self.volume.delete_volume,
self.context,
volume)
def test_run_attach_detach_not_multiattach_volume_for_hosts(self):
"""Make sure volume can't be attached to more than one host."""
mountpoint = "/dev/sdf"
volume = tests_utils.create_volume(
self.context,
admin_metadata={'readonly': 'False'},
multiattach=False,
**self.volume_params)
volume_id = volume['id']
self.volume.create_volume(self.context, volume)
attachment = self.volume.attach_volume(self.context, volume_id, None,
'fake_host', mountpoint, 'rw')
vol = db.volume_get(context.get_admin_context(), volume_id)
self.assertEqual('in-use', vol['status'])
self.assertFalse(vol['multiattach'])
self.assertEqual(fields.VolumeAttachStatus.ATTACHED,
attachment['attach_status'])
self.assertEqual(mountpoint, attachment['mountpoint'])
self.assertIsNone(attachment['instance_uuid'])
# sanitized, conforms to RFC-952 and RFC-1123 specs.
self.assertEqual('fake-host', attachment['attached_host'])
admin_metadata = vol['volume_admin_metadata']
self.assertEqual(2, len(admin_metadata))
expected = dict(readonly='False', attached_mode='rw')
ret = {}
for item in admin_metadata:
ret.update({item['key']: item['value']})
self.assertDictEqual(expected, ret)
connector = {'initiator': 'iqn.2012-07.org.fake:01'}
conn_info = self.volume.initialize_connection(self.context,
volume, connector)
self.assertEqual('rw', conn_info['data']['access_mode'])
mountpoint2 = "/dev/sdx"
self.assertRaises(exception.InvalidVolume,
self.volume.attach_volume,
self.context,
volume_id,
None,
'fake_host2',
mountpoint2,
'rw')
vol = db.volume_get(context.get_admin_context(), volume_id)
self.assertEqual('in-use', vol['status'])
self.assertEqual(fields.VolumeAttachStatus.ATTACHED,
attachment['attach_status'])
self.assertEqual(mountpoint, attachment['mountpoint'])
self.assertIsNone(attachment['instance_uuid'])
# sanitized, conforms to RFC-952 and RFC-1123 specs.
self.assertEqual('fake-host', attachment['attached_host'])
self.assertRaises(exception.VolumeAttached,
self.volume.delete_volume,
self.context,
volume)
self.volume.detach_volume(self.context, volume_id, attachment['id'])
vol = db.volume_get(self.context, volume_id)
self.assertEqual('available', vol['status'])
self.volume.delete_volume(self.context, volume)
self.assertRaises(exception.VolumeNotFound,
db.volume_get,
self.context,
volume_id)
def test_run_attach_detach_volume_with_attach_mode(self):
instance_uuid = '12345678-1234-5678-1234-567812345678'
mountpoint = "/dev/sdf"
volume = tests_utils.create_volume(self.context,
admin_metadata={'readonly': 'True'},
**self.volume_params)
volume_id = volume['id']
db.volume_update(self.context, volume_id, {'status': 'available', })
self.volume.attach_volume(self.context, volume_id, instance_uuid,
None, mountpoint, 'ro')
vol = db.volume_get(context.get_admin_context(), volume_id)
attachment = vol['volume_attachment'][0]
self.assertEqual('in-use', vol['status'])
self.assertEqual(fields.VolumeAttachStatus.ATTACHED,
vol['attach_status'])
self.assertEqual(mountpoint, attachment['mountpoint'])
self.assertEqual(instance_uuid, attachment['instance_uuid'])
self.assertIsNone(attachment['attached_host'])
admin_metadata = vol['volume_admin_metadata']
self.assertEqual(2, len(admin_metadata))
expected = dict(readonly='True', attached_mode='ro')
ret = {}
for item in admin_metadata:
ret.update({item['key']: item['value']})
self.assertDictEqual(expected, ret)
connector = {'initiator': 'iqn.2012-07.org.fake:01'}
conn_info = self.volume.initialize_connection(self.context,
volume, connector)
self.assertEqual('ro', conn_info['data']['access_mode'])
self.volume.detach_volume(self.context, volume_id, attachment['id'])
vol = db.volume_get(self.context, volume_id)
attachment = vol['volume_attachment']
self.assertEqual('available', vol['status'])
self.assertEqual(fields.VolumeAttachStatus.DETACHED,
vol['attach_status'])
self.assertEqual([], attachment)
admin_metadata = vol['volume_admin_metadata']
self.assertEqual(1, len(admin_metadata))
self.assertEqual('readonly', admin_metadata[0]['key'])
self.assertEqual('True', admin_metadata[0]['value'])
self.volume.attach_volume(self.context, volume_id, None,
'fake_host', mountpoint, 'ro')
vol = db.volume_get(context.get_admin_context(), volume_id)
attachment = vol['volume_attachment'][0]
self.assertEqual('in-use', vol['status'])
self.assertEqual(fields.VolumeAttachStatus.ATTACHED,
vol['attach_status'])
self.assertEqual(mountpoint, attachment['mountpoint'])
self.assertIsNone(attachment['instance_uuid'])
self.assertEqual('fake-host', attachment['attached_host'])
admin_metadata = vol['volume_admin_metadata']
self.assertEqual(2, len(admin_metadata))
expected = dict(readonly='True', attached_mode='ro')
ret = {}
for item in admin_metadata:
ret.update({item['key']: item['value']})
self.assertDictEqual(expected, ret)
connector = {'initiator': 'iqn.2012-07.org.fake:01'}
conn_info = self.volume.initialize_connection(self.context,
volume, connector)
self.assertEqual('ro', conn_info['data']['access_mode'])
self.volume.detach_volume(self.context, volume_id,
attachment['id'])
vol = db.volume_get(self.context, volume_id)
attachment = vol['volume_attachment']
self.assertEqual('available', vol['status'])
self.assertEqual(fields.VolumeAttachStatus.DETACHED,
vol['attach_status'])
self.assertEqual([], attachment)
admin_metadata = vol['volume_admin_metadata']
self.assertEqual(1, len(admin_metadata))
self.assertEqual('readonly', admin_metadata[0]['key'])
self.assertEqual('True', admin_metadata[0]['value'])
self.volume.delete_volume(self.context, volume)
self.assertRaises(exception.VolumeNotFound,
db.volume_get,
self.context,
volume_id)
def test_run_manager_attach_detach_volume_with_wrong_attach_mode(self):
        # Attaching a read-only volume in 'read-write' mode is not allowed
instance_uuid = '12345678-1234-5678-1234-567812345678'
mountpoint = "/dev/sdf"
volume = tests_utils.create_volume(self.context,
admin_metadata={'readonly': 'True'},
**self.volume_params)
volume_id = volume['id']
self.volume.create_volume(self.context, volume)
self.assertRaises(exception.InvalidVolumeAttachMode,
self.volume.attach_volume,
self.context,
volume_id,
instance_uuid,
None,
mountpoint,
'rw')
# Assert a user message was created
self.volume.message_api.create.assert_called_once_with(
self.context, message_field.Action.ATTACH_VOLUME,
resource_uuid=volume['id'],
exception=mock.ANY)
attachment = objects.VolumeAttachmentList.get_all_by_volume_id(
context.get_admin_context(), volume_id)[0]
self.assertEqual(fields.VolumeAttachStatus.ERROR_ATTACHING,
attachment.attach_status)
vol = db.volume_get(context.get_admin_context(), volume_id)
self.assertEqual(fields.VolumeAttachStatus.DETACHED,
vol['attach_status'])
admin_metadata = vol['volume_admin_metadata']
self.assertEqual(2, len(admin_metadata))
expected = dict(readonly='True', attached_mode='rw')
ret = {}
for item in admin_metadata:
ret.update({item['key']: item['value']})
self.assertDictEqual(expected, ret)
db.volume_update(self.context, volume_id, {'status': 'available'})
self.assertRaises(exception.InvalidVolumeAttachMode,
self.volume.attach_volume,
self.context,
volume_id,
None,
'fake_host',
mountpoint,
'rw')
attachment = objects.VolumeAttachmentList.get_all_by_volume_id(
context.get_admin_context(), volume_id)[0]
self.assertEqual(fields.VolumeAttachStatus.ERROR_ATTACHING,
attachment.attach_status)
vol = db.volume_get(context.get_admin_context(), volume_id)
self.assertEqual(fields.VolumeAttachStatus.DETACHED,
vol['attach_status'])
admin_metadata = vol['volume_admin_metadata']
self.assertEqual(2, len(admin_metadata))
expected = dict(readonly='True', attached_mode='rw')
ret = {}
for item in admin_metadata:
ret.update({item['key']: item['value']})
self.assertDictEqual(expected, ret)
def test_run_api_attach_detach_volume_with_wrong_attach_mode(self):
        # Attaching a read-only volume in 'read-write' mode is not allowed
instance_uuid = '12345678-1234-5678-1234-567812345678'
mountpoint = "/dev/sdf"
volume = tests_utils.create_volume(self.context,
admin_metadata={'readonly': 'True'},
**self.volume_params)
volume_id = volume['id']
self.volume.create_volume(self.context, volume)
volume_api = cinder.volume.api.API()
self.assertRaises(exception.InvalidVolumeAttachMode,
volume_api.attach,
self.context,
volume,
instance_uuid,
None,
mountpoint,
'rw')
vol = db.volume_get(context.get_admin_context(), volume_id)
self.assertEqual(fields.VolumeAttachStatus.DETACHED,
vol['attach_status'])
admin_metadata = vol['volume_admin_metadata']
self.assertEqual(1, len(admin_metadata))
self.assertEqual('readonly', admin_metadata[0]['key'])
self.assertEqual('True', admin_metadata[0]['value'])
db.volume_update(self.context, volume_id, {'status': 'available'})
self.assertRaises(exception.InvalidVolumeAttachMode,
volume_api.attach,
self.context,
volume,
None,
'fake_host',
mountpoint,
'rw')
vol = db.volume_get(context.get_admin_context(), volume_id)
self.assertEqual(fields.VolumeAttachStatus.DETACHED,
vol['attach_status'])
admin_metadata = vol['volume_admin_metadata']
self.assertEqual(1, len(admin_metadata))
self.assertEqual('readonly', admin_metadata[0]['key'])
self.assertEqual('True', admin_metadata[0]['value'])
def test_detach_volume_while_uploading_to_image_is_in_progress(self):
        # If an instance is booted from a volume with the 'Terminate on Delete'
        # flag set, deleting the instance will also try to delete the volume,
        # even when the volume is in the 'uploading' state. That happens
        # because the detach call sets the volume status to 'available'.
mountpoint = "/dev/sdf"
# Attach volume to the instance
instance_uuid = '12345678-1234-5678-1234-567812345678'
volume = tests_utils.create_volume(self.context,
admin_metadata={'readonly': 'True'},
**self.volume_params)
volume_id = volume['id']
self.volume.create_volume(self.context, volume)
self.volume.attach_volume(self.context, volume_id, instance_uuid,
None, mountpoint, 'ro')
# Change volume status to 'uploading'
db.volume_update(self.context, volume_id, {'status': 'uploading'})
# Call detach api
self.volume.detach_volume(self.context, volume_id)
vol = db.volume_get(self.context, volume_id)
# Check that volume status is 'uploading'
self.assertEqual("uploading", vol['status'])
self.assertEqual(fields.VolumeAttachStatus.DETACHED,
vol['attach_status'])
def test_volume_attach_in_maintenance(self):
"""Test attach the volume in maintenance."""
test_meta1 = {'fake_key1': 'fake_value1', 'fake_key2': 'fake_value2'}
volume = tests_utils.create_volume(self.context, metadata=test_meta1,
**self.volume_params)
volume['status'] = 'maintenance'
self.assertRaises(exception.InvalidVolume,
self.volume_api.attach,
self.context,
volume, None, None, None, None)
def test_volume_detach_in_maintenance(self):
"""Test detach the volume in maintenance."""
test_meta1 = {'fake_key1': 'fake_value1', 'fake_key2': 'fake_value2'}
volume = tests_utils.create_volume(self.context, metadata=test_meta1,
**self.volume_params)
volume['status'] = 'maintenance'
volume_api = cinder.volume.api.API()
self.assertRaises(exception.InvalidVolume,
volume_api.detach,
self.context,
volume, None)
| 49.644767 | 79 | 0.567346 |
4a2079728adb3fa9196fe41997409575f0995409 | 10,797 | py | Python | data_split_and_featured.py | trueto/albert_question_match | c56a5fcd43d9ffcd4d230d19a347a6d09d7b12f9 | [
"Apache-2.0"
] | 5 | 2019-12-12T06:17:31.000Z | 2021-04-15T03:17:33.000Z | data_split_and_featured.py | trueto/albert_question_match | c56a5fcd43d9ffcd4d230d19a347a6d09d7b12f9 | [
"Apache-2.0"
] | null | null | null | data_split_and_featured.py | trueto/albert_question_match | c56a5fcd43d9ffcd4d230d19a347a6d09d7b12f9 | [
"Apache-2.0"
] | null | null | null | import re
import os
import logging
import pandas as pd
from difflib import SequenceMatcher
from sklearn.model_selection import train_test_split
logger = logging.getLogger(__name__)
class DataProcess:
def __init__(self,
data_path: str,
paper_dataset_path: str):
self.data_df = pd.read_csv(data_path)
self.paper_dataset_path = paper_dataset_path
if not os.path.exists(paper_dataset_path):
os.mkdir(paper_dataset_path)
# Setup logging
logging.basicConfig(format='%(asctime)s - %(levelname)s - %(name)s - %(message)s',
datefmt='%m/%d/%Y %H:%M:%S',
level=logging.INFO)
def split_data(self):
train_dev_df, test_df = train_test_split(self.data_df,test_size=0.1,
random_state=520,shuffle=True)
train_df, dev_df = train_test_split(train_dev_df,test_size=2000,
random_state=520,shuffle=True)
# save the split data
if len(os.listdir(self.paper_dataset_path)) > 0:
self.train_df = self.load_df('train.csv')
self.dev_df = self.load_df('dev.csv')
self.test_df = self.load_df('test.csv')
logger.info("load train/dev/test data from {}".format(self.paper_dataset_path))
else:
self.train_df,self.dev_df,self.test_df = train_df,dev_df,test_df
self.save_df(train_df,'train.csv')
self.save_df(dev_df,'dev.csv')
self.save_df(test_df,'test.csv')
logger.info("dataset was splitted as 8:1:1")
def exchange_a_b(self,
df:pd.DataFrame,
name:str):
df_path = os.path.join(self.paper_dataset_path,name)
if os.path.exists(df_path):
self.train_df_b_a = self.load_df(name)
logger.info("load data from {}".format(df_path))
return
temp_df = df.copy(deep=True)
temp_df['question1'] = df['question2']
temp_df['question2'] = df['question1']
new_df = pd.concat([df,temp_df],ignore_index=True)
self.train_df_b_a = new_df
self.save_df(new_df,name)
logger.info("save data as {}".format(df_path))
def sample_with_score(self,
data_df: pd.DataFrame,
neg_rate:float,
pos_rate:float,
name:str):
df_name = "{}_{}_".format(neg_rate,pos_rate) + name
df_path = os.path.join(self.paper_dataset_path, df_name)
if os.path.exists(df_path):
            self.sample_with_score_df = self.load_df(df_name)
logger.info("load data from {}".format(df_path))
return
# print(data_df.head())
data_df['score'] = data_df.apply(lambda df:
self.similarity(df['question1'],df['question2']),axis=1)
# data_df['longest_match'] = data_df.apply(lambda df:
# self.longest_match(df['question1'],df['question2']),axis=1)
# self.save_df(data_df,'train_score_longest_text.csv')
## get the sample whose sequence similarity is more than neg_rate but label is 0
negative_df = data_df[(data_df['score']>=neg_rate) & (data_df['label'] == 0)]
## get the sample whose sequence similarity is less than pos_rate but label is 1
positive_df = data_df[(data_df['score']<=pos_rate) & (data_df['label'] == 1)]
temp_df = pd.concat([negative_df,positive_df],ignore_index=True)
        self.sample_with_score_df = temp_df
self.save_df(temp_df,df_name)
logger.info("save data at {}".format(df_path))
def score_longest_text(self,
data_df:pd.DataFrame,
name: str):
df_name = name
df_path = os.path.join(self.paper_dataset_path, df_name)
if os.path.exists(df_path):
            self.score_longest_text_df = self.load_df(df_name)
logger.info("load data from {}".format(df_path))
return
# print(data_df.head())
data_df['score'] = data_df.apply(lambda df:
self.similarity(df['question1'], df['question2']), axis=1)
logging.info("\nlabel 0 \nmedian:\t{}\nmean{}".format(
data_df[data_df['label']==0]['score'].median(),
data_df[data_df['label'] == 0]['score'].mean()
))
logging.info("\nlabel 1 \nmedian:\t{}\nmean{}".format(
data_df[data_df['label'] == 1]['score'].median(),
data_df[data_df['label'] == 1]['score'].mean()
))
data_df['longest_match'] = data_df.apply(lambda df:
self.longest_match(df['question1'], df['question2']), axis=1)
self.save_df(data_df, df_name)
logger.info("save data at {}".format(df_path))
def sample_distribution(self,df:pd.DataFrame,name):
label_name = name + '_label.csv'
category_name = name + '_category.csv'
label_path = os.path.join(self.paper_dataset_path,label_name)
category_path = os.path.join(self.paper_dataset_path, category_name)
if os.path.exists(label_path) or os.path.exists(category_path):
return
label_dis = df['label'].value_counts()
category_dis = df['category'].value_counts()
label_dis.to_csv(label_path)
category_dis.to_csv(category_path)
logger.info('file saved as {} and {}'.format(label_path,category_path))
def sample_for_classification(self,
df:pd.DataFrame,
name):
df_name = name
df_path = os.path.join(self.paper_dataset_path, df_name)
dis_path = os.path.join(self.paper_dataset_path,'dis_'+name)
if os.path.exists(df_path):
return
train_df = self.get_classification(self.train_df)
new_df = self.get_classification(df)
cls_df = pd.concat([train_df,new_df],ignore_index=True)
cls_df.drop_duplicates(inplace=True)
label_df = cls_df['label'].value_counts()
label_df.to_csv(dis_path)
cls_df.to_csv(df_path,index=False)
logger.info('data for classification saved as {}'.format(df_path))
def delete_seed_words(self,df:pd.DataFrame,name):
df_name = name
df_path = os.path.join(self.paper_dataset_path, df_name)
if os.path.exists(df_path):
self.train_df_and_noseeds = self.load_df(name)
return
temp_df = df.copy()
data = []
        for category, batch_df in temp_df.groupby(by='category', sort=False):
            pattern = en2zh[category]
            for ques1, ques2, label in zip(batch_df['question1'].tolist(),
                                           batch_df['question2'].tolist(),
                                           batch_df['label'].tolist()):
                ques_1 = re.sub(pattern, '', ques1)
                ques_2 = re.sub(pattern, '', ques2)
                data.append([ques_1, ques_2, label, category])
new_temp_df = pd.DataFrame(data,columns=df.columns)
saved_df = pd.concat([df,new_temp_df],ignore_index=True)
self.train_df_and_noseeds = saved_df
saved_df.to_csv(df_path,index=False)
logger.info('data for delete seeds saved as {}'.format(df_path))
def get_classification(self,df: pd.DataFrame):
ques1_list = df['question1'].tolist()
ques2_list = df['question2'].tolist()
ques_list = ques1_list + ques2_list
category_list = df['category'].tolist()
label_list = category_list + category_list
temp_df = pd.DataFrame(columns=['text','label'])
temp_df['text'] = ques_list
temp_df['label'] = label_list
return temp_df
def longest_match(self,text_a,text_b):
matcher = SequenceMatcher(a=text_a,b=text_b)
i,_,size = matcher.find_longest_match(alo=0,ahi=len(text_a),
blo=0,bhi=len(text_b))
return text_a[i:i+size]
def similarity(self,text_a,text_b):
score = SequenceMatcher(a=text_a,b=text_b).ratio()
return round(score,4)
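    # Note: SequenceMatcher.ratio() is 2*M/T, where M is the number of matched
    # characters and T is the combined length of both strings, so identical
    # questions score 1.0 and completely different ones score 0.0.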
def load_df(self,
name:str):
df_path = os.path.join(self.paper_dataset_path,name)
return pd.read_csv(df_path)
def save_df(self,
df:pd.DataFrame,
name:str):
df_path = os.path.join(self.paper_dataset_path,name)
df.to_csv(df_path,index=False)
if __name__ == '__main__':
data_process = DataProcess(data_path='data/train.csv',
paper_dataset_path='paper_dataset')
## split the original dataset: train : dev : test = 8 : 1 : 1
data_process.split_data()
## trick 1: exchange the order of text pairs
data_process.exchange_a_b(data_process.train_df,'train_b_a.csv')
    ## mine hard samples: high-similarity negatives and low-similarity positives
data_process.sample_with_score(data_process.train_df,
neg_rate=0.4,
pos_rate=0.5,
name='sample_with_score.csv')
## label distribution
data_process.sample_distribution(data_process.train_df,name='train')
data_process.sample_distribution(data_process.dev_df, name='dev')
data_process.sample_distribution(data_process.test_df, name='test')
    ## sample for classification
dev_id_df = pd.read_csv('data/dev_id.csv')
test_final_df = pd.read_csv('data/test_final.csv')
unlabel_df = pd.concat([dev_id_df,test_final_df])
data_process.sample_for_classification(unlabel_df,name='classification_data.csv')
## delete category words
en2zh = {
"diabetes": "糖尿病",
"aids": "艾滋病|aids|艾滋|HIV|hiv",
"breast_cancer": "乳腺癌|乳腺增生",
"hypertension": "高血压",
"hepatitis" : "乙肝|乙肝表面抗体"
}
data_process.delete_seed_words(data_process.train_df,'train_and_noseeds.csv')
data_process.exchange_a_b(data_process.train_df_and_noseeds,'train_and_noseeds_b_a.csv')
## score with longest text
# label 0
# 0.3708017028254289 mean
# 0.3333 median
# label 1
# 0.5324387016848364 mean
# 0.5263 median
# data_process.score_longest_text(data_process.train_df,'train_score_longest_text.csv')
# label 0
# 0.23685070635721756 mean
# 0.2 median
# label 1
# 0.4372390857284495 mean
# 0.4242 median
data_df = pd.read_csv("data/train_no_seeds.csv")
data_process.score_longest_text(data_df,'train_no_seeds_longest_text.csv')
# data_process.score_longest_text(data_process.train_df_and_noseeds,'train_df_and_noseeds_score.csv')
| 40.590226 | 110 | 0.600815 |
4a2079fb797f7b38e097aa154424fa5b8104d74b | 3,679 | py | Python | adafruit_register/i2c_struct.py | dhalbert/Adafruit_CircuitPython_Register | f91a08d7fab35e23159c63947f2bce13181ed0a6 | [
"MIT"
] | 6 | 2018-12-04T02:53:20.000Z | 2020-03-08T15:42:16.000Z | adafruit_register/i2c_struct.py | dhalbert/Adafruit_CircuitPython_Register | f91a08d7fab35e23159c63947f2bce13181ed0a6 | [
"MIT"
] | null | null | null | adafruit_register/i2c_struct.py | dhalbert/Adafruit_CircuitPython_Register | f91a08d7fab35e23159c63947f2bce13181ed0a6 | [
"MIT"
] | null | null | null | # The MIT License (MIT)
#
# Copyright (c) 2016 Adafruit Industries
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
# pylint: disable=too-few-public-methods
"""
`adafruit_register.i2c_struct`
====================================================
Generic structured registers based on `struct`
* Author(s): Scott Shawcroft
"""
try:
import struct
except ImportError:
import ustruct as struct
__version__ = "0.0.0-auto.0"
__repo__ = "https://github.com/adafruit/Adafruit_CircuitPython_Register.git"
class Struct:
"""
Arbitrary structure register that is readable and writeable.
Values are tuples that map to the values in the defined struct. See struct
module documentation for struct format string and its possible value types.
:param int register_address: The register address to read the bit from
:param type struct_format: The struct format string for this register.
"""
def __init__(self, register_address, struct_format):
self.format = struct_format
self.buffer = bytearray(1+struct.calcsize(self.format))
self.buffer[0] = register_address
def __get__(self, obj, objtype=None):
with obj.i2c_device:
obj.i2c_device.write(self.buffer, end=1, stop=False)
obj.i2c_device.readinto(self.buffer, start=1)
return struct.unpack_from(self.format, memoryview(self.buffer)[1:])
def __set__(self, obj, value):
struct.pack_into(self.format, self.buffer, 1, *value)
with obj.i2c_device:
obj.i2c_device.write(self.buffer)
class UnaryStruct:
"""
Arbitrary single value structure register that is readable and writeable.
Values map to the first value in the defined struct. See struct
module documentation for struct format string and its possible value types.
:param int register_address: The register address to read the bit from
:param type struct_format: The struct format string for this register.
"""
def __init__(self, register_address, struct_format):
self.format = struct_format
self.address = register_address
def __get__(self, obj, objtype=None):
buf = bytearray(1+struct.calcsize(self.format))
buf[0] = self.address
with obj.i2c_device:
obj.i2c_device.write(buf, end=1, stop=False)
obj.i2c_device.readinto(buf, start=1)
return struct.unpack_from(self.format, buf, 1)[0]
def __set__(self, obj, value):
buf = bytearray(1+struct.calcsize(self.format))
buf[0] = self.address
struct.pack_into(self.format, buf, 1, value)
with obj.i2c_device:
obj.i2c_device.write(buf)
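# Minimal usage sketch (illustrative only -- the device class, I2C address,
# register addresses and struct formats below are made up, not from a real
# driver):
#
#     from adafruit_bus_device.i2c_device import I2CDevice
#
#     class ExampleSensor:
#         # three consecutive little-endian int16 registers starting at 0x02
#         raw_xyz = Struct(0x02, "<hhh")
#         # a single unsigned-byte configuration register at 0x01
#         config = UnaryStruct(0x01, "<B")
#
#         def __init__(self, i2c, address=0x29):
#             self.i2c_device = I2CDevice(i2c, address)
#
#     # sensor = ExampleSensor(busio.I2C(board.SCL, board.SDA))
#     # x, y, z = sensor.raw_xyz
#     # sensor.config = 0x01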
| 39.138298 | 79 | 0.707801 |
4a207a24d8736f98436130024c6d124174368c32 | 1,470 | py | Python | procedures/points_B_ICG_first_min_befor_max.py | k-cybulski/sigman-project | 1f51e04dddb375eb58182664296b7b3f1db71756 | [
"MIT"
] | 1 | 2017-11-10T10:42:07.000Z | 2017-11-10T10:42:07.000Z | procedures/points_B_ICG_first_min_befor_max.py | k-cybulski/sigman-project | 1f51e04dddb375eb58182664296b7b3f1db71756 | [
"MIT"
] | 21 | 2017-12-28T13:39:55.000Z | 2018-07-16T14:34:29.000Z | procedures/points_B_ICG_first_min_befor_max.py | k-cybulski/sigman-project | 1f51e04dddb375eb58182664296b7b3f1db71756 | [
"MIT"
] | 1 | 2018-02-25T13:57:50.000Z | 2018-02-25T13:57:50.000Z | import numpy as np
from sigman.analyzer import InvalidArgumentError
procedure_type = 'points'
description = (
"""Procedure detect point B in ICG signal. Algorithm detect first minimum before dz/dtmax.
""")
author = 'mzylinski'
arguments = {
}
default_arguments = {
}
output_type = 'B'
required_waves = ['Signal']
required_points = [ 'R']
def procedure(waves, points, begin_time, end_time, settings):
wave = waves['Signal']
R = points['R']
r_x = []
r_y = []
for i in range(0,len(R)-1):
data = wave.data_slice(R.data_x[i], R.data_x[i+1])
        data_max = max(data)
itemindex = np.where(data==data_max)[0]
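        # Walk backwards from the dz/dt maximum while each sample is still at
        # or above the mean of the two preceding samples; the first sample that
        # drops below that mean is taken as the B point.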
while (data[itemindex[0]]*2)>=(data[itemindex[0]-1]+data[itemindex[0]-2]):
itemindex[0] = itemindex[0]-1
if itemindex[0]==2:
break
r_y.append(data[itemindex[0]])
r_x.append(R.data_x[i] + itemindex[0]*wave.sample_length)
return r_x, r_y
def interpret_arguments(waves, points, arguments):
output_arguments = {}
for key, item in arguments.items():
try:
output_arguments[key] = float(item)
except:
raise InvalidArgumentError("{} is invalid.".format(arguments[key]))
return output_arguments
def execute(waves, points, begin_time, end_time, arguments):
arguments = interpret_arguments(waves, points, arguments)
return procedure(waves, points, begin_time, end_time, arguments) | 27.735849 | 90 | 0.637415 |
4a207c9ec2c7a9540734ff1d2215c7aa59eca527 | 3,877 | py | Python | import_notes.py | kungfujam/mendeley-viz | 4b6cb3f995a252f6658e0dedda83c751a5cf5146 | [
"MIT"
] | 2 | 2018-03-01T20:46:04.000Z | 2018-06-05T13:27:42.000Z | import_notes.py | JamesOwers/mendeley-viz | 4b6cb3f995a252f6658e0dedda83c751a5cf5146 | [
"MIT"
] | null | null | null | import_notes.py | JamesOwers/mendeley-viz | 4b6cb3f995a252f6658e0dedda83c751a5cf5146 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Reads a folder of .md files (with file name formatting of export_notes.pm)
and rewrites the mendeley sqlite database notes table with the corresponding
document_id notes
"""
from __future__ import print_function, division
__author__ = "James Owers"
__license__ = "MIT"
__version__ = "0.1"
__email__ = "[email protected]"
__status__ = "Development"
doc_name_str = '{citationKey} ({docid}) {title}'
import sqlite3
import os
import pandas as pd
import numpy as np
import sys
import argparse
import glob
import markdown2
import markdown
try:
DATABASE_LOC = os.environ['MENDELEY_DATABASE_LOC']
except KeyError:
msg = 'ERROR: ' + \
'You must set a system environment variable MENDELEY_DATABASE_LOC ' +\
'that points to the mendeley sqlite database instance. c.f' +\
'http://support.mendeley.com/customer/en/portal/articles/227951-' +\
'how-do-i-locate-mendeley-desktop-database-files-on-my-computer- ' +\
'for location of the database. On unix machines you can set ' + \
'environment variables by running `export MENDELEY_DATABASE_LOC' +\
'=/path/to/database.sqlite`. You can add this command to your ' +\
'~/.bash_profile such that this is run each time you log in to ' +\
'your terminal.'
print(msg, file=sys.stderr)
raise
def diff_pd(df1, df2):
"""Identify differences between two pandas DataFrames"""
assert (df1.columns == df2.columns).all(), \
"DataFrame column names are different"
if df1.equals(df2):
return None
else:
# need to account for np.nan != np.nan returning True
diff_mask = (df1 != df2) & ~(df1.isnull() & df2.isnull())
ne_stacked = diff_mask.stack()
changed = ne_stacked[ne_stacked]
changed.index.names = ['id', 'col']
difference_locations = np.where(diff_mask)
changed_from = df1.values[difference_locations]
changed_to = df2.values[difference_locations]
return pd.DataFrame({'from': changed_from, 'to': changed_to},
index=changed.index)
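# For example, diff_pd(old_notes, new_notes) returns a frame indexed by
# (id, col) with 'from'/'to' columns listing each changed cell (illustrative).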
def import_notes(notes_folder, backup=True, dry_run=False):
"""
Opens a connection to the database, reads the DocumentNotes table,
and overwrites entries with the contents of .md files in indir
"""
# Get relevant tables from database
db = sqlite3.connect(DATABASE_LOC)
table_name = 'DocumentNotes'
notes = pd.read_sql_query("SELECT * from {}".format(table_name), db)
if backup:
notes.to_pickle('DocumentNotes.bkp.pkl')
notes_orig = notes.copy()
all_files = glob.glob(os.path.join(notes_folder, "*.md"))
for f in all_files:
# Reading the file content to create a DataFrame
with open(f, 'r') as ff:
md_str = ff.read()
file_name = os.path.splitext(os.path.basename(f))[0]
doc_id = int(file_name.split('(', 1)[1].split(')', 1)[0])
notes.loc[notes['documentId'] == doc_id, 'text'] = md_str
notes.loc[notes['documentId'] == doc_id, 'baseNote'] = md_str
md_inside_p = lambda x: markdown.markdown(x.replace('"', '"')).\
replace('<br />','<br/>').replace('\n', '')[3:-4]
notes['text'] = notes['text'].apply(md_inside_p)
notes['baseNote'] = notes['baseNote'].apply(md_inside_p)
if dry_run:
print(diff_pd(notes_orig, notes))
else:
notes.to_sql(table_name, db, if_exists='replace', index=False)
if __name__ == '__main__':
parser = argparse.ArgumentParser(description=
'')
parser.add_argument('notes_folder', type=str,
help='Path to folder containing markdown notes')
args = parser.parse_args()
notes_folder = os.path.abspath(args.notes_folder)
import_notes(notes_folder, backup=True, dry_run=True)
| 35.898148 | 78 | 0.649729 |
4a207d1fcf16be3c2d1af48e33d17b9426bae2c0 | 1,151 | py | Python | sginnovate/Preprocessing_opt.py | wanderer799/Sputum | 51471ea11d739e5ddeec4d11c5c4ae733b23ef92 | [
"MIT"
] | null | null | null | sginnovate/Preprocessing_opt.py | wanderer799/Sputum | 51471ea11d739e5ddeec4d11c5c4ae733b23ef92 | [
"MIT"
] | null | null | null | sginnovate/Preprocessing_opt.py | wanderer799/Sputum | 51471ea11d739e5ddeec4d11c5c4ae733b23ef92 | [
"MIT"
] | null | null | null | import os
import cv2 as cv
import threading
import numpy as np
class PreprocessImages():
def crop_run(self,impath,imname,savedir,model,session):
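        # Runs the supplied instance-segmentation model inside the given TF
        # session, keeps the highest-scoring detection, masks out everything
        # outside it and writes the cropped region to savedir.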
# print('impath',impath,imname)
img = cv.imread(impath)
img = cv.resize(img,(400,600))
print(img.shape)
with session.as_default():
with session.graph.as_default():
results = model.detect([img], verbose=0)
r = results[0]
if len(r['scores']) !=0:
index = np.argmax(r['scores'])
box = r['rois'][index]
y = box[0]
x = box[1]
y1 = box[2]
x1 = box[3]
mask = r['masks'][:, :, index]
temp = cv.imread(impath)
temp = cv.resize(temp, (400, 600))
for j in range(temp.shape[2]):
temp[:, :, j] = temp[:, :, j] * mask
temp = temp[y:y1, x:x1]
save_path = os.path.join(savedir,imname)
cv.imwrite(save_path,temp)
return True
| 34.878788 | 60 | 0.444831 |
4a207ef6213abbc73798d524c5f7e778bc46a079 | 10,863 | py | Python | state_farm/state_farm.py | advnturecaptlst/kaggle_archive | 8b64f15caeb73c9822cfe530b9ff0a1a0136da9c | [
"Apache-2.0"
] | null | null | null | state_farm/state_farm.py | advnturecaptlst/kaggle_archive | 8b64f15caeb73c9822cfe530b9ff0a1a0136da9c | [
"Apache-2.0"
] | null | null | null | state_farm/state_farm.py | advnturecaptlst/kaggle_archive | 8b64f15caeb73c9822cfe530b9ff0a1a0136da9c | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Created on Fri Jul 1 20:37:27 2016
@author: ianmurra
"""
import numpy as np
np.random.seed(13)
import os
import glob
import cv2
import math
import pickle
import datetime
import pandas as pd
import statistics
from sklearn.cross_validation import train_test_split
from sklearn.cross_validation import KFold
from keras.models import Sequential
from keras.layers.core import Dense, Dropout, Activation, Flatten
from keras.layers.convolutional import Convolution2D, MaxPooling2D
from keras.optimizers import SGD
from keras.utils import np_utils
from keras.models import model_from_json
from sklearn.metrics import log_loss
from scipy.misc import imread, imresize
use_cache = 1
# color type: 1 - grey, 3 - rgb
color_type_global = 1
def get_im_skipy(path, img_rows, img_cols, color_type=1):
# Load as grayscale
if color_type == 1:
img = imread(path, True)
elif color_type == 3:
img = imread(path)
# Reduce size
resized = imresize(img, (img_cols, img_rows))
return resized
def get_im_cv2(path, img_rows, img_cols, color_type=1):
# Load as grayscale
if color_type == 1:
img = cv2.imread(path, 0)
elif color_type == 3:
img = cv2.imread(path)
# Reduce size
resized = cv2.resize(img, (img_cols, img_rows))
return resized
def get_driver_data():
dr = dict()
    # expanduser() is needed here: open() does not expand a literal '~'
    path = os.path.expanduser(
        os.path.join('~/Documents/kaggle/state_farm', 'input', 'driver_imgs_list.csv'))
print('Read drivers data')
f = open(path, 'r')
line = f.readline()
while (1):
line = f.readline()
if line == '':
break
arr = line.strip().split(',')
dr[arr[2]] = arr[0]
f.close()
return dr
def load_train(img_rows, img_cols, color_type=1):
X_train = []
y_train = []
driver_id = []
driver_data = get_driver_data()
print('Read train images')
for j in range(10):
print('Load folder c{}'.format(j))
path = os.path.join('..', 'input', 'train', 'c' + str(j), '*.jpg')
files = glob.glob(path)
for fl in files:
flbase = os.path.basename(fl)
img = get_im_cv2(fl, img_rows, img_cols, color_type)
X_train.append(img)
y_train.append(j)
driver_id.append(driver_data[flbase])
unique_drivers = sorted(list(set(driver_id)))
print('Unique drivers: {}'.format(len(unique_drivers)))
print(unique_drivers)
return X_train, y_train, driver_id, unique_drivers
def load_test(img_rows, img_cols, color_type=1):
print('Read test images')
path = os.path.join('..', 'input', 'test', '*.jpg')
files = glob.glob(path)
X_test = []
X_test_id = []
total = 0
thr = math.floor(len(files)/10)
for fl in files:
flbase = os.path.basename(fl)
img = get_im_cv2(fl, img_rows, img_cols, color_type)
X_test.append(img)
X_test_id.append(flbase)
total += 1
if total%thr == 0:
print('Read {} images from {}'.format(total, len(files)))
return X_test, X_test_id
def cache_data(data, path):
if os.path.isdir(os.path.dirname(path)):
file = open(path, 'wb')
pickle.dump(data, file)
file.close()
else:
print('Directory doesnt exists')
def restore_data(path):
data = dict()
if os.path.isfile(path):
file = open(path, 'rb')
data = pickle.load(file)
return data
def save_model(model):
json_string = model.to_json()
if not os.path.isdir('cache'):
os.mkdir('cache')
open(os.path.join('cache', 'architecture.json'), 'w').write(json_string)
model.save_weights(os.path.join('cache', 'model_weights.h5'), overwrite=True)
def read_model():
model = model_from_json(open(os.path.join('cache', 'architecture.json')).read())
model.load_weights(os.path.join('cache', 'model_weights.h5'))
return model
def split_validation_set(train, target, test_size):
random_state = 51
X_train, X_test, y_train, y_test = train_test_split(train, target, test_size=test_size, random_state=random_state)
return X_train, X_test, y_train, y_test
def create_submission(predictions, test_id, info):
result1 = pd.DataFrame(predictions, columns=['c0', 'c1', 'c2', 'c3', 'c4', 'c5', 'c6', 'c7', 'c8', 'c9'])
result1.loc[:, 'img'] = pd.Series(test_id, index=result1.index)
now = datetime.datetime.now()
if not os.path.isdir('subm'):
os.mkdir('subm')
suffix = info + '_' + str(now.strftime("%Y-%m-%d-%H-%M"))
sub_file = os.path.join('subm', 'submission_' + suffix + '.csv')
result1.to_csv(sub_file, index=False)
def read_and_normalize_train_data(img_rows, img_cols, color_type=1):
cache_path = os.path.join('cache', 'train_r_' + str(img_rows) + '_c_' + str(img_cols) + '_t_' + str(color_type) + '.dat')
if not os.path.isfile(cache_path) or use_cache == 0:
train_data, train_target, driver_id, unique_drivers = load_train(img_rows, img_cols, color_type)
cache_data((train_data, train_target, driver_id, unique_drivers), cache_path)
else:
print('Restore train from cache!')
(train_data, train_target, driver_id, unique_drivers) = restore_data(cache_path)
train_data = np.array(train_data, dtype=np.uint8)
train_target = np.array(train_target, dtype=np.uint8)
train_data = train_data.reshape(train_data.shape[0], color_type, img_rows, img_cols)
train_target = np_utils.to_categorical(train_target, 10)
train_data = train_data.astype('float32')
train_data /= 255
print('Train shape:', train_data.shape)
print(train_data.shape[0], 'train samples')
return train_data, train_target, driver_id, unique_drivers
def read_and_normalize_test_data(img_rows, img_cols, color_type=1):
cache_path = os.path.join('cache', 'test_r_' + str(img_rows) + '_c_' + str(img_cols) + '_t_' + str(color_type) + '.dat')
if not os.path.isfile(cache_path) or use_cache == 0:
test_data, test_id = load_test(img_rows, img_cols, color_type)
cache_data((test_data, test_id), cache_path)
else:
print('Restore test from cache!')
(test_data, test_id) = restore_data(cache_path)
test_data = np.array(test_data, dtype=np.uint8)
test_data = test_data.reshape(test_data.shape[0], color_type, img_rows, img_cols)
test_data = test_data.astype('float32')
test_data /= 255
print('Test shape:', test_data.shape)
print(test_data.shape[0], 'test samples')
return test_data, test_id
def dict_to_list(d):
ret = []
for i in d.items():
ret.append(i[1])
return ret
def merge_several_folds_mean(data, nfolds):
a = np.array(data[0])
for i in range(1, nfolds):
a += np.array(data[i])
a /= nfolds
return a.tolist()
def merge_several_folds_geom(data, nfolds):
a = np.array(data[0])
for i in range(1, nfolds):
a *= np.array(data[i])
a = np.power(a, 1/nfolds)
return a.tolist()
def copy_selected_drivers(train_data, train_target, driver_id, driver_list):
data = []
target = []
index = []
for i in range(len(driver_id)):
if driver_id[i] in driver_list:
data.append(train_data[i])
target.append(train_target[i])
index.append(i)
data = np.array(data, dtype=np.float32)
target = np.array(target, dtype=np.float32)
index = np.array(index, dtype=np.uint32)
return data, target, index
def create_model_v1(img_rows, img_cols, color_type=1):
nb_classes = 10
# number of convolutional filters to use
nb_filters = 8
# size of pooling area for max pooling
nb_pool = 2
# convolution kernel size
nb_conv = 4
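    # Architecture: two 4x4 conv layers (8 filters each) with ReLU, 2x2 max
    # pooling, dropout, a 128-unit dense layer and a 10-way softmax output.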
model = Sequential()
model.add(Convolution2D(nb_filters, nb_conv, nb_conv,
border_mode='valid',
input_shape=(color_type, img_rows, img_cols)))
model.add(Activation('relu'))
model.add(Convolution2D(nb_filters, nb_conv, nb_conv))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(nb_pool, nb_pool)))
model.add(Dropout(0.6))
model.add(Flatten())
model.add(Dense(128))
model.add(Activation('relu'))
model.add(Dropout(0.5))
model.add(Dense(nb_classes))
model.add(Activation('softmax'))
sgd = SGD(lr=0.1, decay=0, momentum=0, nesterov=False)
model.compile(loss='categorical_crossentropy', optimizer=sgd)
return model
def run_single():
# input image dimensions
img_rows, img_cols = 40, 50
batch_size = 128
nb_epoch = 2
random_state = 51
train_data, train_target, driver_id, unique_drivers = read_and_normalize_train_data(img_rows, img_cols, color_type_global)
test_data, test_id = read_and_normalize_test_data(img_rows, img_cols, color_type_global)
yfull_train = dict()
yfull_test = []
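    # Split by driver id rather than by image so that frames from the same
    # driver never appear in both the training and validation sets.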
unique_list_train = ['p002', 'p012', 'p014', 'p015', 'p016', 'p021', 'p022', 'p024',
'p026', 'p035', 'p039', 'p041', 'p042', 'p045', 'p047', #'p049',
'p050', 'p051', 'p052', 'p056', 'p061', 'p064', 'p066', 'p072',
'p075']
X_train, Y_train, train_index = copy_selected_drivers(train_data, train_target, driver_id, unique_list_train)
unique_list_valid = ['p081']
X_valid, Y_valid, test_index = copy_selected_drivers(train_data, train_target, driver_id, unique_list_valid)
print('Start Single Run')
print('Split train: ', len(X_train), len(Y_train))
print('Split valid: ', len(X_valid), len(Y_valid))
print('Train drivers: ', unique_list_train)
print('Test drivers: ', unique_list_valid)
model = create_model_v1(img_rows, img_cols, color_type_global)
model.fit(X_train, Y_train, batch_size=batch_size, nb_epoch=nb_epoch,
show_accuracy=True, verbose=1, validation_data=(X_valid, Y_valid))
# score = model.evaluate(X_valid, Y_valid, show_accuracy=True, verbose=0)
# print('Score log_loss: ', score[0])
predictions_valid = model.predict(X_valid, batch_size=128, verbose=1)
score = log_loss(Y_valid, predictions_valid)
print('Score log_loss: ', score)
# Store valid predictions
for i in range(len(test_index)):
yfull_train[test_index[i]] = predictions_valid[i]
# Store test predictions
test_prediction = model.predict(test_data, batch_size=128, verbose=1)
yfull_test.append(test_prediction)
print('Final log_loss: {}, rows: {} cols: {} epoch: {}'.format(score, img_rows, img_cols, nb_epoch))
info_string = 'loss_' + str(score) \
+ '_r_' + str(img_rows) \
+ '_c_' + str(img_cols) \
+ '_ep_' + str(nb_epoch)
test_res = merge_several_folds_mean(yfull_test, 1)
print("Creating submission file")
create_submission(test_res, test_id, info_string)
run_single() | 33.118902 | 126 | 0.654055 |
4a207efbd122b977639c7b30dbb4df1649663827 | 4,144 | py | Python | src/core/runtime/object/function.py | thomasmf/nomenine | ead48185b150fdc07a5019499511f696c5326d45 | [
"MIT"
] | 1 | 2015-10-21T14:31:10.000Z | 2015-10-21T14:31:10.000Z | src/core/runtime/object/function.py | thomasmf/nomenine | ead48185b150fdc07a5019499511f696c5326d45 | [
"MIT"
] | null | null | null | src/core/runtime/object/function.py | thomasmf/nomenine | ead48185b150fdc07a5019499511f696c5326d45 | [
"MIT"
] | null | null | null |
ROOT_SCOPE_METHOD(
MD( 'Function', 'FUNCTION_FACTORY_single()' ),
MC( ARG( CW( 'function' ), CC( 'STAR_new( CLAUSE_FACTORY_single() )', 'parameters' ), CG( 'LIST', 'phrase' ) ), """
$NOM( CONTEXT,
$CA(UNION_new( $LISTNEW(
nom_definition( $CA(WORD_new( "parameters" )), PARAM_parameters ),
nom_definition( $CA(WORD_new( "phrase" )), PARAM_phrase )
) )),
Function @ ( Pattern @ ( : that parameters ) ) ( Closure @ ( : this ) ( : that phrase ) )
) ;
""" ),
MS( ARG( CW( 'definition' ), CG( 'WORD', 'name' ), CG( 'ANY', 'value' ) ), """
$NOM( CONTEXT,
$CA(UNION_new( $LISTNEW(
nom_definition( $CA(WORD_new( "name" )), PARAM_name ),
nom_definition( $CA(WORD_new( "value" )), PARAM_value )
) )),
Function @ ( : that name ) ( Stub @ ( : that value ) [] )
) ;
""" )
)
TEST( """ Function @ x ( Closure @ () [ . 1234 ] ) x == 1234 """ )
TEST( """ Function @ ( Pattern @ ( . [ x ] flatten () ) ) ( Closure @ () [ . 2345 ] ) x == 2345 """ )
TEST( """ Function @ ( Pattern @ ( . [ x ( Integer ) ] flatten () ) ) ( Closure @ () [ : that * ( : that ) ] ) x 9 == 81 """ )
TEST( """ Function @ ( Pattern @ ( . [ x ( Shape @ a ( Integer ) ) ( Shape @ b ( Integer ) ) ] flatten () ) ) ( Closure @ () [ : that a * ( : that b ) ] ) x 8 5 == 40 """ )
TEST( """ . [ if [ f1 ] then [ . 6 ] else [ . 9 ] ] evaluate ( Union @ ( . [ ( Function @ f1 ( Closure @ () [FAIL] ) ) ( Function @ f1 ( Closure @ () [ . 1 ] ) ) () ] flatten () ) ) == 9 """ )
TEST( """ function b [ . 100 ] b == 100 """ )
TEST( """ definition x 123 x == 123 """ )
OBJECT( 'FUNCTION_FACTORY',
methods = [
MS( ARG( CW( '@' ), CG( 'CLAUSE', 'clause' ), CG( 'ANY', 'action' ) ), """
JUMP__return_ANY( CONTEXT, CONTEXT, $CA(FUNCTION_new( PARAM_clause, PARAM_action )) ) ;
""" ),
]
)
OBJECTIVE( 'FUNCTION',
attributes = [
A( 'ANY', 'clause' ),
A( 'ANY', 'action' ),
],
objective = """
JUMP__consume_LIST( $CA(FRAME__TYPE_0_new( CONTEXT, ACTION )), ACTION->clause, THAT ) ;
""",
dump = D( 'clause:%s', '$DUMP( object->clause )' )
)
FRAME( 'TYPE_0',
attributes = [
A( 'FUNCTION', 'function' ),
],
methods = [
MS( ARG( CW( 'return' ), CG( 'ANY', 'value' ) ), """
$OPT(
$IFLET( element, ELEMENT, PARAM_value ) ;
nom_do_sync( FRAME__TASK_new( $CA(FRAME__ACTION_new( ACTION->parent, element->next )), ACTION->function->action, element->value ) ) ;
)
$OPT(
$IFLET( array, ARRAY_MUTABLE_NOLOCK, PARAM_value ) ;
$ARRAY_MUTABLE_NOLOCK__NONEMPTY( ACTION->parent, array ) ;
nom_do_sync( FRAME__TASK_new( $CA(FRAME__ACTION_new( ACTION->parent, $CA(nom_array_mutable_nolock_next( array )) )), ACTION->function->action, nom_array_mutable_nolock_value( array ) ) ) ;
)
REFERENCE reference = nom_reference_new( $NONE ) ;
JUMP__value( $CA(FRAME__SPLIT_new( $CA(FRAME__TYPE_1_new( ACTION->parent, ACTION->function, reference )), PARAM_value, reference )), PARAM_value ) ;
""" ),
]
)
FRAME( 'TYPE_1',
attributes = [
A( 'FUNCTION', 'function' ),
A( 'REFERENCE', 'value' ),
],
methods = [
MS( ARG( CW( 'return' ), CG( 'ANY', 'value' ) ), """
nom_do_sync( FRAME__TASK_new( $CA(FRAME__ACTION_new( ACTION->parent, PARAM_value )), ACTION->function->action, ACTION->value->value ) ) ;
""" ),
]
)
FRAME( 'ACTION',
attributes = [
A( 'ANY', 'phrase' ),
],
methods = [
MS( ARG( CW( 'return' ), CG( 'ANY', 'value' ) ), """
JUMP__return_ANY( ACTION->parent, ACTION->parent, $CA(ELEMENT_new( PARAM_value, ACTION->phrase )) ) ;
""" ),
MS( ARG( CW( 'fail' ), CG( 'ANY', 'error' ) ), """
JUMP__callerContext( $CA(FRAME__FUNCTION_FAIL_new( ACTION->parent, PARAM_error )), ACTION->parent ) ;
""" ),
],
dump = D( '%s ', '$DUMP( object->parent )' )
)
FRAME( 'FUNCTION_FAIL',
attributes = [
A( 'ANY', 'error' ),
],
methods = [
MS( ARG( CW( 'return' ), CG( 'ANY', 'value' ) ), """
JUMP__fail_ANY( ACTION->parent, PARAM_value, nom_error_new( CONTEXT, "Function failed", ACTION->error ) ) ;
""" ),
]
)
| 34.533333 | 196 | 0.542471 |
4a20806a9e7bca88b2a7f3574b06b0f2ec1a2b7e | 1,893 | py | Python | parkings/api/enforcement/valid_permit_item.py | klemmari1/parkkihubi | 93218c6046c0910e8a4c723dc7128c6eec085b8c | [
"MIT"
] | 12 | 2016-11-29T15:13:10.000Z | 2021-06-12T06:45:38.000Z | parkings/api/enforcement/valid_permit_item.py | niuzhipeng123/parkkihubi | 93218c6046c0910e8a4c723dc7128c6eec085b8c | [
"MIT"
] | 154 | 2016-11-30T09:07:58.000Z | 2022-02-12T08:29:36.000Z | parkings/api/enforcement/valid_permit_item.py | niuzhipeng123/parkkihubi | 93218c6046c0910e8a4c723dc7128c6eec085b8c | [
"MIT"
] | 15 | 2016-11-29T19:32:48.000Z | 2022-01-05T11:31:39.000Z | import django_filters
from django.utils.translation import gettext_lazy as _
from rest_framework import serializers, viewsets
from ...models import PermitArea, PermitLookupItem
from ...pagination import CursorPagination
from .permissions import IsEnforcer
class ValidPermitItemSerializer(serializers.ModelSerializer):
permit_id = serializers.IntegerField(source='permit.id')
area = serializers.SlugRelatedField(slug_field='identifier', queryset=PermitArea.objects.all())
operator = serializers.CharField(source='permit.series.owner.operator.id')
operator_name = serializers.CharField(source='permit.series.owner.operator.name')
class Meta:
model = PermitLookupItem
fields = [
'id',
'permit_id',
'area',
'registration_number',
'start_time',
'end_time',
'operator',
'operator_name',
]
class ValidPermitItemFilter(django_filters.rest_framework.FilterSet):
reg_num = django_filters.CharFilter(
label=_("Registration number"), method='filter_reg_num')
time = django_filters.IsoDateTimeFilter(
label=_("Time"), method='filter_time')
class Meta:
model = PermitLookupItem
fields = []
def filter_reg_num(self, queryset, name, value):
return queryset.by_subject(value)
def filter_time(self, queryset, name, value):
return queryset.by_time(value)
class ValidPermitItemViewSet(viewsets.ReadOnlyModelViewSet):
permission_classes = [IsEnforcer]
queryset = PermitLookupItem.objects.active()
serializer_class = ValidPermitItemSerializer
filterset_class = ValidPermitItemFilter
pagination_class = CursorPagination
def get_queryset(self):
domain = self.request.user.enforcer.enforced_domain
return super().get_queryset().filter(permit__domain=domain)
| 33.210526 | 99 | 0.709456 |
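# --- Illustration only (not part of valid_permit_item.py) ---------------------
# A minimal sketch of how the two filters declared above might be exercised
# over HTTP. The host, URL path and auth header are assumptions; only the
# query parameter names (reg_num, time) come from ValidPermitItemFilter.
import requests

response = requests.get(
    "https://parking.example.com/enforcement/valid_permit_items/",
    params={"reg_num": "ABC-123", "time": "2021-06-01T12:00:00Z"},
    headers={"Authorization": "ApiKey <enforcer-token>"},  # hypothetical auth scheme
)
print(response.status_code, response.json())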
4a2080816a1a83916eaef3e395fb19601160ab50 | 5,705 | py | Python | cancergame/cancergame.py | hieumtran/cancergame | af129cd29eb61a8593410b9b8986f4bb17948235 | [
"MIT"
] | 1 | 2021-08-24T03:00:23.000Z | 2021-08-24T03:00:23.000Z | cancergame/cancergame.py | hieumtran/cancergame | af129cd29eb61a8593410b9b8986f4bb17948235 | [
"MIT"
] | null | null | null | cancergame/cancergame.py | hieumtran/cancergame | af129cd29eb61a8593410b9b8986f4bb17948235 | [
"MIT"
] | null | null | null | import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import time
def init_cancerGDV(
xd = 0.04,
xg = 0.9,
xv = 0.06,
ba = 2.5,
bv = 2,
c = 1,
n_neigh = 4,
dt = 0.01,
iter = 3000,
rb = 10**(-1.5),
fb = 10**(-1.5),
sigma = 0.01,
d = None, timeframe = None, visual = True
):
"""Function to plot static evolution of cancer game.
Arguments
---------
xd (float): subpopulation proportion of DEF tumor;
default 0.04
xg (float): subpopulation proportion of GLY tumor;
default 0.9
xv (float): subpopulation proportion of VOP tumor;
default 0.06
ba (float): the benefit per unit of acidification;
default 2.5
    bv (float): the benefit from the oxygen per unit of vascularization;
        default 2
    c (float): the cost of producing VEGF; default 1
    n_neigh (float): the number of GLY cells in the interaction group;
        default 4
    dt (float): time differentiation;
        default 0.01
    iter (int): tumors' evolutionary time dependency;
        default 3000
rb (float): recovery barrier;
default 10**(-1.5)
fb (float): failure barrier;
default 10**(-1.5)
    d (float): medicine dose applied during the treatment windows;
        default None
    timeframe (list): list of [start, end] iteration windows for injecting medicine;
        default None
    sigma (float): penalty for medicine computation;
        default 0.01
    Returns
    -------
    The 2D and 3D matplotlib figures (None when visual is False) and the total treatment cost.
"""
    # Evolution of subpopulation proportions
xdpoints = [xd]
xgpoints = [xg]
xvpoints = [xv]
ppoints = [xg]
qpoints = [xv/(xv + xd)]
rate_p = []
rate_q = []
# game_proceed = 1
win = []
succeed = rb
fail = 1-fb
q = xv/(xv + xd)
p = xg
# start_time = time.time()
t = 0
for t in range(iter):
prev_q = q
prev_p = p
dq = 0
dp = 0
sum_p = 0
for k in range(0, n_neigh):
sum_p += p**k
dq = q * (1 - q) * (bv/(n_neigh+1) * sum_p - c) * dt
q += dq
# Replicator dynamic in 2-D transformation
if timeframe != None:
for i in range(len(timeframe)):
if t >= timeframe[i][0] and t <= timeframe[i][1]:
dp = p * (1 - p) * (ba/(n_neigh+1) - (bv - c) * prev_q - d) * dt
break
else:
dp = p * (1 - p) * (ba/(n_neigh+1) - (bv - c) * prev_q) * dt
else:
dp = p * (1 - p) * (ba/(n_neigh+1) - (bv - c) * prev_q) * dt
p += dp
# Convert from 2-D to 3-D
xd = (1 - q) * (1 - p)
xg = p
xv = (1 - p) * q
ppoints.append(p)
qpoints.append(q)
rate_p.append(dp)
rate_q.append(dq)
xdpoints.append(xd)
xgpoints.append(xg)
xvpoints.append(xv)
# Terminal condition
if p <= succeed:
win = 1
print("Treatment succeed at " + str(t*dt))
break
elif p >= fail:
win = 0
print("Treatment fail at " + str(t*dt))
break
    # Total cost of the treatment: each injection window is charged for its
    # duration at the dose d plus the penalty sigma. Restored from the
    # commented-out draft so that total_cost is defined before it is returned.
    total_cost = 0
    if timeframe is not None and d is not None:
        for i in range(len(timeframe)):
            time_treatment = (timeframe[i][1] - timeframe[i][0]) * dt
            # print("Total time treatment: ", time_treatment)
            total_cost += time_treatment * (d + sigma)
    # print("Time: ", t)
fig_2D, fig_3D = None, None
if visual == True:
# 2D visualization
fig_2D = plt.figure(figsize=(15,7))
plt.axhline(succeed, color="g", linestyle='dashed', label="Succeed barrier")
plt.axhline(fail, color="r", linestyle='dashed', label="Fail barrier")
length = len(xgpoints)
plt.plot(xgpoints, label="GLY", color="purple")
plt.plot(xdpoints, label="DEF", color="royalblue")
plt.plot(xvpoints, label="VOP", color="orange")
# plt.plot(rate_p, label="p")
# plt.plot(rate_q, label="q")
        if timeframe is not None:
            for i in range(len(timeframe)):
                plt.axvspan(timeframe[i][0], timeframe[i][1], facecolor="red", alpha=0.15)
plt.xlabel("Time", fontweight="bold", fontsize='x-large')
plt.xlim(0, t)
plt.xticks(np.arange(0, t, 500), np.arange(0, t*dt, 500*dt))
plt.ylim(0, 1)
plt.ylabel("Subpopulation proportions", fontweight="bold", fontsize='x-large')
plt.legend()
# 3D visualization
dynamic = []
dynamic.append(xdpoints)
dynamic.append(xgpoints)
dynamic.append(xvpoints)
dynamic = np.array(dynamic)
dynamic = dynamic.transpose()
fail = fail*(np.sqrt(3)/2)
succeed = succeed*(np.sqrt(3)/2)
# print(fail)
transformation = np.array([[1,0],
[0.5, np.sqrt(3)/2],
[0,0]])
dynamic_2D = np.matmul(dynamic, transformation)
fig_3D = plt.figure(figsize=(7,7))
plt.plot([1,0,0.5,1], [0,0,np.sqrt(3)/2,0])
plt.plot(dynamic_2D[:, 0], dynamic_2D[:, 1], color='g')
        if timeframe is not None:
            for i in range(len(timeframe)):
                plt.plot(dynamic_2D[timeframe[i][0]:timeframe[i][1], 0], dynamic_2D[timeframe[i][0]:timeframe[i][1], 1], color='r')
plt.ylim(0, 1)
plt.xlim(0, 1)
plt.axhline(succeed, color="g", linestyle='dashed', label="Succeed barrier")
plt.axhline(fail, color="r", linestyle='dashed', label="Fail barrier")
return fig_2D, fig_3D, total_cost | 27.560386 | 123 | 0.537599 |
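# --- Illustration only (not part of cancergame.py) ----------------------------
# A minimal usage sketch of init_cancerGDV defined above. The dose and the
# single treatment window below are assumptions chosen purely to show the
# argument format, not recommended values.
if __name__ == "__main__":
    fig_2D, fig_3D, cost = init_cancerGDV(
        xd=0.04, xg=0.9, xv=0.06,
        d=0.5,                    # assumed constant dose inside the window
        timeframe=[[500, 1500]],  # assumed [start, end] iteration window
        visual=True,
    )
    print("treatment cost:", cost)
    plt.show()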
4a20813d1b71c796b4e0cd5f7309e8802add6acd | 1,485 | py | Python | Numerical_Methods_Physics/Halleys_Comet_1.py | Simba2805/Computational_Physics_Python | be687939c16a1d08066939830ac31ba666a3e1bb | [
"MIT"
] | null | null | null | Numerical_Methods_Physics/Halleys_Comet_1.py | Simba2805/Computational_Physics_Python | be687939c16a1d08066939830ac31ba666a3e1bb | [
"MIT"
] | null | null | null | Numerical_Methods_Physics/Halleys_Comet_1.py | Simba2805/Computational_Physics_Python | be687939c16a1d08066939830ac31ba666a3e1bb | [
"MIT"
] | null | null | null | ## --------------The motion of Halley’s comet, which has a period of about 76 years-------------------
import numpy as np
import matplotlib.pyplot as plt
m= 200 # reduced mass
n = 20000
k= 39.5
h = 2/(n-1)
h2 = h**2/2
# motion in xy-plane
t = np.linspace(0, 2, n-1) # time interval
x = np.zeros(n-1) # distance (x-component)
y = np.zeros(n-1) # distance (y-component)
r = np.zeros(n-1)
vx = np.zeros(n-1)
vy = np.zeros(n-1)
gx = np.zeros(n-1)
gy = np.zeros(n-1)
# initial positions
x[0] = 1.966843 # r_max = 5.28 × 10^(12) m (aphelion)
y[0] = 0
r[0] = x[0]
# initial velocity
vx[0] = 0
vy[0] = 0.815795 # v_min = 9.13 × 10^2 m/s
# initial accelerations
gx[0] = -k/(r[0]**2)
gy[0] = 0
# Verlet algorithm for position and velocity
for i in range(n-2):
x[i+1] = x[i] + h*vx[i] + h2*gx[i]
y[i+1] = y[i] + h*vy[i] + h2*gy[i]
r2 = x[i+1]**2 + y[i+1]**2
r[i+1] = np.sqrt(r2)
r3 = r2*r[i+1]
gx[i+1] = -k*x[i+1]/r3
gy[i+1] = -k*y[i+1]/r3
vx[i+1] = vx[i] + h*(gx[i+1] + gx[i])/2
vy[i+1] = vy[i] + h*(gy[i+1] + gy[i])/2
# positions
print('x-component ', x)
print("")
print('y-component', y)
# ---------plotting---------
plt.plot(t, r,'--',color= 'k')
plt.title('The distance between the sun and comet')
plt.xlabel('(time/76) years')
plt.ylabel('r (m/$(a = 2.68 x 10^{12})$)')  # distance scaled by the orbit's semi-major axis, a = 2.68 x 10^12 m
plt.axis([0,2.0, 0, 2.0])
plt.savefig("Halley's Comet.png")
plt.show() | 25.169492 | 103 | 0.523906 |
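# --- Illustration only (not part of the original script) ----------------------
# Optional sanity check on the Verlet integration above: in these scaled units
# the specific orbital energy E = v^2/2 - k/r should stay nearly constant
# along the trajectory, so the relative drift should be small.
energy = 0.5 * (vx**2 + vy**2) - k / r
print('relative energy drift:', (energy.max() - energy.min()) / abs(energy[0]))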
4a208184369354bbd18f7eab521fa9332ec8aac2 | 1,871 | py | Python | tests/test_account.py | MatthiasLohr/bdtsim | f22a1207b206b35c8f0011be6991bc3163cdfca2 | [
"Apache-2.0"
] | null | null | null | tests/test_account.py | MatthiasLohr/bdtsim | f22a1207b206b35c8f0011be6991bc3163cdfca2 | [
"Apache-2.0"
] | null | null | null | tests/test_account.py | MatthiasLohr/bdtsim | f22a1207b206b35c8f0011be6991bc3163cdfca2 | [
"Apache-2.0"
] | null | null | null | # This file is part of the Blockchain Data Trading Simulator
# https://gitlab.com/MatthiasLohr/bdtsim
#
# Copyright 2020 Matthias Lohr <[email protected]>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Any
from unittest import TestCase
from bdtsim.account import Account, AccountFile
class AccountTest(TestCase):
def __init__(self, *args: Any, **kwargs: Any) -> None:
super(AccountTest, self).__init__(*args, **kwargs)
account_file = AccountFile()
self.operator = account_file.operator
self.seller = account_file.seller
self.buyer = account_file.buyer
def test_instantiation(self) -> None:
self.assertRaises(ValueError, Account, 'john', 'a')
self.assertEqual(self.operator, Account(self.operator.name, self.operator.wallet_private_key))
self.assertEqual(self.seller, Account(self.seller.name, self.seller.wallet_private_key))
self.assertEqual(self.buyer, Account(self.buyer.name, self.buyer.wallet_private_key))
def test_equals(self) -> None:
self.assertEqual(self.operator, self.operator)
self.assertEqual(self.seller, self.seller)
self.assertEqual(self.buyer, self.buyer)
self.assertNotEqual(self.operator, self.seller)
self.assertNotEqual(self.seller, self.buyer)
self.assertNotEqual(self.buyer, self.operator)
| 39.808511 | 102 | 0.728487 |
4a2081cfac55ad1134afcdea84f566b5d73d3249 | 514 | py | Python | blog/admin.py | lingyan2017/blogproject | 5cc907882ac48ea4b78ea14bb52e8a8c94fc653b | [
"MIT"
] | null | null | null | blog/admin.py | lingyan2017/blogproject | 5cc907882ac48ea4b78ea14bb52e8a8c94fc653b | [
"MIT"
] | 9 | 2021-03-19T11:20:50.000Z | 2022-03-12T00:34:14.000Z | blog/admin.py | lingyan2017/blogproject | 5cc907882ac48ea4b78ea14bb52e8a8c94fc653b | [
"MIT"
] | null | null | null | from django.contrib import admin
# Register your models here.
from blog.models import Post, Category, Tag
class PostAdmin(admin.ModelAdmin):
list_display = ['title','created_time','modified_time','category','author']
fields = ['title','body','excerpt','category','tags']
def save_model(self, request, obj, form, change):
obj.author = request.user
super().save_model(request,obj,form,change)
admin.site.register(Post, PostAdmin)
admin.site.register(Category)
admin.site.register(Tag) | 32.125 | 79 | 0.72179 |
4a20823c953785effc9df6e874da73c48f75ebf8 | 47 | py | Python | pysensors/classification/__init__.py | Jimmy-INL/pysensors | 62b79a233a551ae01125e20e06fde0c96b4dffd2 | [
"MIT"
] | 43 | 2020-10-26T14:43:56.000Z | 2022-03-03T16:03:15.000Z | pysensors/classification/__init__.py | Jimmy-INL/pysensors | 62b79a233a551ae01125e20e06fde0c96b4dffd2 | [
"MIT"
] | 4 | 2020-11-10T11:15:15.000Z | 2022-01-07T16:05:11.000Z | pysensors/classification/__init__.py | Jimmy-INL/pysensors | 62b79a233a551ae01125e20e06fde0c96b4dffd2 | [
"MIT"
] | 13 | 2020-10-14T10:38:38.000Z | 2022-01-03T09:05:15.000Z | from ._sspoc import SSPOC
__all__ = ["SSPOC"]
| 11.75 | 25 | 0.702128 |
4a2082461d9f1bd71e34cc7ec278e54e16d785ca | 11,710 | py | Python | chatbot/bot.py | raf555/samantha-bot-line | 6b790325f09879cc2fc6dd10c2379e16dd5488ce | [
"MIT"
] | null | null | null | chatbot/bot.py | raf555/samantha-bot-line | 6b790325f09879cc2fc6dd10c2379e16dd5488ce | [
"MIT"
] | null | null | null | chatbot/bot.py | raf555/samantha-bot-line | 6b790325f09879cc2fc6dd10c2379e16dd5488ce | [
"MIT"
] | null | null | null | import os
import json
from flask import request, abort
from linebot import (
LineBotApi, WebhookHandler
)
from linebot.models import (
MessageEvent, FollowEvent, UnfollowEvent, JoinEvent, PostbackEvent,
TextMessage, TextSendMessage, FlexSendMessage, StickerSendMessage,
SourceRoom, SourceGroup
)
from linebot.exceptions import (
InvalidSignatureError, LineBotApiError
)
from .additional_flex_messages import whats_sop_kru, create_image_bubble, create_image_carousel
from .calendar_service import create_fungs_agenda, create_lfm_agenda
from .database_service import (
authenticate, add_follower, add_group,
get_code, get_command, get_command_description,
remove_follower, track_api_calls, update_code
)
from .movie_service import (
create_upcoming_movies_carousel, discover_movies,
create_now_showing_carousel, get_now_showing,
get_movie_details, create_movie_details_bubble
)
from .utils import parse_upcoming_movies_params, translate_date_to_words, translate_words_to_date
# line messaging api
channel_access_token = os.environ.get('CHANNEL_ACCESS_TOKEN')
channel_secret = os.environ.get('CHANNEL_SECRET')
line_bot_api = LineBotApi(channel_access_token)
handler = WebhookHandler(channel_secret)
lfm_muda_beo_id = os.environ.get('ID_LFM_MUDA_BEO')
def callback():
# get X-Line-Signature header value
signature = request.headers['X-Line-Signature']
# get request body as text
body = request.get_data(as_text=True)
print(body)
# app.logger.info("Request body: " + body)
# handle webhook body
try:
handler.handle(body, signature)
except InvalidSignatureError:
print("Invalid signature. Please check your channel access token/channel secret.")
abort(400)
return 'OK'
def execute_command(event, text_string):
# separate command string and other parameters or unnecessary inputs
split_text = text_string.split()
# get the first word (the command) while
# lowering the characters and removing the '?' prefix
command_string = split_text[0].lower()[1:]
# get other words other than the command itself
if len(split_text) > 1:
other_string = split_text[1:]
else: # temporary fix
other_string = []
# get the command from database
command_tuple = get_command(command_string)
# if the command exists in the database, it should be truthy
if command_tuple:
# unpack the tuple
c_type, c_content, c_clearance = get_command(command_string)
# check if the user has clearance for the command
if authenticate(event.source, c_clearance):
# collect data
track_api_calls(command_string, event.source.user_id)
# for simple text-based replies
if c_type == 'text':
line_bot_api.reply_message(
event.reply_token, TextSendMessage(text=c_content))
# for simple image-based replies
elif c_type == 'image':
# unpack the content first into ratio and image url
ratio, image_url, alt_text = json.loads(c_content).values()
# then create the content bubble using ratio and image url
content = create_image_bubble(ratio, image_url)
line_bot_api.reply_message(event.reply_token, FlexSendMessage(
alt_text=alt_text, contents=content))
# for code text-based replies
elif c_type == 'code':
line_bot_api.reply_message(
event.reply_token, TextSendMessage(text=get_code(c_content)))
# for updating code commands
elif c_type == 'update_code':
if other_string:
if command_string in ('gantikoderulat', 'gantikodelokerdoksos', 'gantikodelemarioren'):
update_code(c_content, other_string[0])
else:
update_code(c_content, ' '.join(other_string))
line_bot_api.reply_message(event.reply_token, TextSendMessage(
text=("Kode sudah diganti menjadi " + get_code(c_content))))
else:
line_bot_api.reply_message(event.reply_token, TextSendMessage(
text=('Mau diganti sama apa kodenya?')))
elif c_type == 'image carousel':
# unpack the content first into ratio and image url
ratio, image_urls, alt_text = json.loads(c_content).values()
# then create the content bubble using ratio and image url
content = create_image_carousel(ratio, image_urls)
line_bot_api.reply_message(event.reply_token, FlexSendMessage(
alt_text=alt_text, contents=content))
# for complex replies [to do list], not yet added to database
elif c_type == 'others':
if command_string == 'agenda':
duration = translate_words_to_date(' '.join(other_string))
alt_text = "Agenda " + \
translate_date_to_words(int(duration)) + " Kedepan"
if authenticate(event.source, 2):
line_bot_api.reply_message(event.reply_token, FlexSendMessage(
alt_text=alt_text, contents=create_fungs_agenda(str(duration))))
else:
line_bot_api.reply_message(event.reply_token, FlexSendMessage(
alt_text=alt_text, contents=create_lfm_agenda(str(duration))))
elif command_string == 'upcomingmovies':
start_date, end_date, region = parse_upcoming_movies_params(other_string)
line_bot_api.reply_message(event.reply_token, FlexSendMessage(alt_text="Upcoming Movies", contents=create_upcoming_movies_carousel(
discover_movies(start_date=start_date, end_date=end_date, region=region))))
elif command_string == 'nowshowing':
line_bot_api.reply_message(event.reply_token, FlexSendMessage(
alt_text="Now Showing", contents=create_now_showing_carousel(get_now_showing())))
elif command_string == 'whatsopkru':
line_bot_api.reply_message(event.reply_token, [FlexSendMessage(alt_text="What SOP' Kroe!", contents=whats_sop_kru(
1)), FlexSendMessage(alt_text="What SOP' Kroe!", contents=whats_sop_kru(2))])
elif command_string == 'filefem':
bubble = create_image_bubble("1:1.414","https://i.ibb.co/NLyCzx6/clickme-PERATURAN.jpg")
line_bot_api.reply_message(event.reply_token, [
FlexSendMessage(alt_text="FiLEFEM", contents=bubble),
TextSendMessage(text="bit.ly/FiLEFEM\n\nAkses file LFM seperti hasil DK, Kinefolk, materi Pendidikan, hasil edit DVD, koleksi film, dan lainnya disini ya kru!")
])
elif c_type == 'help':
# if user asks for detailed help of a command
if other_string:
reply = get_command_description(other_string[0])
# general help
else:
# commands available for kru
commands = '\n • ?Agenda \n • ?NowShowing \n • ?UpcomingMovies \n • ?Database \n • ?KodeLemariOren \n • ?FTP \n • ?Surat \n • ?FAQSurat \n • ?TrackRecord \n • ?LinkKinekuniya \n • ?WhatSOPKru \n • ?Netflix \n • ?KitMakingMovies \n • ?Alkhazini \n • ?PinjamDisney+ \n • ?YukSukacita'
# commands available only for fungs
if authenticate(event.source, 2):
commands += '\n\n • ?KodeRulat \n • ?GantiKodeRulat \n • ?KodeLokerDoksos \n • ?GantiKodeLokerDoksos \n • ?PasswordEneng \n • ?GantiPasswordEneng \n • ?PasswordCici \n • ?GantiPasswordCici'
reply = "Halo! \nAku bisa bantu kru sekalian dengan beberapa perintah, diantaranya: " + commands + \
"\n\nKalau masih bingung perintahnya untuk apa, coba ketik ?Help dan nama perintahnya, \nmisal: ?Help Agenda "
# send the message
line_bot_api.reply_message(
event.reply_token, TextSendMessage(text=reply))
@handler.add(MessageEvent, message=TextMessage)
def handle_message(event):
text = event.message.text
if text[0] == '?':
if text.split()[0] == "?Register":
if isinstance(event.source, SourceGroup):
print(event.source.group_id, ' '.join(text.split()[1:]))
add_group(event.source.group_id, ' '.join(text.split()[1:]))
execute_command(event, text)
@handler.add(FollowEvent)
def handle_follow(event):
# get profile
profile = line_bot_api.get_profile(event.source.user_id)
# check if user exists in Muda Beo (change readme to muda beo id later on)
try:
line_bot_api.get_group_member_profile(
lfm_muda_beo_id, event.source.user_id)
user_type = 1
except LineBotApiError as err:
print(err.error.message)
user_type = 0
# add said profile to database
add_follower(profile.user_id, profile.display_name, user_type)
if user_type == 0:
welcome_reply = 'Halo, {}! Kenalkan aku Samantha, bot untuk membantu kru LFM. Kalau penasaran aku bisa membantu apa saja, kirim aja \n`?Help`'.format(
profile.display_name)
onboarding_reply = 'Oh iya, coba dulu yuk kirim `?Agenda` atau `?NowShowing`, atau pencet aja menu yang udah disediain!'
privacy_notice = "Omong-omong, aku akan merekam kapan dan fitur apa yang kalian gunakan ya. Kalau kalian tidak mau, karena belum ada sistem untuk opt-out, berkabar saja supaya rekamannya dihapus."
all_reply = [TextSendMessage(text=welcome_reply),
StickerSendMessage(package_id='11537',
sticker_id='52002734'),
TextSendMessage(text=onboarding_reply),
TextSendMessage(text=privacy_notice)]
elif user_type == 1:
welcome_reply = 'Halo, {}! Kenalkan aku Samantha, bot untuk membantu kru LFM. Tampaknya kamu tidak ada di Muda Beo. Maaf, aku tidak bisa membantumu.'.format(
profile.display_name)
all_reply = [TextSendMessage(text=welcome_reply)]
# send a welcoming message and onboarding
line_bot_api.reply_message(event.reply_token, all_reply)
@handler.add(JoinEvent)
def handle_join(event):
# get group id
if isinstance(event.source, SourceGroup):
reply = "Halo kru! Aku perlu catat nama grupnya dulu nih, tolong kirim ?Register dan nama grupnya. Contoh: ?Register LFM Muda Beo. Terus kalau udah, kabarin ke Ivan yaa. \nTerimakasih!"
line_bot_api.reply_message(event.reply_token, [TextSendMessage(
reply), StickerSendMessage(package_id='11537', sticker_id='52002739')])
if isinstance(event.source, SourceRoom):
reply = "Halo! Maaf belum bisa bantu di multichat nih. Hehe"
line_bot_api.reply_message(event.reply_token, TextSendMessage(reply))
line_bot_api.leave_room(event.source.room_id)
@handler.add(UnfollowEvent)
def handle_unfollow(event):
remove_follower(event.source.user_id)
@handler.add(PostbackEvent)
def handle_postback(event):
movie_id = int(event.postback.data)
line_bot_api.reply_message(event.reply_token, FlexSendMessage(
alt_text="Movie Details", contents=create_movie_details_bubble(get_movie_details(movie_id))))
| 45.387597 | 318 | 0.648249 |
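# --- Illustration only (not part of chatbot/bot.py) ---------------------------
# A minimal sketch of how callback() above is presumably wired to a web route.
# The Flask app, module path and route name are assumptions; the repository's
# actual entry point may differ.
from flask import Flask
from chatbot import bot

app = Flask(__name__)
app.add_url_rule("/callback", "callback", bot.callback, methods=["POST"])

if __name__ == "__main__":
    app.run(port=8000)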
4a2082b22ec187c28ce14419d398fd9badc165e7 | 3,729 | py | Python | setup.py | CalebBell/chemicals | e3920ae917bf1944946aa95f5461a41bcdba6c63 | [
"MIT"
] | 76 | 2020-08-29T07:47:11.000Z | 2022-03-27T03:16:46.000Z | setup.py | CalebBell/chemicals | e3920ae917bf1944946aa95f5461a41bcdba6c63 | [
"MIT"
] | 20 | 2020-08-31T04:44:53.000Z | 2022-03-25T05:40:07.000Z | setup.py | CalebBell/chemicals | e3920ae917bf1944946aa95f5461a41bcdba6c63 | [
"MIT"
] | 13 | 2020-09-01T04:57:01.000Z | 2022-02-23T03:36:58.000Z | # -*- coding: utf-8 -*-
"""Chemical Engineering Design Library (ChEDL). Utilities for process modeling.
Copyright (C) 2016, 2017, 2018, 2019, 2020 Caleb Bell
<[email protected]>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
from distutils.core import setup
classifiers=[
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'Intended Audience :: Education',
'Intended Audience :: Manufacturing',
'Intended Audience :: Science/Research',
'License :: OSI Approved :: MIT License',
'Natural Language :: English',
'Operating System :: MacOS',
'Operating System :: Microsoft :: Windows',
'Operating System :: POSIX',
'Operating System :: POSIX :: BSD',
'Operating System :: POSIX :: Linux',
'Operating System :: Unix',
'Programming Language :: Python',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Programming Language :: Python :: 3.9',
'Programming Language :: Python :: 3.10',
'Programming Language :: Python :: Implementation :: CPython',
'Programming Language :: Python :: Implementation :: PyPy',
'Topic :: Education',
'Topic :: Scientific/Engineering :: Atmospheric Science',
'Topic :: Scientific/Engineering :: Chemistry',
'Topic :: Scientific/Engineering :: Physics',
]
setup(
name = 'chemicals',
packages = ['chemicals'],
license='MIT',
version = '1.0.12',
description = 'Chemical properties component of Chemical Engineering Design Library (ChEDL)',
author = 'Caleb Bell',
install_requires=['fluids>=1.0.9', 'scipy', 'numpy', 'pandas'],
extras_require = {
'Coverage documentation': ['wsgiref>=0.1.2', 'coverage>=4.0.3']
},
long_description=open('README.rst').read(),
platforms=["Windows", "Linux", "Mac OS", "Unix"],
author_email = '[email protected]',
url = 'https://github.com/CalebBell/chemicals',
download_url = 'https://github.com/CalebBell/chemicals/tarball/1.0.12',
keywords = ['chemical engineering', 'chemistry', 'mechanical engineering',
'thermodynamics', 'databases', 'cheminformatics', 'engineering','viscosity',
'density', 'heat capacity', 'thermal conductivity', 'surface tension',
'combustion', 'environmental engineering', 'solubility', 'vapor pressure',
'equation of state', 'molecule'],
classifiers = classifiers,
package_data={'chemicals': ['Critical Properties/*', 'Density/*',
'Electrolytes/*', 'Environment/*', 'Heat Capacity/*', 'Identifiers/*',
'Law/*', 'Misc/*', 'Phase Change/*', 'Reactions/*', 'Safety/*',
'Solubility/*', 'Interface/*', 'Triple Properties/*',
'Thermal Conductivity/*',
'Vapor Pressure/*', 'Viscosity/*']}
)
| 43.360465 | 95 | 0.700456 |
4a2082c96517cf878677bf1b4335049ec82c827c | 22,354 | py | Python | research/object_detection/models/keras_applications1/mobilenet_v2_test.py | jdavidagudelo/tensorflow-models | 6f019beec73b01861363bf717706e27f4210b979 | [
"Apache-2.0"
] | 1 | 2021-05-17T01:42:29.000Z | 2021-05-17T01:42:29.000Z | research/object_detection/models/keras_applications1/mobilenet_v2_test.py | jdavidagudelo/tensorflow-models | 6f019beec73b01861363bf717706e27f4210b979 | [
"Apache-2.0"
] | null | null | null | research/object_detection/models/keras_applications1/mobilenet_v2_test.py | jdavidagudelo/tensorflow-models | 6f019beec73b01861363bf717706e27f4210b979 | [
"Apache-2.0"
] | null | null | null | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for mobilenet_v2."""
import numpy as np
import tensorflow as tf
from google.protobuf import text_format
from research.object_detection.builders import hyperparams_builder
from research.object_detection.models.keras_applications1 import mobilenet_v2
from research.object_detection.protos import hyperparams_pb2
from research.object_detection.utils import test_case
_layers_to_check = [
'Conv1_relu',
'block_1_expand_relu', 'block_1_depthwise_relu', 'block_1_project_BN',
'block_2_expand_relu', 'block_2_depthwise_relu', 'block_2_project_BN',
'block_3_expand_relu', 'block_3_depthwise_relu', 'block_3_project_BN',
'block_4_expand_relu', 'block_4_depthwise_relu', 'block_4_project_BN',
'block_5_expand_relu', 'block_5_depthwise_relu', 'block_5_project_BN',
'block_6_expand_relu', 'block_6_depthwise_relu', 'block_6_project_BN',
'block_7_expand_relu', 'block_7_depthwise_relu', 'block_7_project_BN',
'block_8_expand_relu', 'block_8_depthwise_relu', 'block_8_project_BN',
'block_9_expand_relu', 'block_9_depthwise_relu', 'block_9_project_BN',
'block_10_expand_relu', 'block_10_depthwise_relu', 'block_10_project_BN',
'block_11_expand_relu', 'block_11_depthwise_relu', 'block_11_project_BN',
'block_12_expand_relu', 'block_12_depthwise_relu', 'block_12_project_BN',
'block_13_expand_relu', 'block_13_depthwise_relu', 'block_13_project_BN',
'block_14_expand_relu', 'block_14_depthwise_relu', 'block_14_project_BN',
'block_15_expand_relu', 'block_15_depthwise_relu', 'block_15_project_BN',
'block_16_expand_relu', 'block_16_depthwise_relu', 'block_16_project_BN',
'out_relu']
def _create_application_with_layer_outputs(
layer_names, batchnorm_training,
conv_hyperparams=None,
use_explicit_padding=False,
alpha=1.0,
min_depth=None):
"""Constructs Keras mobilenetv2 that extracts intermediate layer outputs."""
if not layer_names:
layer_names = _layers_to_check
full_model = mobilenet_v2.mobilenet_v2(
batchnorm_training=batchnorm_training,
conv_hyperparams=conv_hyperparams,
weights=None,
use_explicit_padding=use_explicit_padding,
alpha=alpha,
min_depth=min_depth,
include_top=False)
layer_outputs = [full_model.get_layer(name=layer).output
for layer in layer_names]
return tf.keras.Model(
inputs=full_model.inputs,
outputs=layer_outputs)
class MobilenetV2Test(test_case.TestCase):
def _build_conv_hyperparams(self):
conv_hyperparams = hyperparams_pb2.Hyperparams()
conv_hyperparams_text_proto = """
activation: RELU_6
regularizer {
l2_regularizer {
}
}
initializer {
truncated_normal_initializer {
}
}
batch_norm {
train: true,
scale: false,
center: true,
decay: 0.2,
epsilon: 0.1,
}
"""
text_format.Merge(conv_hyperparams_text_proto, conv_hyperparams)
return hyperparams_builder.KerasLayerHyperparams(conv_hyperparams)
def _check_returns_correct_shape(
self, batch_size, image_height, image_width, depth_multiplier,
expected_feature_map_shapes, use_explicit_padding=False, min_depth=None,
layer_names=None):
def graph_fn(image_tensor):
model = _create_application_with_layer_outputs(
layer_names=layer_names,
batchnorm_training=False, use_explicit_padding=use_explicit_padding,
min_depth=min_depth,
alpha=depth_multiplier)
return model(image_tensor)
image_tensor = np.random.rand(batch_size, image_height, image_width,
3).astype(np.float32)
feature_maps = self.execute(graph_fn, [image_tensor])
for feature_map, expected_shape in zip(
feature_maps, expected_feature_map_shapes):
self.assertAllEqual(feature_map.shape, expected_shape)
def _check_returns_correct_shapes_with_dynamic_inputs(
self, batch_size, image_height, image_width, depth_multiplier,
expected_feature_map_shapes, use_explicit_padding=False,
layer_names=None):
def graph_fn(image_height, image_width):
image_tensor = tf.random_uniform([batch_size, image_height, image_width,
3], dtype=tf.float32)
model = _create_application_with_layer_outputs(
layer_names=layer_names,
batchnorm_training=False, use_explicit_padding=use_explicit_padding,
alpha=depth_multiplier)
return model(image_tensor)
feature_maps = self.execute_cpu(graph_fn, [
np.array(image_height, dtype=np.int32),
np.array(image_width, dtype=np.int32)
])
for feature_map, expected_shape in zip(
feature_maps, expected_feature_map_shapes):
self.assertAllEqual(feature_map.shape, expected_shape)
def _get_variables(self, depth_multiplier, layer_names=None):
g = tf.Graph()
with g.as_default():
preprocessed_inputs = tf.placeholder(tf.float32, (4, None, None, 3))
model = _create_application_with_layer_outputs(
layer_names=layer_names,
batchnorm_training=False, use_explicit_padding=False,
alpha=depth_multiplier)
model(preprocessed_inputs)
return g.get_collection(tf.GraphKeys.GLOBAL_VARIABLES)
def test_returns_correct_shapes_128(self):
image_height = 128
image_width = 128
depth_multiplier = 1.0
expected_feature_map_shape = [(2, 64, 64, 32),
(2, 64, 64, 96),
(2, 32, 32, 96),
(2, 32, 32, 24),
(2, 32, 32, 144),
(2, 32, 32, 144),
(2, 32, 32, 24),
(2, 32, 32, 144),
(2, 16, 16, 144),
(2, 16, 16, 32),
(2, 16, 16, 192),
(2, 16, 16, 192),
(2, 16, 16, 32),
(2, 16, 16, 192),
(2, 16, 16, 192),
(2, 16, 16, 32),
(2, 16, 16, 192),
(2, 8, 8, 192),
(2, 8, 8, 64),
(2, 8, 8, 384),
(2, 8, 8, 384),
(2, 8, 8, 64),
(2, 8, 8, 384),
(2, 8, 8, 384),
(2, 8, 8, 64),
(2, 8, 8, 384),
(2, 8, 8, 384),
(2, 8, 8, 64),
(2, 8, 8, 384),
(2, 8, 8, 384),
(2, 8, 8, 96),
(2, 8, 8, 576),
(2, 8, 8, 576),
(2, 8, 8, 96),
(2, 8, 8, 576),
(2, 8, 8, 576),
(2, 8, 8, 96),
(2, 8, 8, 576),
(2, 4, 4, 576),
(2, 4, 4, 160),
(2, 4, 4, 960),
(2, 4, 4, 960),
(2, 4, 4, 160),
(2, 4, 4, 960),
(2, 4, 4, 960),
(2, 4, 4, 160),
(2, 4, 4, 960),
(2, 4, 4, 960),
(2, 4, 4, 320),
(2, 4, 4, 1280)]
self._check_returns_correct_shape(
2, image_height, image_width, depth_multiplier,
expected_feature_map_shape)
def test_returns_correct_shapes_128_explicit_padding(
self):
image_height = 128
image_width = 128
depth_multiplier = 1.0
expected_feature_map_shape = [(2, 64, 64, 32),
(2, 64, 64, 96),
(2, 32, 32, 96),
(2, 32, 32, 24),
(2, 32, 32, 144),
(2, 32, 32, 144),
(2, 32, 32, 24),
(2, 32, 32, 144),
(2, 16, 16, 144),
(2, 16, 16, 32),
(2, 16, 16, 192),
(2, 16, 16, 192),
(2, 16, 16, 32),
(2, 16, 16, 192),
(2, 16, 16, 192),
(2, 16, 16, 32),
(2, 16, 16, 192),
(2, 8, 8, 192),
(2, 8, 8, 64),
(2, 8, 8, 384),
(2, 8, 8, 384),
(2, 8, 8, 64),
(2, 8, 8, 384),
(2, 8, 8, 384),
(2, 8, 8, 64),
(2, 8, 8, 384),
(2, 8, 8, 384),
(2, 8, 8, 64),
(2, 8, 8, 384),
(2, 8, 8, 384),
(2, 8, 8, 96),
(2, 8, 8, 576),
(2, 8, 8, 576),
(2, 8, 8, 96),
(2, 8, 8, 576),
(2, 8, 8, 576),
(2, 8, 8, 96),
(2, 8, 8, 576),
(2, 4, 4, 576),
(2, 4, 4, 160),
(2, 4, 4, 960),
(2, 4, 4, 960),
(2, 4, 4, 160),
(2, 4, 4, 960),
(2, 4, 4, 960),
(2, 4, 4, 160),
(2, 4, 4, 960),
(2, 4, 4, 960),
(2, 4, 4, 320),
(2, 4, 4, 1280)]
self._check_returns_correct_shape(
2, image_height, image_width, depth_multiplier,
expected_feature_map_shape, use_explicit_padding=True)
def test_returns_correct_shapes_with_dynamic_inputs(
self):
image_height = 128
image_width = 128
depth_multiplier = 1.0
expected_feature_map_shape = [(2, 64, 64, 32),
(2, 64, 64, 96),
(2, 32, 32, 96),
(2, 32, 32, 24),
(2, 32, 32, 144),
(2, 32, 32, 144),
(2, 32, 32, 24),
(2, 32, 32, 144),
(2, 16, 16, 144),
(2, 16, 16, 32),
(2, 16, 16, 192),
(2, 16, 16, 192),
(2, 16, 16, 32),
(2, 16, 16, 192),
(2, 16, 16, 192),
(2, 16, 16, 32),
(2, 16, 16, 192),
(2, 8, 8, 192),
(2, 8, 8, 64),
(2, 8, 8, 384),
(2, 8, 8, 384),
(2, 8, 8, 64),
(2, 8, 8, 384),
(2, 8, 8, 384),
(2, 8, 8, 64),
(2, 8, 8, 384),
(2, 8, 8, 384),
(2, 8, 8, 64),
(2, 8, 8, 384),
(2, 8, 8, 384),
(2, 8, 8, 96),
(2, 8, 8, 576),
(2, 8, 8, 576),
(2, 8, 8, 96),
(2, 8, 8, 576),
(2, 8, 8, 576),
(2, 8, 8, 96),
(2, 8, 8, 576),
(2, 4, 4, 576),
(2, 4, 4, 160),
(2, 4, 4, 960),
(2, 4, 4, 960),
(2, 4, 4, 160),
(2, 4, 4, 960),
(2, 4, 4, 960),
(2, 4, 4, 160),
(2, 4, 4, 960),
(2, 4, 4, 960),
(2, 4, 4, 320),
(2, 4, 4, 1280)]
self._check_returns_correct_shapes_with_dynamic_inputs(
2, image_height, image_width, depth_multiplier,
expected_feature_map_shape)
def test_returns_correct_shapes_299(self):
image_height = 299
image_width = 299
depth_multiplier = 1.0
expected_feature_map_shape = [(2, 150, 150, 32),
(2, 150, 150, 96),
(2, 75, 75, 96),
(2, 75, 75, 24),
(2, 75, 75, 144),
(2, 75, 75, 144),
(2, 75, 75, 24),
(2, 75, 75, 144),
(2, 38, 38, 144),
(2, 38, 38, 32),
(2, 38, 38, 192),
(2, 38, 38, 192),
(2, 38, 38, 32),
(2, 38, 38, 192),
(2, 38, 38, 192),
(2, 38, 38, 32),
(2, 38, 38, 192),
(2, 19, 19, 192),
(2, 19, 19, 64),
(2, 19, 19, 384),
(2, 19, 19, 384),
(2, 19, 19, 64),
(2, 19, 19, 384),
(2, 19, 19, 384),
(2, 19, 19, 64),
(2, 19, 19, 384),
(2, 19, 19, 384),
(2, 19, 19, 64),
(2, 19, 19, 384),
(2, 19, 19, 384),
(2, 19, 19, 96),
(2, 19, 19, 576),
(2, 19, 19, 576),
(2, 19, 19, 96),
(2, 19, 19, 576),
(2, 19, 19, 576),
(2, 19, 19, 96),
(2, 19, 19, 576),
(2, 10, 10, 576),
(2, 10, 10, 160),
(2, 10, 10, 960),
(2, 10, 10, 960),
(2, 10, 10, 160),
(2, 10, 10, 960),
(2, 10, 10, 960),
(2, 10, 10, 160),
(2, 10, 10, 960),
(2, 10, 10, 960),
(2, 10, 10, 320),
(2, 10, 10, 1280)]
self._check_returns_correct_shape(
2, image_height, image_width, depth_multiplier,
expected_feature_map_shape)
def test_returns_correct_shapes_enforcing_min_depth(
self):
image_height = 299
image_width = 299
depth_multiplier = 0.5 ** 12
expected_feature_map_shape = [(2, 150, 150, 32),
(2, 150, 150, 192),
(2, 75, 75, 192),
(2, 75, 75, 32),
(2, 75, 75, 192),
(2, 75, 75, 192),
(2, 75, 75, 32),
(2, 75, 75, 192),
(2, 38, 38, 192),
(2, 38, 38, 32),
(2, 38, 38, 192),
(2, 38, 38, 192),
(2, 38, 38, 32),
(2, 38, 38, 192),
(2, 38, 38, 192),
(2, 38, 38, 32),
(2, 38, 38, 192),
(2, 19, 19, 192),
(2, 19, 19, 32),
(2, 19, 19, 192),
(2, 19, 19, 192),
(2, 19, 19, 32),
(2, 19, 19, 192),
(2, 19, 19, 192),
(2, 19, 19, 32),
(2, 19, 19, 192),
(2, 19, 19, 192),
(2, 19, 19, 32),
(2, 19, 19, 192),
(2, 19, 19, 192),
(2, 19, 19, 32),
(2, 19, 19, 192),
(2, 19, 19, 192),
(2, 19, 19, 32),
(2, 19, 19, 192),
(2, 19, 19, 192),
(2, 19, 19, 32),
(2, 19, 19, 192),
(2, 10, 10, 192),
(2, 10, 10, 32),
(2, 10, 10, 192),
(2, 10, 10, 192),
(2, 10, 10, 32),
(2, 10, 10, 192),
(2, 10, 10, 192),
(2, 10, 10, 32),
(2, 10, 10, 192),
(2, 10, 10, 192),
(2, 10, 10, 32),
(2, 10, 10, 32)]
self._check_returns_correct_shape(
2, image_height, image_width, depth_multiplier,
expected_feature_map_shape, min_depth=32)
def test_hyperparam_override(self):
hyperparams = self._build_conv_hyperparams()
model = mobilenet_v2.mobilenet_v2(
batchnorm_training=True,
conv_hyperparams=hyperparams,
weights=None,
use_explicit_padding=False,
alpha=1.0,
min_depth=32,
include_top=False)
hyperparams.params()
bn_layer = model.get_layer(name='block_5_project_BN')
self.assertAllClose(bn_layer.momentum, 0.2)
self.assertAllClose(bn_layer.epsilon, 0.1)
def test_variable_count(self):
depth_multiplier = 1
variables = self._get_variables(depth_multiplier)
self.assertEqual(len(variables), 260)
if __name__ == '__main__':
tf.test.main()
| 47.764957 | 84 | 0.346381 |
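# --- Illustration only (not part of mobilenet_v2_test.py) ---------------------
# A minimal sketch showing how the helper defined above can be reused outside
# the tests to pull intermediate MobileNetV2 activations. The layer names come
# from _layers_to_check; the input size is an arbitrary assumption.
def _example_feature_extraction():
  extractor = _create_application_with_layer_outputs(
      layer_names=['Conv1_relu', 'out_relu'],
      batchnorm_training=False)
  feature_maps = extractor(tf.zeros([1, 128, 128, 3]))
  return [f.shape for f in feature_maps]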
4a2083c5d68ac9ba81eac88580f8bc78e34a58a9 | 1,756 | py | Python | benchmark/startPyquil2861.py | UCLA-SEAL/QDiff | d968cbc47fe926b7f88b4adf10490f1edd6f8819 | [
"BSD-3-Clause"
] | null | null | null | benchmark/startPyquil2861.py | UCLA-SEAL/QDiff | d968cbc47fe926b7f88b4adf10490f1edd6f8819 | [
"BSD-3-Clause"
] | null | null | null | benchmark/startPyquil2861.py | UCLA-SEAL/QDiff | d968cbc47fe926b7f88b4adf10490f1edd6f8819 | [
"BSD-3-Clause"
] | null | null | null | # qubit number=4
# total number=31
import pyquil
from pyquil.api import local_forest_runtime, QVMConnection
from pyquil import Program, get_qc
from pyquil.gates import *
import numpy as np
conn = QVMConnection()
def make_circuit()-> Program:
prog = Program() # circuit begin
prog += CNOT(0,3) # number=14
prog += X(3) # number=15
prog += RX(1.8001325905069514,3) # number=18
prog += Z(1) # number=27
prog += CNOT(0,3) # number=16
prog += H(1) # number=22
prog += H(1) # number=2
prog += H(2) # number=3
prog += H(3) # number=4
prog += H(0) # number=5
prog += CNOT(0,3) # number=28
prog += X(3) # number=29
prog += CNOT(0,3) # number=30
prog += H(1) # number=6
prog += X(1) # number=25
prog += H(2) # number=7
prog += H(3) # number=8
prog += Z(1) # number=21
prog += H(0) # number=9
prog += CNOT(2,0) # number=10
prog += X(1) # number=17
prog += CNOT(2,0) # number=11
prog += Y(0) # number=12
prog += Y(0) # number=13
prog += Z(2) # number=26
prog += CNOT(2,1) # number=23
prog += X(0) # number=19
prog += X(0) # number=20
# circuit end
return prog
def summrise_results(bitstrings) -> dict:
d = {}
for l in bitstrings:
if d.get(l) is None:
d[l] = 1
else:
d[l] = d[l] + 1
return d
if __name__ == '__main__':
prog = make_circuit()
qvm = get_qc('4q-qvm')
results = qvm.run_and_measure(prog,1024)
bitstrings = np.vstack([results[i] for i in qvm.qubits()]).T
bitstrings = [''.join(map(str, l)) for l in bitstrings]
writefile = open("../data/startPyquil2861.csv","w")
print(summrise_results(bitstrings),file=writefile)
writefile.close()
| 25.085714 | 64 | 0.570615 |
4a2083f6e09b26dad980c7a505b192537fab90d6 | 2,219 | py | Python | test/quantum_volume/test_qv.py | hodgestar/qiskit-ignis | 0e511df442e864cd0e06efcdd1db7b03c011168b | [
"Apache-2.0"
] | 1 | 2019-12-12T07:58:13.000Z | 2019-12-12T07:58:13.000Z | test/quantum_volume/test_qv.py | hodgestar/qiskit-ignis | 0e511df442e864cd0e06efcdd1db7b03c011168b | [
"Apache-2.0"
] | 3 | 2020-11-06T16:54:48.000Z | 2020-11-11T01:37:30.000Z | test/quantum_volume/test_qv.py | hodgestar/qiskit-ignis | 0e511df442e864cd0e06efcdd1db7b03c011168b | [
"Apache-2.0"
] | 2 | 2020-01-01T14:46:41.000Z | 2020-01-17T16:06:29.000Z | # -*- coding: utf-8 -*-
# This code is part of Qiskit.
#
# (C) Copyright IBM 2019.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
# pylint: disable=undefined-loop-variable
"""
Run through Quantum volume
"""
import unittest
import os
import pickle
import qiskit.ignis.verification.quantum_volume as qv
class TestQV(unittest.TestCase):
""" The test class """
def test_qv_circuits(self):
""" Test circuit generation """
# Qubit list
qubit_lists = [[0, 1, 2], [0, 1, 2, 4], [0, 1, 2, 4, 7]]
ntrials = 5
qv_circs, _ = qv.qv_circuits(qubit_lists, ntrials)
self.assertEqual(len(qv_circs), ntrials,
"Error: Not enough trials")
self.assertEqual(len(qv_circs[0]), len(qubit_lists),
"Error: Not enough circuits for the "
"number of specified qubit lists")
def test_qv_fitter(self):
""" Test the fitter with some pickled result data"""
os.path.join(os.path.dirname(__file__),
'test_fitter_results_2.pkl')
f0 = open(os.path.join(os.path.dirname(__file__),
'qv_ideal_results.pkl'), 'rb')
ideal_results = pickle.load(f0)
f0.close()
f0 = open(os.path.join(os.path.dirname(__file__),
'qv_exp_results.pkl'), 'rb')
exp_results = pickle.load(f0)
f0.close()
qubit_lists = [[0, 1, 3], [0, 1, 3, 5], [0, 1, 3, 5, 7],
[0, 1, 3, 5, 7, 10]]
qv_fitter = qv.QVFitter(qubit_lists=qubit_lists)
qv_fitter.add_statevectors(ideal_results)
qv_fitter.add_data(exp_results)
qv_success_list = qv_fitter.qv_success()
self.assertFalse(qv_success_list[0][0])
if __name__ == '__main__':
unittest.main()
| 29.197368 | 77 | 0.606129 |
4a2084006e8b61e01b3791ef9ad59181c01fe652 | 11,183 | py | Python | Python/scrabble/Player.py | CNHume/Samples | 2fdaa7a3193f19a882d0adebffaaf56af0984654 | [
"MIT"
] | null | null | null | Python/scrabble/Player.py | CNHume/Samples | 2fdaa7a3193f19a882d0adebffaaf56af0984654 | [
"MIT"
] | null | null | null | Python/scrabble/Player.py | CNHume/Samples | 2fdaa7a3193f19a882d0adebffaaf56af0984654 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# Copyright (C) 2018, Christopher Hume. All rights reserved.
#
# You should have received a copy of the MIT License along with this program.
# If not, see https://opensource.org/licenses/MIT.
#
# 2018-09-09 CNHume Created File
#
from operator import itemgetter
import string
import re
from Board import Board
from Tile import Tile
class Player(object):
"""Scrabble Player"""
COMMA = u','
EMPTY = u''
EQUAL = u'='
BINGO_VALUE = 50
PERMUTE_LEN_MAX = 8
SCORELESS_TURN_MAX = 6
WHITESPACE = re.compile("\s*")
def __init__(self, players, wordList, board, reverse, testing):
self.reverse = reverse
self.testing = testing
self.players = players
self.scores = [0] * self.players
self.racks = [[] for player in range(self.players)]
self.board = board
self.board.fillRacks(self.racks)
self.dictionary = set(wordList)
if not wordList:
print(u'There are no words.')
self.turn = 0
self.last_score_turn = self.turn
self.first_pass_turn_player = None
pass
def Test(self):
#Len Count
# 1: 2
# 2: 5
# 3: 16
# 4: 65
# 5: 326
# 6: 1957
# 7: 13700
# 8: 109601
# 9: 986410
# 10: 9864101
#cases = [u'a', u'ab', u'abc', u'abcd', u'abcde', u'abcdef', u'abcdefg', u'abcdefgh', u'abcdefghi']
cases = [u'', u'aa', u'aab', u'aabc', u'aabcd']
for case in cases:
words = Player.anagram(case, True)
print(Board.KVP.format(len(case), len(words)))
print(Player.COMMA.join(words))
pass
def perform(self, commands):
for command in commands:
if not self.dispatch(command):
break
def start(self):
loop = True
while loop:
turn_player = self.turn % self.players
player_turn = self.turn / self.players
self.showRack(turn_player)
prompt = u'Player {}, Turn {}: '.format(turn_player + 1, player_turn + 1)
command = Player.prompt(prompt)
loop = self.dispatch(command)
for player in range(self.players):
self.debitRackValue(player)
Player.showScores(self.scores)
def help(self):
print(u'board')
print(u'exchange letters')
print(u'exit')
print(u'help')
print(u'list letters')
print(u'pass')
print(u'play square=letter, ...')
print(u'score')
def dispatch(self, command):
tokens = re.split(Player.WHITESPACE, command.strip())
if not tokens:
return True
turn_player = self.turn % self.players
rack = self.racks[turn_player]
token = tokens.pop(0)
rest = Player.EMPTY.join(tokens)
verb = token.lower()
if verb == Player.EMPTY:
return True
elif verb == u'board':
self.board.display()
return True
elif verb == u'exchange':
letters = rest.upper() if rest else rack
self.board.tiles.exchange(rack, letters)
self.showRack(turn_player)
self.first_pass_turn_player = None
self.turn += 1
# Test for Scoreless Turn below
pass
elif verb == u'exit':
return False
elif verb == u'help':
self.help()
return True
elif verb == u'list':
      letters = rest.upper() if rest else rack
self.list(letters)
return True
elif verb == u'pass':
if self.first_pass_turn_player == turn_player:
print(u'All Players passed; and Player {} has passed twice.'
.format(turn_player + 1))
return False
if self.first_pass_turn_player is None:
self.first_pass_turn_player = turn_player
self.turn += 1
# Test for Scoreless Turn below
pass
elif verb == u'play':
placements = self.parsePlacements(rest)
# Allow turn_player to try again after Illegal Tile Placement
if not placements:
return True
word = self.legalPlacements(placements)
if not word:
return True
if self.placeTiles(placements, word):
self.first_pass_turn_player = None
self.last_score_turn = self.turn
if len(rack) == 0:
print(u'Player {}, Rack Empty and Bag Empty.'
.format(turn_player + 1))
return False
self.turn += 1
# Test for Scoreless Turn below
pass
elif verb == u'score':
Player.showScores(self.scores)
return True
else:
print(u'{} is not a valid command.'.format(token))
return True
if self.last_score_turn + Player.SCORELESS_TURN_MAX <= self.turn:
print(u'The limit of {} Scoreless Turns has been reached.'
.format(Player.SCORELESS_TURN_MAX))
return False
return True
def placeTiles(self, placements, word):
letters = [letter for square, letter in placements]
turn_player = self.turn % self.players
rack = self.racks[turn_player]
if not Tile.hasLetters(rack, letters):
print(u'Player {}, Rack: {} missing some of the letters: {}'
.format(turn_player + 1, Tile.spaced(rack), Tile.spaced(letters)))
if not self.testing:
return False
# Two Blanks played on the first turn can result in a valid total of zero
valid, total = self.evaluatePlacements(placements, word)
if valid:
for placement in placements:
square, letter = placement
self.board.set(square, letter)
self.board.tiles.exchange(rack, letters)
# Award 50-point bonus if a full rack was used
if len(letters) == Tile.RACKSIZE:
print(u'Bingo!')
total += Player.BINGO_VALUE
# Increase player score
self.scores[turn_player] += total
print(u'Player {} gained {} points'.format(turn_player + 1, total))
return True
def firstPlay(self):
return self.board.get(self.board.center) == Board.BLANK
def firstValid(self, placements):
squares = [square for square, letter in placements]
if len(squares) < 2 or squares[0] in squares[1:]:
return False
return self.board.center in squares
def legalPlacements(self, placements):
empty = Player.EMPTY
if self.firstPlay():
if not self.firstValid(placements):
print(u'First word must cover the center square')
if not self.testing:
return empty
elif not self.board.contact(placements):
letters = [letter for square, letter in placements]
print(u'The letters: {} are not in contact with an existing word'
.format(Tile.spaced(letters)))
if not self.testing:
return empty
# Return Main Word:
horizontal = self.board.horizontal(placements)
word = self.board.tiled(placements, horizontal)
if not word:
squareNames = [Board.squareName(square) for square, letter in placements]
print(u'Illegal Tile Placement: {}'
.format(Tile.spaced(squareNames)))
return word
def parsePlacements(self, statement):
empty = []
placements = []
clauses = statement.split(Player.COMMA)
for clause in clauses:
placement = self.parsePlacement(clause)
if not placement:
return empty
placements.append(placement)
return placements
#
# Placements are of the form: square=letter where the letter may
# be followed by a question mark to indicate use of a blank tile.
#
def parsePlacement(self, clause):
empty = ()
terms = clause.split(Player.EQUAL)
if len(terms) != 2:
print(u'{} is not a valid placement'.format(clause))
return empty
squareName = terms[0].strip()
text = terms[1].strip()
if len(text) == 0:
print(u'no letter supplied')
return empty
blank = len(text) == 2 and text[1] == Tile.QUESTION
if len(text) > 1 and not blank:
print(u'{} is not a valid tile'.format(text))
return empty
letter = text[0]
if not letter.isalpha():
print(u'{} is not a valid letter'.format(letter))
return empty
square = self.board.parseSquare(squareName)
if not square:
return empty
return square, letter.lower() if blank else letter.upper()
def validWord(self, word, reverse):
if not self.dictionary or word.lower() in self.dictionary:
return word
if reverse:
rword = word[::-1]
if rword.lower() in self.dictionary:
return rword
print(u'{} not found'.format(word))
#[ToDo]Prompt for dictionary override [Y/N] here
return Player.EMPTY
def list(self, letters):
alphas = [letter for letter in letters if letter.isalpha()]
length = len(alphas)
if length > Player.PERMUTE_LEN_MAX:
print(u'Length of {} exceeds Maximum of {}'
.format(length, Player.PERMUTE_LEN_MAX))
if not self.testing:
return
permutations = Player.anagram(Player.EMPTY.join(alphas), True)
words = [word for word in permutations if word.lower() in self.dictionary] if self.dictionary else permutations
for index, word in enumerate(sorted(words)):
counter = str(index + 1)
print(Board.KVP.format(counter.rjust(3), word))
def evaluatePlacements(self, placements, tiledWord):
total = 0
tiledScore = self.board.score(tiledWord, placements)
horizontal = self.board.horizontal(placements)
pairs = self.board.crosswords(placements, horizontal)
pairs.insert(0, (tiledWord, tiledScore))
# Validate new words
for word, score in pairs:
valid = self.validWord(word, self.reverse)
if not valid:
if not self.testing:
return False, total
print('word: {}, score: {}'.format(valid, score))
total += score
return True, total
def showRack(self, turn_player):
rack = self.racks[turn_player]
print(u'Player {}, Rack: {}'.format(turn_player + 1, Tile.spaced(rack)))
def debitRackValue(self, player):
rack = self.racks[player]
debit = self.board.tiles.rackValue(rack)
print(u'Player {}, Debit: {}'.format(player + 1, debit))
self.scores[player] -= debit
@staticmethod
def showScores(scores):
for player, score in sorted(enumerate(scores), key=itemgetter(1, 0), reverse=True):
print(u'Player {}, Score: {}'.format(player + 1, score))
@staticmethod
def prompt(prompt):
return raw_input(prompt)
@staticmethod
def anagram(letters, subset=False):
"""Return every permutation of letters"""
words = [Player.EMPTY] if subset else []
length = len(letters)
if length < 1:
pass
elif length < 2:
words.append(letters[0])
else:
chosen = set()
for choice in range(length):
letter = letters[choice]
if letter not in chosen:
chosen.add(letter)
unused = letters[:choice] + letters[choice + 1:]
suffixes = Player.anagram(unused, subset)
for suffix in suffixes:
words.append(letter + suffix)
return words
@staticmethod
def findFirst(predicate, lines, filename=None):
"""Find first line satisfying predicate"""
try:
# Python generator
found = (index for index, element in enumerate(lines) if predicate(element))
found_index = next(found)
except StopIteration:
print(u'Only blank lines found in {0}'.format(filename))
found_index = None
return found_index
| 29.198433 | 115 | 0.635429 |
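# --- Illustration only (not part of Player.py) --------------------------------
# A quick demonstration of the anagram helper defined above: with subset=True
# it returns every ordering of every subset of the letters (including the
# empty string), which is what the 'list' command relies on.
if __name__ == "__main__":
  demo = Player.anagram(u'cat', True)
  print(len(demo))      # 16, matching the count table in Player.Test()
  print(sorted(demo))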
4a2085abd13290838f53316ecda4b242db62488c | 1,170 | py | Python | airflow/operators/mysql_operator.py | dtardoin/airflow | 4d7f413c7db3ffdb1236e7799e4fe647842e5dbd | [
"Apache-2.0"
] | 1 | 2021-03-02T20:08:53.000Z | 2021-03-02T20:08:53.000Z | airflow/operators/mysql_operator.py | dtardoin/airflow | 4d7f413c7db3ffdb1236e7799e4fe647842e5dbd | [
"Apache-2.0"
] | null | null | null | airflow/operators/mysql_operator.py | dtardoin/airflow | 4d7f413c7db3ffdb1236e7799e4fe647842e5dbd | [
"Apache-2.0"
] | 1 | 2019-05-14T16:19:12.000Z | 2019-05-14T16:19:12.000Z | import logging
from airflow.hooks import MySqlHook
from airflow.models import BaseOperator
from airflow.utils import apply_defaults
class MySqlOperator(BaseOperator):
"""
Executes sql code in a specific MySQL database
:param mysql_conn_id: reference to a specific mysql database
:type mysql_conn_id: string
:param sql: the sql code to be executed
:type sql: Can receive a str representing a sql statement,
a list of str (sql statements), or reference to a template file.
        Template references are recognized by str ending in '.sql'
"""
template_fields = ('sql',)
template_ext = ('.sql',)
ui_color = '#ededed'
@apply_defaults
def __init__(
self, sql, mysql_conn_id='mysql_default', parameters=None,
*args, **kwargs):
super(MySqlOperator, self).__init__(*args, **kwargs)
self.mysql_conn_id = mysql_conn_id
self.sql = sql
self.parameters = parameters
def execute(self, context):
logging.info('Executing: ' + str(self.sql))
hook = MySqlHook(mysql_conn_id=self.mysql_conn_id)
hook.run(self.sql, parameters=self.parameters)
| 31.621622 | 72 | 0.678632 |
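# --- Illustration only (not part of mysql_operator.py) ------------------------
# A minimal sketch of using the operator above inside a DAG, in the style of
# this (older) Airflow API. The dag_id, task_id, SQL and start date are
# assumptions chosen only for illustration.
from datetime import datetime
from airflow.models import DAG

dag = DAG('example_mysql', start_date=datetime(2016, 1, 1), schedule_interval=None)

create_table = MySqlOperator(
    task_id='create_demo_table',
    mysql_conn_id='mysql_default',
    sql='CREATE TABLE IF NOT EXISTS demo (id INT)',
    dag=dag,
)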
4a2085cdd79a3f15b282401bac7f383fc6083298 | 2,304 | py | Python | tests/test_derive_adduct_from_name.py | adelabriere/matchms | a580539e6db3f4f00e12097983b85b2d494159ba | [
"Apache-2.0"
] | 64 | 2020-06-22T14:59:21.000Z | 2022-03-30T00:50:13.000Z | tests/test_derive_adduct_from_name.py | adelabriere/matchms | a580539e6db3f4f00e12097983b85b2d494159ba | [
"Apache-2.0"
] | 561 | 2020-03-19T14:35:59.000Z | 2022-03-29T10:11:12.000Z | tests/test_derive_adduct_from_name.py | adelabriere/matchms | a580539e6db3f4f00e12097983b85b2d494159ba | [
"Apache-2.0"
] | 32 | 2020-05-06T07:35:59.000Z | 2022-03-10T09:03:45.000Z | import numpy
from matchms import Spectrum
from matchms.filtering import derive_adduct_from_name
def test_derive_adduct_from_name():
spectrum_in = Spectrum(mz=numpy.array([], dtype="float"),
intensities=numpy.array([], dtype="float"),
metadata={"compound_name": "peptideXYZ [M+H+K]"})
spectrum = derive_adduct_from_name(spectrum_in)
assert spectrum.get("adduct") == "[M+H+K]", "Expected different adduct."
assert spectrum.get("compound_name") == "peptideXYZ", "Expected different cleaned name."
def test_derive_adduct_from_name_dont_overwrite_present_adduct():
spectrum_in = Spectrum(mz=numpy.array([], dtype="float"),
intensities=numpy.array([], dtype="float"),
metadata={"compound_name": "peptideXYZ [M+H+K]",
"adduct": "M+H"})
spectrum = derive_adduct_from_name(spectrum_in)
assert spectrum.get("adduct") == "M+H", "Expected different adduct."
assert spectrum.get("compound_name") == "peptideXYZ", "Expected different cleaned name."
def test_derive_adduct_from_name_dont_remove_from_name():
spectrum_in = Spectrum(mz=numpy.array([], dtype="float"),
intensities=numpy.array([], dtype="float"),
metadata={"compound_name": "peptideXYZ [M+H+K]"})
spectrum = derive_adduct_from_name(spectrum_in, remove_adduct_from_name=False)
assert spectrum.get("adduct") == "[M+H+K]", "Expected different adduct."
assert spectrum.get("compound_name") == spectrum_in.get("compound_name"), "Expected no change to name."
def test_derive_adduct_from_name_no_compound_name_empty_name():
spectrum_in = Spectrum(mz=numpy.array([], dtype="float"),
intensities=numpy.array([], dtype="float"),
metadata={"name": ""})
spectrum = derive_adduct_from_name(spectrum_in)
assert spectrum.get("adduct", None) is None, "Expected None for adduct."
assert spectrum.get("compound_name", None) is None, "Expected None for name."
def test_empty_spectrum():
spectrum_in = None
spectrum = derive_adduct_from_name(spectrum_in)
assert spectrum is None, "Expected different handling of None spectrum."
| 41.142857 | 107 | 0.65408 |
4a2086e58a509af6536a764a7ee169742c362e19 | 131 | py | Python | recipes-support/pot-watcher/files/play-sound.py | masselstine/meta-alexa | bbb44fa29a73d1cc9670b24a031acfdcf100e8d1 | [
"MIT"
] | null | null | null | recipes-support/pot-watcher/files/play-sound.py | masselstine/meta-alexa | bbb44fa29a73d1cc9670b24a031acfdcf100e8d1 | [
"MIT"
] | null | null | null | recipes-support/pot-watcher/files/play-sound.py | masselstine/meta-alexa | bbb44fa29a73d1cc9670b24a031acfdcf100e8d1 | [
"MIT"
] | null | null | null | #!/usr/bin/python3
import pexpect
pexpect.run("ssh 192.168.42.1 'aplay /root/beedoo.wav'", events={'(?i)password':'incendia\n'})
| 21.833333 | 94 | 0.687023 |
4a2086f31a9e260c95288e7661cd2f99d991ea97 | 13,919 | py | Python | standard_train/lib/datasets/coco.py | Flsahkong/seeDiffDA | 8c5219b1eb0edb69f24cff03dbbd1a66bdd6cc42 | [
"MIT"
] | 62 | 2018-10-27T02:44:46.000Z | 2022-03-09T12:58:52.000Z | standard_train/lib/datasets/coco.py | Flsahkong/seeDiffDA | 8c5219b1eb0edb69f24cff03dbbd1a66bdd6cc42 | [
"MIT"
] | 12 | 2018-11-16T11:22:40.000Z | 2020-06-07T06:08:10.000Z | standard_train/lib/datasets/coco.py | Flsahkong/seeDiffDA | 8c5219b1eb0edb69f24cff03dbbd1a66bdd6cc42 | [
"MIT"
] | 24 | 2018-10-27T02:44:29.000Z | 2021-07-12T08:49:17.000Z | # --------------------------------------------------------
# Fast/er R-CNN
# Licensed under The MIT License [see LICENSE for details]
# Written by Ross Girshick and Xinlei Chen
# --------------------------------------------------------
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from datasets.imdb import imdb
import datasets.ds_utils as ds_utils
from model.utils.config import cfg
import os.path as osp
import sys
import os
import numpy as np
import scipy.sparse
import scipy.io as sio
import pickle
import json
import uuid
# COCO API
from pycocotools.coco import COCO
from pycocotools.cocoeval import COCOeval
from pycocotools import mask as COCOmask
class coco(imdb):
def __init__(self, image_set, year, data_path=None):
imdb.__init__(self, 'coco_' + year + '_' + image_set)
# COCO specific config options
self.config = {'use_salt': True,
'cleanup': True}
# name, paths
self._year = year
self._image_set = image_set
if data_path is not None:
cfg.DATA_DIR = data_path
self._data_path = osp.join(cfg.DATA_DIR, 'coco')
# load COCO API, classes, class <-> id mappings
self._COCO = COCO(self._get_ann_file())
cats = self._COCO.loadCats(self._COCO.getCatIds())
self._classes = tuple(['__background__'] + [c['name'] for c in cats])
self._class_to_ind = dict(list(zip(self.classes, list(range(self.num_classes)))))
self._class_to_coco_cat_id = dict(list(zip([c['name'] for c in cats],
self._COCO.getCatIds())))
self._image_index = self._load_image_set_index()
#print(self._image_index)
#assert 1<0
# Default to roidb handler
self.set_proposal_method('gt')
self.competition_mode(False)
# Some image sets are "views" (i.e. subsets) into others.
# For example, minival2014 is a random 5000 image subset of val2014.
# This mapping tells us where the view's images and proposals come from.
self._view_map = {
'minival2014': 'val2014', # 5k val2014 subset
'valminusminival2014': 'val2014', # val2014 \setminus minival2014
'test-dev2015': 'test2015',
'valminuscapval2014': 'val2014',
'capval2014': 'val2014',
'captest2014': 'val2014'
}
coco_name = image_set + year # e.g., "val2014"
#coco_name = image_set
self._data_name = (self._view_map[coco_name]
if coco_name in self._view_map
else coco_name)
# Dataset splits that have ground-truth annotations (test splits
# do not have gt annotations)
self._gt_splits = ('train', 'val', 'minival')
self.bad_annotations = []
def _get_ann_file(self):
prefix = 'instances' if self._image_set.find('test') == -1 \
else 'image_info'
return osp.join(self._data_path, 'annotations',
prefix + '_' + self._image_set + self._year + '.json')
#return osp.join(self._data_path, 'annotations', self._image_set+'.json')
def _load_image_set_index(self):
"""
Load image ids.
"""
image_ids = self._COCO.getImgIds()
return image_ids
def _get_widths(self):
anns = self._COCO.loadImgs(self._image_index)
widths = [ann['width'] for ann in anns]
return widths
def image_path_at(self, i):
"""
Return the absolute path to image i in the image sequence.
"""
return self.image_path_from_index(self._image_index[i])
def image_id_at(self, i):
"""
Return the absolute path to image i in the image sequence.
"""
return self._image_index[i]
def image_path_from_index(self, index):
"""
Construct an image path from the image's "index" identifier.
"""
# Example image path for index=119993:
# images/train2014/COCO_train2014_000000119993.jpg
file_name = ('COCO_' + self._data_name + '_' +
str(index).zfill(12) + '.jpg')
#image_path = osp.join(self._data_path, 'images',self._data_name, file_name)
image_path = osp.join(self._data_path, 'images', file_name)
assert osp.exists(image_path), \
'Path does not exist: {}'.format(image_path)
return image_path
def gt_roidb(self):
"""
Return the database of ground-truth regions of interest.
This function loads/saves from/to a cache file to speed up future calls.
"""
cache_file = osp.join(self.cache_path, self.name + '_gt_roidb.pkl')
print(cache_file)
if osp.exists(cache_file):
with open(cache_file, 'rb') as fid:
roidb = pickle.load(fid)
print('{} gt roidb loaded from {}'.format(self.name, cache_file))
return roidb
"""
try:
gt_roidb = [self._load_coco_annotation(index)
for index in self._image_index]
except KeyError as e:
print("got bad key...")
gt_roidb = []
i = 0
for index in self._image_index:
try:
gt_roidb.append(self._load_coco_annotation(index))
except KeyError as e:
#print(index, e)
self._image_index.remove(index)
i+=1
"""
#self.bad_annotations = []
gt_roidb = []
i = 0
for index in self._image_index:
try:
gt_roidb.append(self._load_coco_annotation(index))
except KeyError as e:
#print(index, e)
self.bad_annotations.append(index)
i+=1
print("got",i,"bad annotations")
#gt_roidb = [self._load_coco_annotation(index) for index in self._image_index]
with open(cache_file, 'wb') as fid:
print("dumping files")
pickle.dump(gt_roidb, fid, pickle.HIGHEST_PROTOCOL)
print('wrote gt roidb to {}'.format(cache_file))
#self.image_index = self._image_index
print(len(self._image_index))
return gt_roidb
def _load_coco_annotation(self, index):
"""
Loads COCO bounding-box instance annotations. Crowd instances are
handled by marking their overlaps (with all categories) to -1. This
overlap value means that crowd "instances" are excluded from training.
"""
#print("index:\n", index)
im_ann = self._COCO.loadImgs(index)[0]
width = im_ann['width']
height = im_ann['height']
#print("index:", index)
#print("im_ann:", im_ann)
annIds = self._COCO.getAnnIds(imgIds=index, iscrowd=None)
#print("annotation ID:", annIds)
objs = self._COCO.loadAnns(annIds)
#print(objs)
# Sanitize bboxes -- some are invalid
valid_objs = []
for obj in objs:
x1 = np.max((0, obj['bbox'][0]))
y1 = np.max((0, obj['bbox'][1]))
x2 = np.min((width - 1, x1 + np.max((0, obj['bbox'][2] - 1))))
y2 = np.min((height - 1, y1 + np.max((0, obj['bbox'][3] - 1))))
obj['area'] = (x2-x1) * (y2-y1)
if obj['area'] > 0 and x2 >= x1 and y2 >= y1:
obj['clean_bbox'] = [x1, y1, x2, y2]
valid_objs.append(obj)
objs = valid_objs
num_objs = len(objs)
boxes = np.zeros((num_objs, 4), dtype=np.uint16)
gt_classes = np.zeros((num_objs), dtype=np.int32)
overlaps = np.zeros((num_objs, self.num_classes), dtype=np.float32)
seg_areas = np.zeros((num_objs), dtype=np.float32)
# Lookup table to map from COCO category ids to our internal class
# indices
coco_cat_id_to_class_ind = dict([(self._class_to_coco_cat_id[cls],
self._class_to_ind[cls])
for cls in self._classes[1:]])
for ix, obj in enumerate(objs):
cls = coco_cat_id_to_class_ind[obj['category_id']]
boxes[ix, :] = obj['clean_bbox']
gt_classes[ix] = cls
seg_areas[ix] = obj['area']
            # NOTE: iscrowd is forced to False here, so the crowd-exclusion branch below never runs
            obj['iscrowd'] = False
if obj['iscrowd']:
# Set overlap to -1 for all classes for crowd objects
# so they will be excluded during training
overlaps[ix, :] = -1.0
else:
overlaps[ix, cls] = 1.0
ds_utils.validate_boxes(boxes, width=width, height=height)
overlaps = scipy.sparse.csr_matrix(overlaps)
return {'width': width,
'height': height,
'boxes': boxes,
'gt_classes': gt_classes,
'gt_overlaps': overlaps,
'flipped': False,
'seg_areas': seg_areas}
print("\n")
    # NOTE: this redefines _get_widths above; being later in the class body,
    # this roidb-based version is the one actually used.
    def _get_widths(self):
print('calling widths 1')
print(len(self.roidb))
return [r['width'] for r in self.roidb]
def append_flipped_images(self):
print("bad_annotations count:", self.bad_annotations)
for index in self.bad_annotations:
self._image_index.remove(index)
num_images = self.num_images
widths = self._get_widths()
for i in range(num_images):
#for i in range(len(widths)):
#print(self.roidb[i])
boxes = self.roidb[i]['boxes'].copy()
oldx1 = boxes[:, 0].copy()
oldx2 = boxes[:, 2].copy()
#print(self.roidb[i])
#print(boxes)
#print(type(boxes[0]))
#print(boxes[0])
#print(widths[i])
#try:
boxes[:, 0] = widths[i] - oldx2 - 1
boxes[:, 2] = widths[i] - oldx1 - 1
#except IndexError as e:
#print("\n Bad index...")
#assert 1<0
assert (boxes[:, 2] >= boxes[:, 0]).all()
entry = {'width': widths[i],
'height': self.roidb[i]['height'],
'boxes': boxes,
'gt_classes': self.roidb[i]['gt_classes'],
'gt_overlaps': self.roidb[i]['gt_overlaps'],
'flipped': True,
'seg_areas': self.roidb[i]['seg_areas']}
self.roidb.append(entry)
self._image_index = self._image_index * 2
def _get_box_file(self, index):
# first 14 chars / first 22 chars / all chars + .mat
# COCO_val2014_0/COCO_val2014_000000447/COCO_val2014_000000447991.mat
file_name = ('COCO_' + self._data_name +
'_' + str(index).zfill(12) + '.mat')
return osp.join(file_name[:14], file_name[:22], file_name)
def _print_detection_eval_metrics(self, coco_eval):
IoU_lo_thresh = 0.5
IoU_hi_thresh = 0.95
def _get_thr_ind(coco_eval, thr):
ind = np.where((coco_eval.params.iouThrs > thr - 1e-5) &
(coco_eval.params.iouThrs < thr + 1e-5))[0][0]
iou_thr = coco_eval.params.iouThrs[ind]
assert np.isclose(iou_thr, thr)
return ind
ind_lo = _get_thr_ind(coco_eval, IoU_lo_thresh)
ind_hi = _get_thr_ind(coco_eval, IoU_hi_thresh)
# precision has dims (iou, recall, cls, area range, max dets)
# area range index 0: all area ranges
# max dets index 2: 100 per image
precision = \
coco_eval.eval['precision'][ind_lo:(ind_hi + 1), :, :, 0, 2]
ap_default = np.mean(precision[precision > -1])
print(('~~~~ Mean and per-category AP @ IoU=[{:.2f},{:.2f}] '
'~~~~').format(IoU_lo_thresh, IoU_hi_thresh))
print('{:.1f}'.format(100 * ap_default))
for cls_ind, cls in enumerate(self.classes):
if cls == '__background__':
continue
# minus 1 because of __background__
precision = coco_eval.eval['precision'][ind_lo:(ind_hi + 1), :, cls_ind - 1, 0, 2]
ap = np.mean(precision[precision > -1])
print('{:.1f}'.format(100 * ap))
print('~~~~ Summary metrics ~~~~')
coco_eval.summarize()
def _do_detection_eval(self, res_file, output_dir):
ann_type = 'bbox'
coco_dt = self._COCO.loadRes(res_file)
coco_eval = COCOeval(self._COCO, coco_dt)
coco_eval.params.useSegm = (ann_type == 'segm')
coco_eval.evaluate()
coco_eval.accumulate()
self._print_detection_eval_metrics(coco_eval)
eval_file = osp.join(output_dir, 'detection_results.pkl')
with open(eval_file, 'wb') as fid:
pickle.dump(coco_eval, fid, pickle.HIGHEST_PROTOCOL)
print('Wrote COCO eval results to: {}'.format(eval_file))
def _coco_results_one_category(self, boxes, cat_id):
results = []
for im_ind, index in enumerate(self.image_index):
dets = boxes[im_ind].astype(np.float)
            if len(dets) == 0:  # was `dets == []`, which is never truthy for a NumPy array
continue
scores = dets[:, -1]
xs = dets[:, 0]
ys = dets[:, 1]
ws = dets[:, 2] - xs + 1
hs = dets[:, 3] - ys + 1
results.extend(
[{'image_id': index,
'category_id': cat_id,
'bbox': [xs[k], ys[k], ws[k], hs[k]],
'score': scores[k]} for k in range(dets.shape[0])])
return results
def _write_coco_results_file(self, all_boxes, res_file):
# [{"image_id": 42,
# "category_id": 18,
# "bbox": [258.15,41.29,348.26,243.78],
# "score": 0.236}, ...]
results = []
for cls_ind, cls in enumerate(self.classes):
if cls == '__background__':
continue
print('Collecting {} results ({:d}/{:d})'.format(cls, cls_ind,
self.num_classes - 1))
coco_cat_id = self._class_to_coco_cat_id[cls]
results.extend(self._coco_results_one_category(all_boxes[cls_ind],
coco_cat_id))
print('Writing results json to {}'.format(res_file))
with open(res_file, 'w') as fid:
json.dump(results, fid)
def evaluate_detections(self, all_boxes, output_dir):
res_file = osp.join(output_dir, ('detections_' +
self._image_set +
self._year +
'_results'))
if self.config['use_salt']:
res_file += '_{}'.format(str(uuid.uuid4()))
res_file += '.json'
self._write_coco_results_file(all_boxes, res_file)
# Only do evaluation on non-test sets
if self._image_set.find('test') == -1:
self._do_detection_eval(res_file, output_dir)
# Optionally cleanup results json file
if self.config['cleanup']:
os.remove(res_file)
def competition_mode(self, on):
if on:
self.config['use_salt'] = False
self.config['cleanup'] = False
else:
self.config['use_salt'] = True
self.config['cleanup'] = True
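# ---------------------------------------------------------------------------
# Editor's note: illustrative sketch, not part of the original module.
# Typical use assumes COCO-style images and annotation JSON files laid out
# under <data_path>/coco as expected by _get_ann_file/image_path_from_index:
#     imdb = coco('train', '2014', data_path='/path/to/data')
#     roidb = imdb.gt_roidb()          # cached list of per-image ground-truth boxes
#     imdb.append_flipped_images()     # doubles the roidb with mirrored copies
# ---------------------------------------------------------------------------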
| 35.507653 | 88 | 0.60967 |
4a20870ec2cbe399a24f358af5587d35493686eb | 4,432 | py | Python | tokens.py | JeromeTan1997/PL0 | 59f5bdad7e474310d68f0d834a70937d8a7bb671 | [
"MIT"
] | null | null | null | tokens.py | JeromeTan1997/PL0 | 59f5bdad7e474310d68f0d834a70937d8a7bb671 | [
"MIT"
] | null | null | null | tokens.py | JeromeTan1997/PL0 | 59f5bdad7e474310d68f0d834a70937d8a7bb671 | [
"MIT"
] | null | null | null | from enum import Enum
class Word(Enum):
BEGIN = 1
CALL = 2
CONST = 3
DO = 4
END = 5
IF = 6
ODD = 7
PROCEDURE = 8
READ = 9
THEN = 10
VAR = 11
WHILE = 12
WRITE = 13
def name(self):
return self.value() + 'sym'
def value(self):
return Word._values[self]
def __str__(self):
return 'Word(%s, %s)' % (self.name(), self.value())
def __repr__(self):
return self.__str__()
Word._values = {
Word.BEGIN: 'begin',
Word.CALL: 'call',
Word.CONST: 'const',
Word.DO: 'do',
Word.END: 'end',
Word.IF: 'if',
Word.ODD: 'odd',
Word.PROCEDURE: 'procedure',
Word.READ: 'read',
Word.THEN: 'then',
Word.VAR: 'var',
Word.WHILE: 'while',
Word.WRITE: 'write'
}
class Sign(Enum):
LEFTPAREN = 1
RIGHTPAREN = 2
COMMA = 3
SEMICOLON = 4
PERIOD = 5
def name(self):
return Sign._names[self]
def value(self):
return Sign._values[self]
def __str__(self):
return 'Sign(%s, %s)' % (self.name(), self.value())
def __repr__(self):
return self.__str__()
Sign._values = {
Sign.LEFTPAREN: '(',
Sign.RIGHTPAREN: ')',
Sign.COMMA: ',',
Sign.SEMICOLON: ';',
Sign.PERIOD: '.'
}
Sign._names = {
Sign.LEFTPAREN: 'lparen',
Sign.RIGHTPAREN: 'rparen',
Sign.COMMA: 'comma',
Sign.SEMICOLON: 'semicolon',
Sign.PERIOD: 'period'
}
class BinaryOperator(Enum):
PLUS = 1
MINUS = 2
TIMES = 3
SLASH = 4
EQUAL = 5
HASHTAG = 6
LESS = 7
LESSEQUAL = 8
GREATER = 9
GREATEREQUAL = 10
ASSIGN = 11
def name(self):
return BinaryOperator._names[self]
def value(self):
return BinaryOperator._values[self]
def __str__(self):
return 'BinaryOperator(%s, %s)' % (self.name(), self.value())
def __repr__(self):
return self.__str__()
BinaryOperator._values = {
BinaryOperator.PLUS: '+',
BinaryOperator.MINUS: '-',
BinaryOperator.TIMES: '*',
BinaryOperator.SLASH: '/',
BinaryOperator.EQUAL: '=',
BinaryOperator.HASHTAG: '#',
BinaryOperator.LESS: '<',
BinaryOperator.LESSEQUAL: '<=',
BinaryOperator.GREATER: '>',
BinaryOperator.GREATEREQUAL: '>=',
BinaryOperator.ASSIGN: ':='
}
BinaryOperator._names = {
BinaryOperator.PLUS: 'plus',
BinaryOperator.MINUS: 'minus',
BinaryOperator.TIMES: 'times',
BinaryOperator.SLASH: 'slash',
BinaryOperator.EQUAL: 'equal',
BinaryOperator.HASHTAG: 'hashtag',
BinaryOperator.LESS: 'less',
BinaryOperator.LESSEQUAL: 'less_equal',
BinaryOperator.GREATER: 'greater',
BinaryOperator.GREATEREQUAL: 'greater_equal',
BinaryOperator.ASSIGN: 'assign'
}
class TokenType(Enum):
WORD = 1
SIGN = 2
IDENTIFIER = 3
NUMBER = 4
OPERATOR = 5
class Token(object):
def __init__(self, type, object, line=0, index=0):
self.type = type
self.object = object
self.line = line
self.index = index
def name(self):
if self.type == TokenType.IDENTIFIER:
return 'identifier'
elif self.type == TokenType.NUMBER:
return 'number'
else:
return self.object.name()
def value(self):
if self.type == TokenType.IDENTIFIER:
return self.object
elif self.type == TokenType.NUMBER:
return self.object
else:
return self.object.value()
def __str__(self):
return 'Token(%s, %s, %d, %d)' % (self.name(), self.value(), self.line, self.index)
def __repr__(self):
return self.__str__()
def __eq__(self, other):
return self.type == other.type and self.object == other.object
@staticmethod
def word(value):
return Token._words.get(value, None)
@staticmethod
def single(value):
return Token._singles.get(value, None)
@staticmethod
def double(value):
return Token._doubles.get(value, None)
Token._single_tokens = list(Sign) + [operator for operator in BinaryOperator if len(operator.value()) == 1]
Token._words = dict([(word.value(), word) for word in Word])
Token._singles = dict([(sign_or_operator.value(), sign_or_operator) for sign_or_operator in Token._single_tokens])
Token._doubles = dict([(operator.value(), operator) for operator in BinaryOperator if len(operator.value()) == 2])
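# Editor's note: small usage sketch, not part of the original module; it only
# exercises the lookup helpers defined above.
if __name__ == '__main__':
    assert Token.word('begin') is Word.BEGIN             # reserved word lookup
    assert Token.single('+') is BinaryOperator.PLUS      # one-character token
    assert Token.double(':=') is BinaryOperator.ASSIGN   # two-character token
    print(Token(TokenType.NUMBER, 42, line=1, index=0))  # Token(number, 42, 1, 0)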
| 22.16 | 114 | 0.595894 |
4a20873c8421cd04f111368ca48afb37d0dda212 | 3,712 | py | Python | Lib/site-packages/pylint/extensions/no_self_use.py | edupyter/EDUPYTER38 | 396183cea72987506f1ef647c0272a2577c56218 | [
"bzip2-1.0.6"
] | null | null | null | Lib/site-packages/pylint/extensions/no_self_use.py | edupyter/EDUPYTER38 | 396183cea72987506f1ef647c0272a2577c56218 | [
"bzip2-1.0.6"
] | null | null | null | Lib/site-packages/pylint/extensions/no_self_use.py | edupyter/EDUPYTER38 | 396183cea72987506f1ef647c0272a2577c56218 | [
"bzip2-1.0.6"
] | null | null | null | # Licensed under the GPL: https://www.gnu.org/licenses/old-licenses/gpl-2.0.html
# For details: https://github.com/PyCQA/pylint/blob/main/LICENSE
# Copyright (c) https://github.com/PyCQA/pylint/blob/main/CONTRIBUTORS.txt
from __future__ import annotations
from typing import TYPE_CHECKING
from astroid import nodes
from pylint.checkers import BaseChecker
from pylint.checkers.utils import (
PYMETHODS,
decorated_with_property,
is_overload_stub,
is_protocol_class,
overrides_a_method,
)
from pylint.interfaces import INFERENCE
if TYPE_CHECKING:
from pylint.lint.pylinter import PyLinter
class NoSelfUseChecker(BaseChecker):
name = "no_self_use"
msgs = {
"R6301": (
"Method could be a function",
"no-self-use",
"Used when a method doesn't use its bound instance, and so could "
"be written as a function.",
{"old_names": [("R0201", "old-no-self-use")]},
),
}
def __init__(self, linter: PyLinter) -> None:
super().__init__(linter)
self._first_attrs: list[str | None] = []
self._meth_could_be_func: bool | None = None
def visit_name(self, node: nodes.Name) -> None:
"""Check if the name handle an access to a class member
if so, register it.
"""
if self._first_attrs and (
node.name == self._first_attrs[-1] or not self._first_attrs[-1]
):
self._meth_could_be_func = False
def visit_functiondef(self, node: nodes.FunctionDef) -> None:
if not node.is_method():
return
self._meth_could_be_func = True
self._check_first_arg_for_type(node)
visit_asyncfunctiondef = visit_functiondef
def _check_first_arg_for_type(self, node: nodes.FunctionDef) -> None:
"""Check the name of first argument."""
# pylint: disable=duplicate-code
if node.args.posonlyargs:
first_arg = node.args.posonlyargs[0].name
elif node.args.args:
first_arg = node.argnames()[0]
else:
first_arg = None
self._first_attrs.append(first_arg)
# static method
if node.type == "staticmethod":
self._first_attrs[-1] = None
def leave_functiondef(self, node: nodes.FunctionDef) -> None:
"""On method node, check if this method couldn't be a function.
ignore class, static and abstract methods, initializer,
methods overridden from a parent class.
"""
if node.is_method():
first = self._first_attrs.pop()
if first is None:
return
class_node = node.parent.frame(future=True)
if (
self._meth_could_be_func
and node.type == "method"
and node.name not in PYMETHODS
and not (
node.is_abstract()
or overrides_a_method(class_node, node.name)
or decorated_with_property(node)
or _has_bare_super_call(node)
or is_protocol_class(class_node)
or is_overload_stub(node)
)
):
self.add_message("no-self-use", node=node, confidence=INFERENCE)
leave_asyncfunctiondef = leave_functiondef
def _has_bare_super_call(fundef_node: nodes.FunctionDef) -> bool:
for call in fundef_node.nodes_of_class(nodes.Call):
func = call.func
if isinstance(func, nodes.Name) and func.name == "super" and not call.args:
return True
return False
def register(linter: PyLinter) -> None:
linter.register_checker(NoSelfUseChecker(linter))
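# ---------------------------------------------------------------------------
# Editor's note: illustrative sketch, not part of the upstream extension.
# Once the plugin is loaded (e.g. via `load-plugins=pylint.extensions.no_self_use`),
# a method that never touches its instance is reported as R6301:
#
#     class Greeter:
#         def greeting(self):          # R6301: Method could be a function
#             return "hello"
#         def greet(self, name):       # uses an argument but never `self`
#             return "hello " + name   # -> also flagged
#         def greet_self(self):
#             return "hello " + self.name   # uses `self` -> not flagged
# ---------------------------------------------------------------------------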
| 32.849558 | 83 | 0.616649 |
4a20877c681b7f1d799aba239c1beaedcc487ccd | 5,652 | py | Python | Collections-a-installer/community-general-2.4.0/plugins/modules/pushbullet.py | d-amien-b/simple-getwordpress | da90d515a0aa837b633d50db4d91d22b031c04a2 | [
"MIT"
] | 22 | 2021-07-16T08:11:22.000Z | 2022-03-31T07:15:34.000Z | Collections-a-installer/community-general-2.4.0/plugins/modules/pushbullet.py | d-amien-b/simple-getwordpress | da90d515a0aa837b633d50db4d91d22b031c04a2 | [
"MIT"
] | 1 | 2022-03-12T02:25:26.000Z | 2022-03-12T02:25:26.000Z | Collections-a-installer/community-general-2.4.0/plugins/modules/pushbullet.py | d-amien-b/simple-getwordpress | da90d515a0aa837b633d50db4d91d22b031c04a2 | [
"MIT"
] | 39 | 2021-07-05T02:31:42.000Z | 2022-03-31T02:46:03.000Z | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright: Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
DOCUMENTATION = '''
---
author: "Willy Barro (@willybarro)"
requirements: [ pushbullet.py ]
module: pushbullet
short_description: Sends notifications to Pushbullet
description:
- This module sends push notifications via Pushbullet to channels or devices.
options:
api_key:
type: str
description:
- Push bullet API token
required: true
channel:
type: str
description:
- The channel TAG you wish to broadcast a push notification,
as seen on the "My Channels" > "Edit your channel" at
Pushbullet page.
device:
type: str
description:
- The device NAME you wish to send a push notification,
as seen on the Pushbullet main page.
push_type:
type: str
description:
- Thing you wish to push.
default: note
choices: [ "note", "link" ]
title:
type: str
description:
- Title of the notification.
required: true
body:
type: str
description:
- Body of the notification, e.g. Details of the fault you're alerting.
notes:
- Requires pushbullet.py Python package on the remote host.
You can install it via pip with ($ pip install pushbullet.py).
See U(https://github.com/randomchars/pushbullet.py)
'''
EXAMPLES = '''
- name: Sends a push notification to a device
community.general.pushbullet:
api_key: "ABC123abc123ABC123abc123ABC123ab"
device: "Chrome"
title: "You may see this on Google Chrome"
- name: Sends a link to a device
community.general.pushbullet:
api_key: ABC123abc123ABC123abc123ABC123ab
device: Chrome
push_type: link
title: Ansible Documentation
body: https://docs.ansible.com/
- name: Sends a push notification to a channel
community.general.pushbullet:
api_key: ABC123abc123ABC123abc123ABC123ab
channel: my-awesome-channel
title: Broadcasting a message to the #my-awesome-channel folks
- name: Sends a push notification with title and body to a channel
community.general.pushbullet:
api_key: ABC123abc123ABC123abc123ABC123ab
channel: my-awesome-channel
title: ALERT! Signup service is down
body: Error rate on signup service is over 90% for more than 2 minutes
'''
import traceback
PUSHBULLET_IMP_ERR = None
try:
from pushbullet import PushBullet
from pushbullet.errors import InvalidKeyError, PushError
except ImportError:
PUSHBULLET_IMP_ERR = traceback.format_exc()
pushbullet_found = False
else:
pushbullet_found = True
from ansible.module_utils.basic import AnsibleModule, missing_required_lib
# ===========================================
# Main
#
def main():
module = AnsibleModule(
argument_spec=dict(
api_key=dict(type='str', required=True, no_log=True),
channel=dict(type='str', default=None),
device=dict(type='str', default=None),
push_type=dict(type='str', default="note", choices=['note', 'link']),
title=dict(type='str', required=True),
body=dict(type='str', default=None),
url=dict(type='str', default=None),
),
mutually_exclusive=(
['channel', 'device'],
),
supports_check_mode=True
)
api_key = module.params['api_key']
channel = module.params['channel']
device = module.params['device']
push_type = module.params['push_type']
title = module.params['title']
body = module.params['body']
url = module.params['url']
if not pushbullet_found:
module.fail_json(msg=missing_required_lib('pushbullet.py'), exception=PUSHBULLET_IMP_ERR)
# Init pushbullet
try:
pb = PushBullet(api_key)
target = None
except InvalidKeyError:
module.fail_json(msg="Invalid api_key")
# Checks for channel/device
if device is None and channel is None:
module.fail_json(msg="You need to provide a channel or a device.")
# Search for given device
if device is not None:
devices_by_nickname = {}
for d in pb.devices:
devices_by_nickname[d.nickname] = d
if device in devices_by_nickname:
target = devices_by_nickname[device]
else:
module.fail_json(msg="Device '%s' not found. Available devices: '%s'" % (device, "', '".join(devices_by_nickname.keys())))
# Search for given channel
if channel is not None:
channels_by_tag = {}
for c in pb.channels:
channels_by_tag[c.channel_tag] = c
if channel in channels_by_tag:
target = channels_by_tag[channel]
else:
module.fail_json(msg="Channel '%s' not found. Available channels: '%s'" % (channel, "', '".join(channels_by_tag.keys())))
# If in check mode, exit saying that we succeeded
if module.check_mode:
module.exit_json(changed=False, msg="OK")
# Send push notification
try:
if push_type == "link":
target.push_link(title, url, body)
else:
target.push_note(title, body)
module.exit_json(changed=False, msg="OK")
except PushError as e:
module.fail_json(msg="An error occurred, Pushbullet's response: %s" % str(e))
module.fail_json(msg="An unknown error has occurred")
if __name__ == '__main__':
main()
| 30.387097 | 134 | 0.64402 |
4a208a1e66581bb19ba9db6380a531af887bb7b5 | 22,240 | py | Python | src/cryptography/hazmat/primitives/serialization/ssh.py | gorgiaxx/cryptography | ee511a8261a7b88c201736262c56048553eb593c | [
"PSF-2.0",
"Apache-2.0",
"BSD-3-Clause"
] | 4,492 | 2015-01-02T23:02:52.000Z | 2022-03-31T12:59:57.000Z | src/cryptography/hazmat/primitives/serialization/ssh.py | gorgiaxx/cryptography | ee511a8261a7b88c201736262c56048553eb593c | [
"PSF-2.0",
"Apache-2.0",
"BSD-3-Clause"
] | 3,692 | 2015-01-01T03:16:56.000Z | 2022-03-31T19:20:25.000Z | src/cryptography/hazmat/primitives/serialization/ssh.py | gorgiaxx/cryptography | ee511a8261a7b88c201736262c56048553eb593c | [
"PSF-2.0",
"Apache-2.0",
"BSD-3-Clause"
] | 1,155 | 2015-01-09T00:48:05.000Z | 2022-03-31T23:46:43.000Z | # This file is dual licensed under the terms of the Apache License, Version
# 2.0, and the BSD License. See the LICENSE file in the root of this repository
# for complete details.
import binascii
import os
import re
import struct
import typing
from base64 import encodebytes as _base64_encode
from cryptography import utils
from cryptography.exceptions import UnsupportedAlgorithm
from cryptography.hazmat.primitives.asymmetric import dsa, ec, ed25519, rsa
from cryptography.hazmat.primitives.ciphers import Cipher, algorithms, modes
from cryptography.hazmat.primitives.serialization import (
Encoding,
NoEncryption,
PrivateFormat,
PublicFormat,
)
try:
from bcrypt import kdf as _bcrypt_kdf
_bcrypt_supported = True
except ImportError:
_bcrypt_supported = False
def _bcrypt_kdf(
password: bytes,
salt: bytes,
desired_key_bytes: int,
rounds: int,
ignore_few_rounds: bool = False,
) -> bytes:
raise UnsupportedAlgorithm("Need bcrypt module")
_SSH_ED25519 = b"ssh-ed25519"
_SSH_RSA = b"ssh-rsa"
_SSH_DSA = b"ssh-dss"
_ECDSA_NISTP256 = b"ecdsa-sha2-nistp256"
_ECDSA_NISTP384 = b"ecdsa-sha2-nistp384"
_ECDSA_NISTP521 = b"ecdsa-sha2-nistp521"
_CERT_SUFFIX = b"[email protected]"
_SSH_PUBKEY_RC = re.compile(br"\A(\S+)[ \t]+(\S+)")
_SK_MAGIC = b"openssh-key-v1\0"
_SK_START = b"-----BEGIN OPENSSH PRIVATE KEY-----"
_SK_END = b"-----END OPENSSH PRIVATE KEY-----"
_BCRYPT = b"bcrypt"
_NONE = b"none"
_DEFAULT_CIPHER = b"aes256-ctr"
_DEFAULT_ROUNDS = 16
_MAX_PASSWORD = 72
# re is only way to work on bytes-like data
_PEM_RC = re.compile(_SK_START + b"(.*?)" + _SK_END, re.DOTALL)
# padding for max blocksize
_PADDING = memoryview(bytearray(range(1, 1 + 16)))
# ciphers that are actually used in key wrapping
_SSH_CIPHERS: typing.Dict[
bytes,
typing.Tuple[
typing.Type[algorithms.AES],
int,
typing.Union[typing.Type[modes.CTR], typing.Type[modes.CBC]],
int,
],
] = {
b"aes256-ctr": (algorithms.AES, 32, modes.CTR, 16),
b"aes256-cbc": (algorithms.AES, 32, modes.CBC, 16),
}
# map local curve name to key type
_ECDSA_KEY_TYPE = {
"secp256r1": _ECDSA_NISTP256,
"secp384r1": _ECDSA_NISTP384,
"secp521r1": _ECDSA_NISTP521,
}
_U32 = struct.Struct(b">I")
_U64 = struct.Struct(b">Q")
def _ecdsa_key_type(public_key: ec.EllipticCurvePublicKey) -> bytes:
"""Return SSH key_type and curve_name for private key."""
curve = public_key.curve
if curve.name not in _ECDSA_KEY_TYPE:
raise ValueError(
"Unsupported curve for ssh private key: %r" % curve.name
)
return _ECDSA_KEY_TYPE[curve.name]
def _ssh_pem_encode(
data: bytes,
prefix: bytes = _SK_START + b"\n",
suffix: bytes = _SK_END + b"\n",
) -> bytes:
return b"".join([prefix, _base64_encode(data), suffix])
def _check_block_size(data: bytes, block_len: int) -> None:
"""Require data to be full blocks"""
if not data or len(data) % block_len != 0:
raise ValueError("Corrupt data: missing padding")
def _check_empty(data: bytes) -> None:
"""All data should have been parsed."""
if data:
raise ValueError("Corrupt data: unparsed data")
def _init_cipher(
ciphername: bytes,
password: typing.Optional[bytes],
salt: bytes,
rounds: int,
) -> Cipher[typing.Union[modes.CBC, modes.CTR]]:
"""Generate key + iv and return cipher."""
if not password:
raise ValueError("Key is password-protected.")
algo, key_len, mode, iv_len = _SSH_CIPHERS[ciphername]
seed = _bcrypt_kdf(password, salt, key_len + iv_len, rounds, True)
return Cipher(algo(seed[:key_len]), mode(seed[key_len:]))
def _get_u32(data):
"""Uint32"""
if len(data) < 4:
raise ValueError("Invalid data")
return _U32.unpack(data[:4])[0], data[4:]
def _get_u64(data):
"""Uint64"""
if len(data) < 8:
raise ValueError("Invalid data")
return _U64.unpack(data[:8])[0], data[8:]
def _get_sshstr(data):
"""Bytes with u32 length prefix"""
n, data = _get_u32(data)
if n > len(data):
raise ValueError("Invalid data")
return data[:n], data[n:]
def _get_mpint(data):
"""Big integer."""
val, data = _get_sshstr(data)
if val and val[0] > 0x7F:
raise ValueError("Invalid data")
return int.from_bytes(val, "big"), data
def _to_mpint(val):
"""Storage format for signed bigint."""
if val < 0:
raise ValueError("negative mpint not allowed")
if not val:
return b""
nbytes = (val.bit_length() + 8) // 8
return utils.int_to_bytes(val, nbytes)
class _FragList(object):
"""Build recursive structure without data copy."""
def __init__(self, init=None):
self.flist = []
if init:
self.flist.extend(init)
def put_raw(self, val):
"""Add plain bytes"""
self.flist.append(val)
def put_u32(self, val):
"""Big-endian uint32"""
self.flist.append(_U32.pack(val))
def put_sshstr(self, val):
"""Bytes prefixed with u32 length"""
if isinstance(val, (bytes, memoryview, bytearray)):
self.put_u32(len(val))
self.flist.append(val)
else:
self.put_u32(val.size())
self.flist.extend(val.flist)
def put_mpint(self, val):
"""Big-endian bigint prefixed with u32 length"""
self.put_sshstr(_to_mpint(val))
def size(self):
"""Current number of bytes"""
return sum(map(len, self.flist))
def render(self, dstbuf, pos=0):
"""Write into bytearray"""
for frag in self.flist:
flen = len(frag)
start, pos = pos, pos + flen
dstbuf[start:pos] = frag
return pos
def tobytes(self):
"""Return as bytes"""
buf = memoryview(bytearray(self.size()))
self.render(buf)
return buf.tobytes()
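# ---------------------------------------------------------------------------
# Editor's note: illustrative sketch, not part of the upstream module.
# _FragList composes SSH wire data without copying: put_sshstr writes a
# 4-byte big-endian length prefix followed by the bytes themselves, e.g.
#     f = _FragList()
#     f.put_sshstr(b"ssh-ed25519")
#     f.tobytes()   # -> b"\x00\x00\x00\x0bssh-ed25519"
# ---------------------------------------------------------------------------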
class _SSHFormatRSA(object):
"""Format for RSA keys.
Public:
mpint e, n
Private:
mpint n, e, d, iqmp, p, q
"""
def get_public(self, data):
"""RSA public fields"""
e, data = _get_mpint(data)
n, data = _get_mpint(data)
return (e, n), data
def load_public(self, key_type, data):
"""Make RSA public key from data."""
(e, n), data = self.get_public(data)
public_numbers = rsa.RSAPublicNumbers(e, n)
public_key = public_numbers.public_key()
return public_key, data
def load_private(self, data, pubfields):
"""Make RSA private key from data."""
n, data = _get_mpint(data)
e, data = _get_mpint(data)
d, data = _get_mpint(data)
iqmp, data = _get_mpint(data)
p, data = _get_mpint(data)
q, data = _get_mpint(data)
if (e, n) != pubfields:
raise ValueError("Corrupt data: rsa field mismatch")
dmp1 = rsa.rsa_crt_dmp1(d, p)
dmq1 = rsa.rsa_crt_dmq1(d, q)
public_numbers = rsa.RSAPublicNumbers(e, n)
private_numbers = rsa.RSAPrivateNumbers(
p, q, d, dmp1, dmq1, iqmp, public_numbers
)
private_key = private_numbers.private_key()
return private_key, data
def encode_public(self, public_key, f_pub):
"""Write RSA public key"""
pubn = public_key.public_numbers()
f_pub.put_mpint(pubn.e)
f_pub.put_mpint(pubn.n)
def encode_private(self, private_key, f_priv):
"""Write RSA private key"""
private_numbers = private_key.private_numbers()
public_numbers = private_numbers.public_numbers
f_priv.put_mpint(public_numbers.n)
f_priv.put_mpint(public_numbers.e)
f_priv.put_mpint(private_numbers.d)
f_priv.put_mpint(private_numbers.iqmp)
f_priv.put_mpint(private_numbers.p)
f_priv.put_mpint(private_numbers.q)
class _SSHFormatDSA(object):
"""Format for DSA keys.
Public:
mpint p, q, g, y
Private:
mpint p, q, g, y, x
"""
def get_public(self, data):
"""DSA public fields"""
p, data = _get_mpint(data)
q, data = _get_mpint(data)
g, data = _get_mpint(data)
y, data = _get_mpint(data)
return (p, q, g, y), data
def load_public(self, key_type, data):
"""Make DSA public key from data."""
(p, q, g, y), data = self.get_public(data)
parameter_numbers = dsa.DSAParameterNumbers(p, q, g)
public_numbers = dsa.DSAPublicNumbers(y, parameter_numbers)
self._validate(public_numbers)
public_key = public_numbers.public_key()
return public_key, data
def load_private(self, data, pubfields):
"""Make DSA private key from data."""
(p, q, g, y), data = self.get_public(data)
x, data = _get_mpint(data)
if (p, q, g, y) != pubfields:
raise ValueError("Corrupt data: dsa field mismatch")
parameter_numbers = dsa.DSAParameterNumbers(p, q, g)
public_numbers = dsa.DSAPublicNumbers(y, parameter_numbers)
self._validate(public_numbers)
private_numbers = dsa.DSAPrivateNumbers(x, public_numbers)
private_key = private_numbers.private_key()
return private_key, data
def encode_public(self, public_key, f_pub):
"""Write DSA public key"""
public_numbers = public_key.public_numbers()
parameter_numbers = public_numbers.parameter_numbers
self._validate(public_numbers)
f_pub.put_mpint(parameter_numbers.p)
f_pub.put_mpint(parameter_numbers.q)
f_pub.put_mpint(parameter_numbers.g)
f_pub.put_mpint(public_numbers.y)
def encode_private(self, private_key, f_priv):
"""Write DSA private key"""
self.encode_public(private_key.public_key(), f_priv)
f_priv.put_mpint(private_key.private_numbers().x)
def _validate(self, public_numbers):
parameter_numbers = public_numbers.parameter_numbers
if parameter_numbers.p.bit_length() != 1024:
raise ValueError("SSH supports only 1024 bit DSA keys")
class _SSHFormatECDSA(object):
"""Format for ECDSA keys.
Public:
str curve
bytes point
Private:
str curve
bytes point
mpint secret
"""
def __init__(self, ssh_curve_name, curve):
self.ssh_curve_name = ssh_curve_name
self.curve = curve
def get_public(self, data):
"""ECDSA public fields"""
curve, data = _get_sshstr(data)
point, data = _get_sshstr(data)
if curve != self.ssh_curve_name:
raise ValueError("Curve name mismatch")
if point[0] != 4:
raise NotImplementedError("Need uncompressed point")
return (curve, point), data
def load_public(self, key_type, data):
"""Make ECDSA public key from data."""
(curve_name, point), data = self.get_public(data)
public_key = ec.EllipticCurvePublicKey.from_encoded_point(
self.curve, point.tobytes()
)
return public_key, data
def load_private(self, data, pubfields):
"""Make ECDSA private key from data."""
(curve_name, point), data = self.get_public(data)
secret, data = _get_mpint(data)
if (curve_name, point) != pubfields:
raise ValueError("Corrupt data: ecdsa field mismatch")
private_key = ec.derive_private_key(secret, self.curve)
return private_key, data
def encode_public(self, public_key, f_pub):
"""Write ECDSA public key"""
point = public_key.public_bytes(
Encoding.X962, PublicFormat.UncompressedPoint
)
f_pub.put_sshstr(self.ssh_curve_name)
f_pub.put_sshstr(point)
def encode_private(self, private_key, f_priv):
"""Write ECDSA private key"""
public_key = private_key.public_key()
private_numbers = private_key.private_numbers()
self.encode_public(public_key, f_priv)
f_priv.put_mpint(private_numbers.private_value)
class _SSHFormatEd25519(object):
"""Format for Ed25519 keys.
Public:
bytes point
Private:
bytes point
bytes secret_and_point
"""
def get_public(self, data):
"""Ed25519 public fields"""
point, data = _get_sshstr(data)
return (point,), data
def load_public(self, key_type, data):
"""Make Ed25519 public key from data."""
(point,), data = self.get_public(data)
public_key = ed25519.Ed25519PublicKey.from_public_bytes(
point.tobytes()
)
return public_key, data
def load_private(self, data, pubfields):
"""Make Ed25519 private key from data."""
(point,), data = self.get_public(data)
keypair, data = _get_sshstr(data)
secret = keypair[:32]
point2 = keypair[32:]
if point != point2 or (point,) != pubfields:
raise ValueError("Corrupt data: ed25519 field mismatch")
private_key = ed25519.Ed25519PrivateKey.from_private_bytes(secret)
return private_key, data
def encode_public(self, public_key, f_pub):
"""Write Ed25519 public key"""
raw_public_key = public_key.public_bytes(
Encoding.Raw, PublicFormat.Raw
)
f_pub.put_sshstr(raw_public_key)
def encode_private(self, private_key, f_priv):
"""Write Ed25519 private key"""
public_key = private_key.public_key()
raw_private_key = private_key.private_bytes(
Encoding.Raw, PrivateFormat.Raw, NoEncryption()
)
raw_public_key = public_key.public_bytes(
Encoding.Raw, PublicFormat.Raw
)
f_keypair = _FragList([raw_private_key, raw_public_key])
self.encode_public(public_key, f_priv)
f_priv.put_sshstr(f_keypair)
_KEY_FORMATS = {
_SSH_RSA: _SSHFormatRSA(),
_SSH_DSA: _SSHFormatDSA(),
_SSH_ED25519: _SSHFormatEd25519(),
_ECDSA_NISTP256: _SSHFormatECDSA(b"nistp256", ec.SECP256R1()),
_ECDSA_NISTP384: _SSHFormatECDSA(b"nistp384", ec.SECP384R1()),
_ECDSA_NISTP521: _SSHFormatECDSA(b"nistp521", ec.SECP521R1()),
}
def _lookup_kformat(key_type):
"""Return valid format or throw error"""
if not isinstance(key_type, bytes):
key_type = memoryview(key_type).tobytes()
if key_type in _KEY_FORMATS:
return _KEY_FORMATS[key_type]
raise UnsupportedAlgorithm("Unsupported key type: %r" % key_type)
_SSH_PRIVATE_KEY_TYPES = typing.Union[
ec.EllipticCurvePrivateKey,
rsa.RSAPrivateKey,
dsa.DSAPrivateKey,
ed25519.Ed25519PrivateKey,
]
def load_ssh_private_key(
data: bytes,
password: typing.Optional[bytes],
backend: typing.Any = None,
) -> _SSH_PRIVATE_KEY_TYPES:
"""Load private key from OpenSSH custom encoding."""
utils._check_byteslike("data", data)
if password is not None:
utils._check_bytes("password", password)
m = _PEM_RC.search(data)
if not m:
raise ValueError("Not OpenSSH private key format")
p1 = m.start(1)
p2 = m.end(1)
data = binascii.a2b_base64(memoryview(data)[p1:p2])
if not data.startswith(_SK_MAGIC):
raise ValueError("Not OpenSSH private key format")
data = memoryview(data)[len(_SK_MAGIC) :]
# parse header
ciphername, data = _get_sshstr(data)
kdfname, data = _get_sshstr(data)
kdfoptions, data = _get_sshstr(data)
nkeys, data = _get_u32(data)
if nkeys != 1:
raise ValueError("Only one key supported")
# load public key data
pubdata, data = _get_sshstr(data)
pub_key_type, pubdata = _get_sshstr(pubdata)
kformat = _lookup_kformat(pub_key_type)
pubfields, pubdata = kformat.get_public(pubdata)
_check_empty(pubdata)
# load secret data
edata, data = _get_sshstr(data)
_check_empty(data)
if (ciphername, kdfname) != (_NONE, _NONE):
ciphername = ciphername.tobytes()
if ciphername not in _SSH_CIPHERS:
raise UnsupportedAlgorithm("Unsupported cipher: %r" % ciphername)
if kdfname != _BCRYPT:
raise UnsupportedAlgorithm("Unsupported KDF: %r" % kdfname)
blklen = _SSH_CIPHERS[ciphername][3]
_check_block_size(edata, blklen)
salt, kbuf = _get_sshstr(kdfoptions)
rounds, kbuf = _get_u32(kbuf)
_check_empty(kbuf)
ciph = _init_cipher(ciphername, password, salt.tobytes(), rounds)
edata = memoryview(ciph.decryptor().update(edata))
else:
blklen = 8
_check_block_size(edata, blklen)
ck1, edata = _get_u32(edata)
ck2, edata = _get_u32(edata)
if ck1 != ck2:
raise ValueError("Corrupt data: broken checksum")
# load per-key struct
key_type, edata = _get_sshstr(edata)
if key_type != pub_key_type:
raise ValueError("Corrupt data: key type mismatch")
private_key, edata = kformat.load_private(edata, pubfields)
comment, edata = _get_sshstr(edata)
# yes, SSH does padding check *after* all other parsing is done.
# need to follow as it writes zero-byte padding too.
if edata != _PADDING[: len(edata)]:
raise ValueError("Corrupt data: invalid padding")
return private_key
def serialize_ssh_private_key(
private_key: _SSH_PRIVATE_KEY_TYPES,
password: typing.Optional[bytes] = None,
) -> bytes:
"""Serialize private key with OpenSSH custom encoding."""
if password is not None:
utils._check_bytes("password", password)
if password and len(password) > _MAX_PASSWORD:
raise ValueError(
"Passwords longer than 72 bytes are not supported by "
"OpenSSH private key format"
)
if isinstance(private_key, ec.EllipticCurvePrivateKey):
key_type = _ecdsa_key_type(private_key.public_key())
elif isinstance(private_key, rsa.RSAPrivateKey):
key_type = _SSH_RSA
elif isinstance(private_key, dsa.DSAPrivateKey):
key_type = _SSH_DSA
elif isinstance(private_key, ed25519.Ed25519PrivateKey):
key_type = _SSH_ED25519
else:
raise ValueError("Unsupported key type")
kformat = _lookup_kformat(key_type)
# setup parameters
f_kdfoptions = _FragList()
if password:
ciphername = _DEFAULT_CIPHER
blklen = _SSH_CIPHERS[ciphername][3]
kdfname = _BCRYPT
rounds = _DEFAULT_ROUNDS
salt = os.urandom(16)
f_kdfoptions.put_sshstr(salt)
f_kdfoptions.put_u32(rounds)
ciph = _init_cipher(ciphername, password, salt, rounds)
else:
ciphername = kdfname = _NONE
blklen = 8
ciph = None
nkeys = 1
checkval = os.urandom(4)
comment = b""
# encode public and private parts together
f_public_key = _FragList()
f_public_key.put_sshstr(key_type)
kformat.encode_public(private_key.public_key(), f_public_key)
f_secrets = _FragList([checkval, checkval])
f_secrets.put_sshstr(key_type)
kformat.encode_private(private_key, f_secrets)
f_secrets.put_sshstr(comment)
f_secrets.put_raw(_PADDING[: blklen - (f_secrets.size() % blklen)])
# top-level structure
f_main = _FragList()
f_main.put_raw(_SK_MAGIC)
f_main.put_sshstr(ciphername)
f_main.put_sshstr(kdfname)
f_main.put_sshstr(f_kdfoptions)
f_main.put_u32(nkeys)
f_main.put_sshstr(f_public_key)
f_main.put_sshstr(f_secrets)
# copy result info bytearray
slen = f_secrets.size()
mlen = f_main.size()
buf = memoryview(bytearray(mlen + blklen))
f_main.render(buf)
ofs = mlen - slen
# encrypt in-place
if ciph is not None:
ciph.encryptor().update_into(buf[ofs:mlen], buf[ofs:])
txt = _ssh_pem_encode(buf[:mlen])
buf[ofs:mlen] = bytearray(slen)
return txt
_SSH_PUBLIC_KEY_TYPES = typing.Union[
ec.EllipticCurvePublicKey,
rsa.RSAPublicKey,
dsa.DSAPublicKey,
ed25519.Ed25519PublicKey,
]
def load_ssh_public_key(
data: bytes, backend: typing.Any = None
) -> _SSH_PUBLIC_KEY_TYPES:
"""Load public key from OpenSSH one-line format."""
utils._check_byteslike("data", data)
m = _SSH_PUBKEY_RC.match(data)
if not m:
raise ValueError("Invalid line format")
key_type = orig_key_type = m.group(1)
key_body = m.group(2)
with_cert = False
if _CERT_SUFFIX == key_type[-len(_CERT_SUFFIX) :]:
with_cert = True
key_type = key_type[: -len(_CERT_SUFFIX)]
kformat = _lookup_kformat(key_type)
try:
data = memoryview(binascii.a2b_base64(key_body))
except (TypeError, binascii.Error):
raise ValueError("Invalid key format")
inner_key_type, data = _get_sshstr(data)
if inner_key_type != orig_key_type:
raise ValueError("Invalid key format")
if with_cert:
nonce, data = _get_sshstr(data)
public_key, data = kformat.load_public(key_type, data)
if with_cert:
serial, data = _get_u64(data)
cctype, data = _get_u32(data)
key_id, data = _get_sshstr(data)
principals, data = _get_sshstr(data)
valid_after, data = _get_u64(data)
valid_before, data = _get_u64(data)
crit_options, data = _get_sshstr(data)
extensions, data = _get_sshstr(data)
reserved, data = _get_sshstr(data)
sig_key, data = _get_sshstr(data)
signature, data = _get_sshstr(data)
_check_empty(data)
return public_key
def serialize_ssh_public_key(public_key: _SSH_PUBLIC_KEY_TYPES) -> bytes:
"""One-line public key format for OpenSSH"""
if isinstance(public_key, ec.EllipticCurvePublicKey):
key_type = _ecdsa_key_type(public_key)
elif isinstance(public_key, rsa.RSAPublicKey):
key_type = _SSH_RSA
elif isinstance(public_key, dsa.DSAPublicKey):
key_type = _SSH_DSA
elif isinstance(public_key, ed25519.Ed25519PublicKey):
key_type = _SSH_ED25519
else:
raise ValueError("Unsupported key type")
kformat = _lookup_kformat(key_type)
f_pub = _FragList()
f_pub.put_sshstr(key_type)
kformat.encode_public(public_key, f_pub)
pub = binascii.b2a_base64(f_pub.tobytes()).strip()
return b"".join([key_type, b" ", pub])
| 30.888889 | 79 | 0.650764 |
4a208a94b7e27fc2cb42d6c54a0a31df33014553 | 1,939 | py | Python | setup.py | INCF/nineml-python | 062a2ac8a9be97cee4dad02938e3858e051cf07c | [
"BSD-3-Clause"
] | 6 | 2017-12-26T14:15:28.000Z | 2021-10-10T22:45:51.000Z | setup.py | INCF/nineml-python | 062a2ac8a9be97cee4dad02938e3858e051cf07c | [
"BSD-3-Clause"
] | 25 | 2017-07-05T03:53:53.000Z | 2021-01-19T14:14:05.000Z | setup.py | INCF/nineml-python | 062a2ac8a9be97cee4dad02938e3858e051cf07c | [
"BSD-3-Clause"
] | 5 | 2017-12-26T14:15:12.000Z | 2021-10-10T22:45:39.000Z | #!/usr/bin/env python
from setuptools import setup, find_packages
import os
import sys
PACKAGE_NAME = 'nineml'
# Get version number
sys.path.insert(0, os.path.join(os.path.dirname(__file__), PACKAGE_NAME))
from version import __version__ # @IgnorePep8 @UnresolvedImport
sys.path.pop(0)
setup(
name=PACKAGE_NAME,
version=__version__,
packages=find_packages(),
# add your name here if you contribute to the code
author="Andrew P. Davison, Thomas G. Close, Mike Hull, Eilif Muller",
author_email="[email protected]",
description=(
"A tool for reading, writing and generally working with 9ML objects "
"and files."),
long_description=open("README.rst").read(),
license="BSD 3 License",
keywords=("computational neuroscience modeling interoperability XML YAML"
"HDF5 JSON"),
url="http://nineml-python.readthedocs.io",
classifiers=['Development Status :: 5 - Production/Stable',
'Environment :: Console',
'Intended Audience :: Science/Research',
'License :: OSI Approved :: BSD License',
'Natural Language :: English',
'Operating System :: OS Independent',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Topic :: Scientific/Engineering'],
install_requires=['lxml>=3.7.3',
'future>=0.16.0',
'h5py>=2.7.0',
'PyYAML>=3.1',
'sympy>=1.5.1',
'numpy>=1.11.0'],
python_requires='>=2.7, !=3.0.*, !=3.1.*, !=3.2.*, <4',
tests_require=['nose']
)
| 37.288462 | 77 | 0.566271 |
4a208ada36f1a83bd95eea5165b5cadf1ba4a075 | 1,024 | py | Python | ctf/mixins.py | puerkito66/openCTF | 207cf96e6539a0dbfa3f87f838975c5e5f91ca9e | [
"MIT"
] | null | null | null | ctf/mixins.py | puerkito66/openCTF | 207cf96e6539a0dbfa3f87f838975c5e5f91ca9e | [
"MIT"
] | null | null | null | ctf/mixins.py | puerkito66/openCTF | 207cf96e6539a0dbfa3f87f838975c5e5f91ca9e | [
"MIT"
] | null | null | null | from django.contrib.auth.mixins import UserPassesTestMixin
from django.utils.translation import ugettext_lazy as _
class IsVerifiedMixin(UserPassesTestMixin):
"""Profile verification Mixin.
Checks if a user has verified the registered email, so some actions can be enabled.
"""
permission_denied_message = _(
"Please verify your email first before you can create a Team."
)
def test_func(self):
return self.request.user.profile.is_verified
class IsNotInTeamMixin(UserPassesTestMixin):
"""Team Verification Mixin.
    Checks if a user is not already in a team.
"""
permission_denied_message = _("You are already in a Team.")
def test_func(self):
return not self.request.user.team.all().exists()
class IsInTeamMixin(IsNotInTeamMixin):
"""Team Verification Mixin.
Checks if a user is in a team.
"""
permission_denied_message = _("You are not in a Team.")
def test_func(self):
return not IsNotInTeamMixin.test_func(self) | 25.6 | 87 | 0.708984 |
4a208bf00545f01cbde231e67e59546d3890d504 | 149 | py | Python | example1/build.py | donutpy/example | df72e57856a57bead27f95eb6cee219f01e78b40 | [
"MIT"
] | null | null | null | example1/build.py | donutpy/example | df72e57856a57bead27f95eb6cee219f01e78b40 | [
"MIT"
] | null | null | null | example1/build.py | donutpy/example | df72e57856a57bead27f95eb6cee219f01e78b40 | [
"MIT"
] | null | null | null | import donutpy as dpy
dpy.initiate()
dpy.buildPages()
# Run this file with the argument --buildPages
# Example: path/to/file/build.py --buildPages
| 18.625 | 46 | 0.751678 |
4a208dd87572f4a418a961cd329410a500608c4b | 1,147 | py | Python | experiments/tiered-meta-iNat/FRN/Conv-4/train.py | Jf-Chen/FRN-main | 5b57b9e0d7368058a8e3ba41a53c460b54ab9b91 | [
"MIT"
] | 43 | 2021-04-27T23:42:35.000Z | 2022-03-30T02:41:19.000Z | experiments/tiered-meta-iNat/FRN/Conv-4/train.py | Jf-Chen/FRN-main | 5b57b9e0d7368058a8e3ba41a53c460b54ab9b91 | [
"MIT"
] | 7 | 2021-05-31T10:38:17.000Z | 2022-01-06T05:20:08.000Z | experiments/tiered-meta-iNat/FRN/Conv-4/train.py | Jf-Chen/FRN-main | 5b57b9e0d7368058a8e3ba41a53c460b54ab9b91 | [
"MIT"
] | 7 | 2021-05-18T00:37:46.000Z | 2022-01-23T07:09:51.000Z | import os
import sys
import torch
import yaml
from functools import partial
sys.path.append('../../../../')
from trainers import trainer, frn_train
from datasets import dataloaders
from models.FRN import FRN
args = trainer.train_parser()
with open('../../../../config.yml', 'r') as f:
temp = yaml.safe_load(f)
data_path = os.path.abspath(temp['data_path'])
fewshot_path = os.path.join(data_path,'tiered_meta_iNat')
pm = trainer.Path_Manager(fewshot_path=fewshot_path,args=args)
train_way = args.train_way
shots = [args.train_shot, args.train_query_shot]
train_loader = dataloaders.meta_train_dataloader(data_path=pm.train,
way=train_way,
shots=shots,
transform_type=args.train_transform_type)
model = FRN(way=train_way,
shots=[args.train_shot, args.train_query_shot],
resnet=args.resnet)
train_func = partial(frn_train.default_train,train_loader=train_loader)
tm = trainer.Train_Manager(args,path_manager=pm,train_func=train_func)
tm.train(model)
tm.evaluate(model) | 30.184211 | 89 | 0.666957 |
4a208e3cd7b75e5abd2c2165f964447283137193 | 7,971 | py | Python | controllers/submit.py | NewGlobalStrategy/NetDecisionMaking | 32ff3bd126c571526e9e4999c7f8859df84bfecf | [
"MIT"
] | 1 | 2016-02-29T14:53:10.000Z | 2016-02-29T14:53:10.000Z | controllers/submit.py | NewGlobalStrategy/NetDecisionMaking | 32ff3bd126c571526e9e4999c7f8859df84bfecf | [
"MIT"
] | null | null | null | controllers/submit.py | NewGlobalStrategy/NetDecisionMaking | 32ff3bd126c571526e9e4999c7f8859df84bfecf | [
"MIT"
] | null | null | null | # - Coding UTF8 -
#
# Networked Decision Making
# Development Sites (source code):
# http://code.google.com/p/global-decision-making-system/
# http://github.com/NewGlobalStrategy/NetDecisionMaking
#
# Demo Sites (Google App Engine)
# http://netdecisionmaking.appspot.com
# http://globaldecisionmaking.appspot.com
#
# License Code: MIT
# License Content: Creative Commons Attribution 3.0
#
# Also visit: www.web2py.com
# or Groups: http://groups.google.com/group/web2py
# For details on the web framework used for this development
#
# Developed by Russ King ([email protected]
# Russ also blogs occasionally to pass the time at:
# http://proudofyourplanent.blogspot.com
# His general thinking on why this project is very important is available at
# http://www.scribd.com/doc/98216626/New-Global-Strategy
# With thanks to Guido, Massimo and many others that make this sort of thing
# much easier than it used to be
#
# This controller handles submission of questions and actions and confirmation
# that the question has been submitted
###############################################################################
@auth.requires_login()
def new_question():
    #This allows creation of both questions and actions, so the first
    #thing to do is establish whether a question or an action is being
    #submitted; the default is question unless action is specified
if request.args(1) > '0':
priorquest = int(request.args(1))
else:
priorquest = 0
if request.args(0) != 'action':
heading = 'Submit Question'
labels = {'questiontext': 'Question'}
fields = ['questiontext', 'eventid', 'category', 'activescope',
'continent', 'country', 'subdivision', 'numanswers']
extrafield1 = TR(LABEL('Answer 1:'), INPUT(_name='ans1', value="", _type="text"))
extrafield2 = TR(LABEL('Answer 2:'), INPUT(_name='ans2', value="", _type="text"))
extrafield3 = TR(LABEL('Answer 3:'), INPUT(_name='ans3', value="", _type="text"))
extrafield4 = TR(LABEL('Answer 4:'), INPUT(_name='ans4', value="", _type="text"))
extrafield5 = TR(LABEL('Answer 5:'), INPUT(_name='ans5', value="", _type="text"))
extrafield6 = TR(LABEL('Answer 6:'), INPUT(_name='ans6', value="", _type="text"))
extrafield7 = TR(LABEL('Answer 7:'), INPUT(_name='ans7', value="", _type="text"))
extrafield8 = TR(LABEL('Answer 8:'), INPUT(_name='ans8', value="", _type="text"))
extrafield9 = TR(LABEL('Answer 9:'), INPUT(_name='ans9', value="", _type="text"))
extrafield10 = TR(LABEL('Answer 10:'), INPUT(_name='ans10', value="", _type="text"))
form = SQLFORM(db.question, fields=fields, labels=labels, formstyle='table3cols')
form[0].insert(-1, extrafield1)
form[0].insert(-1, extrafield2)
form[0].insert(-1, extrafield3)
form[0].insert(-1, extrafield4)
form[0].insert(-1, extrafield5)
form[0].insert(-1, extrafield6)
form[0].insert(-1, extrafield7)
form[0].insert(-1, extrafield8)
form[0].insert(-1, extrafield9)
form[0].insert(-1, extrafield10)
else:
#action form submission
heading = 'Submit Action'
labels = {'questiontext': 'Action'}
fields = ['questiontext', 'eventid', 'category', 'activescope',
'continent', 'country', 'subdivision', 'responsible',
'respemail', 'duedate']
form = SQLFORM(db.question, fields=fields, labels=labels, formstyle='table3cols')
if session.eventid:
form.vars.eventid = session.eventid
else:
form.vars.eventid = db(db.event.event_name =='Unspecified').select(db.event.id).first().id
#this can be the same for both questions and actions
if form.validate():
form.vars.auth_userid = auth.user.id
if request.args(0) == 'action':
form.vars.qtype = 'action'
form.vars.answers = ['Approve', 'Disapprove', 'OK']
form.vars.numanswers = 2
else:
form.vars.answers = [form.vars.ans1, form.vars.ans2, form.vars.ans3,
form.vars.ans4, form.vars.ans5, form.vars.ans6, form.vars.ans7,
form.vars.ans8, form.vars.ans9, form.vars.ans10]
scope = form.vars.activescope
#if scope == '1 Global':
# activetext = 'Global'
#elif scope == '2 Continental':
# activetext = form.vars.continent
#elif scope == '3 National':
# activetext = form.vars.country
#else:
# activetext = form.vars.subdivision
#form.vars.scopetext = activetext
form.vars.correctanstext = ''
form.vars.createdate = request.utcnow
#form.vars.priorquests = priorquest - now need to process separately
if request.args(0) != 'action':
del form.vars['ans1']
del form.vars['ans2']
del form.vars['ans3']
del form.vars['ans4']
del form.vars['ans5']
del form.vars['ans6']
del form.vars['ans7']
del form.vars['ans8']
del form.vars['ans9']
del form.vars['ans10']
form.vars.id = db.question.insert(**dict(form.vars))
response.flash = 'form accepted'
session.lastquestion = form.vars.id
session.eventid = form.vars.eventid
        # (DAL conditions must be combined with '&'; Python's 'and' would silently drop the first condition)
        if priorquest > 0 and db((db.questlink.sourceid == priorquest) &
                                 (db.questlink.targetid == form.vars.id)).isempty():
db.questlink.insert(sourceid=priorquest, targetid=form.vars.id)
redirect(URL('accept_question', args=[form.vars.qtype]))
elif form.errors:
response.flash = 'form has errors'
else:
response.flash = 'please fill out the form'
return dict(form=form, heading=heading)
def accept_question():
response.flash = "Details Submitted"
if request.args(0) == 'action':
qtype = 'action'
else:
qtype = 'question'
# will now update priorquest with the subsequent question details
# and this question with priorquest details
if session.priorquest > 0:
#append into priorquests and subsquests
quest = db(db.question.id == session.priorquest).select(db.question.id,
db.question.subsquests).first()
subsquests = quest.subsquests
subsquests.append(session.lastquestion)
quest.update_record(subsquests=subsquests)
quest = db(db.question.id == session.lastquestion).select(db.question.id,
db.question.priorquests).first()
priorquests = quest.priorquests
priorquests.append(session.priorquest)
quest.update_record(priorquests=priorquests)
session.lastquestion = 0
session.priorquest = 0
return locals()
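# Editor's illustration (not part of the original controller): the cross-linking
# performed above, shown with plain dicts in place of DAL rows. Names are hypothetical.
#   prior = {'id': 1, 'subsquests': []}
#   last = {'id': 2, 'priorquests': []}
#   prior['subsquests'].append(last['id'])    # the earlier question learns its successor
#   last['priorquests'].append(prior['id'])   # the new question learns its predecessor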
#This is called via Ajax to populate the subdivision dropdown on change of country
#now changed to derelationalise country subdivision
def multi():
#placeholder for discussion of the topic at present
pass
return locals()
def subdivn():
result = "<option value='Unspecified'>Unspecified</option>"
subdivns = db(db.subdivision.country == request.vars.country).select(
db.subdivision.subdiv_name, cache=(cache.ram, 1200), cacheable=True)
for row in subdivns:
result += "<option value='" + str(row.subdiv_name) + "'>" + row.subdiv_name + "</option>"
return XML(result)
def country():
result = "<option value='Unspecified'>Unspecified</option>"
countries = db(db.country.continent == request.vars.continent).select(
db.country.country_name, cache=(cache.ram, 6000), cacheable=True)
for countrie in countries:
result += "<option value='" + str(countrie.country_name) + "'>" + countrie.country_name + "</option>"
return XML(result)
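# Editor's sketch (not part of the original controller): country() and subdivn()
# above both build an HTML <option> list from database rows. The helper below is
# a framework-free version of that loop, so the markup logic can be exercised
# without a web2py request; the function name and the sample data are assumptions.
def build_options(names):
    result = "<option value='Unspecified'>Unspecified</option>"
    for name in names:
        result += "<option value='" + str(name) + "'>" + str(name) + "</option>"
    return result
# e.g. build_options(['France', 'Germany']) returns the Unspecified option
# followed by one <option> element per supplied name.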
| 40.055276 | 109 | 0.617488 |
4a208f2cc37bfe668828d8515a259b1b41ad33c5 | 2,952 | py | Python | aylien_news_api/models/media_type.py | AYLIEN/aylien_newsapi_python | ab4e667e718e8f919b894d4e4ec76e5d37b2fe74 | [
"Apache-2.0"
] | 13 | 2016-04-30T12:08:24.000Z | 2021-09-14T13:57:46.000Z | aylien_news_api/models/media_type.py | AYLIEN/aylien_newsapi_python | ab4e667e718e8f919b894d4e4ec76e5d37b2fe74 | [
"Apache-2.0"
] | 3 | 2016-09-02T08:22:28.000Z | 2018-07-03T10:59:31.000Z | aylien_news_api/models/media_type.py | AYLIEN/aylien_newsapi_python | ab4e667e718e8f919b894d4e4ec76e5d37b2fe74 | [
"Apache-2.0"
] | 5 | 2016-12-20T08:17:47.000Z | 2022-01-11T22:44:43.000Z |
# coding: utf-8
"""
AYLIEN News API
The AYLIEN News API is the most powerful way of sourcing, searching and syndicating analyzed and enriched news content. It is accessed by sending HTTP requests to our server, which returns information to your client. # noqa: E501
The version of the OpenAPI document: 5.1.0
Contact: [email protected]
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
from aylien_news_api.configuration import Configuration
class MediaType(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
allowed enum values
"""
IMAGE = "image"
VIDEO = "video"
allowable_values = [IMAGE, VIDEO] # noqa: E501
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
}
attribute_map = {
}
def __init__(self, local_vars_configuration=None): # noqa: E501
"""MediaType - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self.discriminator = None
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, MediaType):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, MediaType):
return True
return self.to_dict() != other.to_dict()
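# Usage sketch (editor's addition, not part of the generated module): the model
# behaves like a simple enum wrapper, and equality is based on to_dict().
if __name__ == "__main__":
    a = MediaType()
    b = MediaType()
    print(MediaType.allowable_values)   # ['image', 'video']
    print(a.to_str())                   # pretty-printed (empty) property dict
    print(a == b)                       # True - compares to_dict() output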
| 28.941176 | 235 | 0.58435 |
4a208fb8723f51326687cb86194156c2340db109 | 832 | py | Python | molecool/functions.py | hxh5226/molecool | 69bc49f7d051a485581a859db245c74dcbb4dcea | [
"BSD-3-Clause"
] | null | null | null | molecool/functions.py | hxh5226/molecool | 69bc49f7d051a485581a859db245c74dcbb4dcea | [
"BSD-3-Clause"
] | null | null | null | molecool/functions.py | hxh5226/molecool | 69bc49f7d051a485581a859db245c74dcbb4dcea | [
"BSD-3-Clause"
] | null | null | null |
"""
functions.py
A Python package for analyzing and visualizing xyz files.
Handles the primary functions
The source code goes here
"""
def canvas(with_attribution=True):
"""
Placeholder function to show example docstring (NumPy format)
Replace this function and doc string for your own project
Parameters
----------
with_attribution : bool, Optional, default: True
Set whether or not to display who the quote is from
Returns
-------
quote : str
Compiled string including quote and optional attribution
"""
quote = "The code is but a canvas to our imagination."
if with_attribution:
quote += "\n\t- Adapted from Henry David Thoreau"
return quote
if __name__ == "__main__":
# Do something if this file is invoked on its own
print(canvas())
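    # Editor's illustration: per the docstring above, with_attribution=False
    # returns the quote without the attribution line.
    print(canvas(with_attribution=False))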
| 22.486486 | 65 | 0.674279 |
4a20901ab6b5fd3f9db53911bbfb9017a9be5882 | 4,889 | py | Python | sdk/python/pulumi_azure_nextgen/apimanagement/v20170301/get_api_release.py | test-wiz-sec/pulumi-azure-nextgen | 20a695af0d020b34b0f1c336e1b69702755174cc | [
"Apache-2.0"
] | null | null | null | sdk/python/pulumi_azure_nextgen/apimanagement/v20170301/get_api_release.py | test-wiz-sec/pulumi-azure-nextgen | 20a695af0d020b34b0f1c336e1b69702755174cc | [
"Apache-2.0"
] | null | null | null | sdk/python/pulumi_azure_nextgen/apimanagement/v20170301/get_api_release.py | test-wiz-sec/pulumi-azure-nextgen | 20a695af0d020b34b0f1c336e1b69702755174cc | [
"Apache-2.0"
] | null | null | null |
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from ... import _utilities, _tables
__all__ = [
'GetApiReleaseResult',
'AwaitableGetApiReleaseResult',
'get_api_release',
]
@pulumi.output_type
class GetApiReleaseResult:
"""
Api Release details.
"""
def __init__(__self__, api_id=None, created_date_time=None, name=None, notes=None, type=None, updated_date_time=None):
if api_id and not isinstance(api_id, str):
raise TypeError("Expected argument 'api_id' to be a str")
pulumi.set(__self__, "api_id", api_id)
if created_date_time and not isinstance(created_date_time, str):
raise TypeError("Expected argument 'created_date_time' to be a str")
pulumi.set(__self__, "created_date_time", created_date_time)
if name and not isinstance(name, str):
raise TypeError("Expected argument 'name' to be a str")
pulumi.set(__self__, "name", name)
if notes and not isinstance(notes, str):
raise TypeError("Expected argument 'notes' to be a str")
pulumi.set(__self__, "notes", notes)
if type and not isinstance(type, str):
raise TypeError("Expected argument 'type' to be a str")
pulumi.set(__self__, "type", type)
if updated_date_time and not isinstance(updated_date_time, str):
raise TypeError("Expected argument 'updated_date_time' to be a str")
pulumi.set(__self__, "updated_date_time", updated_date_time)
@property
@pulumi.getter(name="apiId")
def api_id(self) -> Optional[str]:
"""
Identifier of the API the release belongs to.
"""
return pulumi.get(self, "api_id")
@property
@pulumi.getter(name="createdDateTime")
def created_date_time(self) -> str:
"""
The time the API was released. The date conforms to the following format: yyyy-MM-ddTHH:mm:ssZ as specified by the ISO 8601 standard.
"""
return pulumi.get(self, "created_date_time")
@property
@pulumi.getter
def name(self) -> str:
"""
Resource name.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter
def notes(self) -> Optional[str]:
"""
Release Notes
"""
return pulumi.get(self, "notes")
@property
@pulumi.getter
def type(self) -> str:
"""
Resource type for API Management resource.
"""
return pulumi.get(self, "type")
@property
@pulumi.getter(name="updatedDateTime")
def updated_date_time(self) -> str:
"""
The time the API release was updated.
"""
return pulumi.get(self, "updated_date_time")
class AwaitableGetApiReleaseResult(GetApiReleaseResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetApiReleaseResult(
api_id=self.api_id,
created_date_time=self.created_date_time,
name=self.name,
notes=self.notes,
type=self.type,
updated_date_time=self.updated_date_time)
def get_api_release(api_id: Optional[str] = None,
release_id: Optional[str] = None,
resource_group_name: Optional[str] = None,
service_name: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetApiReleaseResult:
"""
Use this data source to access information about an existing resource.
:param str api_id: API identifier. Must be unique in the current API Management service instance.
:param str release_id: Release identifier within an API. Must be unique in the current API Management service instance.
:param str resource_group_name: The name of the resource group.
:param str service_name: The name of the API Management service.
"""
__args__ = dict()
__args__['apiId'] = api_id
__args__['releaseId'] = release_id
__args__['resourceGroupName'] = resource_group_name
__args__['serviceName'] = service_name
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('azure-nextgen:apimanagement/v20170301:getApiRelease', __args__, opts=opts, typ=GetApiReleaseResult).value
return AwaitableGetApiReleaseResult(
api_id=__ret__.api_id,
created_date_time=__ret__.created_date_time,
name=__ret__.name,
notes=__ret__.notes,
type=__ret__.type,
updated_date_time=__ret__.updated_date_time)
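# Usage sketch (editor's addition): inside a Pulumi program this data source is
# invoked with identifiers of existing API Management resources; the values
# below are placeholders only.
#   release = get_api_release(api_id='my-api',
#                             release_id='my-release',
#                             resource_group_name='my-resource-group',
#                             service_name='my-apim-service')
#   pulumi.export('releaseNotes', release.notes)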
| 35.948529 | 142 | 0.652076 |
4a2090ea6e805107248f50a6982f841070f3d9a2 | 1,554 | py | Python | old/ch3/simple_cbow.py | intlabSeJun/deep-learning2 | bd02497adb627e1e07b4ed71044f675d2208ddb6 | [
"MIT"
] | null | null | null | old/ch3/simple_cbow.py | intlabSeJun/deep-learning2 | bd02497adb627e1e07b4ed71044f675d2208ddb6 | [
"MIT"
] | null | null | null | old/ch3/simple_cbow.py | intlabSeJun/deep-learning2 | bd02497adb627e1e07b4ed71044f675d2208ddb6 | [
"MIT"
] | null | null | null |
import sys
sys.path.append('..')
import numpy as np
from common.layers import MatMul, SoftmaxWithLoss
class SimpleCBOW:
def __init__(self, vocab_size, hidden_size):
V, H = vocab_size, hidden_size
        # Initialize the weights
W_in = 0.01 * np.random.randn(V, H).astype('f')
W_out = 0.01 * np.random.randn(H, V).astype('f')
        # Create the layers
        # layer0 and layer1 share the same weights (weight sharing)
        self.in_layer0 = MatMul(W_in)  # one input layer is needed per context-window position; create the instances
self.in_layer1 = MatMul(W_in)
self.out_layer = MatMul(W_out)
self.loss_layer = SoftmaxWithLoss()
        # Collect all the weights and gradients into lists
layers = [self.in_layer0, self.in_layer1, self.out_layer, self.loss_layer]
self.params, self.grads = [], []
for layer in layers:
self.params += layer.params
self.grads += layer.grads
        # Store the distributed representations of the words in an instance variable
self.word_vecs = W_in
def forward(self, contexts, target):
        # Compute x * W_in for the two context words over the whole batch
        # -> expresses how much each context word contributes to the centre word (its distributed representation).
        # -> the inputs are one-hot vectors, so the matmul simply selects the matching row of the
        #    weight matrix, and that row is the distributed-representation vector (value).
        h0 = self.in_layer0.forward(contexts[:, 0])  # (batch, 7) * (vocab_size(7), hidden)
        h1 = self.in_layer1.forward(contexts[:, 1])  # (batch, 7) * (vocab_size, hidden)
        h = (h0 + h1) * 0.5  # average of the distributed representations of the two context words
score = self.out_layer.forward(h) # (batch,hidden) * ( hidden, vocab_size )
# print(score)
# print(target)
loss = self.loss_layer.forward(score, target)
return loss
def backward(self, dout=1):
ds = self.loss_layer.backward(dout)
da = self.out_layer.backward(ds)
da *= 0.5
self.in_layer1.backward(da)
self.in_layer0.backward(da)
return None
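# Usage sketch (editor's addition, not in the original chapter code): a toy run
# with a 7-word vocabulary and a single sample. contexts holds one-hot vectors
# for the two surrounding words, target the one-hot vector of the centre word;
# common.layers from the book's repository must be importable.
if __name__ == '__main__':
    model = SimpleCBOW(vocab_size=7, hidden_size=5)
    contexts = np.zeros((1, 2, 7), dtype='f')
    contexts[0, 0, 0] = 1.0  # left context word (id 0)
    contexts[0, 1, 2] = 1.0  # right context word (id 2)
    target = np.zeros((1, 7), dtype='f')
    target[0, 1] = 1.0       # centre word (id 1)
    loss = model.forward(contexts, target)
    model.backward()
    print('loss:', loss)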
| 30.470588 | 84 | 0.678893 |
4a2091d3f88f7984253c24295c8503b29f434cf1 | 1,068 | py | Python | src/spaceone/config/info/domain_config_info.py | jihyungSong/config | c917c4c3cd27ee85d343d808967ae2da1638512d | [
"Apache-2.0"
] | null | null | null | src/spaceone/config/info/domain_config_info.py | jihyungSong/config | c917c4c3cd27ee85d343d808967ae2da1638512d | [
"Apache-2.0"
] | null | null | null | src/spaceone/config/info/domain_config_info.py | jihyungSong/config | c917c4c3cd27ee85d343d808967ae2da1638512d | [
"Apache-2.0"
] | null | null | null |
import functools
from spaceone.api.config.v1 import domain_config_pb2
from spaceone.core.pygrpc.message_type import *
from spaceone.core import utils
from spaceone.config.model.domain_config_model import DomainConfig
__all__ = ['DomainConfigInfo', 'DomainConfigsInfo']
def DomainConfigInfo(domain_config_vo: DomainConfig, minimal=False):
info = {
'name': domain_config_vo.name,
}
if not minimal:
info.update({
'data': change_struct_type(domain_config_vo.data),
'tags': change_struct_type(utils.tags_to_dict(domain_config_vo.tags)),
'schema': domain_config_vo.schema,
'domain_id': domain_config_vo.domain_id,
'created_at': utils.datetime_to_iso8601(domain_config_vo.created_at)
})
return domain_config_pb2.DomainConfigInfo(**info)
def DomainConfigsInfo(domain_config_vos, total_count, **kwargs):
return domain_config_pb2.DomainConfigsInfo(results=list(
map(functools.partial(DomainConfigInfo, **kwargs), domain_config_vos)), total_count=total_count)
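# Usage sketch (editor's addition): `vo` stands for a DomainConfig model object
# fetched elsewhere (e.g. by a manager layer); it is a placeholder, not an API
# defined in this module.
#   info = DomainConfigInfo(vo, minimal=True)    # name only
#   full = DomainConfigInfo(vo)                  # data, tags, schema, domain_id, created_at
#   page = DomainConfigsInfo([vo], total_count=1)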
| 35.6 | 104 | 0.737828 |
4a209235300cd77b754f907836f692a89a589e67 | 905 | py | Python | setup.py | kbrgl/scroller | 7c7cb5a6f956072422a37404ac3386cf78faf987 | [
"MIT"
] | 34 | 2016-06-07T14:49:12.000Z | 2019-08-23T22:48:26.000Z | setup.py | kbrgl/scroller | 7c7cb5a6f956072422a37404ac3386cf78faf987 | [
"MIT"
] | 4 | 2016-06-12T00:33:09.000Z | 2016-06-25T12:07:40.000Z | setup.py | kbrgl/scroller | 7c7cb5a6f956072422a37404ac3386cf78faf987 | [
"MIT"
] | 3 | 2016-06-18T10:55:09.000Z | 2020-10-20T00:02:14.000Z |
import os
from setuptools import setup, find_packages
import config
setup(
name="scroller",
version=config.version,
author="Kabir Goel",
author_email="[email protected]",
description=config.description,
license="MIT",
py_modules=["scroller", "config"],
keywords="animation utility scrolling text terminal cli",
classifiers=[
"Topic :: Utilities",
"License :: OSI Approved :: MIT License",
"Environment :: Console",
"Environment :: X11 Applications",
"Programming Language :: Python :: 3 :: Only",
"Topic :: Desktop Environment",
"Topic :: Desktop Environment :: Window Managers",
"Topic :: Terminals",
"Topic :: Text Processing",
],
url="https://github.com/kbrgl/scroller",
entry_points={
'console_scripts': [
'scroller = scroller:main',
],
},
)
| 27.424242 | 61 | 0.605525 |
4a2093dae6d97d5297c09c9a45619ad52c5ae4e2 | 50,400 | py | Python | tests/integration/api_service_test.py | mitar/docker-py | 44034c4041dc4627941c11f7928058c80991f822 | [
"Apache-2.0"
] | null | null | null | tests/integration/api_service_test.py | mitar/docker-py | 44034c4041dc4627941c11f7928058c80991f822 | [
"Apache-2.0"
] | null | null | null | tests/integration/api_service_test.py | mitar/docker-py | 44034c4041dc4627941c11f7928058c80991f822 | [
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
import random
import time
import docker
import six
from ..helpers import (
force_leave_swarm, requires_api_version, requires_experimental
)
from .base import BaseAPIIntegrationTest, BUSYBOX
class ServiceTest(BaseAPIIntegrationTest):
@classmethod
def setup_class(cls):
client = cls.get_client_instance()
force_leave_swarm(client)
cls._init_swarm(client)
@classmethod
def teardown_class(cls):
client = cls.get_client_instance()
force_leave_swarm(client)
def tearDown(self):
for service in self.client.services(filters={'name': 'dockerpytest_'}):
try:
self.client.remove_service(service['ID'])
except docker.errors.APIError:
pass
super(ServiceTest, self).tearDown()
def get_service_name(self):
return 'dockerpytest_{0:x}'.format(random.getrandbits(64))
def get_service_container(self, service_name, attempts=20, interval=0.5,
include_stopped=False):
# There is some delay between the service's creation and the creation
# of the service's containers. This method deals with the uncertainty
# when trying to retrieve the container associated with a service.
while True:
containers = self.client.containers(
filters={'name': [service_name]}, quiet=True,
all=include_stopped
)
if len(containers) > 0:
return containers[0]
attempts -= 1
if attempts <= 0:
return None
time.sleep(interval)
def create_simple_service(self, name=None, labels=None):
if name:
name = 'dockerpytest_{0}'.format(name)
else:
name = self.get_service_name()
container_spec = docker.types.ContainerSpec(
BUSYBOX, ['echo', 'hello']
)
task_tmpl = docker.types.TaskTemplate(container_spec)
return name, self.client.create_service(
task_tmpl, name=name, labels=labels
)
@requires_api_version('1.24')
def test_list_services(self):
services = self.client.services()
assert isinstance(services, list)
test_services = self.client.services(filters={'name': 'dockerpytest_'})
assert len(test_services) == 0
self.create_simple_service()
test_services = self.client.services(filters={'name': 'dockerpytest_'})
assert len(test_services) == 1
assert 'dockerpytest_' in test_services[0]['Spec']['Name']
@requires_api_version('1.24')
def test_list_services_filter_by_label(self):
test_services = self.client.services(filters={'label': 'test_label'})
assert len(test_services) == 0
self.create_simple_service(labels={'test_label': 'testing'})
test_services = self.client.services(filters={'label': 'test_label'})
assert len(test_services) == 1
assert test_services[0]['Spec']['Labels']['test_label'] == 'testing'
def test_inspect_service_by_id(self):
svc_name, svc_id = self.create_simple_service()
svc_info = self.client.inspect_service(svc_id)
assert 'ID' in svc_info
assert svc_info['ID'] == svc_id['ID']
def test_inspect_service_by_name(self):
svc_name, svc_id = self.create_simple_service()
svc_info = self.client.inspect_service(svc_name)
assert 'ID' in svc_info
assert svc_info['ID'] == svc_id['ID']
@requires_api_version('1.29')
def test_inspect_service_insert_defaults(self):
svc_name, svc_id = self.create_simple_service()
svc_info = self.client.inspect_service(svc_id)
svc_info_defaults = self.client.inspect_service(
svc_id, insert_defaults=True
)
assert svc_info != svc_info_defaults
assert 'RollbackConfig' in svc_info_defaults['Spec']
assert 'RollbackConfig' not in svc_info['Spec']
def test_remove_service_by_id(self):
svc_name, svc_id = self.create_simple_service()
assert self.client.remove_service(svc_id)
test_services = self.client.services(filters={'name': 'dockerpytest_'})
assert len(test_services) == 0
def test_remove_service_by_name(self):
svc_name, svc_id = self.create_simple_service()
assert self.client.remove_service(svc_name)
test_services = self.client.services(filters={'name': 'dockerpytest_'})
assert len(test_services) == 0
def test_create_service_simple(self):
name, svc_id = self.create_simple_service()
assert self.client.inspect_service(svc_id)
services = self.client.services(filters={'name': name})
assert len(services) == 1
assert services[0]['ID'] == svc_id['ID']
@requires_api_version('1.25')
@requires_experimental(until='1.29')
def test_service_logs(self):
name, svc_id = self.create_simple_service()
assert self.get_service_container(name, include_stopped=True)
attempts = 20
while True:
if attempts == 0:
self.fail('No service logs produced by endpoint')
return
logs = self.client.service_logs(svc_id, stdout=True, is_tty=False)
try:
log_line = next(logs)
except StopIteration:
attempts -= 1
time.sleep(0.1)
continue
else:
break
if six.PY3:
log_line = log_line.decode('utf-8')
assert 'hello\n' in log_line
def test_create_service_custom_log_driver(self):
container_spec = docker.types.ContainerSpec(
BUSYBOX, ['echo', 'hello']
)
log_cfg = docker.types.DriverConfig('none')
task_tmpl = docker.types.TaskTemplate(
container_spec, log_driver=log_cfg
)
name = self.get_service_name()
svc_id = self.client.create_service(task_tmpl, name=name)
svc_info = self.client.inspect_service(svc_id)
assert 'TaskTemplate' in svc_info['Spec']
res_template = svc_info['Spec']['TaskTemplate']
assert 'LogDriver' in res_template
assert 'Name' in res_template['LogDriver']
assert res_template['LogDriver']['Name'] == 'none'
def test_create_service_with_volume_mount(self):
vol_name = self.get_service_name()
container_spec = docker.types.ContainerSpec(
BUSYBOX, ['ls'],
mounts=[
docker.types.Mount(target='/test', source=vol_name)
]
)
self.tmp_volumes.append(vol_name)
task_tmpl = docker.types.TaskTemplate(container_spec)
name = self.get_service_name()
svc_id = self.client.create_service(task_tmpl, name=name)
svc_info = self.client.inspect_service(svc_id)
assert 'ContainerSpec' in svc_info['Spec']['TaskTemplate']
cspec = svc_info['Spec']['TaskTemplate']['ContainerSpec']
assert 'Mounts' in cspec
assert len(cspec['Mounts']) == 1
mount = cspec['Mounts'][0]
assert mount['Target'] == '/test'
assert mount['Source'] == vol_name
assert mount['Type'] == 'volume'
def test_create_service_with_resources_constraints(self):
container_spec = docker.types.ContainerSpec(BUSYBOX, ['true'])
resources = docker.types.Resources(
cpu_limit=4000000, mem_limit=3 * 1024 * 1024 * 1024,
cpu_reservation=3500000, mem_reservation=2 * 1024 * 1024 * 1024
)
task_tmpl = docker.types.TaskTemplate(
container_spec, resources=resources
)
name = self.get_service_name()
svc_id = self.client.create_service(task_tmpl, name=name)
svc_info = self.client.inspect_service(svc_id)
assert 'TaskTemplate' in svc_info['Spec']
res_template = svc_info['Spec']['TaskTemplate']
assert 'Resources' in res_template
assert res_template['Resources']['Limits'] == resources['Limits']
assert res_template['Resources']['Reservations'] == resources[
'Reservations'
]
def test_create_service_with_update_config(self):
container_spec = docker.types.ContainerSpec(BUSYBOX, ['true'])
task_tmpl = docker.types.TaskTemplate(container_spec)
update_config = docker.types.UpdateConfig(
parallelism=10, delay=5, failure_action='pause'
)
name = self.get_service_name()
svc_id = self.client.create_service(
task_tmpl, update_config=update_config, name=name
)
svc_info = self.client.inspect_service(svc_id)
assert 'UpdateConfig' in svc_info['Spec']
uc = svc_info['Spec']['UpdateConfig']
assert update_config['Parallelism'] == uc['Parallelism']
assert update_config['Delay'] == uc['Delay']
assert update_config['FailureAction'] == uc['FailureAction']
@requires_api_version('1.25')
def test_create_service_with_update_config_monitor(self):
container_spec = docker.types.ContainerSpec('busybox', ['true'])
task_tmpl = docker.types.TaskTemplate(container_spec)
update_config = docker.types.UpdateConfig(
monitor=300000000, max_failure_ratio=0.4
)
name = self.get_service_name()
svc_id = self.client.create_service(
task_tmpl, update_config=update_config, name=name
)
svc_info = self.client.inspect_service(svc_id)
assert 'UpdateConfig' in svc_info['Spec']
uc = svc_info['Spec']['UpdateConfig']
assert update_config['Monitor'] == uc['Monitor']
assert update_config['MaxFailureRatio'] == uc['MaxFailureRatio']
def test_create_service_with_restart_policy(self):
container_spec = docker.types.ContainerSpec(BUSYBOX, ['true'])
policy = docker.types.RestartPolicy(
docker.types.RestartPolicy.condition_types.ANY,
delay=5, max_attempts=5
)
task_tmpl = docker.types.TaskTemplate(
container_spec, restart_policy=policy
)
name = self.get_service_name()
svc_id = self.client.create_service(task_tmpl, name=name)
svc_info = self.client.inspect_service(svc_id)
assert 'RestartPolicy' in svc_info['Spec']['TaskTemplate']
assert policy == svc_info['Spec']['TaskTemplate']['RestartPolicy']
def test_create_service_with_custom_networks(self):
net1 = self.client.create_network(
'dockerpytest_1', driver='overlay', ipam={'Driver': 'default'}
)
self.tmp_networks.append(net1['Id'])
net2 = self.client.create_network(
'dockerpytest_2', driver='overlay', ipam={'Driver': 'default'}
)
self.tmp_networks.append(net2['Id'])
container_spec = docker.types.ContainerSpec(BUSYBOX, ['true'])
task_tmpl = docker.types.TaskTemplate(container_spec)
name = self.get_service_name()
svc_id = self.client.create_service(
task_tmpl, name=name, networks=[
'dockerpytest_1', {'Target': 'dockerpytest_2'}
]
)
svc_info = self.client.inspect_service(svc_id)
assert 'Networks' in svc_info['Spec']
assert svc_info['Spec']['Networks'] == [
{'Target': net1['Id']}, {'Target': net2['Id']}
]
def test_create_service_with_placement(self):
node_id = self.client.nodes()[0]['ID']
container_spec = docker.types.ContainerSpec(BUSYBOX, ['true'])
task_tmpl = docker.types.TaskTemplate(
container_spec, placement=['node.id=={}'.format(node_id)]
)
name = self.get_service_name()
svc_id = self.client.create_service(task_tmpl, name=name)
svc_info = self.client.inspect_service(svc_id)
assert 'Placement' in svc_info['Spec']['TaskTemplate']
assert (svc_info['Spec']['TaskTemplate']['Placement'] ==
{'Constraints': ['node.id=={}'.format(node_id)]})
def test_create_service_with_placement_object(self):
node_id = self.client.nodes()[0]['ID']
container_spec = docker.types.ContainerSpec(BUSYBOX, ['true'])
placemt = docker.types.Placement(
constraints=['node.id=={}'.format(node_id)]
)
task_tmpl = docker.types.TaskTemplate(
container_spec, placement=placemt
)
name = self.get_service_name()
svc_id = self.client.create_service(task_tmpl, name=name)
svc_info = self.client.inspect_service(svc_id)
assert 'Placement' in svc_info['Spec']['TaskTemplate']
assert svc_info['Spec']['TaskTemplate']['Placement'] == placemt
@requires_api_version('1.30')
def test_create_service_with_placement_platform(self):
container_spec = docker.types.ContainerSpec(BUSYBOX, ['true'])
placemt = docker.types.Placement(platforms=[('x86_64', 'linux')])
task_tmpl = docker.types.TaskTemplate(
container_spec, placement=placemt
)
name = self.get_service_name()
svc_id = self.client.create_service(task_tmpl, name=name)
svc_info = self.client.inspect_service(svc_id)
assert 'Placement' in svc_info['Spec']['TaskTemplate']
assert svc_info['Spec']['TaskTemplate']['Placement'] == placemt
@requires_api_version('1.27')
def test_create_service_with_placement_preferences(self):
container_spec = docker.types.ContainerSpec(BUSYBOX, ['true'])
placemt = docker.types.Placement(preferences=[
{'Spread': {'SpreadDescriptor': 'com.dockerpy.test'}}
])
task_tmpl = docker.types.TaskTemplate(
container_spec, placement=placemt
)
name = self.get_service_name()
svc_id = self.client.create_service(task_tmpl, name=name)
svc_info = self.client.inspect_service(svc_id)
assert 'Placement' in svc_info['Spec']['TaskTemplate']
assert svc_info['Spec']['TaskTemplate']['Placement'] == placemt
def test_create_service_with_endpoint_spec(self):
container_spec = docker.types.ContainerSpec(BUSYBOX, ['true'])
task_tmpl = docker.types.TaskTemplate(container_spec)
name = self.get_service_name()
endpoint_spec = docker.types.EndpointSpec(ports={
12357: (1990, 'udp'),
12562: (678,),
53243: 8080,
})
svc_id = self.client.create_service(
task_tmpl, name=name, endpoint_spec=endpoint_spec
)
svc_info = self.client.inspect_service(svc_id)
print(svc_info)
ports = svc_info['Spec']['EndpointSpec']['Ports']
for port in ports:
if port['PublishedPort'] == 12562:
assert port['TargetPort'] == 678
assert port['Protocol'] == 'tcp'
elif port['PublishedPort'] == 53243:
assert port['TargetPort'] == 8080
assert port['Protocol'] == 'tcp'
elif port['PublishedPort'] == 12357:
assert port['TargetPort'] == 1990
assert port['Protocol'] == 'udp'
else:
self.fail('Invalid port specification: {0}'.format(port))
assert len(ports) == 3
def test_create_service_with_env(self):
container_spec = docker.types.ContainerSpec(
BUSYBOX, ['true'], env={'DOCKER_PY_TEST': 1}
)
task_tmpl = docker.types.TaskTemplate(
container_spec,
)
name = self.get_service_name()
svc_id = self.client.create_service(task_tmpl, name=name)
svc_info = self.client.inspect_service(svc_id)
assert 'TaskTemplate' in svc_info['Spec']
assert 'ContainerSpec' in svc_info['Spec']['TaskTemplate']
con_spec = svc_info['Spec']['TaskTemplate']['ContainerSpec']
assert 'Env' in con_spec
assert con_spec['Env'] == ['DOCKER_PY_TEST=1']
@requires_api_version('1.29')
def test_create_service_with_update_order(self):
container_spec = docker.types.ContainerSpec(BUSYBOX, ['true'])
task_tmpl = docker.types.TaskTemplate(container_spec)
update_config = docker.types.UpdateConfig(
parallelism=10, delay=5, order='start-first'
)
name = self.get_service_name()
svc_id = self.client.create_service(
task_tmpl, update_config=update_config, name=name
)
svc_info = self.client.inspect_service(svc_id)
assert 'UpdateConfig' in svc_info['Spec']
uc = svc_info['Spec']['UpdateConfig']
assert update_config['Parallelism'] == uc['Parallelism']
assert update_config['Delay'] == uc['Delay']
assert update_config['Order'] == uc['Order']
@requires_api_version('1.25')
def test_create_service_with_tty(self):
container_spec = docker.types.ContainerSpec(
BUSYBOX, ['true'], tty=True
)
task_tmpl = docker.types.TaskTemplate(
container_spec,
)
name = self.get_service_name()
svc_id = self.client.create_service(task_tmpl, name=name)
svc_info = self.client.inspect_service(svc_id)
assert 'TaskTemplate' in svc_info['Spec']
assert 'ContainerSpec' in svc_info['Spec']['TaskTemplate']
con_spec = svc_info['Spec']['TaskTemplate']['ContainerSpec']
assert 'TTY' in con_spec
assert con_spec['TTY'] is True
@requires_api_version('1.25')
def test_create_service_with_tty_dict(self):
container_spec = {
'Image': BUSYBOX,
'Command': ['true'],
'TTY': True
}
task_tmpl = docker.types.TaskTemplate(container_spec)
name = self.get_service_name()
svc_id = self.client.create_service(task_tmpl, name=name)
svc_info = self.client.inspect_service(svc_id)
assert 'TaskTemplate' in svc_info['Spec']
assert 'ContainerSpec' in svc_info['Spec']['TaskTemplate']
con_spec = svc_info['Spec']['TaskTemplate']['ContainerSpec']
assert 'TTY' in con_spec
assert con_spec['TTY'] is True
def test_create_service_global_mode(self):
container_spec = docker.types.ContainerSpec(
BUSYBOX, ['echo', 'hello']
)
task_tmpl = docker.types.TaskTemplate(container_spec)
name = self.get_service_name()
svc_id = self.client.create_service(
task_tmpl, name=name, mode='global'
)
svc_info = self.client.inspect_service(svc_id)
assert 'Mode' in svc_info['Spec']
assert 'Global' in svc_info['Spec']['Mode']
def test_create_service_replicated_mode(self):
container_spec = docker.types.ContainerSpec(
BUSYBOX, ['echo', 'hello']
)
task_tmpl = docker.types.TaskTemplate(container_spec)
name = self.get_service_name()
svc_id = self.client.create_service(
task_tmpl, name=name,
mode=docker.types.ServiceMode('replicated', 5)
)
svc_info = self.client.inspect_service(svc_id)
assert 'Mode' in svc_info['Spec']
assert 'Replicated' in svc_info['Spec']['Mode']
assert svc_info['Spec']['Mode']['Replicated'] == {'Replicas': 5}
@requires_api_version('1.25')
def test_update_service_force_update(self):
container_spec = docker.types.ContainerSpec(
'busybox', ['echo', 'hello']
)
task_tmpl = docker.types.TaskTemplate(container_spec)
name = self.get_service_name()
svc_id = self.client.create_service(task_tmpl, name=name)
svc_info = self.client.inspect_service(svc_id)
assert 'TaskTemplate' in svc_info['Spec']
assert 'ForceUpdate' in svc_info['Spec']['TaskTemplate']
assert svc_info['Spec']['TaskTemplate']['ForceUpdate'] == 0
version_index = svc_info['Version']['Index']
task_tmpl = docker.types.TaskTemplate(container_spec, force_update=10)
self.client.update_service(name, version_index, task_tmpl, name=name)
svc_info = self.client.inspect_service(svc_id)
new_index = svc_info['Version']['Index']
assert new_index > version_index
assert svc_info['Spec']['TaskTemplate']['ForceUpdate'] == 10
@requires_api_version('1.25')
def test_create_service_with_secret(self):
secret_name = 'favorite_touhou'
secret_data = b'phantasmagoria of flower view'
secret_id = self.client.create_secret(secret_name, secret_data)
self.tmp_secrets.append(secret_id)
secret_ref = docker.types.SecretReference(secret_id, secret_name)
container_spec = docker.types.ContainerSpec(
'busybox', ['sleep', '999'], secrets=[secret_ref]
)
task_tmpl = docker.types.TaskTemplate(container_spec)
name = self.get_service_name()
svc_id = self.client.create_service(task_tmpl, name=name)
svc_info = self.client.inspect_service(svc_id)
assert 'Secrets' in svc_info['Spec']['TaskTemplate']['ContainerSpec']
secrets = svc_info['Spec']['TaskTemplate']['ContainerSpec']['Secrets']
assert secrets[0] == secret_ref
container = self.get_service_container(name)
assert container is not None
exec_id = self.client.exec_create(
container, 'cat /run/secrets/{0}'.format(secret_name)
)
assert self.client.exec_start(exec_id) == secret_data
@requires_api_version('1.25')
def test_create_service_with_unicode_secret(self):
secret_name = 'favorite_touhou'
secret_data = u'東方花映塚'
secret_id = self.client.create_secret(secret_name, secret_data)
self.tmp_secrets.append(secret_id)
secret_ref = docker.types.SecretReference(secret_id, secret_name)
container_spec = docker.types.ContainerSpec(
'busybox', ['sleep', '999'], secrets=[secret_ref]
)
task_tmpl = docker.types.TaskTemplate(container_spec)
name = self.get_service_name()
svc_id = self.client.create_service(task_tmpl, name=name)
svc_info = self.client.inspect_service(svc_id)
assert 'Secrets' in svc_info['Spec']['TaskTemplate']['ContainerSpec']
secrets = svc_info['Spec']['TaskTemplate']['ContainerSpec']['Secrets']
assert secrets[0] == secret_ref
container = self.get_service_container(name)
assert container is not None
exec_id = self.client.exec_create(
container, 'cat /run/secrets/{0}'.format(secret_name)
)
container_secret = self.client.exec_start(exec_id)
container_secret = container_secret.decode('utf-8')
assert container_secret == secret_data
@requires_api_version('1.30')
def test_create_service_with_config(self):
config_name = 'favorite_touhou'
config_data = b'phantasmagoria of flower view'
config_id = self.client.create_config(config_name, config_data)
self.tmp_configs.append(config_id)
config_ref = docker.types.ConfigReference(config_id, config_name)
container_spec = docker.types.ContainerSpec(
'busybox', ['sleep', '999'], configs=[config_ref]
)
task_tmpl = docker.types.TaskTemplate(container_spec)
name = self.get_service_name()
svc_id = self.client.create_service(task_tmpl, name=name)
svc_info = self.client.inspect_service(svc_id)
assert 'Configs' in svc_info['Spec']['TaskTemplate']['ContainerSpec']
configs = svc_info['Spec']['TaskTemplate']['ContainerSpec']['Configs']
assert configs[0] == config_ref
container = self.get_service_container(name)
assert container is not None
exec_id = self.client.exec_create(
container, 'cat /{0}'.format(config_name)
)
assert self.client.exec_start(exec_id) == config_data
@requires_api_version('1.30')
def test_create_service_with_unicode_config(self):
config_name = 'favorite_touhou'
config_data = u'東方花映塚'
config_id = self.client.create_config(config_name, config_data)
self.tmp_configs.append(config_id)
config_ref = docker.types.ConfigReference(config_id, config_name)
container_spec = docker.types.ContainerSpec(
'busybox', ['sleep', '999'], configs=[config_ref]
)
task_tmpl = docker.types.TaskTemplate(container_spec)
name = self.get_service_name()
svc_id = self.client.create_service(task_tmpl, name=name)
svc_info = self.client.inspect_service(svc_id)
assert 'Configs' in svc_info['Spec']['TaskTemplate']['ContainerSpec']
configs = svc_info['Spec']['TaskTemplate']['ContainerSpec']['Configs']
assert configs[0] == config_ref
container = self.get_service_container(name)
assert container is not None
exec_id = self.client.exec_create(
container, 'cat /{0}'.format(config_name)
)
container_config = self.client.exec_start(exec_id)
container_config = container_config.decode('utf-8')
assert container_config == config_data
@requires_api_version('1.25')
def test_create_service_with_hosts(self):
container_spec = docker.types.ContainerSpec(
'busybox', ['sleep', '999'], hosts={
'foobar': '127.0.0.1',
'baz': '8.8.8.8',
}
)
task_tmpl = docker.types.TaskTemplate(container_spec)
name = self.get_service_name()
svc_id = self.client.create_service(task_tmpl, name=name)
svc_info = self.client.inspect_service(svc_id)
assert 'Hosts' in svc_info['Spec']['TaskTemplate']['ContainerSpec']
hosts = svc_info['Spec']['TaskTemplate']['ContainerSpec']['Hosts']
assert len(hosts) == 2
assert '127.0.0.1 foobar' in hosts
assert '8.8.8.8 baz' in hosts
@requires_api_version('1.25')
def test_create_service_with_hostname(self):
container_spec = docker.types.ContainerSpec(
'busybox', ['sleep', '999'], hostname='foobar.baz.com'
)
task_tmpl = docker.types.TaskTemplate(container_spec)
name = self.get_service_name()
svc_id = self.client.create_service(task_tmpl, name=name)
svc_info = self.client.inspect_service(svc_id)
assert 'Hostname' in svc_info['Spec']['TaskTemplate']['ContainerSpec']
assert (
svc_info['Spec']['TaskTemplate']['ContainerSpec']['Hostname'] ==
'foobar.baz.com'
)
@requires_api_version('1.25')
def test_create_service_with_groups(self):
container_spec = docker.types.ContainerSpec(
'busybox', ['sleep', '999'], groups=['shrinemaidens', 'youkais']
)
task_tmpl = docker.types.TaskTemplate(container_spec)
name = self.get_service_name()
svc_id = self.client.create_service(task_tmpl, name=name)
svc_info = self.client.inspect_service(svc_id)
assert 'Groups' in svc_info['Spec']['TaskTemplate']['ContainerSpec']
groups = svc_info['Spec']['TaskTemplate']['ContainerSpec']['Groups']
assert len(groups) == 2
assert 'shrinemaidens' in groups
assert 'youkais' in groups
@requires_api_version('1.25')
def test_create_service_with_dns_config(self):
dns_config = docker.types.DNSConfig(
nameservers=['8.8.8.8', '8.8.4.4'],
search=['local'], options=['debug']
)
container_spec = docker.types.ContainerSpec(
BUSYBOX, ['sleep', '999'], dns_config=dns_config
)
task_tmpl = docker.types.TaskTemplate(container_spec)
name = self.get_service_name()
svc_id = self.client.create_service(task_tmpl, name=name)
svc_info = self.client.inspect_service(svc_id)
assert 'DNSConfig' in svc_info['Spec']['TaskTemplate']['ContainerSpec']
assert (
dns_config ==
svc_info['Spec']['TaskTemplate']['ContainerSpec']['DNSConfig']
)
@requires_api_version('1.25')
def test_create_service_with_healthcheck(self):
second = 1000000000
hc = docker.types.Healthcheck(
test='true', retries=3, timeout=1 * second,
start_period=3 * second, interval=int(second / 2),
)
container_spec = docker.types.ContainerSpec(
BUSYBOX, ['sleep', '999'], healthcheck=hc
)
task_tmpl = docker.types.TaskTemplate(container_spec)
name = self.get_service_name()
svc_id = self.client.create_service(task_tmpl, name=name)
svc_info = self.client.inspect_service(svc_id)
assert (
'Healthcheck' in svc_info['Spec']['TaskTemplate']['ContainerSpec']
)
assert (
hc ==
svc_info['Spec']['TaskTemplate']['ContainerSpec']['Healthcheck']
)
@requires_api_version('1.28')
def test_create_service_with_readonly(self):
container_spec = docker.types.ContainerSpec(
BUSYBOX, ['sleep', '999'], read_only=True
)
task_tmpl = docker.types.TaskTemplate(container_spec)
name = self.get_service_name()
svc_id = self.client.create_service(task_tmpl, name=name)
svc_info = self.client.inspect_service(svc_id)
assert (
'ReadOnly' in svc_info['Spec']['TaskTemplate']['ContainerSpec']
)
assert svc_info['Spec']['TaskTemplate']['ContainerSpec']['ReadOnly']
@requires_api_version('1.28')
def test_create_service_with_stop_signal(self):
container_spec = docker.types.ContainerSpec(
BUSYBOX, ['sleep', '999'], stop_signal='SIGINT'
)
task_tmpl = docker.types.TaskTemplate(container_spec)
name = self.get_service_name()
svc_id = self.client.create_service(task_tmpl, name=name)
svc_info = self.client.inspect_service(svc_id)
assert (
'StopSignal' in svc_info['Spec']['TaskTemplate']['ContainerSpec']
)
assert (
svc_info['Spec']['TaskTemplate']['ContainerSpec']['StopSignal'] ==
'SIGINT'
)
@requires_api_version('1.30')
def test_create_service_with_privileges(self):
priv = docker.types.Privileges(selinux_disable=True)
container_spec = docker.types.ContainerSpec(
BUSYBOX, ['sleep', '999'], privileges=priv
)
task_tmpl = docker.types.TaskTemplate(container_spec)
name = self.get_service_name()
svc_id = self.client.create_service(task_tmpl, name=name)
svc_info = self.client.inspect_service(svc_id)
assert (
'Privileges' in svc_info['Spec']['TaskTemplate']['ContainerSpec']
)
privileges = (
svc_info['Spec']['TaskTemplate']['ContainerSpec']['Privileges']
)
assert privileges['SELinuxContext']['Disable'] is True
@requires_api_version('1.25')
def test_update_service_with_defaults_name(self):
container_spec = docker.types.ContainerSpec(
'busybox', ['echo', 'hello']
)
task_tmpl = docker.types.TaskTemplate(container_spec)
name = self.get_service_name()
svc_id = self.client.create_service(task_tmpl, name=name)
svc_info = self.client.inspect_service(svc_id)
assert 'Name' in svc_info['Spec']
assert svc_info['Spec']['Name'] == name
version_index = svc_info['Version']['Index']
task_tmpl = docker.types.TaskTemplate(container_spec, force_update=10)
self._update_service(
svc_id, name, version_index, task_tmpl, fetch_current_spec=True
)
svc_info = self.client.inspect_service(svc_id)
new_index = svc_info['Version']['Index']
assert new_index > version_index
assert 'Name' in svc_info['Spec']
assert svc_info['Spec']['Name'] == name
@requires_api_version('1.25')
def test_update_service_with_defaults_labels(self):
container_spec = docker.types.ContainerSpec(
'busybox', ['echo', 'hello']
)
task_tmpl = docker.types.TaskTemplate(container_spec)
name = self.get_service_name()
svc_id = self.client.create_service(
task_tmpl, name=name, labels={'service.label': 'SampleLabel'}
)
svc_info = self.client.inspect_service(svc_id)
assert 'Labels' in svc_info['Spec']
assert 'service.label' in svc_info['Spec']['Labels']
assert svc_info['Spec']['Labels']['service.label'] == 'SampleLabel'
version_index = svc_info['Version']['Index']
task_tmpl = docker.types.TaskTemplate(container_spec, force_update=10)
self._update_service(
svc_id, name, version_index, task_tmpl, name=name,
fetch_current_spec=True
)
svc_info = self.client.inspect_service(svc_id)
new_index = svc_info['Version']['Index']
assert new_index > version_index
assert 'Labels' in svc_info['Spec']
assert 'service.label' in svc_info['Spec']['Labels']
assert svc_info['Spec']['Labels']['service.label'] == 'SampleLabel'
def test_update_service_with_defaults_mode(self):
container_spec = docker.types.ContainerSpec(
'busybox', ['echo', 'hello']
)
task_tmpl = docker.types.TaskTemplate(container_spec)
name = self.get_service_name()
svc_id = self.client.create_service(
task_tmpl, name=name,
mode=docker.types.ServiceMode(mode='replicated', replicas=2)
)
svc_info = self.client.inspect_service(svc_id)
assert 'Mode' in svc_info['Spec']
assert 'Replicated' in svc_info['Spec']['Mode']
assert 'Replicas' in svc_info['Spec']['Mode']['Replicated']
assert svc_info['Spec']['Mode']['Replicated']['Replicas'] == 2
version_index = svc_info['Version']['Index']
self._update_service(
svc_id, name, version_index, labels={'force': 'update'},
fetch_current_spec=True
)
svc_info = self.client.inspect_service(svc_id)
new_index = svc_info['Version']['Index']
assert new_index > version_index
assert 'Mode' in svc_info['Spec']
assert 'Replicated' in svc_info['Spec']['Mode']
assert 'Replicas' in svc_info['Spec']['Mode']['Replicated']
assert svc_info['Spec']['Mode']['Replicated']['Replicas'] == 2
def test_update_service_with_defaults_container_labels(self):
container_spec = docker.types.ContainerSpec(
'busybox', ['echo', 'hello'],
labels={'container.label': 'SampleLabel'}
)
task_tmpl = docker.types.TaskTemplate(container_spec)
name = self.get_service_name()
svc_id = self.client.create_service(
task_tmpl, name=name, labels={'service.label': 'SampleLabel'}
)
svc_info = self.client.inspect_service(svc_id)
assert 'TaskTemplate' in svc_info['Spec']
assert 'ContainerSpec' in svc_info['Spec']['TaskTemplate']
assert 'Labels' in svc_info['Spec']['TaskTemplate']['ContainerSpec']
labels = svc_info['Spec']['TaskTemplate']['ContainerSpec']['Labels']
assert labels['container.label'] == 'SampleLabel'
version_index = svc_info['Version']['Index']
self._update_service(
svc_id, name, version_index, labels={'force': 'update'},
fetch_current_spec=True
)
svc_info = self.client.inspect_service(svc_id)
new_index = svc_info['Version']['Index']
assert new_index > version_index
assert 'TaskTemplate' in svc_info['Spec']
assert 'ContainerSpec' in svc_info['Spec']['TaskTemplate']
assert 'Labels' in svc_info['Spec']['TaskTemplate']['ContainerSpec']
labels = svc_info['Spec']['TaskTemplate']['ContainerSpec']['Labels']
assert labels['container.label'] == 'SampleLabel'
container_spec = docker.types.ContainerSpec(
'busybox', ['echo', 'hello']
)
task_tmpl = docker.types.TaskTemplate(container_spec)
self._update_service(
svc_id, name, new_index, task_tmpl, fetch_current_spec=True
)
svc_info = self.client.inspect_service(svc_id)
newer_index = svc_info['Version']['Index']
assert newer_index > new_index
assert 'TaskTemplate' in svc_info['Spec']
assert 'ContainerSpec' in svc_info['Spec']['TaskTemplate']
assert 'Labels' in svc_info['Spec']['TaskTemplate']['ContainerSpec']
labels = svc_info['Spec']['TaskTemplate']['ContainerSpec']['Labels']
assert labels['container.label'] == 'SampleLabel'
def test_update_service_with_defaults_update_config(self):
container_spec = docker.types.ContainerSpec(BUSYBOX, ['true'])
task_tmpl = docker.types.TaskTemplate(container_spec)
update_config = docker.types.UpdateConfig(
parallelism=10, delay=5, failure_action='pause'
)
name = self.get_service_name()
svc_id = self.client.create_service(
task_tmpl, update_config=update_config, name=name
)
svc_info = self.client.inspect_service(svc_id)
assert 'UpdateConfig' in svc_info['Spec']
uc = svc_info['Spec']['UpdateConfig']
assert update_config['Parallelism'] == uc['Parallelism']
assert update_config['Delay'] == uc['Delay']
assert update_config['FailureAction'] == uc['FailureAction']
version_index = svc_info['Version']['Index']
self._update_service(
svc_id, name, version_index, labels={'force': 'update'},
fetch_current_spec=True
)
svc_info = self.client.inspect_service(svc_id)
new_index = svc_info['Version']['Index']
assert new_index > version_index
assert 'UpdateConfig' in svc_info['Spec']
uc = svc_info['Spec']['UpdateConfig']
assert update_config['Parallelism'] == uc['Parallelism']
assert update_config['Delay'] == uc['Delay']
assert update_config['FailureAction'] == uc['FailureAction']
def test_update_service_with_defaults_networks(self):
net1 = self.client.create_network(
'dockerpytest_1', driver='overlay', ipam={'Driver': 'default'}
)
self.tmp_networks.append(net1['Id'])
net2 = self.client.create_network(
'dockerpytest_2', driver='overlay', ipam={'Driver': 'default'}
)
self.tmp_networks.append(net2['Id'])
container_spec = docker.types.ContainerSpec(BUSYBOX, ['true'])
task_tmpl = docker.types.TaskTemplate(container_spec)
name = self.get_service_name()
svc_id = self.client.create_service(
task_tmpl, name=name, networks=[
'dockerpytest_1', {'Target': 'dockerpytest_2'}
]
)
svc_info = self.client.inspect_service(svc_id)
assert 'Networks' in svc_info['Spec']
assert svc_info['Spec']['Networks'] == [
{'Target': net1['Id']}, {'Target': net2['Id']}
]
version_index = svc_info['Version']['Index']
self._update_service(
svc_id, name, version_index, labels={'force': 'update'},
fetch_current_spec=True
)
svc_info = self.client.inspect_service(svc_id)
new_index = svc_info['Version']['Index']
assert new_index > version_index
assert 'Networks' in svc_info['Spec']['TaskTemplate']
assert svc_info['Spec']['TaskTemplate']['Networks'] == [
{'Target': net1['Id']}, {'Target': net2['Id']}
]
self._update_service(
svc_id, name, new_index, networks=[net1['Id']],
fetch_current_spec=True
)
svc_info = self.client.inspect_service(svc_id)
assert 'Networks' in svc_info['Spec']['TaskTemplate']
assert svc_info['Spec']['TaskTemplate']['Networks'] == [
{'Target': net1['Id']}
]
def test_update_service_with_defaults_endpoint_spec(self):
container_spec = docker.types.ContainerSpec(BUSYBOX, ['true'])
task_tmpl = docker.types.TaskTemplate(container_spec)
name = self.get_service_name()
endpoint_spec = docker.types.EndpointSpec(ports={
12357: (1990, 'udp'),
12562: (678,),
53243: 8080,
})
svc_id = self.client.create_service(
task_tmpl, name=name, endpoint_spec=endpoint_spec
)
svc_info = self.client.inspect_service(svc_id)
print(svc_info)
ports = svc_info['Spec']['EndpointSpec']['Ports']
for port in ports:
if port['PublishedPort'] == 12562:
assert port['TargetPort'] == 678
assert port['Protocol'] == 'tcp'
elif port['PublishedPort'] == 53243:
assert port['TargetPort'] == 8080
assert port['Protocol'] == 'tcp'
elif port['PublishedPort'] == 12357:
assert port['TargetPort'] == 1990
assert port['Protocol'] == 'udp'
else:
self.fail('Invalid port specification: {0}'.format(port))
assert len(ports) == 3
svc_info = self.client.inspect_service(svc_id)
version_index = svc_info['Version']['Index']
self._update_service(
svc_id, name, version_index, labels={'force': 'update'},
fetch_current_spec=True
)
svc_info = self.client.inspect_service(svc_id)
new_index = svc_info['Version']['Index']
assert new_index > version_index
ports = svc_info['Spec']['EndpointSpec']['Ports']
for port in ports:
if port['PublishedPort'] == 12562:
assert port['TargetPort'] == 678
assert port['Protocol'] == 'tcp'
elif port['PublishedPort'] == 53243:
assert port['TargetPort'] == 8080
assert port['Protocol'] == 'tcp'
elif port['PublishedPort'] == 12357:
assert port['TargetPort'] == 1990
assert port['Protocol'] == 'udp'
else:
self.fail('Invalid port specification: {0}'.format(port))
assert len(ports) == 3
@requires_api_version('1.25')
def test_update_service_remove_healthcheck(self):
second = 1000000000
hc = docker.types.Healthcheck(
test='true', retries=3, timeout=1 * second,
start_period=3 * second, interval=int(second / 2),
)
container_spec = docker.types.ContainerSpec(
BUSYBOX, ['sleep', '999'], healthcheck=hc
)
task_tmpl = docker.types.TaskTemplate(container_spec)
name = self.get_service_name()
svc_id = self.client.create_service(task_tmpl, name=name)
svc_info = self.client.inspect_service(svc_id)
assert (
'Healthcheck' in svc_info['Spec']['TaskTemplate']['ContainerSpec']
)
assert (
hc ==
svc_info['Spec']['TaskTemplate']['ContainerSpec']['Healthcheck']
)
container_spec = docker.types.ContainerSpec(
BUSYBOX, ['sleep', '999'], healthcheck={}
)
task_tmpl = docker.types.TaskTemplate(container_spec)
version_index = svc_info['Version']['Index']
self._update_service(
svc_id, name, version_index, task_tmpl, fetch_current_spec=True
)
svc_info = self.client.inspect_service(svc_id)
new_index = svc_info['Version']['Index']
assert new_index > version_index
container_spec = svc_info['Spec']['TaskTemplate']['ContainerSpec']
assert (
'Healthcheck' not in container_spec or
not container_spec['Healthcheck']
)
def test_update_service_remove_labels(self):
container_spec = docker.types.ContainerSpec(
'busybox', ['echo', 'hello']
)
task_tmpl = docker.types.TaskTemplate(container_spec)
name = self.get_service_name()
svc_id = self.client.create_service(
task_tmpl, name=name, labels={'service.label': 'SampleLabel'}
)
svc_info = self.client.inspect_service(svc_id)
assert 'Labels' in svc_info['Spec']
assert 'service.label' in svc_info['Spec']['Labels']
assert svc_info['Spec']['Labels']['service.label'] == 'SampleLabel'
version_index = svc_info['Version']['Index']
self._update_service(
svc_id, name, version_index, labels={}, fetch_current_spec=True
)
svc_info = self.client.inspect_service(svc_id)
new_index = svc_info['Version']['Index']
assert new_index > version_index
assert not svc_info['Spec'].get('Labels')
def test_update_service_remove_container_labels(self):
container_spec = docker.types.ContainerSpec(
'busybox', ['echo', 'hello'],
labels={'container.label': 'SampleLabel'}
)
task_tmpl = docker.types.TaskTemplate(container_spec)
name = self.get_service_name()
svc_id = self.client.create_service(
task_tmpl, name=name, labels={'service.label': 'SampleLabel'}
)
svc_info = self.client.inspect_service(svc_id)
assert 'TaskTemplate' in svc_info['Spec']
assert 'ContainerSpec' in svc_info['Spec']['TaskTemplate']
assert 'Labels' in svc_info['Spec']['TaskTemplate']['ContainerSpec']
labels = svc_info['Spec']['TaskTemplate']['ContainerSpec']['Labels']
assert labels['container.label'] == 'SampleLabel'
version_index = svc_info['Version']['Index']
container_spec = docker.types.ContainerSpec(
'busybox', ['echo', 'hello'],
labels={}
)
task_tmpl = docker.types.TaskTemplate(container_spec)
self._update_service(
svc_id, name, version_index, task_tmpl, fetch_current_spec=True
)
svc_info = self.client.inspect_service(svc_id)
new_index = svc_info['Version']['Index']
assert new_index > version_index
assert 'TaskTemplate' in svc_info['Spec']
assert 'ContainerSpec' in svc_info['Spec']['TaskTemplate']
container_spec = svc_info['Spec']['TaskTemplate']['ContainerSpec']
assert not container_spec.get('Labels')
@requires_api_version('1.29')
def test_update_service_with_network_change(self):
container_spec = docker.types.ContainerSpec(
'busybox', ['echo', 'hello']
)
task_tmpl = docker.types.TaskTemplate(container_spec)
net1 = self.client.create_network(
'dockerpytest_1', driver='overlay', ipam={'Driver': 'default'}
)
self.tmp_networks.append(net1['Id'])
net2 = self.client.create_network(
'dockerpytest_2', driver='overlay', ipam={'Driver': 'default'}
)
self.tmp_networks.append(net2['Id'])
name = self.get_service_name()
svc_id = self.client.create_service(
task_tmpl, name=name, networks=[net1['Id']]
)
svc_info = self.client.inspect_service(svc_id)
assert 'Networks' in svc_info['Spec']
assert len(svc_info['Spec']['Networks']) > 0
assert svc_info['Spec']['Networks'][0]['Target'] == net1['Id']
svc_info = self.client.inspect_service(svc_id)
version_index = svc_info['Version']['Index']
task_tmpl = docker.types.TaskTemplate(container_spec)
self._update_service(
svc_id, name, version_index, task_tmpl, name=name,
networks=[net2['Id']], fetch_current_spec=True
)
svc_info = self.client.inspect_service(svc_id)
task_template = svc_info['Spec']['TaskTemplate']
assert 'Networks' in task_template
assert len(task_template['Networks']) > 0
assert task_template['Networks'][0]['Target'] == net2['Id']
svc_info = self.client.inspect_service(svc_id)
new_index = svc_info['Version']['Index']
assert new_index > version_index
self._update_service(
svc_id, name, new_index, name=name, networks=[net1['Id']],
fetch_current_spec=True
)
svc_info = self.client.inspect_service(svc_id)
task_template = svc_info['Spec']['TaskTemplate']
assert 'ContainerSpec' in task_template
new_spec = task_template['ContainerSpec']
assert 'Image' in new_spec
assert new_spec['Image'].split(':')[0] == 'busybox'
assert 'Command' in new_spec
assert new_spec['Command'] == ['echo', 'hello']
assert 'Networks' in task_template
assert len(task_template['Networks']) > 0
assert task_template['Networks'][0]['Target'] == net1['Id']
svc_info = self.client.inspect_service(svc_id)
new_index = svc_info['Version']['Index']
task_tmpl = docker.types.TaskTemplate(
container_spec, networks=[net2['Id']]
)
self._update_service(
svc_id, name, new_index, task_tmpl, name=name,
fetch_current_spec=True
)
svc_info = self.client.inspect_service(svc_id)
task_template = svc_info['Spec']['TaskTemplate']
assert 'Networks' in task_template
assert len(task_template['Networks']) > 0
assert task_template['Networks'][0]['Target'] == net2['Id']
def _update_service(self, svc_id, *args, **kwargs):
# service update tests seem to be a bit flaky
# give them a chance to retry the update with a new version index
try:
self.client.update_service(*args, **kwargs)
except docker.errors.APIError as e:
if e.explanation.endswith("update out of sequence"):
svc_info = self.client.inspect_service(svc_id)
version_index = svc_info['Version']['Index']
if len(args) > 1:
args = (args[0], version_index) + args[2:]
else:
kwargs['version'] = version_index
self.client.update_service(*args, **kwargs)
else:
raise
| 42.60355 | 79 | 0.62619 |
4a20948d1e8bd2f90e31bacf98147d3a165568b2 | 4,526 | py | Python | mmpose/models/backbones/seresnet.py | chaowentao/mmpose | b528c60ef4fab56d35d1ed7e187023794639be26 | [
"Apache-2.0"
] | 367 | 2022-01-14T03:32:25.000Z | 2022-03-31T04:48:20.000Z | mmpose/models/backbones/seresnet.py | chaowentao/mmpose | b528c60ef4fab56d35d1ed7e187023794639be26 | [
"Apache-2.0"
] | 27 | 2022-01-27T07:12:49.000Z | 2022-03-31T04:31:13.000Z | mmpose/models/backbones/seresnet.py | chaowentao/mmpose | b528c60ef4fab56d35d1ed7e187023794639be26 | [
"Apache-2.0"
] | 53 | 2022-01-18T11:21:43.000Z | 2022-03-31T06:42:41.000Z | import torch.utils.checkpoint as cp
from ..registry import BACKBONES
from .resnet import Bottleneck, ResLayer, ResNet
from .utils.se_layer import SELayer
class SEBottleneck(Bottleneck):
"""SEBottleneck block for SEResNet.
Args:
in_channels (int): The input channels of the SEBottleneck block.
out_channels (int): The output channel of the SEBottleneck block.
se_ratio (int): Squeeze ratio in SELayer. Default: 16
"""
def __init__(self, in_channels, out_channels, se_ratio=16, **kwargs):
super().__init__(in_channels, out_channels, **kwargs)
self.se_layer = SELayer(out_channels, ratio=se_ratio)
def forward(self, x):
def _inner_forward(x):
identity = x
out = self.conv1(x)
out = self.norm1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.norm2(out)
out = self.relu(out)
out = self.conv3(out)
out = self.norm3(out)
out = self.se_layer(out)
if self.downsample is not None:
identity = self.downsample(x)
out += identity
return out
if self.with_cp and x.requires_grad:
out = cp.checkpoint(_inner_forward, x)
else:
out = _inner_forward(x)
out = self.relu(out)
return out
@BACKBONES.register_module()
class SEResNet(ResNet):
"""SEResNet backbone.
Please refer to the `paper <https://arxiv.org/abs/1709.01507>`__ for
details.
Args:
depth (int): Network depth, from {50, 101, 152}.
se_ratio (int): Squeeze ratio in SELayer. Default: 16.
in_channels (int): Number of input image channels. Default: 3.
stem_channels (int): Output channels of the stem layer. Default: 64.
num_stages (int): Stages of the network. Default: 4.
strides (Sequence[int]): Strides of the first block of each stage.
Default: ``(1, 2, 2, 2)``.
dilations (Sequence[int]): Dilation of each stage.
Default: ``(1, 1, 1, 1)``.
out_indices (Sequence[int]): Output from which stages. If only one
stage is specified, a single tensor (feature map) is returned,
otherwise multiple stages are specified, a tuple of tensors will
be returned. Default: ``(3, )``.
style (str): `pytorch` or `caffe`. If set to "pytorch", the stride-two
layer is the 3x3 conv layer, otherwise the stride-two layer is
the first 1x1 conv layer.
deep_stem (bool): Replace 7x7 conv in input stem with 3 3x3 conv.
Default: False.
avg_down (bool): Use AvgPool instead of stride conv when
downsampling in the bottleneck. Default: False.
frozen_stages (int): Stages to be frozen (stop grad and set eval mode).
-1 means not freezing any parameters. Default: -1.
conv_cfg (dict | None): The config dict for conv layers. Default: None.
norm_cfg (dict): The config dict for norm layers.
norm_eval (bool): Whether to set norm layers to eval mode, namely,
freeze running stats (mean and var). Note: Effect on Batch Norm
and its variants only. Default: False.
with_cp (bool): Use checkpoint or not. Using checkpoint will save some
memory while slowing down the training speed. Default: False.
zero_init_residual (bool): Whether to use zero init for last norm layer
in resblocks to let them behave as identity. Default: True.
Example:
>>> from mmpose.models import SEResNet
>>> import torch
>>> self = SEResNet(depth=50)
>>> self.eval()
>>> inputs = torch.rand(1, 3, 224, 224)
>>> level_outputs = self.forward(inputs)
>>> for level_out in level_outputs:
... print(tuple(level_out.shape))
(1, 64, 56, 56)
(1, 128, 28, 28)
(1, 256, 14, 14)
(1, 512, 7, 7)
"""
arch_settings = {
50: (SEBottleneck, (3, 4, 6, 3)),
101: (SEBottleneck, (3, 4, 23, 3)),
152: (SEBottleneck, (3, 8, 36, 3))
}
def __init__(self, depth, se_ratio=16, **kwargs):
if depth not in self.arch_settings:
raise KeyError(f'invalid depth {depth} for SEResNet')
self.se_ratio = se_ratio
super().__init__(depth, **kwargs)
def make_res_layer(self, **kwargs):
return ResLayer(se_ratio=self.se_ratio, **kwargs)
| 36.208 | 79 | 0.602077 |
4a2094ccdaaf07899a5c48fa398a7eb09a6acb1a | 25,378 | py | Python | src/cogent3/evolve/fast_distance.py | GavinHuttley/cogent3 | efb25e27000732294ae3471428dea351a531267b | [
"BSD-3-Clause"
] | null | null | null | src/cogent3/evolve/fast_distance.py | GavinHuttley/cogent3 | efb25e27000732294ae3471428dea351a531267b | [
"BSD-3-Clause"
] | 9 | 2021-07-07T19:10:31.000Z | 2022-01-23T08:31:30.000Z | src/cogent3/evolve/fast_distance.py | genomematt/cogent3 | 7594710560a148164d64fdb9231aefd3f5b33ee2 | [
"BSD-3-Clause"
] | null | null | null | from collections import defaultdict, namedtuple
from numbers import Number
import numpy
from numpy import array, diag, dot, eye, float64, int32, log, sqrt, zeros
from numpy.linalg import det, inv
from cogent3 import DNA, RNA, get_moltype
from cogent3.util.dict_array import DictArray
from cogent3.util.misc import get_object_provenance
from cogent3.util.progress_display import display_wrap
from .pairwise_distance_numba import fill_diversity_matrix
__author__ = "Gavin Huttley, Yicheng Zhu and Ben Kaehler"
__copyright__ = "Copyright 2007-2021, The Cogent Project"
__credits__ = ["Gavin Huttley", "Yicheng Zhu", "Ben Kaehler"]
__license__ = "BSD-3"
__version__ = "2021.10.12a1"
__maintainer__ = "Gavin Huttley"
__email__ = "[email protected]"
__status__ = "Alpha" # pending addition of protein distance metrics
def _same_moltype(ref, query):
"""if ref and query have the same states"""
return set(ref) == set(query)
def get_pyrimidine_indices(moltype):
"""returns pyrimidine indices for the moltype"""
states = list(moltype)
if _same_moltype(RNA, moltype):
return list(map(states.index, "CU"))
elif _same_moltype(DNA, moltype):
return list(map(states.index, "CT"))
else:
raise RuntimeError("Non-nucleic acid MolType")
def get_purine_indices(moltype):
"""returns purine indices for the moltype"""
states = list(moltype)
if not _same_moltype(RNA, moltype) and not _same_moltype(DNA, moltype):
raise RuntimeError("Non-nucleic acid MolType")
return list(map(states.index, "AG"))
def get_matrix_diff_coords(indices):
"""returns coordinates for off diagonal elements"""
return [(i, j) for i in indices for j in indices if i != j]
def get_moltype_index_array(moltype, invalid=-9):
"""returns the index array for a molecular type"""
canonical_chars = list(moltype)
# maximum ordinal for an allowed character, this defines the length of
# the required numpy array
max_ord = max(list(map(ord, list(moltype.All.keys()))))
char_to_index = zeros(max_ord + 1, int32)
# all non canonical_chars are ``invalid''
char_to_index.fill(invalid)
for i in range(len(canonical_chars)):
c = canonical_chars[i]
o = ord(c)
char_to_index[o] = i
return char_to_index
def seq_to_indices(seq, char_to_index):
"""returns an array with sequence characters replaced by their index"""
ords = list(map(ord, seq))
return char_to_index.take(ords)
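# A minimal usage sketch (illustrative only; the sequence literal is made up):
#
#     char_to_index = get_moltype_index_array(DNA)      # DNA is imported above
#     indexed = seq_to_indices("ACGGT", char_to_index)  # indices into the canonical states
#
# Characters outside the canonical states map to the `invalid` value (-9 by
# default) and are skipped later when the diversity matrix is filled.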
def _fill_diversity_matrix(matrix, seq1, seq2):
"""fills the diversity matrix for valid positions.
Assumes the provided sequences have been converted to indices with
invalid characters being negative numbers (use get_moltype_index_array
plus seq_to_indices)."""
paired = array([seq1, seq2]).T
paired = paired[paired.min(axis=1) >= 0]
for i in range(len(paired)):
matrix[paired[i][0], paired[i][1]] += 1
def _hamming(matrix):
"""computes the edit distance
Parameters
----------
matrix : array
2D numpy array of counts
Returns
-------
total of the matrix, the proportion of changes, hamming distance, variance
(the variance calculation is not yet implemented)
"""
# todo implement the estimate of the variance
invalid = None, None, None, None
total = matrix.sum()
dist = total - diag(matrix).sum()
if total == 0:
return invalid
p = dist / total
return total, p, dist, None
def _jc69_from_matrix(matrix):
"""computes JC69 stats from a diversity matrix"""
invalid = None, None, None, None
total = matrix.sum()
diffs = total - diag(matrix).sum()
if total == 0:
return invalid
p = diffs / total
if p >= 0.75: # cannot take log
return invalid
factor = 1 - (4 / 3) * p
dist = -3.0 * log(factor) / 4
var = p * (1 - p) / (factor * factor * total)
return total, p, dist, var
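# Recap of the JC69 estimator implemented above: with p the proportion of
# differing sites and n the number of aligned, valid positions,
#     d = -(3/4) * ln(1 - (4/3) * p)
#     Var(d) = p * (1 - p) / (n * (1 - (4/3) * p)**2)
# which matches the `dist` and `var` expressions returned here.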
def _tn93_from_matrix(
matrix, freqs, pur_indices, pyr_indices, pur_coords, pyr_coords, tv_coords
):
invalid = None, None, None, None
total = matrix.sum()
freqs = matrix.sum(axis=0) + matrix.sum(axis=1)
freqs /= 2 * total
if total == 0:
return invalid
p = matrix.take(pur_coords + pyr_coords + tv_coords).sum() / total
freq_purs = freqs.take(pur_indices).sum()
prod_purs = freqs.take(pur_indices).prod()
freq_pyrs = freqs.take(pyr_indices).sum()
prod_pyrs = freqs.take(pyr_indices).prod()
# purine transition diffs
pur_ts_diffs = matrix.take(pur_coords).sum()
pur_ts_diffs /= total
# pyr transition diffs
pyr_ts_diffs = matrix.take(pyr_coords).sum()
pyr_ts_diffs /= total
# transversions
tv_diffs = matrix.take(tv_coords).sum() / total
coeff1 = 2 * prod_purs / freq_purs
coeff2 = 2 * prod_pyrs / freq_pyrs
coeff3 = 2 * (
freq_purs * freq_pyrs
- (prod_purs * freq_pyrs / freq_purs)
- (prod_pyrs * freq_purs / freq_pyrs)
)
term1 = 1 - pur_ts_diffs / coeff1 - tv_diffs / (2 * freq_purs)
term2 = 1 - pyr_ts_diffs / coeff2 - tv_diffs / (2 * freq_pyrs)
term3 = 1 - tv_diffs / (2 * freq_purs * freq_pyrs)
if term1 <= 0 or term2 <= 0 or term3 <= 0: # log will fail
return invalid
dist = -coeff1 * log(term1) - coeff2 * log(term2) - coeff3 * log(term3)
v1 = 1 / term1
v2 = 1 / term2
v3 = 1 / term3
v4 = (
(coeff1 * v1 / (2 * freq_purs))
+ (coeff2 * v2 / (2 * freq_pyrs))
+ (coeff3 * v3 / (2 * freq_purs * freq_pyrs))
)
var = (
v1 ** 2 * pur_ts_diffs
+ v2 ** 2 * pyr_ts_diffs
+ v4 ** 2 * tv_diffs
- (v1 * pur_ts_diffs + v2 * pyr_ts_diffs + v4 * tv_diffs) ** 2
)
var /= total
return total, p, dist, var
def _logdetcommon(matrix):
invalid = (None,) * 5
total = matrix.sum()
diffs = total - matrix.diagonal().sum()
if total == 0:
return invalid
p = diffs / total
    if diffs == 0: # seqs identical
return invalid
# we replace the missing diagonal states with a frequency of 0.5,
# then normalise
frequency = matrix.copy()
frequency[(frequency == 0) * eye(*matrix.shape, dtype=bool)] = 0.5
frequency /= frequency.sum()
if det(frequency) <= 0: # if the result is nan
return invalid
# the inverse matrix of frequency, every element is squared
M_matrix = inv(frequency) ** 2
freqs = [frequency.sum(axis=axis) for axis in (0, 1)]
var_term = dot(M_matrix, frequency).diagonal().sum()
return total, p, frequency, freqs, var_term
def _paralinear(matrix):
"""the paralinear distance from a diversity matrix"""
invalid = (None,) * 4
total, p, frequency, freqs, var_term = _logdetcommon(matrix)
if frequency is None:
return invalid
r = matrix.shape[0]
d_xy = -log(det(frequency) / sqrt((freqs[0] * freqs[1]).prod())) / r
var = (var_term - (1 / sqrt(freqs[0] * freqs[1])).sum()) / (r ** 2 * total)
return total, p, d_xy, var
def _logdet(matrix, use_tk_adjustment=True):
"""returns the LogDet from a diversity matrix
Parameters
----------
use_tk_adjustment
when True, unequal state frequencies are allowed
"""
invalid = (None,) * 4
total, p, frequency, freqs, var_term = _logdetcommon(matrix)
if frequency is None:
return invalid
r = matrix.shape[0]
if use_tk_adjustment:
coeff = (sum(sum(freqs) ** 2) / 4 - 1) / (r - 1)
d_xy = coeff * log(det(frequency) / sqrt((freqs[0] * freqs[1]).prod()))
var = None
else:
d_xy = -log(det(frequency)) / r - log(r)
var = (var_term / r ** 2 - 1) / total
return total, p, d_xy, var
def _number_formatter(template):
"""flexible number formatter"""
def call(val):
try:
result = template % val
except TypeError:
result = val
return result
return call
Stats = namedtuple("Stats", ["length", "fraction_variable", "dist", "variance"])
def _make_stat_table(stats, names, **kwargs):
from cogent3.util.table import Table
header = [r"Seq1 \ Seq2"] + names
rows = zeros((len(names), len(names)), dtype="O")
for i in range(len(names) - 1):
n1 = names[i]
for j in range(i + 1, len(names)):
n2 = names[j]
val = stats[(n1, n2)]
rows[i, j] = val
rows[j, i] = val
rows = rows.tolist()
for i in range(len(names)):
rows[i].insert(0, names[i])
return Table(
header=header, data=rows, index_name=r"Seq1 \ Seq2", missing_data="*", **kwargs
)
class _PairwiseDistance(object):
"""base class for computing pairwise distances"""
valid_moltypes = ()
def __init__(self, moltype, invalid=-9, alignment=None, invalid_raises=False):
super(_PairwiseDistance, self).__init__()
moltype = get_moltype(moltype)
if moltype.label not in self.valid_moltypes:
name = self.__class__.__name__
msg = (
f"Invalid moltype for {name}: '{moltype.label}' not "
f"in {self.valid_moltypes}"
)
raise ValueError(msg)
self.moltype = moltype
self.char_to_indices = get_moltype_index_array(moltype, invalid=invalid)
self._dim = len(list(moltype))
self._dists = None
self._dupes = None
self._duped = None
self._invalid_raises = invalid_raises
self.names = None
self.indexed_seqs = None
if alignment is not None:
self._convert_seqs_to_indices(alignment)
self._func_args = []
def _convert_seqs_to_indices(self, alignment):
assert isinstance(
alignment.moltype, type(self.moltype)
), "Alignment does not have correct MolType"
self._dists = {}
self.names = alignment.names[:]
indexed_seqs = []
for name in self.names:
seq = alignment.get_gapped_seq(name)
indexed = seq_to_indices(str(seq), self.char_to_indices)
indexed_seqs.append(indexed)
self.indexed_seqs = array(indexed_seqs)
@property
def duplicated(self):
"""returns mapping of IDs to duplicates as {id:[dupe1, ..], },
or None"""
return self._duped
@staticmethod
def func():
pass # over ride in subclasses
@display_wrap
def run(self, alignment=None, ui=None):
"""computes the pairwise distances"""
self._dupes = None
self._duped = None
dupes = set()
duped = defaultdict(list)
if alignment is not None:
self._convert_seqs_to_indices(alignment)
names = self.names[:]
matrix = zeros((self._dim, self._dim), float64)
off_diag = [
(i, j) for i in range(self._dim) for j in range(self._dim) if i != j
]
off_diag = tuple([tuple(a) for a in zip(*off_diag)])
done = 0.0
to_do = (len(names) * len(names) - 1) / 2
for i in range(len(names) - 1):
if i in dupes:
continue
name_1 = names[i]
s1 = self.indexed_seqs[i]
for j in range(i + 1, len(names)):
if j in dupes:
continue
name_2 = names[j]
ui.display("%s vs %s" % (name_1, name_2), done / to_do)
done += 1
matrix.fill(0)
s2 = self.indexed_seqs[j]
fill_diversity_matrix(matrix, s1, s2)
if not (matrix[off_diag] > 0).any():
# j is a duplicate of i
dupes.update([j])
duped[i].append(j)
continue
total, p, dist, var = self.func(matrix, *self._func_args)
if self._invalid_raises and not isinstance(dist, Number):
msg = f"distance could not be calculated for {name_1} - {name_2}"
raise ArithmeticError(msg)
result = Stats(total, p, dist, var)
self._dists[(name_1, name_2)] = result
self._dists[(name_2, name_1)] = result
self._dupes = [names[i] for i in dupes] or None
if duped:
self._duped = {}
for k, v in duped.items():
key = names[k]
vals = [names[i] for i in v]
self._duped[key] = vals
# clean the distances so only unique seqs included
remove = set(self._dupes)
keys = list(self._dists.keys())
for key in keys:
if set(key) & remove:
del self._dists[key]
__call__ = run
def get_pairwise_distances(self, include_duplicates=True):
"""returns a matrix of pairwise distances.
Parameters
----------
include_duplicates : bool
all seqs included in the distances, otherwise only unique sequences
are included.
"""
if self._dists is None:
return None
dists = {k: self._dists[k].dist for k in self._dists}
if include_duplicates:
dists = self._expand(dists)
return DistanceMatrix(dists)
def _expand(self, pwise):
"""returns a pwise statistic dict that includes duplicates"""
if not self.duplicated:
# no duplicates, nothing to do
return pwise
redundants = {}
for k in self.duplicated:
for r in self.duplicated[k]:
redundants[r] = k
names = self.names[:]
for add, alias in redundants.items():
for name in names:
if name == add:
continue
if name == alias:
val = 0
else:
val = pwise.get((alias, name), None)
pwise[(add, name)] = pwise[(name, add)] = val
return pwise
@property
def dists(self):
if self._dists is None:
return None
return self.get_pairwise_distances(include_duplicates=True)
@property
def stderr(self):
if self._dists is None:
return None
stats = {k: sqrt(self._dists[k].variance) for k in self._dists}
stats = self._expand(stats)
kwargs = dict(title="Standard Error of Pairwise Distances", digits=4)
return _make_stat_table(stats, self.names, **kwargs)
@property
def variances(self):
if self._dists is None:
return None
stats = {k: self._dists[k].variance for k in self._dists}
stats = self._expand(stats)
kwargs = dict(title="Variances of Pairwise Distances", digits=4)
t = _make_stat_table(stats, self.names, **kwargs)
var_formatter = _number_formatter("%.2e")
for name in self.names:
t.format_column(name, var_formatter)
return t
@property
def proportions(self):
if self._dists is None:
return None
stats = {k: self._dists[k].fraction_variable for k in self._dists}
stats = self._expand(stats)
kwargs = dict(title="Proportion variable sites", digits=4)
return _make_stat_table(stats, self.names, **kwargs)
@property
def lengths(self):
if self._dists is None:
return None
stats = {k: self._dists[k].length for k in self._dists}
stats = self._expand(stats)
kwargs = dict(title="Pairwise Aligned Lengths", digits=0)
return _make_stat_table(stats, self.names, **kwargs)
class HammingPair(_PairwiseDistance):
"""Hamming distance calculator for pairwise alignments"""
valid_moltypes = ("dna", "rna", "protein", "text", "bytes")
def __init__(self, moltype="text", *args, **kwargs):
"""states: the valid sequence states"""
super(HammingPair, self).__init__(moltype, *args, **kwargs)
self.func = _hamming
class PercentIdentityPair(_PairwiseDistance):
"""Percent identity distance calculator for pairwise alignments"""
valid_moltypes = ("dna", "rna", "protein", "text", "bytes")
def __init__(self, moltype="text", *args, **kwargs):
"""states: the valid sequence states"""
super(PercentIdentityPair, self).__init__(moltype, *args, **kwargs)
self.func = _hamming
def get_pairwise_distances(self, include_duplicates=True):
"""returns a matrix of pairwise distances.
Parameters
----------
include_duplicates : bool
all seqs included in the distances, otherwise only unique sequences
are included.
"""
if self._dists is None:
return None
dists = {k: self._dists[k].fraction_variable for k in self._dists}
if include_duplicates:
dists = self._expand(dists)
return DistanceMatrix(dists)
class _NucleicSeqPair(_PairwiseDistance):
"""base class pairwise distance calculator for nucleic acid seqs"""
valid_moltypes = ("dna", "rna")
def __init__(self, moltype="dna", *args, **kwargs):
super(_NucleicSeqPair, self).__init__(moltype, *args, **kwargs)
if not _same_moltype(DNA, self.moltype) and not _same_moltype(
RNA, self.moltype
):
raise RuntimeError("Invalid MolType for this metric")
class JC69Pair(_NucleicSeqPair):
"""JC69 distance calculator for pairwise alignments"""
def __init__(self, moltype="dna", *args, **kwargs):
"""states: the valid sequence states"""
super(JC69Pair, self).__init__(moltype, *args, **kwargs)
self.func = _jc69_from_matrix
class TN93Pair(_NucleicSeqPair):
"""TN93 calculator for pairwise alignments"""
def __init__(self, moltype="dna", *args, **kwargs):
"""states: the valid sequence states"""
super(TN93Pair, self).__init__(moltype, *args, **kwargs)
self._freqs = zeros(self._dim, float64)
self.pur_indices = get_purine_indices(self.moltype)
self.pyr_indices = get_pyrimidine_indices(self.moltype)
# matrix coordinates
self.pyr_coords = get_matrix_diff_coords(self.pyr_indices)
self.pur_coords = get_matrix_diff_coords(self.pur_indices)
self.tv_coords = get_matrix_diff_coords(list(range(self._dim)))
for coord in self.pur_coords + self.pyr_coords:
self.tv_coords.remove(coord)
# flattened
self.pyr_coords = [i * 4 + j for i, j in self.pyr_coords]
self.pur_coords = [i * 4 + j for i, j in self.pur_coords]
self.tv_coords = [i * 4 + j for i, j in self.tv_coords]
self.func = _tn93_from_matrix
self._func_args = [
self._freqs,
self.pur_indices,
self.pyr_indices,
self.pur_coords,
self.pyr_coords,
self.tv_coords,
]
class LogDetPair(_PairwiseDistance):
"""computes logdet distance between sequence pairs"""
valid_moltypes = ("dna", "rna", "protein")
def __init__(self, moltype="dna", use_tk_adjustment=True, *args, **kwargs):
"""Arguments:
- moltype: string or moltype instance (must be dna or rna)
- use_tk_adjustment: use the correction of Tamura and Kumar 2002
"""
super(LogDetPair, self).__init__(moltype, *args, **kwargs)
self.func = _logdet
self._func_args = [use_tk_adjustment]
def run(self, use_tk_adjustment=None, *args, **kwargs):
if use_tk_adjustment is not None:
self._func_args = [use_tk_adjustment]
super(LogDetPair, self).run(*args, **kwargs)
class ParalinearPair(_PairwiseDistance):
"""computes the paralinear distance (Lake 1994) between sequence pairs"""
valid_moltypes = ("dna", "rna", "protein")
def __init__(self, moltype="dna", *args, **kwargs):
super(ParalinearPair, self).__init__(moltype, *args, **kwargs)
self.func = _paralinear
_calculators = {
"paralinear": ParalinearPair,
"logdet": LogDetPair,
"jc69": JC69Pair,
"tn93": TN93Pair,
"hamming": HammingPair,
"percent": PercentIdentityPair,
}
def get_distance_calculator(name, *args, **kwargs):
"""returns a pairwise distance calculator
name is converted to lower case"""
name = name.lower()
if "moltype" in kwargs and kwargs.get("moltype") is None:
kwargs.pop("moltype")
if name not in _calculators:
raise ValueError('Unknown pairwise distance calculator "%s"' % name)
calc = _calculators[name]
return calc(*args, **kwargs)
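# A minimal usage sketch (`aln` is a hypothetical cogent3 alignment, not defined
# in this module):
#
#     calc = get_distance_calculator("tn93", moltype="dna")
#     calc.run(aln)                         # or calc(aln); computes pairwise stats
#     dmat = calc.get_pairwise_distances()  # DistanceMatrix of the estimates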
def available_distances():
"""returns Table listing available fast pairwise genetic distance calculator
Notes
-----
For more complicated genetic distance methods, see the evolve.models module.
"""
from cogent3.util.table import Table
rows = []
for n, c in _calculators.items():
rows.append([n, ", ".join(c.valid_moltypes)])
table = Table(
header=["Abbreviation", "Suitable for moltype"],
data=rows,
title=(
"Specify a pairwise genetic distance calculator "
"using 'Abbreviation' (case insensitive)."
),
index_name="Abbreviation",
)
return table
class DistanceMatrix(DictArray):
"""pairwise distance matrix"""
def __init__(self, dists, invalid=None):
super(DistanceMatrix, self).__init__(dists, dtype=float)
self._invalid = invalid
def __setitem__(self, names, value):
(index, remaining) = self.template.interpret_index(names)
self.array[index] = value
return
def __getitem__(self, names):
(index, remaining) = self.template.interpret_index(names)
result = self.array[index]
if remaining is not None:
result = self.__class__(result, remaining)
self.template.names = array(self.template.names)[index]
result.template = self.template
return result
@property
def names(self):
return self.template.names[0]
def to_table(self):
"""converted to a Table"""
from cogent3.util.table import Table
data = {"names": self.names}
for i, name in enumerate(self.names):
column = self.array[:, i]
data[name] = column
header = ["names"] + list(self.names)
return Table(header=header, data=data, index_name="names")
def to_dict(self, **kwargs):
"""Returns a flattened dict with diagonal elements removed"""
result = super(DistanceMatrix, self).to_dict(flatten=True)
for n1 in self.names:
del result[(n1, n1)]
return result
def to_rich_dict(self):
# because dicts with tuples as keys cannot be json'ed, we convert to
# a list of tuples
dists = self.to_dict()
json_safe = [(k[0], k[1], dists[k]) for k in dists]
return dict(
dists=json_safe,
invalid=self._invalid,
type=get_object_provenance(self),
version=__version__,
)
def take_dists(self, names, negate=False):
"""
Parameters
----------
names
series of names
negate : bool
if True, elements in names will be excluded
Returns
-------
DistanceMatrix for names x names
"""
if type(names) == str:
names = [names]
current_names = array(self.names)
if negate:
keep = [i for i, n in enumerate(current_names) if n not in names]
else:
keep = [i for i, n in enumerate(current_names) if n in names]
data = self.array.take(keep, axis=0)
data = data.take(keep, axis=1)
names = current_names.take(keep)
dists = {
(names[i], names[j]): data[i, j]
for i in range(len(names))
for j in range(len(names))
if i != j
}
if not dists:
result = None
else:
result = self.__class__(dists)
return result
def drop_invalid(self):
"""drops all rows / columns with an invalid entry"""
if (
self.shape[0] != self.shape[1]
or self.template.names[0] != self.template.names[1]
):
raise RuntimeError("Must be a square matrix")
names = array(self.names)
# NaN is an invalid value
cols = numpy.isnan(self.array).sum(axis=0)
exclude = names[cols != 0].tolist()
rows = numpy.isnan(self.array).sum(axis=1)
exclude += names[rows != 0].tolist()
exclude = set(exclude)
keep = set(names) ^ exclude
return self.take_dists(keep)
def quick_tree(self, show_progress=False):
"""returns a neighbour joining tree
Returns
-------
an estimated Neighbour Joining Tree, note that invalid distances are dropped
prior to building the tree
"""
from cogent3.phylo.nj import nj
dists = self.drop_invalid()
if not dists or dists.shape[0] == 1:
raise ValueError("Too few distances to build a treenj")
dists = dists.to_dict()
return nj(dists, show_progress=show_progress)
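# A minimal usage sketch (the distances below are made-up values):
#
#     dmat = DistanceMatrix({("a", "b"): 0.1, ("b", "a"): 0.1,
#                            ("a", "c"): 0.2, ("c", "a"): 0.2,
#                            ("b", "c"): 0.3, ("c", "b"): 0.3})
#     sub = dmat.take_dists(["a", "b"])  # restrict to a subset of names
#     tree = dmat.quick_tree()           # NJ tree; invalid entries dropped first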
| 30.68682 | 87 | 0.599929 |
4a2095b376a4e35729544bf3e95f58bdfa7cf3f3 | 971 | py | Python | python/test/test_attachment_created_event.py | dlens/dlxapi | 189a6519240ce625d7a9cdb89e305a335d2aa045 | [
"MIT"
] | null | null | null | python/test/test_attachment_created_event.py | dlens/dlxapi | 189a6519240ce625d7a9cdb89e305a335d2aa045 | [
"MIT"
] | 1 | 2020-08-20T17:31:43.000Z | 2020-08-20T17:31:43.000Z | python/test/test_attachment_created_event.py | dlens/dlxapi | 189a6519240ce625d7a9cdb89e305a335d2aa045 | [
"MIT"
] | null | null | null | # coding: utf-8
"""
Decision Lens API
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) # noqa: E501
OpenAPI spec version: 1.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import unittest
import dlxapi
from dlxapi.models.attachment_created_event import AttachmentCreatedEvent # noqa: E501
from dlxapi.rest import ApiException
class TestAttachmentCreatedEvent(unittest.TestCase):
"""AttachmentCreatedEvent unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def testAttachmentCreatedEvent(self):
"""Test AttachmentCreatedEvent"""
# FIXME: construct object with mandatory attributes with example values
# model = dlxapi.models.attachment_created_event.AttachmentCreatedEvent() # noqa: E501
pass
if __name__ == '__main__':
unittest.main()
| 23.682927 | 119 | 0.719876 |
4a209695b9be53ddf31a7134d31a3f26a8b7a2a3 | 1,183 | py | Python | plugins/urls/test_plugin.py | dkim286/Cardinal | 716d38fae5c992315e8087de74ad0db2947d0d48 | [
"MIT"
] | 96 | 2015-01-24T00:00:45.000Z | 2022-03-15T14:06:57.000Z | plugins/urls/test_plugin.py | dkim286/Cardinal | 716d38fae5c992315e8087de74ad0db2947d0d48 | [
"MIT"
] | 166 | 2015-01-02T02:30:24.000Z | 2022-03-24T20:03:55.000Z | plugins/urls/test_plugin.py | dkim286/Cardinal | 716d38fae5c992315e8087de74ad0db2947d0d48 | [
"MIT"
] | 63 | 2015-02-13T06:46:01.000Z | 2022-02-24T10:50:34.000Z | import re
import pytest
from . import plugin
class TestURLRegex:
@staticmethod
def assertFindUrl(message, url):
m = plugin.get_urls(message)
assert len(m) == 1
assert m[0] == url
@pytest.mark.parametrize("url,expected", [
["http://tiny.cc/PiratesLive", "http://tiny.cc/PiratesLive"],
["http://tiny.cc/PiratesLive\x0f", "http://tiny.cc/PiratesLive"],
["http://tiny.cc/PiratesLive\x0f\x0f", "http://tiny.cc/PiratesLive"],
["\x1fhttp://tiny.cc/PiratesLive\x0f", "http://tiny.cc/PiratesLive"],
["\x1f\x0f\x0fhttp://tiny.cc/PiratesLive\x0f", "http://tiny.cc/PiratesLive"],
["\x1f\x0f\x0fhttp://tiny.cc/PiratesLive", "http://tiny.cc/PiratesLive"],
])
def test_url_cant_contain_control_characters(self, url, expected):
self.assertFindUrl(url, expected)
@pytest.mark.parametrize("url", [
"http://google.com/",
"http://google.google/",
"google.google",
"google.com",
"https://google.com/",
"https://mail.google.com/u/0",
"http://tiny.cc/PiratesLive",
])
def test_valid(self, url):
self.assertFindUrl(url, url)
| 31.972973 | 85 | 0.602705 |
4a2096ecc7e1a006940ad53e8574834be62df043 | 2,867 | py | Python | comment_parser/parsers/tests/go_parser_test.py | rhp94/comment_parser | f373a01b5023b20e7e73d05382b1e26f92e00e68 | [
"MIT"
] | 1 | 2018-04-01T16:08:30.000Z | 2018-04-01T16:08:30.000Z | comment_parser/parsers/tests/go_parser_test.py | johnjchung/comment_parser | aeb892816f008fc7d74625d6b617a655581d4487 | [
"MIT"
] | null | null | null | comment_parser/parsers/tests/go_parser_test.py | johnjchung/comment_parser | aeb892816f008fc7d74625d6b617a655581d4487 | [
"MIT"
] | null | null | null | #!/usr/bin/python
"""Tests for comment_parser.parsers.go_parser.py"""
from comment_parser.parsers import common as common
from comment_parser.parsers import go_parser as go_parser
import unittest
import builtins
from unittest import mock
from io import StringIO
class GoParserTest(unittest.TestCase):
@mock.patch.object(builtins, 'open')
def ExtractComments(self, text, mock_open):
mock_file = StringIO(text)
mock_open.return_value = mock_file
return go_parser.extract_comments('filename')
def testSingleLineComment(self):
text = '// single line comment'
comments = self.ExtractComments(text)
expected = [common.Comment(text[2:], 1, multiline=False)]
self.assertEqual(comments, expected)
def testSingleLineCommentInRuneLiteral(self):
text = "msg := '// this is not a comment'"
comments = self.ExtractComments(text)
self.assertEqual(comments, [])
def testSingleLineCommentInBackTickedLiteral(self):
text = "msg := `// this is not a comment`"
comments = self.ExtractComments(text)
self.assertEqual(comments, [])
def testSingleLineCommentInDoubleQuotedStringLiteral(self):
text = 'msg := "// this is not a comment"'
comments = self.ExtractComments(text)
self.assertEqual(comments, [])
def testMultiLineComment(self):
text = '/* multiline\ncomment */'
comments = self.ExtractComments(text)
expected = [common.Comment(text[2:-2], 1, multiline=True)]
self.assertEqual(comments, expected)
def testMultiLineCommentWithStars(self):
text = "/***************/"
comments = self.ExtractComments(text)
expected = [common.Comment(text[2:-2], 1, multiline=True)]
self.assertEqual(comments, expected)
def testMultiLineCommentInRuneLiteral(self):
text = "msg := '/* This is not a\\nmultiline comment */'"
comments = self.ExtractComments(text)
self.assertEqual(comments, [])
def testMultiLineCommentInDoubleQuotedStringLiteral(self):
text = 'msg := "/* This is not a\\nmultiline comment */"'
comments = self.ExtractComments(text)
self.assertEqual(comments, [])
def testMultiLineCommentInBackTickedLiteral(self):
text = 'msg := `/* This is not a\\nmultiline comment */`'
comments = self.ExtractComments(text)
self.assertEqual(comments, [])
def testMultiLineCommentUnterminated(self):
text = 'a := 1 /* Unterminated\\n comment'
self.assertRaises(
common.UnterminatedCommentError, self.ExtractComments, text)
@mock.patch.object(builtins, 'open')
def testExtractCommentsFileError(self, mock_open):
mock_open.side_effect = FileNotFoundError()
self.assertRaises(common.FileError, go_parser.extract_comments, '')
| 36.75641 | 75 | 0.67248 |
4a20971c77b0c59e80151e18031218d003ff2f2d | 6,905 | py | Python | apps/web/views.py | Anustup900/EvalAI | c7263300d1e80734551d6cd842b354ad664a8320 | [
"BSD-3-Clause"
] | 1 | 2020-10-17T20:35:10.000Z | 2020-10-17T20:35:10.000Z | apps/web/views.py | Anustup900/EvalAI | c7263300d1e80734551d6cd842b354ad664a8320 | [
"BSD-3-Clause"
] | 65 | 2020-12-14T08:27:18.000Z | 2022-03-26T21:12:42.000Z | apps/web/views.py | Anustup900/EvalAI | c7263300d1e80734551d6cd842b354ad664a8320 | [
"BSD-3-Clause"
] | 1 | 2021-02-13T17:47:16.000Z | 2021-02-13T17:47:16.000Z | import logging
import traceback
from base.utils import send_slack_notification
from django.contrib.auth.models import User
from django.conf import settings
from django.core.mail import EmailMessage
from django.shortcuts import render
from smtplib import SMTPException
from .models import Subscribers, Team
from .serializers import ContactSerializer, SubscribeSerializer, TeamSerializer
from rest_framework import permissions, status
from rest_framework.decorators import (
api_view,
permission_classes,
throttle_classes,
)
from rest_framework.response import Response
from rest_framework.throttling import AnonRateThrottle
logger = logging.getLogger(__name__)
def home(request, template_name="index.html"):
"""
Home Page View
"""
return render(request, template_name)
def page_not_found(request):
response = render(request, "error404.html")
response.status_code = 404
return response
def internal_server_error(request):
response = render(request, "error500.html")
response.status_code = 500
return response
def notify_users_about_challenge(request):
"""
Email New Challenge Details to EvalAI Users
"""
if request.user.is_authenticated() and request.user.is_superuser:
if request.method == "GET":
template_name = "notification_email_data.html"
return render(request, template_name)
elif request.method == "POST":
users = User.objects.exclude(email__exact="").values_list(
"email", flat=True
)
subject = request.POST.get("subject")
body_html = request.POST.get("body")
sender = settings.CLOUDCV_TEAM_EMAIL
email = EmailMessage(
subject,
body_html,
sender,
[settings.CLOUDCV_TEAM_EMAIL],
bcc=users,
)
email.content_subtype = "html"
try:
email.send()
return render(
request,
"notification_email_conformation.html",
{"message": "All the emails are sent successfully!"},
)
except SMTPException:
logger.exception(traceback.format_exc())
return render(
request, "notification_email_data.html", {"errors": 1}
)
else:
return render(request, "error404.html")
else:
return render(request, "error404.html")
@api_view(["GET", "POST"])
@throttle_classes([AnonRateThrottle])
@permission_classes((permissions.AllowAny,))
def contact_us(request):
user_does_not_exist = False
try:
user = User.objects.get(username=request.user)
name = user.username
email = user.email
request_data = {"name": name, "email": email}
except User.DoesNotExist:
request_data = request.data
user_does_not_exist = True
if request.method == "POST" or user_does_not_exist:
if request.POST.get("message"):
request_data["message"] = request.POST.get("message")
serializer = ContactSerializer(data=request_data)
if serializer.is_valid():
serializer.save()
response_data = {
"message": "We have received your request and will contact you shortly."
}
if not settings.DEBUG:
message = {
"text": "A *contact message* is received!",
"fields": [
{
"title": "Name",
"value": request.data["name"],
"short": True,
},
{
"title": "Email",
"value": request.data["email"],
"short": True,
},
{
"title": "Message",
"value": request.data["message"],
"short": False,
},
],
}
send_slack_notification(message=message)
return Response(response_data, status=status.HTTP_201_CREATED)
return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
elif request.method == "GET":
response_data = {"name": name, "email": email}
return Response(response_data, status=status.HTTP_200_OK)
@api_view(["GET", "POST"])
@throttle_classes([AnonRateThrottle])
@permission_classes((permissions.AllowAny,))
def subscribe(request):
if request.method == "GET":
subscribers = Subscribers.objects.all().order_by("-pk")
serializer = SubscribeSerializer(
subscribers, many=True, context={"request": request}
)
response_data = serializer.data
return Response(response_data, status=status.HTTP_200_OK)
elif request.method == "POST":
email = request.data.get("email")
# When user has already subscribed
if Subscribers.objects.filter(email=email).exists():
response_data = {
"message": "You have already subscribed to EvalAI"
}
return Response(response_data, status=status.HTTP_400_BAD_REQUEST)
serializer = SubscribeSerializer(data=request.data)
if serializer.is_valid():
serializer.save()
            response_data = {
                "message": "You will be notified about our latest updates at {}.".format(
                    email
                )
            }
return Response(response_data, status=status.HTTP_201_CREATED)
return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
@api_view(["GET", "POST"])
@throttle_classes([AnonRateThrottle])
@permission_classes((permissions.AllowAny,))
def our_team(request):
if request.method == "GET":
teams = Team.objects.all().order_by("position")
serializer = TeamSerializer(
teams, many=True, context={"request": request}
)
response_data = serializer.data
return Response(response_data, status=status.HTTP_200_OK)
elif request.method == "POST":
# team_type is set to Team.CONTRIBUTOR by default and can be overridden by the requester
request.data["team_type"] = request.data.get(
"team_type", Team.CONTRIBUTOR
)
serializer = TeamSerializer(data=request.data)
if serializer.is_valid():
serializer.save()
response_data = {"message", "Successfully added the contributor."}
return Response(response_data, status=status.HTTP_201_CREATED)
return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
| 34.873737 | 96 | 0.590007 |
4a20973928c4986eadc2c72a2c2d796628ef1126 | 3,425 | py | Python | boxuegu/apps/users/adminx.py | boxuegushixun/boxuegu | d7d6d014bc85673d7db30936a3b1e07ba8b58d5d | [
"MIT"
] | 1 | 2019-06-13T10:08:25.000Z | 2019-06-13T10:08:25.000Z | boxuegu/apps/users/adminx.py | boxuegushixun/boxuegu | d7d6d014bc85673d7db30936a3b1e07ba8b58d5d | [
"MIT"
] | 6 | 2020-06-05T21:36:10.000Z | 2022-01-13T01:21:59.000Z | boxuegu/apps/users/adminx.py | boxuegushixun/boxuegu | d7d6d014bc85673d7db30936a3b1e07ba8b58d5d | [
"MIT"
] | null | null | null | # encoding: utf-8
from courses.models import Course, Video, Lesson, CourseResource
import xadmin
from django.contrib.auth.models import Group, Permission
from operation.models import CourseComments, UserFavorite, UserMessage, UserCourse, UserAsk
from organization.models import CityDict, Teacher, CourseOrg
from xadmin.models import Log
# 和X admin的view绑定
from xadmin import views
from .models import EmailVerifyRecord, Banner, UserProfile
class BaseSetting(object):
"""X admin的全局配置设置"""
# 主题功能开启
enable_themes = True
use_bootswatch = True
class GlobalSettings(object):
"""xadmin 全局配置参数信息设置"""
site_title = "博学谷后台管理站"
site_footer = "mtianyan's admin"
# 收起菜单
# menu_style = "accordion"
def get_site_menu(self):
return (
{'title': '学校管理', 'menus': (
{'title': '所在城市', 'url': self.get_model_url(CityDict, 'changelist')},
{'title': '学校信息', 'url': self.get_model_url(CourseOrg, 'changelist')},
{'title': '学校讲师', 'url': self.get_model_url(Teacher, 'changelist')},
)},
{'title': '课程管理', 'menus': (
{'title': '课程信息', 'url': self.get_model_url(Course, 'changelist')},
{'title': '章节信息', 'url': self.get_model_url(Lesson, 'changelist')},
{'title': '视频信息', 'url': self.get_model_url(Video, 'changelist')},
{'title': '课程资源', 'url': self.get_model_url(CourseResource, 'changelist')},
{'title': '课程评论', 'url': self.get_model_url(CourseComments, 'changelist')},
)},
{'title': '用户管理', 'menus': (
{'title': '用户信息', 'url': self.get_model_url(UserProfile, 'changelist')},
{'title': '用户验证', 'url': self.get_model_url(EmailVerifyRecord, 'changelist')},
{'title': '用户课程', 'url': self.get_model_url(UserCourse, 'changelist')},
{'title': '用户收藏', 'url': self.get_model_url(UserFavorite, 'changelist')},
{'title': '用户消息', 'url': self.get_model_url(UserMessage, 'changelist')},
)},
{'title': '系统管理', 'menus': (
{'title': '用户咨询', 'url': self.get_model_url(UserAsk, 'changelist')},
{'title': '首页轮播', 'url': self.get_model_url(Banner, 'changelist')},
{'title': '用户分组', 'url': self.get_model_url(Group, 'changelist')},
{'title': '用户权限', 'url': self.get_model_url(Permission, 'changelist')},
{'title': '日志记录', 'url': self.get_model_url(Log, 'changelist')},
)},
)
class EmailVerifyRecordAdmin(object):
"""创建admin的管理类,这里不再是继承admin,而是继承object"""
# 配置后台我们需要显示的列
list_display = ['code', 'email', 'send_type', 'send_time']
# 配置搜索字段,不做时间搜索
search_fields = ['code', 'email', 'send_type']
# 配置筛选字段
list_filter = ['code', 'email', 'send_type', 'send_time']
class BannerAdmin(object):
"""创建banner的管理类"""
list_display = ['title', 'image', 'url', 'index', 'add_time']
search_fields = ['title', 'image', 'url', 'index']
list_filter = ['title', 'image', 'url', 'index', 'add_time']
# 将model与admin管理器进行关联注册
xadmin.site.register(EmailVerifyRecord, EmailVerifyRecordAdmin)
xadmin.site.register(Banner, BannerAdmin)
# 将Xadmin全局管理器与我们的view绑定注册。
xadmin.site.register(views.BaseAdminView, BaseSetting)
# 将头部与脚部信息进行注册:
xadmin.site.register(views.CommAdminView, GlobalSettings)
| 37.228261 | 94 | 0.608759 |
4a20974424b5fc17dd43fd618bad75ba7910163f | 11,102 | py | Python | burger_war_dev/scripts/connech_main.py | Satori063/burger_war_dev | 68d8be082278fd55f5f5f2c7d883a4a7213f0564 | [
"BSD-3-Clause"
] | 3 | 2022-01-23T02:03:14.000Z | 2022-02-27T11:30:35.000Z | burger_war_dev/scripts/connech_main.py | YKKAP000/burger_war_dev | 668ec011bfa7d18fcfb1a2f2c99743edcb18fa0b | [
"BSD-3-Clause"
] | 1 | 2022-02-25T00:28:23.000Z | 2022-02-25T00:28:23.000Z | burger_war_dev/scripts/connech_main.py | Satori063/burger_war_dev | 68d8be082278fd55f5f5f2c7d883a4a7213f0564 | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import rospy
import random
import os
import math
import cv2
import tf
import angles
from geometry_msgs.msg import Twist, Pose, Point, Quaternion, PoseWithCovarianceStamped
from sensor_msgs.msg import Image
from sensor_msgs.msg import Imu
from sensor_msgs.msg import LaserScan
from sensor_msgs.msg import JointState
from nav_msgs.msg import Odometry
from std_msgs.msg import String
from cv_bridge import CvBridge, CvBridgeError
import actionlib
from move_base_msgs.msg import MoveBaseAction, MoveBaseGoal
import actionlib_msgs
from utils import readCsv
from camera import processImage, showImage
# Ref: https://hotblackrobotics.github.io/en/blog/2018/01/29/action-client-py/
# Respect seigot
class ConnechBot():
def __init__(self,
use_lidar=False, use_camera=False, use_imu=False,
use_odom=True, use_joint_states=False):
        self.goals = readCsv(os.path.dirname(__file__) + "/input/strategy2.csv")
self.yellow_flag = False
self.yellow_detected = False
# velocity publisher
self.vel_pub = rospy.Publisher('cmd_vel', Twist,queue_size=1)
self.client = actionlib.SimpleActionClient('move_base',MoveBaseAction)
self.listener = tf.TransformListener()
# lidar scan subscriber
if use_lidar:
self.scan = LaserScan()
self.lidar_sub = rospy.Subscriber('scan', LaserScan, self.lidarCallback)
        # camera subscriber
        # please uncomment if you use the camera
if use_camera:
# for convert image topic to opencv obj
self.img = None
self.bridge = CvBridge()
self.image_sub = rospy.Subscriber('image_raw', Image, self.imageCallback)
# self.image_pub = rospy.Publisher('output_image', Image, queue_size=1)
# imu subscriber
if use_imu:
self.imu_sub = rospy.Subscriber('imu', Imu, self.imuCallback)
# odom subscriber
if use_odom:
self.listener = tf.TransformListener()
self.enemy_position = Odometry()
self.enemy_info = [0.0, 0.0, 0.0]
self.detect_counter = 0
self.goal_pointID = 0
self.escape_pointID = -1
rospy.Subscriber('enemy_position', Odometry, self.enemylocationCallback)
self.odom_sub = rospy.Subscriber('odom', Odometry, self.odomCallback)
# joint_states subscriber
if use_joint_states:
self.odom_sub = rospy.Subscriber('joint_states', JointState, self.jointstateCallback)
# Respect seigot
def get_rosparam(self):
self.robot_namespace = rospy.get_param('~robot_namespace')
def setGoal(self, pose2d):
self.client.wait_for_server()
goal = MoveBaseGoal()
goal.target_pose.header.frame_id = "map"
goal.target_pose.header.stamp = rospy.Time.now()
goal.target_pose.pose.position.x = pose2d[0]
goal.target_pose.pose.position.y = pose2d[1]
        # Euler to Quaternion
q=tf.transformations.quaternion_from_euler(0,0,pose2d[2])
goal.target_pose.pose.orientation.x = q[0]
goal.target_pose.pose.orientation.y = q[1]
goal.target_pose.pose.orientation.z = q[2]
goal.target_pose.pose.orientation.w = q[3]
self.client.send_goal(goal)
wait = self.client.wait_for_result()
if not wait:
rospy.logerr("Action server not available!")
rospy.signal_shutdown("Action server not available!")
else:
return self.client.get_result()
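    # A minimal usage sketch (coordinates are made up for illustration):
    # pose2d is [x, y, yaw] in the map frame, with yaw in radians, e.g.
    #
    #     bot.setGoal([0.5, -0.5, 1.57])  # drive to (0.5, -0.5) facing ~ +90 deg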
    def cancelGoal(self):
# stop navigating
self.client.cancel_all_goals()
return
def patrol(self):
r = rospy.Rate(5) # change speed 5fps
if self.escape_pointID == -1:
if self.goal_pointID == len(self.goals) - 1:
self.goal_pointID = 0 # reset self.goal_pointID
goal = self.goal_pointID
self.goal_pointID = self.goal_pointID + 1
else:
goal = self.escape_pointID + 1
self.goal_pointID = goal # set next goalpoint
self.escape_pointID = -1 # reset self.escape_pointID
self.setGoal(self.goals[goal])
def escape(self):
escape_goals = self.setEscapepoint()
rospy.loginfo(self.goals[escape_goals])
self.setGoal(self.goals[escape_goals])
def setEscapepoint(self):
enemy_x = self.enemy_info[0] # detecting enemybot x
enemy_y = self.enemy_info[1] # detecting enemybot y
        enemy_direction = self.enemy_info[2] # detecting enemybot direction [rad]
# select escape point
if enemy_x > 0 and enemy_y > 0:
rospy.loginfo("escape point1")
self.escape_pointID = 1
elif enemy_x < 0 and enemy_y > 0:
rospy.loginfo("escape point2")
self.escape_pointID = 18
elif enemy_x < 0 and enemy_y < 0:
rospy.loginfo("escape point3")
self.escape_pointID = 8
elif enemy_x > 0 and enemy_y < 0:
rospy.loginfo("escape point4")
self.escape_pointID = 5
return self.escape_pointID
def detect_enemy(self):
state, distance, direction_deff = self.detect_enemylocation()
return state, distance, direction_deff
def listen_selflocation(self, frame1, frame2):
trans = []
rot = [] # quatarnion
try:
(trans, rot) = self.listener.lookupTransform(frame1, frame2, rospy.Time(0))
return True, trans, rot
except (tf.LookupException, tf.ConnectivityException, tf.ExtrapolationException):
#continue
return False, trans, rot
# Respect seigot
def detect_enemylocation(self):
# data management
time_width = 0.1
counter_width = 3
# time_diff = rospy.Time.now().to_sec() - self.enemy_position.header.stamp.to_sec()
# if time_diff > time_width:
# self.detect_counter = 0
# rospy.loginfo("is here")
# return False, 0.0, 0.0
# else:
# self.detect_counter = self.detect_counter+1
# if self.detect_counter < counter_width:
# rospy.loginfo("is here")
# return False, 0.0, 0.0
# set flame
map_frame = "map"
link_frame = "base_link"
# get self position
valid, trans, rot = self.listen_selflocation(map_frame, link_frame)
if valid == False:
# rospy.loginfo("Here tf False")
return False, 0.0, 0.0
# Calculating the distance from enemybot
dx = self.enemy_position.pose.pose.position.x - trans[0]
dy = self.enemy_position.pose.pose.position.y - trans[1]
distance = math.sqrt( pow(dx, 2) + pow(dy, 2) )
# Calculating the direction from enemybot
direction = math.atan2(dx, dy)
roll, pitch, yaw = tf.transformations.euler_from_quaternion(rot)
direction_diff = direction - yaw # radians
deg_direction_diff = (direction - yaw)*180/3.14159 # radians to degree
        self.enemy_info = [dx, dy, direction] # set enemybot information
rospy.loginfo("distance: {}".format(distance))
rospy.loginfo("direction: {}".format(direction))
# rospy.loginfo("direction_deff: {}".format(direction_diff))
# rospy.loginfo("deg_direction_diff: {}".format(deg_direction_diff))
return True, distance, direction_diff
# lidar scan topic call back sample
# update lidar scan state
def lidarCallback(self, data):
self.scan = data
# rospy.loginfo(self.scan)
# camera image call back sample
# comvert image topic to opencv object and show
def imageCallback(self, data):
if not self.yellow_flag and self.yellow_detected:
self.yellow_detected = False
try:
in_img = self.bridge.imgmsg_to_cv2(data, "bgr8")
except CvBridgeError as e:
rospy.logerr(e)
# color detection
self.yellow_flag, yellow_img = processImage(in_img, "yellow")
self.blue_flag, blue_img = processImage(in_img, "blue")
self.green_flag, green_img = processImage(in_img, "green")
self.red_flag, red_img = processImage(in_img, "red")
rospy.loginfo("YELLOW: {}".format(self.yellow_flag))
#rospy.loginfo("blue: {}".format(self.blue_flag))
#rospy.loginfo("green: {}".format(self.green_flag))
#rospy.loginfo("red: {}".format(self.red_flag))
# Show processed image on a Window
showImage(yellow_img)
#showImage(blue_img)
#showImage(green_img)
#showImage(red_img)
if self.yellow_flag:
if not self.yellow_detected:
print("YELLOW IS DETECTED!!!")
self.client.cancel_all_goals()
self.yellow_detected = True
twist = Twist()
twist.angular.z = -10
self.vel_pub.publish(twist)
# imu call back sample
# update imu state
def imuCallback(self, data):
self.imu = data
# rospy.loginfo(self.imu)
# odom call back sample
# update odometry state
def odomCallback(self, data):
self.pose_x = data.pose.pose.position.x
self.pose_y = data.pose.pose.position.y
# rospy.loginfo("odom pose_x: {}".format(self.pose_x))
# rospy.loginfo("odom pose_y: {}".format(self.pose_y))
# jointstate call back sample
# update joint state
def jointstateCallback(self, data):
self.wheel_rot_r = data.position[0]
self.wheel_rot_l = data.position[1]
# rospy.loginfo("joint_state R: {}".format(self.wheel_rot_r))
# rospy.loginfo("joint_state L: {}".format(self.wheel_rot_l))
def enemylocationCallback(self, position):
self.enemy_position = position
# rospy.loginfo("enemypos_x: {}".format(self.enemy_position.pose.pose.position.x))
# rospy.loginfo("enemypos_y: {}".format(self.enemy_position.pose.pose.position.y))
self.set_status()
def set_status(self):
# get enemy_detector result
state, distance, direction_deff = self.detect_enemy()
# non-detecting enemybot
if distance > 0.5:
rospy.loginfo("PATROL_MODE")
self.patrol()
# detected enemybot
else:
rospy.loginfo("ESCAPE_MODE")
            self.cancelGoal()
self.escape()
if __name__ == '__main__':
rospy.init_node('connechRun')
node = ConnechBot()
bot = ConnechBot(use_lidar=True, use_camera=True, use_imu=True, use_odom=True, use_joint_states=True)
rate = rospy.Rate(30)
while not rospy.is_shutdown():
node.set_status()
rate.sleep() | 36.281046 | 105 | 0.618087 |
4a2099ae46c4c4ee15f7c4d1bad8d36f198ea4f6 | 1,759 | py | Python | py12306/cluster/redis.py | Heliner/ticket_12306 | 28a97129dbc93eed2dd9e7922f8059ca63f123d6 | [
"Apache-2.0"
] | 4 | 2021-12-29T15:38:26.000Z | 2022-01-07T09:19:02.000Z | py12306/cluster/redis.py | Heliner/py12306 | 28a97129dbc93eed2dd9e7922f8059ca63f123d6 | [
"Apache-2.0"
] | null | null | null | py12306/cluster/redis.py | Heliner/py12306 | 28a97129dbc93eed2dd9e7922f8059ca63f123d6 | [
"Apache-2.0"
] | 1 | 2022-01-12T02:16:32.000Z | 2022-01-12T02:16:32.000Z | import pickle
from redis import Redis as PyRedis
from py12306.helpers.func import *
from py12306.inner_config import Config
from py12306.log.redis_log import RedisLog
@singleton
class Redis(PyRedis):
# session = None
def __init__(self, *args):
if Config.is_cluster_enabled():
args = {
'host': Config().REDIS_HOST,
'port': Config().REDIS_PORT,
'db': 0,
'password': Config().REDIS_PASSWORD,
'decode_responses': True
}
super().__init__(**args)
RedisLog.add_quick_log(RedisLog.MESSAGE_REDIS_INIT_SUCCESS)
else:
            super().__init__(*args)
def get(self, name, default=None):
res = super().get(name)
# if decode: res = res.decode()
return res if res else default
def set(self, name, value, ex=None, px=None, nx=False, xx=False):
return super().set(name, available_value(value), ex=ex, px=px, nx=nx, xx=xx)
def set_dict(self, name, value):
return self.set_pickle(name, value)
# return self.set(name, json.dumps(value))
def get_dict(self, name, default={}):
return self.get_pickle(name, default)
# res = self.get(name)
# if res:
# return json.loads(res)
# return default
def set_pickle(self, name, value):
return self.set(name, pickle.dumps(value, 0).decode())
def get_pickle(self, name, default=None):
res = self.get(name)
return pickle.loads(res.encode()) if res else default
# def smembers(self, name, default=[]):
# res = super().smembers(name)
# return [val.decode() for val in list(res)] if res else default
| 30.327586 | 84 | 0.592382 |
4a209a71023b79c7878f660c7b753b5936fa5d63 | 8,532 | py | Python | Cogs/setup.py | Code-Cecilia/BotMan.py | be0ed8c634f8aea6a8fd0b28875e5198047089ac | [
"MIT"
] | 4 | 2021-07-10T10:31:05.000Z | 2021-12-28T13:42:42.000Z | Cogs/setup.py | prakarsh17/BotMan.py | be0ed8c634f8aea6a8fd0b28875e5198047089ac | [
"MIT"
] | null | null | null | Cogs/setup.py | prakarsh17/BotMan.py | be0ed8c634f8aea6a8fd0b28875e5198047089ac | [
"MIT"
] | 5 | 2021-07-11T10:50:21.000Z | 2022-02-07T11:03:47.000Z | import json
import os
import discord
from discord.ext import commands
with open('config.json') as configFile:
configs = json.load(configFile)
prefix = configs.get('prefix')
class Setup(commands.Cog, description='Used to set up the bot for welcome messages, mute/unmute etc.'):
def __init__(self, bot):
self.bot = bot
@commands.command(name='setup', description='Used to set the bot up, for welcome messages, mute roles, etc.\n'
'Recommended to set the bot up as early as possible when it joins a '
'server.')
@commands.guild_only()
async def setup_welcome(self, ctx):
embed = discord.Embed(title='You can setup preferences for your server with these commands.',
timestamp=ctx.message.created_at,
color=discord.Color.random())
embed.add_field(name='Set channel for welcome messages',
value=f'`{prefix}setwelcomechannel [channel]`\nExample: `{prefix}setwelcomechannel #welcome`\n'
f'__**What you\'d see:**__\n'
f'{ctx.author.mention} has joined **{ctx.guild.name}**! Say hi!\n'
f'{ctx.author.mention} has left **{ctx.guild.name}**. Until Next time!',
inline=False)
embed.add_field(name='Set default reason when kicking/banning members',
value=f'`{prefix}setkickreason [reason]`\nExample: `{prefix}setkickreason Being a jerk`\n'
f'__**What the kicked member would see**__:\n'
f'You have been kicked from **{ctx.guild.name}** for **Being a jerk**.',
inline=False)
embed.add_field(name='Set the mute role for this server',
value=f'`{prefix}setmuterole [role]`\nExample: `{prefix}setmuterole muted` '
f'(muted must be an actual role).\n'
f'You can create a mute role by `{prefix}createmuterole [role name]`',
inline=False)
embed.add_field(name='Set the default Member role for this server',
value=f'`{prefix}setmemberrole [role]`\nExample: `{prefix}setmemberrole Member`'
f' (Member must be an actual role).\n'
f'If you want to turn off AutoRole, make a role, assign the member role to that role, and delete the role',
inline=False)
embed.add_field(name='Set the default channel for BotChat.',
value=f'`{prefix}setbotchatchannel [channel]`\nExample: `{prefix}setbotchatchannel #botchat`'
f' (`channel` must be an actual channel).\n'
f'If you want to turn off botchat, make a channel, assign botchat to that channel, and delete the channel.',
inline=False)
embed.add_field(name='Set a custom prefix for this server.',
value=f'`{prefix}setprefix [prefix]`',
inline=False)
embed.set_footer(text=f'Requested by {ctx.author.name}')
await ctx.send(embed=embed)
@commands.command(name='setwelcomechannel', description="Used to set the channel welcome messages arrive. "
"See description of the `setup` command for more info.")
@commands.has_permissions(administrator=True)
@commands.guild_only()
async def set_welcome_channel(self, ctx, channel: discord.TextChannel):
channel_id = channel.id
if os.path.exists(f'./configs/{ctx.guild.id}.json'):
with open(f'./configs/{ctx.guild.id}.json', 'r') as jsonFile:
data = json.load(jsonFile)
else:
data = {}
data['welcome_channel'] = channel_id
with open(f'./configs/{ctx.guild.id}.json', 'w') as jsonFile:
json.dump(data, jsonFile, indent=3)
await ctx.send(f'Welcome channel set to {channel.mention} successfully.')
@commands.command(name='setkickreason', description='Used to set the default kick/ban reason '
'in a case where no reason is given.\n'
'Check the description of the `setup` command '
'for more information.')
@commands.has_permissions(manage_guild=True)
@commands.guild_only()
async def set_kick_reason(self, ctx, *, reason):
if os.path.exists(f'./configs/{ctx.guild.id}.json'):
with open(f'./configs/{ctx.guild.id}.json', 'r') as jsonFile:
data = json.load(jsonFile)
else:
data = {}
data['default_kick_ban_reason'] = str(reason)
with open(f'./configs/{ctx.guild.id}.json', 'w') as jsonFile:
json.dump(data, jsonFile, indent=4)
await ctx.send(f'Default kick/ban reason set to **{reason}** successfully.')
@commands.command(name='setmemberrole', description='Used to set the role which is given to every member upon '
'joining. '
'Check description of `setup` command for more info.')
@commands.has_permissions(manage_guild=True)
@commands.guild_only()
async def set_member_role(self, ctx, role: discord.Role):
if os.path.exists(f'./configs/{ctx.guild.id}.json'):
with open(f'./configs/{ctx.guild.id}.json', 'r') as jsonFile:
data = json.load(jsonFile)
else:
data = {}
data['member_role'] = role.id
with open(f'./configs/{ctx.guild.id}.json', 'w') as jsonFile:
json.dump(data, jsonFile, indent=3)
await ctx.send(f'Member role set to **{role.name}** successfully.')
@commands.command(name='setmuterole', description='Sets the role assigned to muted people. '
'Use `createmuterole` for creating a muted role and '
'automatically setting permissions to every channel.')
@commands.has_permissions(manage_guild=True)
@commands.guild_only()
async def set_mute_role(self, ctx, role: discord.Role):
if os.path.exists(f'./configs/{ctx.guild.id}.json'):
with open(f'./configs/{ctx.guild.id}.json', 'r') as jsonFile:
data = json.load(jsonFile)
else:
data = {}
data['mute_role'] = role.id
with open(f'./configs/{ctx.guild.id}.json', 'w') as jsonFile:
json.dump(data, jsonFile, indent=4)
await ctx.send(f'Mute role set to **{role.name}** successfully.')
@commands.command(name='createmuterole', description='Creates a mute role, and sets messaging permissions to '
'every channel.\n '
'the `rolename` argument is optional. (Defaults to "Muted")')
@commands.has_permissions(manage_roles=True)
@commands.guild_only()
async def create_mute_role(self, ctx, rolename=None):
if rolename is None:
rolename = 'Muted'
guild = ctx.guild
mutedRole = await guild.create_role(name=rolename) # creating the role
for channel in guild.channels:
await channel.set_permissions(mutedRole, speak=False, send_messages=False, use_slash_commands=False)
# setting permissions for each channel
await ctx.send(f'Created role **{mutedRole}** and set permissions accordingly.')
await Setup.set_mute_role(self, ctx, mutedRole)
@commands.command(name='changeprefix', aliases=['setprefix'], description='Sets the server-specific prefix')
@commands.has_permissions(manage_guild=True)
@commands.guild_only()
async def change_prefix_func(self, ctx, prefix):
with open('./storage/prefixes.json', 'r') as f:
data = json.load(f)
data[str(ctx.guild.id)] = prefix
with open('./storage/prefixes.json', 'w') as f:
json.dump(data, f, indent=4)
await ctx.send(f'The prefix for this server has changed to {prefix}')
def setup(bot):
bot.add_cog(Setup(bot))
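# Usage sketch (assumes the bot entry point loads cogs by dotted module path):
#   bot.load_extension("Cogs.setup")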
| 50.188235 | 138 | 0.565752 |
4a209ab76059c48defe0f169d2e11a141cf1e1ae | 2,799 | py | Python | utils/examples/example_zw.py | shinji-s/scrapy-cluster | 0153bf61f46068e61f76798147a850769a190b95 | [
"MIT"
] | 1,108 | 2015-04-15T16:02:26.000Z | 2022-03-31T11:46:29.000Z | utils/examples/example_zw.py | shinji-s/scrapy-cluster | 0153bf61f46068e61f76798147a850769a190b95 | [
"MIT"
] | 246 | 2015-07-08T18:37:12.000Z | 2021-06-28T14:33:51.000Z | utils/examples/example_zw.py | shinji-s/scrapy-cluster | 0153bf61f46068e61f76798147a850769a190b95 | [
"MIT"
] | 382 | 2015-04-20T07:16:05.000Z | 2022-03-21T11:34:59.000Z | from scutils.zookeeper_watcher import ZookeeperWatcher
from time import sleep
import argparse
parser = argparse.ArgumentParser(
description="Zookeeper file watcher")
parser.add_argument('-z', '--zoo-keeper', action='store', required=True,
help="The Zookeeper connection <host>:<port>")
parser.add_argument('-f', '--file', action='store', required=True,
help="The full path to the file to watch in Zookeeper")
parser.add_argument('-p', '--pointer', action='store_const', const=True,
help="The file contents point to another file")
parser.add_argument('-s', '--sleep', nargs='?', const=1, default=1,
type=int, help="The time to sleep between poll checks")
parser.add_argument('-v', '--valid-init', action='store_false',
help="Do not ensure zookeeper is up upon initial setup",
default=True)
group = parser.add_mutually_exclusive_group(required=True)
group.add_argument('--poll', action='store_true', help="Polling example")
group.add_argument('--event', action='store_true',
help="Event driven example")
args = vars(parser.parse_args())
hosts = args['zoo_keeper']
file = args['file']
pointer = args['pointer']
sleep_time = args['sleep']
poll = args['poll']
event = args['event']
valid = args['valid_init']
def valid_file(state):
print("The valid state is now", state)
def change_file(conf_string):
print("Your file contents:", conf_string)
def error_file(message):
print("An error was thrown:", message)
# You can use any or all of these, polling + handlers, some handlers, etc
if pointer:
if poll:
zoo_watcher = ZookeeperWatcher(hosts, file, ensure=True,pointer=True)
elif event:
zoo_watcher = ZookeeperWatcher(hosts, file,
valid_handler=valid_file,
config_handler=change_file,
error_handler=error_file,
pointer=True, ensure=True, valid_init=valid)
else:
if poll:
zoo_watcher = ZookeeperWatcher(hosts, file, ensure=True)
elif event:
zoo_watcher = ZookeeperWatcher(hosts, file,
valid_handler=valid_file,
config_handler=change_file,
error_handler=error_file,
valid_init=valid, ensure=True)
print("Use a keyboard interrupt to shut down the process.")
try:
while True:
if poll:
print("Valid File?", zoo_watcher.is_valid())
print("Contents:", zoo_watcher.get_file_contents())
sleep(sleep_time)
except KeyboardInterrupt:
    pass
zoo_watcher.close() | 39.422535 | 83 | 0.606288 |
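# Example invocation (hypothetical ZooKeeper host and znode path):
#   python example_zw.py -z localhost:2181 -f /config/settings.txt --poll -s 2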
4a209b17a9567de17edcb72b30a340ba10b42c61 | 1,817 | py | Python | flaskApp/evaluation/example.py | baldeosinghm/rely | a29288035267919c78a1e161ad9b7e789cc2362a | [
"MIT"
] | 4 | 2020-02-12T20:46:37.000Z | 2021-02-04T17:03:58.000Z | flaskApp/evaluation/example.py | baldeosinghm/rely | a29288035267919c78a1e161ad9b7e789cc2362a | [
"MIT"
] | 1 | 2021-03-03T00:14:26.000Z | 2021-06-11T19:05:05.000Z | flaskApp/evaluation/example.py | baldeosinghm/rely | a29288035267919c78a1e161ad9b7e789cc2362a | [
"MIT"
] | 1 | 2020-11-30T06:48:31.000Z | 2020-11-30T06:48:31.000Z | import pandas as pd
import numpy as np
# import matplotlib
# matplotlib.use("TKagg")
# import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split, cross_val_score, cross_val_predict
from sklearn.linear_model import LinearRegression
from sklearn import metrics
def predictPrice(stock, days):
# make data frame
df = pd.read_csv("csv/" + stock + ".csv", sep='\s*,\s*', header=0, encoding='ascii', engine='python')
# Store the "Low" in the x var, and the "High" in the y var
# We want to use the Low as the predictor, the independent variable
# Stock's High is the dependent variable, what we want to predict
X = df['Low'].values.reshape(-1,1)
y = df['High'].values.reshape(-1,1)
# Here we split the data: 80% for training, 20% for testing in test_size
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=0)
# Here we train our algorithm, "regressor"
regressor = LinearRegression()
regressor.fit(X_train, y_train)
# Forecast out inputted # of days
# Shift data frame columns up to predict "days" amount into the future
df['Prediction'] = df['High'].shift(days)
# Drop unessential columns and remove "days" amount of bottom rows
y_forecast = df.drop(['Date', 'Open', 'High', 'Low'],1)[days:]
prediction = regressor.predict(y_forecast)
# Evaluate the performance of the Linear Regression algorithm by using the following four metrics
    y_pred = regressor.predict(X_test)
    print('R^2:', metrics.r2_score(y_test, y_pred))
print('Mean Absolute Error:', metrics.mean_absolute_error(y_test, y_pred))
print('Mean Squared Error:', metrics.mean_squared_error(y_test, y_pred))
print('Root Mean Squared Error:', np.sqrt(metrics.mean_squared_error(y_test, y_pred)))
print("______________________________________\n")
return prediction[days]
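# Usage sketch (assumes a csv/AAPL.csv file with Date, Open, High and Low columns):
#   predicted_high = predictPrice("AAPL", 5)  # shift the forecast 5 rows ahead
#   print(predicted_high)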
| 49.108108 | 105 | 0.715465 |
4a209c9c82b72d0d12c1e148772db4912cd765dc | 25,024 | py | Python | graph_objs/scattergeo/marker/_line.py | wwwidonja/changed_plotly | 1bda35a438539a97c84a3ab3952e95e8848467bd | [
"MIT"
] | null | null | null | graph_objs/scattergeo/marker/_line.py | wwwidonja/changed_plotly | 1bda35a438539a97c84a3ab3952e95e8848467bd | [
"MIT"
] | null | null | null | graph_objs/scattergeo/marker/_line.py | wwwidonja/changed_plotly | 1bda35a438539a97c84a3ab3952e95e8848467bd | [
"MIT"
] | null | null | null | from plotly.basedatatypes import BaseTraceHierarchyType as _BaseTraceHierarchyType
import copy as _copy
class Line(_BaseTraceHierarchyType):
# class properties
# --------------------
_parent_path_str = "scattergeo.marker"
_path_str = "scattergeo.marker.line"
_valid_props = {
"autocolorscale",
"cauto",
"cmax",
"cmid",
"cmin",
"color",
"coloraxis",
"colorscale",
"colorsrc",
"reversescale",
"width",
"widthsrc",
}
# autocolorscale
# --------------
@property
def autocolorscale(self):
"""
Determines whether the colorscale is a default palette
(`autocolorscale: true`) or the palette determined by
`marker.line.colorscale`. Has an effect only if in
`marker.line.color`is set to a numerical array. In case
`colorscale` is unspecified or `autocolorscale` is true, the
default palette will be chosen according to whether numbers in
the `color` array are all positive, all negative or mixed.
The 'autocolorscale' property must be specified as a bool
(either True, or False)
Returns
-------
bool
"""
return self["autocolorscale"]
@autocolorscale.setter
def autocolorscale(self, val):
self["autocolorscale"] = val
# cauto
# -----
@property
def cauto(self):
"""
Determines whether or not the color domain is computed with
respect to the input data (here in `marker.line.color`) or the
bounds set in `marker.line.cmin` and `marker.line.cmax` Has an
effect only if in `marker.line.color`is set to a numerical
array. Defaults to `false` when `marker.line.cmin` and
`marker.line.cmax` are set by the user.
The 'cauto' property must be specified as a bool
(either True, or False)
Returns
-------
bool
"""
return self["cauto"]
@cauto.setter
def cauto(self, val):
self["cauto"] = val
# cmax
# ----
@property
def cmax(self):
"""
Sets the upper bound of the color domain. Has an effect only if
in `marker.line.color`is set to a numerical array. Value should
have the same units as in `marker.line.color` and if set,
`marker.line.cmin` must be set as well.
The 'cmax' property is a number and may be specified as:
- An int or float
Returns
-------
int|float
"""
return self["cmax"]
@cmax.setter
def cmax(self, val):
self["cmax"] = val
# cmid
# ----
@property
def cmid(self):
"""
Sets the mid-point of the color domain by scaling
`marker.line.cmin` and/or `marker.line.cmax` to be equidistant
to this point. Has an effect only if in `marker.line.color`is
set to a numerical array. Value should have the same units as
in `marker.line.color`. Has no effect when `marker.line.cauto`
is `false`.
The 'cmid' property is a number and may be specified as:
- An int or float
Returns
-------
int|float
"""
return self["cmid"]
@cmid.setter
def cmid(self, val):
self["cmid"] = val
# cmin
# ----
@property
def cmin(self):
"""
Sets the lower bound of the color domain. Has an effect only if
in `marker.line.color`is set to a numerical array. Value should
have the same units as in `marker.line.color` and if set,
`marker.line.cmax` must be set as well.
The 'cmin' property is a number and may be specified as:
- An int or float
Returns
-------
int|float
"""
return self["cmin"]
@cmin.setter
def cmin(self, val):
self["cmin"] = val
# color
# -----
@property
def color(self):
"""
Sets themarker.linecolor. It accepts either a specific color or
an array of numbers that are mapped to the colorscale relative
to the max and min values of the array or relative to
`marker.line.cmin` and `marker.line.cmax` if set.
The 'color' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color:
aliceblue, antiquewhite, aqua, aquamarine, azure,
beige, bisque, black, blanchedalmond, blue,
blueviolet, brown, burlywood, cadetblue,
chartreuse, chocolate, coral, cornflowerblue,
cornsilk, crimson, cyan, darkblue, darkcyan,
darkgoldenrod, darkgray, darkgrey, darkgreen,
darkkhaki, darkmagenta, darkolivegreen, darkorange,
darkorchid, darkred, darksalmon, darkseagreen,
darkslateblue, darkslategray, darkslategrey,
darkturquoise, darkviolet, deeppink, deepskyblue,
dimgray, dimgrey, dodgerblue, firebrick,
floralwhite, forestgreen, fuchsia, gainsboro,
ghostwhite, gold, goldenrod, gray, grey, green,
greenyellow, honeydew, hotpink, indianred, indigo,
ivory, khaki, lavender, lavenderblush, lawngreen,
lemonchiffon, lightblue, lightcoral, lightcyan,
lightgoldenrodyellow, lightgray, lightgrey,
lightgreen, lightpink, lightsalmon, lightseagreen,
lightskyblue, lightslategray, lightslategrey,
lightsteelblue, lightyellow, lime, limegreen,
linen, magenta, maroon, mediumaquamarine,
mediumblue, mediumorchid, mediumpurple,
mediumseagreen, mediumslateblue, mediumspringgreen,
mediumturquoise, mediumvioletred, midnightblue,
mintcream, mistyrose, moccasin, navajowhite, navy,
oldlace, olive, olivedrab, orange, orangered,
orchid, palegoldenrod, palegreen, paleturquoise,
palevioletred, papayawhip, peachpuff, peru, pink,
plum, powderblue, purple, red, rosybrown,
royalblue, rebeccapurple, saddlebrown, salmon,
sandybrown, seagreen, seashell, sienna, silver,
skyblue, slateblue, slategray, slategrey, snow,
springgreen, steelblue, tan, teal, thistle, tomato,
turquoise, violet, wheat, white, whitesmoke,
yellow, yellowgreen
- A number that will be interpreted as a color
according to scattergeo.marker.line.colorscale
- A list or array of any of the above
Returns
-------
str|numpy.ndarray
"""
return self["color"]
@color.setter
def color(self, val):
self["color"] = val
# coloraxis
# ---------
@property
def coloraxis(self):
"""
Sets a reference to a shared color axis. References to these
shared color axes are "coloraxis", "coloraxis2", "coloraxis3",
etc. Settings for these shared color axes are set in the
layout, under `layout.coloraxis`, `layout.coloraxis2`, etc.
Note that multiple color scales can be linked to the same color
axis.
The 'coloraxis' property is an identifier of a particular
subplot, of type 'coloraxis', that may be specified as the string 'coloraxis'
optionally followed by an integer >= 1
(e.g. 'coloraxis', 'coloraxis1', 'coloraxis2', 'coloraxis3', etc.)
Returns
-------
str
"""
return self["coloraxis"]
@coloraxis.setter
def coloraxis(self, val):
self["coloraxis"] = val
# colorscale
# ----------
@property
def colorscale(self):
"""
Sets the colorscale. Has an effect only if in
`marker.line.color`is set to a numerical array. The colorscale
must be an array containing arrays mapping a normalized value
to an rgb, rgba, hex, hsl, hsv, or named color string. At
minimum, a mapping for the lowest (0) and highest (1) values
are required. For example, `[[0, 'rgb(0,0,255)'], [1,
'rgb(255,0,0)']]`. To control the bounds of the colorscale in
color space, use`marker.line.cmin` and `marker.line.cmax`.
Alternatively, `colorscale` may be a palette name string of the
following list: Greys,YlGnBu,Greens,YlOrRd,Bluered,RdBu,Reds,Bl
ues,Picnic,Rainbow,Portland,Jet,Hot,Blackbody,Earth,Electric,Vi
ridis,Cividis.
The 'colorscale' property is a colorscale and may be
specified as:
- A list of colors that will be spaced evenly to create the colorscale.
Many predefined colorscale lists are included in the sequential, diverging,
and cyclical modules in the new_plotly.colors package.
- A list of 2-element lists where the first element is the
normalized color level value (starting at 0 and ending at 1),
and the second item is a valid color string.
(e.g. [[0, 'green'], [0.5, 'red'], [1.0, 'rgb(0, 0, 255)']])
- One of the following named colorscales:
['aggrnyl', 'agsunset', 'algae', 'amp', 'armyrose', 'balance',
'blackbody', 'bluered', 'blues', 'blugrn', 'bluyl', 'brbg',
'brwnyl', 'bugn', 'bupu', 'burg', 'burgyl', 'cividis', 'curl',
'darkmint', 'deep', 'delta', 'dense', 'earth', 'edge', 'electric',
'emrld', 'fall', 'geyser', 'gnbu', 'gray', 'greens', 'greys',
'haline', 'hot', 'hsv', 'ice', 'icefire', 'inferno', 'jet',
'magenta', 'magma', 'matter', 'mint', 'mrybm', 'mygbm', 'oranges',
'orrd', 'oryel', 'oxy', 'peach', 'phase', 'picnic', 'pinkyl',
'piyg', 'plasma', 'plotly3', 'portland', 'prgn', 'pubu', 'pubugn',
'puor', 'purd', 'purp', 'purples', 'purpor', 'rainbow', 'rdbu',
'rdgy', 'rdpu', 'rdylbu', 'rdylgn', 'redor', 'reds', 'solar',
'spectral', 'speed', 'sunset', 'sunsetdark', 'teal', 'tealgrn',
'tealrose', 'tempo', 'temps', 'thermal', 'tropic', 'turbid',
'turbo', 'twilight', 'viridis', 'ylgn', 'ylgnbu', 'ylorbr',
'ylorrd'].
Appending '_r' to a named colorscale reverses it.
Returns
-------
str
"""
return self["colorscale"]
@colorscale.setter
def colorscale(self, val):
self["colorscale"] = val
# colorsrc
# --------
@property
def colorsrc(self):
"""
Sets the source reference on Chart Studio Cloud for color .
The 'colorsrc' property must be specified as a string or
as a new_plotly.grid_objs.Column object
Returns
-------
str
"""
return self["colorsrc"]
@colorsrc.setter
def colorsrc(self, val):
self["colorsrc"] = val
# reversescale
# ------------
@property
def reversescale(self):
"""
Reverses the color mapping if true. Has an effect only if in
`marker.line.color`is set to a numerical array. If true,
`marker.line.cmin` will correspond to the last color in the
array and `marker.line.cmax` will correspond to the first
color.
The 'reversescale' property must be specified as a bool
(either True, or False)
Returns
-------
bool
"""
return self["reversescale"]
@reversescale.setter
def reversescale(self, val):
self["reversescale"] = val
# width
# -----
@property
def width(self):
"""
Sets the width (in px) of the lines bounding the marker points.
The 'width' property is a number and may be specified as:
- An int or float in the interval [0, inf]
- A tuple, list, or one-dimensional numpy array of the above
Returns
-------
int|float|numpy.ndarray
"""
return self["width"]
@width.setter
def width(self, val):
self["width"] = val
# widthsrc
# --------
@property
def widthsrc(self):
"""
Sets the source reference on Chart Studio Cloud for width .
The 'widthsrc' property must be specified as a string or
as a new_plotly.grid_objs.Column object
Returns
-------
str
"""
return self["widthsrc"]
@widthsrc.setter
def widthsrc(self, val):
self["widthsrc"] = val
# Self properties description
# ---------------------------
@property
def _prop_descriptions(self):
return """\
autocolorscale
Determines whether the colorscale is a default palette
(`autocolorscale: true`) or the palette determined by
`marker.line.colorscale`. Has an effect only if in
`marker.line.color`is set to a numerical array. In case
`colorscale` is unspecified or `autocolorscale` is
true, the default palette will be chosen according to
whether numbers in the `color` array are all positive,
all negative or mixed.
cauto
Determines whether or not the color domain is computed
with respect to the input data (here in
`marker.line.color`) or the bounds set in
`marker.line.cmin` and `marker.line.cmax` Has an
effect only if in `marker.line.color`is set to a
numerical array. Defaults to `false` when
`marker.line.cmin` and `marker.line.cmax` are set by
the user.
cmax
Sets the upper bound of the color domain. Has an effect
only if in `marker.line.color`is set to a numerical
array. Value should have the same units as in
`marker.line.color` and if set, `marker.line.cmin` must
be set as well.
cmid
Sets the mid-point of the color domain by scaling
`marker.line.cmin` and/or `marker.line.cmax` to be
equidistant to this point. Has an effect only if in
`marker.line.color`is set to a numerical array. Value
should have the same units as in `marker.line.color`.
Has no effect when `marker.line.cauto` is `false`.
cmin
Sets the lower bound of the color domain. Has an effect
only if in `marker.line.color`is set to a numerical
array. Value should have the same units as in
`marker.line.color` and if set, `marker.line.cmax` must
be set as well.
color
Sets themarker.linecolor. It accepts either a specific
color or an array of numbers that are mapped to the
colorscale relative to the max and min values of the
array or relative to `marker.line.cmin` and
`marker.line.cmax` if set.
coloraxis
Sets a reference to a shared color axis. References to
these shared color axes are "coloraxis", "coloraxis2",
"coloraxis3", etc. Settings for these shared color axes
are set in the layout, under `layout.coloraxis`,
`layout.coloraxis2`, etc. Note that multiple color
scales can be linked to the same color axis.
colorscale
Sets the colorscale. Has an effect only if in
`marker.line.color`is set to a numerical array. The
colorscale must be an array containing arrays mapping a
normalized value to an rgb, rgba, hex, hsl, hsv, or
named color string. At minimum, a mapping for the
lowest (0) and highest (1) values are required. For
example, `[[0, 'rgb(0,0,255)'], [1, 'rgb(255,0,0)']]`.
To control the bounds of the colorscale in color space,
use`marker.line.cmin` and `marker.line.cmax`.
Alternatively, `colorscale` may be a palette name
string of the following list: Greys,YlGnBu,Greens,YlOrR
d,Bluered,RdBu,Reds,Blues,Picnic,Rainbow,Portland,Jet,H
ot,Blackbody,Earth,Electric,Viridis,Cividis.
colorsrc
Sets the source reference on Chart Studio Cloud for
color .
reversescale
Reverses the color mapping if true. Has an effect only
if in `marker.line.color`is set to a numerical array.
If true, `marker.line.cmin` will correspond to the last
color in the array and `marker.line.cmax` will
correspond to the first color.
width
Sets the width (in px) of the lines bounding the marker
points.
widthsrc
Sets the source reference on Chart Studio Cloud for
width .
"""
def __init__(
self,
arg=None,
autocolorscale=None,
cauto=None,
cmax=None,
cmid=None,
cmin=None,
color=None,
coloraxis=None,
colorscale=None,
colorsrc=None,
reversescale=None,
width=None,
widthsrc=None,
**kwargs
):
"""
Construct a new Line object
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of
:class:`new_plotly.graph_objs.scattergeo.marker.Line`
autocolorscale
Determines whether the colorscale is a default palette
(`autocolorscale: true`) or the palette determined by
`marker.line.colorscale`. Has an effect only if in
`marker.line.color`is set to a numerical array. In case
`colorscale` is unspecified or `autocolorscale` is
true, the default palette will be chosen according to
whether numbers in the `color` array are all positive,
all negative or mixed.
cauto
Determines whether or not the color domain is computed
with respect to the input data (here in
`marker.line.color`) or the bounds set in
`marker.line.cmin` and `marker.line.cmax` Has an
effect only if in `marker.line.color`is set to a
numerical array. Defaults to `false` when
`marker.line.cmin` and `marker.line.cmax` are set by
the user.
cmax
Sets the upper bound of the color domain. Has an effect
only if in `marker.line.color`is set to a numerical
array. Value should have the same units as in
`marker.line.color` and if set, `marker.line.cmin` must
be set as well.
cmid
Sets the mid-point of the color domain by scaling
`marker.line.cmin` and/or `marker.line.cmax` to be
equidistant to this point. Has an effect only if in
`marker.line.color`is set to a numerical array. Value
should have the same units as in `marker.line.color`.
Has no effect when `marker.line.cauto` is `false`.
cmin
Sets the lower bound of the color domain. Has an effect
only if in `marker.line.color`is set to a numerical
array. Value should have the same units as in
`marker.line.color` and if set, `marker.line.cmax` must
be set as well.
color
Sets themarker.linecolor. It accepts either a specific
color or an array of numbers that are mapped to the
colorscale relative to the max and min values of the
array or relative to `marker.line.cmin` and
`marker.line.cmax` if set.
coloraxis
Sets a reference to a shared color axis. References to
these shared color axes are "coloraxis", "coloraxis2",
"coloraxis3", etc. Settings for these shared color axes
are set in the layout, under `layout.coloraxis`,
`layout.coloraxis2`, etc. Note that multiple color
scales can be linked to the same color axis.
colorscale
Sets the colorscale. Has an effect only if in
`marker.line.color`is set to a numerical array. The
colorscale must be an array containing arrays mapping a
normalized value to an rgb, rgba, hex, hsl, hsv, or
named color string. At minimum, a mapping for the
lowest (0) and highest (1) values are required. For
example, `[[0, 'rgb(0,0,255)'], [1, 'rgb(255,0,0)']]`.
To control the bounds of the colorscale in color space,
use`marker.line.cmin` and `marker.line.cmax`.
Alternatively, `colorscale` may be a palette name
string of the following list: Greys,YlGnBu,Greens,YlOrR
d,Bluered,RdBu,Reds,Blues,Picnic,Rainbow,Portland,Jet,H
ot,Blackbody,Earth,Electric,Viridis,Cividis.
colorsrc
Sets the source reference on Chart Studio Cloud for
color .
reversescale
Reverses the color mapping if true. Has an effect only
if in `marker.line.color`is set to a numerical array.
If true, `marker.line.cmin` will correspond to the last
color in the array and `marker.line.cmax` will
correspond to the first color.
width
Sets the width (in px) of the lines bounding the marker
points.
widthsrc
Sets the source reference on Chart Studio Cloud for
width .
Returns
-------
Line
"""
super(Line, self).__init__("line")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
# Validate arg
# ------------
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError(
"""\
The first argument to the new_plotly.graph_objs.scattergeo.marker.Line
constructor must be a dict or
an instance of :class:`new_plotly.graph_objs.scattergeo.marker.Line`"""
)
# Handle skip_invalid
# -------------------
self._skip_invalid = kwargs.pop("skip_invalid", False)
self._validate = kwargs.pop("_validate", True)
# Populate data dict with properties
# ----------------------------------
_v = arg.pop("autocolorscale", None)
_v = autocolorscale if autocolorscale is not None else _v
if _v is not None:
self["autocolorscale"] = _v
_v = arg.pop("cauto", None)
_v = cauto if cauto is not None else _v
if _v is not None:
self["cauto"] = _v
_v = arg.pop("cmax", None)
_v = cmax if cmax is not None else _v
if _v is not None:
self["cmax"] = _v
_v = arg.pop("cmid", None)
_v = cmid if cmid is not None else _v
if _v is not None:
self["cmid"] = _v
_v = arg.pop("cmin", None)
_v = cmin if cmin is not None else _v
if _v is not None:
self["cmin"] = _v
_v = arg.pop("color", None)
_v = color if color is not None else _v
if _v is not None:
self["color"] = _v
_v = arg.pop("coloraxis", None)
_v = coloraxis if coloraxis is not None else _v
if _v is not None:
self["coloraxis"] = _v
_v = arg.pop("colorscale", None)
_v = colorscale if colorscale is not None else _v
if _v is not None:
self["colorscale"] = _v
_v = arg.pop("colorsrc", None)
_v = colorsrc if colorsrc is not None else _v
if _v is not None:
self["colorsrc"] = _v
_v = arg.pop("reversescale", None)
_v = reversescale if reversescale is not None else _v
if _v is not None:
self["reversescale"] = _v
_v = arg.pop("width", None)
_v = width if width is not None else _v
if _v is not None:
self["width"] = _v
_v = arg.pop("widthsrc", None)
_v = widthsrc if widthsrc is not None else _v
if _v is not None:
self["widthsrc"] = _v
# Process unknown kwargs
# ----------------------
self._process_kwargs(**dict(arg, **kwargs))
# Reset skip_invalid
# ------------------
self._skip_invalid = False
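# Usage sketch (hypothetical values; any documented property above can be passed):
#   line = Line(color="royalblue", width=2)
#   line = Line(color=[0.1, 0.5, 0.9], colorscale="Viridis", cmin=0, cmax=1)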
| 37.857791 | 87 | 0.570133 |
4a209e9a7ed3e3ac43d4111e4b375ba9a2156522 | 763 | py | Python | deepxde/nn/jax/nn.py | pescap/deepxde | 8792af7a62dc89a08d1ad4e67fc6c2431422e4be | [
"Apache-2.0"
] | 5 | 2019-02-08T02:48:00.000Z | 2019-03-29T16:21:40.000Z | deepxde/nn/jax/nn.py | pescap/deepxde | 8792af7a62dc89a08d1ad4e67fc6c2431422e4be | [
"Apache-2.0"
] | null | null | null | deepxde/nn/jax/nn.py | pescap/deepxde | 8792af7a62dc89a08d1ad4e67fc6c2431422e4be | [
"Apache-2.0"
] | 2 | 2019-02-08T18:15:47.000Z | 2019-03-31T17:23:42.000Z | from flax import linen as nn
class NN(nn.Module):
"""Base class for all neural network modules."""
# All sub-modules should have the following variables:
# params: Any = None
# _input_transform: Optional[Callable] = None
# _output_transform: Optional[Callable] = None
    def apply_feature_transform(self, transform):
        """Compute the features by applying a transform to the network inputs, i.e.,
features = transform(inputs). Then, outputs = network(features).
"""
self._input_transform = transform
def apply_output_transform(self, transform):
"""Apply a transform to the network outputs, i.e.,
outputs = transform(inputs, outputs).
"""
self._output_transform = transform
| 33.173913 | 83 | 0.668414 |
4a209ea857bf745da397066c5bb4a51971cfa41f | 126 | py | Python | Project1/test_social.py | automationmaki/pytest_simple_examples | 4cd5cdde2b0a359a4f14294ea9e7df2de949f309 | [
"MIT"
] | null | null | null | Project1/test_social.py | automationmaki/pytest_simple_examples | 4cd5cdde2b0a359a4f14294ea9e7df2de949f309 | [
"MIT"
] | null | null | null | Project1/test_social.py | automationmaki/pytest_simple_examples | 4cd5cdde2b0a359a4f14294ea9e7df2de949f309 | [
"MIT"
] | null | null | null | import requests
def test_twitter_is_present():
resp = requests.get("https://google.com")
assert "google" in resp.text | 25.2 | 45 | 0.722222 |
4a209ea9bcf74856dc7ed226e99b61cb9d7cd21c | 16,530 | py | Python | src/poliastro/neos/dastcom5.py | sundeshgupta/poliastro | 0a269d43c8a082df3323d38ce73f5e1ae3262ccd | [
"MIT"
] | 1 | 2019-04-12T12:28:37.000Z | 2019-04-12T12:28:37.000Z | src/poliastro/neos/dastcom5.py | sundeshgupta/poliastro | 0a269d43c8a082df3323d38ce73f5e1ae3262ccd | [
"MIT"
] | 1 | 2018-02-22T08:06:06.000Z | 2018-02-22T08:06:06.000Z | src/poliastro/neos/dastcom5.py | sundeshgupta/poliastro | 0a269d43c8a082df3323d38ce73f5e1ae3262ccd | [
"MIT"
] | null | null | null | """NEOs orbit from DASTCOM5 database.
"""
import os
import re
import urllib.request
import zipfile
import astropy.units as u
import numpy as np
import pandas as pd
from astropy.time import Time
from poliastro.bodies import Sun
from poliastro.frames.ecliptic import HeliocentricEclipticJ2000
from poliastro.twobody.angles import M_to_nu
from poliastro.twobody.orbit import Orbit
AST_DTYPE = np.dtype(
[
("NO", np.int32),
("NOBS", np.int32),
("OBSFRST", np.int32),
("OBSLAST", np.int32),
("EPOCH", np.float64),
("CALEPO", np.float64),
("MA", np.float64),
("W", np.float64),
("OM", np.float64),
("IN", np.float64),
("EC", np.float64),
("A", np.float64),
("QR", np.float64),
("TP", np.float64),
("TPCAL", np.float64),
("TPFRAC", np.float64),
("SOLDAT", np.float64),
("SRC1", np.float64),
("SRC2", np.float64),
("SRC3", np.float64),
("SRC4", np.float64),
("SRC5", np.float64),
("SRC6", np.float64),
("SRC7", np.float64),
("SRC8", np.float64),
("SRC9", np.float64),
("SRC10", np.float64),
("SRC11", np.float64),
("SRC12", np.float64),
("SRC13", np.float64),
("SRC14", np.float64),
("SRC15", np.float64),
("SRC16", np.float64),
("SRC17", np.float64),
("SRC18", np.float64),
("SRC19", np.float64),
("SRC20", np.float64),
("SRC21", np.float64),
("SRC22", np.float64),
("SRC23", np.float64),
("SRC24", np.float64),
("SRC25", np.float64),
("SRC26", np.float64),
("SRC27", np.float64),
("SRC28", np.float64),
("SRC29", np.float64),
("SRC30", np.float64),
("SRC31", np.float64),
("SRC32", np.float64),
("SRC33", np.float64),
("SRC34", np.float64),
("SRC35", np.float64),
("SRC36", np.float64),
("SRC37", np.float64),
("SRC38", np.float64),
("SRC39", np.float64),
("SRC40", np.float64),
("SRC41", np.float64),
("SRC42", np.float64),
("SRC43", np.float64),
("SRC44", np.float64),
("SRC45", np.float64),
("PRELTV", np.int8),
("SPHMX3", np.int8),
("SPHMX5", np.int8),
("JGSEP", np.int8),
("TWOBOD", np.int8),
("NSATS", np.int8),
("UPARM", np.int8),
("LSRC", np.int8),
("NDEL", np.int16),
("NDOP", np.int16),
("H", np.float32),
("G", np.float32),
("A1", np.float32),
("A2", np.float32),
("A3", np.float32),
("R0", np.float32),
("ALN", np.float32),
("NM", np.float32),
("NN", np.float32),
("NK", np.float32),
("LGK", np.float32),
("RHO", np.float32),
("AMRAT", np.float32),
("ALF", np.float32),
("DEL", np.float32),
("SPHLM3", np.float32),
("SPHLM5", np.float32),
("RP", np.float32),
("GM", np.float32),
("RAD", np.float32),
("EXTNT1", np.float32),
("EXTNT2", np.float32),
("EXTNT3", np.float32),
("MOID", np.float32),
("ALBEDO", np.float32),
("BVCI", np.float32),
("UBCI", np.float32),
("IRCI", np.float32),
("RMSW", np.float32),
("RMSU", np.float32),
("RMSN", np.float32),
("RMSNT", np.float32),
("RMSH", np.float32),
("EQUNOX", "|S4"),
("PENAM", "|S6"),
("SBNAM", "|S12"),
("SPTYPT", "|S5"),
("SPTYPS", "|S5"),
("DARC", "|S9"),
("COMNT1", "|S41"),
("COMNT2", "|S80"),
("DESIG", "|S13"),
("ASTEST", "|S8"),
("IREF", "|S10"),
("ASTNAM", "|S18"),
]
)
COM_DTYPE = np.dtype(
[
("NO", np.int32),
("NOBS", np.int32),
("OBSFRST", np.int32),
("OBSLAST", np.int32),
("EPOCH", np.float64),
("CALEPO", np.float64),
("MA", np.float64),
("W", np.float64),
("OM", np.float64),
("IN", np.float64),
("EC", np.float64),
("A", np.float64),
("QR", np.float64),
("TP", np.float64),
("TPCAL", np.float64),
("TPFRAC", np.float64),
("SOLDAT", np.float64),
("SRC1", np.float64),
("SRC2", np.float64),
("SRC3", np.float64),
("SRC4", np.float64),
("SRC5", np.float64),
("SRC6", np.float64),
("SRC7", np.float64),
("SRC8", np.float64),
("SRC9", np.float64),
("SRC10", np.float64),
("SRC11", np.float64),
("SRC12", np.float64),
("SRC13", np.float64),
("SRC14", np.float64),
("SRC15", np.float64),
("SRC16", np.float64),
("SRC17", np.float64),
("SRC18", np.float64),
("SRC19", np.float64),
("SRC20", np.float64),
("SRC21", np.float64),
("SRC22", np.float64),
("SRC23", np.float64),
("SRC24", np.float64),
("SRC25", np.float64),
("SRC26", np.float64),
("SRC27", np.float64),
("SRC28", np.float64),
("SRC29", np.float64),
("SRC30", np.float64),
("SRC31", np.float64),
("SRC32", np.float64),
("SRC33", np.float64),
("SRC34", np.float64),
("SRC35", np.float64),
("SRC36", np.float64),
("SRC37", np.float64),
("SRC38", np.float64),
("SRC39", np.float64),
("SRC40", np.float64),
("SRC41", np.float64),
("SRC42", np.float64),
("SRC43", np.float64),
("SRC44", np.float64),
("SRC45", np.float64),
("SRC46", np.float64),
("SRC47", np.float64),
("SRC48", np.float64),
("SRC49", np.float64),
("SRC50", np.float64),
("SRC51", np.float64),
("SRC52", np.float64),
("SRC53", np.float64),
("SRC54", np.float64),
("SRC55", np.float64),
("PRELTV", np.int8),
("SPHMX3", np.int8),
("SPHMX5", np.int8),
("JGSEP", np.int8),
("TWOBOD", np.int8),
("NSATS", np.int8),
("UPARM", np.int8),
("LSRC", np.int8),
("IPYR", np.int16),
("NDEL", np.int16),
("NDOP", np.int16),
("NOBSMT", np.int16),
("NOBSMN", np.int16),
("H", np.float32),
("G", np.float32),
("M1 (MT)", np.float32),
("M2 (MN)", np.float32),
("K1 (MTSMT)", np.float32),
("K2 (MNSMT)", np.float32),
("PHCOF (MNP)", np.float32),
("A1", np.float32),
("A2", np.float32),
("A3", np.float32),
("DT", np.float32),
("R0", np.float32),
("ALN", np.float32),
("NM", np.float32),
("NN", np.float32),
("NK", np.float32),
("S0", np.float32),
("TCL", np.float32),
("RHO", np.float32),
("AMRAT", np.float32),
("AJ1", np.float32),
("AJ2", np.float32),
("ET1", np.float32),
("ET2", np.float32),
("DTH", np.float32),
("ALF", np.float32),
("DEL", np.float32),
("SPHLM3", np.float32),
("SPHLM5", np.float32),
("RP", np.float32),
("GM", np.float32),
("RAD", np.float32),
("EXTNT1", np.float32),
("EXTNT2", np.float32),
("EXTNT3", np.float32),
("MOID", np.float32),
("ALBEDO", np.float32),
("RMSW", np.float32),
("RMSU", np.float32),
("RMSN", np.float32),
("RMSNT", np.float32),
("RMSMT", np.float32),
("RMSMN", np.float32),
("EQUNOX", "|S4"),
("PENAM", "|S6"),
("SBNAM", "|S12"),
("DARC", "|S9"),
("COMNT3", "|S49"),
("COMNT2", "|S80"),
("DESIG", "|S13"),
("COMEST", "|S14"),
("IREF", "|S10"),
("COMNAM", "|S29"),
]
)
POLIASTRO_LOCAL_PATH = os.path.join(os.path.expanduser("~"), ".poliastro")
DBS_LOCAL_PATH = os.path.join(POLIASTRO_LOCAL_PATH, "dastcom5", "dat")
AST_DB_PATH = os.path.join(DBS_LOCAL_PATH, "dast5_le.dat")
COM_DB_PATH = os.path.join(DBS_LOCAL_PATH, "dcom5_le.dat")
FTP_DB_URL = "ftp://ssd.jpl.nasa.gov/pub/ssd/"
def asteroid_db():
"""Return complete DASTCOM5 asteroid database.
Returns
-------
database : numpy.ndarray
Database with custom dtype.
"""
with open(AST_DB_PATH, "rb") as f:
f.seek(835, os.SEEK_SET)
data = np.fromfile(f, dtype=AST_DTYPE)
return data
def comet_db():
"""Return complete DASTCOM5 comet database.
Returns
-------
database : numpy.ndarray
Database with custom dtype.
"""
with open(COM_DB_PATH, "rb") as f:
f.seek(976, os.SEEK_SET)
data = np.fromfile(f, dtype=COM_DTYPE)
return data
def orbit_from_name(name):
"""Return :py:class:`~poliastro.twobody.orbit.Orbit` given a name.
Retrieve info from JPL DASTCOM5 database.
Parameters
----------
name : str
NEO name.
Returns
-------
orbit : list (~poliastro.twobody.orbit.Orbit)
NEO orbits.
"""
records = record_from_name(name)
orbits = []
for record in records:
orbits.append(orbit_from_record(record))
return orbits
def orbit_from_record(record):
"""Return :py:class:`~poliastro.twobody.orbit.Orbit` given a record.
Retrieve info from JPL DASTCOM5 database.
Parameters
----------
record : int
Object record.
Returns
-------
orbit : ~poliastro.twobody.orbit.Orbit
NEO orbit.
"""
body_data = read_record(record)
a = body_data["A"].item() * u.au
ecc = body_data["EC"].item() * u.one
inc = body_data["IN"].item() * u.deg
raan = body_data["OM"].item() * u.deg
argp = body_data["W"].item() * u.deg
m = body_data["MA"].item() * u.deg
nu = M_to_nu(m, ecc)
epoch = Time(body_data["EPOCH"].item(), format="jd", scale="tdb")
orbit = Orbit.from_classical(Sun, a, ecc, inc, raan, argp, nu, epoch)
orbit._frame = HeliocentricEclipticJ2000(obstime=epoch)
return orbit
def record_from_name(name):
"""Search `dastcom.idx` and return logical records that match a given string.
Body name, SPK-ID, or alternative designations can be used.
Parameters
----------
name : str
Body name.
Returns
-------
records : list (int)
DASTCOM5 database logical records matching str.
"""
records = []
lines = string_record_from_name(name)
for line in lines:
records.append(int(line[:6].lstrip()))
return records
def string_record_from_name(name):
"""Search `dastcom.idx` and return body full record.
Search DASTCOM5 index and return body records that match string,
containing logical record, name, alternative designations, SPK-ID, etc.
Parameters
----------
name : str
Body name.
Returns
-------
lines: list(str)
Body records
"""
idx_path = os.path.join(DBS_LOCAL_PATH, "dastcom.idx")
lines = []
with open(idx_path, "r") as inF:
for line in inF:
if re.search(r"\b" + name.casefold() + r"\b", line.casefold()):
lines.append(line)
return lines
def read_headers():
"""Read `DASTCOM5` headers and return asteroid and comet headers.
Headers are two numpy arrays with custom dtype.
Returns
-------
ast_header, com_header : tuple (numpy.ndarray)
DASTCOM5 headers.
"""
ast_path = os.path.join(DBS_LOCAL_PATH, "dast5_le.dat")
ast_dtype = np.dtype(
[
("IBIAS1", np.int32),
("BEGINP1", "|S8"),
("BEGINP2", "|S8"),
("BEGINP3", "|S8"),
("ENDPT1", "|S8"),
("ENDPT2", "|S8"),
("ENDPT3", "|S8"),
("CALDATE", "|S19"),
("JDDATE", np.float64),
("FTYP", "|S1"),
("BYTE2A", np.int16),
("IBIAS0", np.int32),
]
)
with open(ast_path, "rb") as f:
ast_header = np.fromfile(f, dtype=ast_dtype, count=1)
com_path = os.path.join(DBS_LOCAL_PATH, "dcom5_le.dat")
com_dtype = np.dtype(
[
("IBIAS2", np.int32),
("BEGINP1", "|S8"),
("BEGINP2", "|S8"),
("BEGINP3", "|S8"),
("ENDPT1", "|S8"),
("ENDPT2", "|S8"),
("ENDPT3", "|S8"),
("CALDATE", "|S19"),
("JDDATE", np.float64),
("FTYP", "|S1"),
("BYTE2C", np.int16),
]
)
with open(com_path, "rb") as f:
com_header = np.fromfile(f, dtype=com_dtype, count=1)
return ast_header, com_header
def read_record(record):
"""Read `DASTCOM5` record and return body data.
Body data consists of numpy array with custom dtype.
Parameters
----------
record : int
Body record.
Returns
-------
body_data : numpy.ndarray
Body information.
"""
ast_header, com_header = read_headers()
ast_path = os.path.join(DBS_LOCAL_PATH, "dast5_le.dat")
com_path = os.path.join(DBS_LOCAL_PATH, "dcom5_le.dat")
# ENDPT1 indicates end of numbered asteroids records
if record <= int(ast_header["ENDPT2"][0].item()):
# ENDPT2 indicates end of unnumbered asteroids records
if record <= int(ast_header["ENDPT1"][0].item()):
# phis_rec = record_size * (record_number - IBIAS - 1 (header record))
phis_rec = 835 * (record - ast_header["IBIAS0"][0].item() - 1)
else:
phis_rec = 835 * (record - ast_header["IBIAS1"][0].item() - 1)
with open(ast_path, "rb") as f:
f.seek(phis_rec, os.SEEK_SET)
body_data = np.fromfile(f, dtype=AST_DTYPE, count=1)
else:
phis_rec = 976 * (record - com_header["IBIAS2"][0].item() - 1)
with open(com_path, "rb") as f:
f.seek(phis_rec, os.SEEK_SET)
body_data = np.fromfile(f, dtype=COM_DTYPE, count=1)
return body_data
def download_dastcom5():
"""Downloads DASTCOM5 database.
Downloads and unzip DASTCOM5 file in default poliastro path (~/.poliastro).
"""
dastcom5_dir = os.path.join(POLIASTRO_LOCAL_PATH, "dastcom5")
dastcom5_zip_path = os.path.join(POLIASTRO_LOCAL_PATH, "dastcom5.zip")
if os.path.isdir(dastcom5_dir):
raise FileExistsError(
"dastcom5 is already created in " + os.path.abspath(dastcom5_dir)
)
if not zipfile.is_zipfile(dastcom5_zip_path):
if not os.path.isdir(POLIASTRO_LOCAL_PATH):
os.makedirs(POLIASTRO_LOCAL_PATH)
urllib.request.urlretrieve(
FTP_DB_URL + "dastcom5.zip", dastcom5_zip_path, _show_download_progress
)
with zipfile.ZipFile(dastcom5_zip_path) as myzip:
myzip.extractall(POLIASTRO_LOCAL_PATH)
def _show_download_progress(transferred, block, totalsize):
trans_mb = transferred * block / (1024 * 1024)
total_mb = totalsize / (1024 * 1024)
print("%.2f MB / %.2f MB" % (trans_mb, total_mb), end="\r", flush=True)
def entire_db():
"""Return complete DASTCOM5 database.
Merge asteroid and comet databases, only with fields
related to orbital data, discarding the rest.
Returns
-------
database : numpy.ndarray
Database with custom dtype.
"""
ast_database = asteroid_db()
com_database = comet_db()
ast_database = pd.DataFrame(
ast_database[
list(ast_database.dtype.names[:17])
+ list(ast_database.dtype.names[-4:-3])
+ list(ast_database.dtype.names[-2:])
]
)
ast_database.rename(
columns={"ASTNAM": "NAME", "NO": "NUMBER", "CALEPO": "CALEPOCH"}, inplace=True
)
com_database = pd.DataFrame(
com_database[
list(com_database.dtype.names[:17])
+ list(com_database.dtype.names[-4:-3])
+ list(com_database.dtype.names[-2:])
]
)
com_database.rename(
columns={"COMNAM": "NAME", "NO": "NUMBER", "CALEPO": "CALEPOCH"}, inplace=True
)
df = ast_database.append(com_database, ignore_index=True)
df[["NAME", "DESIG", "IREF"]] = df[["NAME", "DESIG", "IREF"]].apply(
lambda x: x.str.strip().str.decode("utf-8")
)
return df
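# Usage sketch (requires the DASTCOM5 files under ~/.poliastro; the object name is
# only an example of a known NEO designation):
#   download_dastcom5()
#   orbits = orbit_from_name("Apophis")
#   df = entire_db()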
| 27.969543 | 86 | 0.514519 |
4a209f436dfa41fef225a73b6f87a3b34434dc27 | 912 | py | Python | openslides_backend/action/speaker/create_update_delete.py | reiterl/openslides-backend | d36667f00087ae8baf25853d4cef18a5e6dc7b3b | [
"MIT"
] | null | null | null | openslides_backend/action/speaker/create_update_delete.py | reiterl/openslides-backend | d36667f00087ae8baf25853d4cef18a5e6dc7b3b | [
"MIT"
] | null | null | null | openslides_backend/action/speaker/create_update_delete.py | reiterl/openslides-backend | d36667f00087ae8baf25853d4cef18a5e6dc7b3b | [
"MIT"
] | null | null | null | from ...models.models import Speaker
from ..create_action_with_inferred_meeting import CreateActionWithInferredMeeting
from ..default_schema import DefaultSchema
from ..generics import DeleteAction, UpdateAction
from ..register import register_action
@register_action("speaker.create")
class SpeakerCreateAction(CreateActionWithInferredMeeting):
model = Speaker()
relation_field_for_meeting = "list_of_speakers_id"
schema = DefaultSchema(Speaker()).get_create_schema(
required_properties=["list_of_speakers_id", "user_id"],
optional_properties=["marked"],
)
@register_action("speaker.update")
class SpeakerUpdate(UpdateAction):
model = Speaker()
schema = DefaultSchema(Speaker()).get_update_schema(["marked"])
@register_action("speaker.delete")
class SpeakerDeleteAction(DeleteAction):
model = Speaker()
schema = DefaultSchema(Speaker()).get_delete_schema()
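# Payload sketch (hypothetical ids, matching the create schema above):
#   speaker.create: {"list_of_speakers_id": 1, "user_id": 2, "marked": false}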
| 32.571429 | 81 | 0.776316 |
4a20a048388af1e7805b57058bf85b0ef0a2ed7c | 1,575 | py | Python | ScratchPaper/dpdp_competition/src/common/factory.py | 123zhangzq/HW_DPDP | 27e5e63bb96d0e8b8c5cef500262b267b25f9666 | [
"MIT"
] | null | null | null | ScratchPaper/dpdp_competition/src/common/factory.py | 123zhangzq/HW_DPDP | 27e5e63bb96d0e8b8c5cef500262b267b25f9666 | [
"MIT"
] | null | null | null | ScratchPaper/dpdp_competition/src/common/factory.py | 123zhangzq/HW_DPDP | 27e5e63bb96d0e8b8c5cef500262b267b25f9666 | [
"MIT"
] | null | null | null | # Copyright (C) 2021. Huawei Technologies Co., Ltd. All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE
class Factory(object):
def __init__(self, factory_id: str, lng: float, lat: float, dock_num: int):
"""
        :param factory_id: factory (site) ID
        :param lng: longitude
        :param lat: latitude
        :param dock_num: number of cargo docks, used for vehicle loading and unloading
"""
self.id = str(factory_id)
self.lng = float(lng)
self.lat = float(lat)
self.dock_num = int(dock_num)
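# Usage sketch (hypothetical coordinates and dock count):
#   f = Factory("f_0001", lng=116.397, lat=39.916, dock_num=6)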
| 46.323529 | 87 | 0.726984 |
4a20a19792ba932090b58074c9f19d9049e629b4 | 1,191 | py | Python | train_dev_test.py | clulab/incivility | 82d8e8164b81e9f4d5737520f2cbf308d3fcd033 | [
"Apache-2.0"
] | 1 | 2020-09-18T12:05:13.000Z | 2020-09-18T12:05:13.000Z | train_dev_test.py | clulab/incivility | 82d8e8164b81e9f4d5737520f2cbf308d3fcd033 | [
"Apache-2.0"
] | null | null | null | train_dev_test.py | clulab/incivility | 82d8e8164b81e9f4d5737520f2cbf308d3fcd033 | [
"Apache-2.0"
] | null | null | null | import argparse
import os
import pandas as pd
import sklearn.model_selection as ms
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("csv_path")
parser.add_argument("--train-frac", type=float, default=0.8)
args = parser.parse_args()
with open(args.csv_path, encoding="utf-8", errors="ignore") as csv_file:
df = pd.read_csv(csv_file).dropna()
[label_col] = [col for col in df.columns if "name" in col.lower()]
train_df, dev_test_df = ms.train_test_split(
df,
train_size=args.train_frac,
test_size=None,
stratify=df[label_col],
random_state=42)
dev_df, test_df = ms.train_test_split(
dev_test_df,
train_size=0.5,
test_size=None,
stratify=dev_test_df[label_col],
random_state=42)
print(f"train: {len(train_df)}\n"
f"dev: {len(dev_df)}\n"
f"test: {len(test_df)}")
csv_name, _ = os.path.splitext(args.csv_path)
train_df.to_csv(csv_name + ".train.csv", encoding='utf-8-sig')
dev_df.to_csv(csv_name + ".dev.csv", encoding='utf-8-sig')
test_df.to_csv(csv_name + ".test.csv", encoding='utf-8-sig')
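# Example invocation (hypothetical file name; writes <name>.train/.dev/.test.csv
# alongside the input):
#   python train_dev_test.py comments.csv --train-frac 0.8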
| 30.538462 | 76 | 0.643157 |
4a20a28073335112636f50bd7c823557f6ae17fb | 667 | py | Python | pkg/workloads/cortex/consts.py | gvvynplaine/cortex | aa3daf0d138a880df29a2c075af41176119da47f | [
"Apache-2.0"
] | 1 | 2021-12-08T03:43:30.000Z | 2021-12-08T03:43:30.000Z | pkg/workloads/cortex/consts.py | ourobouros/cortex | 1b3aaf909816b93f6a6e3edd0da8c10891e05be9 | [
"Apache-2.0"
] | null | null | null | pkg/workloads/cortex/consts.py | ourobouros/cortex | 1b3aaf909816b93f6a6e3edd0da8c10891e05be9 | [
"Apache-2.0"
] | 1 | 2021-05-25T03:49:14.000Z | 2021-05-25T03:49:14.000Z | # Copyright 2020 Cortex Labs, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
SINGLE_MODEL_NAME = "_cortex_default"
INFERENTIA_NEURON_SOCKET = "/sock/neuron.sock"
| 39.235294 | 74 | 0.767616 |
4a20a2c071537d9d5ab73ccdd46557dc90f0a2a6 | 241 | py | Python | helloworld_project/pages/views.py | helpthx/hello-jenkins | 7cac1b93a769a79bbe12be26e8de9a15ab13e4eb | [
"MIT"
] | null | null | null | helloworld_project/pages/views.py | helpthx/hello-jenkins | 7cac1b93a769a79bbe12be26e8de9a15ab13e4eb | [
"MIT"
] | 7 | 2020-06-06T01:08:55.000Z | 2022-02-10T12:48:39.000Z | helloworld_project/pages/views.py | helpthx/hello-jenkins | 7cac1b93a769a79bbe12be26e8de9a15ab13e4eb | [
"MIT"
] | null | null | null | from django.shortcuts import render
from django.http import HttpResponse
from helloworld_project.settings import SECRET_WORD
# Create your views here.
def home_page_view(request):
return HttpResponse('Hello, {0}!'.format(SECRET_WORD))
| 26.777778 | 58 | 0.804979 |
4a20a2f54f6e3cc02e877cc5d820125b9e8c5e0c | 1,887 | py | Python | lib/wwmgr/test_work_managers/test_futures.py | poharrison/westpa | 8618ab598f9bb38a7bc1479932f5332b137dfcbc | [
"MIT"
] | 140 | 2015-01-07T23:30:36.000Z | 2022-03-28T17:15:30.000Z | lib/wwmgr/test_work_managers/test_futures.py | burntyellow/westpa | 9dc62478fcef0001b9c038cd56a40b6be1b9d64a | [
"MIT"
] | 157 | 2015-01-03T03:38:36.000Z | 2022-03-31T14:12:16.000Z | lib/wwmgr/test_work_managers/test_futures.py | burntyellow/westpa | 9dc62478fcef0001b9c038cd56a40b6be1b9d64a | [
"MIT"
] | 56 | 2015-01-02T21:21:40.000Z | 2022-03-03T16:27:54.000Z |
from work_managers.serial import SerialWorkManager
from nose.tools import assert_raises #@UnresolvedImport
from .tsupport import *
class TestWMFuture:
def test_result(self):
with SerialWorkManager() as work_manager:
future = work_manager.submit(will_succeed)
assert future.get_result() is True
def test_discarded_result(self):
with SerialWorkManager() as work_manager:
future = work_manager.submit(will_succeed)
assert future.get_result(discard=True) is True
assert_raises(AttributeError, getattr, future, '_result')
@raises(ExceptionForTest)
def test_exception_raise(self):
with SerialWorkManager() as work_manager:
future = work_manager.submit(will_fail)
future.get_result()
def test_exception_retrieve(self):
with SerialWorkManager() as work_manager:
future = work_manager.submit(will_fail)
exc = future.get_exception()
assert exc.args[0] == 'failed as expected'
def test_callback(self):
with SerialWorkManager() as work_manager:
future = work_manager.submit(will_succeed)
def cbfn(future):
assert future.get_result() is True
future._add_callback(cbfn)
def test_success_wait(self):
with SerialWorkManager() as work_manager:
future = work_manager.submit(will_succeed)
future.wait()
def test_exception_wait(self):
with SerialWorkManager() as work_manager:
future = work_manager.submit(will_fail)
future.wait()
def test_is_done(self):
with SerialWorkManager() as work_manager:
future = work_manager.submit(will_succeed)
future.wait()
assert future.done
| 35.603774 | 69 | 0.63275 |
4a20a3173f1b064f060ad6d1c2ac6ce3c36cd2d6 | 6,866 | py | Python | modules/phrases/phrase_file_manager.py | naschorr/hawking | cdc98b7bc90c72d634f1fe877c34e7f9908ec4a8 | [
"MIT"
] | 21 | 2017-08-06T02:47:05.000Z | 2022-03-13T17:39:00.000Z | modules/phrases/phrase_file_manager.py | naschorr/hawking | cdc98b7bc90c72d634f1fe877c34e7f9908ec4a8 | [
"MIT"
] | 87 | 2017-12-26T17:07:59.000Z | 2022-03-11T01:31:48.000Z | modules/phrases/phrase_file_manager.py | naschorr/hawking | cdc98b7bc90c72d634f1fe877c34e7f9908ec4a8 | [
"MIT"
] | 7 | 2019-10-23T17:30:34.000Z | 2022-03-31T05:56:43.000Z | import json
import logging
import os
import re
from pathlib import Path
from typing import List
from common import utilities
from models.phrase import Phrase
from models.phrase_group import PhraseGroup
from models.phrase_encoding import PhraseEncoding
from phrase_encoder_decoder import PhraseEncoderDecoder
## Config
CONFIG_OPTIONS = utilities.load_module_config(Path(__file__).parent)
## Logging
logger = utilities.initialize_logging(logging.getLogger(__name__))
class PhraseFileManager:
def __init__(self):
self.phrases_file_extension = CONFIG_OPTIONS.get('phrases_file_extension', '.json')
self.non_letter_regex = re.compile('\W+') # Compile a regex for filtering non-letter characters
phrases_folder_path = CONFIG_OPTIONS.get('phrases_folder_path')
if (phrases_folder_path):
self.phrases_folder_path = Path(phrases_folder_path)
else:
self.phrases_folder_path = Path.joinpath(Path(__file__).parent, CONFIG_OPTIONS.get('phrases_folder', 'phrases'))
def discover_phrase_groups(self, path_to_scan: Path) -> List[Path]:
'''Searches the phrases folder for .json files that can potentially contain phrase groups & phrases'''
phrase_files = []
for file in os.listdir(path_to_scan):
file_path = Path(file)
if(file_path.suffix == self.phrases_file_extension):
phrase_files.append(Path.joinpath(path_to_scan, file_path))
return phrase_files
def _build_phrase_encoding(self, phrase_json: dict) -> PhraseEncoding:
'''Builds a PhraseEncoding object from raw JSON'''
if ('cipher' in phrase_json and 'fields' in phrase_json):
return PhraseEncoding(phrase_json['cipher'], phrase_json['fields'])
else:
return None
def _build_phrases(self, phrases_json: dict, decode = True) -> List[Phrase]:
'''
Given a JSON dict representing an unparsed PhraseGroup's list of Phrases, build a list of Phrase objects from
it, and return that list
'''
## Insert source[key] (if it exists) into target[key], else insert a default string
        def insert_if_exists(target, source, key, default=None):
            if(key in source):
                target[key] = source[key]
            elif(default is not None):
                target[key] = default
            return target
phrases = []
for phrase_raw in phrases_json:
try:
name = phrase_raw['name']
message = phrase_raw['message']
kwargs = {}
                if ('encoding' in phrase_raw):
                    encoding = self._build_phrase_encoding(phrase_raw['encoding'])
                else:
                    encoding = None
kwargs['encoded'] = phrase_raw.get('encoded', False)
## Todo: make this less ugly
help_value = phrase_raw.get('help') # fallback for the help submenus
kwargs = insert_if_exists(kwargs, phrase_raw, 'help')
kwargs = insert_if_exists(kwargs, phrase_raw, 'brief', help_value)
## Attempt to populate the description kwarg, but if it isn't available, then try and parse the
## message down into something usable instead.
if ('description' in phrase_raw):
kwargs['description'] = phrase_raw['description']
else:
kwargs['description'] = self.non_letter_regex.sub(' ', message).lower()
kwargs['derived_description'] = True
kwargs['is_music'] = phrase_raw.get('music', False)
phrase = Phrase(
name,
message,
encoding,
**kwargs
)
## Decode the phrase!
if (decode and phrase.encoded):
PhraseEncoderDecoder.decode(phrase)
phrases.append(phrase)
except Exception as e:
logger.warn(f"Error loading phrase '{phrase_raw['name']}'. Skipping...", e)
continue
return sorted(phrases, key=lambda phrase: phrase.name)
def load_phrase_group(self, path: Path, decode = True) -> PhraseGroup:
'''
Loads a PhraseGroup from a given phrase file json path.
Traverses the json file, creates a PhraseGroup, populates the metadata, and then traverses the phrase objects.
Phrases are built from that data, and added to the PhraseGroup. The completed PhraseGroup is returned.
'''
with open(path) as fd:
data = json.load(fd)
try:
                phrase_group_name = None
                phrase_group_key = None
                phrase_group_description = None
                phrases = []
                kwargs = {}
## Loop over the key-values in the json file. Handle each expected pair appropriately, and store
## unexpected pairs in the kwargs variable. Unexpected data is fine, but it needs to be preserved so
## that re-saved files will be equivalent to the original file.
for key, value in data.items():
if (key == 'name'):
phrase_group_name = value
elif (key == 'key'):
phrase_group_key = value
elif (key == 'description'):
phrase_group_description = value
elif (key == 'phrases'):
phrases = self._build_phrases(value, decode)
else:
kwargs[key] = value
## With the loose pieces processed, make sure the required pieces exist.
if (phrase_group_name == None or phrase_group_key == None or phrase_group_description == None or len(phrases) == 0):
logger.warning(f"Error loading phrase group '{phrase_group_name}', from '{path}'. Missing 'name', 'key', 'description', or non-zero length 'phrases' list. Skipping...")
return None
## Construct the PhraseGroup, and add the Phrases to it.
phrase_group = PhraseGroup(phrase_group_name, phrase_group_key, phrase_group_description, path, **kwargs)
phrase_group.add_all_phrases(phrases)
return phrase_group
except Exception as e:
logger.warning(f"Error loading phrase group '{phrase_group_name}' from '{path}''. Skipping...", e)
return None
def save_phrase_group(self, path: Path, phrase_group: PhraseGroup):
'''Saves the given PhraseGroup as a JSON object at the given path.'''
data = phrase_group.to_dict()
with open(path, 'w') as fd:
json.dump(data, fd, indent=4, ensure_ascii=False)
| 40.869048 | 188 | 0.594232 |
4a20a47d066bbafd039c08c4eb57a47b622fbbae | 740 | py | Python | CobraWinLDTP/ldtp/client_exception.py | brendano86/cobra | 6407c2911696b1c2a2b4501c118fff9931fa659e | [
"X11",
"MIT"
] | 72 | 2015-02-09T00:44:41.000Z | 2021-12-01T21:56:15.000Z | CobraWinLDTP/ldtp/client_exception.py | brendano86/cobra | 6407c2911696b1c2a2b4501c118fff9931fa659e | [
"X11",
"MIT"
] | 19 | 2015-06-03T09:31:18.000Z | 2021-12-02T16:35:52.000Z | CobraWinLDTP/ldtp/client_exception.py | brendano86/cobra | 6407c2911696b1c2a2b4501c118fff9931fa659e | [
"X11",
"MIT"
] | 37 | 2015-05-08T09:25:58.000Z | 2021-03-10T10:24:58.000Z | """
LDTP v2 client exception
@author: Eitan Isaacson <[email protected]>
@author: Nagappan Alagappan <[email protected]>
@copyright: Copyright (c) 2009-13 Nagappan Alagappan
@copyright: Copyright (c) 2009 Eitan Isaacson
@license: LGPL
http://ldtp.freedesktop.org
This file may be distributed and/or modified under the terms of the GNU Lesser General
Public License version 2 as published by the Free Software Foundation. This file
is distributed without any warranty; without even the implied warranty of
merchantability or fitness for a particular purpose.
See 'COPYING' in the source distribution for more information.
Headers in this file shall remain intact.
"""
ERROR_CODE = 123
class LdtpExecutionError(Exception):
pass
| 28.461538 | 86 | 0.790541 |