code (stringlengths 3-1.05M) | repo_name (stringlengths 5-104) | path (stringlengths 4-251) | language (stringclasses: 1 value) | license (stringclasses: 15 values) | size (int64: 3-1.05M)
---|---|---|---|---|---|
import bcrypt
from helptux import db, login_manager
class Role(db.Model):
__tablename__ = 'roles'
id = db.Column(db.Integer, primary_key=True)
role = db.Column(db.String(255), index=True, unique=True)
def __repr__(self):
return '<Role {0}>'.format(self.role)
def __init__(self, role):
self.role = role
users_roles = db.Table('users_roles',
db.Column('user_id', db.Integer, db.ForeignKey('users.id')),
db.Column('role_id', db.Integer, db.ForeignKey('roles.id'))
)
class User(db.Model):
__tablename__ = 'users'
id = db.Column(db.Integer, primary_key=True)
username = db.Column(db.String(255), index=True, unique=True, nullable=False)
email = db.Column(db.String(255), index=True, unique=True, nullable=False)
password_hash = db.Column(db.String(), nullable=False)
posts = db.relationship('Post', backref='author', lazy='dynamic')
authenticated = db.Column(db.Boolean, default=False)
roles = db.relationship('Role',
secondary=users_roles,
primaryjoin=(users_roles.c.user_id == id),
secondaryjoin=(users_roles.c.role_id == Role.id),
backref=db.backref('users', lazy='dynamic'),
lazy='dynamic')
def __init__(self, email, password):
self.email = email
self.username = self.email
self.set_password(password)
def __repr__(self):
return '<User {0}>'.format(self.username)
def output_obj(self):
return {
'id': self.id,
'username': self.username,
'posts': [p.id for p in self.posts],
'roles': [r.id for r in self.roles]
}
def set_password(self, input_password):
bit_input = input_password.encode('utf-8')
self.password_hash = bcrypt.hashpw(bit_input, bcrypt.gensalt())
    def verify_password(self, input_password):
        bit_input = input_password.encode('utf-8')
        return bcrypt.hashpw(bit_input, self.password_hash) == self.password_hash
def is_active(self):
return True
def is_anonymous(self):
return False
def get_id(self):
return str(self.id)
def is_authenticated(self):
return self.authenticated
def has_role(self, role_name):
for role in self.roles:
if role.role == role_name:
return True
return False
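# Illustrative usage sketch (not part of the original module): assumes an
# active Flask app context and an initialized `db`. It shows the bcrypt
# round trip implemented by set_password()/verify_password() above and the
# many-to-many role assignment through the users_roles table.
#
#   user = User(email='[email protected]', password='s3cret')
#   user.roles.append(Role('admin'))
#   db.session.add(user)
#   db.session.commit()
#   assert user.verify_password('s3cret') and user.has_role('admin')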
| pieterdp/helptux | helptux/models/user.py | Python | gpl-2.0 | 2,613 |
# FCE: feature confidence estimator
import math
import heapq
import sklearn
import sklearn.kernel_ridge
import sklearn.svm
import numpy as np
import sklearn.cross_validation as cv
import sklearn.gaussian_process as gp
from sklearn.base import BaseEstimator
import scipy.special
from joblib import Parallel, delayed
import GPy
from common.misc import Misc
from common.rdc import R_rdc
class RidgeBasedFCE(BaseEstimator):
""" This class uses Ridge Regression as the regression
algorithm. It then calculates the confidence from the difference
between observed and predicted value, and expected variance which
is calculated from training data.
"""
    def __init__(self, logger=None, n_jobs=1, verbose=0):
        # A grid-searched SVR is kept here commented out: it was dead code
        # (immediately overwritten by the plain SVR below) and referenced
        # sklearn.grid_search, which is never imported. The C parameter of
        # the plain SVR is re-set per feature in fit().
        #model = sklearn.svm.SVR(C=0.1, kernel='linear')
        #param_dist = {'C': pow(2.0, np.arange(-10, 11))}
        #self._learner = sklearn.grid_search.GridSearchCV(model, param_grid=param_dist,
        #                                                 n_jobs=n_jobs, cv=5,
        #                                                 verbose=0)
        self._learner = sklearn.svm.SVR(C=0.1, kernel='linear')
self.feature = None
self.error_mean = None
self.error_std = None
self.input_col_count = None
if logger is None:
self.logger = print
else:
self.logger = logger
def fit(self, X, feature):
try:
feature = int(feature)
except Exception:
self.logger("feature should be int")
raise TypeError("feature should be int")
X = X.view(np.ndarray)
self.input_col_count = X.shape[1]
self.feature = feature
my_X = Misc.exclude_cols(X, self.feature)
my_y = X[:, self.feature]
y_mean = np.mean(my_y)
y_std = np.std(my_y)
# ref: http://www.sciencedirect.com/science/article/pii/S0893608004002102
self._learner.C = max(abs(y_mean + 3 * y_std), abs(y_mean - 3 * y_std))
cvs = cv.KFold(len(X), 10, shuffle=True)
output_errors = np.empty(0)
for train, test in cvs:
tmp_l = sklearn.clone(self._learner)
tmp_l.fit(my_X[train, :], X[train, self.feature])
output_errors = np.hstack((output_errors, tmp_l.predict(my_X[test, :]) - X[test, self.feature]))
self.error_std = np.std(output_errors)
self.error_mean = np.mean(output_errors)
self._learner.fit(my_X, X[:, self.feature])
return self
def predict(self, X):
X = X.view(np.ndarray)
if X.ndim == 1:
X = X.reshape(1, -1)
return self._learner.predict(Misc.exclude_cols(X, self.feature))
def getConfidence(self, X):
def phi(x):
return 0.5 + 0.5 * scipy.special.erf(x / math.sqrt(2))
def my_score(x):
return 1 - abs(phi(x) - phi(-x))
X = X.view(np.ndarray)
if X.ndim == 1:
X = X.reshape(1, -1)
observed_diff = self._learner.predict(Misc.exclude_cols(X, self.feature)) - X[:, self.feature]
return my_score((observed_diff - self.error_mean) / self.error_std)
def getFeatures(self):
if hasattr(self._learner, "coef_"):
local_cols = np.arange(self._learner.coef_.shape[0])[self._learner.coef_ != 0]
return np.delete(np.arange(self.input_col_count), self.feature)[local_cols]
else:
return []
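# Illustrative usage sketch (an assumption, not part of the original file):
# fit an FCE for column 0 of a random matrix and score new rows. The score
# my_score(z) = 1 - |phi(z) - phi(-z)| in getConfidence() is the two-sided
# tail mass of the standardized residual, so values near 1 mean the observed
# value is consistent with the training error distribution.
#
#   X = np.random.randn(200, 6)
#   fce = RidgeBasedFCE(n_jobs=1).fit(X, feature=0)
#   pred = fce.predict(X[:5])          # predictions for column 0
#   conf = fce.getConfidence(X[:5])    # confidence scores in (0, 1]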
class PredictBasedFCE(BaseEstimator):
    ''' This class uses Gaussian Processes as the regression
    algorithm. It uses RDC (randomized dependence coefficient) scores
    to select the features given to the GP, and at the end combines
    the GP's predictive mean and variance with the observed value to
    calculate the confidence.
    '''
def __init__(self, feature_count=10, n_jobs=1,
logger=None, verbose=0):
#self._learner = gp.GaussianProcessRegressor(alpha=1e-2, n_restarts_optimizer=5)
self.feature_count = feature_count
self.n_jobs = n_jobs
#self.n_jobs = 1 # no gain was observed with multithreading
if logger is None:
self.logger = print
else:
self.logger = logger
self.verbose = verbose
self._selected_features = None
def fit(self, X, feature, fit_rdcs = True, fit_gp = True):
try:
feature = int(feature)
except Exception:
if self.verbose > 0:
self.logger("feature should be int")
raise TypeError("feature should be int")
X = X.view(np.ndarray)
self.input_col_count = X.shape[1]
self.feature = feature
my_X = Misc.exclude_cols(X, self.feature)
my_y = X[:, self.feature]
if fit_rdcs:
scores = R_rdc(my_X, my_y)
"""
if self.n_jobs > 1:
scores = Parallel(n_jobs=self.n_jobs, backend="multiprocessing")(
delayed(rdc)(X[:,self.feature], my_X[:,i])
for i in range(my_X.shape[1]))
else:
scores = [rdc(my_y, my_X[:,i])
for i in range(my_X.shape[1])]
"""
if self.verbose > 0:
self.logger("rdc scores calculated")
scores = np.array(scores)
scores[np.isnan(scores)] = 0
self._selected_features = self._selectFeatures(scores = scores,
k = self.feature_count)
if fit_gp:
            if self._selected_features is None:
if self.verbose > 0:
self.logger("you need to fit the rdcs first")
raise RuntimeError("you need to fit the rdcs first")
cols = len(self._selected_features)
if self.verbose > 0:
self.logger("training GP with %d input features" % cols)
kernel = GPy.kern.Linear(input_dim = cols) + GPy.kern.White(input_dim = cols)
self._learner = GPy.models.GPRegression(my_X[:, self._selected_features],
my_y.reshape(-1,1), kernel)
self._learner.optimize()
self._trained = True
return(self)
    def _selectFeatures(self, scores, k = 10):
        ''' Returns the indices of the k features with the highest
        relevance scores (RDC scores here). The target feature has
        already been excluded from the columns the scores were computed
        on, so the returned indices are local to that reduced set.
        '''
#res = (np.arange(len(scores))[scores >
# np.max(scores) * 0.90])
#if (res.shape[0] < 5):
res = (np.array([t[0] for t in heapq.nlargest(k,
enumerate(scores),
lambda t:t[1])]))
return(res)
def predict(self, X):
X = X.view(np.ndarray)
if (X.ndim == 1):
X = X.reshape(1, -1)
my_X = Misc.exclude_cols(X, self.feature)
mean, _ = self._learner.predict(my_X[:, self._selected_features], full_cov=False, include_likelihood=True)
return mean.reshape(-1)
def getConfidence(self, X):
def phi(x): return(0.5 + 0.5 * scipy.special.erf(x / math.sqrt(2)))
def my_score(x): return(1 - abs(phi(x) - phi(-x)))
X = X.view(np.ndarray)
if (X.ndim == 1):
X = X.reshape(1, -1)
my_X = Misc.exclude_cols(X, self.feature)
mean, var = self._learner.predict(my_X[:, self._selected_features], full_cov=False, include_likelihood=True)
y_obs = X[:, self.feature]
normalized_y = ((y_obs - mean) / np.sqrt(var)).reshape(-1)
yscore = np.array([my_score(iy) for iy in normalized_y])
return yscore
    def getFeatures(self):
        local_cols = self._selected_features
        return np.delete(np.arange(self.input_col_count),
                         self.feature)[local_cols]
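# Illustrative sketch (assumes GPy and R_rdc are importable): the GP-based
# FCE first ranks candidate columns with RDC, keeps `feature_count` of them,
# then trains a linear+white GPy regression on that subset.
#
#   fce = PredictBasedFCE(feature_count=5).fit(X, feature=0)
#   fce.getConfidence(X[:5])   # combines GP predictive mean and variance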
| adrinjalali/Network-Classifier | common/FCE.py | Python | gpl-3.0 | 8,343 |
# coding: utf-8
from django.db import migrations, models
import kpi.fields
class Migration(migrations.Migration):
dependencies = [
('kpi', '0020_add_validate_submissions_permission_to_asset'),
]
operations = [
migrations.AddField(
model_name='asset',
name='map_custom',
field=kpi.fields.LazyDefaultJSONBField(default=dict),
),
migrations.AddField(
model_name='asset',
name='map_styles',
field=kpi.fields.LazyDefaultJSONBField(default=dict),
),
]
| onaio/kpi | kpi/migrations/0021_map-custom-styles.py | Python | agpl-3.0 | 578 |
with open('./input.txt') as f:
program = [int(x) for x in f.read().split(',')]
def run(program):
program = program[:]
i = 0
while True:
operation = program[i:i+4]
op = operation[0]
if op == 99:
return program[0]
_ , a, b, to = operation
if op == 1:
program[to] = program[a] + program[b]
elif op == 2:
program[to] = program[a] * program[b]
i += 4
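# Quick sanity checks of run() on the worked examples from the puzzle text
# (opcode 1 adds, opcode 2 multiplies, 99 halts; run() returns position 0):
assert run([1, 0, 0, 0, 99]) == 2
assert run([2, 3, 0, 3, 99]) == 2
assert run([1, 1, 1, 4, 99, 5, 6, 0, 99]) == 30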
# p1
p1 = program[:]
p1[1] = 12
p1[2] = 2
print(run(p1))
# p2
for x in range(100):
for y in range(100):
p2 = program[:]
p2[1] = x
p2[2] = y
if run(p2) == 19690720: # replace this with the number you're given
print(100*x + y)
| marcosfede/algorithms | adventofcode/2019/d2/d2.py | Python | gpl-3.0 | 730 |
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.ops.tf.cast."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import sys
import platform
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gradient_checker
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
class CastOpTest(test.TestCase):
def _toDataType(self, dtype):
"""Returns TensorFlow data type for numpy type."""
if dtype == np.float32:
return dtypes.float32
elif dtype == np.float64:
return dtypes.float64
elif dtype == np.int32:
return dtypes.int32
elif dtype == np.int64:
return dtypes.int64
elif dtype == np.bool:
return dtypes.bool
elif dtype == np.complex64:
return dtypes.complex64
elif dtype == np.complex128:
return dtypes.complex128
else:
return None
def _cast(self, x, dtype, use_gpu=False):
with self.test_session(use_gpu=use_gpu):
val = constant_op.constant(x, self._toDataType(np.array([x]).dtype))
return math_ops.cast(val, self._toDataType(dtype), name="cast").eval()
def _test(self, x, dtype, use_gpu=False):
"""Tests cast(x) to dtype behaves the same as numpy.astype."""
np_ans = x.astype(dtype)
tf_ans = self._cast(x, dtype, use_gpu)
self.assertAllEqual(np_ans, tf_ans)
def _testTypes(self, x, use_gpu=False):
"""Tests cast(x) to different tf."""
if use_gpu:
type_list = [
np.float32, np.float64, np.int64, np.complex64, np.complex128
]
else:
type_list = [
np.float32, np.float64, np.int32, np.int64, np.complex64,
np.complex128
]
for from_type in type_list:
for to_type in type_list:
self._test(x.astype(from_type), to_type, use_gpu)
self._test(x.astype(np.bool), np.float32, use_gpu)
self._test(x.astype(np.uint8), np.float32, use_gpu)
if not use_gpu:
self._test(x.astype(np.bool), np.int32, use_gpu)
self._test(x.astype(np.int32), np.int32, use_gpu)
def _testAll(self, x):
self._testTypes(x, use_gpu=False)
if x.dtype == np.float32 or x.dtype == np.float64:
self._testTypes(x, use_gpu=True)
def testBasic(self):
self._testAll(np.arange(-10, 10).reshape(2, 10))
self._testAll(np.linspace(-10, 10, 17))
def testSmallValues(self):
f4 = np.finfo(np.float32)
f8 = np.finfo(np.float64)
self._testAll(
np.array([
0, -1, 1, -f4.resolution, f4.resolution, f8.resolution,
-f8.resolution
]))
def testBfloat16(self):
a = np.random.uniform(-100, 100, 100).astype(np.float32)
with self.test_session(use_gpu=False):
b = math_ops.cast(math_ops.cast(a, dtypes.bfloat16), dtypes.float32)
self.assertAllClose(a, b.eval(), rtol=1 / 128.)
with self.test_session(use_gpu=True):
b = math_ops.cast(math_ops.cast(a, dtypes.bfloat16), dtypes.float32)
self.assertAllClose(a, b.eval(), rtol=1 / 128.)
def testRandom(self):
self._testAll(np.random.normal(0, 10, 210).reshape([2, 3, 5, 7]))
self._testAll(np.random.normal(0, 1e6, 210).reshape([2, 3, 5, 7]))
  # Special values like int32max, int64min, inf, -inf, nan are cast to
  # integer values in somewhat unexpected ways, and they behave
  # differently on CPU and GPU.
def _compare(self, x, dst_dtype, expected, use_gpu=False):
np.testing.assert_equal(
self._cast(
x, dst_dtype, use_gpu=use_gpu), dst_dtype(expected))
def testIntToFloatBoundary(self):
i4 = np.iinfo(np.int32)
i8 = np.iinfo(np.int64)
self._compare(i4.min, np.float32, i4.min, False)
self._compare(i4.max, np.float32, i4.max, False)
self._compare(i8.min, np.float32, i8.min, False)
self._compare(i8.max, np.float32, i8.max, False)
self._compare(i4.min, np.float64, i4.min, False)
self._compare(i4.max, np.float64, i4.max, False)
self._compare(i8.min, np.float64, i8.min, False)
self._compare(i8.max, np.float64, i8.max, False)
# NOTE: GPU does not support int32/int64 for casting.
def testInfNan(self):
i4 = np.iinfo(np.int32)
i8 = np.iinfo(np.int64)
self._compare(np.inf, np.float32, np.inf, False)
self._compare(np.inf, np.float64, np.inf, False)
if sys.byteorder == "big":
self._compare(np.inf, np.int32, i4.max, False)
self._compare(np.inf, np.int64, i8.max, False)
else:
# np.float64("np.inf").astype(np.int32) is negative on x86 but positive on ppc64le
# Numpy link to relevant discussion - https://github.com/numpy/numpy/issues/9040
# Tensorflow link to relevant discussion - https://github.com/tensorflow/tensorflow/issues/9360
if platform.machine() == "ppc64le":
self._compare(-np.inf, np.int32, i4.min, False)
self._compare(-np.inf, np.int64, i8.min, False)
else:
self._compare(np.inf, np.int32, i4.min, False)
self._compare(np.inf, np.int64, i8.min, False)
self._compare(-np.inf, np.float32, -np.inf, False)
self._compare(-np.inf, np.float64, -np.inf, False)
self._compare(-np.inf, np.int32, i4.min, False)
self._compare(-np.inf, np.int64, i8.min, False)
self.assertAllEqual(np.isnan(self._cast(np.nan, np.float32, False)), True)
self.assertAllEqual(np.isnan(self._cast(np.nan, np.float64, False)), True)
self._compare(np.nan, np.int32, i4.min, False)
self._compare(np.nan, np.int64, i8.min, False)
self._compare(np.inf, np.float32, np.inf, True)
self._compare(np.inf, np.float64, np.inf, True)
self._compare(-np.inf, np.float32, -np.inf, True)
self._compare(-np.inf, np.float64, -np.inf, True)
self.assertAllEqual(np.isnan(self._cast(np.nan, np.float32, True)), True)
self.assertAllEqual(np.isnan(self._cast(np.nan, np.float64, True)), True)
def _OpError(self, x, dtype, err):
with self.cached_session():
with self.assertRaisesOpError(err):
math_ops.cast(x, dtype).eval()
def testNotImplemented(self):
self._OpError(np.arange(0, 10), dtypes.string, "Cast.*int64.*string.*")
def testCastToTypeOfVariable(self):
with self.cached_session() as sess:
x = variables.Variable(5, dtype=dtypes.float32)
y = variables.Variable(True, dtype=dtypes.bool)
cast = math_ops.cast(y, x.dtype)
variables.global_variables_initializer().run()
self.assertEqual(1.0, sess.run(cast))
def testGradients(self):
t = [dtypes.float32, dtypes.float64, dtypes.complex64, dtypes.complex128]
for src_t in t:
for dst_t in t:
with self.cached_session():
x = constant_op.constant(1.0, src_t)
z = array_ops.identity(x)
y = math_ops.cast(z, dst_t)
err = gradient_checker.compute_gradient_error(x, [], y, [])
self.assertLess(err, 1e-3)
class SparseTensorCastTest(test.TestCase):
def testCast(self):
indices = constant_op.constant([[0], [1], [2]], dtypes.int64)
values = constant_op.constant(np.array([1, 2, 3], np.int64))
shape = constant_op.constant([3], dtypes.int64)
st = sparse_tensor.SparseTensor(indices, values, shape)
st_cast = math_ops.cast(st, dtypes.float32)
with self.cached_session():
self.assertAllEqual(st_cast.indices.eval(), [[0], [1], [2]])
self.assertAllEqual(st_cast.values.eval(),
np.array([1, 2, 3], np.float32))
self.assertAllEqual(st_cast.dense_shape.eval(), [3])
class SaturateCastTest(test.TestCase):
def testSaturate(self):
in_types = dtypes.float32,
out_types = dtypes.int8, dtypes.uint8, dtypes.int16, dtypes.float32
with self.cached_session() as sess:
for in_type in in_types:
for out_type in out_types:
lo, hi = in_type.min, in_type.max
x = constant_op.constant(
[lo, lo + 1, lo // 2, hi // 2, hi - 1, hi], dtype=in_type)
y = math_ops.saturate_cast(x, dtype=out_type)
self.assertEqual(y.dtype, out_type)
x, y = sess.run([x, y])
correct = np.maximum(out_type.min, np.minimum(out_type.max, x))
self.assertAllEqual(correct, y)
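# Numpy-only sketch of the clamping rule testSaturate checks above:
# saturate_cast clips to the destination type's representable range before
# casting, e.g. for int8:
#
#   x = np.array([-1e9, 0.0, 1e9], dtype=np.float32)
#   np.clip(x, np.iinfo(np.int8).min, np.iinfo(np.int8).max).astype(np.int8)
#   # -> array([-128, 0, 127], dtype=int8)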
if __name__ == "__main__":
test.main()
| kobejean/tensorflow | tensorflow/python/kernel_tests/cast_op_test.py | Python | apache-2.0 | 9,083 |
__author__ = 'rohe0002'
import json
import logging
from urlparse import urlparse
from bs4 import BeautifulSoup
from mechanize import ParseResponseEx
from mechanize._form import ControlNotFoundError, AmbiguityError
from mechanize._form import ListControl
logger = logging.getLogger(__name__)
NO_CTRL = "No submit control with the name='%s' and value='%s' could be found"
class FlowException(Exception):
def __init__(self, function="", content="", url=""):
Exception.__init__(self)
self.function = function
self.content = content
self.url = url
def __str__(self):
return json.dumps(self.__dict__)
class InteractionNeeded(Exception):
pass
def NoneFunc():
return None
class RResponse():
"""
A Response class that behaves in the way that mechanize expects it.
Links to a requests.Response
"""
def __init__(self, resp):
self._resp = resp
self.index = 0
self.text = resp.text
if isinstance(self.text, unicode):
if resp.encoding == "UTF-8":
self.text = self.text.encode("utf-8")
else:
self.text = self.text.encode("latin-1")
self._len = len(self.text)
self.url = str(resp.url)
self.statuscode = resp.status_code
def geturl(self):
return self._resp.url
def __getitem__(self, item):
try:
return getattr(self._resp, item)
except AttributeError:
return getattr(self._resp.headers, item)
    def __getattr__(self, item):
        # only called when normal attribute lookup on RResponse fails, so
        # RResponse's own methods (e.g. read) keep precedence and the
        # lookup of self._resp cannot recurse
        try:
            return getattr(self._resp, item)
        except AttributeError:
            return getattr(self._resp.headers, item)
def read(self, size=0):
"""
Read from the content of the response. The class remembers what has
been read so it's possible to read small consecutive parts of the
content.
:param size: The number of bytes to read
        :return: Somewhere between zero and 'size' bytes, depending on
            how much is left in the content buffer to read.
"""
if size:
if self._len < size:
return self.text
else:
if self._len == self.index:
part = None
elif self._len - self.index < size:
part = self.text[self.index:]
self.index = self._len
else:
part = self.text[self.index:self.index + size]
self.index += size
return part
else:
return self.text
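# Example of the chunked-read semantics above: with a 10-byte body,
# read(4) returns bytes 0..3, the next read(4) bytes 4..7, the next the
# remaining 2 bytes, and a further read(4) returns None (buffer exhausted).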
class Interaction(object):
def __init__(self, httpc, interactions=None):
self.httpc = httpc
self.interactions = interactions
self.who = "Form process"
def pick_interaction(self, _base="", content="", req=None):
logger.info("pick_interaction baseurl: %s" % _base)
unic = content
if content:
_bs = BeautifulSoup(content)
else:
_bs = None
for interaction in self.interactions:
_match = 0
for attr, val in interaction["matches"].items():
if attr == "url":
logger.info("matching baseurl against: %s" % val)
if val == _base:
_match += 1
elif attr == "title":
logger.info("matching '%s' against title" % val)
if _bs is None:
break
if _bs.title is None:
break
if val in _bs.title.contents:
_match += 1
else:
_c = _bs.title.contents
if isinstance(_c, list) and not isinstance(_c,
basestring):
for _line in _c:
if val in _line:
_match += 1
continue
elif attr == "content":
if unic and val in unic:
_match += 1
elif attr == "class":
if req and val == req:
_match += 1
if _match == len(interaction["matches"]):
logger.info("Matched: %s" % interaction["matches"])
return interaction
raise InteractionNeeded("No interaction matched")
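    # An "interactions" entry is matched on any of the keys handled above
    # (url, title, content, class); an illustrative spec might look like:
    #   {"matches": {"url": "https://idp.example.org/login", "title": "Login"},
    #    "type": "form", "set": {"login": "user", "password": "hemligt"}}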
    def pick_form(self, response, url=None, **kwargs):
        """
        Picks which form on a web page should be used.
        :param response: An HTTP request response. An RResponse instance
        :param url: The url the request was sent to
        :param kwargs: Extra keyword arguments
        :return: The picked form, or None if no form matched the criteria.
        """
forms = ParseResponseEx(response)
if not forms:
raise FlowException(content=response.text, url=url)
#if len(forms) == 1:
# return forms[0]
#else:
_form = None
# ignore the first form, because I use ParseResponseEx which adds
# one form at the top of the list
forms = forms[1:]
if len(forms) == 1:
_form = forms[0]
else:
if "pick" in kwargs:
_dict = kwargs["pick"]
for form in forms:
if _form:
break
for key, _ava in _dict.items():
if key == "form":
_keys = form.attrs.keys()
for attr, val in _ava.items():
if attr in _keys and val == form.attrs[attr]:
_form = form
elif key == "control":
prop = _ava["id"]
_default = _ava["value"]
try:
orig_val = form[prop]
if isinstance(orig_val, basestring):
if orig_val == _default:
_form = form
elif _default in orig_val:
_form = form
except KeyError:
pass
except ControlNotFoundError:
pass
elif key == "method":
if form.method == _ava:
_form = form
else:
_form = None
if not _form:
break
elif "index" in kwargs:
_form = forms[int(kwargs["index"])]
return _form
def do_click(self, form, **kwargs):
"""
Emulates the user clicking submit on a form.
:param form: The form that should be submitted
:return: What do_request() returns
"""
if "click" in kwargs:
request = None
_name = kwargs["click"]
try:
_ = form.find_control(name=_name)
request = form.click(name=_name)
except AmbiguityError:
# more than one control with that name
_val = kwargs["set"][_name]
_nr = 0
while True:
try:
cntrl = form.find_control(name=_name, nr=_nr)
if cntrl.value == _val:
request = form.click(name=_name, nr=_nr)
break
else:
_nr += 1
except ControlNotFoundError:
raise Exception(NO_CTRL % (_name, _val))
else:
request = form.click()
headers = {}
for key, val in request.unredirected_hdrs.items():
headers[key] = val
url = request._Request__original
if form.method == "POST":
return self.httpc.send(url, "POST", data=request.data,
headers=headers)
else:
return self.httpc.send(url, "GET", headers=headers)
def select_form(self, orig_response, **kwargs):
"""
Pick a form on a web page, possibly enter some information and submit
the form.
:param orig_response: The original response (as returned by requests)
:return: The response do_click() returns
"""
logger.info("select_form")
response = RResponse(orig_response)
try:
_url = response.url
except KeyError:
_url = kwargs["location"]
form = self.pick_form(response, _url, **kwargs)
#form.backwards_compatible = False
if not form:
raise Exception("Can't pick a form !!")
if "set" in kwargs:
for key, val in kwargs["set"].items():
if key.startswith("_"):
continue
if "click" in kwargs and kwargs["click"] == key:
continue
try:
form[key] = val
except ControlNotFoundError:
pass
except TypeError:
cntrl = form.find_control(key)
if isinstance(cntrl, ListControl):
form[key] = [val]
else:
raise
if form.action in kwargs["conv"].my_endpoints():
return {"SAMLResponse": form["SAMLResponse"],
"RelayState": form["RelayState"]}
return self.do_click(form, **kwargs)
#noinspection PyUnusedLocal
    def chose(self, orig_response, path, **kwargs):
        """
        Sends an HTTP GET to a url built from the present url and the
        given relative path.
        :param orig_response: The original response
        :param path: The relative path to add to the base URL
        :return: The response the GET request returns
        """
if not path.startswith("http"):
try:
_url = orig_response.url
except KeyError:
_url = kwargs["location"]
part = urlparse(_url)
url = "%s://%s%s" % (part[0], part[1], path)
else:
url = path
logger.info("GET %s" % url)
return self.httpc.send(url, "GET")
#return resp, ""
    def post_form(self, orig_response, **kwargs):
        """
        The same as select_form but with no possibility of changing the
        content of the form.
        :param orig_response: The original response (as returned by requests)
        :return: The response do_click() returns
        """
response = RResponse(orig_response)
form = self.pick_form(response, **kwargs)
return self.do_click(form, **kwargs)
#noinspection PyUnusedLocal
def parse(self, orig_response, **kwargs):
# content is a form from which I get the SAMLResponse
response = RResponse(orig_response)
form = self.pick_form(response, **kwargs)
#form.backwards_compatible = False
if not form:
raise InteractionNeeded("Can't pick a form !!")
return {"SAMLResponse": form["SAMLResponse"],
"RelayState": form["RelayState"]}
#noinspection PyUnusedLocal
def interaction(self, args):
_type = args["type"]
if _type == "form":
return self.select_form
elif _type == "link":
return self.chose
elif _type == "response":
return self.parse
else:
return NoneFunc
# ========================================================================
class Action(object):
def __init__(self, args):
self.args = args or {}
self.request = None
def update(self, dic):
self.args.update(dic)
#noinspection PyUnusedLocal
def post_op(self, result, conv, args):
pass
def __call__(self, httpc, conv, location, response, content, features):
intact = Interaction(httpc)
function = intact.interaction(self.args)
try:
_args = self.args.copy()
except (KeyError, AttributeError):
_args = {}
_args.update({"location": location, "features": features, "conv": conv})
logger.info("<-- FUNCTION: %s" % function.__name__)
logger.info("<-- ARGS: %s" % _args)
result = function(response, **_args)
self.post_op(result, conv, _args)
return result
| rohe/saml2test | src/saml2test/interaction.py | Python | bsd-2-clause | 13,079 |
from kernel.output import Output, OutputResult
from io import StringIO
import sys
# @url: http://stackoverflow.com/a/16571630
# STDOUT capture class
class Capturing(list):
def __enter__(self):
self._stdout = sys.stdout
sys.stdout = self._stringio = StringIO()
return self
def __exit__(self, *args):
self.extend(self._stringio.getvalue().splitlines())
sys.stdout = self._stdout
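# Example: Capturing collects everything printed inside the with-block,
# one list entry per line (the raw buffer stays on ._stringio):
#
#   with Capturing() as out:
#       print("hello")
#   assert out == ["hello"]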
def test_main_output_func():
with Capturing() as output:
Output.do("Test")
assert output._stringio.getvalue().find("Test") != -1
    assert output._stringio.getvalue().find("Nonexistent") == -1
output_result_types = [
OutputResult.OK, OutputResult.Fail, OutputResult.Info,
OutputResult.Warn, OutputResult.Error, OutputResult.Log
]
for R in output_result_types:
with Capturing() as output:
Output.do("UnitTestCase test")
assert output._stringio.getvalue().find("UnitTestCase") != -1
        assert output._stringio.getvalue().find("Someothervalue") == -1
| vdjagilev/desefu | tests/output_test.py | Python | mit | 1,065 |
# Copyright (C) 2010 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Setup script for Google API Python client.
Also installs included versions of third party libraries, if those libraries
are not already installed.
"""
import setup_utils
has_setuptools = False
try:
from setuptools import setup
has_setuptools = True
except ImportError:
from distutils.core import setup
packages = [
'apiclient',
'oauth2client',
'apiclient.ext',
'apiclient.contrib',
'apiclient.contrib.buzz',
'apiclient.contrib.latitude',
'apiclient.contrib.moderator',
'uritemplate',
]
install_requires = []
py_modules = []
# (module to test for, install_requires to add if missing, packages to add if missing, py_modules to add if missing)
REQUIREMENTS = [
('httplib2', 'httplib2', 'httplib2', None),
('oauth2', 'oauth2', 'oauth2', None),
('gflags', 'python-gflags', None, ['gflags', 'gflags_validators']),
(['json', 'simplejson', 'django.utils'], 'simplejson', 'simplejson', None)
]
for import_name, requires, package, modules in REQUIREMENTS:
if setup_utils.is_missing(import_name):
if has_setuptools:
install_requires.append(requires)
else:
if package is not None:
packages.append(package)
else:
py_modules.extend(modules)
long_desc = """The Google API Client for Python is a client library for
accessing the Buzz, Moderator, and Latitude APIs."""
setup(name="google-api-python-client",
version="1.0beta2",
description="Google API Client Library for Python",
long_description=long_desc,
author="Joe Gregorio",
author_email="[email protected]",
url="http://code.google.com/p/google-api-python-client/",
install_requires=install_requires,
packages=packages,
py_modules=py_modules,
package_data={
'apiclient': ['contrib/*/*.json']
},
scripts=['bin/enable-app-engine-project'],
license="Apache 2.0",
keywords="google api client",
classifiers=['Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'License :: OSI Approved :: Apache Software License',
'Operating System :: POSIX',
'Topic :: Internet :: WWW/HTTP'])
| MapofLife/MOL | earthengine/google-api-python-client/setup.py | Python | bsd-3-clause | 2,777 |
# Copyright 2020 PerfKitBenchmarker Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Module containing XGBoost installation and cleanup functions."""
import posixpath
from absl import flags
from perfkitbenchmarker import linux_packages
from perfkitbenchmarker.linux_packages import nvidia_driver
_ENV = flags.DEFINE_string('xgboost_env', 'PATH=/opt/conda/bin:$PATH',
                           'The XGBoost install environment.')
_VERSION = flags.DEFINE_string('xgboost_version', '1.4.2',
'The XGBoost version.')
FLAGS = flags.FLAGS
def GetXgboostVersion(vm):
"""Returns the XGBoost version installed on the vm.
Args:
vm: the target vm on which to check the XGBoost version
Returns:
Installed python XGBoost version as a string
"""
stdout, _ = vm.RemoteCommand(
'echo -e "import xgboost\nprint(xgboost.__version__)" | '
f'{_ENV.value} python3'
)
return stdout.strip()
def Install(vm):
"""Installs XGBoost on the VM."""
vm.Install('build_tools')
install_dir = posixpath.join(linux_packages.INSTALL_DIR, 'xgboost')
vm.RemoteCommand('git clone --recursive https://github.com/dmlc/xgboost '
f'--branch v{_VERSION.value} {install_dir}')
nccl_make_option = ''
nccl_install_option = ''
if nvidia_driver.QueryNumberOfGpus(vm) > 1:
nccl_make_option = '-DUSE_NCCL=ON -DNCCL_ROOT=/usr/local/nccl2'
nccl_install_option = '--use-nccl'
cuda_env = ''
cuda_make_option = ''
cuda_install_option = ''
  if nvidia_driver.CheckNvidiaGpuExists(vm):
cuda_make_option = '-DUSE_CUDA=ON'
cuda_env = 'CUDACXX=/usr/local/cuda/bin/nvcc'
cuda_install_option = '--use-cuda'
build_dir = posixpath.join(install_dir, 'build')
package_dir = posixpath.join(install_dir, 'python-package')
vm.RemoteCommand(f'mkdir -p {build_dir}')
vm.RemoteCommand(f'cd {build_dir} && '
f'{cuda_env} cmake .. {cuda_make_option} {nccl_make_option}')
vm.RemoteCommand(f'cd {build_dir} && make -j4')
vm.RemoteCommand(f'cd {package_dir} && '
f'{_ENV.value} python3 setup.py install '
f'{cuda_install_option} {nccl_install_option}')
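# Illustrative usage sketch (the `vm` below is a hypothetical PKB VM object;
# absl flags must be parsed before _ENV/_VERSION are read):
#
#   Install(vm)            # clones the v{--xgboost_version} tag and builds it
#   GetXgboostVersion(vm)  # -> e.g. '1.4.2'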
| GoogleCloudPlatform/PerfKitBenchmarker | perfkitbenchmarker/linux_packages/xgboost.py | Python | apache-2.0 | 2,700 |
from app.extensions import db, bcrypt
from app.core.models import CRUDMixin
from datetime import datetime
class Users(CRUDMixin, db.Model):
__tablename__ = 'users'
id = db.Column(db.Integer, primary_key=True, autoincrement=True, nullable=False)
email = db.Column(db.String(200), unique=True, nullable=False)
firstname = db.Column(db.String(120), nullable=False)
lastname = db.Column(db.String(120), nullable=False)
password = db.Column(db.String(120), nullable=False)
created_at = db.Column(db.DateTime, nullable=False)
def __init__(self, email, firstname, lastname, password):
super().__init__()
self.created_at = datetime.utcnow()
self.email = email
self.password = bcrypt.generate_password_hash(password)
self.firstname = firstname
self.lastname = lastname
def __repr__(self):
return '<User {0}>'.format(self.email)
def verify_password(self, password):
return bcrypt.check_password_hash(self.password, password)
| lwalter/flask-angular-starter | app/user/models.py | Python | mit | 1,026 |
from mock import patch
from base import ClientTests, Response
from pulp_node import constants
from pulp_node.extensions.consumer import commands
NODE_ID = 'test_node'
REPOSITORY_ID = 'test_repository'
LOAD_CONSUMER_API = 'pulp_node.extensions.consumer.commands.load_consumer_id'
NODE_ACTIVATED_CHECK = 'pulp_node.extensions.consumer.commands.node_activated'
REPO_ENABLED_CHECK = 'pulp_node.extensions.consumer.commands.repository_enabled'
NODE_ACTIVATE_API = 'pulp.bindings.consumer.ConsumerAPI.update'
BIND_API = 'pulp.bindings.consumer.BindingsAPI.bind'
UNBIND_API = 'pulp.bindings.consumer.BindingsAPI.unbind'
NON_NODES_DISTRIBUTORS_ONLY = [
{'id': 1, 'distributor_type_id': 1},
{'id': 2, 'distributor_type_id': 2},
]
MIXED_DISTRIBUTORS = [
{'id': 1, 'distributor_type_id': 1},
{'id': 2, 'distributor_type_id': 2},
{'id': 3, 'distributor_type_id': constants.HTTP_DISTRIBUTOR},
{'id': 4, 'distributor_type_id': constants.HTTP_DISTRIBUTOR},
]
class TestActivationCommands(ClientTests):
@patch(NODE_ACTIVATED_CHECK, return_value=False)
@patch(LOAD_CONSUMER_API, return_value=NODE_ID)
@patch(NODE_ACTIVATE_API, return_value=Response(200, {}))
def test_activate(self, mock_binding, *unused):
# Test
command = commands.NodeActivateCommand(self.context)
keywords = {commands.STRATEGY_OPTION.keyword: constants.DEFAULT_STRATEGY}
command.run(**keywords)
# Verify
delta = {
'notes': {
constants.NODE_NOTE_KEY: True,
constants.STRATEGY_NOTE_KEY: constants.DEFAULT_STRATEGY
}
}
mock_binding.assert_called_with(NODE_ID, delta)
@patch(NODE_ACTIVATED_CHECK, return_value=True)
@patch(LOAD_CONSUMER_API, return_value=NODE_ID)
@patch(NODE_ACTIVATE_API, return_value=Response(200, {}))
def test_activate_already_activated(self, mock_binding, *unused):
command = commands.NodeActivateCommand(self.context)
keywords = {commands.STRATEGY_OPTION.keyword: constants.DEFAULT_STRATEGY}
command.run(**keywords)
# Verify
self.assertFalse(mock_binding.called)
@patch(LOAD_CONSUMER_API, return_value=NODE_ID)
@patch(NODE_ACTIVATED_CHECK, return_value=True)
@patch(NODE_ACTIVATE_API, return_value=Response(200, {}))
def test_deactivate(self, mock_binding, mock_activated, *unused):
# Test
command = commands.NodeDeactivateCommand(self.context)
command.run()
# Verify
delta = {
'notes': {
constants.NODE_NOTE_KEY: None,
constants.STRATEGY_NOTE_KEY: None
}
}
mock_activated.assert_called_with(self.context, NODE_ID)
mock_binding.assert_called_with(NODE_ID, delta)
@patch(LOAD_CONSUMER_API, return_value=NODE_ID)
@patch(NODE_ACTIVATED_CHECK, return_value=False)
@patch(NODE_ACTIVATE_API, return_value=Response(200, {}))
def test_deactivate_not_activated(self, mock_binding, mock_activated, *unused):
# Test
command = commands.NodeDeactivateCommand(self.context)
command.run()
# Verify
mock_activated.assert_called_with(self.context, NODE_ID)
self.assertFalse(mock_binding.called)
class TestBindCommands(ClientTests):
@patch(LOAD_CONSUMER_API, return_value=NODE_ID)
@patch(NODE_ACTIVATED_CHECK, return_value=True)
@patch(REPO_ENABLED_CHECK, return_value=True)
@patch(BIND_API, return_value=Response(200, {}))
def test_bind(self, mock_binding, *unused):
# Test
command = commands.NodeBindCommand(self.context)
keywords = {
commands.OPTION_REPO_ID.keyword: REPOSITORY_ID,
commands.STRATEGY_OPTION.keyword: constants.DEFAULT_STRATEGY,
}
command.run(**keywords)
# Verify
self.assertTrue(commands.OPTION_REPO_ID in command.options)
self.assertTrue(commands.STRATEGY_OPTION in command.options)
mock_binding.assert_called_with(
NODE_ID,
REPOSITORY_ID,
constants.HTTP_DISTRIBUTOR,
notify_agent=False,
binding_config={constants.STRATEGY_KEYWORD: constants.DEFAULT_STRATEGY})
@patch(LOAD_CONSUMER_API, return_value=NODE_ID)
@patch(NODE_ACTIVATED_CHECK, return_value=True)
@patch(REPO_ENABLED_CHECK, return_value=True)
@patch(BIND_API, return_value=Response(200, {}))
def test_bind_with_strategy(self, mock_binding, *unused):
# Test
command = commands.NodeBindCommand(self.context)
keywords = {
commands.OPTION_REPO_ID.keyword: REPOSITORY_ID,
commands.STRATEGY_OPTION.keyword: constants.MIRROR_STRATEGY,
}
command.run(**keywords)
# Verify
self.assertTrue(commands.OPTION_REPO_ID in command.options)
self.assertTrue(commands.STRATEGY_OPTION in command.options)
mock_binding.assert_called_with(
NODE_ID,
REPOSITORY_ID,
constants.HTTP_DISTRIBUTOR,
notify_agent=False,
binding_config={constants.STRATEGY_KEYWORD: constants.MIRROR_STRATEGY})
@patch(LOAD_CONSUMER_API, return_value=NODE_ID)
@patch(NODE_ACTIVATED_CHECK, return_value=False)
@patch(REPO_ENABLED_CHECK, return_value=False)
@patch(BIND_API, return_value=Response(200, {}))
def test_bind_not_activated(self, mock_binding, mock_repo, mock_node, mock_cons):
# Test
command = commands.NodeBindCommand(self.context)
keywords = {
commands.OPTION_REPO_ID.keyword: REPOSITORY_ID,
commands.STRATEGY_OPTION.keyword: constants.MIRROR_STRATEGY,
}
command.run(**keywords)
# Verify
self.assertTrue(commands.OPTION_REPO_ID in command.options)
self.assertTrue(commands.STRATEGY_OPTION in command.options)
self.assertTrue(mock_node.called)
self.assertFalse(mock_repo.called)
self.assertFalse(mock_binding.called)
@patch(LOAD_CONSUMER_API, return_value=NODE_ID)
@patch(NODE_ACTIVATED_CHECK, return_value=True)
@patch(REPO_ENABLED_CHECK, return_value=False)
@patch(BIND_API, return_value=Response(200, {}))
def test_bind_not_enabled(self, mock_binding, mock_repo, mock_node, mock_cons):
# Test
command = commands.NodeBindCommand(self.context)
keywords = {
commands.OPTION_REPO_ID.keyword: REPOSITORY_ID,
commands.STRATEGY_OPTION.keyword: constants.MIRROR_STRATEGY,
}
command.run(**keywords)
# Verify
self.assertTrue(commands.OPTION_REPO_ID in command.options)
self.assertTrue(commands.STRATEGY_OPTION in command.options)
self.assertTrue(mock_node.called)
self.assertTrue(mock_repo.called)
self.assertFalse(mock_binding.called)
@patch(LOAD_CONSUMER_API, return_value=NODE_ID)
@patch(NODE_ACTIVATED_CHECK, return_value=True)
@patch(UNBIND_API, return_value=Response(200, {}))
def test_unbind(self, mock_binding, *unused):
# Test
command = commands.NodeUnbindCommand(self.context)
keywords = {commands.OPTION_REPO_ID.keyword: REPOSITORY_ID}
command.run(**keywords)
# Verify
self.assertTrue(commands.OPTION_REPO_ID in command.options)
mock_binding.assert_called_with(NODE_ID, REPOSITORY_ID, constants.HTTP_DISTRIBUTOR)
| ulif/pulp | nodes/test/nodes_tests/test_consumer_extensions.py | Python | gpl-2.0 | 7,438 |
from __future__ import absolute_import, print_function, division
import theano
from theano import tensor
from theano.gof.opt import check_stack_trace
from theano.tensor.nnet.blocksparse import (
sparse_block_dot, sparse_block_gemv_inplace, sparse_block_outer_inplace,
sparse_block_gemv, sparse_block_outer)
def test_blocksparse_inplace_gemv_opt():
b = tensor.fmatrix()
W = tensor.ftensor4()
h = tensor.ftensor3()
iIdx = tensor.lmatrix()
oIdx = tensor.lmatrix()
o = sparse_block_dot(W, h, iIdx, b, oIdx)
f = theano.function([W, h, iIdx, b, oIdx], o)
if theano.config.mode == "FAST_COMPILE":
assert not f.maker.fgraph.toposort()[-1].op.inplace
assert check_stack_trace(f, ops_to_check=[sparse_block_gemv])
else:
assert f.maker.fgraph.toposort()[-1].op.inplace
assert check_stack_trace(f, ops_to_check=[sparse_block_gemv_inplace])
def test_blocksparse_inplace_outer_opt():
b = tensor.fmatrix()
W = tensor.ftensor4()
h = tensor.ftensor3()
iIdx = tensor.lmatrix()
oIdx = tensor.lmatrix()
o = sparse_block_dot(W, h, iIdx, b, oIdx)
f = theano.function([W, h, iIdx, b, oIdx],
[o, tensor.grad(o.sum(), wrt=W)])
if theano.config.mode == "FAST_COMPILE":
assert not f.maker.fgraph.toposort()[-1].op.inplace
assert check_stack_trace(f, ops_to_check=sparse_block_outer)
else:
assert f.maker.fgraph.toposort()[-1].op.inplace
assert check_stack_trace(f, ops_to_check=sparse_block_outer_inplace)
| JazzeYoung/VeryDeepAutoEncoder | theano/tensor/nnet/tests/test_opt.py | Python | bsd-3-clause | 1,557 |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""GRPC Hook"""
from typing import Any, Callable, Dict, Generator, List, Optional
import grpc
from google import auth as google_auth
from google.auth import jwt as google_auth_jwt
from google.auth.transport import (
grpc as google_auth_transport_grpc,
requests as google_auth_transport_requests,
)
from airflow.exceptions import AirflowConfigException
from airflow.hooks.base import BaseHook
class GrpcHook(BaseHook):
"""
General interaction with gRPC servers.
:param grpc_conn_id: The connection ID to use when fetching connection info.
:param interceptors: a list of gRPC interceptor objects which would be applied
to the connected gRPC channel. None by default.
Each interceptor should based on or extends the four
official gRPC interceptors, eg, UnaryUnaryClientInterceptor,
UnaryStreamClientInterceptor, StreamUnaryClientInterceptor,
StreamStreamClientInterceptor.
:param custom_connection_func: The customized connection function to return gRPC channel.
A callable that accepts the connection as its only arg.
"""
conn_name_attr = 'grpc_conn_id'
default_conn_name = 'grpc_default'
conn_type = 'grpc'
hook_name = 'GRPC Connection'
@staticmethod
def get_connection_form_widgets() -> Dict[str, Any]:
"""Returns connection widgets to add to connection form"""
from flask_appbuilder.fieldwidgets import BS3TextFieldWidget
from flask_babel import lazy_gettext
from wtforms import StringField
return {
"extra__grpc__auth_type": StringField(
lazy_gettext('Grpc Auth Type'), widget=BS3TextFieldWidget()
),
"extra__grpc__credential_pem_file": StringField(
lazy_gettext('Credential Keyfile Path'), widget=BS3TextFieldWidget()
),
"extra__grpc__scopes": StringField(
lazy_gettext('Scopes (comma separated)'), widget=BS3TextFieldWidget()
),
}
def __init__(
self,
grpc_conn_id: str = default_conn_name,
interceptors: Optional[List[Callable]] = None,
custom_connection_func: Optional[Callable] = None,
) -> None:
super().__init__()
self.grpc_conn_id = grpc_conn_id
self.conn = self.get_connection(self.grpc_conn_id)
self.extras = self.conn.extra_dejson
self.interceptors = interceptors if interceptors else []
self.custom_connection_func = custom_connection_func
def get_conn(self) -> grpc.Channel:
base_url = self.conn.host
if self.conn.port:
base_url = base_url + ":" + str(self.conn.port)
auth_type = self._get_field("auth_type")
if auth_type == "NO_AUTH":
channel = grpc.insecure_channel(base_url)
elif auth_type in {"SSL", "TLS"}:
credential_file_name = self._get_field("credential_pem_file")
with open(credential_file_name, "rb") as credential_file:
creds = grpc.ssl_channel_credentials(credential_file.read())
channel = grpc.secure_channel(base_url, creds)
elif auth_type == "JWT_GOOGLE":
credentials, _ = google_auth.default()
jwt_creds = google_auth_jwt.OnDemandCredentials.from_signing_credentials(credentials)
channel = google_auth_transport_grpc.secure_authorized_channel(jwt_creds, None, base_url)
elif auth_type == "OATH_GOOGLE":
scopes = self._get_field("scopes").split(",")
credentials, _ = google_auth.default(scopes=scopes)
request = google_auth_transport_requests.Request()
channel = google_auth_transport_grpc.secure_authorized_channel(credentials, request, base_url)
elif auth_type == "CUSTOM":
if not self.custom_connection_func:
raise AirflowConfigException(
"Customized connection function not set, not able to establish a channel"
)
channel = self.custom_connection_func(self.conn)
else:
raise AirflowConfigException(
"auth_type not supported or not provided, channel cannot be established, "
f"given value: {str(auth_type)}"
)
if self.interceptors:
for interceptor in self.interceptors:
channel = grpc.intercept_channel(channel, interceptor)
return channel
def run(
self, stub_class: Callable, call_func: str, streaming: bool = False, data: Optional[dict] = None
) -> Generator:
"""Call gRPC function and yield response to caller"""
if data is None:
data = {}
with self.get_conn() as channel:
stub = stub_class(channel)
try:
rpc_func = getattr(stub, call_func)
response = rpc_func(**data)
if not streaming:
yield response
else:
yield from response
except grpc.RpcError as ex:
self.log.exception(
"Error occurred when calling the grpc service: %s, method: %s \
status code: %s, error details: %s",
stub.__class__.__name__,
call_func,
ex.code(),
ex.details(),
)
raise ex
def _get_field(self, field_name: str) -> str:
"""
Fetches a field from extras, and returns it. This is some Airflow
magic. The grpc hook type adds custom UI elements
to the hook page, which allow admins to specify scopes, credential pem files, etc.
They get formatted as shown below.
"""
full_field_name = f'extra__grpc__{field_name}'
return self.extras[full_field_name]
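# Illustrative usage sketch (`HelloStub` and `request_msg` are hypothetical,
# standing in for a stub generated from a .proto; assumes a 'grpc_default'
# connection with auth_type NO_AUTH). run() is a generator, so iterate even
# for unary calls:
#
#   hook = GrpcHook(grpc_conn_id='grpc_default')
#   for reply in hook.run(HelloStub, 'SayHello', data={'request': request_msg}):
#       print(reply)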
| Acehaidrey/incubator-airflow | airflow/providers/grpc/hooks/grpc.py | Python | apache-2.0 | 6,659 |
import ctypes
from ..codeprinter import CodePrinter
from .. import expression as e
from .. import functions as f
import expresso.visitor as visitor
from mpmath import mp
class c_complex(ctypes.Structure):
_fields_ = [('real',ctypes.c_double),('imag',ctypes.c_double)]
def __str__(self):
return str(self.real) + '+' + str(self.imag) + 'j'
def __repr__(self):
return '(' + str(self.real) + ',' + str(self.imag) + ')'
def __complex__(self):
return complex(self.real,self.imag)
def is_complex(self):
return True
@staticmethod
def np_type():
import numpy
return numpy.complex128
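# Minimal sketch: c_complex mirrors the C-side `struct c_complex` emitted in
# the preamble below, so complex doubles can round-trip through ctypes, e.g.:
#
#   z = c_complex(1.5, -2.0)
#   complex(z)            # -> (1.5-2j)
#   c_complex.np_type()   # numpy dtype used for vectorized return values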
class CCodePrinter(CodePrinter):
def __init__(self):
super(CCodePrinter,self).__init__()
self.includes = {'cmath','complex','thread','future','vector'}
self.namespaces = {'std'}
self.typenames = {
f.Types.Boolean:'bool',
f.Types.Natural:'unsigned',
f.Types.Integer:'int',
f.Types.Rational:'double',
f.Types.Real:'double',
f.Types.Complex:'c_complex',
None:'c_complex'
}
self.ctype_map = {
'bool':ctypes.c_bool,
'unsigned':ctypes.c_uint,
'int':ctypes.c_int,
'double':ctypes.c_double,
'c_complex':c_complex
}
self.type_converters = {}
self.need_conversion = {}
self.preamble = set()
self.globals = set()
c_complex_type = '''
struct c_complex{ double real; double imag; };
inline std::complex<double> to_cpp_complex(c_complex z){ return std::complex<double>(z.real,z.imag); }
inline c_complex to_c_complex(std::complex<double> z){ return c_complex{z.real(),z.imag()}; }
'''
self.preamble.add(c_complex_type)
self.type_converters['c_complex'] = (lambda x:'to_cpp_complex(%s)' % x,lambda x:'to_c_complex(%s)' % x)
complex_operators = '''
inline complex<double> operator{0}(complex<double> lhs, const double & rhs){{
return lhs {0} complex<double>(rhs);
}}
inline complex<double> operator{0}(const double & lhs,complex<double> rhs){{
return complex<double>(lhs) {0} rhs;
}}
'''
self.preamble.update(set([complex_operators.format(op) for op in ['+', '-', '*', '/']]))
parallel_for = '''
inline unsigned hardware_thread_count(){ return std::thread::hardware_concurrency(); }
template<typename C1,typename C2,typename F> void parallel_for(C1 start,C2 end,F f,uintptr_t thread_count = hardware_thread_count()){
if(end-start < thread_count) thread_count = end-start;
std::vector<std::future<void>> handles(thread_count);
C2 block_size = (end - start)/thread_count;
for(uintptr_t i=0;i<thread_count-1;++i){
handles[i] = std::async(std::launch::async,[=](){
C2 begin = start+block_size*i, end = start+block_size*(i+1);
for(C2 j=begin;j<end;++j){ f(j); }
});
}
handles[thread_count-1] = std::async([&](){
C2 begin = start+block_size*(thread_count-1);
for(C2 j=begin;j<end;++j)f(j);
});
for(auto & handle:handles) handle.wait();
}
'''
self.preamble.add(parallel_for)
ndarray = '''
template<size_t _size,size_t _stride, size_t... sizes> struct ndarray_index_calculator {
using rest = ndarray_index_calculator<sizes...>;
static size_t size(){ return _size; }
template <typename ... Args> static bool is_valid(size_t idx,Args ... args){ if(!rest::is_valid(args...)) return false; return idx < size(); }
static size_t stride(){ return _stride; }
template <typename ... Args> static size_t get_index(size_t idx,Args ... args){ return stride() * idx + rest::get_index(args...); }
};
template<size_t _size,size_t _stride> struct ndarray_index_calculator <_size,_stride> {
static size_t size(){ return _size; }
static bool is_valid(size_t idx){ return idx < size(); }
static size_t stride(){ return _stride; }
static size_t get_index(size_t idx){ return idx; }
};
template <class T,size_t ... size_stride> struct mapped_ndarray{
T * data;
T default_value;
using index_calculator = ndarray_index_calculator<size_stride...>;
mapped_ndarray(T * d,const T &_default_value = 0):data(d),default_value(_default_value){ }
template <typename ... Args> T & operator()(Args ... indices){
if(!index_calculator::is_valid(indices...)){ return default_value; }
return data[index_calculator::get_index(indices...)];
}
};
'''
self.preamble.add(ndarray)
self.preamble.add('''template <typename T> int sign(T val) { return (T(0) <= val) - (val < T(0)); }''')
def needs_brackets_in(self,expr,parent):
if expr.is_atomic:
return False
return expr.function.is_operator
@visitor.on('expr',parent = CodePrinter)
def visit(self,expr):
raise ValueError('cannot print expression %s' % expr)
@visitor.function(f.CustomFunction)
def visit(self,expr):
f = expr.args[0].value
if hasattr(f,'ccode'):
self.preamble.add(f.ccode)
else:
raise ValueError('cannot compile custom function %s' % expr)
return "%s(%s)" % (f.name,','.join([self(arg) for arg in expr.args[1:]]))
@visitor.function(f.exponentiation)
def visit(self,expr):
return 'pow(%s,%s)' % (self(expr.args[0]),self(expr.args[1]))
@visitor.atomic(f.I)
def visit(self,expr):
return "std::complex<double>(0,1)"
@visitor.atomic(f.pi)
def visit(self,expr):
return "M_PI"
@visitor.atomic(f.e)
def visit(self,expr):
return "M_E"
@visitor.function(f.Xor)
def visit(self,expr):
return self.print_binary_operator(expr,symbol='^')
@visitor.function(f.Not)
def visit(self,expr):
return "!(%s)" % self(expr.args[0])
@visitor.function(f.equal)
def visit(self,expr):
return self.print_binary_operator(expr,'==')
@visitor.function(f.fraction)
def visit(self,expr):
return "1./(%s)" % self(expr.args[0])
@visitor.function(f.mod)
def visit(self,expr):
return "fmod(%s,%s)" % (self(expr.args[0]),self(expr.args[1]))
@visitor.function(f.InnerPiecewise)
def visit(self,expr):
parts = ['(%s)?(%s):' % (self(arg.args[1]),self(arg.args[0])) for arg in expr.args]
return '(%s%s)' % (''.join(parts),self(e.S(0)))
@visitor.symbol
def visit(self,expr):
converter = self.need_conversion.get(expr)
if converter:
if isinstance(converter,tuple):
return converter[0](expr)
else:
return converter(expr)
return expr.name
@visitor.atomic(e.S(True))
def visit(self,expr):
return 'true'
@visitor.atomic(e.S(False))
def visit(self,expr):
return 'false'
def print_includes(self):
return '\n'.join(['#include <%s>' % name for name in self.includes])
def print_namespaces(self):
return '\n'.join(['using namespace %s;' % namespace for namespace in self.namespaces])
def print_auxiliary_code(self):
return '%s\n%s' % ('\n'.join(self.preamble),'\n'.join(self.globals))
def print_file(self,*function_definitions):
function_code = [self.generate_function(f) for f in function_definitions]
function_code += [self.generate_vector_function(f,use_previous_definition=True) for f in function_definitions]
return "\n\n".join([self.print_includes(),
self.print_namespaces(),
self.print_auxiliary_code()] + function_code )
def print_typename(self,expr):
return self.typenames.get(expr,self.typenames[None])
def print_vector_typename(self,expr):
return "%s*" % self.typenames.get(expr,self.typenames[None])
def get_ctype(self,typename):
if typename[-1] == '*':
return ctypes.POINTER(self.get_ctype(typename[:-1]))
return self.ctype_map[typename]
@visitor.function(f.unfoldable)
def visit(self,expr):
return self.visit(expr.args[0])
@visitor.function(f.ArrayAccess)
def visit(self,expr):
arr = expr.args[0].value
pointer = arr.ctypes.data
type = f.type_converters.numpy_c_typenames[arr.dtype.name]
size = ','.join(["%s,%s" % (size,stride/arr.itemsize) for size,stride in zip(arr.shape,arr.strides)])
name = expr.args[0].name
self.globals.add('mapped_ndarray<%s,%s> %s((%s*)%s);' % (type,size,name,type,pointer))
return "%s(%s)" % (name,','.join([self(arg) for arg in reversed(expr.args[1:])]))
@visitor.obj(mp.mpf)
def visit(self,expr):
return repr(float(expr.value))
@visitor.obj(mp.mpc)
def visit(self,expr):
v = expr.value
return "complex<double>(%s,%s)" % (repr(float(v.real)),repr(float(v.imag)))
def optimize_function(self,expr):
from expresso.pycas.evaluators.optimizers import optimize_for_compilation
return optimize_for_compilation(expr)
def get_body_code(self,definition):
if definition.return_type == None:
return_type = self.print_typename(f.Type(definition.expr).evaluate())
else:
return_type = self.print_typename(definition.return_type)
f_code = self(self.optimize_function(definition.expr))
if return_type in self.type_converters and isinstance(self.type_converters[return_type],tuple):
f_code = self.type_converters[return_type][1](f_code)
return f_code
def generate_function(self,definition):
if definition.return_type == None:
return_type = self.print_typename(f.Type(definition.expr).evaluate())
else:
return_type = self.print_typename(definition.return_type)
args = definition.args
if definition.arg_types == None:
argument_types = [self.print_typename(f.Type(arg).evaluate()) for arg in args]
else:
argument_types = [self.print_typename(f.Type(arg).evaluate()) for arg in definition.arg_types]
self.need_conversion = {arg:self.type_converters[t]
for arg,t in zip(args,argument_types)
if t in self.type_converters}
f_code = self.get_body_code(definition)
formatted = (return_type, definition.name,
','.join(['%s %s' % (type,arg.name) for arg,type in zip(args,argument_types)]),
f_code)
definition.c_return_type = self.get_ctype(return_type)
definition.c_arg_types = [self.get_ctype(arg_type) for arg_type in argument_types]
return 'extern "C"{\n%s %s(%s){\n\treturn %s;\n}\n}' % formatted
def vectorized_name(self,name):
return "__%s_vector" % name
def generate_vector_function(self,definition,use_previous_definition = False):
        if definition.return_type is None:
return_type = self.print_vector_typename(f.Type(definition.expr).evaluate())
else:
return_type = self.print_vector_typename(definition.return_type)
args = definition.args
        if definition.arg_types is None:
argument_types = [self.print_vector_typename(f.Type(arg).evaluate()) for arg in args]
else:
argument_types = [self.print_vector_typename(f.Type(arg).evaluate()) for arg in definition.arg_types]
self.need_conversion.update({arg:lambda a:'%s[__i]' % a for arg in args})
argument_types = ['unsigned',return_type] + argument_types
        if not use_previous_definition:
f_code = self.get_body_code(definition)
else:
f_code = '%s(%s)' % (definition.name,','.join(self(arg) for arg in definition.args))
if definition.parallel:
f_code = 'parallel_for(0,__size,[&](unsigned __i){ __res[__i] = %s; }); ' % f_code
else:
f_code = 'for(unsigned __i = 0; __i<__size;++__i) __res[__i] = %s;' % f_code
rargument_types = [argument_types[0]] + ['%s __restrict__ ' % t for t in argument_types[1:]]
formatted_args = ','.join(['%s %s' % vardef for vardef in
zip(rargument_types,['__size','__res'] + list(args))])
formatted = (self.vectorized_name(definition.name), formatted_args, f_code)
definition.c_vectorized_arg_types = [self.get_ctype(arg_type) for arg_type in argument_types]
return 'extern "C"{\nvoid %s(%s){\n\t%s\n}\n}' % formatted
class CompilerError(Exception):
def __init__(self, message):
if isinstance(message, unicode):
super(CompilerError, self).__init__(message.encode('utf-8'))
self.message = message
elif isinstance(message, str):
super(CompilerError, self).__init__(message)
self.message = message.decode('utf-8')
else:
raise TypeError
def __unicode__(self):
return self.message
def ccompile(*function_definitions,**kwargs):
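    """Compile the given function definitions into a temporary shared
    library and return an object exposing each compiled function by name.

    Recognized keyword arguments (a sketch based on the code below):
    ``flags`` -- extra compiler flags; ``print_output`` / ``print_warnings``
    -- echo the compiler's stdout/stderr.
    """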
import tempfile
import shutil
import ctypes
import numpy as np
from subprocess import Popen, PIPE
from os import environ
ccode_printer = CCodePrinter()
code = ccode_printer.print_file(*function_definitions)
output_directory = tempfile.mkdtemp()
object_file = output_directory+'/'+'pycas_compiled_expression.o'
flags = kwargs.pop('flags',[])
p = Popen([environ.get('CXX','g++'),'-o',object_file] + flags + ['-c','-xc++','-std=c++11','-ffast-math','-O3','-fPIC','-'],stdin=PIPE, stdout=PIPE, stderr=PIPE)
p.stdin.write(code)
p.stdin.close()
return_code = p.wait()
    if return_code != 0:
raise CompilerError("Cannot compile expression: " + p.stderr.read().decode('utf-8'))
print_output = kwargs.pop('print_output',False)
print_warnings = print_output or kwargs.pop('print_warnings',False)
if print_warnings:
print p.stderr.read()
if print_output:
print p.stdout.read()
shared_library = output_directory+'/'+'pycas_compiled_expression.so'
    p = Popen([environ.get('CXX','g++'),'-shared','-o',shared_library,object_file],stdin=PIPE, stdout=PIPE, stderr=PIPE)
p.stdin.close()
return_code = p.wait()
    if return_code != 0:
        raise RuntimeError("Cannot convert to shared library: " + p.stderr.read().decode('utf-8'))
if print_warnings:
print p.stderr.read()
if print_output:
print p.stdout.read()
lib = ctypes.cdll.LoadLibrary(shared_library)
shutil.rmtree(output_directory)
compiled_functions = {}
class CompiledFunction(object):
def __init__(self,cf,cf_vector):
self.cf = cf
self.cf_vector = cf_vector
def __call__(self,*args,**kwargs):
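            # Scalar call when there are no sequence/array arguments;
            # otherwise every argument is broadcast to a common shape and
            # the call is dispatched to the vectorized entry point.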
            if len(args) == 0:
return self.cf()
if any([isinstance(arg,(list,tuple)) for arg in args]):
argtypes = self.cf_vector.argtypes
args = [np.array(arg,dtype=t) for t,arg in zip(argtypes[2:],args)]
if any([isinstance(arg,np.ndarray) for arg in args]):
argtypes = self.cf_vector.argtypes
shape = None
for arg in args:
if isinstance(arg,np.ndarray):
                    if shape is None:
shape = arg.shape
else:
if arg.shape != shape:
raise AttributeError('c function got arguments with different shapes')
args = [arg if isinstance(arg,np.ndarray) else arg * np.ones(shape) for arg in args]
args = [np.ascontiguousarray(arg,dtype=t._type_) for t,arg in zip(argtypes[2:],args)]
if argtypes[1]._type_ == c_complex:
restype = c_complex.np_type()
else:
restype = argtypes[1]._type_
res = kwargs.get('res')
if res is None:
res = np.zeros(args[0].shape,dtype=restype)
else:
assert res.dtype == restype
assert res.shape == shape
assert res.flags['C_CONTIGUOUS']
call_args = [res.size,res.ctypes.data_as(argtypes[1])]
call_args += [arg.ctypes.data_as(t) for t,arg in zip(argtypes[2:],args)]
self.cf_vector(*call_args)
return res
return self.cf(*args)
def address(self):
return ctypes.cast(self.cf, ctypes.c_void_p).value
class CompiledLibrary(object):
def __init__(self,lib,code):
self.lib = lib
self.code = code
res = CompiledLibrary(lib,code)
for definition in function_definitions:
f = getattr(lib,definition.name)
f.argtypes = definition.c_arg_types
f.restype = definition.c_return_type
f_vector = getattr(lib, ccode_printer.vectorized_name(definition.name))
f_vector.argtypes = definition.c_vectorized_arg_types
f_vector.restype = None
setattr(res,definition.name,CompiledFunction(f,f_vector))
return res
| TheLartians/Expresso | expresso/pycas/compilers/c_compiler.py | Python | mit | 17,186 |
"""
Author: Zhang Chengsheng, @2018.01.29
test in python 3.6
Input: gene name list (symbol or any id format)
Output: 'gene name \t search name \t Also known as id list \n'
input example:
HLA-A
TAS2R43
PRSS3
RBMXL1
DDX11
ZNF469
SLC35G5
GOLGA6L6
PRB2
"""
import os,sys,time
from urllib.request import urlopen, Request
from urllib.error import URLError, HTTPError
from urllib.request import quote
import re
from bs4 import BeautifulSoup
def ncbi_Crawler(gene_name):
url_1 = 'https://www.ncbi.nlm.nih.gov/gene/?term=' + gene_name #?term='(' + gene_name + ')+AND+"Homo+sapiens"%5Bporgn%3A__txid9606%5D'
    try:
        # urlopen() would treat a second positional argument as POST data,
        # so the custom headers are attached through a Request object.
        headers = {'Host': 'www.ncbi.nlm.nih.gov',
                   'Cache-Control': 'max-age=0',
                   'Upgrade-Insecure-Requests': '1',
                   'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/60.0.3112.101 Safari/537.36',
                   'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8',
                   'Accept-Language': 'zh-CN,zh;q=0.9',
                   }
        html = urlopen(Request(url_1, headers=headers))
time.sleep(0.5)
except HTTPError as e:
soup = 'none'
print('HTTP ERROR')
print(e)
except URLError as e:
soup = 'none'
print('URL error')
print(e)
except ValueError as e:
soup = 'none'
print('Value ERROR')
print(e)
except OSError as e:
soup = 'none'
print('OSError')
print(e)
else:
soup = BeautifulSoup(html, 'lxml')
finally:
return str(soup)
#print(str(soup))
def re_match(strings):
"""balabala"""
regular = '\<span class="highlight" style="background-color:"\>.+?\<td class="col-omim"\>'
regular_c = re.compile(regular)
b = re.findall(regular_c, strings)
res_list = []
for i in b:
if 'Homo sapiens' in i:
res_list.append(i)
if not res_list:
return 'no result'
re_sub = '<.*?>'
re_sub_c = re.compile(re_sub)
res_line = []
if res_list:
for i in res_list:
res_line1 = []
for j in i.split('</td><td>'):
# print(j)
j = j.replace('<span class="gene-id">', '\t')
test = re.sub(re_sub_c, '', j)
res_line1.append(test)
# print(test)
# print(j)
res_line.append(res_line1)
return res_line
# ['WASHC2A\tID: 387680', 'WASH complex subunit 2A [Homo sapiens (human)]', 'Chromosome 10, NC_000010.11 (50067888..50133509)', 'FAM21A, FAM21B, bA56A21.1, bA98I6.1']
def text_writing(gene_name,res_of_re_match):
"""input: result of re_match()"""
if len(res_of_re_match) == 0:
res_line = str(gene_name) + '\n'
return res_line
elif len(res_of_re_match) == 1:
res = res_of_re_match[0]
gene_name1 = res[0].split('\t')[0]
res_line = str(gene_name).strip() + '\t' + str(gene_name1) + '\t' + str(res[-1]) + '\n'
return res_line
elif len(res_of_re_match) > 1:
res_line = ''
for i in res_of_re_match:
res = i
gene_name1 = res[0].split('\t')[0]
            res_line += '[multi]' + str(gene_name).strip() + '\t' + str(gene_name1) + '\t' + str(res[-1]) + '\n'
return res_line
else:
return gene_name + '\ttext_writing error\n'
def main(input):
""""""
output_file = input + '.out'
output_file_o = open(output_file,'w')
input_o = open(input,'r')
gene_list = input_o.readlines()
input_o.close()
count = 0
for i in gene_list:
count += 1
print(count,' genes start at: ',time.asctime())
re_gene = ncbi_Crawler(str(i).strip('\n'))
if re_gene != 'none':
res = re_match(str(re_gene))
if res != 'no result':
res_line = text_writing(i,res)
#print(res_line)
output_file_o.write(res_line)
else:
output_file_o.write(i)
output_file_o.write('\t')
output_file_o.write('no result')
output_file_o.write('\n')
else:
output_file_o.write(i)
output_file_o.write('\t')
output_file_o.write('network error')
output_file_o.write('\n')
output_file_o.close()
#probe_test = "D:\\zcs-genex\\180129\\IDT_probe\\probe_text.txt"
inputfile = sys.argv[1]
main(inputfile)
| captorr/ngs | scripts/ncbi_symbol_used_to_be.py | Python | gpl-3.0 | 4,534 |
# Modified from function.py ---
import numpy as np
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import moose
simtime = 1.0
def test_example():
moose.Neutral('/model')
function = moose.Function('/model/function')
function.c['c0'] = 1.0
function.c['c1'] = 2.0
#function.x.num = 1
function.expr = 'c0 * exp(c1*x0) * cos(y0) + sin(t)'
# mode 0 - evaluate function value, derivative and rate
# mode 1 - just evaluate function value,
# mode 2 - evaluate derivative,
# mode 3 - evaluate rate
function.mode = 0
function.independent = 'y0'
nsteps = 1000
xarr = np.linspace(0.0, 1.0, nsteps)
# Stimulus tables allow you to store sequences of numbers which
# are delivered via the 'output' message at each time step. This
# is a placeholder and in real scenario you will be using any
# sourceFinfo that sends out a double value.
input_x = moose.StimulusTable('/xtab')
input_x.vector = xarr
input_x.startTime = 0.0
input_x.stepPosition = xarr[0]
input_x.stopTime = simtime
moose.connect(input_x, 'output', function.x[0], 'input')
yarr = np.linspace(-np.pi, np.pi, nsteps)
input_y = moose.StimulusTable('/ytab')
input_y.vector = yarr
input_y.startTime = 0.0
input_y.stepPosition = yarr[0]
input_y.stopTime = simtime
moose.connect(function, 'requestOut', input_y, 'getOutputValue')
# data recording
result = moose.Table('/ztab')
moose.connect(result, 'requestOut', function, 'getValue')
derivative = moose.Table('/zprime')
moose.connect(derivative, 'requestOut', function, 'getDerivative')
rate = moose.Table('/dz_by_dt')
moose.connect(rate, 'requestOut', function, 'getRate')
x_rec = moose.Table('/xrec')
moose.connect(x_rec, 'requestOut', input_x, 'getOutputValue')
y_rec = moose.Table('/yrec')
moose.connect(y_rec, 'requestOut', input_y, 'getOutputValue')
dt = simtime/nsteps
for ii in range(32):
moose.setClock(ii, dt)
moose.reinit()
moose.start(simtime)
    # Plot the recorded tables against values computed directly with numpy
    # and save the figure (the Agg backend set at the top needs no display).
plt.subplot(3,1,1)
plt.plot(x_rec.vector, result.vector, 'r-', label='z = {}'.format(function.expr))
z = function.c['c0'] * np.exp(function.c['c1'] * xarr) * np.cos(yarr) + np.sin(np.arange(len(xarr)) * dt)
plt.plot(xarr, z, 'b--', label='numpy computed')
plt.xlabel('x')
plt.ylabel('z')
plt.legend()
plt.subplot(3,1,2)
plt.plot(y_rec.vector, derivative.vector, 'r-', label='dz/dy0')
# derivatives computed by putting x values in the analytical formula
dzdy = function.c['c0'] * np.exp(function.c['c1'] * xarr) * (- np.sin(yarr))
plt.plot(yarr, dzdy, 'b--', label='numpy computed')
plt.xlabel('y')
plt.ylabel('dz/dy')
plt.legend()
plt.subplot(3,1,3)
# *** BEWARE *** The first two entries are spurious. Entry 0 is
# *** from reinit sending out the defaults. Entry 2 is because
# *** there is no lastValue for computing real forward difference.
plt.plot(np.arange(2, len(rate.vector), 1) * dt, rate.vector[2:], 'r-', label='dz/dt')
dzdt = np.diff(z)/dt
plt.plot(np.arange(0, len(dzdt), 1.0) * dt, dzdt, 'b--', label='numpy computed')
plt.xlabel('t')
plt.ylabel('dz/dt')
plt.legend()
plt.tight_layout()
plt.savefig(__file__+'.png')
if __name__ == '__main__':
test_example()
| dilawar/moose-core | tests/core/test_function_example.py | Python | gpl-3.0 | 3,483 |
# Modules
import gc
import getpass
import logging
import paramiko
from contextlib import suppress, closing
from paramiko import SSHException
# Variables
CMDLIST = '/home/local-adm/LG_Installation/minho/CR/commands.txt'
SYSLIST = '/home/local-adm/LG_Installation/minho/CR/systems.txt'
# Classes and Functions
class InputReader:
def __init__(self, commands_path, hosts_path):
self.commands_path = commands_path
self.hosts_path = hosts_path
def read(self):
self.commands = self.__readlines(self.commands_path)
self.hosts = self.__readlines(self.hosts_path)
def __readlines(self, path):
with open(path) as f:
return [v.strip() for v in f.readlines()]
class CommandExecuter():
def __init__(self, host, command, user, pswd):
self.host = host
self.command = command
self.user = user
self.pswd = pswd
    def execute(self):
        ssh = None
        try:
            ssh = paramiko.SSHClient()
            ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
            ssh.connect(self.host, username=self.user, password=self.pswd)
            stdin, stdout, stderr = ssh.exec_command(self.command)
            lines = [v.strip() for v in stdout.readlines()]
            return lines
        except Exception as err:
            print('[ERROR] %s SSH connection failed' % self.host + '\n')
            raise err
        finally:
            # guard against paramiko.SSHClient() itself failing, in which
            # case ssh would otherwise be unbound here
            if ssh:
                ssh.close()
def main():
user = input("Enter User Name: ")
pswd = getpass.getpass("Enter User Password: ")
reader = InputReader(CMDLIST, SYSLIST)
reader.read()
for h in reader.hosts:
try:
for c in reader.commands:
executer = CommandExecuter(h, c, user, pswd)
results = executer.execute()
print("{0} {1}".format(h, c) + '\n')
if results is not None:
for i in results:
print(i + '\n')
except Exception as err:
logging.exception('%s', err)
# Main Procedure
if __name__ == '__main__':
main() | minhouse/python-lesson | auto-ssh_v5.py | Python | mit | 2,143 |
from seleniumbase import BaseCase
class VisualLayoutTests(BaseCase):
def test_xkcd_layout_change(self):
self.open("https://xkcd.com/554/")
print('\nCreating baseline in "visual_baseline" folder.')
self.check_window(name="xkcd_554", baseline=True)
# Change height: (83 -> 130) , Change width: (185 -> 120)
self.set_attribute('[alt="xkcd.com logo"]', "height", "130")
self.set_attribute('[alt="xkcd.com logo"]', "width", "120")
self.check_window(name="xkcd_554", level=0)
| seleniumbase/SeleniumBase | examples/visual_testing/xkcd_visual_test.py | Python | mit | 532 |
""" The mako """
import logging
import os
import projex
logger = logging.getLogger(__name__)
try:
import mako
import mako.template
import mako.lookup
except ImportError:
logger.warning('The projex.makotext package requires mako to be installed.')
mako = None
# import useful modules for the template
from datetime import datetime, date
import projex.text
_macros = {}
def register(macro):
"""
Registers a macro method for the mako text rendering system.
:param macro | <method>
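    
    :usage      |import projex.makotext
                |def shout(text):        # example macro (hypothetical)
                |    return text.upper()
                |projex.makotext.register(shout)
                |# templates can then call ${shout('hi')}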
"""
_macros[macro.__name__] = macro
def renderfile(filename,
options=None,
templatePaths=None,
default='',
silent=False):
"""
Renders a file to text using the mako template system.
To learn more about mako and its usage, see [[www.makotemplates.org]]
:return <str> formatted text
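    
    :usage      |import projex.makotext
                |# assumes a mako template exists at this (hypothetical) path
                |projex.makotext.renderfile('template.mako', {'key': 10})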
"""
    if not mako:
        logger.debug('mako is not installed.')
        return default
if templatePaths is None:
templatePaths = []
# use the default mako templates
basepath = os.environ.get('MAKO_TEMPLATEPATH', '')
if basepath:
basetempls = basepath.split(os.path.pathsep)
else:
basetempls = []
templatePaths += basetempls
# include the root path
templatePaths.insert(0, os.path.dirname(filename))
templatePaths = map(lambda x: x.replace('\\', '/'), templatePaths)
# update the default options
scope = dict(os.environ)
scope['projex_text'] = projex.text
scope['date'] = date
scope['datetime'] = datetime
scope.update(_macros)
if options is not None:
scope.update(options)
old_env_path = os.environ.get('MAKO_TEMPLATEPATH', '')
os.environ['MAKO_TEMPLATEPATH'] = os.path.pathsep.join(templatePaths)
logger.debug('rendering mako file: %s', filename)
if templatePaths:
lookup = mako.lookup.TemplateLookup(directories=templatePaths)
templ = mako.template.Template(filename=filename, lookup=lookup)
else:
templ = mako.template.Template(filename=filename)
try:
output = templ.render(**scope)
except StandardError:
output = default
if not silent:
logger.exception('Error rendering mako text')
os.environ['MAKO_TEMPLATEPATH'] = old_env_path
return output
def render(text,
options=None,
templatePaths=None,
default=None,
silent=False,
raiseErrors=False):
"""
Renders a template text to a resolved text value using the mako template
system.
Provides a much more robust template option to the projex.text system.
While the projex.text method can handle many simple cases with no
dependencies, the makotext module makes use of the powerful mako template
language. This module provides a simple wrapper to the mako code.
To learn more about mako and its usage, see [[www.makotemplates.org]]
:param text <str>
:param options <dict> { <str> key: <variant> value, .. }
:return <str> formatted text
:usage |import projex.makotext
|options = { 'key': 10, 'name': 'eric' }
|template = '${name.lower()}_${key}_${date.today()}.txt'
|projex.makotext.render( template, options )
"""
if not mako:
logger.debug('mako is not installed.')
return text if default is None else default
if templatePaths is None:
templatePaths = []
# use the default mako templates
basepath = os.environ.get('MAKO_TEMPLATEPATH', '')
if basepath:
basetempls = basepath.split(os.path.pathsep)
else:
basetempls = []
templatePaths += basetempls
# update the default options
scope = dict(os.environ)
scope['projex_text'] = projex.text
scope['date'] = date
scope['datetime'] = datetime
scope.update(_macros)
if options is not None:
scope.update(options)
if templatePaths:
lookup = mako.lookup.TemplateLookup(directories=templatePaths)
try:
templ = mako.template.Template(text, lookup=lookup)
except StandardError:
output = text if default is None else default
if not silent:
logger.exception('Error compiling mako text')
return output
else:
try:
templ = mako.template.Template(text)
except StandardError:
output = text if default is None else default
if not silent:
logger.exception('Error compiling mako text')
return output
try:
output = templ.render(**scope)
except StandardError:
if raiseErrors:
raise
output = text if default is None else default
if not silent:
logger.exception('Error rendering mako text')
return output
return output
def unregister(method):
"""
Unregisters the given macro from the system.
:param name | <str>
"""
_macros.pop(method.__name__, None)
# ----------------------------------------------------------------------
# register some macros
# ----------------------------------------------------------------------
def collectfiles(path, filt=None):
"""
Collects some files based on the given filename.
:param path | <str>
filt | <method>
:return [(<str> name, <str> filepath), ..]
"""
if not os.path.isdir(path):
path = os.path.dirname(path)
output = []
for name in sorted(os.listdir(path)):
filepath = os.path.join(path, name)
if os.path.isfile(filepath):
if not filt or filt(name):
output.append((name, filepath))
return output
register(collectfiles) | bitesofcode/projex | projex/makotext.py | Python | mit | 6,007 |
#!/usr/bin/python
#
# Problem: Saving the Universe
# Language: Python
# Author: KirarinSnow
# Usage: python thisfile.py <input.in >output.out
# Comments: Inefficient O(SQ)-time algorithm. Can be solved in O(S+Q) time.
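#
# For reference, a hedged sketch of the O(S+Q) greedy approach mentioned
# above (not used below): keep the set of engines still usable in the
# current segment; when a query empties it, one switch is required.
#
#   def min_switches(engines, queries):
#       usable = set(engines)
#       switches = 0
#       for q in queries:
#           usable.discard(q)
#           if not usable:
#               switches += 1
#               usable = set(engines) - {q}
#       return switches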
MAX = 1000000000
def compute():
ns = input()
ses = [raw_input() for j in range(ns)]
nq = input()
qus = [raw_input() for j in range(nq)]
table = []
for jj in range(nq):
table.append([])
for kk in range(ns):
if qus[jj] == ses[kk]:
conf = 1
else:
conf = 0
table[jj].append(conf)
if nq == 0:
return 0
score = [[]]
for m in range(ns):
score[0].append(table[0][m])
for v in range(1,nq):
score.append([])
for x in range(ns):
if table[v][x] == 1:
score[v].append(MAX)
else:
cc = MAX
for p in range(ns):
if p == x:
ad = 0
else:
ad = 1
cc = min(cc, score[v-1][p] +ad)
score[v].append(cc)
scc = MAX
for kkk in range(ns):
scc = min(scc,score[nq-1][kkk])
return scc
for i in range(input()):
print "Case #%d: %d" % (i+1, compute())
| KirarinSnow/Google-Code-Jam | Qualification Round 2008/A.py | Python | gpl-3.0 | 1,077 |
#!/usr/bin/python
#
# Original Copyright (C) 2006 Google Inc.
# Refactored in 2009 to work for Google Analytics by Sal Uryasev at Juice Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Note that this module will not function without specifically adding
# 'analytics': [ #Google Analytics
# 'https://www.google.com/analytics/feeds/'],
# to CLIENT_LOGIN_SCOPES in the gdata/service.py file
"""Contains extensions to Atom objects used with Google Analytics."""
__author__ = 'api.suryasev (Sal Uryasev)'
import atom
import gdata
GAN_NAMESPACE = 'http://schemas.google.com/analytics/2009'
class TableId(gdata.GDataEntry):
"""tableId element."""
_tag = 'tableId'
_namespace = GAN_NAMESPACE
class Property(gdata.GDataEntry):
_tag = 'property'
_namespace = GAN_NAMESPACE
_children = gdata.GDataEntry._children.copy()
_attributes = gdata.GDataEntry._attributes.copy()
_attributes['name'] = 'name'
_attributes['value'] = 'value'
def __init__(self, name=None, value=None, *args, **kwargs):
self.name = name
self.value = value
super(Property, self).__init__(*args, **kwargs)
def __str__(self):
return self.value
def __repr__(self):
return self.value
class AccountListEntry(gdata.GDataEntry):
"""The Google Documents version of an Atom Entry"""
_tag = 'entry'
_namespace = atom.ATOM_NAMESPACE
_children = gdata.GDataEntry._children.copy()
_attributes = gdata.GDataEntry._attributes.copy()
_children['{%s}tableId' % GAN_NAMESPACE] = ('tableId',
[TableId])
_children['{%s}property' % GAN_NAMESPACE] = ('property',
[Property])
def __init__(self, tableId=None, property=None,
*args, **kwargs):
self.tableId = tableId
self.property = property
super(AccountListEntry, self).__init__(*args, **kwargs)
def AccountListEntryFromString(xml_string):
"""Converts an XML string into an AccountListEntry object.
Args:
xml_string: string The XML describing a Document List feed entry.
Returns:
A AccountListEntry object corresponding to the given XML.
"""
return atom.CreateClassFromXMLString(AccountListEntry, xml_string)
class AccountListFeed(gdata.GDataFeed):
"""A feed containing a list of Google Documents Items"""
_tag = 'feed'
_namespace = atom.ATOM_NAMESPACE
_children = gdata.GDataFeed._children.copy()
_attributes = gdata.GDataFeed._attributes.copy()
_children['{%s}entry' % atom.ATOM_NAMESPACE] = ('entry',
[AccountListEntry])
def AccountListFeedFromString(xml_string):
"""Converts an XML string into an AccountListFeed object.
Args:
xml_string: string The XML describing an AccountList feed.
Returns:
An AccountListFeed object corresponding to the given XML.
All properties are also linked to with a direct reference
from each entry object for convenience. (e.g. entry.AccountName)
"""
feed = atom.CreateClassFromXMLString(AccountListFeed, xml_string)
for entry in feed.entry:
for pro in entry.property:
entry.__dict__[pro.name.replace('ga:','')] = pro
for td in entry.tableId:
td.__dict__['value'] = td.text
return feed
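# Hedged example: after parsing, each entry's ga: properties are reachable
# directly, e.g. feed.entry[0].accountName (assuming the feed carried a
# 'ga:accountName' property).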
class Dimension(gdata.GDataEntry):
_tag = 'dimension'
_namespace = GAN_NAMESPACE
_children = gdata.GDataEntry._children.copy()
_attributes = gdata.GDataEntry._attributes.copy()
_attributes['name'] = 'name'
_attributes['value'] = 'value'
_attributes['type'] = 'type'
_attributes['confidenceInterval'] = 'confidence_interval'
def __init__(self, name=None, value=None, type=None,
confidence_interval = None, *args, **kwargs):
self.name = name
self.value = value
self.type = type
self.confidence_interval = confidence_interval
super(Dimension, self).__init__(*args, **kwargs)
def __str__(self):
return self.value
def __repr__(self):
return self.value
class Metric(gdata.GDataEntry):
_tag = 'metric'
_namespace = GAN_NAMESPACE
_children = gdata.GDataEntry._children.copy()
_attributes = gdata.GDataEntry._attributes.copy()
_attributes['name'] = 'name'
_attributes['value'] = 'value'
_attributes['type'] = 'type'
_attributes['confidenceInterval'] = 'confidence_interval'
def __init__(self, name=None, value=None, type=None,
confidence_interval = None, *args, **kwargs):
self.name = name
self.value = value
self.type = type
self.confidence_interval = confidence_interval
super(Metric, self).__init__(*args, **kwargs)
def __str__(self):
return self.value
def __repr__(self):
return self.value
class AnalyticsDataEntry(gdata.GDataEntry):
"""The Google Analytics version of an Atom Entry"""
_tag = 'entry'
_namespace = atom.ATOM_NAMESPACE
_children = gdata.GDataEntry._children.copy()
_attributes = gdata.GDataEntry._attributes.copy()
_children['{%s}dimension' % GAN_NAMESPACE] = ('dimension',
[Dimension])
_children['{%s}metric' % GAN_NAMESPACE] = ('metric',
[Metric])
def __init__(self, dimension=None, metric=None, *args, **kwargs):
self.dimension = dimension
self.metric = metric
super(AnalyticsDataEntry, self).__init__(*args, **kwargs)
class AnalyticsDataFeed(gdata.GDataFeed):
"""A feed containing a list of Google Analytics Data Feed"""
_tag = 'feed'
_namespace = atom.ATOM_NAMESPACE
_children = gdata.GDataFeed._children.copy()
_attributes = gdata.GDataFeed._attributes.copy()
_children['{%s}entry' % atom.ATOM_NAMESPACE] = ('entry',
[AnalyticsDataEntry])
"""
Data Feed
"""
def AnalyticsDataFeedFromString(xml_string):
"""Converts an XML string into an AccountListFeed object.
Args:
xml_string: string The XML describing an AccountList feed.
Returns:
An AccountListFeed object corresponding to the given XML.
Each metric and dimension is also referenced directly from
the entry for easier access. (e.g. entry.keyword.value)
"""
feed = atom.CreateClassFromXMLString(AnalyticsDataFeed, xml_string)
if feed.entry:
for entry in feed.entry:
for met in entry.metric:
entry.__dict__[met.name.replace('ga:','')] = met
if entry.dimension is not None:
for dim in entry.dimension:
entry.__dict__[dim.name.replace('ga:','')] = dim
return feed
| boxed/CMi | web_frontend/gdata/analytics/__init__.py | Python | mit | 6,995 |
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from openerp.osv import fields, osv
class task(osv.osv):
_inherit = "project.task"
# Compute: effective_hours, total_hours, progress
def _hours_get(self, cr, uid, ids, field_names, args, context=None):
res = {}
for task in self.browse(cr, uid, ids, context=context):
res[task.id] = {'effective_hours': 0.0, 'remaining_hours': task.planned_hours, 'progress': 0.0, 'total_hours': task.planned_hours, 'delay_hours': 0.0}
tasks_data = self.pool['account.analytic.line'].read_group(cr, uid, [('task_id', 'in', ids)], ['task_id','unit_amount'], ['task_id'], context=context)
for data in tasks_data:
task = self.browse(cr, uid, data['task_id'][0], context=context)
res[data['task_id'][0]] = {'effective_hours': data.get('unit_amount', 0.0), 'remaining_hours': task.planned_hours - data.get('unit_amount', 0.0)}
res[data['task_id'][0]]['total_hours'] = res[data['task_id'][0]]['remaining_hours'] + data.get('unit_amount', 0.0)
res[data['task_id'][0]]['delay_hours'] = res[data['task_id'][0]]['total_hours'] - task.planned_hours
res[data['task_id'][0]]['progress'] = 0.0
if (task.planned_hours > 0.0 and data.get('unit_amount', 0.0)):
res[data['task_id'][0]]['progress'] = round(min(100.0 * data.get('unit_amount', 0.0) / task.planned_hours, 99.99),2)
# TDE CHECK: if task.state in ('done','cancelled'):
if task.stage_id and task.stage_id.fold:
res[data['task_id'][0]]['progress'] = 100.0
return res
def _get_task(self, cr, uid, id, context=None):
res = []
for line in self.pool.get('account.analytic.line').search_read(cr,uid,[('task_id', '!=', False),('id','in',id)], context=context):
res.append(line['task_id'][0])
return res
def _get_total_hours(self):
return super(task, self)._get_total_hours() + self.effective_hours
_columns = {
'remaining_hours': fields.function(_hours_get, string='Remaining Hours', multi='line_id', help="Total remaining time, can be re-estimated periodically by the assignee of the task.",
store = {
'project.task': (lambda self, cr, uid, ids, c={}: ids, ['timesheet_ids', 'remaining_hours', 'planned_hours'], 10),
'account.analytic.line': (_get_task, ['task_id', 'unit_amount'], 10),
}),
'effective_hours': fields.function(_hours_get, string='Hours Spent', multi='line_id', help="Computed using the sum of the task work done.",
store = {
'project.task': (lambda self, cr, uid, ids, c={}: ids, ['timesheet_ids', 'remaining_hours', 'planned_hours'], 10),
'account.analytic.line': (_get_task, ['task_id', 'unit_amount'], 10),
}),
'total_hours': fields.function(_hours_get, string='Total', multi='line_id', help="Computed as: Time Spent + Remaining Time.",
store = {
'project.task': (lambda self, cr, uid, ids, c={}: ids, ['timesheet_ids', 'remaining_hours', 'planned_hours'], 10),
'account.analytic.line': (_get_task, ['task_id', 'unit_amount'], 10),
}),
'progress': fields.function(_hours_get, string='Progress (%)', multi='line_id', group_operator="avg", help="If the task has a progress of 99.99% you should close the task if it's finished or reevaluate the time",
store = {
'project.task': (lambda self, cr, uid, ids, c={}: ids, ['timesheet_ids', 'remaining_hours', 'planned_hours', 'state', 'stage_id'], 10),
'account.analytic.line': (_get_task, ['task_id', 'unit_amount'], 10),
}),
'delay_hours': fields.function(_hours_get, string='Delay Hours', multi='line_id', help="Computed as difference between planned hours by the project manager and the total hours of the task.",
store = {
'project.task': (lambda self, cr, uid, ids, c={}: ids, ['timesheet_ids', 'remaining_hours', 'planned_hours'], 10),
'account.analytic.line': (_get_task, ['task_id', 'unit_amount'], 10),
}),
'timesheet_ids': fields.one2many('account.analytic.line', 'task_id', 'Timesheets'),
}
_defaults = {
'progress': 0,
}
| orchidinfosys/odoo | addons/hr_timesheet/project_timesheet.py | Python | gpl-3.0 | 4,417 |
# -*- coding: utf-8 -*-
#
# This file is part of the VecNet OpenMalaria Portal.
# For copyright and licensing information about this package, see the
# NOTICE.txt and LICENSE.txt files in its top-level directory; they are
# available at https://github.com/vecnet/om
#
# This Source Code Form is subject to the terms of the Mozilla Public
# License (MPL), version 2.0. If a copy of the MPL was not distributed
# with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
from django.conf import settings
import subprocess
import sys
import os
import logging
from website.apps.ts_om.models import Simulation
logger = logging.getLogger(__name__)
def submit(simulation):
logger.debug("dispatcher.submit: simulation id %s" % simulation.id)
assert isinstance(simulation, Simulation)
base_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
executable = sys.executable
if hasattr(settings, "PYTHON_EXECUTABLE"):
executable = settings.PYTHON_EXECUTABLE
run_script_filename = os.path.join(base_dir, "run.py")
try:
logger.debug("dispatcher.submit: before Popen")
p = subprocess.Popen(
[executable, run_script_filename, str(simulation.id)],
cwd=base_dir, shell=False
)
logger.debug("dispatcher.submit: after Popen")
except (OSError, IOError) as e:
logger.exception("subprocess failed: %s", sys.exc_info())
simulation.status = Simulation.FAILED
simulation.last_error_message = "Subprocess failed: %s" % e
simulation.pid = ""
simulation.save(update_fields=["status", "pid", "last_error_message"])
return None
simulation.status = Simulation.QUEUED
simulation.pid = str(p.pid)
simulation.last_error_message = ""
simulation.save(update_fields=["status", "pid", "last_error_message"])
logger.debug("dispatcher.submit: success, PID: %s" % p.pid)
return str(p.pid)
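# Hedged usage sketch (the id is hypothetical):
#   sim = Simulation.objects.get(id=42)
#   pid = submit(sim)   # str PID on success, None if the subprocess failed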
| vecnet/om | sim_services_local/dispatcher.py | Python | mpl-2.0 | 1,944 |
# Generated by Django 2.1.7 on 2019-04-17 13:29
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('tmv_app', '0110_auto_20190417_0944'),
]
operations = [
migrations.AddField(
model_name='topicterm',
name='alltopic_score',
field=models.FloatField(null=True),
),
]
| mcallaghan/tmv | BasicBrowser/tmv_app/migrations/0111_topicterm_alltopic_score.py | Python | gpl-3.0 | 392 |
# Copyright (c) 2014 by Ecreall under licence AGPL terms
# available on http://www.gnu.org/licenses/agpl.html
# licence: AGPL
# author: Amen Souissi
from pyramid.view import view_config
from substanced.util import Batch
from dace.processinstance.core import DEFAULTMAPPING_ACTIONS_VIEWS
from dace.objectofcollaboration.principal.util import get_current
from pontus.view import BasicView
from pontus.util import merge_dicts
from novaideo.utilities.util import render_listing_objs
from novaideo.content.processes.novaideo_view_manager.behaviors import (
SeeReportedContents)
from novaideo.core import BATCH_DEFAULT_SIZE
from novaideo.content.novaideo_application import NovaIdeoApplication
from novaideo import _
from novaideo.views.filter import (
get_filter, FILTER_SOURCES, merge_with_filter_view, find_entities)
from novaideo.views.filter.sort import (
sort_view_objects)
from novaideo.content.interface import ISignalableEntity
from novaideo.views.core import asyn_component_config
CONTENTS_MESSAGES = {
'0': _(u"""No content found"""),
'1': _(u"""One content found"""),
'*': _(u"""${number} contents found""")
}
@asyn_component_config(id='novaideoap_seereportedcontents')
@view_config(
name='seereportedcontents',
context=NovaIdeoApplication,
renderer='pontus:templates/views_templates/grid.pt',
)
class SeeReportedContentsView(BasicView):
title = _('Reported contents')
name = 'seereportedcontents'
behaviors = [SeeReportedContents]
template = 'novaideo:views/novaideo_view_manager/templates/search_result.pt'
viewid = 'seereportedcontents'
wrapper_template = 'novaideo:views/templates/simple_wrapper.pt'
css_class = 'panel-transparent'
requirements = {'css_links': [],
'js_links': ['novaideo:static/js/comment.js']}
def _add_filter(self, user):
def source(**args):
filters = [
{'metadata_filter': {
'interfaces': [ISignalableEntity],
'states': ['reported'],
}}
]
objects = find_entities(
                user=user, filters=filters, **args)
return objects
url = self.request.resource_url(self.context, '@@novaideoapi')
return get_filter(
self,
url=url,
source=source,
select=[('metadata_filter', ['keywords', 'challenges']),
'contribution_filter', ('temporal_filter', ['negation', 'created_date']),
'text_filter', 'other_filter'])
def update(self):
self.execute(None)
user = get_current()
filter_form, filter_data = self._add_filter(user)
filters = [
{'metadata_filter': {
'interfaces': [ISignalableEntity],
'states': ['reported'],
}}
]
args = {}
args = merge_with_filter_view(self, args)
args['request'] = self.request
objects = find_entities(
interfaces=[ISignalableEntity],
user=user,
filters=filters,
**args)
objects, sort_body = sort_view_objects(
self, objects, ['proposal'], user)
url = self.request.resource_url(
self.context, 'seereportedcontents')
batch = Batch(objects, self.request,
url=url,
default_size=BATCH_DEFAULT_SIZE)
batch.target = "#results_contents"
len_result = batch.seqlen
index = str(len_result)
if len_result > 1:
index = '*'
self.title = _(CONTENTS_MESSAGES[index],
mapping={'number': len_result})
filter_data['filter_message'] = self.title
filter_body = self.filter_instance.get_body(filter_data)
result_body, result = render_listing_objs(
self.request, batch, user)
if filter_form:
result = merge_dicts(
{'css_links': filter_form['css_links'],
'js_links': filter_form['js_links']
}, result)
values = {'bodies': result_body,
'batch': batch,
'filter_body': filter_body,
'sort_body': sort_body}
body = self.content(args=values, template=self.template)['body']
item = self.adapt_item(body, self.viewid)
result['coordinates'] = {self.coordinates: [item]}
result = merge_dicts(self.requirements_copy, result)
return result
DEFAULTMAPPING_ACTIONS_VIEWS.update(
{SeeReportedContents: SeeReportedContentsView})
FILTER_SOURCES.update(
{SeeReportedContentsView.name: SeeReportedContentsView})
| ecreall/nova-ideo | novaideo/views/novaideo_view_manager/see_reported_contents.py | Python | agpl-3.0 | 4,728 |
# -*- coding: utf-8 -*-
# vim:set shiftwidth=4 tabstop=4 expandtab textwidth=79:
"""
pida.core.log
~~~~~~~~~~~~~
sets up the core logging
:copyright: 2007 the Pida Project
:license: GPL2 or later
"""
import logbook.compat
logbook.compat.redirect_logging()
from pida.core.environment import is_debug
from pida.utils.descriptors import cached_property
from collections import deque
from threading import Lock
log = logbook.Logger('pida')
def configure():
if is_debug():
pida_handler.level = logbook.DEBUG
else:
pida_handler.level = logbook.NOTICE
class Log(object):
def get_name(self):
return '%s.%s' % (self.__module__, self.__class__.__name__)
@cached_property
def log(self):
return logbook.Logger(self.get_name())
class RollOverHandler(logbook.Handler):
"""
massively dumbed down version of logbooks FingersCrossedHandler
"""
#XXX: unittests
def __init__(self, filter=None, bubble=False):
logbook.Handler.__init__(self, logbook.NOTSET, filter, bubble)
self.lock = Lock()
self._handler = None
self.buffered_records = deque()
def close(self):
if self._handler is not None:
self._handler.close()
def enqueue(self, record):
assert self.buffered_records is not None, 'rollover occurred'
self.buffered_records.append(record)
def rollover(self, handler):
assert self.buffered_records is not None, 'rollover occurred'
with self.lock:
self._handler = handler
for old_record in self.buffered_records:
self._handler.emit(old_record)
self.buffered_records = None
@property
def triggered(self):
"""This attribute is `True` when the action was triggered. From
this point onwards the handler transparently
forwards all log records to the inner handler.
"""
return self._handler is not None
def emit(self, record):
with self.lock:
if self._handler is not None:
self._handler.emit(record)
else:
self.enqueue(record)
null = logbook.NullHandler()
pida_handler = logbook.StderrHandler()
rollover = RollOverHandler(bubble=True)
nested_setup = logbook.NestedSetup([ null, pida_handler, rollover ])
nested_setup.push_application()
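# Hedged usage sketch: `rollover` buffers records until a concrete handler
# is supplied, then replays them into it, e.g.:
#   rollover.rollover(logbook.FileHandler('pida.log'))  # hypothetical sink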
| fermat618/pida | pida/core/log.py | Python | gpl-2.0 | 2,390 |
# -*- Mode: Python; coding: utf-8; indent-tabs-mode: nil; tab-width: 4 -*-
#
# Copyright (c) 2016 Cédric Clerget - HPC Center of Franche-Comté University
#
# This file is part of Janua-SMS
#
# http://github.com/mesocentrefc/Janua-SMS
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation v2.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
import csv
import datetime
import cStringIO
import sys
from janua import jdb
from janua.actions.action import Action, argument, date
from janua.actions.action import console_error, console_success
from janua.db.database import Sms
from janua.utils.utilities import prompt
class Log(Action):
"""
Extract log from database in CSV format
"""
category = '__INTERNAL__'
def display_log(self, entries):
output_file = self.output()
output = cStringIO.StringIO()
output.write('Date;recipient;sender;message;status;authorized\n')
status_id = sorted(Sms.status_id.items(), key=lambda x: x[1])
count = 0
for entry in entries:
date = entry.date_time.strftime('%Y-%m-%d %H:%M:%S')
authorized = 'yes' if entry.authorized else 'no'
sender = entry.sender
recipient = entry.recipient
message = entry.raw_message
status = status_id[entry.status][0]
line = '%s;%s;%s;%s;%s;%s\n' % (date, sender, recipient,
message, status, authorized)
output.write(line.encode('utf-8'))
count += 1
if output_file:
outfile = open(output_file, 'w')
outfile.write(output.getvalue())
outfile.close()
return console_success('File %s was created' % output_file)
else:
print output.getvalue()
return console_success('%d lines displayed' % count)
def view_log(self, start, end):
entries = jdb.sms.get_by_date(startdate=start, enddate=end)
return self.display_log(entries)
def delete_log(self, start, end):
if not end:
return console_error('You must specify at least end date')
else:
jdb.sms.delete(start, end)
return console_success('Logs have been deleted')
def view_admin_log(self, start, end):
admins = jdb.admin.get_all()
print 'Select an admin number to display only these logs :'
idx = 0
for admin in admins:
print '%d. %s %s' % (idx, admin.firstname, admin.name)
idx += 1
admin_id = prompt('Enter a number (or ENTER to quit)')
if admin_id == '':
return console_success('exit')
if admin_id.isdigit():
admin_id = admins[int(admin_id)].id
entries = jdb.sms.get_by_admin(
admin_id, startdate=start, enddate=end
)
return self.display_log(entries)
else:
return console_error('the value entered is not a number')
def console(self):
op = self.operation()
startdate = None
enddate = None
if self.startdate():
startdate = datetime.datetime.strptime(self.startdate(), '%Y/%m/%d')
if self.enddate():
enddate = datetime.datetime.strptime(self.enddate(), '%Y/%m/%d')
enddate += datetime.timedelta(days=1)
if op == 'view':
return self.view_log(startdate, enddate)
elif op == 'delete':
return self.delete_log(startdate, enddate)
elif op == 'view_admin':
return self.view_admin_log(startdate, enddate)
@argument(required=True)
def operation(self):
"""Operation: view, delete, adminview"""
return ['view', 'delete', 'view_admin']
@argument(required=False)
def output(self):
"""output file"""
return ''
@argument(required=False)
def startdate(self):
"""From date (date format: YYYY/MM/DD)"""
return date()
@argument(required=False)
def enddate(self):
"""To date (date format: YYYY/MM/DD)"""
return date()
| mesocentrefc/Janua-SMS | janua/actions/log.py | Python | gpl-2.0 | 4,569 |
# Copyright 2017 the authors.
# This file is part of Hy, which is free software licensed under the Expat
# license. See the LICENSE.
from hy.importer import (import_file_to_module, import_buffer_to_ast,
MetaLoader, get_bytecode_path)
from hy.errors import HyTypeError
import os
import ast
import tempfile
def test_basics():
"Make sure the basics of the importer work"
import_file_to_module("basic",
"tests/resources/importer/basic.hy")
def test_stringer():
_ast = import_buffer_to_ast("(defn square [x] (* x x))", '')
assert type(_ast.body[0]) == ast.FunctionDef
def test_imports():
path = os.getcwd() + "/tests/resources/importer/a.hy"
testLoader = MetaLoader(path)
def _import_test():
try:
return testLoader.load_module("tests.resources.importer.a")
        except Exception:
return "Error"
assert _import_test() == "Error"
assert _import_test() is not None
def test_import_error_reporting():
"Make sure that (import) reports errors correctly."
def _import_error_test():
try:
import_buffer_to_ast("(import \"sys\")", '')
except HyTypeError:
return "Error reported"
assert _import_error_test() == "Error reported"
assert _import_error_test() is not None
def test_import_autocompiles():
"Test that (import) byte-compiles the module."
f = tempfile.NamedTemporaryFile(suffix='.hy', delete=False)
f.write(b'(defn pyctest [s] (+ "X" s "Y"))')
f.close()
try:
os.remove(get_bytecode_path(f.name))
except (IOError, OSError):
pass
import_file_to_module("mymodule", f.name)
assert os.path.exists(get_bytecode_path(f.name))
os.remove(f.name)
os.remove(get_bytecode_path(f.name))
| Tritlo/hy | tests/importer/test_importer.py | Python | mit | 1,808 |
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
import math
from cornice import Service
from pyramid.exceptions import HTTPNotFound
from sqlalchemy import func, distinct
from sqlalchemy.sql import or_
from bodhi import log
from bodhi.models import Update, Build, Package, Release
import bodhi.schemas
import bodhi.security
from bodhi.validators import (
validate_tags,
validate_enums,
validate_updates,
validate_packages,
validate_release,
)
release = Service(name='release', path='/releases/{name}',
description='Fedora Releases',
cors_origins=bodhi.security.cors_origins_ro)
releases = Service(name='releases', path='/releases/',
description='Fedora Releases',
# Note, this 'rw' is not a typo. the @comments service has
# a ``post`` section at the bottom.
cors_origins=bodhi.security.cors_origins_rw)
@release.get(accept="text/html", renderer="release.html")
def get_release_html(request):
id = request.matchdict.get('name')
release = Release.get(id, request.db)
if not release:
request.errors.add('body', 'name', 'No such release')
request.errors.status = HTTPNotFound.code
updates = request.db.query(Update).filter(
Update.release==release).order_by(
Update.date_submitted.desc())
updates_count = request.db.query(Update.date_submitted, Update.type).filter(
Update.release==release).order_by(
Update.date_submitted.desc())
date_commits = {}
dates = set()
for update in updates_count.all():
d = update.date_submitted
yearmonth = str(d.year) + '/' + str(d.month).zfill(2)
dates.add(yearmonth)
        if update.type.description not in date_commits:
            date_commits[update.type.description] = {}
        # start each bucket at 1 so the first update of a month is counted
        if yearmonth in date_commits[update.type.description]:
            date_commits[update.type.description][yearmonth] += 1
        else:
            date_commits[update.type.description][yearmonth] = 1
return dict(release=release,
latest_updates=updates.limit(25).all(),
count=updates.count(),
date_commits=date_commits,
dates = sorted(dates))
@release.get(accept=('application/json', 'text/json'), renderer='json')
@release.get(accept=('application/javascript'), renderer='jsonp')
def get_release_json(request):
id = request.matchdict.get('name')
release = Release.get(id, request.db)
if not release:
request.errors.add('body', 'name', 'No such release')
request.errors.status = HTTPNotFound.code
return release
@releases.get(accept="text/html", schema=bodhi.schemas.ListReleaseSchema,
renderer='releases.html',
validators=(validate_release, validate_updates,
validate_packages))
def query_releases_html(request):
def collect_releases(releases):
x = {}
for r in releases:
if r['state'] in x:
x[r['state']].append(r)
else:
x[r['state']] = [r]
return x
db = request.db
releases = db.query(Release).order_by(Release.id.desc()).all()
return dict(releases=collect_releases(releases))
@releases.get(accept=('application/json', 'text/json'),
schema=bodhi.schemas.ListReleaseSchema, renderer='json',
validators=(validate_release, validate_updates,
validate_packages))
def query_releases_json(request):
db = request.db
data = request.validated
query = db.query(Release)
name = data.get('name')
if name is not None:
query = query.filter(Release.name.like(name))
updates = data.get('updates')
if updates is not None:
query = query.join(Release.builds).join(Build.update)
args = \
[Update.title == update.title for update in updates] +\
[Update.alias == update.alias for update in updates]
query = query.filter(or_(*args))
packages = data.get('packages')
if packages is not None:
query = query.join(Release.builds).join(Build.package)
query = query.filter(or_(*[Package.id == p.id for p in packages]))
# We can't use ``query.count()`` here because it is naive with respect to
# all the joins that we're doing above.
count_query = query.with_labels().statement\
.with_only_columns([func.count(distinct(Release.id))])\
.order_by(None)
total = db.execute(count_query).scalar()
page = data.get('page')
rows_per_page = data.get('rows_per_page')
pages = int(math.ceil(total / float(rows_per_page)))
query = query.offset(rows_per_page * (page - 1)).limit(rows_per_page)
return dict(
releases=query.all(),
page=page,
pages=pages,
rows_per_page=rows_per_page,
total=total,
)
@releases.post(schema=bodhi.schemas.SaveReleaseSchema,
acl=bodhi.security.admin_only_acl, renderer='json',
validators=(validate_tags, validate_enums)
)
def save_release(request):
"""Save a release
This entails either creating a new release, or editing an existing one. To
edit an existing release, the release's original name must be specified in
the ``edited`` parameter.
"""
data = request.validated
edited = data.pop("edited", None)
# This has already been validated at this point, but we need to ditch
# it since the models don't care about a csrf argument.
data.pop('csrf_token')
try:
if edited is None:
log.info("Creating a new release: %s" % data['name'])
r = Release(**data)
else:
log.info("Editing release: %s" % edited)
r = request.db.query(Release).filter(Release.name==edited).one()
for k, v in data.items():
setattr(r, k, v)
except Exception as e:
log.exception(e)
request.errors.add('body', 'release',
'Unable to create update: %s' % e)
return
request.db.add(r)
request.db.flush()
return r
| mathstuf/bodhi | bodhi/services/releases.py | Python | gpl-2.0 | 6,860 |
# encoding: utf-8
import datetime
from south.db import db
from south.v2 import DataMigration
from django.db import models
class Migration(DataMigration):
def forwards(self, orm):
"Write your forwards methods here."
new_default = orm.Extension._meta.get_field_by_name('icon')[0].default
for ext in orm.Extension.objects.filter(icon=""):
ext.icon = new_default
ext.save()
def backwards(self, orm):
"Write your backwards methods here."
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'extensions.extension': {
'Meta': {'object_name': 'Extension'},
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'creator': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}),
'description': ('django.db.models.fields.TextField', [], {}),
'icon': ('django.db.models.fields.files.ImageField', [], {'default': "'/static/images/plugin.png'", 'max_length': '100', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'screenshot': ('sorl.thumbnail.fields.ImageField', [], {'max_length': '100', 'blank': 'True'}),
'slug': ('autoslug.fields.AutoSlugField', [], {'unique_with': '()', 'max_length': '50', 'populate_from': 'None', 'db_index': 'True'}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '200'}),
'uuid': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '200', 'db_index': 'True'})
},
'extensions.extensionversion': {
'Meta': {'unique_together': "(('extension', 'version'),)", 'object_name': 'ExtensionVersion'},
'extension': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'versions'", 'to': "orm['extensions.Extension']"}),
'extra_json_fields': ('django.db.models.fields.TextField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'shell_versions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['extensions.ShellVersion']", 'symmetrical': 'False'}),
'source': ('django.db.models.fields.files.FileField', [], {'max_length': '223'}),
'status': ('django.db.models.fields.PositiveIntegerField', [], {}),
'version': ('django.db.models.fields.IntegerField', [], {'default': '0'})
},
'extensions.shellversion': {
'Meta': {'object_name': 'ShellVersion'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'major': ('django.db.models.fields.PositiveIntegerField', [], {}),
'minor': ('django.db.models.fields.PositiveIntegerField', [], {}),
'point': ('django.db.models.fields.IntegerField', [], {})
}
}
complete_apps = ['extensions']
| magcius/sweettooth | sweettooth/extensions/migrations/0008_new_icon_default.py | Python | agpl-3.0 | 6,118 |
#
# Copyright (c) 2010, 2014, Oracle and/or its affiliates. All rights reserved.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; version 2 of the License.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
#
"""
meta_grep_parameters test.
"""
import meta_grep
from mysql.utilities.exception import MUTLibError
class test(meta_grep.test):
"""Process grep
This test executes the meta grep utility parameters.
It uses the meta_grep test as a parent for setup and teardown methods.
"""
def check_prerequisites(self):
return meta_grep.test.check_prerequisites(self)
def setup(self):
return meta_grep.test.setup(self)
def run(self):
self.res_fname = "result.txt"
cmd_base = "mysqlmetagrep.py "
test_num = 1
comment = "Test case {0} - do the help".format(test_num)
cmd = "{0} --help".format(cmd_base)
res = self.run_test_case(0, cmd, comment)
if not res:
raise MUTLibError("{0}: failed".format(comment))
# Remove version information
self.remove_result_and_lines_after("MySQL Utilities mysqlmetagrep.py "
"version", 6)
test_num += 1
comment = ("Test case {0} - do the SQL for a simple "
"search".format(test_num))
cmd = "{0} --sql -Gb --pattern=t2".format(cmd_base)
res = self.run_test_case(0, cmd, comment)
if not res:
raise MUTLibError("{0}: failed".format(comment))
test_num += 1
comment = ("Test case {0} - do the SQL for a simple search with "
"type".format(test_num))
cmd = ("{0} --sql --search-objects=table -Gb "
"--pattern=t2".format(cmd_base))
res = self.run_test_case(0, cmd, comment)
if not res:
raise MUTLibError("{0}: failed".format(comment))
test_num += 1
comment = ("Test case {0} - do the SQL for a body search with type "
"(VIEW).".format(test_num))
cmd = ("{0} --sql --search-objects=view -Gb "
"--pattern=%t1%".format(cmd_base))
res = self.run_test_case(0, cmd, comment)
if not res:
raise MUTLibError("{0}: failed".format(comment))
self.mask_column_result("root:*@localhost", ",", 1, "root[...]")
# Mask version
self.replace_result(
"MySQL Utilities mysqlmetagrep version",
"MySQL Utilities mysqlmetagrep version X.Y.Z\n")
return True
def get_result(self):
return self.compare(__name__, self.results)
def record(self):
return self.save_result_file(__name__, self.results)
def cleanup(self):
return meta_grep.test.cleanup(self)
| mysql/mysql-utilities | mysql-test/t/meta_grep_parameters.py | Python | gpl-2.0 | 3,293 |
#import re, os, locale, HTMLParser
#from decimal import *
from django import template
from django.template import loader, RequestContext
from django.core.urlresolvers import reverse
from django.utils.importlib import import_module
from frontadmin.conf import settings
#from django.core.urlresolvers import RegexURLResolver, reverse
#from django.utils.safestring import SafeString
#from django.utils.translation import gettext as _
register = template.Library()
def load_plugins(request):
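    # Instantiate every plugin listed in settings.PLUGINS and collect its context.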
context = []
for p in settings.PLUGINS:
module = import_module(p)
context.append(module.Plugin(request).get_context())
return context
@register.simple_tag()
def frontadmin_bar(request):
"""
The main frontadmin bar
"""
t = loader.select_template([
"frontadmin/bar.inc.html",
])
return t.render(RequestContext(request, {
'plugins': load_plugins(request),
}))
@register.simple_tag()
def frontadmin_toolbar(request, obj):
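    # Render the admin toolbar either for a model path string ('app.Model')
    # or for a model instance, linking to the matching admin views.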
# Changelist admin
if isinstance(obj, (str,unicode)):
app_label = obj.split('.')[0].lower()
app_model = obj.split('.')[1].lower()
t = loader.select_template([
"frontadmin/toolbar.inc.html",
"frontadmin/%s/toolbar.inc.html" % app_label,
])
return t.render(RequestContext(request, {
'app_label': app_label,
'app_model': app_model,
'changelist_url': reverse('admin:%s_%s_changelist' % (app_label, app_model)),
}))
# Object admin
else:
app_label = obj._meta.app_label
object_name = obj._meta.object_name.lower()
t = loader.select_template([
"frontadmin/toolbar.inc.html",
"frontadmin/%s/toolbar.inc.html" % app_label,
"frontadmin/%s/%s/toolbar.inc.html" % (app_label, object_name),
])
return t.render(RequestContext(request, {
'app_label': app_label,
'object_name': object_name,
'object': obj,
'delete_url': reverse('admin:%s_%s_delete' % (app_label, object_name), args=(obj.id,)),
'change_url': reverse('admin:%s_%s_change' % (app_label, object_name), args=(obj.id,)),
'history_url': reverse('admin:%s_%s_history' % (app_label, object_name), args=(obj.id,)),
}))
@register.tag(name='frontadmin')
def render_frontadmin(parser, token):
try:
tag_name, request, obj = token.contents.split(None, 2)
except ValueError:
        raise template.TemplateSyntaxError("'frontadmin' node requires request and object variables")
nodelist = parser.parse(('endfrontadmin',))
parser.delete_first_token()
return CaptureasNode(nodelist, request, obj)
class CaptureasNode(template.Node):
def __init__(self, nodelist, request, obj):
self.nodelist = nodelist
self.obj = template.Variable(obj)
self.request = template.Variable(request)
def _has_perm(self, request, var):
if not request.user:
return False
elif not request.user.is_authenticated():
return False
elif isinstance(var, (str,unicode)):
app = var.split('.')[0].lower()
model = var.split('.')[1].lower()
return request.user.has_perm("%s.add_%s" % (app, model)) and \
request.user.has_perm("%s.change_%s" % (app, model)) and \
request.user.has_perm("%s.delete_%s" % (app, model))
else:
app = var._meta.app_label.lower()
            model = var._meta.object_name.lower()
return request.user.has_perm("%s.add_%s" % (app, model)) and \
request.user.has_perm("%s.change_%s" % (app, model)) and \
request.user.has_perm("%s.delete_%s" % (app, model))
def render(self, context):
var = self.obj.resolve(context)
request = self.request.resolve(context)
output = self.nodelist.render(context)
try:
if not self._has_perm(request, var):
return output
except AttributeError:
            msg = 'The requested object is not administrable. Frontadmin accepts only models (app.Model) or single object instances (app.Model.object).'
raise AttributeError(msg)
if isinstance(var, (str,unicode)):
css_class = var.replace('.', '-').lower()
else:
css_class = '%s-%s-%s' % (var._meta.app_label, var._meta.object_name.lower(), var.pk)
return """
<div id="frontadmin-%s" class="frontadmin-block">
%s<div class="frontadmin-block-content">%s</div>
</div>""" % (css_class,
frontadmin_toolbar(request, var),
output,)
| h3/django-frontadmin | frontadmin/templatetags/frontadmin_tags.py | Python | bsd-3-clause | 4,843 |
from libcloud.compute.types import Provider
from libcloud.compute.providers import get_driver
cls = get_driver(Provider.VSPHERE)
driver = cls(url='https://192.168.1.100:8080/sdk/',
username='admin', password='admin')
print(driver.list_nodes())
# ...
| dcorbacho/libcloud | docs/examples/compute/vsphere/connect_url_custom_port.py | Python | apache-2.0 | 264 |
# -*- coding: utf-8 -*-
from pyload.plugin.internal.XFSAccount import XFSAccount
class LinestorageCom(XFSAccount):
__name = "LinestorageCom"
__type = "account"
__version = "0.03"
__description = """Linestorage.com account plugin"""
__license = "GPLv3"
__authors = [("Walter Purcaro", "[email protected]")]
HOSTER_DOMAIN = "linestorage.com"
HOSTER_URL = "http://linestorage.com/"
| ardi69/pyload-0.4.10 | pyload/plugin/account/LinestorageCom.py | Python | gpl-3.0 | 434 |
################################
# IMPORTS #
################################
from flask import Flask, render_template, Response, request
import time
import wave
from robotDebug import Robot
#*****************************#
# CONSTANTS & CONFIG #
#*****************************#
FPS_LIMIT = 15
IPV6 = False
#=========================#
# OBJECTS #
#=========================#
app = Flask(__name__)
robot = Robot()
#------------------------#
# FLASK APP #
#------------------------#
@app.route('/')
def index():
"""Video streaming home page."""
return render_template('index.html')
@app.route('/about')
def about():
"""Video streaming home page."""
return render_template('AboutUs.html')
@app.route('/description')
def description():
return render_template('Description.html')
def gen(camera):
"""Video streaming generator function."""
while True:
a = time.clock()
frame = camera.get_frame() # fetching 1 image from the robot
delta = time.clock()-a # time elapsed during request to robot
delta = (1.0/FPS_LIMIT) - delta # diff between time elapsed and FPS limit period
if (delta > 0): # delta > 0 => frame acquisition faster than FPS period
time.sleep(delta)
yield (b'--frame\r\n'
b'Content-Type: image/jpeg\r\n\r\n' + frame + b'\r\n')
@app.route('/video_feed')
def video_feed():
"""Video streaming route. Put this in the src attribute of an img tag."""
return Response(gen(robot),
mimetype='multipart/x-mixed-replace; boundary=frame')
@app.route("/audio_feed")
def audio_feed():
return ""
@app.route('/command', methods=['POST'])
def command():
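    # Forward the gamepad state to the robot: posture flags first, then the
    # four joystick axes and the camera yaw/pitch angles.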
#print(request.form)
if (request.form["idle"] == 'true'):
robot.setPositionIdle()
elif (request.form["rest"] == 'true'):
robot.setPositionRest()
elif (request.form["cue"] == 'true'):
robot.setPositionCue()
robot.motion(
{
'leftx':int(request.form["leftx"]),
'lefty':int(request.form["lefty"]),
'rightx':int(request.form["rightx"]),
'righty':int(request.form["righty"])
}
)
robot.cameraMotion({
'yaw':int(request.form["yaw"]),
'pitch':int(request.form["pitch"])
})
return "0"
@app.route('/say', methods=['POST'])
def say():
robot.sayText(request.form["message"])
return str(request.form["message"])
@app.route('/sound', methods=['POST'])
def replayVoice():
s = request.files["file"]
res = wave.open(s, 'r')
length = res.getnframes()
res = res.readframes(length)
print(res.count('\0'))
robot.playSound(res) #.replace('\0', '')
return "0"
if __name__ == '__main__':
host_name = "0.0.0.0"
if (IPV6):
host_name = "::"
print("Server running IPV6 on port "+str(host_port))
else:
host_name = "0.0.0.0"
app.run(host=host_name, port=80, debug=True, threaded=True)
| erreur404/Gnubiquity | robot/app.py | Python | gpl-2.0 | 3,139 |
# -*- coding: utf-8 -*-
from south.db import db
from south.v2 import DataMigration
from django.db import connection
class Migration(DataMigration):
old_table = 'cmsplugin_subscriptionplugin'
new_table = 'aldryn_mailchimp_subscriptionplugin'
def forwards(self, orm):
table_names = connection.introspection.table_names()
if self.old_table in table_names:
            if self.new_table not in table_names:
db.rename_table(self.old_table, self.new_table)
else:
db.drop_table(self.old_table)
# Adding field 'SubscriptionPlugin.assign_language'
db.add_column(u'aldryn_mailchimp_subscriptionplugin', 'assign_language',
self.gf('django.db.models.fields.BooleanField')(default=True),
keep_default=False)
def backwards(self, orm):
# Deleting field 'SubscriptionPlugin.assign_language'
db.delete_column(u'aldryn_mailchimp_subscriptionplugin', 'assign_language')
models = {
u'aldryn_mailchimp.subscriptionplugin': {
'Meta': {'object_name': 'SubscriptionPlugin', '_ormbases': ['cms.CMSPlugin']},
'assign_language': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
u'cmsplugin_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['cms.CMSPlugin']", 'unique': 'True', 'primary_key': 'True'}),
'list_id': ('django.db.models.fields.CharField', [], {'max_length': '20'})
},
'cms.cmsplugin': {
'Meta': {'object_name': 'CMSPlugin'},
'changed_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'creation_date': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'language': ('django.db.models.fields.CharField', [], {'max_length': '15', 'db_index': 'True'}),
'level': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'lft': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cms.CMSPlugin']", 'null': 'True', 'blank': 'True'}),
'placeholder': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cms.Placeholder']", 'null': 'True'}),
'plugin_type': ('django.db.models.fields.CharField', [], {'max_length': '50', 'db_index': 'True'}),
'position': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True', 'blank': 'True'}),
'rght': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'tree_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'})
},
'cms.placeholder': {
'Meta': {'object_name': 'Placeholder'},
'default_width': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'slot': ('django.db.models.fields.CharField', [], {'max_length': '50', 'db_index': 'True'})
}
}
complete_apps = ['aldryn_mailchimp']
symmetrical = True
| CT-Data-Collaborative/ctdata-mailchimp | ctdata_mailchimp/migrations/0005_fix_old_cmsplugin_tables.py | Python | bsd-3-clause | 3,407 |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import numpy as np
import pandas as pd
from superset.dataframe import dedup, SupersetDataFrame
from superset.db_engine_specs import BaseEngineSpec
from superset.db_engine_specs.presto import PrestoEngineSpec
from .base_tests import SupersetTestCase
class SupersetDataFrameTestCase(SupersetTestCase):
def test_dedup(self):
self.assertEqual(dedup(["foo", "bar"]), ["foo", "bar"])
self.assertEqual(
dedup(["foo", "bar", "foo", "bar", "Foo"]),
["foo", "bar", "foo__1", "bar__1", "Foo"],
)
self.assertEqual(
dedup(["foo", "bar", "bar", "bar", "Bar"]),
["foo", "bar", "bar__1", "bar__2", "Bar"],
)
self.assertEqual(
dedup(["foo", "bar", "bar", "bar", "Bar"], case_sensitive=False),
["foo", "bar", "bar__1", "bar__2", "Bar__3"],
)
def test_get_columns_basic(self):
data = [("a1", "b1", "c1"), ("a2", "b2", "c2")]
cursor_descr = (("a", "string"), ("b", "string"), ("c", "string"))
cdf = SupersetDataFrame(data, cursor_descr, BaseEngineSpec)
self.assertEqual(
cdf.columns,
[
{"is_date": False, "type": "STRING", "name": "a", "is_dim": True},
{"is_date": False, "type": "STRING", "name": "b", "is_dim": True},
{"is_date": False, "type": "STRING", "name": "c", "is_dim": True},
],
)
def test_get_columns_with_int(self):
data = [("a1", 1), ("a2", 2)]
cursor_descr = (("a", "string"), ("b", "int"))
cdf = SupersetDataFrame(data, cursor_descr, BaseEngineSpec)
self.assertEqual(
cdf.columns,
[
{"is_date": False, "type": "STRING", "name": "a", "is_dim": True},
{
"is_date": False,
"type": "INT",
"name": "b",
"is_dim": False,
"agg": "sum",
},
],
)
def test_get_columns_type_inference(self):
data = [(1.2, 1), (3.14, 2)]
cursor_descr = (("a", None), ("b", None))
cdf = SupersetDataFrame(data, cursor_descr, BaseEngineSpec)
self.assertEqual(
cdf.columns,
[
{
"is_date": False,
"type": "FLOAT",
"name": "a",
"is_dim": False,
"agg": "sum",
},
{
"is_date": False,
"type": "INT",
"name": "b",
"is_dim": False,
"agg": "sum",
},
],
)
def test_is_date(self):
f = SupersetDataFrame.is_date
self.assertEqual(f(np.dtype("M"), ""), True)
self.assertEqual(f(np.dtype("f"), "DATETIME"), True)
self.assertEqual(f(np.dtype("i"), "TIMESTAMP"), True)
self.assertEqual(f(None, "DATETIME"), True)
self.assertEqual(f(None, "TIMESTAMP"), True)
self.assertEqual(f(None, ""), False)
self.assertEqual(f(np.dtype(np.int32), ""), False)
def test_dedup_with_data(self):
data = [("a", 1), ("a", 2)]
cursor_descr = (("a", "string"), ("a", "string"))
cdf = SupersetDataFrame(data, cursor_descr, BaseEngineSpec)
self.assertListEqual(cdf.column_names, ["a", "a__1"])
def test_int64_with_missing_data(self):
data = [(None,), (1239162456494753670,), (None,), (None,), (None,), (None,)]
cursor_descr = [("user_id", "bigint", None, None, None, None, True)]
# the base engine spec does not provide a dtype based on the cursor
# description, so the column is inferred as float64 because of the
# missing data
cdf = SupersetDataFrame(data, cursor_descr, BaseEngineSpec)
np.testing.assert_array_equal(
cdf.raw_df.values.tolist(),
[[np.nan], [1.2391624564947538e18], [np.nan], [np.nan], [np.nan], [np.nan]],
)
# currently only Presto provides a dtype based on the cursor description
cdf = SupersetDataFrame(data, cursor_descr, PrestoEngineSpec)
np.testing.assert_array_equal(
cdf.raw_df.values.tolist(),
[[np.nan], [1239162456494753670], [np.nan], [np.nan], [np.nan], [np.nan]],
)
def test_pandas_datetime64(self):
data = [(None,)]
cursor_descr = [("ds", "timestamp", None, None, None, None, True)]
cdf = SupersetDataFrame(data, cursor_descr, PrestoEngineSpec)
self.assertEqual(cdf.raw_df.dtypes[0], np.dtype("<M8[ns]"))
def test_no_type_coercion(self):
data = [("a", 1), ("b", 2)]
cursor_descr = [
("one", "varchar", None, None, None, None, True),
("two", "integer", None, None, None, None, True),
]
cdf = SupersetDataFrame(data, cursor_descr, PrestoEngineSpec)
self.assertEqual(cdf.raw_df.dtypes[0], np.dtype("O"))
self.assertEqual(cdf.raw_df.dtypes[1], pd.Int64Dtype())
def test_empty_data(self):
data = []
cursor_descr = [
("one", "varchar", None, None, None, None, True),
("two", "integer", None, None, None, None, True),
]
cdf = SupersetDataFrame(data, cursor_descr, PrestoEngineSpec)
self.assertEqual(cdf.raw_df.dtypes[0], np.dtype("O"))
self.assertEqual(cdf.raw_df.dtypes[1], pd.Int64Dtype())
| zhouyao1994/incubator-superset | tests/dataframe_test.py | Python | apache-2.0 | 6,316 |
import threading
from ctypes import byref, c_char_p, c_int, c_char, c_size_t, Structure, POINTER
from django.contrib.gis import memoryview
from django.contrib.gis.geos.base import GEOSBase
from django.contrib.gis.geos.libgeos import GEOM_PTR
from django.contrib.gis.geos.prototypes.errcheck import check_geom, check_string, check_sized_string
from django.contrib.gis.geos.prototypes.geom import c_uchar_p, geos_char_p
from django.contrib.gis.geos.prototypes.threadsafe import GEOSFunc
from django.utils import six
from django.utils.encoding import force_bytes
### The WKB/WKT Reader/Writer structures and pointers ###
class WKTReader_st(Structure):
pass
class WKTWriter_st(Structure):
pass
class WKBReader_st(Structure):
pass
class WKBWriter_st(Structure):
pass
WKT_READ_PTR = POINTER(WKTReader_st)
WKT_WRITE_PTR = POINTER(WKTWriter_st)
WKB_READ_PTR = POINTER(WKBReader_st)
WKB_WRITE_PTR = POINTER(WKBWriter_st)
### WKTReader routines ###
wkt_reader_create = GEOSFunc('GEOSWKTReader_create')
wkt_reader_create.restype = WKT_READ_PTR
wkt_reader_destroy = GEOSFunc('GEOSWKTReader_destroy')
wkt_reader_destroy.argtypes = [WKT_READ_PTR]
wkt_reader_read = GEOSFunc('GEOSWKTReader_read')
wkt_reader_read.argtypes = [WKT_READ_PTR, c_char_p]
wkt_reader_read.restype = GEOM_PTR
wkt_reader_read.errcheck = check_geom
### WKTWriter routines ###
wkt_writer_create = GEOSFunc('GEOSWKTWriter_create')
wkt_writer_create.restype = WKT_WRITE_PTR
wkt_writer_destroy = GEOSFunc('GEOSWKTWriter_destroy')
wkt_writer_destroy.argtypes = [WKT_WRITE_PTR]
wkt_writer_write = GEOSFunc('GEOSWKTWriter_write')
wkt_writer_write.argtypes = [WKT_WRITE_PTR, GEOM_PTR]
wkt_writer_write.restype = geos_char_p
wkt_writer_write.errcheck = check_string
try:
wkt_writer_get_outdim = GEOSFunc('GEOSWKTWriter_getOutputDimension')
wkt_writer_get_outdim.argtypes = [WKT_WRITE_PTR]
wkt_writer_get_outdim.restype = c_int
wkt_writer_set_outdim = GEOSFunc('GEOSWKTWriter_setOutputDimension')
wkt_writer_set_outdim.argtypes = [WKT_WRITE_PTR, c_int]
except AttributeError:
# GEOSWKTWriter_get/setOutputDimension has been introduced in GEOS 3.3.0
# Always return 2 if not available
wkt_writer_get_outdim = lambda ptr: 2
wkt_writer_set_outdim = lambda ptr, dim: None
### WKBReader routines ###
wkb_reader_create = GEOSFunc('GEOSWKBReader_create')
wkb_reader_create.restype = WKB_READ_PTR
wkb_reader_destroy = GEOSFunc('GEOSWKBReader_destroy')
wkb_reader_destroy.argtypes = [WKB_READ_PTR]
def wkb_read_func(func):
# Although the function definitions take `const unsigned char *`
# as their parameter, we use c_char_p here so the function may
# take Python strings directly as parameters. Inside Python there
# is not a difference between signed and unsigned characters, so
# it is not a problem.
func.argtypes = [WKB_READ_PTR, c_char_p, c_size_t]
func.restype = GEOM_PTR
func.errcheck = check_geom
return func
wkb_reader_read = wkb_read_func(GEOSFunc('GEOSWKBReader_read'))
wkb_reader_read_hex = wkb_read_func(GEOSFunc('GEOSWKBReader_readHEX'))
### WKBWriter routines ###
wkb_writer_create = GEOSFunc('GEOSWKBWriter_create')
wkb_writer_create.restype = WKB_WRITE_PTR
wkb_writer_destroy = GEOSFunc('GEOSWKBWriter_destroy')
wkb_writer_destroy.argtypes = [WKB_WRITE_PTR]
# WKB Writing prototypes.
def wkb_write_func(func):
func.argtypes = [WKB_WRITE_PTR, GEOM_PTR, POINTER(c_size_t)]
func.restype = c_uchar_p
func.errcheck = check_sized_string
return func
wkb_writer_write = wkb_write_func(GEOSFunc('GEOSWKBWriter_write'))
wkb_writer_write_hex = wkb_write_func(GEOSFunc('GEOSWKBWriter_writeHEX'))
# WKBWriter property getter/setter prototypes.
def wkb_writer_get(func, restype=c_int):
func.argtypes = [WKB_WRITE_PTR]
func.restype = restype
return func
def wkb_writer_set(func, argtype=c_int):
func.argtypes = [WKB_WRITE_PTR, argtype]
return func
wkb_writer_get_byteorder = wkb_writer_get(GEOSFunc('GEOSWKBWriter_getByteOrder'))
wkb_writer_set_byteorder = wkb_writer_set(GEOSFunc('GEOSWKBWriter_setByteOrder'))
wkb_writer_get_outdim = wkb_writer_get(GEOSFunc('GEOSWKBWriter_getOutputDimension'))
wkb_writer_set_outdim = wkb_writer_set(GEOSFunc('GEOSWKBWriter_setOutputDimension'))
wkb_writer_get_include_srid = wkb_writer_get(GEOSFunc('GEOSWKBWriter_getIncludeSRID'), restype=c_char)
wkb_writer_set_include_srid = wkb_writer_set(GEOSFunc('GEOSWKBWriter_setIncludeSRID'), argtype=c_char)
### Base I/O Class ###
class IOBase(GEOSBase):
"Base class for GEOS I/O objects."
def __init__(self):
# Getting the pointer with the constructor.
self.ptr = self._constructor()
def __del__(self):
# Cleaning up with the appropriate destructor.
if self._ptr:
self._destructor(self._ptr)
### Base WKB/WKT Reading and Writing objects ###
# Non-public WKB/WKT reader classes for internal use because
# their `read` methods return _pointers_ instead of GEOSGeometry
# objects.
class _WKTReader(IOBase):
_constructor = wkt_reader_create
_destructor = wkt_reader_destroy
ptr_type = WKT_READ_PTR
def read(self, wkt):
if not isinstance(wkt, (bytes, six.string_types)):
raise TypeError
return wkt_reader_read(self.ptr, force_bytes(wkt))
class _WKBReader(IOBase):
_constructor = wkb_reader_create
_destructor = wkb_reader_destroy
ptr_type = WKB_READ_PTR
def read(self, wkb):
"Returns a _pointer_ to C GEOS Geometry object from the given WKB."
if isinstance(wkb, memoryview):
wkb_s = bytes(wkb)
return wkb_reader_read(self.ptr, wkb_s, len(wkb_s))
elif isinstance(wkb, (bytes, six.string_types)):
return wkb_reader_read_hex(self.ptr, wkb, len(wkb))
else:
raise TypeError
### WKB/WKT Writer Classes ###
class WKTWriter(IOBase):
_constructor = wkt_writer_create
_destructor = wkt_writer_destroy
ptr_type = WKT_WRITE_PTR
def write(self, geom):
"Returns the WKT representation of the given geometry."
return wkt_writer_write(self.ptr, geom.ptr)
@property
def outdim(self):
return wkt_writer_get_outdim(self.ptr)
@outdim.setter
def outdim(self, new_dim):
if new_dim not in (2, 3):
raise ValueError('WKT output dimension must be 2 or 3')
wkt_writer_set_outdim(self.ptr, new_dim)
class WKBWriter(IOBase):
_constructor = wkb_writer_create
_destructor = wkb_writer_destroy
ptr_type = WKB_WRITE_PTR
def write(self, geom):
"Returns the WKB representation of the given geometry."
return memoryview(wkb_writer_write(self.ptr, geom.ptr, byref(c_size_t())))
def write_hex(self, geom):
"Returns the HEXEWKB representation of the given geometry."
return wkb_writer_write_hex(self.ptr, geom.ptr, byref(c_size_t()))
### WKBWriter Properties ###
# Property for getting/setting the byteorder.
def _get_byteorder(self):
return wkb_writer_get_byteorder(self.ptr)
def _set_byteorder(self, order):
if order not in (0, 1):
raise ValueError('Byte order parameter must be 0 (Big Endian) or 1 (Little Endian).')
wkb_writer_set_byteorder(self.ptr, order)
byteorder = property(_get_byteorder, _set_byteorder)
# Property for getting/setting the output dimension.
def _get_outdim(self):
return wkb_writer_get_outdim(self.ptr)
def _set_outdim(self, new_dim):
if new_dim not in (2, 3):
raise ValueError('WKB output dimension must be 2 or 3')
wkb_writer_set_outdim(self.ptr, new_dim)
outdim = property(_get_outdim, _set_outdim)
# Property for getting/setting the include srid flag.
def _get_include_srid(self):
return bool(ord(wkb_writer_get_include_srid(self.ptr)))
def _set_include_srid(self, include):
if include:
flag = b'\x01'
else:
flag = b'\x00'
wkb_writer_set_include_srid(self.ptr, flag)
srid = property(_get_include_srid, _set_include_srid)
# `ThreadLocalIO` object holds instances of the WKT and WKB reader/writer
# objects that are local to the thread. The `GEOSGeometry` internals
# access these instances by calling the module-level functions, defined
# below.
class ThreadLocalIO(threading.local):
wkt_r = None
wkt_w = None
wkb_r = None
wkb_w = None
ewkb_w = None
thread_context = ThreadLocalIO()
# These module-level routines return the I/O object that is local to the
# thread. If the I/O object does not exist yet it will be initialized.
def wkt_r():
if not thread_context.wkt_r:
thread_context.wkt_r = _WKTReader()
return thread_context.wkt_r
def wkt_w(dim=2):
if not thread_context.wkt_w:
thread_context.wkt_w = WKTWriter()
thread_context.wkt_w.outdim = dim
return thread_context.wkt_w
def wkb_r():
if not thread_context.wkb_r:
thread_context.wkb_r = _WKBReader()
return thread_context.wkb_r
def wkb_w(dim=2):
if not thread_context.wkb_w:
thread_context.wkb_w = WKBWriter()
thread_context.wkb_w.outdim = dim
return thread_context.wkb_w
def ewkb_w(dim=2):
if not thread_context.ewkb_w:
thread_context.ewkb_w = WKBWriter()
thread_context.ewkb_w.srid = True
thread_context.ewkb_w.outdim = dim
return thread_context.ewkb_w
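# Usage sketch (illustrative only, assuming a GEOSGeometry instance `geom`):
#   wkt = wkt_w(dim=3).write(geom)        # WKT that keeps Z coordinates
#   ewkb = ewkb_w().write_hex(geom)       # HEXEWKB that embeds the SRID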
| 912/M-new | virtualenvironment/experimental/lib/python2.7/site-packages/django/contrib/gis/geos/prototypes/io.py | Python | gpl-2.0 | 9,452 |
import json
from unit.http import TestHTTP
from unit.option import option
http = TestHTTP()
def check_chroot():
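    # Probe whether unitd accepts a "chroot" share option by applying a
    # minimal configuration and checking the control-socket response.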
available = option.available
resp = http.put(
url='/config',
sock_type='unix',
addr=option.temp_dir + '/control.unit.sock',
body=json.dumps(
{
"listeners": {"*:7080": {"pass": "routes"}},
"routes": [
{
"action": {
"share": option.temp_dir,
"chroot": option.temp_dir,
}
}
],
}
),
)
if 'success' in resp['body']:
available['features']['chroot'] = True
| nginx/unit | test/unit/check/chroot.py | Python | apache-2.0 | 748 |
from __future__ import absolute_import
__copyright__ = "Copyright (C) 2013 David Braam - Released under terms of the AGPLv3 License"
import os
import glob
import sys
import time
import math
import re
import traceback
import threading
import platform
import Queue as queue
import serial
from Cura.avr_isp import stk500v2
from Cura.avr_isp import ispBase
from Cura.util import profile
from Cura.util import version
try:
import _winreg
except:
pass
def serialList(forAutoDetect=False):
baselist=[]
if platform.system() == "Windows":
try:
key=_winreg.OpenKey(_winreg.HKEY_LOCAL_MACHINE,"HARDWARE\\DEVICEMAP\\SERIALCOMM")
i=0
while True:
values = _winreg.EnumValue(key, i)
if not forAutoDetect or 'USBSER' in values[0]:
baselist+=[values[1]]
i+=1
except:
pass
if forAutoDetect:
baselist = baselist + glob.glob('/dev/ttyUSB*') + glob.glob('/dev/ttyACM*') + glob.glob("/dev/tty.usb*") + glob.glob("/dev/cu.usb*")
baselist = filter(lambda s: not 'Bluetooth' in s, baselist)
else:
baselist = baselist + glob.glob('/dev/ttyUSB*') + glob.glob('/dev/ttyACM*') + glob.glob("/dev/tty.usb*") + glob.glob("/dev/cu.*") + glob.glob("/dev/rfcomm*")
prev = profile.getPreference('serial_port_auto')
if prev in baselist:
baselist.remove(prev)
baselist.insert(0, prev)
if version.isDevVersion() and not forAutoDetect:
baselist.append('VIRTUAL')
return baselist
def machineIsConnected():
port = profile.getPreference('serial_port')
if port == 'AUTO':
return len(serialList(True)) > 0
if platform.system() == "Windows":
return port in serialList()
return os.path.isfile(port)
def baudrateList():
ret = [250000, 230400, 115200, 57600, 38400, 19200, 9600]
if profile.getPreference('serial_baud_auto') != '':
prev = int(profile.getPreference('serial_baud_auto'))
if prev in ret:
ret.remove(prev)
ret.insert(0, prev)
return ret
class VirtualPrinter():
def __init__(self):
self.readList = ['start\n', 'Marlin: Virtual Marlin!\n', '\x80\n']
self.temp = 0.0
self.targetTemp = 0.0
self.lastTempAt = time.time()
self.bedTemp = 1.0
self.bedTargetTemp = 1.0
def write(self, data):
if self.readList is None:
return
#print "Send: %s" % (data.rstrip())
if 'M104' in data or 'M109' in data:
try:
self.targetTemp = float(re.search('S([0-9]+)', data).group(1))
except:
pass
if 'M140' in data or 'M190' in data:
try:
self.bedTargetTemp = float(re.search('S([0-9]+)', data).group(1))
except:
pass
if 'M105' in data:
self.readList.append("ok T:%.2f /%.2f B:%.2f /%.2f @:64\n" % (self.temp, self.targetTemp, self.bedTemp, self.bedTargetTemp))
elif len(data.strip()) > 0:
self.readList.append("ok\n")
def readline(self):
if self.readList is None:
return ''
n = 0
timeDiff = self.lastTempAt - time.time()
self.lastTempAt = time.time()
if abs(self.temp - self.targetTemp) > 1:
self.temp += math.copysign(timeDiff * 10, self.targetTemp - self.temp)
if abs(self.bedTemp - self.bedTargetTemp) > 1:
self.bedTemp += math.copysign(timeDiff * 10, self.bedTargetTemp - self.bedTemp)
while len(self.readList) < 1:
time.sleep(0.1)
n += 1
if n == 20:
return ''
if self.readList is None:
return ''
time.sleep(0.001)
#print "Recv: %s" % (self.readList[0].rstrip())
return self.readList.pop(0)
def close(self):
self.readList = None
class MachineComPrintCallback(object):
def mcLog(self, message):
pass
def mcTempUpdate(self, temp, bedTemp, targetTemp, bedTargetTemp):
pass
def mcStateChange(self, state):
pass
def mcMessage(self, message):
pass
def mcProgress(self, lineNr):
pass
def mcZChange(self, newZ):
pass
class MachineCom(object):
STATE_NONE = 0
STATE_OPEN_SERIAL = 1
STATE_DETECT_SERIAL = 2
STATE_DETECT_BAUDRATE = 3
STATE_CONNECTING = 4
STATE_OPERATIONAL = 5
STATE_PRINTING = 6
STATE_PAUSED = 7
STATE_CLOSED = 8
STATE_ERROR = 9
STATE_CLOSED_WITH_ERROR = 10
def __init__(self, port = None, baudrate = None, callbackObject = None):
if port is None:
port = profile.getPreference('serial_port')
if baudrate is None:
if profile.getPreference('serial_baud') == 'AUTO':
baudrate = 0
else:
baudrate = int(profile.getPreference('serial_baud'))
if callbackObject is None:
callbackObject = MachineComPrintCallback()
self._port = port
self._baudrate = baudrate
self._callback = callbackObject
self._state = self.STATE_NONE
self._serial = None
self._baudrateDetectList = baudrateList()
self._baudrateDetectRetry = 0
self._extruderCount = int(profile.getPreference('extruder_amount'))
self._temperatureRequestExtruder = 0
self._temp = [0] * self._extruderCount
self._targetTemp = [0] * self._extruderCount
self._bedTemp = 0
self._bedTargetTemp = 0
self._gcodeList = None
self._gcodePos = 0
self._commandQueue = queue.Queue()
self._logQueue = queue.Queue(256)
self._feedRateModifier = {}
self._currentZ = -1
self._heatupWaitStartTime = 0
self._heatupWaitTimeLost = 0.0
self._printStartTime100 = None
self.thread = threading.Thread(target=self._monitor)
self.thread.daemon = True
self.thread.start()
def _changeState(self, newState):
if self._state == newState:
return
oldState = self.getStateString()
self._state = newState
self._log('Changing monitoring state from \'%s\' to \'%s\'' % (oldState, self.getStateString()))
self._callback.mcStateChange(newState)
def getState(self):
return self._state
def getStateString(self):
if self._state == self.STATE_NONE:
return "Offline"
if self._state == self.STATE_OPEN_SERIAL:
return "Opening serial port"
if self._state == self.STATE_DETECT_SERIAL:
return "Detecting serial port"
if self._state == self.STATE_DETECT_BAUDRATE:
return "Detecting baudrate"
if self._state == self.STATE_CONNECTING:
return "Connecting"
if self._state == self.STATE_OPERATIONAL:
return "Operational"
if self._state == self.STATE_PRINTING:
return "Printing"
if self._state == self.STATE_PAUSED:
return "Paused"
if self._state == self.STATE_CLOSED:
return "Closed"
if self._state == self.STATE_ERROR:
return "Error: %s" % (self.getShortErrorString())
if self._state == self.STATE_CLOSED_WITH_ERROR:
return "Error: %s" % (self.getShortErrorString())
return "?%d?" % (self._state)
def getShortErrorString(self):
if len(self._errorValue) < 20:
return self._errorValue
return self._errorValue[:20] + "..."
def getErrorString(self):
return self._errorValue
def isClosedOrError(self):
return self._state == self.STATE_ERROR or self._state == self.STATE_CLOSED_WITH_ERROR or self._state == self.STATE_CLOSED
def isError(self):
return self._state == self.STATE_ERROR or self._state == self.STATE_CLOSED_WITH_ERROR
def isOperational(self):
return self._state == self.STATE_OPERATIONAL or self._state == self.STATE_PRINTING or self._state == self.STATE_PAUSED
def isPrinting(self):
return self._state == self.STATE_PRINTING
def isPaused(self):
return self._state == self.STATE_PAUSED
def getPrintPos(self):
return self._gcodePos
def getPrintTime(self):
return time.time() - self._printStartTime
def getPrintTimeRemainingEstimate(self):
if self._printStartTime100 == None or self.getPrintPos() < 200:
return None
printTime = (time.time() - self._printStartTime100) / 60
printTimeTotal = printTime * (len(self._gcodeList) - 100) / (self.getPrintPos() - 100)
printTimeLeft = printTimeTotal - printTime
return printTimeLeft
def getTemp(self):
return self._temp
def getBedTemp(self):
return self._bedTemp
def getLog(self):
ret = []
while not self._logQueue.empty():
ret.append(self._logQueue.get())
for line in ret:
self._logQueue.put(line, False)
return ret
def _monitor(self):
#Open the serial port.
if self._port == 'AUTO':
self._changeState(self.STATE_DETECT_SERIAL)
programmer = stk500v2.Stk500v2()
self._log("Serial port list: %s" % (str(serialList(True))))
for p in serialList(True):
try:
self._log("Connecting to: %s" % (p))
programmer.connect(p)
self._serial = programmer.leaveISP()
profile.putPreference('serial_port_auto', p)
break
except ispBase.IspError as (e):
self._log("Error while connecting to %s: %s" % (p, str(e)))
pass
except:
self._log("Unexpected error while connecting to serial port: %s %s" % (p, getExceptionString()))
programmer.close()
elif self._port == 'VIRTUAL':
self._changeState(self.STATE_OPEN_SERIAL)
self._serial = VirtualPrinter()
else:
self._changeState(self.STATE_OPEN_SERIAL)
try:
self._log("Connecting to: %s" % (self._port))
if self._baudrate == 0:
self._serial = serial.Serial(str(self._port), 115200, timeout=0.1, writeTimeout=10000)
else:
self._serial = serial.Serial(str(self._port), self._baudrate, timeout=2, writeTimeout=10000)
except:
self._log("Unexpected error while connecting to serial port: %s %s" % (self._port, getExceptionString()))
if self._serial == None:
self._log("Failed to open serial port (%s)" % (self._port))
self._errorValue = 'Failed to autodetect serial port.'
self._changeState(self.STATE_ERROR)
return
self._log("Connected to: %s, starting monitor" % (self._serial))
if self._baudrate == 0:
self._changeState(self.STATE_DETECT_BAUDRATE)
else:
self._changeState(self.STATE_CONNECTING)
#Start monitoring the serial port.
if self._state == self.STATE_CONNECTING:
timeout = time.time() + 15
else:
timeout = time.time() + 5
tempRequestTimeout = timeout
while True:
line = self._readline()
if line == None:
break
#No matter the state, if we see an error, goto the error state and store the error for reference.
if line.startswith('Error:'):
#Oh YEAH, consistency.
				# Marlin reports a MIN/MAX temp error as "Error:x\n: Extruder switched off. MAXTEMP triggered !\n"
# But a bed temp error is reported as "Error: Temperature heated bed switched off. MAXTEMP triggered !!"
# So we can have an extra newline in the most common case. Awesome work people.
if re.match('Error:[0-9]\n', line):
line = line.rstrip() + self._readline()
#Skip the communication errors, as those get corrected.
if 'checksum mismatch' in line or 'Line Number is not Last Line Number' in line or 'No Line Number with checksum' in line or 'No Checksum with line number' in line:
pass
elif not self.isError():
self._errorValue = line[6:]
self._changeState(self.STATE_ERROR)
if ' T:' in line or line.startswith('T:'):
self._temp[self._temperatureRequestExtruder] = float(re.search("[0-9\.]*", line.split('T:')[1]).group(0))
if ' B:' in line:
self._bedTemp = float(re.search("[0-9\.]*", line.split(' B:')[1]).group(0))
self._callback.mcTempUpdate(self._temp, self._bedTemp, self._targetTemp, self._bedTargetTemp)
#If we are waiting for an M109 or M190 then measure the time we lost during heatup, so we can remove that time from our printing time estimate.
if not 'ok' in line and self._heatupWaitStartTime != 0:
t = time.time()
self._heatupWaitTimeLost = t - self._heatupWaitStartTime
self._heatupWaitStartTime = t
elif line.strip() != '' and line.strip() != 'ok' and not line.startswith('Resend:') and line != 'echo:Unknown command:""\n' and self.isOperational():
self._callback.mcMessage(line)
if self._state == self.STATE_DETECT_BAUDRATE:
if line == '' or time.time() > timeout:
if len(self._baudrateDetectList) < 1:
self.close()
self._errorValue = "No more baudrates to test, and no suitable baudrate found."
self._changeState(self.STATE_ERROR)
elif self._baudrateDetectRetry > 0:
self._baudrateDetectRetry -= 1
self._serial.write('\n')
self._log("Baudrate test retry: %d" % (self._baudrateDetectRetry))
self._sendCommand("M105")
self._testingBaudrate = True
else:
baudrate = self._baudrateDetectList.pop(0)
try:
self._serial.baudrate = baudrate
self._serial.timeout = 0.5
self._log("Trying baudrate: %d" % (baudrate))
self._baudrateDetectRetry = 5
self._baudrateDetectTestOk = 0
timeout = time.time() + 5
self._serial.write('\n')
self._sendCommand("M105")
self._testingBaudrate = True
except:
self._log("Unexpected error while setting baudrate: %d %s" % (baudrate, getExceptionString()))
elif 'ok' in line and 'T:' in line:
self._baudrateDetectTestOk += 1
if self._baudrateDetectTestOk < 10:
self._log("Baudrate test ok: %d" % (self._baudrateDetectTestOk))
self._sendCommand("M105")
else:
self._sendCommand("M999")
self._serial.timeout = 2
profile.putPreference('serial_baud_auto', self._serial.baudrate)
self._changeState(self.STATE_OPERATIONAL)
else:
self._testingBaudrate = False
elif self._state == self.STATE_CONNECTING:
if line == '':
self._sendCommand("M105")
elif 'ok' in line:
self._changeState(self.STATE_OPERATIONAL)
if time.time() > timeout:
self.close()
elif self._state == self.STATE_OPERATIONAL:
#Request the temperature on comm timeout (every 2 seconds) when we are not printing.
if line == '':
if self._extruderCount > 0:
self._temperatureRequestExtruder = (self._temperatureRequestExtruder + 1) % self._extruderCount
self._sendCommand("M105 T%d" % (self._temperatureRequestExtruder))
else:
self._sendCommand("M105")
tempRequestTimeout = time.time() + 5
elif self._state == self.STATE_PRINTING:
if line == '' and time.time() > timeout:
self._log("Communication timeout during printing, forcing a line")
line = 'ok'
#Even when printing request the temperature every 5 seconds.
if time.time() > tempRequestTimeout:
if self._extruderCount > 0:
self._temperatureRequestExtruder = (self._temperatureRequestExtruder + 1) % self._extruderCount
self._sendCommand("M105 T%d" % (self._temperatureRequestExtruder))
else:
self._sendCommand("M105")
tempRequestTimeout = time.time() + 5
if 'ok' in line:
timeout = time.time() + 5
if not self._commandQueue.empty():
self._sendCommand(self._commandQueue.get())
else:
self._sendNext()
elif "resend" in line.lower() or "rs" in line:
try:
self._gcodePos = int(line.replace("N:"," ").replace("N"," ").replace(":"," ").split()[-1])
except:
if "rs" in line:
self._gcodePos = int(line.split()[1])
self._log("Connection closed, closing down monitor")
def _log(self, message):
self._callback.mcLog(message)
try:
self._logQueue.put(message, False)
except:
#If the log queue is full, remove the first message and append the new message again
self._logQueue.get()
try:
self._logQueue.put(message, False)
except:
pass
def _readline(self):
if self._serial == None:
return None
try:
ret = self._serial.readline()
except:
self._log("Unexpected error while reading serial port: %s" % (getExceptionString()))
self._errorValue = getExceptionString()
self.close(True)
return None
if ret == '':
#self._log("Recv: TIMEOUT")
return ''
self._log("Recv: %s" % (unicode(ret, 'ascii', 'replace').encode('ascii', 'replace').rstrip()))
return ret
def close(self, isError = False):
if self._serial != None:
self._serial.close()
if isError:
self._changeState(self.STATE_CLOSED_WITH_ERROR)
else:
self._changeState(self.STATE_CLOSED)
self._serial = None
def __del__(self):
self.close()
def _sendCommand(self, cmd):
if self._serial is None:
return
if 'M109' in cmd or 'M190' in cmd:
self._heatupWaitStartTime = time.time()
if 'M104' in cmd or 'M109' in cmd:
try:
t = 0
if 'T' in cmd:
t = int(re.search('T([0-9]+)', cmd).group(1))
self._targetTemp[t] = float(re.search('S([0-9]+)', cmd).group(1))
except:
pass
if 'M140' in cmd or 'M190' in cmd:
try:
self._bedTargetTemp = float(re.search('S([0-9]+)', cmd).group(1))
except:
pass
self._log('Send: %s' % (cmd))
try:
self._serial.write(cmd + '\n')
except serial.SerialTimeoutException:
self._log("Serial timeout while writing to serial port, trying again.")
try:
time.sleep(0.5)
self._serial.write(cmd + '\n')
except:
self._log("Unexpected error while writing serial port: %s" % (getExceptionString()))
self._errorValue = getExceptionString()
self.close(True)
except:
self._log("Unexpected error while writing serial port: %s" % (getExceptionString()))
self._errorValue = getExceptionString()
self.close(True)
def _sendNext(self):
if self._gcodePos >= len(self._gcodeList):
self._changeState(self.STATE_OPERATIONAL)
return
if self._gcodePos == 100:
self._printStartTime100 = time.time()
line = self._gcodeList[self._gcodePos]
if type(line) is tuple:
self._printSection = line[1]
line = line[0]
try:
if line == 'M0' or line == 'M1':
self.setPause(True)
line = 'M105' #Don't send the M0 or M1 to the machine, as M0 and M1 are handled as an LCD menu pause.
if self._printSection in self._feedRateModifier:
line = re.sub('F([0-9]*)', lambda m: 'F' + str(int(int(m.group(1)) * self._feedRateModifier[self._printSection])), line)
if ('G0' in line or 'G1' in line) and 'Z' in line:
z = float(re.search('Z([0-9\.]*)', line).group(1))
if self._currentZ != z:
self._currentZ = z
self._callback.mcZChange(z)
except:
self._log("Unexpected error: %s" % (getExceptionString()))
checksum = reduce(lambda x,y:x^y, map(ord, "N%d%s" % (self._gcodePos, line)))
self._sendCommand("N%d%s*%d" % (self._gcodePos, line, checksum))
self._gcodePos += 1
self._callback.mcProgress(self._gcodePos)
def sendCommand(self, cmd):
cmd = cmd.encode('ascii', 'replace')
if self.isPrinting():
self._commandQueue.put(cmd)
elif self.isOperational():
self._sendCommand(cmd)
def printGCode(self, gcodeList):
if not self.isOperational() or self.isPrinting():
return
self._gcodeList = gcodeList
self._gcodePos = 0
self._printStartTime100 = None
self._printSection = 'CUSTOM'
self._changeState(self.STATE_PRINTING)
self._printStartTime = time.time()
for i in xrange(0, 4):
self._sendNext()
def cancelPrint(self):
if self.isOperational():
self._changeState(self.STATE_OPERATIONAL)
def setPause(self, pause):
if not pause and self.isPaused():
self._changeState(self.STATE_PRINTING)
for i in xrange(0, 6):
self._sendNext()
if pause and self.isPrinting():
self._changeState(self.STATE_PAUSED)
def setFeedrateModifier(self, type, value):
self._feedRateModifier[type] = value
def getExceptionString():
locationInfo = traceback.extract_tb(sys.exc_info()[2])[0]
return "%s: '%s' @ %s:%s:%d" % (str(sys.exc_info()[0].__name__), str(sys.exc_info()[1]), os.path.basename(locationInfo[0]), locationInfo[2], locationInfo[1])
| tinkerinestudio/Tinkerine-Suite | TinkerineSuite/Cura/util/machineCom.py | Python | agpl-3.0 | 19,071 |
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
"""
Parsers for XML to dom.
"""
from __future__ import absolute_import
import xml.sax.handler
from .dom import *
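# Parser accumulates SAX-style start/end/data events into the dom Tree;
# XMLParser adapts xml.sax callbacks onto it and records source positions.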
class Parser:
def __init__(self):
self.tree = Tree()
self.node = self.tree
self.nodes = []
def line(self, id, lineno, colno):
while self.nodes:
n = self.nodes.pop()
n._line(id, lineno, colno)
def add(self, node):
self.node.add(node)
self.nodes.append(node)
def start(self, name, attrs):
tag = Tag(name, *attrs)
self.add(tag)
self.node = tag
def end(self, name):
self.balance(name)
self.node = self.node.parent
def data(self, data):
children = self.node.children
if children and isinstance(children[-1], Data):
children[-1].data += data
else:
self.add(Data(data))
def comment(self, comment):
self.add(Comment(comment))
def entity(self, ref):
self.add(Entity(ref))
def character(self, ref):
self.add(Character(ref))
def balance(self, name = None):
while self.node != self.tree and name != self.node.name:
self.node.parent.extend(self.node.children)
del self.node.children[:]
self.node.singleton = True
self.node = self.node.parent
class XMLParser(xml.sax.handler.ContentHandler):
def __init__(self):
self.parser = Parser()
self.locator = None
def line(self):
if self.locator != None:
self.parser.line(self.locator.getSystemId(),
self.locator.getLineNumber(),
self.locator.getColumnNumber())
def setDocumentLocator(self, locator):
self.locator = locator
def startElement(self, name, attrs):
self.parser.start(name, attrs.items())
self.line()
def endElement(self, name):
self.parser.end(name)
self.line()
def characters(self, content):
self.parser.data(content)
self.line()
def skippedEntity(self, name):
self.parser.entity(name)
self.line()
| kgiusti/qpid-proton | tools/python/mllib/parsers.py | Python | apache-2.0 | 2,719 |
# coding=utf-8
# Author: Paul Wollaston
# Contributions: Luke Mullan
#
# This client script allows connection to Deluge Daemon directly, completely
# circumventing the requirement to use the WebUI.
from __future__ import print_function, unicode_literals
from base64 import b64encode
import sickbeard
from sickbeard import logger
from sickbeard.clients.generic import GenericClient
from synchronousdeluge import DelugeClient
class DelugeDAPI(GenericClient):
drpc = None
def __init__(self, host=None, username=None, password=None):
super(DelugeDAPI, self).__init__('DelugeD', host, username, password)
def _get_auth(self):
if not self.connect():
return None
return True
def connect(self, reconnect=False):
hostname = self.host.replace("/", "").split(':')
if not self.drpc or reconnect:
self.drpc = DelugeRPC(hostname[1], port=hostname[2], username=self.username, password=self.password)
return self.drpc
def _add_torrent_uri(self, result):
# label = sickbeard.TORRENT_LABEL
# if result.show.is_anime:
# label = sickbeard.TORRENT_LABEL_ANIME
options = {
'add_paused': sickbeard.TORRENT_PAUSED
}
remote_torrent = self.drpc.add_torrent_magnet(result.url, options, result.hash)
if not remote_torrent:
return None
result.hash = remote_torrent
return remote_torrent
def _add_torrent_file(self, result):
# label = sickbeard.TORRENT_LABEL
# if result.show.is_anime:
# label = sickbeard.TORRENT_LABEL_ANIME
if not result.content:
result.content = {}
return None
options = {
'add_paused': sickbeard.TORRENT_PAUSED
}
remote_torrent = self.drpc.add_torrent_file(result.name + '.torrent', result.content, options, result.hash)
if not remote_torrent:
return None
result.hash = remote_torrent
return remote_torrent
def _set_torrent_label(self, result):
label = sickbeard.TORRENT_LABEL.lower()
if result.show.is_anime:
label = sickbeard.TORRENT_LABEL_ANIME.lower()
if ' ' in label:
logger.log(self.name + ': Invalid label. Label must not contain a space', logger.ERROR)
return False
if label:
return self.drpc.set_torrent_label(result.hash, label)
return True
def _set_torrent_ratio(self, result):
if result.ratio:
ratio = float(result.ratio)
return self.drpc.set_torrent_ratio(result.hash, ratio)
return True
def _set_torrent_priority(self, result):
if result.priority == 1:
return self.drpc.set_torrent_priority(result.hash, True)
return True
def _set_torrent_path(self, result):
if sickbeard.TORRENT_PATH:
return self.drpc.set_torrent_path(result.hash, sickbeard.TORRENT_PATH)
return True
def _set_torrent_pause(self, result):
if sickbeard.TORRENT_PAUSED:
return self.drpc.pause_torrent(result.hash)
return True
def testAuthentication(self):
if self.connect(True) and self.drpc.test():
return True, 'Success: Connected and Authenticated'
else:
return False, 'Error: Unable to Authenticate! Please check your config!'
class DelugeRPC(object):
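    # Thin wrapper around synchronousdeluge.DelugeClient: every call opens a
    # fresh daemon connection and disconnects when done.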
host = 'localhost'
port = 58846
username = None
password = None
client = None
def __init__(self, host='localhost', port=58846, username=None, password=None):
super(DelugeRPC, self).__init__()
self.host = host
self.port = port
self.username = username
self.password = password
def connect(self):
self.client = DelugeClient()
self.client.connect(self.host, int(self.port), self.username, self.password)
def test(self):
try:
self.connect()
except Exception:
return False
return True
def add_torrent_magnet(self, torrent, options, torrent_hash):
torrent_id = False
try:
self.connect()
torrent_id = self.client.core.add_torrent_magnet(torrent, options).get() # pylint:disable=no-member
if not torrent_id:
torrent_id = self._check_torrent(torrent_hash)
except Exception:
return False
finally:
if self.client:
self.disconnect()
return torrent_id
def add_torrent_file(self, filename, torrent, options, torrent_hash):
torrent_id = False
try:
self.connect()
torrent_id = self.client.core.add_torrent_file(filename, b64encode(torrent), options).get() # pylint:disable=no-member
if not torrent_id:
torrent_id = self._check_torrent(torrent_hash)
except Exception:
return False
finally:
if self.client:
self.disconnect()
return torrent_id
def set_torrent_label(self, torrent_id, label):
try:
self.connect()
self.client.label.set_torrent(torrent_id, label).get() # pylint:disable=no-member
except Exception:
try:
self.connect()
self.client.labelplus.set_torrent_labels([torrent_id], label).get() # pylint:disable=no-member
except Exception:
return False
finally:
if self.client:
self.disconnect()
return True
def set_torrent_path(self, torrent_id, path):
try:
self.connect()
self.client.core.set_torrent_move_completed_path(torrent_id, path).get() # pylint:disable=no-member
self.client.core.set_torrent_move_completed(torrent_id, 1).get() # pylint:disable=no-member
except Exception:
return False
finally:
if self.client:
self.disconnect()
return True
def set_torrent_priority(self, torrent_ids, priority):
try:
self.connect()
if priority:
self.client.core.queue_top([torrent_ids]).get() # pylint:disable=no-member
except Exception:
return False
finally:
if self.client:
self.disconnect()
return True
def set_torrent_ratio(self, torrent_ids, ratio):
try:
self.connect()
self.client.core.set_torrent_stop_at_ratio(torrent_ids, True).get() # pylint:disable=no-member
self.client.core.set_torrent_stop_ratio(torrent_ids, ratio).get() # pylint:disable=no-member
except Exception:
return False
finally:
if self.client:
self.disconnect()
return True
def pause_torrent(self, torrent_ids):
try:
self.connect()
self.client.core.pause_torrent(torrent_ids).get() # pylint:disable=no-member
except Exception:
return False
finally:
if self.client:
self.disconnect()
return True
def disconnect(self):
self.client.disconnect()
def _check_torrent(self, torrent_hash):
        torrent_status = self.client.core.get_torrent_status(torrent_hash, {}).get()  # pylint:disable=no-member
        if torrent_status['hash']:
logger.log('DelugeD: Torrent already exists in Deluge', logger.DEBUG)
return torrent_hash
return False
api = DelugeDAPI()
| nopjmp/SickRage | sickbeard/clients/deluged_client.py | Python | gpl-3.0 | 7,649 |
#!/usr/bin/env python
# -*- coding:utf-8 -*-
"""
Copyright (C) 2015 Luiz Fernando Oliveira, Carlos Oliveira, Matheus Fernandes
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
"""
from cocos.layer import Layer
from pyglet import resource
from pyglet.gl import glPushMatrix, glPopMatrix
class BackgroundLayer(Layer):
""" A simple layer with a image background. """
def __init__(self, background):
super(BackgroundLayer, self).__init__()
self.image = resource.image(background)
def draw(self):
glPushMatrix()
self.transform()
self.image.blit(0, 0)
glPopMatrix()
| SpaceWars/spacewars | src/layers/base_layers.py | Python | gpl-3.0 | 1,054 |
# Copyright (c) 2013 Mirantis Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
from oslo.config import cfg
from sahara import conductor as c
from sahara import context
from sahara.utils.openstack import keystone
conductor = c.API
CONF = cfg.CONF
def create_trust(cluster):
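    # Let the admin user (trustee) impersonate the current user (trustor)
    # with the same roles, scoped to the user's project.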
client = keystone.client()
ctx = context.current()
trustee_id = keystone.client_for_admin().user_id
trust = client.trusts.create(trustor_user=client.user_id,
trustee_user=trustee_id,
impersonation=True,
role_names=ctx.roles,
project=client.tenant_id)
conductor.cluster_update(ctx,
cluster,
{'trust_id': trust.id})
def use_os_admin_auth_token(cluster):
if cluster.trust_id:
ctx = context.current()
ctx.username = CONF.keystone_authtoken.admin_user
ctx.tenant_id = cluster.tenant_id
client = keystone.client_for_trusts(cluster.trust_id)
ctx.token = client.auth_token
ctx.service_catalog = json.dumps(
client.service_catalog.catalog['catalog'])
def delete_trust(cluster):
if cluster.trust_id:
keystone_client = keystone.client_for_trusts(cluster.trust_id)
keystone_client.trusts.delete(cluster.trust_id)
| tellesnobrega/storm_plugin | sahara/service/trusts.py | Python | apache-2.0 | 1,897 |
'''Particle Swarm Optimization Algorithm for Minimization'''
from threading import Thread, Event
import numpy as _np
class PSO:
"""."""
DEFAULT_COEFF_INERTIA = 0.7984
DEFAULT_COEFF_INDIVIDUAL = 1.49618
DEFAULT_COEFF_COLLECTIVE = 1.49618
def __init__(self, save=False):
"""."""
        # Number of particles in the swarm
        # (recommended: 10 + 2 * sqrt(d), where d is the search-space dimension)
self._nswarm = 0
self._niter = 0
# Inertia
self._coeff_inertia = PSO.DEFAULT_COEFF_INERTIA
# Best position of individual particle
self._coeff_indiv = PSO.DEFAULT_COEFF_INDIVIDUAL
# Best position ever reached by the swarm
self._coeff_coll = PSO.DEFAULT_COEFF_COLLECTIVE
# Boundary limits of problem
self._pos_lim_upper = _np.array([])
self._pos_lim_lower = _np.array([])
# Elements of PSO
self._initial_position = _np.array([])
self._position = self._initial_position
self._velocity = _np.array([])
self._best_indiv = _np.array([])
self._best_global = _np.array([])
self._thread = Thread(target=self._optimize, daemon=True)
self._stopevt = Event()
self.hist_best_positions = _np.array([])
self.hist_best_objfun = _np.array([])
self._flag_save = save
self.initialization()
self._check_initialization()
@property
def coeff_inertia(self):
"""."""
return self._coeff_inertia
@coeff_inertia.setter
def coeff_inertia(self, value):
"""."""
self._coeff_inertia = value
@property
def coeff_indiv(self):
"""."""
return self._coeff_indiv
@coeff_indiv.setter
def coeff_indiv(self, value):
"""."""
self._coeff_indiv = value
@property
def coeff_coll(self):
"""."""
return self._coeff_coll
@coeff_coll.setter
def coeff_coll(self, value):
"""."""
self._coeff_coll = value
@property
def ndim(self):
"""."""
return len(self._initial_position)
@property
def nswarm(self):
"""."""
return self._nswarm
@nswarm.setter
def nswarm(self, value):
"""."""
self._nswarm = value
@property
def niter(self):
"""."""
return self._niter
@niter.setter
def niter(self, value):
"""."""
self._niter = value
@property
def limits_upper(self):
"""."""
return self._pos_lim_upper
@limits_upper.setter
def limits_upper(self, value):
"""."""
if len(value) != len(self._initial_position):
raise Exception('Incompatible upper limit!')
self._pos_lim_upper = _np.array(value)
@property
def limits_lower(self):
"""."""
return self._pos_lim_lower
@limits_lower.setter
def limits_lower(self, value):
"""."""
if len(value) != len(self._initial_position):
raise Exception('Incompatible lower limit!')
self._pos_lim_lower = _np.array(value)
@property
def initial_position(self):
"""."""
return self._initial_position
@initial_position.setter
def initial_position(self, value):
"""."""
self._initial_position = value
@property
def position(self):
"""."""
return self._position
@position.setter
def position(self, value):
"""."""
self._position = value
@property
def velocity(self):
"""."""
return self._velocity
@velocity.setter
def velocity(self, value):
"""."""
self._velocity = value
def initialization(self):
"""."""
raise NotImplementedError
def _check_initialization(self):
"""."""
if len(self._pos_lim_upper) != len(self._pos_lim_lower):
raise Exception(
                'Upper and lower limits have different lengths!')
if self.ndim != len(self._pos_lim_upper):
raise Exception(
'Dimension incompatible with limits!')
if self.nswarm < round(10 + 2 * _np.sqrt(self.ndim)):
print(
'Swarm population lower than recommended!')
def _create_swarm(self):
"""."""
self._best_indiv = _np.zeros((self.nswarm, self.ndim))
self._best_global = _np.zeros(self.ndim)
# Random initialization of swarm position inside the boundary limits
dlim = self._pos_lim_upper - self._pos_lim_lower
rarray = _np.random.rand(self.nswarm, self.ndim)
self._position = _np.ones((self.nswarm, 1)) * self._initial_position
self._position += dlim * rarray + self._pos_lim_lower
# Include the zero variation as first particle in the swarm
self._position[0, :] *= 0
self._check_lim()
        # The first individual bests are the initial positions; copy to avoid
        # aliasing, since self._position is updated in place later on
        self._best_indiv = self._position.copy()
# Initializing with zero velocity
self._velocity = _np.zeros((self.nswarm, self.ndim))
def _update_position(self):
"""."""
r_indiv = self._coeff_indiv * _np.random.rand()
r_coll = self._coeff_coll * _np.random.rand()
        # Inertia term
        self._velocity *= self._coeff_inertia
        # Cognitive term: attraction towards the particle's own best position
        self._velocity += r_indiv * (self._best_indiv - self._position)
        # Social term: attraction towards the best position of the swarm
        self._velocity += r_coll * (self._best_global - self._position)
# Update position and check boundary limits
self._position += self._velocity
self._check_lim()
def _check_lim(self):
"""."""
# If particle position exceeds the boundary, set the boundary value
for i in range(self._pos_lim_upper.size):
over = self._position[:, i] > self._pos_lim_upper[i]
under = self._position[:, i] < self._pos_lim_lower[i]
self._position[over, i] = self._pos_lim_upper[i]
self._position[under, i] = self._pos_lim_lower[i]
def _save_data(self, k, f, fbest):
"""."""
with open('pos_PSO.txt', 'a') as f_pos:
if k == 0:
f_pos.write('NEW RUN'.center(50, '=') + '\n')
f_pos.write('Step ' + str(k+1) + ' \n')
_np.savetxt(f_pos, self._position, fmt='%+.8e')
with open('fig_PSO.txt', 'a') as f_fig:
if k == 0:
f_fig.write('NEW RUN'.center(50, '=') + '\n')
f_fig.write('Step ' + str(k+1) + ' \n')
_np.savetxt(f_fig, f, fmt='%+.8e')
with open('best_pos_history_PSO.txt', 'a') as f_posh:
if k == 0:
f_posh.write('NEW RUN'.center(50, '=') + '\n')
f_posh.write('Step ' + str(k+1) + ' \n')
_np.savetxt(f_posh, self._best_global, fmt='%+.8e')
with open('best_fig_history_PSO.txt', 'a') as f_figh:
if k == 0:
f_figh.write('NEW RUN'.center(50, '=') + '\n')
f_figh.write('Step ' + str(k+1) + ' \n')
_np.savetxt(f_figh, _np.array([fbest]), fmt='%+.8e')
def calc_obj_fun(self):
"""Return a vector for every particle evaluation."""
raise NotImplementedError
def start(self):
"""."""
if not self._thread.is_alive():
self._thread = Thread(target=self._optimize, daemon=True)
self._stopevt.clear()
self._thread.start()
def stop(self):
"""."""
self._stopevt.set()
def join(self):
"""."""
self._thread.join()
@property
def isrunning(self):
"""."""
return self._thread.is_alive()
def _optimize(self):
"""."""
self._create_swarm()
f_old = _np.zeros(self.nswarm)
f_new = _np.zeros(self.nswarm)
        # History of best position and merit function over iterations
best_pos_hstry = _np.zeros((self.niter, self.ndim))
best_fig_hstry = _np.zeros(self.niter)
        print('>>> Iteration Number: 1')
f_old = self.calc_obj_fun()
self._best_global = self._best_indiv[_np.argmin(f_old), :]
best_pos_hstry[0, :] = self._best_global
best_fig_hstry[0] = _np.min(f_old)
ref0 = self._best_global
if self._flag_save:
self._save_data(k=0, f=f_old, fbest=best_fig_hstry[0])
print('Best particle: ' + str(_np.argmin(f_old)+1))
print('Obj. Func.:' + str(_np.min(f_old)))
for niter in range(self.niter):
print('------------------------------------------------------')
            print('>>> Iteration Number: ' + str(niter+2))
self._update_position()
f_new = self.calc_obj_fun()
improve = f_new < f_old
if improve.any():
# Update best individual position and merit function for
# comparison only if the merit function is lower
self._best_indiv[improve, :] = self._position[improve, :]
if _np.min(f_new) < _np.min(f_old):
self._best_global = self._best_indiv[
_np.argmin(f_new), :].copy()
print('UPDATE GLOBAL BEST!')
print(
'Best particle: ' + str(_np.argmin(f_new)+1))
print('Obj. Func.:' + str(_np.min(f_new)))
f_old[improve] = f_new[improve]
else:
print('Best particle: ' + str(_np.argmin(f_new)+1))
print('Obj. Func.:' + str(_np.min(f_new)))
best_pos_hstry[niter, :] = self._best_global
best_fig_hstry[niter] = _np.min(f_old)
if self._flag_save:
self._save_data(k=niter, f=f_new, fbest=best_fig_hstry[niter])
if self._stopevt.is_set():
print('Stopped!')
break
print('Finished!')
print('Best Position Found:' + str(self._best_global))
print('Best Obj. Func. Found:' + str(_np.min(f_old)))
self.hist_best_positions = best_pos_hstry
self.hist_best_objfun = best_fig_hstry
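# Usage sketch (illustrative only): minimize the 4-dimensional sphere
# function around the initial point. The subclass name, swarm size and
# limits below are assumptions, not part of apsuite.
#
#   class SpherePSO(PSO):
#       def initialization(self):
#           self.niter = 30
#           self.nswarm = 20
#           self._initial_position = _np.zeros(4)
#           self._pos_lim_upper = _np.ones(4)
#           self._pos_lim_lower = -_np.ones(4)
#       def calc_obj_fun(self):
#           # one objective value per particle (row of self._position)
#           return _np.sum(self._position ** 2, axis=1)
#
#   opt = SpherePSO()
#   opt.start()
#   opt.join()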
| lnls-fac/apsuite | apsuite/optimization/pso.py | Python | mit | 10,292 |
"""
VexFlow / TabDiv Build Script
Requires: SCons, Git, and Google Closure Compiler
Copyright Mohit Cheppudira 2010
"""
import os
from datetime import datetime
from SCons.Script import *
"""
Make the default zip action use the external zip command. Also
add -j (--junk-paths) to the command to store only the name of the
file and strip out the directory name.
"""
DefaultEnvironment(ZIPCOM = "zip -r -j $TARGET $SOURCES")
default_env = Environment(
VEX_BUILD_PREFIX = "debug-4",
VEX_VERSION = "1.0-pre",
VEX_BUILD_DATE = str(datetime.now()),
JAVA = "java",
JS_COMPILER = "support/compiler.jar",
JS_DEFINES = {},
JS_COMPILATION_LEVEL = "SIMPLE_OPTIMIZATIONS",
ENV = os.environ)
def js_builder(target, source, env):
""" A JavaScript builder using Google Closure Compiler. """
    cmd = env.subst(
        "$JAVA -jar $JS_COMPILER --compilation_level $JS_COMPILATION_LEVEL")
# Add defines to the command
for define in env['JS_DEFINES'].keys():
cmd += " --define=\"%s=%s\"" % (define, env['JS_DEFINES'][define])
# Add the source files
for file in source:
cmd += " --js " + str(file)
# Add the output file
cmd += " --js_output_file " + str(target[0])
    # Log the command and run it
    print(env.subst(cmd))
    os.system(env.subst(cmd))
def vexflow_stamper(target, source, env):
""" A Build Stamper for VexFlow """
cmd = "sed "
cmd += " -e s/__VEX_BUILD_PREFIX__/$VEX_BUILD_PREFIX/"
cmd += " -e s/__VEX_VERSION__/$VEX_VERSION/"
cmd += ' -e "s/__VEX_BUILD_DATE__/${VEX_BUILD_DATE}/"'
cmd += " -e s/__VEX_GIT_SHA1__/`git rev-list --max-count=1 HEAD`/ "
cmd += ("%s > %s" % (source[0], target[0]))
    print(env.subst(cmd))
os.system(env.subst(cmd))
"""
Add our custom builders to the environment.
"""
default_env.Append(
BUILDERS = {'JavaScript': Builder(action = js_builder),
'VexFlowStamp': Builder(action = vexflow_stamper)})
def build_and_stamp(target, sources, env):
"""
A helper command to build the javascript output and stamp
the header files.
"""
pre_node = env.JavaScript(target + ".pre", sources)
final_node = env.VexFlowStamp(target, pre_node)
return final_node
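# Usage sketch (illustrative only -- the output path and source glob are
# assumptions, not defined by this script):
#
#   sources = Glob('src/*.js')
#   vexflow = build_and_stamp('build/vexflow-min.js', sources, default_env)
#   Default(vexflow)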
def mkdir_with_cleanup(dirname, env):
"""
Helper function to create directories and attach cleanup
handlers. This is the only way to get implicitly created directories
cleaned up.
"""
dir = env.subst(dirname)
t = Command(dir, [], Mkdir("$TARGET"))
Clean(t, dir) # Cleanup handler
def cpdir_with_cleanup(targetdirname, srcdirname, env):
"""
Helper function to copy directories and attach cleanup
handlers. This is the only way to get implicitly created directories
cleaned up.
"""
targetdir = env.subst(targetdirname)
srcdir = env.subst(srcdirname)
t = Command(targetdir, srcdir, Copy("$TARGET", "$SOURCE"))
Clean(t, targetdir)
| georgedrummond/vexflow | site_scons/vexflow_scons.py | Python | mit | 2,838 |
# Copyright (c) 2017 Huawei Technologies Co., Ltd.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_policy import policy
from cinder.policies import base
CLEAN_POLICY = 'workers:cleanup'
workers_policies = [
policy.DocumentedRuleDefault(
name=CLEAN_POLICY,
check_str=base.RULE_ADMIN_API,
description="Clean up workers.",
operations=[
{
'method': 'POST',
'path': '/workers/cleanup'
}
])
]
def list_rules():
return workers_policies
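# Usage sketch (illustrative): the defaults returned by list_rules() are
# typically registered with an oslo.policy Enforcer, e.g.
#
#   from oslo_config import cfg
#   from oslo_policy import policy as oslo_policy
#   enforcer = oslo_policy.Enforcer(cfg.CONF)
#   enforcer.register_defaults(list_rules())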
| phenoxim/cinder | cinder/policies/workers.py | Python | apache-2.0 | 1,100 |
#!/usr/bin/env python
# coding: utf-8
'''
BACON
Copyright (C) 2017 Brett Pemberton
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
Bacon module for systemd service operations
Accepts a change structure for a change on a package
'''
import logging
import dbus
LOGGER = logging.getLogger("bacon")
def service_is_running(service):
''' See if a service is running or not '''
sysbus = dbus.SystemBus()
systemd1 = sysbus.get_object('org.freedesktop.systemd1', '/org/freedesktop/systemd1')
manager = dbus.Interface(systemd1, 'org.freedesktop.systemd1.Manager')
if not service.endswith(".service"):
service += ".service"
joblist = manager.ListUnits()
for job in joblist:
name = job[0]
active = job[3]
if name == service:
if active == 'active':
return True
return False
LOGGER.error("Service %s not found", service)
return False
def perform_change(service, ensure):
''' Perform the change on the resource '''
sysbus = dbus.SystemBus()
systemd1 = sysbus.get_object('org.freedesktop.systemd1', '/org/freedesktop/systemd1')
manager = dbus.Interface(systemd1, 'org.freedesktop.systemd1.Manager')
if not service.endswith(".service"):
service += ".service"
    if ensure == 'running':
        manager.StartUnit(service, 'fail')
    elif ensure == 'stopped':
        # StopUnit has the same (name, mode) signature as StartUnit
        manager.StopUnit(service, 'fail')
    elif ensure == 'reload':
        # Manager.Reload() reloads systemd's own configuration; reloading a
        # single unit is done via ReloadUnit(name, mode)
        manager.ReloadUnit(service, 'fail')
return None
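# Usage sketch (illustrative; 'sshd' is an assumed unit name):
#
#   if not service_is_running('sshd'):
#       perform_change('sshd', ensure='running')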
| generica/bacon | bacon/modules/systemd/service.py | Python | gpl-3.0 | 2,073 |
import sys
import time
from datetime import date, datetime
from decimal import Decimal
from .otypes import OrientRecordLink, OrientRecord, OrientBinaryObject
from .exceptions import PyOrientBadMethodCallException
class OrientSerializationBinary(object):
def __init__(self):
self.className = None
self.data = {}
self.type = OrientSerialization.Binary
def decode(self, content):
raise NotImplementedError
def encode(self, record):
raise NotImplementedError
class OrientSerializationCSV(object):
def __init__(self):
self.className = None
self.data = {}
self.type = OrientSerialization.CSV
def decode(self, content ):
"""
Deserialize a record.
:param content str The input to un-serialize.
:return: (class_name, dict)
"""
if not content:
return self.className, self.data
if not isinstance(content, str):
content = content.decode()
content = content.strip()
chunk = self._parse_first_key( content )
if chunk[2]:
# this is actually a class name.
self.className = chunk[0]
content = chunk[1]
chunk = self._parse_key(content)
key = chunk[0]
content = chunk[1]
else:
key = chunk[0]
content = chunk[1]
if not key and not content:
return self.className, self.data
chunk = self._parse_value(content)
value = chunk[0]
content = chunk[1]
self.data[key] = value
while len(content) != 0:
if content[0] == ',':
content = content[1:]
else:
break
chunk = self._parse_key(content)
key = chunk[0]
content = chunk[1]
if len(content) > 0:
chunk = self._parse_value(content)
value = chunk[0]
content = chunk[1]
self.data[key] = value
else:
self.data[key] = None
return self.className, self.data
def encode(self, record):
"""
Encode an OrientRecord to be sent over the connection
:param record: :class: `OrientRecord <pyorient.types.OrientRecord>`
:return: raw string to send over the wire
"""
raw = ''
o_class = getattr(record, '_class', False)
if o_class:
raw = o_class + '@'
fields = list(record.oRecordData)
for idx, key in enumerate(fields):
raw += key + ':'
value = record.oRecordData[key]
raw += self._encode_value(value)
if idx < len(list(fields)) - 1:
# not last element
raw += ','
return raw
#
# ENCODING STUFF
#
def _encode_value(self, value):
if isinstance(value, str):
ret = '"' + value + '"'
elif isinstance(value, float):
ret = str(value) + 'f'
elif sys.version_info[0] >= 3 and isinstance(value, int):
if value > 2147483647:
ret = str(value) + 'l'
else:
ret = str(value)
elif sys.version_info[0] < 3 and isinstance(value, long):
ret = str(value) + 'l'
elif isinstance(value, int):
ret = str(value)
elif isinstance(value, datetime):
ret = str(int(time.mktime(value.timetuple())) * 1000) + 't'
elif isinstance(value, date):
ret = str(int(time.mktime(value.timetuple())) * 1000) + 'a'
elif isinstance(value, Decimal):
ret = str(value) + 'c'
        elif isinstance(value, list):
            try:
                # Encode (not parse) each element; coerce every element to the
                # type of the first one to guard against mixed-type lists
                ret = "[" + ','.join(
                    map(
                        lambda elem: self._encode_value(type(value[0])(elem))
                        if not isinstance(value[0], OrientRecordLink)
                        else elem.get_hash(),
                        value
                    )) + ']'
            except ValueError:
                raise Exception("wrong type combination")
        elif isinstance(value, dict):
            ret = "{" + ','.join(map(
                lambda elem: '"' + elem + '":' + self._encode_value(value[elem]),
                value)) + '}'
        elif isinstance(value, OrientRecord):
            # embedded record: recurse through the public encode()
            ret = "(" + self.encode(value) + ")"
elif isinstance(value, OrientRecordLink):
ret = value.get_hash()
elif isinstance(value, OrientBinaryObject):
ret = value.get_hash()
else:
ret = ''
return ret
#
# DECODING STUFF
#
# Consume the first field key, which could be a class name.
# :param content str The input to consume
# :return: list The collected string and any remaining content,
# followed by a boolean indicating whether this is a class name.
def _parse_first_key(self, content):
length = len(content)
collected = ''
is_class_name = False
if content[0] == '"':
result = self._parse_string(content[1:])
return [result[0], result[1][1:]]
i = 0
for i in range(0, length):
c = content[i]
if c == '@':
is_class_name = True
break
elif c == ':':
break
else:
collected += c
return [collected, content[( i + 1 ):], is_class_name]
def _parse_key( self, content ):
"""
Consume a field key, which may or may not be quoted.
:param content str The input to consume
:return: dict The collected string and any remaining content.
"""
length = len(content)
if length == 0:
return [None, None]
collected = ''
if content[ 0 ] == '"':
result = self._parse_string( content[1:] )
return [ result[ 0 ], result[1][1:] ]
i = 0
for i in range(0, length):
c = content[i]
if c == ':':
break
else:
collected += c
return [ collected, content[( i + 1 ):] ]
def _parse_value( self, content ):
"""
Consume a field value.
:param: content str The input to consume
:return: list The collected value and any remaining content.
"""
c = ''
content = content.lstrip( " " )
try:
c = content[ 0 ] # string index out of range 0
except IndexError:
pass
if len( content ) == 0 or c == ',':
return [ None, content ]
elif c == '"':
return self._parse_string( content[1:] )
elif c == '#':
return self._parse_rid( content[1:] )
elif c == '[':
return self._parse_collection( content[1:] )
elif c == '<':
return self._parse_set( content[1:] )
elif c == '{':
return self._parse_map( content[1:] )
elif c == '(':
return self._parse_record( content[1:] )
elif c == '%':
return self._parse_bag( content[1:] )
elif c == '_':
return self._parse_binary( content[1:] )
elif c == '-' or self._is_numeric( c ):
return self._parse_number( content )
elif c == 'n' and content[ 0:4 ] == 'null':
return [ None, content[ 4: ] ]
elif c == 't' and content[ 0:4 ] == 'true':
return [ True, content[ 4: ] ]
elif c == 'f' and content[ 0:5 ] == 'false':
return [ False, content[ 5: ] ]
else:
return [ None, content ]
@staticmethod
def _is_numeric( content ):
try:
float( content )
return True
except ValueError:
return False
@staticmethod
def _parse_string( content ):
"""
Consume a string.
:param content str The input to consume
:return: list The collected string and any remaining content.
"""
length = len( content )
collected = ''
i = 0
while i < length:
c = content[ i ]
if c == '\\':
# escape, skip to the next character
i += 1
collected += content[ i ]
# increment again to pass over
i += 1
continue
elif c == '"':
break
else:
i += 1
collected += c
return [ collected, content[ ( i + 1 ): ] ]
def _parse_number(self, content):
"""
Consume a number.
If the number has a suffix, consume it also and instantiate the
right type, e.g. for dates
:param content str The content to consume
:return: list The collected number and any remaining content.
"""
length = len(content)
collected = ''
is_float = False
i = 0
for i in range(0, length):
c = content[i]
if c == '-' or self._is_numeric(c):
collected += c
elif c == '.':
is_float = True
collected += c
elif c == 'E' and is_float:
collected += c
else:
break
content = content[i:]
c = ''
try:
c = content[ 0 ] # string index out of range 0
except IndexError:
pass
if c == 'a':
collected = date.fromtimestamp(float(collected) / 1000)
content = content[1:]
elif c == 't':
# date
collected = datetime.fromtimestamp(float(collected) / 1000)
content = content[1:]
elif c == 'f' or c == 'd':
# float # double
collected = float(collected)
content = content[1:]
elif c == 'c':
collected = Decimal(collected)
content = content[1:]
elif c == 'b' or c == 's':
collected = int(collected)
content = content[1:]
elif c == 'l':
if sys.version_info[0] < 3:
collected = long(collected) # python 2.x long type
else:
collected = int(collected)
content = content[1:]
elif is_float:
collected = float(collected)
else:
collected = int(collected)
return [collected, content]
def _parse_rid(self, content):
"""
Consume a Record ID.
:param content str The input to consume
:return: list The collected RID and any remaining content.
"""
length = len(content)
collected = ''
cluster = None
i = 0
for i in range(0, length):
c = content[i]
if cluster is None and c == ':':
cluster = collected
collected = ''
elif self._is_numeric(c):
collected += c
else:
break
return [ OrientRecordLink( cluster + ":" + collected ), content[i:]]
def _parse_collection(self, content):
"""
Consume an array of values.
:param content str The input to consume
:return: list The collected array and any remaining content.
"""
collection = []
while len(content) != 0:
c = content[0]
if c == ',':
content = content[1:]
elif c == ']':
content = content[1:]
break
chunk = self._parse_value(content)
collection.append(chunk[0])
content = chunk[1]
return [collection, content]
def _parse_set(self, content):
"""
Consume a set of values.
:param content str The input to consume
:return: list The collected set and any remaining content.
"""
list_set = []
while len(content) != 0:
c = content[0]
if c == ',':
content = content[1:]
elif c == '>':
content = content[1:]
break
chunk = self._parse_value(content)
list_set.append(chunk[0])
content = chunk[1]
return [list_set, content]
def _parse_map( self, content ):
"""
Consume a map of keys to values.
:param content str The input to consume
:return: list The collected map and any remaining content.
"""
_map = {}
content = content.lstrip(' ')
while len(content) != 0:
c = content[0]
if c == ' ':
content = content[1:].lstrip(' ')
continue
elif c == ',':
content = content[1:].lstrip(' ')
elif c == '}':
content = content[1:]
break
chunk = self._parse_key(content)
key = chunk[0]
content = chunk[1].lstrip(' ')
if len(content) != 0:
chunk = self._parse_value(content)
_map[key] = chunk[0]
content = chunk[1].lstrip(' ')
else:
_map[key] = None
break
return [_map, content]
def _parse_record(self, content):
"""
Consume an embedded record.
:param content str The content to unserialize.
:return: list The collected record and any remaining content.
"""
record = {}
content = content.lstrip(' ')
if content[0] == ')':
# this is an empty record.
return [record, content[1:]]
chunk = self._parse_first_key(content)
if chunk[2]:
# this is actually a class name.
record['o_class'] = chunk[0]
content = chunk[1].lstrip(' ')
if content[0] == ')':
return [record, content[1:]]
chunk = self._parse_key(content)
key = chunk[0]
content = chunk[1]
else:
key = chunk[0]
content = chunk[1]
chunk = self._parse_key(content)
value = chunk[0]
content = chunk[1].lstrip(' ')
record[key] = value
while len(content) > 0:
if content[0] == ',':
content = content[1:].lstrip(' ')
elif content[0] == ')':
content = content[1:].lstrip(' ')
break
chunk = self._parse_key(content)
key = chunk[0]
content = chunk[1].lstrip(' ')
if len(content) > 0:
chunk = self._parse_value(content)
value = chunk[0]
content = chunk[1]
record[key] = value
else:
record[key] = None
return [record, content]
@staticmethod
def _parse_bag(content):
"""
Consume a record id bag.
:param content str The content to consume
:return: list The collected record id bag and any remaining content.
"""
length = len(content)
collected = ''
i = 0
for i in range(0, length):
c = content[i]
if c == ';':
break
else:
collected += c
return [OrientBinaryObject(collected), content[( i + 1 ):]]
@staticmethod
def _parse_binary(content):
"""
Consume a binary field.
:param content str The content to consume
:return: list The collected binary and any remaining content.
"""
length = len(content)
collected = ''
i = 0
for i in range(0, length):
c = content[i]
if c == '_' \
or c == ',' \
or c == ')' \
or c == '>' \
or c == '}' \
or c == ']':
break
else:
collected += c
return [collected, content[( i + 1 ):]]
class OrientSerialization(object):
"""
Enum representing the available serialization
"""
#: CSV the default serialization
CSV = "ORecordDocument2csv"
#: Now unimplemented
Binary = "ORecordSerializerBinary"
@classmethod
def get_impl(cls, impl):
impl_map = {
cls.CSV: OrientSerializationCSV,
cls.Binary: OrientSerializationBinary,
}
implementation = impl_map.get(impl, False)
if not implementation:
raise PyOrientBadMethodCallException(
                impl + ' is not an available serialization type', []
)
return implementation()
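# Usage sketch (illustrative; the record string is an assumed example of the
# OrientDB "Class@field:value" CSV format, not data from a real database):
#
#   serializer = OrientSerialization.get_impl(OrientSerialization.CSV)
#   class_name, data = serializer.decode('Profile@name:"Luca",age:21')
#   # class_name == 'Profile', data == {'name': 'Luca', 'age': 21}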
| lebedov/pyorient | pyorient/serializations.py | Python | apache-2.0 | 17,017 |
# Copyright (C) 2020 ycmd contributors
#
# This file is part of ycmd.
#
# ycmd is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ycmd is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with ycmd. If not, see <http://www.gnu.org/licenses/>.
from hamcrest import ( assert_that, empty, has_entries )
from ycmd.tests import SharedYcmd, IsolatedYcmd
from ycmd.tests.test_utils import ( EMPTY_SIGNATURE_HELP,
BuildRequest )
@SharedYcmd
def SignatureHelp_IdentifierCompleter_test( app ):
event_data = BuildRequest( contents = 'foo foogoo ba',
event_name = 'FileReadyToParse' )
app.post_json( '/event_notification', event_data )
# query is 'oo'
request_data = BuildRequest( contents = 'oo foo foogoo ba',
column_num = 3 )
response_data = app.post_json( '/signature_help', request_data ).json
assert_that( response_data, has_entries( {
'errors': empty(),
'signature_help': EMPTY_SIGNATURE_HELP
} ) )
@IsolatedYcmd( { 'disable_signature_help': 1 } )
def SignatureHelp_IdentifierCompleter_disabled_test( app ):
event_data = BuildRequest( contents = 'foo foogoo ba',
event_name = 'FileReadyToParse' )
app.post_json( '/event_notification', event_data )
# query is 'oo'
request_data = BuildRequest( contents = 'oo foo foogoo ba',
column_num = 3 )
response_data = app.post_json( '/signature_help', request_data ).json
assert_that( response_data, has_entries( {
'errors': empty(),
'signature_help': EMPTY_SIGNATURE_HELP
} ) )
def Dummy_test():
# Workaround for https://github.com/pytest-dev/pytest-rerunfailures/issues/51
assert True
| vheon/ycmd | ycmd/tests/signature_help_test.py | Python | gpl-3.0 | 2,184 |
# -*- coding: utf-8 -*-
"""
npyscreen2 is mostly an experiment to see what I could do refactoring npyscreen
if backwards compatibility was not a concern.
A lot of the fundamental curses code, along with some of the base Form and
Widget methods come from the original npyscreen. Containers are my own
development (not that it's a terribly original idea), and I have made some heavy
modifications in many areas to create the new API design. A handful of changes
have been motivated simply by PEP8 conformance.
npyscreen author: Nicholas Cole
npyscreen2 author: Paul Barton (SavinaRoja)
"""
from .safe_wrapper import wrapper, wrapper_basic
from .app import NPSApp, App, NPSAppAdvanced, AppAdvanced
from .widgets import Widget, NotEnoughSpaceForWidget, BorderBox, TextField, \
Gauge
from .containers import Container, GridContainer, SmartContainer, TitledField
from .forms import Form, set_theme, get_theme, TraditionalForm
from .logs import activate_logging, add_rotating_file_handler
from . import themes
from . import theme_managers
import logging
logger = logging.getLogger('npyscreen2')
| SavinaRoja/npyscreen2 | npyscreen2/__init__.py | Python | gpl-3.0 | 1,119 |
#Copyright 2017 The Snail Authors. All Rights Reserved.
#
#This Source Code Form is subject to the terms of the Mozilla Public
#License, v. 2.0. If a copy of the MPL was not distributed with this
#file, You can obtain one at http://mozilla.org/MPL/2.0/.
#Exhibit B is not attached; this software is compatible with the
#licenses expressed under Section 1.12 of the MPL v2.
# ==================================================================
import logging as log
from .StreamProcessor import StreamProcessor
class MainSP(StreamProcessor):
    """ MainSP is used to chain multiple StreamProcessors, in order to create
    a pipeline of processors """
    def __init__(self):
        super(MainSP, self).__init__()
        # Keep the chain per instance: a class-level ``chain = []`` would be
        # shared (and mutated) across all MainSP instances.
        self.chain = []
def then(self, next):
"""
Adds a processor to the pipeline. It is not executed.
Args:
next: StreamProcessor to be added to the pipeline
Returns:
self
"""
self.chain.append(next)
return self
def process(self, input_stream):
"""
Executes all pipelined processors in order of addition.
Args:
input_stream: the input m21 stream
Returns:
output_stream: the final processed m21 stream
"""
output_stream = input_stream
if len(self.chain) == 0:
log.warning('No operations chained. Echoing input.')
return output_stream
for sp in self.chain:
output_stream = sp.process(output_stream)
return output_stream
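# Usage sketch (illustrative; TransposeSP and QuantizeSP are hypothetical
# StreamProcessor subclasses, not part of this package):
#
#   pipeline = MainSP().then(TransposeSP()).then(QuantizeSP())
#   output_stream = pipeline.process(input_stream)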
| hexagrammidae/snail | src/StreamProcessors/MainSP.py | Python | mpl-2.0 | 1,535 |
"""
Installs and configures Keystone
"""
import logging
import uuid
from packstack.installer import validators
from packstack.installer import basedefs
from packstack.installer import utils
from packstack.modules.ospluginutils import getManifestTemplate, appendManifestFile
# Controller object will be initialized from main flow
controller = None
# Plugin name
PLUGIN_NAME = "OS-Keystone"
PLUGIN_NAME_COLORED = utils.color_text(PLUGIN_NAME, 'blue')
logging.debug("plugin %s loaded", __name__)
def initConfig(controllerObject):
global controller
controller = controllerObject
logging.debug("Adding OpenStack Keystone configuration")
paramsList = [
{"CMD_OPTION" : "keystone-host",
"USAGE" : "The IP address of the server on which to install Keystone",
"PROMPT" : "Enter the IP address of the Keystone server",
"OPTION_LIST" : [],
"VALIDATORS" : [validators.validate_ssh],
"DEFAULT_VALUE" : utils.get_localhost_ip(),
"MASK_INPUT" : False,
"LOOSE_VALIDATION": True,
"CONF_NAME" : "CONFIG_KEYSTONE_HOST",
"USE_DEFAULT" : False,
"NEED_CONFIRM" : False,
"CONDITION" : False },
{"CMD_OPTION" : "keystone-db-passwd",
"USAGE" : "The password to use for the Keystone to access DB",
"PROMPT" : "Enter the password for the Keystone DB access",
"OPTION_LIST" : [],
"VALIDATORS" : [validators.validate_not_empty],
"DEFAULT_VALUE" : uuid.uuid4().hex[:16],
"MASK_INPUT" : True,
"LOOSE_VALIDATION": False,
"CONF_NAME" : "CONFIG_KEYSTONE_DB_PW",
"USE_DEFAULT" : True,
"NEED_CONFIRM" : True,
"CONDITION" : False },
{"CMD_OPTION" : "keystone-admin-token",
"USAGE" : "The token to use for the Keystone service api",
"PROMPT" : "The token to use for the Keystone service api",
"OPTION_LIST" : [],
"VALIDATORS" : [validators.validate_not_empty],
"DEFAULT_VALUE" : uuid.uuid4().hex,
"MASK_INPUT" : True,
"LOOSE_VALIDATION": False,
"CONF_NAME" : "CONFIG_KEYSTONE_ADMIN_TOKEN",
"USE_DEFAULT" : True,
"NEED_CONFIRM" : False,
"CONDITION" : False },
{"CMD_OPTION" : "keystone-admin-passwd",
"USAGE" : "The password to use for the Keystone admin user",
"PROMPT" : "Enter the password for the Keystone admin user",
"OPTION_LIST" : [],
"VALIDATORS" : [validators.validate_not_empty],
"DEFAULT_VALUE" : uuid.uuid4().hex[:16],
"MASK_INPUT" : True,
"LOOSE_VALIDATION": False,
"CONF_NAME" : "CONFIG_KEYSTONE_ADMIN_PW",
"USE_DEFAULT" : True,
"NEED_CONFIRM" : False,
"CONDITION" : False },
{"CMD_OPTION" : "keystone-token-format",
"USAGE" : "Kestone token format. Use either UUID or PKI",
"PROMPT" : "Enter the Keystone token format.",
"OPTION_LIST" : ['UUID', 'PKI'],
"VALIDATORS" : [validators.validate_options],
"DEFAULT_VALUE" : 'PKI',
"MASK_INPUT" : False,
"LOOSE_VALIDATION": False,
"CONF_NAME" : 'CONFIG_KEYSTONE_TOKEN_FORMAT',
"USE_DEFAULT" : True,
"NEED_CONFIRM" : False,
"CONDITION" : False },
]
groupDict = { "GROUP_NAME" : "KEYSTONE",
"DESCRIPTION" : "Keystone Config parameters",
"PRE_CONDITION" : lambda x: 'yes',
"PRE_CONDITION_MATCH" : "yes",
"POST_CONDITION" : False,
"POST_CONDITION_MATCH" : True}
controller.addGroup(groupDict, paramsList)
def initSequences(controller):
keystonesteps = [
{'title': 'Adding Keystone manifest entries', 'functions':[createmanifest]}
]
controller.addSequence("Installing OpenStack Keystone", [], [], keystonesteps)
def createmanifest(config):
    manifestfile = "%s_keystone.pp" % controller.CONF['CONFIG_KEYSTONE_HOST']
manifestdata = getManifestTemplate("keystone.pp")
appendManifestFile(manifestfile, manifestdata)
| paramite/packstack | packstack/plugins/keystone_100.py | Python | apache-2.0 | 5,074 |
"""
Hacks on external libraries
"""
import ogrepkg.materialexport
from .material import RexMaterialExporter
ogrepkg.materialexport.GameEngineMaterial = RexMaterialExporter
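# Note: this is a monkey-patch. Importing this module rebinds
# ogrepkg.materialexport.GameEngineMaterial to RexMaterialExporter, so it
# must be imported before any exporter code looks that name up.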
| caedesvvv/b2rex | scripts/b2rexpkg/hacks.py | Python | lgpl-3.0 | 175 |
#
# Copyright (c) ,2010 Matteo Boscolo
#
# This file is part of PythonCAD.
#
# PythonCAD is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# PythonCAD is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with PythonCAD; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
#
# qt text class
#
from Interface.Entity.base import *
from math import degrees
class Text(BaseEntity):
def __init__(self, entity):
super(Text, self).__init__(entity)
geoEnt=self.geoItem
self.text=geoEnt.text #QtCore.QString(geoEnt.text)
x, y=geoEnt.location.getCoords()
self.angle=degrees(geoEnt.angle)
self.location=QtCore.QPointF(float(x), -1.0*y)
self.pointPosition=geoEnt.pointPosition
        self.font=QtGui.QFont()  # This has to be derived from the geoEnt as soon as it is implemented
self.setPos(self.location)
self.rotate(self.angle)
return
def drawShape(self, painterPath):
"""
overloading of the shape method
"""
painterPath.addText(QtCore.QPointF(0.0, 0.0), self.font, self.text)
return
def drawGeometry(self, painter, option, widget):
#Create Text
painter.drawText(self.boundingRect(),QtCore.Qt.AlignCenter, self.text)
| chiamingyen/PythonCAD_py3 | Interface/Entity/text.py | Python | gpl-2.0 | 1,767 |
# This file is part of Gajim.
#
# Gajim is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published
# by the Free Software Foundation; version 3 only.
#
# Gajim is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Gajim. If not, see <http://www.gnu.org/licenses/>.
from __future__ import annotations
from typing import Any
import logging
from gi.repository import Gtk
from gi.repository import Pango
from gi.repository import Gdk
from gi.repository import GLib
from gi.repository import GObject
from gajim.common import app
from gajim.common.const import StyleAttr
from gajim.common.helpers import open_uri
from gajim.common.helpers import parse_uri
from gajim.common.structs import URI
from gajim.common.styling import PlainBlock
from ..menus import get_conv_action_context_menu
from ..menus import get_conv_uri_context_menu
from ..emoji_data import emoji_pixbufs
from ..emoji_data import get_emoji_pixbuf
from ..util import get_cursor
from ..util import make_pango_attributes
log = logging.getLogger('gajim.gui.conversation.plain_widget')
URI_TAGS = ['uri', 'address', 'xmppadr', 'mailadr']
STYLE_TAGS = ['strong', 'emphasis', 'strike', 'pre']
class PlainWidget(Gtk.Box):
def __init__(self, account: str, selectable: bool) -> None:
Gtk.Box.__init__(self, orientation=Gtk.Orientation.VERTICAL)
self.set_vexpand(True)
self._account = account
        # We use a Gtk.TextView on Windows and MacOS, since there is no support
# for rendering color fonts (Emojis) on Windows/MacOS yet, see:
# https://gitlab.freedesktop.org/cairo/cairo/-/merge_requests/244
# https://gitlab.freedesktop.org/cairo/cairo/-/merge_requests/9
if app.settings.get('dev_use_message_label'):
self._text_widget = MessageLabel(self._account, selectable)
else:
self._text_widget = MessageTextview(self._account)
self.add(self._text_widget)
def add_content(self, block: PlainBlock) -> None:
self._text_widget.print_text_with_styling(block)
def add_action_phrase(self, text: str, nickname: str) -> None:
text = text.replace('/me', '* %s' % nickname, 1)
text = GLib.markup_escape_text(text)
self._text_widget.add_action_phrase(text)
def update_text_tags(self) -> None:
self._text_widget.update_text_tags()
class MessageLabel(Gtk.Label):
def __init__(self, account: str, selectable: bool) -> None:
Gtk.Label.__init__(self)
self.set_hexpand(True)
self.set_selectable(selectable)
self.set_line_wrap(True)
self.set_xalign(0)
self.set_line_wrap_mode(Pango.WrapMode.WORD_CHAR)
self.set_track_visited_links(False)
self._account = account
self.get_style_context().add_class('gajim-conversation-text')
self.connect('populate-popup', self._on_populate_popup)
self.connect('activate-link', self._on_activate_link)
self.connect('focus-in-event', self._on_focus_in)
self.connect('focus-out-event', self._on_focus_out)
def _on_populate_popup(self, label: Gtk.Label, menu: Gtk.Menu) -> None:
selected, start, end = label.get_selection_bounds()
if not selected:
menu.show_all()
return
selected_text = label.get_text()[start:end]
action_menu_item = get_conv_action_context_menu(
self._account, selected_text)
menu.prepend(action_menu_item)
menu.show_all()
def print_text_with_styling(self, block: PlainBlock) -> None:
text = ''
after = GLib.markup_escape_text(block.text.strip())
for uri in block.uris:
uri_escaped = GLib.markup_escape_text(uri.text)
before, _, after = after.partition(uri_escaped)
text += before
text += uri.get_markup_string()
text += after
self.set_markup(text)
self.set_attributes(make_pango_attributes(block))
def add_action_phrase(self, text: str) -> None:
self.set_markup(f'<i>{text}</i>')
def update_text_tags(self) -> None:
pass
def _on_activate_link(self, _label: Gtk.Label, uri: str) -> int:
open_uri(uri, self._account)
return Gdk.EVENT_STOP
@staticmethod
def _on_focus_in(widget: MessageLabel,
_event: Gdk.EventFocus
) -> None:
widget.get_style_context().remove_class('transparent-selection')
@staticmethod
def _on_focus_out(widget: MessageLabel,
_event: Gdk.EventFocus
) -> None:
widget.get_style_context().add_class('transparent-selection')
class MessageTextview(Gtk.TextView):
def __init__(self, account: str) -> None:
Gtk.TextView.__init__(self)
self.set_hexpand(True)
self.set_margin_start(0)
self.set_margin_end(0)
self.set_border_width(0)
self.set_left_margin(0)
self.set_right_margin(0)
self.set_has_tooltip(True)
self.set_editable(False)
self.set_cursor_visible(False)
self.set_wrap_mode(Gtk.WrapMode.WORD_CHAR)
self._handlers: dict[int, MessageTextview] = {}
id_ = self.connect('query-tooltip', self._query_tooltip)
self._handlers[id_] = self
id_ = self.connect('button-press-event', self._on_button_press)
self._handlers[id_] = self
id_ = self.connect('populate-popup', self._on_populate_popup)
self._handlers[id_] = self
self._account = account
# Used for changing the mouse pointer when hovering clickable URIs
self._cursor_changed: bool = False
# Keeps text selections for quoting and search actions
self._selected_text: str = ''
self.get_style_context().add_class('gajim-conversation-text')
# Create Tags
self._create_url_tags()
self.get_buffer().create_tag('strong', weight=Pango.Weight.BOLD)
self.get_buffer().create_tag('emphasis', style=Pango.Style.ITALIC)
self.get_buffer().create_tag('strike', strikethrough=True)
self.get_buffer().create_tag('pre', family='monospace')
self.connect('destroy', self._on_destroy)
def _on_destroy(self, *args: Any) -> None:
for id_, widget in self._handlers.items():
if widget.handler_is_connected(id_):
widget.disconnect(id_)
self._handlers.clear()
def _create_url_tags(self) -> None:
color = app.css_config.get_value('.gajim-url', StyleAttr.COLOR)
for name in URI_TAGS:
tag = self.get_buffer().create_tag(name,
foreground=color,
underline=Pango.Underline.SINGLE)
tag.connect('event', self._on_uri_clicked, tag)
def update_text_tags(self) -> None:
tag_table = self.get_buffer().get_tag_table()
url_color = app.css_config.get_value('.gajim-url', StyleAttr.COLOR)
for tag_name in URI_TAGS:
tag = tag_table.lookup(tag_name)
assert tag is not None
tag.set_property('foreground', url_color)
def clear(self) -> None:
buffer_ = self.get_buffer()
start, end = buffer_.get_bounds()
buffer_.delete(start, end)
def get_text(self) -> str:
buffer_ = self.get_buffer()
start, end = buffer_.get_bounds()
return buffer_.get_text(start, end, False)
def print_text_with_styling(self, block: PlainBlock) -> None:
buffer_ = self.get_buffer()
buffer_.insert(buffer_.get_start_iter(), block.text.strip())
for span in block.spans:
start_iter = buffer_.get_iter_at_offset(span.start)
end_iter = buffer_.get_iter_at_offset(span.end)
buffer_.apply_tag_by_name(span.name, start_iter, end_iter)
for uri in block.uris:
start_iter = buffer_.get_iter_at_offset(uri.start)
end_iter = buffer_.get_iter_at_offset(uri.end)
buffer_.apply_tag_by_name(uri.name, start_iter, end_iter)
for emoji in block.emojis:
start_iter = buffer_.get_iter_at_offset(emoji.start)
end_iter = buffer_.get_iter_at_offset(emoji.end)
if emoji_pixbufs.complete:
# Only search for pixbuf if loading is completed
pixbuf = get_emoji_pixbuf(emoji.text)
if pixbuf is None:
buffer_.insert(end_iter, emoji.text)
else:
pixbuf = pixbuf.copy()
anchor = buffer_.create_child_anchor(end_iter)
anchor.plaintext = emoji.text # type: ignore
img = Gtk.Image.new_from_pixbuf(pixbuf)
img.show()
self.add_child_at_anchor(img, anchor)
buffer_.delete(
buffer_.get_iter_at_offset(emoji.start),
buffer_.get_iter_at_offset(emoji.end))
else:
# Set marks and save them so we can replace emojis
# once loading is complete
start_mark = buffer_.create_mark(None, end_iter, True)
buffer_.insert(end_iter, emoji.text)
end_mark = buffer_.create_mark(None, end_iter, True)
emoji_pixbufs.append_marks(
self, start_mark, end_mark, emoji.text)
def add_action_phrase(self, text: str) -> None:
buffer_ = self.get_buffer()
buffer_.insert(buffer_.get_start_iter(), text.strip())
start_iter = buffer_.get_start_iter()
end_iter = buffer_.get_end_iter()
buffer_.apply_tag_by_name('emphasis', start_iter, end_iter)
def _query_tooltip(self,
widget: Gtk.TextView,
x_pos: int,
y_pos: int,
_keyboard_mode: bool,
tooltip: Gtk.Tooltip
) -> bool:
window = widget.get_window(Gtk.TextWindowType.TEXT)
assert window is not None
x_pos, y_pos = self.window_to_buffer_coords(
Gtk.TextWindowType.TEXT, x_pos, y_pos)
iter_ = self.get_iter_at_position(x_pos, y_pos)[1]
for tag in iter_.get_tags():
tag_name = tag.get_property('name')
if tag_name in URI_TAGS:
window.set_cursor(get_cursor('pointer'))
self._cursor_changed = True
return False
if self._cursor_changed:
window.set_cursor(get_cursor('text'))
self._cursor_changed = False
return False
def _on_button_press(self, _widget: Any, event: Gdk.EventButton) -> bool:
'''
We don’t open the standard context menu when receiving
a click on tagged text.
If it’s untagged text, check if something is selected
'''
self._selected_text = ''
if event.button != 3:
# If it’s not a right click
return False
x_pos, y_pos = self.window_to_buffer_coords(
Gtk.TextWindowType.TEXT,
int(event.x),
int(event.y))
_, iter_ = self.get_iter_at_location(x_pos, y_pos)
tags = iter_.get_tags()
if tags:
# A tagged text fragment has been clicked
for tag in tags:
if tag.get_property('name') in URI_TAGS:
# Block regular context menu
return True
# Check if there is a selection and make it available for
# _on_populate_popup
buffer_ = self.get_buffer()
return_val = buffer_.get_selection_bounds()
if return_val:
# Something has been selected, get the text
start_sel, finish_sel = return_val[0], return_val[1]
self._selected_text = buffer_.get_text(
start_sel, finish_sel, True)
elif iter_.get_char() and ord(iter_.get_char()) > 31:
# Clicked on a word, take whole word for selection
start_sel = iter_.copy()
if not start_sel.starts_word():
start_sel.backward_word_start()
finish_sel = iter_.copy()
if not finish_sel.ends_word():
finish_sel.forward_word_end()
self._selected_text = buffer_.get_text(
start_sel, finish_sel, True)
return False
def _on_populate_popup(self,
_textview: Gtk.TextView,
menu: Gtk.Menu
) -> None:
'''
Overrides the default context menu.
If text is selected, a submenu with actions on the selection is added.
(see _on_button_press)
'''
if not self._selected_text:
menu.show_all()
return
action_menu_item = get_conv_action_context_menu(
self._account, self._selected_text)
menu.prepend(action_menu_item)
menu.show_all()
def _on_uri_clicked(self,
texttag: Gtk.TextTag,
_widget: Any,
event: Gdk.Event,
iter_: Gtk.TextIter,
_kind: Gtk.TextTag
) -> int:
if event.type != Gdk.EventType.BUTTON_PRESS:
return Gdk.EVENT_PROPAGATE
begin_iter = iter_.copy()
# we get the beginning of the tag
while not begin_iter.starts_tag(texttag):
begin_iter.backward_char()
end_iter = iter_.copy()
# we get the end of the tag
while not end_iter.ends_tag(texttag):
end_iter.forward_char()
# Detect XHTML-IM link
word = getattr(texttag, 'href', None)
if not word:
word = self.get_buffer().get_text(begin_iter, end_iter, True)
uri = parse_uri(word)
if event.button.button == 3: # right click
self._show_uri_context_menu(uri)
return Gdk.EVENT_STOP
# TODO:
# self.plugin_modified = False
# app.plugin_manager.extension_point(
# 'hyperlink_handler', uri, self, self.get_toplevel())
# if self.plugin_modified:
# return Gdk.EVENT_STOP
open_uri(uri, account=self._account)
return Gdk.EVENT_STOP
def _show_uri_context_menu(self, uri: URI) -> None:
menu = get_conv_uri_context_menu(self._account, uri)
if menu is None:
log.warning('No handler for URI type: %s', uri)
return
def _destroy(menu: Gtk.Menu, _pspec: GObject.ParamSpec) -> None:
visible = menu.get_property('visible')
if not visible:
GLib.idle_add(menu.destroy)
menu.attach_to_widget(self, None)
menu.connect('notify::visible', _destroy)
menu.popup_at_pointer()
| gajim/gajim | gajim/gtk/conversation/plain_widget.py | Python | gpl-3.0 | 15,270 |
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""PTransforms for supporting Spanner in Python pipelines.
These transforms are currently supported by Beam portable
Flink and Spark runners.
**Setup**
Transforms provided in this module are cross-language transforms
implemented in the Beam Java SDK. During the pipeline construction, Python SDK
will connect to a Java expansion service to expand these transforms.
To facilitate this, a small amount of setup is needed before using these
transforms in a Beam Python pipeline.
There are several ways to setup cross-language Spanner transforms.
* Option 1: use the default expansion service
* Option 2: specify a custom expansion service
See below for details regarding each of these options.
*Option 1: Use the default expansion service*
This is the recommended and easiest setup option for using Python Spanner
transforms. This option is only available for Beam 2.26.0 and later.
This option requires following pre-requisites before running the Beam
pipeline.
* Install Java runtime in the computer from where the pipeline is constructed
and make sure that 'java' command is available.
In this option, Python SDK will either download (for released Beam version) or
build (when running from a Beam Git clone) a expansion service jar and use
that to expand transforms. Currently Spanner transforms use the
'beam-sdks-java-io-google-cloud-platform-expansion-service' jar for this
purpose.
*Option 2: specify a custom expansion service*
In this option, you startup your own expansion service and provide that as
a parameter when using the transforms provided in this module.
This option requires following pre-requisites before running the Beam
pipeline.
* Startup your own expansion service.
* Update your pipeline to provide the expansion service address when
initiating Spanner transforms provided in this module.
Flink Users can use the built-in Expansion Service of the Flink Runner's
Job Server. If you start Flink's Job Server, the expansion service will be
started on port 8097. For a different address, please set the
expansion_service parameter.
**More information**
For more information regarding cross-language transforms see:
- https://beam.apache.org/roadmap/portability/
For more information specific to Flink runner see:
- https://beam.apache.org/documentation/runners/flink/
"""
# pytype: skip-file
from enum import Enum
from enum import auto
from typing import NamedTuple
from typing import Optional
from apache_beam.transforms.external import BeamJarExpansionService
from apache_beam.transforms.external import ExternalTransform
from apache_beam.transforms.external import NamedTupleBasedPayloadBuilder
from apache_beam.typehints.schemas import named_tuple_to_schema
__all__ = [
'ReadFromSpanner',
'SpannerDelete',
'SpannerInsert',
'SpannerInsertOrUpdate',
'SpannerReplace',
'SpannerUpdate',
'TimestampBoundMode',
'TimeUnit',
]
def default_io_expansion_service():
return BeamJarExpansionService(
'sdks:java:io:google-cloud-platform:expansion-service:shadowJar')
class TimeUnit(Enum):
NANOSECONDS = auto()
MICROSECONDS = auto()
MILLISECONDS = auto()
SECONDS = auto()
HOURS = auto()
DAYS = auto()
class TimestampBoundMode(Enum):
MAX_STALENESS = auto()
EXACT_STALENESS = auto()
READ_TIMESTAMP = auto()
MIN_READ_TIMESTAMP = auto()
STRONG = auto()
class ReadFromSpannerSchema(NamedTuple):
instance_id: str
database_id: str
schema: bytes
sql: Optional[str]
table: Optional[str]
project_id: Optional[str]
host: Optional[str]
emulator_host: Optional[str]
batching: Optional[bool]
timestamp_bound_mode: Optional[str]
read_timestamp: Optional[str]
staleness: Optional[int]
time_unit: Optional[str]
class ReadFromSpanner(ExternalTransform):
"""
A PTransform which reads from the specified Spanner instance's database.
  This transform requires the type of the row it has to return in order to
  provide the schema. Example::
from typing import NamedTuple
from apache_beam import coders
class ExampleRow(NamedTuple):
id: int
name: unicode
coders.registry.register_coder(ExampleRow, coders.RowCoder)
with Pipeline() as p:
result = (
p
| ReadFromSpanner(
instance_id='your_instance_id',
database_id='your_database_id',
project_id='your_project_id',
row_type=ExampleRow,
sql='SELECT * FROM some_table',
timestamp_bound_mode=TimestampBoundMode.MAX_STALENESS,
staleness=3,
time_unit=TimeUnit.HOURS,
).with_output_types(ExampleRow))
Experimental; no backwards compatibility guarantees.
"""
URN = 'beam:external:java:spanner:read:v1'
def __init__(
self,
project_id,
instance_id,
database_id,
row_type=None,
sql=None,
table=None,
host=None,
emulator_host=None,
batching=None,
timestamp_bound_mode=None,
read_timestamp=None,
staleness=None,
time_unit=None,
expansion_service=None,
):
"""
Initializes a read operation from Spanner.
:param project_id: Specifies the Cloud Spanner project.
:param instance_id: Specifies the Cloud Spanner instance.
:param database_id: Specifies the Cloud Spanner database.
:param row_type: Row type that fits the given query or table. Passed as
NamedTuple, e.g. NamedTuple('name', [('row_name', unicode)])
    :param sql: An SQL query to execute. Its results must fit the
      provided row_type. Don't use when table is set.
:param table: A spanner table. When provided all columns from row_type
will be selected to query. Don't use when query is set.
:param batching: By default Batch API is used to read data from Cloud
Spanner. It is useful to disable batching when the underlying query
is not root-partitionable.
:param host: Specifies the Cloud Spanner host.
:param emulator_host: Specifies Spanner emulator host.
:param timestamp_bound_mode: Defines how Cloud Spanner will choose a
timestamp for a read-only transaction or a single read/query.
Passed as TimestampBoundMode enum. Possible values:
STRONG: A timestamp bound that will perform reads and queries at a
timestamp where all previously committed transactions are visible.
READ_TIMESTAMP: Returns a timestamp bound that will perform reads
and queries at the given timestamp.
MIN_READ_TIMESTAMP: Returns a timestamp bound that will perform reads
and queries at a timestamp chosen to be at least given timestamp value.
EXACT_STALENESS: Returns a timestamp bound that will perform reads and
queries at an exact staleness. The timestamp is chosen soon after the
read is started.
MAX_STALENESS: Returns a timestamp bound that will perform reads and
queries at a timestamp chosen to be at most time_unit stale.
:param read_timestamp: Timestamp in string. Use only when
timestamp_bound_mode is set to READ_TIMESTAMP or MIN_READ_TIMESTAMP.
:param staleness: Staleness value as int. Use only when
timestamp_bound_mode is set to EXACT_STALENESS or MAX_STALENESS.
time_unit has to be set along with this param.
:param time_unit: Time unit for staleness_value passed as TimeUnit enum.
Possible values: NANOSECONDS, MICROSECONDS, MILLISECONDS, SECONDS,
HOURS, DAYS.
:param expansion_service: The address (host:port) of the ExpansionService.
"""
assert row_type
    # exactly one of sql / table may be given
    assert (sql or table) and not (sql and table)
staleness_value = int(staleness) if staleness else None
if staleness_value or time_unit:
assert staleness_value and time_unit and \
timestamp_bound_mode is TimestampBoundMode.MAX_STALENESS or \
timestamp_bound_mode is TimestampBoundMode.EXACT_STALENESS
if read_timestamp:
assert timestamp_bound_mode is TimestampBoundMode.MIN_READ_TIMESTAMP\
or timestamp_bound_mode is TimestampBoundMode.READ_TIMESTAMP
super(ReadFromSpanner, self).__init__(
self.URN,
NamedTupleBasedPayloadBuilder(
ReadFromSpannerSchema(
instance_id=instance_id,
database_id=database_id,
sql=sql,
table=table,
schema=named_tuple_to_schema(row_type).SerializeToString(),
project_id=project_id,
host=host,
emulator_host=emulator_host,
batching=batching,
timestamp_bound_mode=_get_enum_name(timestamp_bound_mode),
read_timestamp=read_timestamp,
staleness=staleness,
time_unit=_get_enum_name(time_unit),
),
),
expansion_service or default_io_expansion_service(),
)
class WriteToSpannerSchema(NamedTuple):
project_id: str
instance_id: str
database_id: str
table: str
max_batch_size_bytes: Optional[int]
max_number_mutations: Optional[int]
max_number_rows: Optional[int]
grouping_factor: Optional[int]
host: Optional[str]
emulator_host: Optional[str]
commit_deadline: Optional[int]
max_cumulative_backoff: Optional[int]
_CLASS_DOC = \
"""
A PTransform which writes {operation} mutations to the specified Spanner
table.
This transform receives rows defined as NamedTuple. Example::
from typing import NamedTuple
from apache_beam import coders
class {row_type}(NamedTuple):
id: int
name: unicode
coders.registry.register_coder({row_type}, coders.RowCoder)
with Pipeline() as p:
_ = (
p
| 'Impulse' >> beam.Impulse()
| 'Generate' >> beam.FlatMap(lambda x: range(num_rows))
| 'To row' >> beam.Map(lambda n: {row_type}(n, str(n))
.with_output_types({row_type})
| 'Write to Spanner' >> Spanner{operation_suffix}(
instance_id='your_instance',
database_id='existing_database',
project_id='your_project_id',
table='your_table'))
Experimental; no backwards compatibility guarantees.
"""
_INIT_DOC = \
"""
Initializes {operation} operation to a Spanner table.
:param project_id: Specifies the Cloud Spanner project.
:param instance_id: Specifies the Cloud Spanner instance.
:param database_id: Specifies the Cloud Spanner database.
:param table: Specifies the Cloud Spanner table.
:param max_batch_size_bytes: Specifies the batch size limit (max number of
bytes mutated per batch). Default value is 1048576 bytes = 1MB.
:param max_number_mutations: Specifies the cell mutation limit (maximum
number of mutated cells per batch). Default value is 5000.
:param max_number_rows: Specifies the row mutation limit (maximum number of
mutated rows per batch). Default value is 500.
:param grouping_factor: Specifies the multiple of max mutation (in terms
of both bytes per batch and cells per batch) that is used to select a
set of mutations to sort by key for batching. This sort uses local
memory on the workers, so using large values can cause out of memory
errors. Default value is 1000.
:param host: Specifies the Cloud Spanner host.
:param emulator_host: Specifies Spanner emulator host.
:param commit_deadline: Specifies the deadline for the Commit API call.
Default is 15 secs. DEADLINE_EXCEEDED errors will prompt a backoff/retry
until the value of commit_deadline is reached. DEADLINE_EXCEEDED errors
    are reported with logging and counters. Pass seconds as value.
:param max_cumulative_backoff: Specifies the maximum cumulative backoff
time when retrying after DEADLINE_EXCEEDED errors. Default is 900s
(15min). If the mutations still have not been written after this time,
they are treated as a failure, and handled according to the setting of
failure_mode. Pass seconds as value.
:param expansion_service: The address (host:port) of the ExpansionService.
"""
def _add_doc(
value,
operation=None,
row_type=None,
operation_suffix=None,
):
def _doc(obj):
obj.__doc__ = value.format(
operation=operation,
row_type=row_type,
operation_suffix=operation_suffix,
)
return obj
return _doc
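# Note on the helper above: _add_doc is a decorator factory that fills the
# {operation}/{row_type}/{operation_suffix} placeholders of the shared
# docstring templates. A minimal, hypothetical equivalent:
#
#   @_add_doc('Performs {operation}.', operation='an insert')
#   def f():
#       pass
#
#   assert f.__doc__ == 'Performs an insert.'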
@_add_doc(
_CLASS_DOC,
operation='delete',
row_type='ExampleKey',
operation_suffix='Delete',
)
class SpannerDelete(ExternalTransform):
URN = 'beam:external:java:spanner:delete:v1'
@_add_doc(_INIT_DOC, operation='a delete')
def __init__(
self,
project_id,
instance_id,
database_id,
table,
max_batch_size_bytes=None,
max_number_mutations=None,
max_number_rows=None,
grouping_factor=None,
host=None,
emulator_host=None,
commit_deadline=None,
max_cumulative_backoff=None,
expansion_service=None,
):
max_cumulative_backoff = int(
max_cumulative_backoff) if max_cumulative_backoff else None
commit_deadline = int(commit_deadline) if commit_deadline else None
super().__init__(
self.URN,
NamedTupleBasedPayloadBuilder(
WriteToSpannerSchema(
project_id=project_id,
instance_id=instance_id,
database_id=database_id,
table=table,
max_batch_size_bytes=max_batch_size_bytes,
max_number_mutations=max_number_mutations,
max_number_rows=max_number_rows,
grouping_factor=grouping_factor,
host=host,
emulator_host=emulator_host,
commit_deadline=commit_deadline,
max_cumulative_backoff=max_cumulative_backoff,
),
),
expansion_service=expansion_service or default_io_expansion_service(),
)
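# Note on the int() coercions above (hedged illustration): commit_deadline
# and max_cumulative_backoff cross the expansion service as integers, so a
# fractional input is truncated, e.g.:
#
#   SpannerDelete(..., commit_deadline=20.5)  # transmitted as 20 seconds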
@_add_doc(
_CLASS_DOC,
operation='insert',
row_type='ExampleRow',
operation_suffix='Insert',
)
class SpannerInsert(ExternalTransform):
URN = 'beam:external:java:spanner:insert:v1'
@_add_doc(_INIT_DOC, operation='an insert')
def __init__(
self,
project_id,
instance_id,
database_id,
table,
max_batch_size_bytes=None,
max_number_mutations=None,
max_number_rows=None,
grouping_factor=None,
host=None,
emulator_host=None,
commit_deadline=None,
max_cumulative_backoff=None,
expansion_service=None,
):
max_cumulative_backoff = int(
max_cumulative_backoff) if max_cumulative_backoff else None
commit_deadline = int(commit_deadline) if commit_deadline else None
super().__init__(
self.URN,
NamedTupleBasedPayloadBuilder(
WriteToSpannerSchema(
project_id=project_id,
instance_id=instance_id,
database_id=database_id,
table=table,
max_batch_size_bytes=max_batch_size_bytes,
max_number_mutations=max_number_mutations,
max_number_rows=max_number_rows,
grouping_factor=grouping_factor,
host=host,
emulator_host=emulator_host,
commit_deadline=commit_deadline,
max_cumulative_backoff=max_cumulative_backoff,
),
),
expansion_service=expansion_service or default_io_expansion_service(),
)
@_add_doc(
_CLASS_DOC,
operation='replace',
row_type='ExampleRow',
operation_suffix='Replace',
)
class SpannerReplace(ExternalTransform):
URN = 'beam:external:java:spanner:replace:v1'
@_add_doc(_INIT_DOC, operation='a replace')
def __init__(
self,
project_id,
instance_id,
database_id,
table,
max_batch_size_bytes=None,
max_number_mutations=None,
max_number_rows=None,
grouping_factor=None,
host=None,
emulator_host=None,
commit_deadline=None,
max_cumulative_backoff=None,
expansion_service=None,
):
max_cumulative_backoff = int(
max_cumulative_backoff) if max_cumulative_backoff else None
commit_deadline = int(commit_deadline) if commit_deadline else None
super().__init__(
self.URN,
NamedTupleBasedPayloadBuilder(
WriteToSpannerSchema(
project_id=project_id,
instance_id=instance_id,
database_id=database_id,
table=table,
max_batch_size_bytes=max_batch_size_bytes,
max_number_mutations=max_number_mutations,
max_number_rows=max_number_rows,
grouping_factor=grouping_factor,
host=host,
emulator_host=emulator_host,
commit_deadline=commit_deadline,
max_cumulative_backoff=max_cumulative_backoff,
),
),
expansion_service=expansion_service or default_io_expansion_service(),
)
@_add_doc(
_CLASS_DOC,
operation='insert-or-update',
row_type='ExampleRow',
operation_suffix='InsertOrUpdate',
)
class SpannerInsertOrUpdate(ExternalTransform):
URN = 'beam:external:java:spanner:insert_or_update:v1'
@_add_doc(_INIT_DOC, operation='an insert-or-update')
def __init__(
self,
project_id,
instance_id,
database_id,
table,
max_batch_size_bytes=None,
max_number_mutations=None,
max_number_rows=None,
grouping_factor=None,
host=None,
emulator_host=None,
commit_deadline=None,
max_cumulative_backoff=None,
expansion_service=None,
):
max_cumulative_backoff = int(
max_cumulative_backoff) if max_cumulative_backoff else None
commit_deadline = int(commit_deadline) if commit_deadline else None
super().__init__(
self.URN,
NamedTupleBasedPayloadBuilder(
WriteToSpannerSchema(
project_id=project_id,
instance_id=instance_id,
database_id=database_id,
table=table,
max_batch_size_bytes=max_batch_size_bytes,
max_number_mutations=max_number_mutations,
max_number_rows=max_number_rows,
grouping_factor=grouping_factor,
host=host,
emulator_host=emulator_host,
commit_deadline=commit_deadline,
max_cumulative_backoff=max_cumulative_backoff,
),
),
expansion_service=expansion_service or default_io_expansion_service(),
)
@_add_doc(
_CLASS_DOC,
operation='update',
row_type='ExampleRow',
operation_suffix='Update',
)
class SpannerUpdate(ExternalTransform):
URN = 'beam:external:java:spanner:update:v1'
@_add_doc(_INIT_DOC, operation='an update')
def __init__(
self,
project_id,
instance_id,
database_id,
table,
max_batch_size_bytes=None,
max_number_mutations=None,
max_number_rows=None,
grouping_factor=None,
host=None,
emulator_host=None,
commit_deadline=None,
max_cumulative_backoff=None,
expansion_service=None,
):
max_cumulative_backoff = int(
max_cumulative_backoff) if max_cumulative_backoff else None
commit_deadline = int(commit_deadline) if commit_deadline else None
super().__init__(
self.URN,
NamedTupleBasedPayloadBuilder(
WriteToSpannerSchema(
project_id=project_id,
instance_id=instance_id,
database_id=database_id,
table=table,
max_batch_size_bytes=max_batch_size_bytes,
max_number_mutations=max_number_mutations,
max_number_rows=max_number_rows,
grouping_factor=grouping_factor,
host=host,
emulator_host=emulator_host,
commit_deadline=commit_deadline,
max_cumulative_backoff=max_cumulative_backoff,
),
),
expansion_service=expansion_service or default_io_expansion_service(),
)
def _get_enum_name(enum):
return None if enum is None else enum.name
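# Small sanity sketch for the helper above (TimeUnit is defined earlier in
# this module; the behaviour follows directly from the code):
#
#   assert _get_enum_name(None) is None
#   assert _get_enum_name(TimeUnit.SECONDS) == 'SECONDS'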
| lukecwik/incubator-beam | sdks/python/apache_beam/io/gcp/spanner.py | Python | apache-2.0 | 21,164 |
from sqlalchemy import *
from migrate import *
from migrate.changeset import schema
pre_meta = MetaData()
post_meta = MetaData()
dnevnik = Table('dnevnik', post_meta,
Column('id', Integer, primary_key=True, nullable=False),
Column('ime_i_prezime', String(length=50)),
Column('razred', String(length=4)),
Column('ime', String(length=15)),
Column('prezime', String(length=30)),
Column('test', String(length=30)),
)
def upgrade(migrate_engine):
# Upgrade operations go here. Don't create your own engine; bind
# migrate_engine to your metadata
pre_meta.bind = migrate_engine
post_meta.bind = migrate_engine
post_meta.tables['dnevnik'].create()
def downgrade(migrate_engine):
# Operations to reverse the above upgrade go here.
pre_meta.bind = migrate_engine
post_meta.bind = migrate_engine
post_meta.tables['dnevnik'].drop()
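# Hedged example of exercising this migration by hand (sqlalchemy-migrate
# normally drives it via its CLI); the SQLite URL is a placeholder:
#
#   from sqlalchemy import create_engine
#   engine = create_engine('sqlite:///app.db')
#   upgrade(engine)    # creates the 'dnevnik' table
#   downgrade(engine)  # drops it again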
| knadir/IIIgimnazija80 | db_repository/versions/024_migration.py | Python | bsd-3-clause | 887 |
from mock import patch
import jenkins
from tests.base import JenkinsTestBase
from tests.helper import build_response_mock
class JenkinsVersionTest(JenkinsTestBase):
@patch('jenkins.requests.Session.send', autospec=True)
def test_some_version(self, session_send_mock):
session_send_mock.return_value = build_response_mock(
200, headers={'X-Jenkins': 'Version42', 'Content-Length': 0})
self.assertEqual(self.j.get_version(), 'Version42')
@patch('jenkins.requests.Session.send', autospec=True)
def test_raise_HTTPError(self, session_send_mock):
session_send_mock.side_effect = iter([
build_response_mock(404, reason="Not Found"), # crumb
build_response_mock(499, reason="Unhandled Error"), # request
])
with self.assertRaises(jenkins.BadHTTPException) as context_manager:
self.j.get_version()
self.assertEqual(
str(context_manager.exception),
'Error communicating with server[{0}/]'.format(self.base_url))
@patch('jenkins.requests.Session.send', autospec=True)
def test_raise_BadStatusLine(self, session_send_mock):
session_send_mock.side_effect = jenkins.BadStatusLine('not a valid status line')
with self.assertRaises(jenkins.BadHTTPException) as context_manager:
self.j.get_version()
self.assertEqual(
str(context_manager.exception),
'Error communicating with server[{0}/]'.format(self.base_url))
@patch('jenkins.requests.Session.send', autospec=True)
def test_return_empty_response(self, session_send_mock):
session_send_mock.return_value = build_response_mock(0)
with self.assertRaises(jenkins.EmptyResponseException) as context_manager:
self.j.get_version()
self.assertEqual(
str(context_manager.exception),
'Error communicating with server[{0}/]:'
' empty response'.format(self.base_url))
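# Pattern note (illustrative, not part of the original tests): because the
# patch uses autospec=True on an unbound method, the mock also receives the
# Session instance, so a hypothetical inspection of the issued request is:
#
#   session_instance, prepared_request = session_send_mock.call_args[0]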
| stackforge/python-jenkins | tests/test_version.py | Python | bsd-3-clause | 1,993 |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from azure.mgmt.core.exceptions import ARMErrorFormat
from .. import models as _models
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Callable, Dict, Generic, List, Optional, TypeVar
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
class ManagedClusterVersionOperations(object):
"""ManagedClusterVersionOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~service_fabric_managed_clusters_management_client.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
def get(
self,
location, # type: str
cluster_version, # type: str
**kwargs # type: Any
):
# type: (...) -> "_models.ManagedClusterCodeVersionResult"
"""Gets information about a Service Fabric managed cluster code version available in the specified location.
Gets information about an available Service Fabric managed cluster code version.
:param location: The location for the cluster code versions. This is different from cluster
location.
:type location: str
:param cluster_version: The cluster code version.
:type cluster_version: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: ManagedClusterCodeVersionResult, or the result of cls(response)
:rtype: ~service_fabric_managed_clusters_management_client.models.ManagedClusterCodeVersionResult
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ManagedClusterCodeVersionResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2021-05-01"
accept = "application/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'location': self._serialize.url("location", location, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'clusterVersion': self._serialize.url("cluster_version", cluster_version, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorModel, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('ManagedClusterCodeVersionResult', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.ServiceFabric/locations/{location}/managedClusterVersions/{clusterVersion}'} # type: ignore
def get_by_environment(
self,
location, # type: str
cluster_version, # type: str
**kwargs # type: Any
):
# type: (...) -> "_models.ManagedClusterCodeVersionResult"
"""Gets information about a Service Fabric cluster code version available for the specified environment.
Gets information about an available Service Fabric cluster code version by environment.
:param location: The location for the cluster code versions. This is different from cluster
location.
:type location: str
:param cluster_version: The cluster code version.
:type cluster_version: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: ManagedClusterCodeVersionResult, or the result of cls(response)
:rtype: ~service_fabric_managed_clusters_management_client.models.ManagedClusterCodeVersionResult
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ManagedClusterCodeVersionResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
environment = "Windows"
api_version = "2021-05-01"
accept = "application/json"
# Construct URL
url = self.get_by_environment.metadata['url'] # type: ignore
path_format_arguments = {
'location': self._serialize.url("location", location, 'str'),
'environment': self._serialize.url("environment", environment, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'clusterVersion': self._serialize.url("cluster_version", cluster_version, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorModel, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('ManagedClusterCodeVersionResult', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get_by_environment.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.ServiceFabric/locations/{location}/environments/{environment}/managedClusterVersions/{clusterVersion}'} # type: ignore
def list(
self,
location, # type: str
**kwargs # type: Any
):
# type: (...) -> List["_models.ManagedClusterCodeVersionResult"]
"""Gets the list of Service Fabric cluster code versions available for the specified location.
Gets all available code versions for Service Fabric cluster resources by location.
:param location: The location for the cluster code versions. This is different from cluster
location.
:type location: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: list of ManagedClusterCodeVersionResult, or the result of cls(response)
:rtype: list[~service_fabric_managed_clusters_management_client.models.ManagedClusterCodeVersionResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[List["_models.ManagedClusterCodeVersionResult"]]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2021-05-01"
accept = "application/json"
# Construct URL
url = self.list.metadata['url'] # type: ignore
path_format_arguments = {
'location': self._serialize.url("location", location, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorModel, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('[ManagedClusterCodeVersionResult]', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
list.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.ServiceFabric/locations/{location}/managedClusterVersions'} # type: ignore
def list_by_environment(
self,
location, # type: str
**kwargs # type: Any
):
# type: (...) -> List["_models.ManagedClusterCodeVersionResult"]
"""Gets the list of Service Fabric cluster code versions available for the specified environment.
Gets all available code versions for Service Fabric cluster resources by environment.
:param location: The location for the cluster code versions. This is different from cluster
location.
:type location: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: list of ManagedClusterCodeVersionResult, or the result of cls(response)
:rtype: list[~service_fabric_managed_clusters_management_client.models.ManagedClusterCodeVersionResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[List["_models.ManagedClusterCodeVersionResult"]]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
environment = "Windows"
api_version = "2021-05-01"
accept = "application/json"
# Construct URL
url = self.list_by_environment.metadata['url'] # type: ignore
path_format_arguments = {
'location': self._serialize.url("location", location, 'str'),
'environment': self._serialize.url("environment", environment, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorModel, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('[ManagedClusterCodeVersionResult]', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
list_by_environment.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.ServiceFabric/locations/{location}/environments/{environment}/managedClusterVersions'} # type: ignore
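# Hedged usage sketch (client construction assumed; the attribute name for
# this operation group on the management client is an assumption here):
#
#   versions = client.managed_cluster_version.list(location='eastus')
#   for version in versions:
#       print(version)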
| Azure/azure-sdk-for-python | sdk/servicefabricmanagedclusters/azure-mgmt-servicefabricmanagedclusters/azure/mgmt/servicefabricmanagedclusters/operations/_managed_cluster_version_operations.py | Python | mit | 14,365 |
from django import template
from django.utils.safestring import mark_safe
from django.utils.html import conditional_escape as escape
register = template.Library()
@register.filter
def panel_info(data):
if not data:
return mark_safe('<p class="empty">None</p>')
out = []
out.append('<dl class="panel-info">')
for k,v in data:
out.append('<dt>')
out.append(escape(k))
out.append('</dt><dd>')
out.append(escape(v))
out.append('</dd>')
out.append('</dl>')
    return mark_safe(''.join(out))
| sfu-fas/coursys | coredata/templatetags/admin_panel_tags.py | Python | gpl-3.0 | 555 |
#
# Copyright (c) SAS Institute Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from mint.django_rest.rbuilder.inventory import models
class SystemManager(object):
CREDS = {}
VERSIONS = {}
def getSoftwareVersionsForInstanceId(self, instanceId):
if self.VERSIONS.has_key(instanceId):
return self.VERSIONS[instanceId]
else:
installedGroups = [ 'group-blah=/foo@bar:baz/1234567890:1-2-3[]',
'group-second-appliance=/foo@bar:baz/1234567891:1-2-4[]' ]
return '\n'.join(installedGroups)
def setSoftwareVersionForInstanceId(self, instanceId, softwareVersion):
self.VERSIONS[instanceId] = '\n'.join("%s=%s[%s]" %
(x[1], x[1].freeze(), x[2])
for x in softwareVersion)
def setSystemSSLInfo(self, instanceId, sslCert, sslKey):
return 'setSystemSSLInfo'
def getSystemByInstanceId(self, instanceId):
if self.CREDS.has_key(instanceId):
cert, key = self.CREDS[instanceId]
system = models.System(
ssl_client_key = key,
ssl_client_certificate = cert,
is_manageable = True)
else:
system = models.System(
is_manageable = False,
)
return system
def getCachedUpdates(self, nvf):
return None
def cacheUpdate(self, nvf, updateNvf):
return
def clearCachedUpdates(self, nvfs):
return
class RbuilderManager(object):
def __init__(self, cfg, userName):
self.cfg = cfg
self.userName = userName
self.sysMgr = SystemManager()
def addLaunchedSystem(self, system, dnsName=None, targetName=None,
targetType=None):
# Dump certs
file("/tmp/adfadf", "w").write(self.cfg.dataPath)
certFile = "%s/x509.crt" % self.cfg.dataPath
keyFile = "%s/x509.key" % self.cfg.dataPath
file(certFile, "w").write(system.ssl_client_certificate)
file(keyFile, "w").write(system.ssl_client_key)
return system
| sassoftware/catalog-service | catalogService_test/mockedModules/mint/django_rest/rbuilder/manager/rbuildermanager.py | Python | apache-2.0 | 2,593 |
from six import iteritems
class Song:
"""
Song Attributes:
name (String)
track_id (Integer)
artist (String)
album_artist (String)
composer = None (String)
album = None (String)
genre = None (String)
kind = None (String)
size = None (Integer)
total_time = None (Integer)
track_number = None (Integer)
track_count = None (Integer)
disc_number = None (Integer)
disc_count = None (Integer)
year = None (Integer)
date_modified = None (Time)
date_added = None (Time)
bit_rate = None (Integer)
sample_rate = None (Integer)
comments = None (String)
rating = None (Integer)
rating_computed = False (Boolean)
album_rating = None (Integer)
play_count = None (Integer)
location = None (String)
location_escaped = None (String)
compilation = False (Boolean)
grouping = None (String)
lastplayed = None (Time)
skip_count = None (Integer)
skip_date = None (Time)
length = None (Integer)
persistent_id = None (String)
album_rating_computed = False (Boolean)
work = None (String)
movement_name = None (String)
movement_number = None (Integer)
movement_count = None (Integer)
playlist_only = None (Bool)
apple_music = None (Bool)
protected = None (Bool)
"""
name = None
track_id = None
artist = None
album_artist = None
composer = None
album = None
genre = None
kind = None
size = None
total_time = None
track_number = None
track_count = None
disc_number = None
disc_count = None
year = None
date_modified = None
date_added = None
bit_rate = None
sample_rate = None
comments = None
rating = None
rating_computed = None
album_rating = None
play_count = None
skip_count = None
skip_date = None
location = None
location_escaped = None
compilation = None
grouping = None
lastplayed = None
length = None
persistent_id = None
album_rating_computed = None
work = None
movement_name = None
movement_number = None
movement_count = None
playlist_only = None
apple_music = None
protected = None
def __iter__(self):
for attr, value in iteritems(self.__dict__):
yield attr, value
def ToDict(self):
return {key: value for (key, value) in self}
| liamks/pyitunes | libpytunes/Song.py | Python | mit | 2,391 |
import importlib
import inspect
import jabberbot
import pkgutil
import os
from jabberbot.mucbot import MUCBot
def run_command(msg, *args):
"""Returns a help string containing all commands"""
docs = {}
cmdpath = jabberbot.commands.__path__
for module_finder, name, ispkg in pkgutil.iter_modules(cmdpath):
module = importlib.import_module('jabberbot.commands.' + name)
if not hasattr(module, 'run_command'):
continue
doc = inspect.getdoc(module.run_command)
if not doc:
continue
module_name = module.__name__ # jabberbot.commands.foo
command_name = module_name.rsplit('.', 1)[1] # foo
docs[command_name] = doc
message = []
if args: # help <command>
cmd = args[0]
if len(args) > 1 or cmd not in docs:
return 'chat', 'Command not found'
message.append(docs[cmd])
else: # help
message.append('Available commands:{}'.format(os.linesep))
for cmd in sorted(docs.keys()):
if cmd == 'help':
continue
doc = docs[cmd]
lines = doc.splitlines()
message.append('{}{}: {}'.format(MUCBot.cmd_prefix,
cmd,
lines[0]))
bottom = ('{0}Type !help <command name> to get more info '
'about that specific command.').format(os.linesep)
message.append(bottom)
src = 'Source code available at https://github.com/ScreenDriver/jabberbot'
message.append(src)
return 'chat', os.linesep.join(message)
| ScreenDriver/jabberbot | jabberbot/commands/help.py | Python | gpl-2.0 | 1,633 |
# -*- coding: utf-8 -*-
import random
import os
import string
import eve
from datetime import datetime
from unittest import TestCase
from sqlalchemy.sql.elements import BooleanClauseList
from operator import and_, or_
from eve.utils import str_to_date
from eve_sqlalchemy.tests.test_sql_tables import People
from eve_sqlalchemy.parser import parse
from eve_sqlalchemy.parser import parse_dictionary
from eve_sqlalchemy.parser import parse_sorting
from eve_sqlalchemy.parser import ParseError
from eve_sqlalchemy.parser import sqla_op
from eve_sqlalchemy.structures import SQLAResultCollection
from eve_sqlalchemy import SQL
class TestSQLParser(TestCase):
def setUp(self):
self.model = People
def test_wrong_attribute(self):
self.assertRaises(AttributeError, parse, 'a == 1', self.model)
def test_eq(self):
expected_expression = sqla_op.eq(self.model.firstname, 'john')
r = parse('firstname == john', self.model)
self.assertEqual(type(r), list)
self.assertTrue(len(r) == 1)
self.assertTrue(expected_expression.compare(r[0]))
def test_gt(self):
expected_expression = sqla_op.gt(self.model.prog, 5)
r = parse('prog > 5', self.model)
self.assertEqual(type(r), list)
self.assertTrue(len(r) == 1)
self.assertTrue(expected_expression.compare(r[0]))
def test_gte(self):
expected_expression = sqla_op.ge(self.model.prog, 5)
r = parse('prog >= 5', self.model)
self.assertEqual(type(r), list)
self.assertTrue(len(r) == 1)
self.assertTrue(expected_expression.compare(r[0]))
def test_lt(self):
expected_expression = sqla_op.lt(self.model.prog, 5)
r = parse('prog < 5', self.model)
self.assertEqual(type(r), list)
self.assertTrue(len(r) == 1)
self.assertTrue(expected_expression.compare(r[0]))
def test_lte(self):
expected_expression = sqla_op.le(self.model.prog, 5)
r = parse('prog <= 5', self.model)
self.assertEqual(type(r), list)
self.assertTrue(len(r) == 1)
self.assertTrue(expected_expression.compare(r[0]))
def test_not_eq(self):
expected_expression = sqla_op.ne(self.model.prog, 5)
r = parse('prog != 5', self.model)
self.assertEqual(type(r), list)
self.assertTrue(len(r) == 1)
self.assertTrue(expected_expression.compare(r[0]))
def test_and_bool_op(self):
r = parse('firstname == "john" and prog == 5', self.model)
self.assertEqual(type(r), list)
self.assertEqual(type(r[0]), BooleanClauseList)
self.assertEqual(r[0].operator, and_)
self.assertEqual(len(r[0].clauses), 2)
expected_expression = sqla_op.eq(self.model.firstname, 'john')
self.assertTrue(expected_expression.compare(r[0].clauses[0]))
expected_expression = sqla_op.eq(self.model.prog, 5)
self.assertTrue(expected_expression.compare(r[0].clauses[1]))
def test_or_bool_op(self):
r = parse('firstname == "john" or prog == 5', self.model)
self.assertEqual(type(r), list)
self.assertEqual(type(r[0]), BooleanClauseList)
self.assertEqual(r[0].operator, or_)
self.assertEqual(len(r[0].clauses), 2)
expected_expression = sqla_op.eq(self.model.firstname, 'john')
self.assertTrue(expected_expression.compare(r[0].clauses[0]))
expected_expression = sqla_op.eq(self.model.prog, 5)
self.assertTrue(expected_expression.compare(r[0].clauses[1]))
def test_nested_bool_op(self):
r = parse('firstname == "john" or (prog == 5 and lastname == "smith")',
self.model)
self.assertEqual(type(r), list)
self.assertEqual(type(r[0]), BooleanClauseList)
self.assertEqual(r[0].operator, or_)
self.assertEqual(len(r[0].clauses), 2)
expected_expression = sqla_op.eq(self.model.firstname, 'john')
self.assertTrue(expected_expression.compare(r[0].clauses[0]))
second_op = r[0].clauses[1]
self.assertEqual(type(second_op), BooleanClauseList)
self.assertEqual(second_op.operator, and_)
self.assertEqual(len(second_op.clauses), 2)
expected_expression = sqla_op.eq(self.model.prog, 5)
self.assertTrue(expected_expression.compare(second_op.clauses[0]))
expected_expression = sqla_op.eq(self.model.lastname, 'smith')
self.assertTrue(expected_expression.compare(second_op.clauses[1]))
def test_raises_parse_error_for_invalid_queries(self):
self.assertRaises(ParseError, parse, '', self.model)
self.assertRaises(ParseError, parse, 'firstname', self.model)
def test_raises_parse_error_for_invalid_op(self):
self.assertRaises(ParseError, parse, 'firstname | "john"', self.model)
def test_parse_string_to_date(self):
expected_expression = \
sqla_op.gt(self.model._updated,
str_to_date('Sun, 06 Nov 1994 08:49:37 GMT'))
r = parse('_updated > "Sun, 06 Nov 1994 08:49:37 GMT"', self.model)
self.assertEqual(type(r), list)
self.assertTrue(len(r) == 1)
self.assertTrue(expected_expression.compare(r[0]))
def test_parse_dictionary(self):
r = parse_dictionary({'firstname': 'john', 'prog': 5}, self.model)
self.assertEqual(type(r), list)
self.assertTrue(len(r) == 2)
expected_expression = sqla_op.eq(self.model.firstname, 'john')
any_true = any(expected_expression.compare(elem) for elem in r)
self.assertTrue(any_true)
expected_expression = sqla_op.eq(self.model.prog, 5)
any_true = any(expected_expression.compare(elem) for elem in r)
self.assertTrue(any_true)
def test_parse_adv_dictionary(self):
r = parse_dictionary({'firstname': ['john', 'dylan']}, self.model)
self.assertEqual(str(r[0]),
'people.firstname IN (:firstname_1, :firstname_2)')
def test_parse_sqla_operators(self):
r = parse_dictionary({'firstname': 'ilike("john%")'}, self.model)
self.assertEqual(str(r[0]),
'lower(people.firstname) LIKE lower(:firstname_1)')
r = parse_dictionary({'firstname': 'like("john%")'}, self.model)
self.assertEqual(str(r[0]),
'people.firstname LIKE :firstname_1')
r = parse_dictionary({'firstname': 'in("(\'john\',\'mark\')")'},
self.model)
self.assertEqual(str(r[0]),
'people.firstname in :firstname_1')
self.assertEqual(r[0].right.value,
"('john','mark')")
r = parse_dictionary({'firstname': 'similar to("(\'john%\'|\'mark%\')")'},
self.model)
self.assertEqual(str(r[0]),
'people.firstname similar to :firstname_1')
self.assertEqual(r[0].right.value,
"('john%'|'mark%')")
def test_parse_sqla_and_or_conjunctions(self):
r = parse_dictionary(
{'or_': '[{"firstname": "john"}, {"and_": ['
'{"firstname": "dylan"},{"lastname": "smith"}]}]'}, self.model)
self.assertEqual(str(r[0]),
'people.firstname = :firstname_1 OR '
'people.firstname = :firstname_2 AND '
'people.lastname = :lastname_1')
self.assertEqual(type(r), list)
self.assertEqual(type(r[0]), BooleanClauseList)
self.assertEqual(r[0].operator, or_)
self.assertEqual(len(r[0].clauses), 2)
expected_expression = sqla_op.eq(self.model.firstname, 'john')
self.assertTrue(expected_expression.compare(r[0].clauses[0]))
second_op = r[0].clauses[1]
self.assertEqual(type(second_op), BooleanClauseList)
self.assertEqual(second_op.operator, and_)
self.assertEqual(len(second_op.clauses), 2)
expected_expression = sqla_op.eq(self.model.firstname, 'dylan')
self.assertTrue(expected_expression.compare(second_op.clauses[0]))
expected_expression = sqla_op.eq(self.model.lastname, 'smith')
self.assertTrue(expected_expression.compare(second_op.clauses[1]))
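# Illustrative helper (not a test): the expression lists returned by parse()
# and parse_dictionary() are meant to be splatted into a SQLAlchemy filter.
# `session` is an assumed, caller-provided SQLAlchemy session.
def _example_apply_parsed_filter(session):
    expressions = parse('firstname == john and prog > 5', People)
    return session.query(People).filter(*expressions)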
class TestSQLStructures(TestCase):
def setUp(self):
self.person = People(firstname='douglas', lastname='adams', prog=5,
_id=1, _updated=datetime.now(),
_created=datetime.now())
self.fields = ['_id', '_updated', '_created', 'firstname', 'lastname',
'prog', '_etag']
self.known_resource_count = 101
self.max_results = 25
def test_sql_collection(self):
self.setupDB()
c = SQLAResultCollection(self.query, self.fields)
self.assertEqual(c.count(), self.known_resource_count)
self.dropDB()
def test_sql_collection_pagination(self):
self.setupDB()
with self.app.app_context():
c = SQLAResultCollection(self.query, self.fields,
max_results=self.max_results)
self.assertEqual(c.count(), self.known_resource_count)
results = [p for p in c]
self.assertEqual(len(results), self.max_results)
self.dropDB()
def test_base_sorting(self):
self.setupDB()
self.assertEqual(str(
parse_sorting(People, self.query, 'lastname', -1)).lower(),
'people.lastname desc')
self.assertEqual(str(
parse_sorting(People, self.query, 'lastname', 1)).lower(),
'people.lastname')
self.assertEqual(str(
parse_sorting(People, self.query, 'lastname', -1, 'nullslast')).lower(),
'people.lastname desc nulls last')
self.assertEqual(str(
parse_sorting(People, self.query, 'lastname', -1, 'nullsfirst')).lower(),
'people.lastname desc nulls first')
def setupDB(self):
self.this_directory = os.path.dirname(os.path.realpath(__file__))
self.settings_file = os.path.join(self.this_directory,
'test_settings_sql.py')
self.app = eve.Eve(settings=self.settings_file, data=SQL)
self.connection = SQL.driver
self.connection.drop_all()
self.connection.create_all()
self.bulk_insert()
self.query = self.connection.session.query(People)
def bulk_insert(self):
if not self.connection.session.query(People).count():
# load random people in db
people = self.random_people(self.known_resource_count)
people = [People.from_tuple(item) for item in people]
for person in people:
self.connection.session.add(person)
self.connection.session.commit()
def random_string(self, length=6):
return ''.join(random.choice(string.ascii_lowercase)
for _ in range(length)).capitalize()
def random_people(self, num):
people = []
for i in range(num):
people.append((self.random_string(6), self.random_string(6), i))
return people
def dropDB(self):
self.connection = SQL.driver
self.connection.session.remove()
self.connection.drop_all()
# TODO: Validation tests
# class TestSQLValidator(TestCase):
# def test_unique_fail(self):
# """ relying on POST and PATCH tests since we don't have an active
# app_context running here """
# pass
#
# def test_unique_success(self):
# """ relying on POST and PATCH tests since we don't have an active
# app_context running here """
# pass
#
# def test_objectid_fail(self):
# schema = {'id': {'type': 'objectid'}}
# doc = {'id': 'not_an_object_id'}
# v = Validator(schema, None)
# self.assertFalse(v.validate(doc))
# self.assertTrue('id' in v.errors)
# self.assertTrue('ObjectId' in v.errors['id'])
#
# def test_objectid_success(self):
# schema = {'id': {'type': 'objectid'}}
# doc = {'id': ObjectId('50656e4538345b39dd0414f0')}
# v = Validator(schema, None)
# self.assertTrue(v.validate(doc))
#
# def test_transparent_rules(self):
# schema = {'a_field': {'type': 'string'}}
# v = Validator(schema)
# self.assertTrue(v.transparent_schema_rules, True)
| lingfish/eve-sqlalchemy | eve_sqlalchemy/tests/sql.py | Python | bsd-3-clause | 12,468 |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
__version__ = "0.2.1"
from .db import Database # noqa
from .models import TinyJsonModel # noqa
from jsonmodels import fields # Proxy to jsonmodels fields objects
| abnerjacobsen/tinydb-jsonorm | src/tinydb_jsonorm/__init__.py | Python | bsd-2-clause | 318 |
"""
Django template context processors.
"""
from django.conf import settings
from openedx.core.djangoapps.site_configuration import helpers as configuration_helpers
def configuration_context(request): # pylint: disable=unused-argument
"""
Configuration context for django templates.
"""
return {
'platform_name': configuration_helpers.get_value('platform_name', settings.PLATFORM_NAME)
}
| ahmedaljazzar/edx-platform | openedx/core/djangoapps/site_configuration/context_processors.py | Python | agpl-3.0 | 421 |
def extractNegaTranslations(item):
"""
"""
vol, chp, frag, postfix = extractVolChapterFragmentPostfix(item['title'])
if not (chp or vol or frag) or 'preview' in item['title'].lower():
return None
tagmap = {
'Glutton Berserker' : 'Glutton Berserker',
'Kaette Kita Motoyuusha' : 'Kaette Kita Motoyuusha',
'Takami no Kago' : 'Takami no Kago',
'Gacha Girl Corps' : 'Gacha Girl Corps',
'Tanpa kategori' : 'Takami no Kago',
'The Story of Hero Among Heroes' : 'The Story of Hero Among Heroes ~The Founding Chronicles of Arestia',
'Sono Mono, Nochi Ni' : 'Sono Mono, Nochi Ni',
'29-sai dokushin wa isekai' : '29-sai dokushin wa isekai',
'sono mono, nochi ni... 2' : 'sono mono, nochi ni... Part 2',
'i was called incompetent' : 'Munou To Yobareta Ore, Yottsu No Chikara Wo Eru',
'Arifureta' : 'Arifureta',
'One-eyed female General' : 'One-eyed Female General and the Harem',
}
for tag, sname in tagmap.items():
if tag in item['tags']:
return buildReleaseMessageWithType(item, sname, vol, chp, frag=frag)
titlemap = [
('Takami no Kago ch', 'Takami no Kago', 'translated'),
('Gacha Girl Corps', 'Gacha Girl Corps', 'translated'),
]
for titlecomponent, name, tl_type in titlemap:
if titlecomponent.lower() in item['title'].lower():
return buildReleaseMessageWithType(item, name, vol, chp, frag=frag, postfix=postfix, tl_type=tl_type)
    return False
| fake-name/ReadableWebProxy | WebMirror/management/rss_parser_funcs/feed_parse_extractNegaTranslations.py | Python | bsd-3-clause | 1589 |
# Copyright (c) 2016 RIPE NCC
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import webbrowser
from .base import Command as BaseCommand
from ..helpers.validators import ArgumentType
class Command(BaseCommand):
NAME = "go"
DESCRIPTION = "Visit the web page for a specific measurement"
URL = "https://atlas.ripe.net/measurements/{0}/"
def add_arguments(self):
self.parser.add_argument(
"measurement_id",
type=ArgumentType.msm_id_or_name(),
help="The measurement id or alias you want reported",
)
def run(self):
url = self.URL.format(self.arguments.measurement_id)
if not webbrowser.open(url):
self.ok(
"It looks like your system doesn't have a web browser "
"available. You'll have to go there manually: {0}".format(url)
)
| RIPE-NCC/ripe-atlas-tools | ripe/atlas/tools/commands/go.py | Python | gpl-3.0 | 1,462 |
# -*- coding: utf-8 -*-
# Generated by Django 1.11.5 on 2018-03-28 17:49
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('lotes', '0009_remove_modelotermica_receita'),
]
operations = [
migrations.CreateModel(
name='Lote',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('lote', models.CharField(max_length=20, verbose_name='lote')),
('op', models.IntegerField(blank=True, null=True, verbose_name='OP')),
('referencia', models.CharField(max_length=5, verbose_name='Referência')),
('tamanho', models.CharField(max_length=3, verbose_name='Tamanho')),
('cor', models.CharField(max_length=6, verbose_name='Cor')),
('qtd_produzir', models.IntegerField(verbose_name='quantidade')),
('create_at', models.DateTimeField(blank=True, null=True, verbose_name='criado em')),
('update_at', models.DateTimeField(blank=True, null=True, verbose_name='alterado em')),
],
options={
'db_table': 'fo2_cd_lote',
'verbose_name': 'lote',
},
),
]
| anselmobd/fo2 | src/lotes/migrations/0010_lote.py | Python | mit | 1,354 |
from models import BaseModel
class ApplicationAccessModel(BaseModel):
table = "applications_access"
db = "console"
fields={
"access_type":True,
"access_content":True,
"container_id":True,
"container_host":True,
"container_name":True,
"user_id":True,
"response":True,
"status":True,
"logs":True,
"update_time":True,
'create_time':True
    }
| liuhong1happy/DockerConsoleApp | models/application_access.py | Python | apache-2.0 | 445 |
"""SCons.Scanner
The Scanner package for the SCons software construction utility.
"""
#
# Copyright (c) 2001 - 2019 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "src/engine/SCons/Scanner/__init__.py bee7caf9defd6e108fc2998a2520ddb36a967691 2019-12-17 02:07:09 bdeegan"
import re
import SCons.Node.FS
import SCons.Util
import SCons.Warnings
class _Null(object):
pass
# This is used instead of None as a default argument value so None can be
# used as an actual argument value.
_null = _Null
def Scanner(function, *args, **kw):
"""
Public interface factory function for creating different types
of Scanners based on the different types of "functions" that may
be supplied.
TODO: Deprecate this some day. We've moved the functionality
inside the Base class and really don't need this factory function
any more. It was, however, used by some of our Tool modules, so
the call probably ended up in various people's custom modules
patterned on SCons code.
"""
if SCons.Util.is_Dict(function):
return Selector(function, *args, **kw)
else:
return Base(function, *args, **kw)
class FindPathDirs(object):
"""
A class to bind a specific E{*}PATH variable name to a function that
will return all of the E{*}path directories.
"""
def __init__(self, variable):
self.variable = variable
def __call__(self, env, dir=None, target=None, source=None, argument=None):
import SCons.PathList
try:
path = env[self.variable]
except KeyError:
return ()
import os
dir = dir or os.getcwd()
path = SCons.PathList.PathList(path).subst_path(env, target, source)
return tuple(path)
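# Hedged illustration of FindPathDirs: handed to a scanner as its
# path_function, it defers the $CPPPATH-style lookup to scan time:
#
#   path_func = FindPathDirs('CPPPATH')
#   dirs = path_func(env)  # tuple of substituted include directories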
class Base(object):
"""
The base class for dependency scanners. This implements
straightforward, single-pass scanning of a single file.
"""
def __init__(self,
function,
name = "NONE",
argument = _null,
skeys = _null,
path_function = None,
# Node.FS.Base so that, by default, it's okay for a
# scanner to return a Dir, File or Entry.
node_class = SCons.Node.FS.Base,
node_factory = None,
scan_check = None,
recursive = None):
"""
Construct a new scanner object given a scanner function.
'function' - a scanner function taking two or three
arguments and returning a list of strings.
'name' - a name for identifying this scanner object.
'argument' - an optional argument that, if specified, will be
passed to both the scanner function and the path_function.
'skeys' - an optional list argument that can be used to determine
which scanner should be used for a given Node. In the case of File
nodes, for example, the 'skeys' would be file suffixes.
'path_function' - a function that takes four or five arguments
(a construction environment, Node for the directory containing
the SConscript file that defined the primary target, list of
target nodes, list of source nodes, and optional argument for
this instance) and returns a tuple of the directories that can
be searched for implicit dependency files. May also return a
callable() which is called with no args and returns the tuple
(supporting Bindable class).
'node_class' - the class of Nodes which this scan will return.
If node_class is None, then this scanner will not enforce any
Node conversion and will return the raw results from the
underlying scanner function.
'node_factory' - the factory function to be called to translate
the raw results returned by the scanner function into the
expected node_class objects.
'scan_check' - a function to be called to first check whether
this node really needs to be scanned.
'recursive' - specifies that this scanner should be invoked
recursively on all of the implicit dependencies it returns
(the canonical example being #include lines in C source files).
May be a callable, which will be called to filter the list
of nodes found to select a subset for recursive scanning
(the canonical example being only recursively scanning
subdirectories within a directory).
The scanner function's first argument will be a Node that should
be scanned for dependencies, the second argument will be an
Environment object, the third argument will be the tuple of paths
returned by the path_function, and the fourth argument will be
the value passed into 'argument', and the returned list should
contain the Nodes for all the direct dependencies of the file.
Examples:
s = Scanner(my_scanner_function)
s = Scanner(function = my_scanner_function)
s = Scanner(function = my_scanner_function, argument = 'foo')
"""
# Note: this class could easily work with scanner functions that take
# something other than a filename as an argument (e.g. a database
# node) and a dependencies list that aren't file names. All that
# would need to be changed is the documentation.
self.function = function
self.path_function = path_function
self.name = name
self.argument = argument
if skeys is _null:
if SCons.Util.is_Dict(function):
skeys = list(function.keys())
else:
skeys = []
self.skeys = skeys
self.node_class = node_class
self.node_factory = node_factory
self.scan_check = scan_check
if callable(recursive):
self.recurse_nodes = recursive
elif recursive:
self.recurse_nodes = self._recurse_all_nodes
else:
self.recurse_nodes = self._recurse_no_nodes
def path(self, env, dir=None, target=None, source=None):
if not self.path_function:
return ()
if self.argument is not _null:
return self.path_function(env, dir, target, source, self.argument)
else:
return self.path_function(env, dir, target, source)
def __call__(self, node, env, path=()):
"""
This method scans a single object. 'node' is the node
that will be passed to the scanner function, and 'env' is the
environment that will be passed to the scanner function. A list of
direct dependency nodes for the specified node will be returned.
"""
if self.scan_check and not self.scan_check(node, env):
return []
        self = self.select(node)
        if self is None:
            return []
if self.argument is not _null:
node_list = self.function(node, env, path, self.argument)
else:
node_list = self.function(node, env, path)
kw = {}
if hasattr(node, 'dir'):
kw['directory'] = node.dir
node_factory = env.Entry
nodes = []
for l in node_list:
l = node_factory(str(l))
nodes.append(l)
return nodes
def __eq__(self, other):
try:
return self.__dict__ == other.__dict__
except AttributeError:
# other probably doesn't have a __dict__
return self.__dict__ == other
def __hash__(self):
return id(self)
def __str__(self):
return self.name
def add_skey(self, skey):
"""Add a skey to the list of skeys"""
self.skeys.append(skey)
def get_skeys(self, env=None):
if env and SCons.Util.is_String(self.skeys):
return env.subst_list(self.skeys)[0]
return self.skeys
def select(self, node):
if SCons.Util.is_Dict(self.function):
key = node.scanner_key()
try:
return self.function[key]
except KeyError:
return None
else:
return self
def _recurse_all_nodes(self, nodes):
return nodes
def _recurse_no_nodes(self, nodes):
return []
# recurse_nodes = _recurse_no_nodes
def add_scanner(self, skey, scanner):
self.function[skey] = scanner
self.add_skey(skey)
class Selector(Base):
"""
A class for selecting a more specific scanner based on the
scanner_key() (suffix) for a specific Node.
TODO: This functionality has been moved into the inner workings of
the Base class, and this class will be deprecated at some point.
(It was never exposed directly as part of the public interface,
although it is used by the Scanner() factory function that was
used by various Tool modules and therefore was likely a template
for custom modules that may be out there.)
"""
def __init__(self, dict, *args, **kw):
Base.__init__(self, None, *args, **kw)
self.dict = dict
self.skeys = list(dict.keys())
def __call__(self, node, env, path=()):
return self.select(node)(node, env, path)
def select(self, node):
try:
return self.dict[node.scanner_key()]
except KeyError:
return None
def add_scanner(self, skey, scanner):
self.dict[skey] = scanner
self.add_skey(skey)
class Current(Base):
"""
A class for scanning files that are source files (have no builder)
or are derived files and are current (which implies that they exist,
either locally or in a repository).
"""
def __init__(self, *args, **kw):
def current_check(node, env):
return not node.has_builder() or node.is_up_to_date()
kw['scan_check'] = current_check
Base.__init__(self, *args, **kw)
class Classic(Current):
"""
A Scanner subclass to contain the common logic for classic CPP-style
include scanning, but which can be customized to use different
regular expressions to find the includes.
Note that in order for this to work "out of the box" (without
overriding the find_include() and sort_key() methods), the regular
expression passed to the constructor must return the name of the
include file in group 0.
"""
def __init__(self, name, suffixes, path_variable, regex, *args, **kw):
self.cre = re.compile(regex, re.M)
def _scan(node, _, path=(), self=self):
if not node.exists():
return []
return self.scan(node, path)
kw['function'] = _scan
kw['path_function'] = FindPathDirs(path_variable)
# Allow recursive to propagate if child class specifies.
# In this case resource scanner needs to specify a filter on which files
# get recursively processed. Previously was hardcoded to 1 instead of
# defaulted to 1.
kw['recursive'] = kw.get('recursive', 1)
kw['skeys'] = suffixes
kw['name'] = name
Current.__init__(self, *args, **kw)
def find_include(self, include, source_dir, path):
n = SCons.Node.FS.find_file(include, (source_dir,) + tuple(path))
return n, include
def sort_key(self, include):
return SCons.Node.FS._my_normcase(include)
    def find_include_names(self, node):
        return self.cre.findall(node.get_contents())
def scan(self, node, path=()):
includes = self.find_include_names(node)
# This is a hand-coded DSU (decorate-sort-undecorate, or
# Schwartzian transform) pattern. The sort key is the raw name
        # of the file as specified on the #include line (including the
# " or <, since that may affect what file is found), which lets
# us keep the sort order constant regardless of whether the file
# is actually found in a Repository or locally.
nodes = []
source_dir = node.dir
if callable(path):
path = path()
for include in includes:
n, i = self.find_include(include, source_dir, path)
if n is None:
SCons.Warnings.warn(SCons.Warnings.DependencyWarning,
"No dependency generated for file: %s (included from: %s) -- file not found" % (i, node))
else:
nodes.append((self.sort_key(include), n))
return [pair[1] for pair in sorted(nodes)]
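# Hedged sketch of instantiating a Classic-style scanner; the regex must
# capture the included file name, and the suffixes/variable here are
# illustrative rather than SCons' built-in C scanner:
#
#   include_re = r'^#include\s+"(\S+)"'
#   my_scan = Classic('MyCScan', ['.c', '.h'], 'CPPPATH', include_re)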
class ClassicCPP(Classic):
"""
A Classic Scanner subclass which takes into account the type of
bracketing used to include the file, and uses classic CPP rules
for searching for the files based on the bracketing.
Note that in order for this to work, the regular expression passed
to the constructor must return the leading bracket in group 0, and
the contained filename in group 1.
"""
def find_include(self, include, source_dir, path):
include = list(map(SCons.Util.to_str, include))
if include[0] == '"':
paths = (source_dir,) + tuple(path)
else:
paths = tuple(path) + (source_dir,)
n = SCons.Node.FS.find_file(include[1], paths)
i = SCons.Util.silent_intern(include[1])
return n, i
def sort_key(self, include):
return SCons.Node.FS._my_normcase(' '.join(include))
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
| loonycyborg/scons-plusplus | python_modules/Scanner/__init__.py | Python | lgpl-3.0 | 14,608 |
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
from neutron_lib.db import model_base
import sqlalchemy as sa
from sqlalchemy import orm
from neutron.db import standard_attr
class Tag(model_base.BASEV2):
standard_attr_id = sa.Column(
sa.BigInteger().with_variant(sa.Integer(), 'sqlite'),
sa.ForeignKey(standard_attr.StandardAttribute.id, ondelete="CASCADE"),
nullable=False, primary_key=True)
tag = sa.Column(sa.String(60), nullable=False, primary_key=True)
standard_attr = orm.relationship(
'StandardAttribute',
backref=orm.backref('tags', lazy='joined', viewonly=True))
| sebrandon1/neutron | neutron/db/models/tag.py | Python | apache-2.0 | 1,156 |
# Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class SomaticSniper(CMakePackage):
"""A tool to call somatic single nucleotide variants."""
homepage = "https://gmt.genome.wustl.edu/packages/somatic-sniper"
url = "https://github.com/genome/somatic-sniper/archive/v1.0.5.0.tar.gz"
version('1.0.5.0', sha256='fc41e90237b059fcc591e404830c4b1be678642dd5afd76ce545b97b4b7b3de1')
depends_on('ncurses')
parallel = False
| LLNL/spack | var/spack/repos/builtin/packages/somatic-sniper/package.py | Python | lgpl-2.1 | 617 |
###############################################################################
#
# Copyright (C) 2001-2014 Micronaet SRL (<http://www.micronaet.it>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
###############################################################################
{
'name': 'Force Speech code in order',
'version': '0.1',
'category': 'Product',
'description': '''
Force generation of speech code in order
''',
'author': 'Micronaet S.r.l. - Nicola Riolini',
'website': 'http://www.micronaet.it',
'license': 'AGPL-3',
'depends': [
'base',
'product_speech_code',
'sale',
],
'init_xml': [],
'demo': [],
'data': [
'force_order_view.xml',
],
'active': False,
'installable': True,
'auto_install': False,
}
| Micronaet/micronaet-product | product_speech_code_order/__openerp__.py | Python | agpl-3.0 | 1,491 |
##########################################################################
## # The Coq Proof Assistant / The Coq Development Team ##
## v # INRIA, CNRS and contributors - Copyright 1999-2019 ##
## <O___,, # (see CREDITS file for the list of authors) ##
## \VV/ ###############################################################
## // # This file is distributed under the terms of the ##
## # GNU Lesser General Public License Version 2.1 ##
## # (see LICENSE file for the text of the license) ##
##########################################################################
"""
Parse Coq's ANSI output.
========================
Translated to Python from Coq's terminal.ml.
"""
# pylint: disable=too-many-return-statements, too-many-branches
def parse_color(style, offset):
color = style[offset] % 10
if color == 0:
return ("black", 1)
elif color == 1:
return ("red", 1)
elif color == 2:
return ("green", 1)
elif color == 3:
return ("yellow", 1)
elif color == 4:
return ("blue", 1)
elif color == 5:
return ("magenta", 1)
elif color == 6:
return ("cyan", 1)
elif color == 7:
return ("white", 1)
elif color == 9:
return ("default", 1)
    elif color == 8:
        nxt = style[offset + 1]
        if nxt == 5:
            # 256-color form "x8;5;N": the palette index is the *second*
            # following entry, and three entries are consumed in total.
            return ("index-{}".format(style[offset + 2]), 3)
        elif nxt == 2:
            # Direct-color form "x8;2;R;G;B": five entries are consumed.
            return ("rgb-{}-{}-{}".format(*style[offset+2:offset+5]), 5)
        else:
            raise ValueError("{}, {}".format(style, offset))
    else:
        raise ValueError("unsupported color code {} in {}".format(style[offset], style))
def parse_style(style, offset, acc):
    # offset is honored as the starting position (callers pass 0)
while offset < len(style):
head = style[offset]
if head == 0:
acc.append("reset")
elif head == 1:
acc.append("bold")
elif head == 3:
acc.append("italic")
elif head == 4:
acc.append("underline")
elif head == 7:
acc.append("negative")
elif head == 22:
acc.append("no-bold")
elif head == 23:
acc.append("no-italic")
elif head == 24:
acc.append("no-underline")
elif head == 27:
acc.append("no-negative")
else:
color, suboffset = parse_color(style, offset)
offset += suboffset - 1
if 30 <= head < 40:
acc.append("fg-{}".format(color))
elif 40 <= head < 50:
acc.append("bg-{}".format(color))
elif 90 <= head < 100:
acc.append("fg-light-{}".format(color))
elif 100 <= head < 110:
acc.append("bg-light-{}".format(color))
offset += 1
def parse_ansi(code):
"""Parse an ansi code into a collection of CSS classes.
:param code: A sequence of ‘;’-separated ANSI codes. Do not include the
leading ‘^[[’ or the final ‘m’
"""
classes = []
parse_style([int(c) for c in code.split(';')], 0, classes)
return ["ansi-" + cls for cls in classes]
if __name__ == '__main__':
# As produced by Coq with ‘Check nat.’
print(parse_ansi("92;49;22;23;24;27"))
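    # 256-color foreground, exercising the "38;5;N" branch of parse_color
    # (expected with the index handling above: ['ansi-fg-index-208'])
    print(parse_ansi("38;5;208"))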
| Matafou/coq | doc/tools/coqrst/repl/ansicolors.py | Python | lgpl-2.1 | 3,288 |
#!/usr/bin/python
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'network'}
DOCUMENTATION = '''
---
module: nxos_static_route
extends_documentation_fragment: nxos
version_added: "2.2"
short_description: Manages static route configuration
description:
- Manages static route configuration
author: Gabriele Gerbino (@GGabriele)
notes:
- Tested against NXOSv 7.3.(0)D1(1) on VIRL
- If no vrf is supplied, vrf is set to default.
- If C(state=absent), the route will be removed, regardless of the
non-required parameters.
options:
prefix:
description:
- Destination prefix of static route.
required: true
aliases:
- address
next_hop:
description:
- Next hop address or interface of static route.
If interface, it must be the fully-qualified interface name.
required: true
vrf:
description:
- VRF for static route.
default: default
tag:
description:
- Route tag value (numeric) or keyword 'default'.
route_name:
description:
- Name of the route or keyword 'default'. Used with the name parameter on the CLI.
pref:
description:
- Preference or administrative difference of route (range 1-255) or keyword 'default'.
aliases:
- admin_distance
aggregate:
description: List of static route definitions
version_added: 2.5
track:
description:
- Track value (range 1 - 512). Track must already be configured on the device before adding the route.
version_added: "2.8"
state:
description:
- Manage the state of the resource.
choices: ['present','absent']
default: 'present'
'''
EXAMPLES = '''
- nxos_static_route:
prefix: "192.168.20.64/24"
next_hop: "192.0.2.3"
route_name: testing
pref: 100
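# Illustrative aggregate form (addresses are placeholders):
- nxos_static_route:
    aggregate:
      - { prefix: "192.168.22.64/24", next_hop: "192.0.2.3" }
      - { prefix: "192.168.24.64/24", next_hop: "192.0.2.3" }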
'''
RETURN = '''
commands:
description: commands sent to the device
returned: always
type: list
sample: ["ip route 192.168.20.0/24 192.0.2.3 name testing 100"]
'''
import re
from copy import deepcopy
from ansible.module_utils.network.nxos.nxos import get_config, load_config, run_commands
from ansible.module_utils.network.nxos.nxos import nxos_argument_spec
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.network.common.config import CustomNetworkConfig
from ansible.module_utils.network.common.utils import remove_default_spec
def reconcile_candidate(module, candidate, prefix, w):
netcfg = CustomNetworkConfig(indent=2, contents=get_config(module))
state = w['state']
set_command = set_route_command(prefix, w, module)
remove_command = remove_route_command(prefix, w)
parents = []
commands = []
yrc = remove_command.replace('no ', '')
if w['vrf'] == 'default':
netcfg = str(netcfg).split('\n')
ncfg = []
for line in netcfg:
# remove ip route commands of non-default vrfs from
# the running config just in case the same commands
# exist in default and non-default vrfs
if ' ip route' not in line:
ncfg.append(line)
if any(yrc in s for s in ncfg) and state == 'absent':
commands = [remove_command]
elif set_command not in ncfg and state == 'present':
if any(yrc in s for s in ncfg):
commands = [remove_command, set_command]
else:
commands = [set_command]
else:
parents = ['vrf context {0}'.format(w['vrf'])]
config = netcfg.get_section(parents)
if not isinstance(config, list):
config = config.split('\n')
config = [line.strip() for line in config]
if any(yrc in s for s in config) and state == 'absent':
commands = [remove_command]
elif set_command not in config and state == 'present':
if any(yrc in s for s in config):
commands = [remove_command, set_command]
else:
commands = [set_command]
if commands:
candidate.add(commands, parents=parents)
def remove_route_command(prefix, w):
return 'no ip route {0} {1}'.format(prefix, w['next_hop'])
def get_configured_track(module, ctrack):
check_track = '{0}'.format(ctrack)
track_exists = False
command = 'show track'
try:
body = run_commands(module, [command])
match = re.findall(r'Track\s+(\d+)', body[0])
except IndexError:
return None
if check_track in match:
track_exists = True
return track_exists
def set_route_command(prefix, w, module):
route_cmd = 'ip route {0} {1}'.format(prefix, w['next_hop'])
if w['track']:
        if w['track'] in range(1, 513):  # docs above state the valid range as 1-512 inclusive
if get_configured_track(module, w['track']):
route_cmd += ' track {0}'.format(w['track'])
else:
module.fail_json(msg='Track {0} not configured on device'.format(w['track']))
else:
module.fail_json(msg='Invalid track number, valid range is 1-512.')
if w['route_name'] and w['route_name'] != 'default':
route_cmd += ' name {0}'.format(w['route_name'])
if w['tag']:
if w['tag'] != 'default' and w['tag'] != '0':
route_cmd += ' tag {0}'.format(w['tag'])
if w['pref'] and w['pref'] != 'default':
route_cmd += ' {0}'.format(w['pref'])
return route_cmd
def get_dotted_mask(mask):
bits = 0
for i in range(32 - mask, 32):
bits |= (1 << i)
mask = ("%d.%d.%d.%d" % ((bits & 0xff000000) >> 24, (bits & 0xff0000) >> 16, (bits & 0xff00) >> 8, (bits & 0xff)))
return mask
def get_network_start(address, netmask):
address = address.split('.')
netmask = netmask.split('.')
return [str(int(address[x]) & int(netmask[x])) for x in range(0, 4)]
def network_from_string(address, mask, module):
    octets = address.split('.')
    if len(octets) > 4:
        module.fail_json(msg='Incorrect address format.', address=address)
    for octet in octets:
        try:
            if int(octet) < 0 or int(octet) > 255:
module.fail_json(msg='Address may contain invalid values.',
address=address)
except ValueError:
module.fail_json(msg='Address may contain non-integer values.',
address=address)
try:
if int(mask) < 0 or int(mask) > 32:
module.fail_json(msg='Incorrect mask value.', mask=mask)
except ValueError:
module.fail_json(msg='Mask may contain non-integer values.', mask=mask)
netmask = get_dotted_mask(int(mask))
return '.'.join(get_network_start(address, netmask))
def normalize_prefix(module, prefix):
splitted_prefix = prefix.split('/')
address = splitted_prefix[0]
if len(splitted_prefix) > 2:
module.fail_json(msg='Incorrect address format.', address=address)
elif len(splitted_prefix) == 2:
mask = splitted_prefix[1]
network = network_from_string(address, mask, module)
normalized_prefix = str(network) + '/' + str(mask)
else:
normalized_prefix = prefix + '/' + str(32)
return normalized_prefix
def map_params_to_obj(module):
obj = []
aggregate = module.params.get('aggregate')
if aggregate:
for item in aggregate:
for key in item:
if item.get(key) is None:
item[key] = module.params[key]
d = item.copy()
obj.append(d)
else:
obj.append({
'prefix': module.params['prefix'],
'next_hop': module.params['next_hop'],
'vrf': module.params['vrf'],
'tag': module.params['tag'],
'route_name': module.params['route_name'],
'pref': module.params['pref'],
'state': module.params['state'],
'track': module.params['track']
})
return obj
def main():
element_spec = dict(
prefix=dict(type='str', aliases=['address']),
next_hop=dict(type='str'),
vrf=dict(type='str', default='default'),
tag=dict(type='str'),
route_name=dict(type='str'),
pref=dict(type='str', aliases=['admin_distance']),
state=dict(choices=['absent', 'present'], default='present'),
track=dict(type='int'),
)
aggregate_spec = deepcopy(element_spec)
aggregate_spec['prefix'] = dict(required=True)
aggregate_spec['next_hop'] = dict(required=True)
# remove default in aggregate spec, to handle common arguments
remove_default_spec(aggregate_spec)
argument_spec = dict(
aggregate=dict(type='list', elements='dict', options=aggregate_spec)
)
argument_spec.update(element_spec)
argument_spec.update(nxos_argument_spec)
module = AnsibleModule(
argument_spec=argument_spec,
supports_check_mode=True,
)
warnings = list()
result = {'changed': False, 'commands': []}
if warnings:
result['warnings'] = warnings
want = map_params_to_obj(module)
for w in want:
prefix = normalize_prefix(module, w['prefix'])
candidate = CustomNetworkConfig(indent=3)
reconcile_candidate(module, candidate, prefix, w)
if not module.check_mode and candidate:
candidate = candidate.items_text()
load_config(module, candidate)
result['commands'].extend(candidate)
result['changed'] = True
module.exit_json(**result)
if __name__ == '__main__':
main()
| romain-dartigues/ansible | lib/ansible/modules/network/nxos/nxos_static_route.py | Python | gpl-3.0 | 10,227 |
class KnownUsers:
"""
This deals with both the dedicated server log AND the xml "user" file, as related to the known users.
TODO: refactor this to have a distinct class for the dedicatedLog and the users.xml
"""
def __init__(self, existing_users_filename):
"""Setup init variables parsing the existing user xml."""
import xml.etree.ElementTree as ET
try:
known_users = ET.parse(existing_users_filename)
self.knownUsersRoot = known_users.getroot()
self.existingUsers = self.knownUsersRoot.findall("./user")
except IOError:
print('warning: no known users file: %s' % existing_users_filename)
return
    def getTodayLogins(self, serverLogLines):
        import re
        userLogins = {}
        user = {}
        # run through the server log lines, tracking each user by display name
        for line in serverLogLines:
            # 2015-11-10 19:51:36.696 - Thread: 8 -> OnConnectedClient mccorkle attempt
            if 'OnConnectedClient' in line:  # should catch only the FIRST login
                matchObject = re.match(r'(\d+-\d+-\d+)\s(\d+:\d+:\d+\.\d+).*OnConnectedClient\s(.*?)\sattempt', line)
                if matchObject:
                    # start a fresh record so a previously stored user is not clobbered
                    user = {
                        'loginTime': matchObject.group(1) + " " + matchObject.group(2),
                        'loginName': matchObject.group(3),
                    }
                    # print ("LOGIN: %s %s" % (user['loginName'], user['loginTime']))
            if 'loginName' in user and 'User left' in line and user['loginName'] in line:
                matchObject = re.match(r'(\d+-\d+-\d+)\s(\d+:\d+:\d+\.\d+)\s', line)
                if matchObject:
                    # overwrites until the last logout time seen for this user
                    user['logoutTime'] = matchObject.group(1) + " " + matchObject.group(2)
                    # print ("LOGOUT: %s %s" % (user['loginName'], user['logoutTime']))
                    userLogins[user['loginName']] = user
        return userLogins
def getExistingUsers(self):
return self.existingUsers
# return self.knownUsersRoot.findall("./user")
def getNewUsers(self, playersDict):
newUsers = {}
for player in playersDict:
# print ("checking player %s to see if he is in users" % player)
foundKnownUser = 0
for knownUser in self.knownUsersRoot.findall("./user"):
if knownUser.get('inGameID') == playersDict[player]["inGameID"]:
foundKnownUser = 1
# childBranch = knownUser
if foundKnownUser == 0:
# print ("** NEW USER")
newUsers[playersDict[player]["inGameID"]] = playersDict[player]
return newUsers
def updateUsersFile(self, playersDict, existingUsersFilename):
import xml.etree.ElementTree as ET
for player in playersDict:
# print ("checking player %s to see if he is in users" % player)
foundKnownUser = 0
for knownUser in self.knownUsersRoot.findall("./user"):
if knownUser.get('inGameID') == playersDict[player]["inGameID"]:
foundKnownUser = 1
if foundKnownUser == 0:
# print ("** NEW USER")
# add all the new users to the knownUsers file, so next time we read, they aren't new
child = ET.SubElement(self.knownUsersRoot, "user")
child.set("username", playersDict[player]["username"])
child.set("inGameID", playersDict[player]["inGameID"])
child.set("steamID", playersDict[player]["steamID"])
child.set("playerToolbarSlotCount", playersDict[player]["playerToolbarSlotCount"])
# firstSeen is a combination of loginTime || users.xml where we stored the first time we saw this user
# we don't have those inside of parsing users, so need to parse and class log file first
# child.set("firstSeen", today)
# child.set("loginTime", loginTime)
# child.set("logoutTime", logoutTime)
testOut = ET.ElementTree(self.knownUsersRoot)
testOut.write(existingUsersFilename, encoding='utf-8', xml_declaration=True)
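# Illustrative usage (file names are placeholders):
#     users = KnownUsers('users.xml')
#     with open('SpaceEngineersDedicated.log') as f:
#         logins = users.getTodayLogins(f.read().split('\n'))
#     users.updateUsersFile(playersDict, 'users.xml')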
| mccorkle/seds-utils | KnownUsers.py | Python | gpl-3.0 | 4,260 |
import atexit
from flask import Flask, jsonify, g, request, make_response
from flask_migrate import Migrate
from flask_sqlalchemy import SQLAlchemy
from flask_cors import CORS
from flask_jwt_extended import JWTManager, set_access_cookies, jwt_required, unset_jwt_cookies
from apscheduler.schedulers.background import BackgroundScheduler
import logging
logging.basicConfig()
app = Flask(__name__)
app.config.from_pyfile('./config/config.py')
db = SQLAlchemy(app)
CORS(app, supports_credentials=True)
migrate = Migrate(app, db)
jwt = JWTManager(app)
from tasks.next_reservation_check import check_all_users
from DB.User import User
from endpoints import reservation, user
scheduler = BackgroundScheduler(daemon=True)
scheduler.add_job(check_all_users, 'interval', hours=4)
scheduler.start()
# Shut down the scheduler when exiting the app
atexit.register(lambda: scheduler.shutdown())
@app.route('/')
def index():
return "Hello, this is an API, Swagger documentation will follow here..."
@app.route('/token')
def get_auth_token():
if not request.authorization:
response = make_response(jsonify({'error':'Login required'}))
response.headers.set('WWW-Authenticate', 'Basic realm="patklaey.ch"')
return response, 401
if not verify_password(request.authorization.username, request.authorization.password):
response = jsonify({'error':'Invalid username or password'})
return response, 401
token = g.user.generate_auth_token()
response = jsonify({'token': token})
set_access_cookies(response, token)
return response, 200
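# Illustrative client flow (host and credentials are placeholders):
#     curl -u alice:secret http://localhost:5000/token
# responds with {"token": "..."} and sets the JWT access cookie that
# protected endpoints expect.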
def verify_password(username, password):
user = User.query.filter_by(username=username, active=True).first()
if not user or not user.verify_password(password):
return False
g.user = user
return True
@app.route('/logout', methods=['POST'])
def logout():
resp = jsonify({'logout': True})
unset_jwt_cookies(resp)
return resp, 200
| patklaey/ZermattReservationAPI | main.py | Python | mit | 1,957 |
import logging
from pyramid.interfaces import IRequest
from openprocurement.auctions.dgf.models import (
IDgfOtherAssetsAuction,
IDgfFinancialAssetsAuction,
DGFOtherAssets,
DGFFinancialAssets
)
from openprocurement.auctions.dgf.adapters import (
AuctionDGFOtherAssetsConfigurator,
AuctionDGFFinancialAssetsConfigurator,
AuctionDGFOtherAssetsManagerAdapter,
AuctionDGFFinancialAssetsManagerAdapter
)
from openprocurement.auctions.core.plugins.awarding.v3.adapters import (
AwardingNextCheckV3
)
from openprocurement.auctions.core.includeme import (
IContentConfigurator,
IAwardingNextCheck
)
from openprocurement.auctions.core.interfaces import (
IAuctionManager
)
from openprocurement.auctions.dgf.constants import (
FINANCIAL_VIEW_LOCATIONS,
OTHER_VIEW_LOCATIONS,
DEFAULT_PROCUREMENT_METHOD_TYPE_OTHER,
DEFAULT_PROCUREMENT_METHOD_TYPE_FINANCIAL,
DEFAULT_LEVEL_OF_ACCREDITATION
)
LOGGER = logging.getLogger(__name__)
def includeme_other(config, plugin_config=None):
procurement_method_types = plugin_config.get('aliases', [])
if plugin_config.get('use_default', False):
procurement_method_types.append(DEFAULT_PROCUREMENT_METHOD_TYPE_OTHER)
for procurementMethodType in procurement_method_types:
config.add_auction_procurementMethodType(DGFOtherAssets,
procurementMethodType)
for view_location in OTHER_VIEW_LOCATIONS:
config.scan(view_location)
# Register adapters
config.registry.registerAdapter(
AuctionDGFOtherAssetsConfigurator,
(IDgfOtherAssetsAuction, IRequest),
IContentConfigurator
)
config.registry.registerAdapter(
AwardingNextCheckV3,
(IDgfOtherAssetsAuction, ),
IAwardingNextCheck
)
config.registry.registerAdapter(
AuctionDGFOtherAssetsManagerAdapter,
(IDgfOtherAssetsAuction, ),
IAuctionManager
)
LOGGER.info("Included openprocurement.auctions.dgf.financial plugin",
extra={'MESSAGE_ID': 'included_plugin'})
# add accreditation level
if not plugin_config.get('accreditation'):
config.registry.accreditation['auction'][DGFOtherAssets._internal_type] = DEFAULT_LEVEL_OF_ACCREDITATION
else:
config.registry.accreditation['auction'][DGFOtherAssets._internal_type] = plugin_config['accreditation']
def includeme_financial(config, plugin_config=None):
procurement_method_types = plugin_config.get('aliases', [])
if plugin_config.get('use_default', False):
procurement_method_types.append(
DEFAULT_PROCUREMENT_METHOD_TYPE_FINANCIAL
)
for procurementMethodType in procurement_method_types:
config.add_auction_procurementMethodType(DGFFinancialAssets,
procurementMethodType)
for view_location in FINANCIAL_VIEW_LOCATIONS:
config.scan(view_location)
# Register Adapters
config.registry.registerAdapter(
AuctionDGFFinancialAssetsConfigurator,
(IDgfFinancialAssetsAuction, IRequest),
IContentConfigurator
)
config.registry.registerAdapter(
AwardingNextCheckV3,
(IDgfFinancialAssetsAuction, ),
IAwardingNextCheck
)
config.registry.registerAdapter(
AuctionDGFFinancialAssetsManagerAdapter,
(IDgfFinancialAssetsAuction, ),
IAuctionManager
)
LOGGER.info("Included openprocurement.auctions.dgf.other plugin",
extra={'MESSAGE_ID': 'included_plugin'})
# add accreditation level
if not plugin_config.get('accreditation'):
config.registry.accreditation['auction'][DGFFinancialAssets._internal_type] = DEFAULT_LEVEL_OF_ACCREDITATION
else:
config.registry.accreditation['auction'][DGFFinancialAssets._internal_type] = plugin_config['accreditation']
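# Illustrative plugin_config consumed by the includeme_* functions above
# (values are placeholders):
#     {'aliases': ['dgfFinancialAssets'], 'use_default': True, 'accreditation': 3}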
| openprocurement/openprocurement.auctions.dgf | openprocurement/auctions/dgf/includeme.py | Python | apache-2.0 | 3,915 |
# Copyright 2014 Netflix, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
.. module: exceptions
:synopsis: Defines all security_monkey specific exceptions
.. version:: $$VERSION$$
.. moduleauthor:: Patrick Kelley <[email protected]> @monkeysecurity
"""
from security_monkey import app
class SecurityMonkeyException(Exception):
"""Base class for all security monkey exceptions."""
pass
class InvalidARN(SecurityMonkeyException):
"""Found an indecipherable ARN"""
def __init__(self, bad_arn):
self.bad_arn = bad_arn
app.logger.info(self)
def __str__(self):
return repr("Given an invalid ARN: {}".format(self.bad_arn))
class InvalidSourceOwner(SecurityMonkeyException):
"""Source Owners should be an integer representing an AWS account owner."""
def __init__(self, bad_source_owner):
self.bad_source_owner = bad_source_owner
app.logger.info(self)
def __str__(self):
return repr("Given an invalid SourceOwner: {}".format(self.bad_source_owner))
class InvalidAWSJSON(SecurityMonkeyException):
"""The JSON returned from AWS is not valid."""
def __init__(self, bad_json):
self.bad_json = bad_json
app.logger.info(self)
def __str__(self):
return repr("Could not parse invalid JSON from AWS:\n {}".format(self.bad_json))
class BotoConnectionIssue(SecurityMonkeyException):
"""Boto could not connect. This could be a permissions issue."""
def __init__(self, connection_message, tech, account, region):
self.connection_message = connection_message
self.tech = tech
self.account = account
self.region = region
app.logger.info(self)
def __str__(self):
return repr("Problem Connecting to {}/{}/{}:\n{}".format(
self.tech, self.account, self.region, self.connection_message))
class S3PermissionsIssue(SecurityMonkeyException):
"""Boto could not read metadata about an S3 bucket. Check permissions."""
def __init__(self, bucket_name):
self.bucket_name = bucket_name
app.logger.info(self)
def __str__(self):
return repr("AWS returned an exception while attempting "+
"to obtain information on a bucket I should "+
"have access to. Bucket Name: {}".format(self.bucket_name))
class S3ACLReturnedNoneDisplayName(SecurityMonkeyException):
"""The XML representation of an S3 ACL is not providing a proper DisplayName."""
def __init__(self, bucket_name):
self.bucket_name = bucket_name
app.logger.info(self)
def __str__(self):
return repr("AWS returned <DisplayName>None</DisplayName>"+
" in the output of bhandle.get_acl().to_xml()."+
" Bucket Name:{}".format(self.bucket_name))
class AWSRateLimitReached(SecurityMonkeyException):
"""Security Monkey is being throttled by AWS."""
def __init__(self, connection_message, tech, account, region):
self.connection_message = connection_message
self.tech = tech
self.account = account
self.region = region
app.logger.info(self)
def __str__(self):
return repr("Likely reached the AWS rate limit. {}/{}/{}:\n{}".format(
self.tech, self.account, self.region, self.connection_message))
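# Illustrative usage (argument values are placeholders):
#     raise BotoConnectionIssue(str(err), 'iamrole', account, region)
# Callers can catch the shared base class, SecurityMonkeyException.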
| JaguarSecurity/SMG | security_monkey/exceptions.py | Python | apache-2.0 | 3,893 |
from .namespace import NamespaceDeclGenerator
from .classes import (CXXRecordDeclGenerator,
DefinitionDataGenerator,
DefaultConstructorGenerator,
CopyConstructorGenerator,
MoveConstructorGenerator,
CopyAssignmentGenerator,
CXXMethodDeclGenerator,
CXXConstructorDeclGenerator,
VarDeclGenerator,
FieldDeclGenerator,
IndirectFieldDeclGenerator,
)
from .enums import (EnumDeclGenerator)
from .functions import (FunctionDeclGenerator)
from .tranlation_unit import TranslationUnitDeclGenerator
from .generator import Generator
| nak/pyllars | src/python/pyllars/cppparser/generation/clang/__init__.py | Python | apache-2.0 | 764 |
""" IO classes for Omnivor input file
Copyright (C) 2013 DTU Wind Energy
Author: Emmanuel Branlard
Email: [email protected]
Last revision: 25/11/2013
Namelist IO: basic functions to read and parse a Fortran file into a Python dictionary and write it back to a file
The parser was adapted from: fortran-namelist on Google Code with the following info:
__author__ = 'Stephane Chamberland ([email protected])'
__version__ = '$Revision: 1.0 $'[11:-2]
__date__ = '$Date: 2006/09/05 21:16:24 $'
__copyright__ = 'Copyright (c) 2006 RPN'
__license__ = 'LGPL'
Recognizes files of the form:
&namelistname
opt1 = value1
...
/
"""
from __future__ import print_function
from we_file_io import WEFileIO, TestWEFileIO
import unittest
import numpy as np
import os.path as path
import sys
import re
import tempfile
import os
__author__ = 'E. Branlard '
class FortranNamelistIO(WEFileIO):
"""
Fortran Namelist IO class
Scan a Fortran Namelist file and put Section/Parameters into a dictionary
Write the file back if needed.
"""
def _write(self):
""" Write a file (overrided)
"""
with open(self.filename, 'w') as f:
for nml in self.data :
f.write('&'+nml+'\n')
                # Sort parameters back into creation order using the stored 'id'
                SortedList = sorted(self.data[nml].items(), key=lambda item: item[1]['id'])
                for param, _ in SortedList:
                    f.write(param+'='+','.join(self.data[nml][param]['val']))
                    if len(self.data[nml][param]['com']) > 0:
                        f.write(' !'+self.data[nml][param]['com'])
                    f.write('\n')
                f.write('/\n')
def _read(self):
""" Read the file (overrided)
"""
with open(self.filename, 'r') as f:
data = f.read()
varname = r'\b[a-zA-Z][a-zA-Z0-9_]*\b'
valueInt = re.compile(r'[+-]?[0-9]+')
valueReal = re.compile(r'[+-]?([0-9]+\.[0-9]*|[0-9]*\.[0-9]+)')
valueNumber = re.compile(r'\b(([\+\-]?[0-9]+)?\.)?[0-9]*([eE][-+]?[0-9]+)?')
valueBool = re.compile(r"(\.(true|false|t|f)\.)",re.I)
valueTrue = re.compile(r"(\.(true|t)\.)",re.I)
spaces = r'[\s\t]*'
quote = re.compile(r"[\s\t]*[\'\"]")
namelistname = re.compile(r"^[\s\t]*&(" + varname + r")[\s\t]*$")
paramname = re.compile(r"[\s\t]*(" + varname+r')[\s\t]*=[\s\t]*')
namlistend = re.compile(r"^" + spaces + r"/" + spaces + r"$")
#split sections/namelists
mynmlfile = {}
mynmlfileRaw = {}
mynmlname = ''
for item in FortranNamelistIO.clean(data.split("\n"),cleancomma=1):
if re.match(namelistname,item):
mynmlname = re.sub(namelistname,r"\1",item)
mynmlfile[mynmlname] = {}
mynmlfileRaw[mynmlname] = []
elif re.match(namlistend,item):
mynmlname = ''
else:
if mynmlname:
mynmlfileRaw[mynmlname].append(item)
#parse param in each section/namelist
for mynmlname in mynmlfile.keys():
#split strings
bb = []
for item in mynmlfileRaw[mynmlname]:
if item[0]!='!':
# discarding lines that starts with a comment
bb.extend(FortranNamelistIO.splitstring(item))
#split comma and =
aa = []
for item in bb:
if not re.match(quote,item):
aa.extend(re.sub(r"[\s\t]*=",r" =\n",re.sub(r",+",r"\n",item)).split("\n"))
# aa.extend(re.sub(r"[\s\t]*=",r" =\n",item).split("\n"))
else:
aa.append(item)
del(bb)
aa = FortranNamelistIO.clean(aa,cleancomma=1)
myparname = ''
id_cum=0
for item in aa:
if re.search(paramname,item):
#myparname = re.sub(paramname,r"\1",item).lower() ! NO MORE LOWER CASE
myparname = re.sub(paramname,r"\1",item)
id_cum=id_cum+1
mynmlfile[mynmlname][myparname] = {
'val' : [],
'id' : id_cum,
'com' : ''
}
elif paramname:
# Storing comments
item2=item.split('!')
item=item2[0]
if len(item) > 1 :
mynmlfile[mynmlname][myparname]['com']=''.join(item2[1:])
if re.match(valueBool,item):
if re.match(valueTrue,item):
mynmlfile[mynmlname][myparname]['val'].append('.true.')
else:
mynmlfile[mynmlname][myparname]['val'].append('.false.')
else:
# item2=re.sub(r"(^[\'\"]|[\'\"]$)",r"",item.strip())
mynmlfile[mynmlname][myparname]['val'].append(item.strip())
self.data=mynmlfile
# Accessor and mutator dictionary style
def __getitem__(self, key):
""" Transform the class instance into a dictionary."""
return self.data[key]
def __setitem__(self, key, value):
""" Transform the class instance into a dictionary."""
self.data[key] = value
#==== Helper functions for Parsing of files
@staticmethod
def clean(mystringlist,commentexpr=r"^[\s\t]*\#.*$",spacemerge=0,cleancomma=0):
"""
Remove leading and trailing blanks, comments/empty lines from a list of strings
mystringlist = foo.clean(mystringlist,spacemerge=0,commentline=r"^[\s\t]*\#",cleancharlist="")
commentline: definition of commentline
spacemerge: if <>0, merge/collapse multi space
cleancomma: Remove leading and trailing commas
"""
aa = mystringlist
if cleancomma:
aa = [re.sub("(^([\s\t]*\,)+)|((\,[\s\t]*)+$)","",item).strip() for item in aa]
if commentexpr:
aa = [re.sub(commentexpr,"",item).strip() for item in aa]
if spacemerge:
            aa = [re.sub("[\s\t]+", " ", item).strip() for item in aa if len(item.strip()) != 0]
        else:
            aa = [item.strip() for item in aa if len(item.strip()) != 0]
return aa
@staticmethod
def splitstring(mystr):
"""
Split a string in a list of strings at quote boundaries
Input: String
Output: list of strings
"""
dquote=r'(^[^\"\']*)(\"[^"]*\")(.*)$'
squote=r"(^[^\"\']*)(\'[^']*\')(.*$)"
mystrarr = re.sub(dquote,r"\1\n\2\n\3",re.sub(squote,r"\1\n\2\n\3",mystr)).split("\n")
        # remove zero-length items
        mystrarr = [item for item in mystrarr if len(item) != 0]
if len(mystrarr) > 1:
mystrarr2 = []
for item in mystrarr:
mystrarr2.extend(FortranNamelistIO.splitstring(item))
mystrarr = mystrarr2
return mystrarr
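# Illustrative round-trip usage (file names are placeholders):
#     nml = FortranNamelistIO('input.nml')
#     nml['section']['param']['val']   # list of value strings for that parameter
#     nml.write('output.nml')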
## Do Some testing -------------------------------------------------------
class TestFortranNamelist(TestWEFileIO):
""" Test class for MyFileType class """
test_file = './test/fortran/fortran_namelist.nml'
def test_output_identical(self):
InputFile=FortranNamelistIO(self.test_file)
test_fileout=tempfile.mkstemp()[1]
InputFile.write(test_fileout)
with open(self.test_file, 'r') as f:
data_expected = f.read()
with open(test_fileout, 'r') as f:
data_read = f.read()
try:
self.assertMultiLineEqual(data_read, data_expected)
finally:
os.remove(test_fileout)
def test_duplication(self):
self._test_duplication(FortranNamelistIO, self.test_file)
## Main function ---------------------------------------------------------
if __name__ == '__main__':
""" This is the main fuction that will run the tests automatically
"""
unittest.main()
| DTUWindEnergy/Python4WindEnergy | py4we/fortran_namelist_io.py | Python | apache-2.0 | 8,294 |
# -*- test-case-name: foolscap.test.test_banana -*-
from twisted.internet import defer
from twisted.python import log
from foolscap.slicers.list import ListSlicer
from foolscap.slicers.tuple import TupleUnslicer
from foolscap.slicer import BaseUnslicer
from foolscap.tokens import Violation
from foolscap.constraint import OpenerConstraint, Any, IConstraint
from foolscap.util import AsyncAND
class SetSlicer(ListSlicer):
opentype = ("set",)
trackReferences = True
slices = set
def sliceBody(self, streamable, banana):
for i in self.obj:
yield i
class FrozenSetSlicer(SetSlicer):
opentype = ("immutable-set",)
trackReferences = False
slices = frozenset
class _Placeholder:
pass
class SetUnslicer(BaseUnslicer):
# this is a lot like a list, but sufficiently different to make it not
# worth subclassing
opentype = ("set",)
debug = False
maxLength = None
itemConstraint = None
def setConstraint(self, constraint):
if isinstance(constraint, Any):
return
assert isinstance(constraint, SetConstraint)
self.maxLength = constraint.maxLength
self.itemConstraint = constraint.constraint
def start(self, count):
#self.opener = foo # could replace it if we wanted to
self.set = set()
self.count = count
if self.debug:
log.msg("%s[%d].start with %s" % (self, self.count, self.set))
self.protocol.setObject(count, self.set)
self._ready_deferreds = []
def checkToken(self, typebyte, size):
        if self.maxLength is not None and len(self.set) >= self.maxLength:
# list is full, no more tokens accepted
# this is hit if the max+1 item is a primitive type
raise Violation("the set is full")
if self.itemConstraint:
self.itemConstraint.checkToken(typebyte, size)
def doOpen(self, opentype):
# decide whether the given object type is acceptable here. Raise a
# Violation exception if not, otherwise give it to our opener (which
# will normally be the RootUnslicer). Apply a constraint to the new
# unslicer.
        if self.maxLength is not None and len(self.set) >= self.maxLength:
# this is hit if the max+1 item is a non-primitive type
raise Violation("the set is full")
if self.itemConstraint:
self.itemConstraint.checkOpentype(opentype)
unslicer = self.open(opentype)
if unslicer:
if self.itemConstraint:
unslicer.setConstraint(self.itemConstraint)
return unslicer
def update(self, obj, placeholder):
# obj has already passed typechecking
if self.debug:
log.msg("%s[%d].update: [%s]=%s" % (self, self.count,
placeholder, obj))
self.set.remove(placeholder)
self.set.add(obj)
return obj
def receiveChild(self, obj, ready_deferred=None):
if ready_deferred:
self._ready_deferreds.append(ready_deferred)
if self.debug:
log.msg("%s[%d].receiveChild(%s)" % (self, self.count, obj))
# obj could be a primitive type, a Deferred, or a complex type like
# those returned from an InstanceUnslicer. However, the individual
# object has already been through the schema validation process. The
# only remaining question is whether the larger schema will accept
# it.
        if self.maxLength is not None and len(self.set) >= self.maxLength:
# this is redundant
# (if it were a non-primitive one, it would be caught in doOpen)
# (if it were a primitive one, it would be caught in checkToken)
raise Violation("the set is full")
if isinstance(obj, defer.Deferred):
if self.debug:
log.msg(" adding my update[%d] to %s" % (len(self.set), obj))
# note: the placeholder isn't strictly necessary, but it will
# help debugging to see a _Placeholder sitting in the set when it
# shouldn't rather than seeing a set that is smaller than it
# ought to be. If a remote method ever sees a _Placeholder, then
# something inside Foolscap has broken.
placeholder = _Placeholder()
obj.addCallback(self.update, placeholder)
obj.addErrback(self.printErr)
self.set.add(placeholder)
else:
self.set.add(obj)
def printErr(self, why):
print "ERR!"
print why.getBriefTraceback()
log.err(why)
def receiveClose(self):
ready_deferred = None
if self._ready_deferreds:
ready_deferred = AsyncAND(self._ready_deferreds)
return self.set, ready_deferred
class FrozenSetUnslicer(TupleUnslicer):
opentype = ("immutable-set",)
def receiveClose(self):
obj_or_deferred, ready_deferred = TupleUnslicer.receiveClose(self)
if isinstance(obj_or_deferred, defer.Deferred):
def _convert(the_tuple):
return frozenset(the_tuple)
obj_or_deferred.addCallback(_convert)
else:
obj_or_deferred = frozenset(obj_or_deferred)
return obj_or_deferred, ready_deferred
class SetConstraint(OpenerConstraint):
"""The object must be a Set of some sort, with a given maximum size. To
accept sets of any size, use maxLength=None. All member objects must obey
the given constraint. By default this will accept both mutable and
immutable sets, if you want to require a particular type, set mutable= to
either True or False.
"""
# TODO: if mutable!=None, we won't throw out the wrong set type soon
# enough. We need to override checkOpenType to accomplish this.
opentypes = [("set",), ("immutable-set",)]
name = "SetConstraint"
def __init__(self, constraint, maxLength=None, mutable=None):
self.constraint = IConstraint(constraint)
self.maxLength = maxLength
self.mutable = mutable
def checkObject(self, obj, inbound):
if not isinstance(obj, (set, frozenset)):
raise Violation("not a set")
if (self.mutable == True and
not isinstance(obj, set)):
raise Violation("obj is a set, but not a mutable one")
if (self.mutable == False and
not isinstance(obj, frozenset)):
raise Violation("obj is a set, but not an immutable one")
if self.maxLength is not None and len(obj) > self.maxLength:
raise Violation("set is too large")
if self.constraint:
for o in obj:
self.constraint.checkObject(o, inbound)
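# Illustrative schema usage (shorthand constraints are adapted via IConstraint):
#     SetConstraint(int, maxLength=30)     # sets of ints with at most 30 members
#     SetConstraint(str, mutable=False)    # accept only immutable (frozen) sets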
| david415/foolscap | src/foolscap/slicers/set.py | Python | mit | 6,763 |
import unittest
from functools import partialmethod
class ServeTest(unittest.TestCase):
pass
_FIXTURES = [
('zoo/torchvision/resnet.py:Resnet50', )
]
for (name, *rest) in _FIXTURES:
    def do_model_test(self, path):
        # TODO(adamb) Load the model
        # TODO(adamb) For each instance method, run the example in Python
        # TODO(adamb) Export the model
        # TODO(adamb) Load the model
        # TODO(adamb) For each instance method, run the example in Python
        # TODO(adamb) Start the server
        # TODO(adamb) For each instance method, run the curl example
        # TODO(adamb) For each instance method, run the command line example
        # TODO(adamb) If example returns are given, check the result.
        self.skipTest("serving checks not implemented yet for %s" % path)
    # Register one generated test per fixture on ServeTest; the fixture's
    # first element doubles as the model path passed to do_model_test.
    setattr(ServeTest,
            'test_%s' % name,
            partialmethod(do_model_test, name, *rest))
if __name__ == '__main__':
unittest.main()
| ajbouh/tfi | tests/broken/serve_test.py | Python | mit | 1,028 |
#
# Copyright (c) 2013+ Anton Tyurin <[email protected]>
# Copyright (c) 2013+ Evgeny Safronov <[email protected]>
# Copyright (c) 2011-2014 Other contributors as noted in the AUTHORS file.
#
# This file is part of Cocaine-tools.
#
# Cocaine is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published
# by the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# Cocaine is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
import datetime
import itertools
import time
from operator import itemgetter
from tornado import gen
from cocaine.tools import actions, log
from cocaine.decorators import coroutine
from cocaine.tools.actions import app
__author__ = 'Evgeny Safronov <[email protected]>'
index_format = 'cocaine-%Y-%m-%d'
def parse_crashlog_day_format(day_string):
if not day_string:
return day_string
if 'today'.startswith(day_string):
return datetime.date.today().strftime(index_format)
elif 'yesterday'.startswith(day_string):
yesterday = datetime.date.today() - datetime.timedelta(days=1)
return yesterday.strftime(index_format)
else:
values_count = day_string.count("-")
if values_count == 0: # only day specified
today = datetime.date.today()
day = datetime.datetime.strptime(day_string, "%d").replace(year=today.year,
month=today.month)
return day.strftime(index_format)
elif values_count == 1: # day and month
day = datetime.datetime.strptime(day_string,
"%d-%m").replace(year=datetime.date.today().year)
return day.strftime(index_format)
elif values_count == 2: # the whole date
return datetime.datetime.strptime(day_string, "%d-%m-%Y").strftime(index_format)
raise ValueError("Invalid day format %s. Must be day-month-year|today|yesterday" % day_string)
def days_range(from_date, to_date, delta=datetime.timedelta(days=1)):
fdate = datetime.datetime.strptime(from_date, "%Y-%m-%d")
tdate = datetime.datetime.strptime(to_date, "%Y-%m-%d")
while fdate <= tdate:
yield fdate
fdate = fdate + delta
return
class List(actions.Storage):
def __init__(self, storage, name, day_string=''):
super(List, self).__init__(storage)
self.name = name
if not self.name:
raise ValueError('Please specify a crashlog name')
self.day = parse_crashlog_day_format(day_string)
@coroutine
def execute(self):
indexes = [self.name]
if self.day:
indexes.append(self.day)
channel = yield self.storage.find('crashlogs', indexes)
listing = yield channel.rx.get()
raise gen.Return(listing)
def _parseCrashlogs(crashlogs, timestamp=None):
def is_filter(arg):
return arg == timestamp if timestamp else True
    _list = (entry.split(':', 1) for entry in crashlogs)
return [(ts, time.ctime(float(ts) / 1000000), name) for ts, name in _list if is_filter(ts)]
class Specific(actions.Storage):
def __init__(self, storage, name, timestamp=None):
super(Specific, self).__init__(storage)
self.name = name
self.timestamp = timestamp
if not self.name:
raise ValueError('Please specify application name')
class View(Specific):
@coroutine
def execute(self):
channel = yield self.storage.find('crashlogs', [self.name])
crashlogs = yield channel.rx.get()
parsed_crashlogs = _parseCrashlogs(crashlogs, timestamp=self.timestamp)
contents = []
if not self.timestamp and parsed_crashlogs:
            # pick the most recent crashlog by its numeric timestamp
            parsed_crashlogs = [max(parsed_crashlogs, key=lambda item: int(item[0]))]
for crashlog in parsed_crashlogs:
key = '%s:%s' % (crashlog[0], crashlog[2])
channel = yield self.storage.read('crashlogs', key)
content = yield channel.rx.get()
contents.append(content)
raise gen.Return(''.join(contents))
class Remove(Specific):
@coroutine
def execute(self):
channel = yield self.storage.find('crashlogs', [self.name])
crashlogs = yield channel.rx.get()
parsed_crashlogs = _parseCrashlogs(crashlogs, timestamp=self.timestamp)
for crashlog in parsed_crashlogs:
try:
key = '%s:%s' % (crashlog[0], crashlog[2])
channel = yield self.storage.remove('crashlogs', key)
yield channel.rx.get()
except Exception as err:
log.error("unable to delete crashlog %s: %s", str(crashlog), err)
raise gen.Return('Done')
class RemoveAll(Remove):
def __init__(self, storage, name):
super(RemoveAll, self).__init__(storage, name, timestamp=None)
class Status(actions.Storage):
@coroutine
def execute(self):
applications = yield app.List(self.storage).execute()
crashed = []
for application in applications:
crashlogs = yield List(self.storage, application).execute()
if crashlogs:
                last = max(_parseCrashlogs(crashlogs), key=lambda item: int(item[0]))
crashed.append((application, last, len(crashlogs)))
raise gen.Return(crashed)
def splitted(collection, sep=None, maxsplit=None):
for item in collection:
yield item.split(sep, maxsplit)
def filtered(crashlogs):
for (ts, uuid) in splitted(crashlogs, ':', 1):
yield int(ts), uuid
class Clean(Specific):
def __init__(self, storage, name, size, timestamp=None):
super(Clean, self).__init__(storage, name, timestamp)
self.size = int(size)
@coroutine
def execute(self):
if not self.name:
apps = yield app.List(self.storage).execute()
else:
apps = [self.name]
result = []
if self.timestamp:
try:
dt = datetime.datetime.strptime(self.timestamp, '%Y-%m-%dT%H:%M:%S')
timestamp = int(time.mktime(dt.timetuple())) * 1000000 + dt.microsecond
except ValueError:
timestamp = int(self.timestamp)
for app_name in apps:
channel = yield self.storage.find('crashlogs', [app_name])
crashlogs = yield channel.rx.get()
                # accumulate matches across apps instead of overwriting per iteration
                result.extend(item for item in filtered(crashlogs) if item[0] < timestamp)
elif self.size > 0:
for app_name in apps:
channel = yield self.storage.find('crashlogs', [app_name])
crashlogs = yield channel.rx.get()
                # keep the newest `size` entries and collect the rest for removal
                # (the listing from rx.get() is used directly, as elsewhere in this module)
                result.extend(itertools.islice(
                    sorted(filtered(crashlogs), key=itemgetter(0), reverse=True),
                    self.size, None))
for crashlog in result:
            print('removing %d:%s' % crashlog)
channel = yield self.storage.remove('crashlogs', '%d:%s' % crashlog)
yield channel.rx.get()
raise gen.Return('Done')
class CleanRange(object):
def __init__(self, storage, from_day, to_day="yesterday"):
self.storage = storage
if not from_day:
raise ValueError("from-day must have a value")
# strip cocaine-
self.from_day = parse_crashlog_day_format(from_day)[len("cocaine-"):]
self.to_day = parse_crashlog_day_format(to_day)[len("cocaine-"):]
@coroutine
def execute(self):
listing = list()
for day in days_range(self.from_day, self.to_day):
tag = day.strftime(index_format)
items = yield (yield self.storage.find('crashlogs', [tag])).rx.get()
log.info("found %d crashlog(s) for %s", len(items), tag)
listing.extend(items)
log.info("there are %d crashlog(s)", len(listing))
        step = max(len(listing) // 100, 1)  # avoid modulo-by-zero on short listings
        for i, key in enumerate(listing, start=1):
            try:
                if not i % step:
                    log.info("(%d/%d) %d%% of crashlogs have been removed",
                             i, len(listing), 100 * i // len(listing))
yield (yield self.storage.remove('crashlogs', key)).rx.get()
except Exception as err:
log.error("unable to remove %s, %s", key, err)
| antmat/cocaine-tools | cocaine/tools/actions/crashlog.py | Python | lgpl-3.0 | 8,706 |
import os
import json
import inspect
import numpy
from IPython.core.display import HTML, Javascript, display
from jinja2 import Template
from pywr.core import Node
from pywr.core import Model, Input, Output, Link, Storage, StorageInput, StorageOutput
from pywr.nodes import NodeMeta
from pywr._component import Component
import pywr.domains
from pywr.parameters._parameters import get_parameter_from_registry
from .figures import *
# load javascript template for d3 graph
folder = os.path.dirname(__file__)
with open(os.path.join(folder, "draw_graph.js"), "r") as f:
draw_graph_template = Template(f.read())
with open(os.path.join(folder, "graph.css"), "r") as f:
draw_graph_css = f.read()
def pywr_model_to_d3_json(model, attributes=False):
"""
Convert a Pywr graph to a structure d3 can display
Parameters
----------
model : `pywr.core.Model`
attributes: bool (default=False)
If True, attribute data for each node is extract
"""
nodes = []
node_names = []
for node in model.graph.nodes():
if node.parent is None and node.virtual is False:
nodes.append(node)
node_names.append(node.name)
edges = []
for edge in model.graph.edges():
node_source, node_target = edge
# where a link is to/from a subnode, display a link to the parent instead
if node_source.parent is not None:
node_source = node_source.parent
if node_target.parent is not None:
node_target = node_target.parent
if node_source is node_target:
# link is between two subnodes
continue
index_source = node_names.index(node_source.name)
index_target = node_names.index(node_target.name)
edges.append({'source': index_source, 'target': index_target})
json_nodes = []
for n, node in enumerate(nodes):
node_dict = {"name": node.name}
classes = []
cls = node.__class__
classes.append(cls)
while True:
for base in cls.__bases__:
if issubclass(base, Node) and base is not Node:
classes.append(base)
if classes[-1] is cls:
break
else:
cls = classes[-1]
classes = classes[::-1]
node_dict["clss"] = [cls.__name__.lower() for cls in classes]
        try:
            node_dict["position"] = node.position["schematic"]
        except KeyError:
            pass
if attributes:
node_dict["attributes"] = get_node_attr(node)
json_nodes.append(node_dict)
graph = {
"nodes": json_nodes,
"links": edges}
return graph
def get_node_attr(node):
"""
Returns a dictionary that contains node attributes as strings
Parameters
----------
node : a pywr node object
"""
attrs = inspect.getmembers(node, lambda a:not(inspect.isroutine(a)))
attribute_data = []
for att in attrs:
attr_name, attr_val = att
if attr_name.startswith("_"):
continue
attr_type = type(attr_val).__name__
attrs_to_skip = ["component_attrs", "components", "color", "model", "input", "output",
"inputs", "outputs", "sub_domain", "sub_output", "sublinks", "visible",
"fully_qualified_name", "allow_isolated"]
if not attr_val or attr_name.lower() in attrs_to_skip:
continue
if isinstance(attr_val, Component):
attr_val = attr_val.name
if not attr_val:
attr_val = attr_type
else:
attr_val = attr_val + " - " + attr_type
if isinstance(attr_val, list):
new_vals = []
for val in attr_val:
val_name = str(val)
val_name = val_name.replace("[", "").replace("]", "")
new_vals.append(val_name)
attr_val = "".join(new_vals)
else:
attr_val = str(attr_val)
attribute_data.append({"attribute": attr_name, "value": attr_val})
return attribute_data
def pywr_json_to_d3_json(model, attributes=False):
"""
Converts a JSON file or a JSON-derived dict into structure that d3js can use.
Parameters
----------
model : dict or str
str inputs should be a path to a json file containing the model.
"""
if isinstance(model, str):
with open(model) as d:
model = json.load(d)
nodes = [node["name"] for node in model["nodes"]]
edges = []
for edge in model["edges"]:
sourceindex = nodes.index(edge[0])
targetindex = nodes.index(edge[1])
edges.append({'source': sourceindex, 'target': targetindex})
nodes = []
node_classes = create_node_class_trees()
for node in model["nodes"]:
json_node = {'name': node.pop("name"), 'clss': node_classes[node["type"].lower()]}
try:
json_node['position'] = node['position']['schematic']
except KeyError:
pass
if attributes:
json_node["attributes"] = []
for name, val in node.items():
if name == "type":
continue
attr_val = val
if isinstance(val, dict):
try:
attr_val = get_parameter_from_registry(val["type"]).__name__
except KeyError:
pass
                elif val in model.get("parameters", {}):
param = model["parameters"][val]
attr_type = get_parameter_from_registry(param["type"]).__name__
attr_val = attr_val + " - " + attr_type
attr_dict = {"attribute": name, "value": attr_val}
json_node["attributes"].append(attr_dict)
nodes.append(json_node)
graph = {
"nodes": nodes,
"links": edges}
return graph
def create_node_class_trees():
# create class tree for each node type
node_class_trees = {}
for name, cls in NodeMeta.node_registry.items():
classes = [cls]
while True:
for base in cls.__bases__:
if issubclass(base, Node) and base is not Node:
classes.append(base)
if classes[-1] is cls:
break
else:
cls = classes[-1]
clss = [cls.__name__.lower() for cls in classes[::-1]]
node_class_trees[name] = clss
return node_class_trees
def draw_graph(model, width=500, height=400, labels=False, attributes=False, css=None):
"""Display a Pywr model using D3 in Jupyter
Parameters
----------
model : pywr.core.Model or json-dict that describes a model
The model to display
width : int
The width of the svg canvas to draw the graph on
height : int
The height of the svg canvas to draw the graph on
labels : bool
If True, each graph node is labelled with its name. If false, the node names are displayed
during mouseover events
attributes : bool
If True, a table of node attributes is displayed during mouseover events
css : string
Stylesheet data to use instead of default
"""
js = _draw_graph(model, width, height, labels, attributes, css)
display(js)
def _draw_graph(model, width=500, height=400, labels=False, attributes=False, css=None):
"""Creates Javascript/D3 code for graph"""
if isinstance(model, Model):
graph = pywr_model_to_d3_json(model, attributes)
else:
graph = pywr_json_to_d3_json(model, attributes)
if css is None:
css = draw_graph_css
js = Javascript(
data=draw_graph_template.render(
graph=graph,
width=width,
height=height,
labels=labels,
attributes=attributes,
css=css.replace("\n","")
),
lib="http://d3js.org/d3.v3.min.js",
)
return js
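# Illustrative notebook usage (model file name is a placeholder):
#     from pywr.core import Model
#     model = Model.load("model.json")
#     draw_graph(model, labels=True, attributes=True)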
| snorfalorpagus/pywr | pywr/notebook/__init__.py | Python | gpl-3.0 | 8,048 |
from __future__ import print_function, absolute_import
import os
import numpy as np
import mdtraj as md
from ..utils.progressbar import ProgressBar, Percentage, Bar, ETA
from ..utils import verbosedump
from ..cmdline import NumpydocClassCommand, argument, exttype, stripquotestype
from ..dataset import dataset, MDTrajDataset
from ..featurizer import (AtomPairsFeaturizer, SuperposeFeaturizer,
DRIDFeaturizer, DihedralFeaturizer,
ContactFeaturizer, GaussianSolventFeaturizer,
KappaAngleFeaturizer, AlphaAngleFeaturizer,
RMSDFeaturizer, BinaryContactFeaturizer,
LogisticContactFeaturizer, VonMisesFeaturizer,
FunctionFeaturizer, RawPositionsFeaturizer,
SASAFeaturizer)
class FeaturizerCommand(NumpydocClassCommand):
_group = '1-Featurizer'
trjs = argument(
'--trjs', help='Glob pattern for trajectories',
default='', required=True, type=stripquotestype)
top = argument(
'--top', help='Path to topology file matching the trajectories', default='')
chunk = argument(
'--chunk',
help='''Chunk size for loading trajectories using mdtraj.iterload''',
default=10000, type=int)
out = argument(
'-o', '--out', help='''Path to save featurizer instance using
the pickle protocol''',
default='', type=exttype('.pkl'))
transformed = argument(
'--transformed',
help="Output path for transformed data",
type=exttype('/'), required=True)
stride = argument(
'--stride', default=1, type=int,
help='Load only every stride-th frame')
def start(self):
if os.path.exists(self.transformed):
self.error('File exists: %s' % self.transformed)
if os.path.exists(self.out):
self.error('File exists: %s' % self.out)
print(self.instance)
if self.top.strip() == "":
top = None
else:
top = os.path.expanduser(self.top)
err = "Couldn't find topology file '{}'".format(top)
assert os.path.exists(top), err
input_dataset = MDTrajDataset(self.trjs, topology=top, stride=self.stride, verbose=False)
out_dataset = input_dataset.create_derived(self.transformed, fmt='dir-npy')
pbar = ProgressBar(widgets=[Percentage(), Bar(), ETA()],
maxval=len(input_dataset)).start()
for key in pbar(input_dataset.keys()):
trajectory = []
for i, chunk in enumerate(input_dataset.iterload(key, chunk=self.chunk)):
trajectory.append(self.instance.partial_transform(chunk))
out_dataset[key] = np.concatenate(trajectory)
out_dataset.close()
print("\nSaving transformed dataset to '%s'" % self.transformed)
print("To load this dataset interactive inside an IPython")
print("shell or notebook, run\n")
print(" $ ipython")
print(" >>> from msmbuilder.dataset import dataset")
print(" >>> ds = dataset('%s')\n" % self.transformed)
if self.out != '':
verbosedump(self.instance, self.out)
print("To load this %s object interactively inside an IPython\n"
"shell or notebook, run: \n" % self.klass.__name__)
print(" $ ipython")
print(" >>> from msmbuilder.utils import load")
print(" >>> model = load('%s')\n" % self.out)
class DihedralFeaturizerCommand(FeaturizerCommand):
_concrete = True
klass = DihedralFeaturizer
example = '''
$ msmb DihedralFeaturizer --trjs './trajectories/*.h5' \\
--transformed dihedrals-withchi --types phi psi chi1
'''
class KappaAngleFeaturizerCommand(FeaturizerCommand):
_concrete = True
klass = KappaAngleFeaturizer
class AlphaAngleFeaturizerCommand(FeaturizerCommand):
_concrete = True
klass = AlphaAngleFeaturizer
class AtomPairsFeaturizerCommand(FeaturizerCommand):
klass = AtomPairsFeaturizer
_concrete = True
def _pair_indices_type(self, fn):
if fn is None:
return None
return np.loadtxt(fn, dtype=int, ndmin=2)
class RMSDFeaturizerCommand(FeaturizerCommand):
klass = RMSDFeaturizer
_concrete = True
def _reference_traj_type(self, fn):
if self.top.strip() == "":
top = None
else:
top = os.path.expanduser(self.top)
err = ("Couldn't find topology file '{}' "
"when loading reference trajectory".format(top))
assert os.path.exists(top), err
return md.load(fn, top=top)
def _atom_indices_type(self, fn):
if fn is None:
return None
return np.loadtxt(fn, dtype=int, ndmin=1)
class SuperposeFeaturizerCommand(FeaturizerCommand):
klass = SuperposeFeaturizer
_concrete = True
def _reference_traj_type(self, fn):
if self.top.strip() == "":
top = None
else:
top = os.path.expanduser(self.top)
err = ("Couldn't find topology file '{}' "
"when loading reference trajectory".format(top))
assert os.path.exists(top), err
return md.load(fn, top=top)
def _atom_indices_type(self, fn):
if fn is None:
return None
return np.loadtxt(fn, dtype=int, ndmin=1)
class DRIDFeaturizerCommand(FeaturizerCommand):
klass = DRIDFeaturizer
_concrete = True
def _atom_indices_type(self, fn):
if fn is None:
return None
return np.loadtxt(fn, dtype=int, ndmin=1)
class ContactFeaturizerCommand(FeaturizerCommand):
_concrete = True
klass = ContactFeaturizer
def _contacts_type(self, val):
if val == 'all':
return val
else:
return np.loadtxt(val, dtype=int, ndmin=2)
class BinaryContactFeaturizerCommand(FeaturizerCommand):
_concrete = True
klass = BinaryContactFeaturizer
def _contacts_type(self, val):
if val == 'all':
return val
else:
return np.loadtxt(val, dtype=int, ndmin=2)
class LogisticContactFeaturizerCommand(FeaturizerCommand):
_concrete = True
klass = LogisticContactFeaturizer
def _contacts_type(self, val):
if val == 'all':
return val
else:
return np.loadtxt(val, dtype=int, ndmin=2)
class GaussianSolventFeaturizerCommand(FeaturizerCommand):
_concrete = True
klass = GaussianSolventFeaturizer
def _solvent_indices_type(self, fn):
return np.loadtxt(fn, dtype=int, ndmin=1)
def _solute_indices_type(self, fn):
return np.loadtxt(fn, dtype=int, ndmin=1)
class VonMisesFeaturizerCommand(FeaturizerCommand):
_concrete = True
klass = VonMisesFeaturizer
class RawPositionsFeaturizerCommand(FeaturizerCommand):
klass = RawPositionsFeaturizer
_concrete = True
def _reference_traj_type(self, fn):
if self.top.strip() == "":
top = None
else:
top = os.path.expanduser(self.top)
err = ("Couldn't find topology file '{}' "
"when loading reference trajectory".format(top))
assert os.path.exists(top), err
return md.load(fn, top=top)
def _atom_indices_type(self, fn):
if fn is None:
return None
return np.loadtxt(fn, dtype=int, ndmin=1)
class SASAFeaturizerCommand(FeaturizerCommand):
_concrete = True
klass = SASAFeaturizer
| dr-nate/msmbuilder | msmbuilder/commands/featurizer.py | Python | lgpl-2.1 | 7,658 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
try:
from urllib.parse import urlparse
except ImportError:
from urlparse import urlparse
import logging
from .sites import foolSlide
from .sites import readcomicOnlineli
from .sites import comicNaver
from .sites import mangaHere
from .sites import rawSenManga
from .sites import mangaFox
from .sites import omgBeauPeep
from .sites import mangaReader
from .sites import mangaEden
from .sites import acQQ
from .sites import stripUtopia
from .sites import readComicBooksOnline
from .sites import readComicsWebsite
from .sites import batoto
from .sites import hqbr
from .sites import comicextra
from .sites import readComicsIO
from .sites import japscan
from .sites import manganelo
from .sites import webtoons
class Honcho(object):
def comic_language_resolver(self, language_code):
# Will return the Language Name corresponding to the language code.
language_dict = {
'0': 'English',
'1': 'Italian',
'2': 'Spanish',
'3': 'French',
'4': 'German',
'5': 'Portuguese',
'6': 'Turkish',
'7': 'Indonesian',
'8': 'Greek',
'9': 'Filipino',
'10': 'Polish',
'11': 'Thai',
'12': 'Malay',
'13 ': 'Hungarian',
'14': 'Romanian',
'15': ' Arabic',
'16': 'Hebrew',
'17': 'Russian',
'18': 'Vietnamese',
'19': 'Dutch',
'20': 'Bengali',
'21': 'Persian',
'22': 'Czech',
'23': 'Brazilian',
'24': 'Bulgarian',
'25': 'Danish',
'26': 'Esperanto',
'27': 'Swedish',
'28': 'Lithuanian',
'29': 'Other'
}
return language_dict[language_code]
def checker(self, comic_url, download_directory, chapter_range, **kwargs):
user_name = kwargs.get("username")
password = kwargs.get("password")
current_directory = kwargs.get("current_directory")
log_flag = kwargs.get("logger")
sorting = kwargs.get("sorting_order")
comic_language = kwargs.get("comic_language")
print_index = kwargs.get("print_index")
if log_flag is True:
logging.basicConfig(format='%(levelname)s: %(message)s', filename="Error Log.log", level=logging.DEBUG)
logging.debug("Comic Url : %s" % comic_url)
domain = urlparse(comic_url).netloc
logging.debug("Selected Domain : %s" % domain)
# Remove the "/" from ending to make checking URL for Full Series or Single Chapter easier.
if comic_url[-1] == "/":
comic_url = comic_url[:-1]
if domain in ["yomanga.co", "gomanga.co"]:
foolSlide.FoolSlide(manga_url=comic_url, logger=logging, current_directory=current_directory,
sorting_order=sorting, log_flag=log_flag, download_directory=download_directory,
chapter_range=chapter_range, conversion=kwargs.get("conversion"),
keep_files=kwargs.get("keep_files"))
return 0
elif domain in ["www.readcomiconline.li", "readcomiconline.li", "www.readcomicsonline.ru", "readcomicsonline.ru"]:
readcomicOnlineli.ReadComicOnlineLi(manga_url=comic_url, logger=logging,
current_directory=current_directory, sorting_order=sorting,
log_flag=log_flag, download_directory=download_directory,
chapter_range=chapter_range, conversion=kwargs.get("conversion"),
keep_files=kwargs.get("keep_files"),
image_quality=kwargs.get("image_quality"),
print_index=print_index)
return 0
elif domain in ["www.comic.naver.com", "comic.naver.com"]:
comicNaver.ComicNaver(manga_url=comic_url, logger=logging, current_directory=current_directory,
sorting_order=sorting, log_flag=log_flag, download_directory=download_directory,
chapter_range=chapter_range, conversion=kwargs.get("conversion"),
keep_files=kwargs.get("keep_files"),
print_index=print_index)
return 0
elif domain in ["www.mangahere.co", "mangahere.co", "www.mangahere.cc", "mangahere.cc"]:
mangaHere.MangaHere(manga_url=comic_url, logger=logging, current_directory=current_directory,
sorting_order=sorting, log_flag=log_flag, download_directory=download_directory,
chapter_range=chapter_range, conversion=kwargs.get("conversion"),
keep_files=kwargs.get("keep_files"),
print_index=print_index)
return 0
elif domain in ["www.raw.senmanga.com", "raw.senmanga.com"]:
rawSenManga.RawSenaManga(manga_url=comic_url, logger=logging, current_directory=current_directory,
sorting_order=sorting, log_flag=log_flag, download_directory=download_directory,
chapter_range=chapter_range, conversion=kwargs.get("conversion"),
keep_files=kwargs.get("keep_files"),
print_index=print_index)
return 0
elif domain in ["www.mangafox.me", "mangafox.me", "www.mangafox.la", "mangafox.la", "www.fanfox.net",
"fanfox.net"]:
mangaFox.MangaFox(manga_url=comic_url, logger=logging, current_directory=current_directory,
sorting_order=sorting, log_flag=log_flag, download_directory=download_directory,
chapter_range=chapter_range, conversion=kwargs.get("conversion"),
keep_files=kwargs.get("keep_files"),
print_index=print_index)
return 0
elif domain in ["www.omgbeaupeep.com", "omgbeaupeep.com", "www.otakusmash.com", "otakusmash.com"]:
omgBeauPeep.OmgBeauPeep(manga_url=comic_url, logger=logging, current_directory=current_directory,
sorting_order=sorting, log_flag=log_flag, download_directory=download_directory,
chapter_range=chapter_range, conversion=kwargs.get("conversion"),
keep_files=kwargs.get("keep_files"),
print_index=print_index)
return 0
# TODO KO --print-index -i http://ac.qq.com/Comic/comicInfo/id/547059?trace_id=907_27.156.162.231_1539265645 broken?
elif domain in ["www.ac.qq.com", "ac.qq.com"]:
acQQ.AcQq(manga_url=comic_url, logger=logging, current_directory=current_directory,
sorting_order=sorting, log_flag=log_flag, download_directory=download_directory,
chapter_range=chapter_range,
print_index=print_index)
return 0
elif domain in ["www.striputopija.blogspot.in", "striputopija.blogspot.in", "www.striputopija.blogspot.com",
"striputopija.blogspot.com"]:
stripUtopia.StripUtopia(manga_url=comic_url, logger=logging, current_directory=current_directory,
sorting_order=sorting, log_flag=log_flag, download_directory=download_directory,
chapter_range=chapter_range,
print_index=print_index)
return 0
elif domain in ["www.mangareader.net", "mangareader.net"]:
mangaReader.MangaReader(manga_url=comic_url, logger=logging, current_directory=current_directory,
sorting_order=sorting, log_flag=log_flag, download_directory=download_directory,
chapter_range=chapter_range, conversion=kwargs.get("conversion"),
keep_files=kwargs.get("keep_files"),
print_index=print_index)
return 0
elif domain in ["www.readcomicbooksonline.net", "readcomicbooksonline.net", "www.readcomicbooksonline.org",
"readcomicbooksonline.org"]:
readComicBooksOnline.ReadComicBooksOnline(manga_url=comic_url, logger=logging,
current_directory=current_directory, sorting_order=sorting,
log_flag=log_flag, download_directory=download_directory,
chapter_range=chapter_range, conversion=kwargs.get("conversion"),
keep_files=kwargs.get("keep_files"),
print_index=print_index)
return 0
# TODO KO seems broken
elif domain in ["www.readcomics.website", "readcomics.website"]:
readComicsWebsite.ReadComicsWebsite(manga_url=comic_url, logger=logging,
current_directory=current_directory, sorting_order=sorting,
log_flag=log_flag, download_directory=download_directory,
chapter_range=chapter_range, conversion=kwargs.get("conversion"),
keep_files=kwargs.get("keep_files"),
print_index=print_index)
return 0
elif domain in ["www.japscan.to"]:
japscan.Japscan(manga_url=comic_url, logger=logging, current_directory=current_directory,
sorting_order=sorting, log_flag=log_flag, download_directory=download_directory,
chapter_range=chapter_range, conversion=kwargs.get("conversion"),
keep_files=kwargs.get("keep_files"),
print_index=print_index)
return 0
elif domain in ["www.hqbr.com.br", "hqbr.com.br"]:
hqbr.Hqbr(manga_url=comic_url, logger=logging, current_directory=current_directory,
sorting_order=sorting, log_flag=log_flag, download_directory=download_directory,
chapter_range=chapter_range, conversion=kwargs.get("conversion"),
keep_files=kwargs.get("keep_files"),
print_index=print_index)
return 0
elif domain in ["www.comicextra.com", "comicextra.com"]:
comicextra.ComicExtra(manga_url=comic_url, logger=logging, current_directory=current_directory,
sorting_order=sorting, log_flag=log_flag, download_directory=download_directory,
chapter_range=chapter_range, conversion=kwargs.get("conversion"),
keep_files=kwargs.get("keep_files"),
print_index=print_index)
return 0
# TODO KO seems broken
elif domain in ["www.readcomics.io", "readcomics.io"]:
readComicsIO.ReadComicsIO(manga_url=comic_url, logger=logging, current_directory=current_directory,
sorting_order=sorting, log_flag=log_flag, download_directory=download_directory,
chapter_range=chapter_range, conversion=kwargs.get("conversion"),
keep_files=kwargs.get("keep_files"),
print_index=print_index)
return 0
elif domain in ["www.kissmanga.com", "kissmanga.com"]:
# kissManga.KissManga(manga_url = comic_url, logger = logging,
# current_directory = current_directory, sorting_order = sorting)
print("Under Development!")
return 0
elif domain in ["www.bato.to", "bato.to"]:
batoto.Batoto(manga_url=comic_url, logger=logging, current_directory=current_directory,
sorting_order=sorting, log_flag=log_flag, download_directory=download_directory,
chapter_range=chapter_range, conversion=kwargs.get("conversion"),
keep_files=kwargs.get("keep_files"), username=user_name, password=password,
comic_language=self.comic_language_resolver(comic_language),
print_index=print_index)
return 0
elif domain in ["manganelo.com", "mangakakalot.com", "manganato.com", "readmanganato.com"]:
manganelo.Manganelo(manga_url=comic_url, logger=logging, current_directory=current_directory,
sorting_order=sorting, log_flag=log_flag, download_directory=download_directory,
chapter_range=chapter_range, conversion=kwargs.get("conversion"),
keep_files=kwargs.get("keep_files"),
print_index=print_index)
return 0
elif domain in ["www.mangaeden.com"]:
if print_index:
print("please use -find and -cid instead!")
return -1
mangaEden.MangaEden(manga_url=comic_url, logger=logging, current_directory=current_directory,
sorting_order=sorting, log_flag=log_flag, download_directory=download_directory,
chapter_range=chapter_range, conversion=kwargs.get("conversion"),
keep_files=kwargs.get("keep_files"))
return 0
elif domain in ["www.webtoons.com", "webtoons.com"]:
webtoons.Webtoons(manga_url=comic_url, logger=logging, current_directory=current_directory,
sorting_order=sorting, log_flag=log_flag, download_directory=download_directory,
chapter_range=chapter_range, conversion=kwargs.get("conversion"),
keep_files=kwargs.get("keep_files"), image_quality=kwargs.get("image_quality"))
return 0
else:
print("%s is not supported at the moment. You can request it on the Github repository." % domain)
| Xonshiz/comic-dl | comic_dl/honcho.py | Python | mit | 14,699 |
# -*- coding: utf-8 -*-
#
# This file is part of Invenio.
# Copyright (C) 2015 CERN.
#
# Invenio is free software; you can redistribute it
# and/or modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# Invenio is distributed in the hope that it will be
# useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Invenio; if not, write to the
# Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston,
# MA 02111-1307, USA.
#
# In applying this license, CERN does not
# waive the privileges and immunities granted to it by virtue of its status
# as an Intergovernmental Organization or submit itself to any jurisdiction.
"""Invenio module that adds more fun to the platform."""
from __future__ import absolute_import, print_function
from flask_babelex import gettext as _
from .views.server import blueprint as server
from .views.settings import blueprint as settings
from . import config
class InvenioOAIServer(object):
"""Invenio-OAIServer extension."""
def __init__(self, app=None):
"""Extension initialization."""
_('A translation string')
if app:
self.init_app(app)
def init_app(self, app):
"""Flask application initialization."""
self.init_config(app)
app.register_blueprint(server)
app.register_blueprint(settings)
app.extensions['invenio-oaiserver'] = self
def init_config(self, app):
"""Initialize configuration."""
app.config.setdefault(
"OAISERVER_BASE_TEMPLATE",
#app.config.get("BASE_TEMPLATE",
# "invenio_oaiserver/base.html"))
app.config.get("BASE_TEMPLATE",
"invenio_oaiserver/settings/base.html"))
for k in dir(config):
if k.startswith('OAISERVER_'):
app.config.setdefault(k, getattr(config, k))
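# Hedged usage sketch (an addition, not part of the original module): the
# extension follows the standard Flask/Invenio pattern used by init_app
# above, so wiring it into an application amounts to:
#
#     from flask import Flask
#     app = Flask(__name__)
#     InvenioOAIServer(app)
#     assert 'invenio-oaiserver' in app.extensions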
| Dziolas/invenio-oaiserver | invenio_oaiserver/ext.py | Python | gpl-2.0 | 2,205 |
"""
Given a string, return the string made of its first two chars, so the String "Hello" yields\
"He". If the string is shorter than length 2, return whatever there is, so "X" yields \
"X", and the empty string "" yields the empty string "".
"""
def first_two(snip_string):
if len(snip_string) < 2:
snipped_string = snip_string
else:
snipped_string = str(snip_string[:2])
return snipped_string
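# Added observation (a hedged alternative, not part of the original
# exercise): Python slicing already returns whatever is available for short
# inputs, so the same behavior fits in a one-liner.
def first_two_alt(snip_string):
    return snip_string[:2]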
print(first_two('Hello')) # 'He'
print(first_two('abcdefg')) # 'ab'
print(first_two('ab')) # 'ab'
print(first_two('X'))  # 'X'
| Baumelbi/IntroPython2016 | students/sheree/session_01/homework/coding_bat_string-6.py | Python | unlicense | 554 |
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""The InverseGamma distribution class."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib.distributions.python.ops import distribution # pylint: disable=line-too-long
from tensorflow.contrib.framework.python.framework import tensor_util as contrib_tensor_util # pylint: disable=line-too-long
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
class InverseGamma(distribution.Distribution):
"""The `InverseGamma` distribution with parameter alpha and beta.
The parameters are the shape and inverse scale parameters alpha, beta.
The PDF of this distribution is:
```pdf(x) = (beta^alpha)/Gamma(alpha)(x^(-alpha-1))e^(-beta/x), x > 0```
and the CDF of this distribution is:
```cdf(x) = GammaInc(alpha, beta / x) / Gamma(alpha), x > 0```
where GammaInc is the upper incomplete Gamma function.
Examples:
```python
dist = InverseGamma(alpha=3.0, beta=2.0)
dist2 = InverseGamma(alpha=[3.0, 4.0], beta=[2.0, 3.0])
```
"""
def __init__(self,
alpha,
beta,
strict=True,
strict_statistics=True,
name="InverseGamma"):
"""Construct InverseGamma distributions with parameters `alpha` and `beta`.
The parameters `alpha` and `beta` must be shaped in a way that supports
broadcasting (e.g. `alpha + beta` is a valid operation).
Args:
alpha: `float` or `double` tensor, the shape params of the
distribution(s).
alpha must contain only positive values.
beta: `float` or `double` tensor, the scale params of the distribution(s).
beta must contain only positive values.
strict: Whether to assert that `a > 0, b > 0`, and that `x > 0` in the
methods `prob(x)` and `log_prob(x)`. If `strict` is False
and the inputs are invalid, correct behavior is not guaranteed.
strict_statistics: Boolean, default True. If True, raise an exception if
a statistic (e.g. mean/mode/etc...) is undefined for any batch member.
If False, batch members with valid parameters leading to undefined
statistics will return NaN for this statistic.
name: The name to prepend to all ops created by this distribution.
Raises:
TypeError: if `alpha` and `beta` are different dtypes.
"""
self._strict_statistics = strict_statistics
self._strict = strict
with ops.op_scope([alpha, beta], name) as scope:
self._name = scope
with ops.control_dependencies([check_ops.assert_positive(
alpha), check_ops.assert_positive(beta)] if strict else []):
alpha = array_ops.identity(alpha, name="alpha")
beta = array_ops.identity(beta, name="beta")
contrib_tensor_util.assert_same_float_dtype((alpha, beta))
self._broadcast_tensor = alpha + beta
self._get_batch_shape = self._broadcast_tensor.get_shape()
self._get_event_shape = tensor_shape.TensorShape([])
self._alpha = alpha
self._beta = beta
@property
def strict_statistics(self):
"""Boolean describing behavior when a stat is undefined for batch member."""
return self._strict_statistics
@property
def strict(self):
"""Boolean describing behavior on invalid input."""
return self._strict
@property
def name(self):
"""Name to prepend to all ops."""
return self._name
@property
def dtype(self):
"""dtype of samples from this distribution."""
return self._alpha.dtype
@property
def alpha(self):
"""Shape parameter."""
return self._alpha
@property
def beta(self):
"""Scale parameter."""
return self._beta
def batch_shape(self, name="batch_shape"):
"""Batch dimensions of this instance as a 1-D int32 `Tensor`.
The product of the dimensions of the `batch_shape` is the number of
independent distributions of this kind the instance represents.
Args:
name: name to give to the op
Returns:
`Tensor` `batch_shape`
"""
with ops.name_scope(self.name):
with ops.op_scope([self._broadcast_tensor], name):
return array_ops.shape(self._broadcast_tensor)
def get_batch_shape(self):
"""`TensorShape` available at graph construction time.
Same meaning as `batch_shape`. May be only partially defined.
Returns:
`TensorShape` object.
"""
return self._get_batch_shape
def event_shape(self, name="event_shape"):
"""Shape of a sample from a single distribution as a 1-D int32 `Tensor`.
Args:
name: name to give to the op
Returns:
`Tensor` `event_shape`
"""
with ops.name_scope(self.name):
with ops.op_scope([], name):
return constant_op.constant([], dtype=dtypes.int32)
def get_event_shape(self):
"""`TensorShape` available at graph construction time.
Same meaning as `event_shape`. May be only partially defined.
Returns:
`TensorShape` object.
"""
return self._get_event_shape
def mean(self, name="mean"):
"""Mean of each batch member.
The mean of an inverse gamma distribution is `beta / (alpha - 1)`,
when `alpha > 1`, and `NaN` otherwise. If `self.strict_statistics` is
`True`, an exception will be raised rather than returning `NaN`
Args:
name: A name to give this op.
Returns:
The mean for every batch member, a `Tensor` with same `dtype` as self.
"""
alpha = self._alpha
beta = self._beta
with ops.name_scope(self.name):
with ops.op_scope([alpha, beta], name):
mean_if_defined = beta / (alpha - 1.0)
if self.strict_statistics:
one = ops.convert_to_tensor(1.0, dtype=self.dtype)
return control_flow_ops.with_dependencies(
[check_ops.assert_less(one, alpha)], mean_if_defined)
else:
alpha_gt_1 = alpha > 1.0
nan = np.nan * self._ones()
return math_ops.select(alpha_gt_1, mean_if_defined, nan)
def mode(self, name="mode"):
"""Mode of each batch member.
The mode of an inverse gamma distribution is `beta / (alpha + 1)`.
Args:
name: A name to give this op.
Returns:
The mode for every batch member, a `Tensor` with same `dtype` as self.
"""
with ops.name_scope(self.name):
with ops.op_scope([self._alpha, self._beta], name):
return self._beta / (self._alpha + 1.0)
def variance(self, name="variance"):
"""Variance of each batch member.
Variance for inverse gamma is defined only for `alpha > 2`. If
`self.strict_statistics` is `True`, an exception will be raised rather
than returning `NaN`.
Args:
name: A name to give this op.
Returns:
The variance for every batch member, a `Tensor` with same `dtype` as self.
"""
alpha = self._alpha
beta = self._beta
with ops.name_scope(self.name):
with ops.op_scope([alpha, beta], name):
var_if_defined = (math_ops.square(self._beta) /
(math_ops.square(self._alpha - 1.0) *
(self._alpha - 2.0)))
if self.strict_statistics:
two = ops.convert_to_tensor(2.0, dtype=self.dtype)
return control_flow_ops.with_dependencies(
[check_ops.assert_less(two, alpha)], var_if_defined)
else:
alpha_gt_2 = alpha > 2.0
nan = np.nan * self._ones()
return math_ops.select(alpha_gt_2, var_if_defined, nan)
def log_prob(self, x, name="log_prob"):
"""Log prob of observations in `x` under these InverseGamma distribution(s).
Args:
x: tensor of dtype `dtype`, must be broadcastable with `alpha` and `beta`.
name: The name to give this op.
Returns:
log_prob: tensor of dtype `dtype`, the log-PDFs of `x`.
Raises:
TypeError: if `x` and `alpha` are different dtypes.
"""
with ops.name_scope(self.name):
with ops.op_scope([self._alpha, self._beta, x], name):
alpha = self._alpha
beta = self._beta
x = ops.convert_to_tensor(x)
x = control_flow_ops.with_dependencies([check_ops.assert_positive(x)] if
self.strict else [], x)
contrib_tensor_util.assert_same_float_dtype(tensors=[x,],
dtype=self.dtype)
return (alpha * math_ops.log(beta) - math_ops.lgamma(self._alpha) -
(alpha + 1) * math_ops.log(x) - beta / x)
def prob(self, x, name="prob"):
"""Pdf of observations in `x` under these Gamma distribution(s).
Args:
x: tensor of dtype `dtype`, must be broadcastable with `alpha` and `beta`.
name: The name to give this op.
Returns:
prob: tensor of dtype `dtype`, the PDFs of `x`
Raises:
TypeError: if `x` and `alpha` are different dtypes.
"""
return super(InverseGamma, self).prob(x, name)
def log_cdf(self, x, name="log_cdf"):
"""Log CDF of observations `x` under these InverseGamma distribution(s).
Args:
x: tensor of dtype `dtype`, must be broadcastable with `alpha` and `beta`.
name: The name to give this op.
Returns:
log_cdf: tensor of dtype `dtype`, the log-CDFs of `x`.
"""
with ops.name_scope(self.name):
with ops.op_scope([self._alpha, self._beta, x], name):
x = ops.convert_to_tensor(x)
x = control_flow_ops.with_dependencies([check_ops.assert_positive(x)] if
self.strict else [], x)
contrib_tensor_util.assert_same_float_dtype(tensors=[x,],
dtype=self.dtype)
# Note that igammac returns the upper regularized incomplete gamma
# function Q(a, x), which is what we want for the CDF.
return math_ops.log(math_ops.igammac(self._alpha, self._beta / x))
def cdf(self, x, name="cdf"):
"""CDF of observations `x` under these InverseGamma distribution(s).
Args:
x: tensor of dtype `dtype`, must be broadcastable with `alpha` and `beta`.
name: The name to give this op.
Returns:
cdf: tensor of dtype `dtype`, the CDFs of `x`.
"""
with ops.name_scope(self.name):
with ops.op_scope([self._alpha, self._beta, x], name):
return math_ops.igammac(self._alpha, self._beta / x)
def entropy(self, name="entropy"):
"""The entropy of these InverseGamma distribution(s).
This is defined to be
```
    entropy = alpha + log(beta) + log(Gamma(alpha))
              - (1 + alpha) * digamma(alpha)
```
where digamma(alpha) is the digamma function.
Args:
name: The name to give this op.
Returns:
entropy: tensor of dtype `dtype`, the entropy.
"""
with ops.name_scope(self.name):
with ops.op_scope([self._alpha, self._beta], name):
alpha = self._alpha
beta = self._beta
return (alpha + math_ops.log(beta) + math_ops.lgamma(alpha) -
(1 + alpha) * math_ops.digamma(alpha))
def sample(self, n, seed=None, name="sample"):
"""Draws `n` samples from these InverseGamma distribution(s).
See the doc for tf.random_gamma for further details on sampling strategy.
Args:
n: Python integer, the number of observations to sample from each
distribution.
seed: Python integer, the random seed for this operation.
name: Optional name for the operation.
Returns:
samples: a `Tensor` of shape `(n,) + self.batch_shape + self.event_shape`
with values of type `self.dtype`.
"""
with ops.name_scope(self.name):
with ops.op_scope([n, self._alpha, self._beta], name):
one = constant_op.constant(1.0, dtype=self.dtype)
return one / random_ops.random_gamma([n],
self._alpha,
beta=self._beta,
dtype=self.dtype,
seed=seed)
@property
def is_reparameterized(self):
return False
def _ones(self):
return array_ops.ones_like(self._alpha + self._beta, dtype=self.dtype)
@property
def is_continuous(self):
return True
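# Hedged usage sketch (an addition, not part of the original module); the
# session API below matches contrib-era TensorFlow, so treat this as
# illustrative rather than canonical.
if __name__ == "__main__":
  import tensorflow as tf

  dist = InverseGamma(alpha=3.0, beta=2.0)
  with tf.Session() as sess:
    # Evaluate a few statistics and draw samples in one run call.
    mean, var, samples = sess.run(
        [dist.mean(), dist.variance(), dist.sample(5, seed=42)])
    print("mean:", mean, "variance:", var, "samples:", samples)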
| HaebinShin/tensorflow | tensorflow/contrib/distributions/python/ops/inverse_gamma.py | Python | apache-2.0 | 13,335 |
# -*- encoding: UTF-8 -*-
'''Cartesian control: Torso and Foot trajectories'''
import sys
import motion
import almath
from naoqi import ALProxy
def StiffnessOn(proxy):
# We use the "Body" name to signify the collection of all joints
pNames = "Body"
pStiffnessLists = 1.0
pTimeLists = 1.0
proxy.stiffnessInterpolation(pNames, pStiffnessLists, pTimeLists)
def main(robotIP):
''' Example of a cartesian foot trajectory
Warning: Needs a PoseInit before executing
'''
try:
motionProxy = ALProxy("ALMotion", robotIP, 9559)
except Exception, e:
print "Could not create proxy to ALMotion"
print "Error was: ", e
try:
postureProxy = ALProxy("ALRobotPosture", robotIP, 9559)
except Exception, e:
print "Could not create proxy to ALRobotPosture"
print "Error was: ", e
# Set NAO in Stiffness On
StiffnessOn(motionProxy)
# Send NAO to Pose Init
postureProxy.goToPosture("StandInit", 0.5)
space = motion.FRAME_ROBOT
axisMask = almath.AXIS_MASK_ALL # full control
isAbsolute = False
# Lower the Torso and move to the side
effector = "Torso"
path = [0.0, -0.07, -0.03, 0.0, 0.0, 0.0]
time = 2.0 # seconds
motionProxy.positionInterpolation(effector, space, path,
axisMask, time, isAbsolute)
# LLeg motion
effector = "LLeg"
path = [0.0, 0.06, 0.00, 0.0, 0.0, 0.8]
times = 2.0 # seconds
motionProxy.positionInterpolation(effector, space, path,
axisMask, times, isAbsolute)
if __name__ == "__main__":
robotIp = "127.0.0.1"
if len(sys.argv) <= 1:
print "Usage python motion_cartesianFoot.py robotIP (optional default: 127.0.0.1)"
else:
robotIp = sys.argv[1]
main(robotIp)
| kwailamchan/programming-languages | python/aldebaran/hana/hana/motion/cartesian/motion_cartesianFoot.py | Python | mit | 1,913 |
import unittest
import os
import shutil
from hyo2.soundspeedmanager import AppInfo
from hyo2.soundspeed.atlas import atlases
from hyo2.soundspeed.soundspeed import SoundSpeedLibrary
class TestSoundSpeedAtlasAtlases(unittest.TestCase):
def setUp(self):
self.cur_dir = os.path.abspath(os.path.dirname(__file__))
def tearDown(self):
dir_items = os.listdir(self.cur_dir)
for item in dir_items:
if item.split('.')[-1] == 'db':
os.remove(os.path.join(self.cur_dir, item))
if item == 'atlases':
shutil.rmtree(os.path.join(self.cur_dir, item))
def test_creation_of_Atlases(self):
lib = SoundSpeedLibrary(data_folder=self.cur_dir)
atl = atlases.Atlases(prj=lib)
self.assertTrue("atlases" in atl.rtofs_folder)
self.assertTrue("woa" in atl.woa09_folder)
self.assertTrue("woa" in atl.woa13_folder)
lib.close()
def suite():
s = unittest.TestSuite()
s.addTests(unittest.TestLoader().loadTestsFromTestCase(TestSoundSpeedAtlasAtlases))
return s
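# Hedged convenience entry point (an addition, not in the original file), so
# the suite can be run directly with `python test_atlases.py`:
if __name__ == "__main__":
    unittest.TextTestRunner(verbosity=2).run(suite())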
| hydroffice/hyo_soundspeed | tests/soundspeed/atlas/test_atlases.py | Python | lgpl-2.1 | 1,091 |
"""
Harvester for the Digital Commons @ CalPoly API for the SHARE project
More information at https://github.com/CenterForOpenScience/SHARE/blob/master/providers/edu.calpoly.md
Example API call: http://digitalcommons.calpoly.edu/do/oai/?verb=ListRecords&metadataPrefix=oai_dc&from=2014-10-05T00:00:00Z
"""
from __future__ import unicode_literals
from scrapi.base import OAIHarvester
class CalPolyHarvester(OAIHarvester):
short_name = 'calpoly'
long_name = 'Digital Commons @ CalPoly'
url = 'http://digitalcommons.calpoly.edu/'
base_url = 'http://digitalcommons.calpoly.edu/do/oai/'
property_list = ['type', 'source', 'format', 'date', 'setSpec']
approved_sets = [
'csusymp2009',
'acct_fac',
'aerosp',
'aero_fac',
'agbsp',
'agb_fac',
'agedsp',
'aged_fac',
'ascisp',
'asci_fac',
'aen_fac',
'arcesp',
'arch_fac',
'art_fac',
'artsp',
'bts',
'bio_fac',
'biosp',
'bmed_fac',
'bmedsp',
'bae_fac',
'braesp',
'ccapc',
'ari',
'csq',
'chem_fac',
'chemsp',
'crp_fac',
'crpsp',
'cenv_fac',
'cadrc',
'comssp',
'comm_fac',
'cpesp',
'cscsp',
'csse_fac',
'cmgt_fac',
'cesp',
'fpe_rpt',
'dscisp',
'dsci_fac',
'erscsp',
'econ_fac',
'eesp',
'eeng_fac',
'engl_fac',
'englsp',
'ethicsandanimals',
'eth_fac',
'essp',
'fin_fac',
'focus',
'fsn_fac',
'fsnsp',
'aged_rpt',
'gse_fac',
'grcsp',
'grc_fac',
'hist_fac',
'histsp',
'honors',
'hcssp',
'hcs_fac',
'imesp',
'ime_fac',
'it_fac',
'itsp',
'ir2008',
'joursp',
'jour_fac',
'kine_fac',
'kinesp',
'land_fac',
'laessp',
'ls_fac',
'lib_fac',
'mgmtsp',
'mgmt_sp',
'mkt_fac',
'theses',
'matesp',
'mate_fac',
'math_fac',
'mathsp',
'mesp',
'meng_fac',
'mll_fac',
'mllsp',
'mus_fac',
'musp',
'nrmsp',
'nrm_fac',
'pres_schol',
'phil_fac',
'philsp',
'phy_fac',
'physsp',
'poli_fac',
'polssp',
'bakerforum',
'psycd_fac',
'psycdsp',
'rpta_fac',
'rptasp',
'coe_dean',
'socssp',
'ssci_fac',
'statsp',
'stat_fac',
'star',
'susconf',
'symposium',
'forum',
'thdanc_fac',
'wvi_fac',
'wvisp'
]
| jeffreyliu3230/scrapi | scrapi/harvesters/calpoly.py | Python | apache-2.0 | 2,863 |
import sys
import csv
reader = csv.reader(sys.stdin, delimiter='\t')
next(reader, None)
for line in reader:
if len(line) == 19:
author_id = line[3]
hour = int(line[8][11:13])
print "{0}\t{1}".format(author_id, hour)
# Reducer
import sys
oldKey = None
hours = {}
for i in range(24):
hours[i] = 0
for line in sys.stdin:
data_mapped = line.strip().split("\t")
if len(data_mapped) != 2:
continue
thisKey, thisValue = data_mapped
    if oldKey and oldKey != thisKey:
        print oldKey, "\t", max(hours, key=hours.get)
        for i in range(24):
            hours[i] = 0
    oldKey = thisKey
hours[int(thisValue)] += 1
if oldKey != None:
print oldKey, "\t", max(hours, key=hours.get) | mi1980/projecthadoop3 | udacity/ud617-intro-hadoop/code/final-project/student_times.py | Python | mit | 699 |
""" Interface for generating an object describing geometry of mass spec acquisition process.
"""
def geometry_section(section_name):
""" Class decorator adding static property @section_name with the input value to a decorated class
"""
def modifier(cls):
cls.section_name = section_name
return cls
return modifier
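# Illustration (added): the decorator simply attaches the given name as a
# class attribute, e.g.
#
#     @geometry_section('demo')
#     class Demo(object):
#         pass
#
#     assert Demo.section_name == 'demo'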
class ACQ_GEOMETRY_KEYS(object):
''' Collection of identifiers served as keys in objects
describing mass spec acquisition geometry.
'''
@geometry_section('acquisition_grid')
class AcqGridSection(object):
REGULAR_GRID = 'regular_grid'
PIXEL_COUNT_X = 'count_x'
PIXEL_COUNT_Y = 'count_y'
PIXEL_SPACING_X = 'spacing_x'
PIXEL_SPACING_Y = 'spacing_y'
PIXEL_CORRD_LIST = 'coord_list'
@geometry_section('pixel_size')
class PixelSizeSection(object):
REGULAR_SIZE = 'regular_size'
PIXEL_SIZE_X = 'size_x'
PIXEL_SIZE_Y = 'size_y'
PIXEL_SIZE_LIST = 'size_list'
LENGTH_UNIT = 'length_unit'
class AcqGeometryFactory(object):
""" Interface for generating an object describing
geometry of mass spec acquisition process.
In the comments below all coordinates are assumed to be calculated
relatively to top left corner of the acquisition area.
"""
def __init__(self, ms_file_path):
self.ms_file_path = ms_file_path
self.geometry = {}
def create(self):
""" Generates acquisition geometry descriptor """
if not self.geometry:
self.geometry = {
                ACQ_GEOMETRY_KEYS.PixelSizeSection.LENGTH_UNIT: self._length_unit(),
ACQ_GEOMETRY_KEYS.AcqGridSection.section_name: self._acquisition_grid(),
ACQ_GEOMETRY_KEYS.PixelSizeSection.section_name: self._pixel_shape()
}
return self.geometry
def _acquisition_grid(self):
""" Object with the following structure:
if @self._is_regular_grid == True:
{
AcqGridSection.REGULAR_GRID : true
# count of pixels along X axis
AcqGridSection.PIXEL_COUNT_X : int
# count of pixels along Y axis
AcqGridSection.PIXEL_COUNT_Y : int
# distance between pixel centers along X axis
AcqGridSection.PIXEL_SPACING_X : float
# distance between pixel centers along Y axis
AcqGridSection.PIXEL_SPACING_Y : float
}
else: list of coordinates of pixel centers
{
AcqGridSection.REGULAR_GRID : false
AcqGridSection.PIXEL_CORRD_LIST : [(x, y)]
}
"""
raise NotImplementedError
def _pixel_shape(self):
""" Object with the following structure:
if @self.is_regular_pixel_shape == True:
{
PixelSizeSection.REGULAR_SIZE : true
# pixel size along X axis
PixelSizeSection.PIXEL_SIZE_X : float
# pixel size along Y axis
PixelSizeSection.PIXEL_SIZE_Y : float
}
else: list of pixel sizes
{
PixelSizeSection.REGULAR_SIZE : false
PixelSizeSection.PIXEL_SIZE_LIST : [(x_size, y_size)]
}
"""
raise NotImplementedError
def _length_unit(self):
""" String identifier of units of distance used in @self.acquisition_grid and @self.pixel_shape """
raise NotImplementedError
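# Hedged example (an addition, not part of the original module): a minimal
# concrete subclass sketch under the assumption of a regular grid; every
# number below is a made-up placeholder.
class RegularGridGeometryFactory(AcqGeometryFactory):
    """ Example factory for a regular acquisition grid. """

    def _acquisition_grid(self):
        grid_section = ACQ_GEOMETRY_KEYS.AcqGridSection
        return {
            grid_section.REGULAR_GRID: True,
            grid_section.PIXEL_COUNT_X: 100,
            grid_section.PIXEL_COUNT_Y: 80,
            grid_section.PIXEL_SPACING_X: 50.0,
            grid_section.PIXEL_SPACING_Y: 50.0,
        }

    def _pixel_shape(self):
        size_section = ACQ_GEOMETRY_KEYS.PixelSizeSection
        return {
            size_section.REGULAR_SIZE: True,
            size_section.PIXEL_SIZE_X: 40.0,
            size_section.PIXEL_SIZE_Y: 40.0,
        }

    def _length_unit(self):
        return 'um'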
| SpatialMetabolomics/SM_distributed | sm/engine/acq_geometry_factory.py | Python | apache-2.0 | 3,421 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c) 2015-2017:
# David Durieux, [email protected]
#
"""
Alignak - Arbiter, Scheduler and Broker modules for the Alignak backend
"""
# Package name
__pkg_name__ = u"alignak_module_backend"
# Module type for PyPI keywords
# Used for:
# - PyPI keywords
__module_types__ = u"backend"
# Application manifest
__version__ = u"1.4.3"
__author__ = u"David Durieux"
__author_email__ = u"[email protected]"
__copyright__ = u"(c) 2015-2018 - %s" % __author__
__license__ = u"GNU Affero General Public License, version 3"
__git_url__ = u"https://github.com/Alignak-monitoring-contrib/alignak-module-backend"
__doc_url__ = u"http://alignak-doc.readthedocs.io/en/latest"
__description__ = u"Alignak - Backend modules for Arbiter, Scheduler and Broker"
__classifiers__ = [
'Development Status :: 5 - Production/Stable',
'Environment :: Console',
'Intended Audience :: Developers',
'Intended Audience :: System Administrators',
'License :: OSI Approved :: GNU Affero General Public License v3 or later (AGPLv3+)',
'Natural Language :: English',
'Programming Language :: Python',
'Topic :: System :: Monitoring',
'Topic :: System :: Systems Administration'
]
| Alignak-monitoring-contrib/alignak-module-backend | version.py | Python | agpl-3.0 | 1,262 |
# http://eli.thegreenplace.net/2014/02/15/programmatically-populating-a-django-database
from django.core.management.base import BaseCommand
from django.db import IntegrityError
import json
from maps.models import Permit, PermitOwner
from permits_redux.settings import BASE_DIR, GEOIP_PATH
from django.contrib.auth.models import User
# from math import floor
# import sys
import os
# sys.path.append(os.path.join(os.environ.get('PWD', ''), 'permit_user'))
# from permit_user.models import PermitUser
class Command(BaseCommand):
args = '<foo bar ...>'
help = 'our help string comes here'
def _save_permit_to_database(self):
"""Add a permit to the database"""
#file_path = 'media/contruction.json'
file_path = os.path.join(GEOIP_PATH, 'contruction.json')
# permit_user = PermitOwner.objects.filter(user=1).first()
owner = User.objects.all().first()
with open(file_path, 'r') as f:
data = json.load(f)
count = 0
for perm in data:
if perm.get('final_date') is None and perm.get('latitude') and perm.get('longitude'):
if perm.get('description'):
if len(perm.get('description')) >= 255:
perm['description'] = perm.get('description')[0:255]
permit = Permit(
# permit_user=permit_user,
owner=owner,
permit_number=perm['application_permit_number'],
latitude=perm['latitude'],
longitude=perm['longitude'],
master_use_permit=perm.get('master_use_permit'),
action_type=perm.get('action_type'),
address=perm.get('address'),
applicant_name=perm.get('applicant_name'),
application_date=perm.get('application_date'),
issue_date=perm.get('issue_date'),
final_date=perm.get('final_date'),
experation_date=perm.get('experation_date'),
category=perm.get('category'),
description=perm.get('description'),
url=perm.get('permit_and_complaint_status_url'),
permit_type=perm.get('permit_type'),
status=perm.get('status'),
value=float(perm.get('value')),
work_type=perm.get('work_type'),
contractor=perm.get('contractor'),
)
try:
print("Count: {} Adding permit to DB: {}".format(count, perm.get('application_permit_number')))
permit.save()
# print()
except IntegrityError:
print("Count: {} Premit {} is already in the database".format(count, perm.get('application_permit_number')))
pass
count += 1
# uncomment to only add x number of permit
# x = 100
# if count >= x:
# return
print('count: {}'.format(count))
    def _list_permits(self):
pass
# def _list_permit_user(self):
# pu = PermitUser.objects.filter(user=1).first()
# print("Permit User: {}".format(pu.user.username))
def _hello(self):
""" Say Hello """
print('hello\n')
print(BASE_DIR)
print(GEOIP_PATH)
def handle(self, *args, **options):
self._hello()
# self._list_permit_user()
self._save_permit_to_database()
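# Hedged usage note (added): given the file's location under
# maps/management/commands/populate_db.py, this command would typically be
# invoked as:
#
#     python manage.py populate_db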
| crashtack/permits_redux | maps/management/commands/populate_db.py | Python | mit | 3,567 |
import re
from argparse import ArgumentParser
# Parse arguments
parser = ArgumentParser(description="Transforming output to more concise version")
parser.add_argument('-f', '--file',
help="Input file for data, e.g.: -f output_raw.txt",
dest="infile",
required=True,
default=None)
parser.add_argument('-o', '--out',
help="Output file for formatted data, e.g.: -o output_formatted.txt",
dest="out",
required=True,
default=None)
args = parser.parse_args()
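# Illustration (added, values made up): the loop below expects the raw output
# to interleave a bandwidth line with two `time`-style elapsed-time lines
# (tab after "real"), e.g.
#
#     Bandwidth: 10.00
#     real    0m0.525s
#     real    0m0.530s
#
# producing one tab-separated row per host: "10.00<TAB>525.0<TAB>530.0".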
try:
f = open(args.out, 'w')
with open(args.infile) as outputFile:
# simple flag to identify 1st vs 2nd entry for a host
isSecond = 0
for line in outputFile:
            bandwidth = re.search(r"Bandwidth:\s(\d+\.\d+)", line)
if bandwidth:
host_bw = bandwidth.group(1)
f.write(host_bw + "\t")
continue
            # Only the seconds are captured, so this assumes elapsed times
            # under one minute.
            elapsed_time = re.search(r"real\t\dm(\d+\.\d+)s", line)
            if elapsed_time:
                time_s = elapsed_time.group(1)
                time_ms = str(float(time_s) * 1000)
f.write(time_ms)
if isSecond:
f.write("\n")
isSecond = 0
else:
f.write("\t")
isSecond = 1
f.close()
except EnvironmentError:
print 'Error reading file'
| XianliangJ/collections | TCPCwnd/formatOutput.py | Python | gpl-3.0 | 1,446 |