text (stringlengths 4 to 1.02M) | meta (dict)
---|---|
import subprocess

def get_gitconfig(key, subkey, is_global=False):
    # Query "<key>.<subkey>" from git config; --null makes git terminate the
    # value with a NUL byte so embedded newlines survive intact.
    cmd = ['git', 'config', '--null']
    if is_global:
        cmd.append('--global')
    cmd.extend(['--get', '{}.{}'.format(key, subkey)])
    proc = subprocess.Popen(cmd, stdout=subprocess.PIPE)
    out, err = proc.communicate()
    if proc.returncode:
        return None
    z = out.index('\0')
    return out[:z]
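# Illustrative usage (a sketch, not part of the original file; the values
# printed depend entirely on the local git configuration and may be None):
if __name__ == '__main__':
    print get_gitconfig('user', 'email')                  # repo-level user.email
    print get_gitconfig('user', 'name', is_global=True)   # global user.name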
| {
"content_hash": "021ad4b4bf8f49e0ba14a8afdc143a87",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 56,
"avg_line_length": 29.846153846153847,
"alnum_prop": 0.5927835051546392,
"repo_name": "depp/headerfix",
"id": "f7b756510eba15b0e6503528d5d8c775f2897f38",
"size": "559",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "lib/header/git.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "C",
"bytes": "101"
},
{
"name": "C++",
"bytes": "38"
},
{
"name": "Python",
"bytes": "48417"
},
{
"name": "Shell",
"bytes": "298"
}
],
"symlink_target": ""
} |
from mock import patch, Mock
from oslo.config import cfg
import unittest
from savanna.exceptions import NotFoundException, SavannaException
import savanna.openstack.common.exception as os_ex
from savanna.service.api import Resource
import savanna.service.validation as v
CONF = cfg.CONF
CONF.import_opt('allow_cluster_ops', 'savanna.config')
def _raise(ex):
def function(*args, **kwargs):
raise ex
return function
def _cluster(base, **kwargs):
base['cluster'].update(**kwargs)
return base
def _template(base, **kwargs):
base['node_template'].update(**kwargs)
return base
class TestValidation(unittest.TestCase):
def setUp(self):
self._create_object_fun = None
CONF.set_override('allow_cluster_ops', False)
def tearDown(self):
self._create_object_fun = None
CONF.clear_override('allow_cluster_ops')
@patch("savanna.utils.api.bad_request")
@patch("savanna.utils.api.request_data")
def test_malformed_request_body(self, request_data, bad_request):
ex = os_ex.MalformedRequestBody()
request_data.side_effect = _raise(ex)
m_func = Mock()
m_func.__name__ = "m_func"
v.validate(m_func)(m_func)()
self._assert_calls(bad_request,
(1, 'MALFORMED_REQUEST_BODY',
'Malformed message body: %(reason)s'))
def _assert_exists_by_id(self, side_effect, assert_func=True):
m_checker = Mock()
m_checker.side_effect = side_effect
m_func = Mock()
m_func.__name__ = "m_func"
v.exists_by_id(m_checker, "template_id")(m_func)(template_id="asd")
m_checker.assert_called_once_with(id="asd")
if assert_func:
m_func.assert_called_once_with(template_id="asd")
@patch("savanna.utils.api.internal_error")
@patch("savanna.utils.api.not_found")
def test_exists_by_id_passed(self, not_found, internal_error):
self._assert_exists_by_id(None)
self.assertEqual(not_found.call_count, 0)
self.assertEqual(internal_error.call_count, 0)
@patch("savanna.utils.api.internal_error")
@patch("savanna.utils.api.not_found")
def test_exists_by_id_failed(self, not_found, internal_error):
self._assert_exists_by_id(_raise(NotFoundException("")), False)
self.assertEqual(not_found.call_count, 1)
self.assertEqual(internal_error.call_count, 0)
self._assert_exists_by_id(_raise(SavannaException()), False)
self.assertEqual(not_found.call_count, 1)
self.assertEqual(internal_error.call_count, 1)
self._assert_exists_by_id(_raise(AttributeError()), False)
self.assertEqual(not_found.call_count, 1)
self.assertEqual(internal_error.call_count, 2)
def _assert_calls(self, mock, call_info):
print "_assert_calls for %s, \n\t actual: %s , \n\t expected: %s" \
% (mock, mock.call_args, call_info)
if not call_info:
self.assertEqual(mock.call_count, 0)
else:
self.assertEqual(mock.call_count, call_info[0])
self.assertEqual(mock.call_args[0][0].code, call_info[1])
self.assertEqual(mock.call_args[0][0].message, call_info[2])
def _assert_create_object_validation(
self, data, bad_req_i=None, not_found_i=None, int_err_i=None):
request_data_p = patch("savanna.utils.api.request_data")
bad_req_p = patch("savanna.utils.api.bad_request")
not_found_p = patch("savanna.utils.api.not_found")
int_err_p = patch("savanna.utils.api.internal_error")
get_clusters_p = patch("savanna.service.api.get_clusters")
get_templates_p = patch("savanna.service.api.get_node_templates")
get_template_p = patch("savanna.service.api.get_node_template")
get_types_p = patch("savanna.service.api.get_node_types")
get_node_type_required_params_p = \
patch("savanna.service.api.get_node_type_required_params")
patchers = (request_data_p, bad_req_p, not_found_p, int_err_p,
get_clusters_p, get_templates_p, get_template_p,
get_types_p, get_node_type_required_params_p)
request_data = request_data_p.start()
bad_req = bad_req_p.start()
not_found = not_found_p.start()
int_err = int_err_p.start()
get_clusters = get_clusters_p.start()
get_templates = get_templates_p.start()
get_template = get_template_p.start()
get_types = get_types_p.start()
get_node_type_required_params = get_node_type_required_params_p.start()
# stub clusters list
get_clusters.return_value = getattr(self, "_clusters_data", [
Resource("cluster", {
"name": "some-cluster-1"
})
])
# stub node templates
get_templates.return_value = getattr(self, "_templates_data", [
Resource("node_template", {
"name": "jt_nn.small",
"node_type": {
"name": "JT+NN",
"processes": ["job_tracker", "name_node"]
}
}),
Resource("node_template", {
"name": "nn.small",
"node_type": {
"name": "NN",
"processes": ["name_node"]
}
})
])
def _get_template(name):
for template in get_templates():
if template.name == name:
return template
return None
get_template.side_effect = _get_template
get_types.return_value = getattr(self, "_types_data", [
Resource("node_type", {
"name": "JT+NN",
"processes": ["job_tracker", "name_node"]
})
])
def _get_r_params(name):
if name == "JT+NN":
return {"job_tracker": ["jt_param"]}
return dict()
get_node_type_required_params.side_effect = _get_r_params
# mock function that should be validated
m_func = Mock()
m_func.__name__ = "m_func"
# request data to validate
request_data.return_value = data
v.validate(self._create_object_fun)(m_func)(id="some-id")
self.assertEqual(request_data.call_count, 1)
self._assert_calls(bad_req, bad_req_i)
self._assert_calls(not_found, not_found_i)
self._assert_calls(int_err, int_err_i)
for patcher in patchers:
patcher.stop()
def test_cluster_create_v_required(self):
self._create_object_fun = v.validate_cluster_create
self._assert_create_object_validation(
{},
bad_req_i=(1, "VALIDATION_ERROR",
u"'cluster' is a required property")
)
self._assert_create_object_validation(
{"cluster": {}},
bad_req_i=(1, "VALIDATION_ERROR",
u"'name' is a required property")
)
self._assert_create_object_validation(
{"cluster": {
"name": "some-name"
}},
bad_req_i=(1, "VALIDATION_ERROR",
u"'base_image_id' is a required property")
)
self._assert_create_object_validation(
{"cluster": {
"name": "some-name",
"base_image_id": "some-image-id"
}},
bad_req_i=(1, "VALIDATION_ERROR",
u"'node_templates' is a required property")
)
def test_cluster_create_v_name_base(self):
self._create_object_fun = v.validate_cluster_create
cluster = {
"cluster": {
"base_image_id": "some-image-id",
"node_templates": {}
}
}
self._assert_create_object_validation(
_cluster(cluster, name=None),
bad_req_i=(1, "VALIDATION_ERROR",
u"None is not of type 'string'")
)
self._assert_create_object_validation(
_cluster(cluster, name=""),
bad_req_i=(1, "VALIDATION_ERROR",
u"'' is too short")
)
self._assert_create_object_validation(
_cluster(cluster, name="a" * 51),
bad_req_i=(1, "VALIDATION_ERROR",
u"'%s' is too long" % ('a' * 51))
)
def test_cluster_create_v_name_pattern(self):
self._create_object_fun = v.validate_cluster_create
cluster = {
"cluster": {
"base_image_id": "some-image-id",
"node_templates": {}
}
}
def _assert_cluster_name_pattern(self, name):
cluster_schema = v.CLUSTER_CREATE_SCHEMA['properties']['cluster']
name_p = cluster_schema['properties']['name']['pattern']
self._assert_create_object_validation(
_cluster(cluster, name=name),
bad_req_i=(1, "VALIDATION_ERROR",
(u"'%s' does not match '%s'" % (name, name_p))
.replace('\\', "\\\\"))
)
_assert_cluster_name_pattern(self, "asd_123")
_assert_cluster_name_pattern(self, "123")
_assert_cluster_name_pattern(self, "asd?")
def test_cluster_create_v_name_exists(self):
self._create_object_fun = v.validate_cluster_create
cluster = {
"cluster": {
"base_image_id": "some-image-id",
"node_templates": {}
}
}
self._assert_create_object_validation(
_cluster(cluster, name="some-cluster-1"),
bad_req_i=(1, "CLUSTER_NAME_ALREADY_EXISTS",
u"Cluster with name 'some-cluster-1' already exists")
)
def test_cluster_create_v_templates(self):
self._create_object_fun = v.validate_cluster_create
cluster = {
"cluster": {
"name": "some-cluster",
"base_image_id": "some-image-id"
}
}
self._assert_create_object_validation(
_cluster(cluster, node_templates={}),
bad_req_i=(1, "NOT_SINGLE_NAME_NODE",
u"Hadoop cluster should contain only 1 NameNode. "
u"Actual NN count is 0")
)
self._assert_create_object_validation(
_cluster(cluster, node_templates={
"nn.small": 1
}),
bad_req_i=(1, "NOT_SINGLE_JOB_TRACKER",
u"Hadoop cluster should contain only 1 JobTracker. "
u"Actual JT count is 0")
)
self._assert_create_object_validation(
_cluster(cluster, node_templates={
"incorrect_template": 10
}),
bad_req_i=(1, "NODE_TEMPLATE_NOT_FOUND",
u"NodeTemplate 'incorrect_template' not found")
)
self._assert_create_object_validation(
_cluster(cluster, node_templates={
"jt_nn.small": 1
})
)
def test_node_template_create_v_required(self):
self._create_object_fun = v.validate_node_template_create
self._assert_create_object_validation(
{},
bad_req_i=(1, "VALIDATION_ERROR",
u"'node_template' is a required property")
)
self._assert_create_object_validation(
{"node_template": {}},
bad_req_i=(1, "VALIDATION_ERROR",
u"'name' is a required property")
)
self._assert_create_object_validation(
{"node_template": {
"name": "some-name"
}},
bad_req_i=(1, "VALIDATION_ERROR",
u"'node_type' is a required property")
)
self._assert_create_object_validation(
{"node_template": {
"name": "some-name",
"node_type": "some-node-type"
}},
bad_req_i=(1, "VALIDATION_ERROR",
u"'flavor_id' is a required property")
)
self._assert_create_object_validation(
{"node_template": {
"name": "some-name",
"node_type": "JT+NN",
"flavor_id": "flavor-1"
}},
bad_req_i=(1, "VALIDATION_ERROR",
u"'name_node' is a required property")
)
self._assert_create_object_validation(
{"node_template": {
"name": "some-name",
"node_type": "JT+NN",
"flavor_id": "flavor-1",
"name_node": {}
}},
bad_req_i=(1, "VALIDATION_ERROR",
u"'job_tracker' is a required property")
)
self._assert_create_object_validation(
{"node_template": {
"name": "some-name",
"node_type": "JT+NN",
"flavor_id": "flavor-1",
"name_node": {},
"job_tracker": {}
}},
bad_req_i=(1, "REQUIRED_PARAM_MISSED",
u"Required parameter 'jt_param' of process "
u"'job_tracker' should be specified")
)
self._assert_create_object_validation(
{"node_template": {
"name": "some-name",
"node_type": "JT+NN",
"flavor_id": "flavor-1",
"name_node": {},
"job_tracker": {"jt_param": ""}
}},
bad_req_i=(1, "REQUIRED_PARAM_MISSED",
u"Required parameter 'jt_param' of process "
u"'job_tracker' should be specified")
)
self._assert_create_object_validation(
{"node_template": {
"name": "some-name",
"node_type": "JT+NN",
"flavor_id": "flavor-1",
"name_node": {},
"job_tracker": {"jt_param": "some value"}
}}
)
self._assert_create_object_validation(
{"node_template": {
"name": "some-name",
"node_type": "JT+NN",
"flavor_id": "flavor-1",
"name_node": {},
"job_tracker": {},
"task_tracker": {}
}},
bad_req_i=(1, "NODE_PROCESS_DISCREPANCY",
u"Discrepancies in Node Processes. "
u"Required: ['name_node', 'job_tracker']")
)
def test_node_template_create_v_name_base(self):
self._create_object_fun = v.validate_node_template_create
template = {
"node_template": {
"node_type": "JT+NN",
"flavor_id": "flavor-1",
"name_node": {},
"job_tracker": {}
}
}
self._assert_create_object_validation(
_template(template, name=None),
bad_req_i=(1, "VALIDATION_ERROR",
u"None is not of type 'string'")
)
self._assert_create_object_validation(
_template(template, name=""),
bad_req_i=(1, "VALIDATION_ERROR",
u"'' is too short")
)
self._assert_create_object_validation(
_template(template, name="a" * 241),
bad_req_i=(1, "VALIDATION_ERROR",
u"'%s' is too long" % ('a' * 241))
)
def test_node_template_create_v_name_pattern(self):
self._create_object_fun = v.validate_node_template_create
template = {
"node_template": {
"node_type": "JT+NN",
"flavor_id": "flavor-1",
"name_node": {},
"job_tracker": {}
}
}
def _assert_template_name_pattern(self, name):
schema_props = v.TEMPLATE_CREATE_SCHEMA['properties']
template_schema = schema_props['node_template']
name_p = template_schema['properties']['name']['pattern']
self._assert_create_object_validation(
_template(template, name=name),
bad_req_i=(1, "VALIDATION_ERROR",
(u"'%s' does not match '%s'" % (name, name_p))
.replace('\\', "\\\\"))
)
_assert_template_name_pattern(self, "asd;123")
_assert_template_name_pattern(self, "123")
_assert_template_name_pattern(self, "asd?")
def test_node_template_create_v_name_exists(self):
self._create_object_fun = v.validate_node_template_create
template = {
"node_template": {
"node_type": "JT+NN",
"flavor_id": "flavor-1",
"name_node": {},
"job_tracker": {}
}
}
self._assert_create_object_validation(
_template(template, name="jt_nn.small"),
bad_req_i=(1, "NODE_TEMPLATE_ALREADY_EXISTS",
u"NodeTemplate with name 'jt_nn.small' already exists")
)
def test_node_template_create_v_types(self):
self._create_object_fun = v.validate_node_template_create
self._assert_create_object_validation(
{
"node_template": {
"name": "some-name",
"node_type": "JJ",
"flavor_id": "flavor-1",
"name_node": {},
"job_tracker": {}
}
},
bad_req_i=(1, "NODE_TYPE_NOT_FOUND",
u"NodeType 'JJ' not found")
)
# TODO(slukjanov): add tests for allow_cluster_ops = True
| {
"content_hash": "205ea7640b3853a9d34f9ceaa099b88e",
"timestamp": "",
"source": "github",
"line_count": 506,
"max_line_length": 79,
"avg_line_length": 35.387351778656125,
"alnum_prop": 0.5041885401541383,
"repo_name": "darionyaphets/savanna",
"id": "7ff6a322e636c145da25689e3c02c33b67161679",
"size": "18489",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "savanna/tests/unit/test_validation.py",
"mode": "33188",
"license": "apache-2.0",
"language": [],
"symlink_target": ""
} |
class NullAPIKeyError(Exception):
    """Raised when the api_key is None."""


class ConnectionError(Exception):
    def __init__(self, response, content=None, message=None):
        self.response = response
        self.content = content
        self.message = message

    def __str__(self):
        message = "Failed."
        if hasattr(self.response, 'status_code'):
            message += 'Response status code: %s' % self.response.status_code
        if hasattr(self.response, 'reason'):
            message += 'Response reason: %s' % self.response.reason
        if self.content is not None:
            message += 'Error message: %s' % (str(self.content))
        return message


class ClientError(ConnectionError):
    """4xx Exceptions Error."""


class BadRequestError(ClientError):
    """400 Error Code."""


class MetaDataInstanceError(Exception):
    """Raised when a metadata object passed to a resource
    is not an instance of the `MetaData` class.
    """


class CustomerInstanceError(Exception):
    """Raised when a customer metadata object passed to a resource
    is not an instance of the `CustomerMetaData` class.
    """
| {
"content_hash": "ac0285612b22d18b63cc56b81190d2ab",
"timestamp": "",
"source": "github",
"line_count": 42,
"max_line_length": 77,
"avg_line_length": 27.5,
"alnum_prop": 0.638961038961039,
"repo_name": "michaeltcoelho/pagarme.py",
"id": "0c1ca94d2dfbd9e4dd7bcbcfa6b425b3cca72e0c",
"size": "1172",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pagarme/exceptions.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "169"
},
{
"name": "Python",
"bytes": "46273"
}
],
"symlink_target": ""
} |
import sys
import json


def main(argv):
    line = sys.stdin.readline()
    try:
        while line:
            line = line.rstrip()
            print line
            line = sys.stdin.readline()
    except EOFError:
        # readline() normally signals end-of-file by returning '', but keep a
        # guard in case stdin is closed abruptly.
        return None

if __name__ == "__main__":
    main(sys.argv)
| {
"content_hash": "94ed54f52d184db45da4062e612c2720",
"timestamp": "",
"source": "github",
"line_count": 15,
"max_line_length": 33,
"avg_line_length": 17.2,
"alnum_prop": 0.5852713178294574,
"repo_name": "dgonzo/bmdc_skullcandy",
"id": "b93c2439ede5c3480707bbba509efc741e8e4690",
"size": "281",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "collections/map_reduce_jobs/get_twitter_followers_locations/reduce.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "1322"
},
{
"name": "JavaScript",
"bytes": "24280"
},
{
"name": "Perl",
"bytes": "5862"
},
{
"name": "Python",
"bytes": "30009"
},
{
"name": "R",
"bytes": "9795"
},
{
"name": "Shell",
"bytes": "121"
}
],
"symlink_target": ""
} |
"""empty message
Revision ID: 0056_minor_updates
Revises: 0055_service_whitelist
Create Date: 2016-10-04 09:43:42.321138
"""
# revision identifiers, used by Alembic.
revision = "0056_minor_updates"
down_revision = "0055_service_whitelist"
import sqlalchemy as sa
from alembic import op
def upgrade():
    ### commands auto generated by Alembic - please adjust! ###
    op.alter_column("service_whitelist", "recipient", existing_type=sa.VARCHAR(length=255), nullable=False)
    op.alter_column("services", "research_mode", existing_type=sa.BOOLEAN(), nullable=False)
    op.alter_column("services_history", "research_mode", existing_type=sa.BOOLEAN(), nullable=False)
    op.create_foreign_key("templates_history_service_id_fkey", "templates_history", "services", ["service_id"], ["id"])
    op.create_foreign_key(
        "templates_history_created_by_id_fkey", "templates_history", "users", ["created_by_id"], ["id"]
    )
    ### end Alembic commands ###


def downgrade():
    ### commands auto generated by Alembic - please adjust! ###
    op.drop_constraint("templates_history_service_id_fkey", "templates_history", type_="foreignkey")
    op.drop_constraint("templates_history_created_by_id_fkey", "templates_history", type_="foreignkey")
    op.alter_column("services_history", "research_mode", existing_type=sa.BOOLEAN(), nullable=True)
    op.alter_column("services", "research_mode", existing_type=sa.BOOLEAN(), nullable=True)
    op.alter_column("service_whitelist", "recipient", existing_type=sa.VARCHAR(length=255), nullable=True)
    ### end Alembic commands ###
| {
"content_hash": "230c3063f5287b9dcd2e0b3cb90e4314",
"timestamp": "",
"source": "github",
"line_count": 36,
"max_line_length": 119,
"avg_line_length": 43.97222222222222,
"alnum_prop": 0.7087807959570436,
"repo_name": "alphagov/notifications-api",
"id": "f5010298deb70bf5b0ca2be81489cf99c2ffdc7f",
"size": "1583",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "migrations/versions/0056_minor_updates.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Dockerfile",
"bytes": "719"
},
{
"name": "Jinja",
"bytes": "5543"
},
{
"name": "Makefile",
"bytes": "6627"
},
{
"name": "Mako",
"bytes": "361"
},
{
"name": "Procfile",
"bytes": "35"
},
{
"name": "Python",
"bytes": "3506225"
},
{
"name": "Shell",
"bytes": "13179"
}
],
"symlink_target": ""
} |
import csv
import os

import color


def _GetDataDirPath():
    return os.path.join(os.path.dirname(__file__), 'data')


def _GetCsvPath():
    return os.path.join(_GetDataDirPath(), 'dmccolors.csv')


def _GetCsvString():
    with open(_GetCsvPath()) as f:
        return f.read().strip()


def _CreateDmcColorFromRow(row):
    number = int(row[0])
    name = row[1]
    hex_color = row[5]
    rgb_color = color.RGBColorFromHexString(hex_color)
    return DMCColor(number, name, rgb_color)

# DMC Colors singleton
_dmc_colors = None


def _CreateDMCColors():
    global _dmc_colors
    csv_data = _GetCsvString()
    lines = csv_data.splitlines()
    # Skip first line
    lines = lines[1:]
    reader = csv.reader(lines, delimiter='\t')
    dmc_colors = set()
    for row in reader:
        dmc_colors.add(_CreateDmcColorFromRow(row))
    return dmc_colors


def GetDMCColors():
    global _dmc_colors
    if not _dmc_colors:
        _dmc_colors = frozenset(_CreateDMCColors())
    return _dmc_colors


def GetClosestDMCColorsPairs(rgb_color):
    pairs = list()
    for dcolor in GetDMCColors():
        pairs.append((dcolor, color.RGBColor.distance(rgb_color, dcolor.color)))
    return sorted(pairs, key=lambda pair: pair[1])


def GetClosestDMCColors(rgb_color):
    return [pair[0] for pair in GetClosestDMCColorsPairs(rgb_color)]


class DMCColor(object):
    def __init__(self, number, name, color):
        self.number = number
        self.name = name
        self.color = color

    def __str__(self):
        return super(DMCColor, self).__str__() + str((self.number, self.name, self.color))


def GetStringForDMCColor(dmc_color):
    return "%s %s %s" % (dmc_color.number, dmc_color.name, dmc_color.color)
# Simple executable functionality for debugging.
def main():
    for color in GetDMCColors():
        print color

if __name__ == '__main__':
    main()
| {
"content_hash": "2063ad1ddc94eb279fb635471ee5a122",
"timestamp": "",
"source": "github",
"line_count": 78,
"max_line_length": 86,
"avg_line_length": 22.94871794871795,
"alnum_prop": 0.6804469273743017,
"repo_name": "nanaze/pystitch",
"id": "fd3984b534dd7ca8e09df8dadc0359fabf1e0211",
"size": "1790",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pystitch/dmc_colors.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "10352"
},
{
"name": "Shell",
"bytes": "135"
}
],
"symlink_target": ""
} |
from random import randint

### AlphabetLengths:
alphabetLengths = [2, 4, 8, 16, 32, 64, 128, 256]
# InstanceSize
instanceSize = 2**20
numberOfQuestions = 10 * instanceSize


def generateInstance(alphabetLength, instanceSize, numberOfQuestions, name):
    filer = open(name, 'w')
    sb = str(instanceSize) + " " + str(numberOfQuestions) + "\n"
    vector = [randint(1, alphabetLength) for i in range(instanceSize)]
    for element in vector:
        sb += str(element) + " "
    sb += "\n"
    for i in range(numberOfQuestions):
        print i * 100.0 / float(numberOfQuestions)
        ## change to generateQuestion to generate rank questions.
        sb += generateRangeQuestion(alphabetLength) + "\n"
    filer.write(sb)
    filer.close()


def generateQuestion(alphabetLength):
    a = randint(0, instanceSize - 1)
    b = randint(1, alphabetLength)
    return str(a) + " " + str(b)


def generateRangeQuestion(alphabetLength):
    i = randint(0, instanceSize - 1)
    j = randint(i, instanceSize - 1)
    x = randint(1, alphabetLength)
    y = randint(x, alphabetLength)
    return str(i) + " " + str(j) + " " + str(x) + " " + str(y)


def generateAllInstances(alphabetLengths, instanceSize, numberOfQuestions):
    for length in alphabetLengths:
        generateInstance(length, instanceSize, numberOfQuestions, "instanceRange" + str(alphabetLengths.index(length)) + ".txt")

generateAllInstances(alphabetLengths, instanceSize, numberOfQuestions)
| {
"content_hash": "4ff8b8b14f3efb0d142e2d8146585f35",
"timestamp": "",
"source": "github",
"line_count": 45,
"max_line_length": 125,
"avg_line_length": 31.77777777777778,
"alnum_prop": 0.6937062937062937,
"repo_name": "bsubercaseaux/dcc",
"id": "6877f84e4d149c770c86e39b9f288a3401e27116",
"size": "1460",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Diseño y Análisis de Algoritmos/Tarea 2 DAA/generator.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "4473"
},
{
"name": "C++",
"bytes": "16947"
},
{
"name": "Java",
"bytes": "85900"
},
{
"name": "Python",
"bytes": "29723"
},
{
"name": "Tcl",
"bytes": "2724"
}
],
"symlink_target": ""
} |
from django.http import HttpResponseNotFound


def test_404(request):
    return HttpResponseNotFound("Not found")
| {
"content_hash": "d42728169004cfe057f7b9d3d43a1f4d",
"timestamp": "",
"source": "github",
"line_count": 5,
"max_line_length": 44,
"avg_line_length": 23,
"alnum_prop": 0.7913043478260869,
"repo_name": "dominicrodger/tinyblog",
"id": "f66cfc1b79f7e404c128a710f36aacadb5952c1d",
"size": "115",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/views.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "HTML",
"bytes": "6445"
},
{
"name": "Makefile",
"bytes": "886"
},
{
"name": "Python",
"bytes": "52339"
}
],
"symlink_target": ""
} |
from tempest_lib import exceptions as lib_exc
from tempest.api.network import base
from tempest import config
from tempest import test
CONF = config.CONF
class ExternalNetworksAdminNegativeTestJSON(base.BaseAdminNetworkTest):

    @test.attr(type=['negative'])
    @test.idempotent_id('d402ae6c-0be0-4d8e-833b-a738895d98d0')
    def test_create_port_with_precreated_floatingip_as_fixed_ip(self):
        """
        External networks can be used to create both floating-ip as well
        as instance-ip. So, creating an instance-ip with a value of a
        pre-created floating-ip should be denied.
        """
        # create a floating ip
        client = self.admin_client
        body = client.create_floatingip(
            floating_network_id=CONF.network.public_network_id)
        created_floating_ip = body['floatingip']
        self.addCleanup(self._try_delete_resource,
                        client.delete_floatingip,
                        created_floating_ip['id'])
        floating_ip_address = created_floating_ip['floating_ip_address']
        self.assertIsNotNone(floating_ip_address)
        # use the same value of floatingip as fixed-ip to create_port()
        fixed_ips = [{'ip_address': floating_ip_address}]
        # create a port which will internally create an instance-ip
        self.assertRaises(lib_exc.Conflict,
                          self.admin_ports_client.create_port,
                          network_id=CONF.network.public_network_id,
                          fixed_ips=fixed_ips)
| {
"content_hash": "aac1e6d3eeb1b53355f3b2ce24e23e75",
"timestamp": "",
"source": "github",
"line_count": 39,
"max_line_length": 72,
"avg_line_length": 39.38461538461539,
"alnum_prop": 0.6451822916666666,
"repo_name": "liucode/tempest-master",
"id": "8dfce248f23eff49bcb5361229c468753910cc85",
"size": "2172",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tempest/api/network/admin/test_external_networks_negative.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "2834934"
},
{
"name": "Shell",
"bytes": "8578"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import, division, print_function, unicode_literals
from thrift.protocol.TProtocol import *
from struct import pack, unpack
class TBinaryProtocol(TProtocolBase):
"""Binary implementation of the Thrift protocol driver."""
# For simpler THeaderTransport
PROTOCOL_ID = 0x80
# NastyHaxx. Python 2.4+ on 32-bit machines forces hex constants to be
# positive, converting this into a long. If we hardcode the int value
# instead it'll stay in 32 bit-land.
# VERSION_MASK = 0xffff0000
VERSION_MASK = -65536
# VERSION_1 = 0x80010000
VERSION_1 = -2147418112
TYPE_MASK = 0x000000FF
def __init__(self, trans, strictRead=False, strictWrite=True):
TProtocolBase.__init__(self, trans)
self.strictRead = strictRead
self.strictWrite = strictWrite
def writeMessageBegin(self, name, type, seqid):
if self.strictWrite:
self.writeI32(TBinaryProtocol.VERSION_1 | type)
self.writeString(name)
self.writeI32(seqid)
else:
self.writeString(name)
self.writeByte(type)
self.writeI32(seqid)
def writeMessageEnd(self):
pass
def writeStructBegin(self, name):
pass
def writeStructEnd(self):
pass
def writeFieldBegin(self, name, type, id):
self.writeByte(type)
self.writeI16(id)
def writeFieldEnd(self):
pass
def writeFieldStop(self):
self.writeByte(TType.STOP)
def writeMapBegin(self, ktype, vtype, size):
self.writeByte(ktype)
self.writeByte(vtype)
self.writeI32(size)
def writeMapEnd(self):
pass
def writeListBegin(self, etype, size):
self.writeByte(etype)
self.writeI32(size)
def writeListEnd(self):
pass
def writeSetBegin(self, etype, size):
self.writeByte(etype)
self.writeI32(size)
def writeSetEnd(self):
pass
def writeBool(self, bool):
if bool:
self.writeByte(1)
else:
self.writeByte(0)
def writeByte(self, byte):
buff = pack(b"!b", byte)
self.trans.write(buff)
def writeI16(self, i16):
buff = pack(b"!h", i16)
self.trans.write(buff)
def writeI32(self, i32):
buff = pack(b"!i", i32)
self.trans.write(buff)
def writeI64(self, i64):
buff = pack(b"!q", i64)
self.trans.write(buff)
def writeDouble(self, dub):
buff = pack(b"!d", dub)
self.trans.write(buff)
def writeFloat(self, flt):
buff = pack(b"!f", flt)
self.trans.write(buff)
def writeString(self, str):
if sys.version_info[0] >= 3 and not isinstance(str, bytes):
str = str.encode("utf-8")
self.writeI32(len(str))
self.trans.write(str)
def readMessageBegin(self):
sz = self.readI32()
if sz < 0:
version = sz & TBinaryProtocol.VERSION_MASK
if version != TBinaryProtocol.VERSION_1:
raise TProtocolException(
TProtocolException.BAD_VERSION,
"Bad version in readMessageBegin: %d" % (sz),
)
type = sz & TBinaryProtocol.TYPE_MASK
name = self.readString()
seqid = self.readI32()
else:
if self.strictRead:
raise TProtocolException(
TProtocolException.BAD_VERSION, "No protocol version header"
)
name = self.trans.readAll(sz)
type = self.readByte()
seqid = self.readI32()
return (name, type, seqid)
def readMessageEnd(self):
pass
def readStructBegin(self):
pass
def readStructEnd(self):
pass
def readFieldBegin(self):
type = self.readByte()
if type == TType.STOP:
return (None, type, 0)
id = self.readI16()
return (None, type, id)
def readFieldEnd(self):
pass
def readMapBegin(self):
ktype = self.readByte()
vtype = self.readByte()
size = self.readI32()
return (ktype, vtype, size)
def readMapEnd(self):
pass
def readListBegin(self):
etype = self.readByte()
size = self.readI32()
return (etype, size)
def readListEnd(self):
pass
def readSetBegin(self):
etype = self.readByte()
size = self.readI32()
return (etype, size)
def readSetEnd(self):
pass
def readBool(self):
byte = self.readByte()
if byte == 0:
return False
return True
def readByte(self):
buff = self.trans.readAll(1)
(val,) = unpack(b"!b", buff)
return val
def readI16(self):
buff = self.trans.readAll(2)
(val,) = unpack(b"!h", buff)
return val
def readI32(self):
buff = self.trans.readAll(4)
(val,) = unpack(b"!i", buff)
return val
def readI64(self):
buff = self.trans.readAll(8)
(val,) = unpack(b"!q", buff)
return val
def readDouble(self):
buff = self.trans.readAll(8)
(val,) = unpack(b"!d", buff)
return val
def readFloat(self):
buff = self.trans.readAll(4)
(val,) = unpack(b"!f", buff)
return val
def readString(self):
len = self.readI32()
str = self.trans.readAll(len)
return str
class TBinaryProtocolFactory:
def __init__(self, strictRead: bool = False, strictWrite: bool = True) -> None:
self.strictRead = strictRead
self.strictWrite = strictWrite
def getProtocol(self, trans):
prot = TBinaryProtocol(trans, self.strictRead, self.strictWrite)
return prot
class TBinaryProtocolAccelerated(TBinaryProtocol):
"""C-Accelerated version of TBinaryProtocol.
This class does not override any of TBinaryProtocol's methods,
but the generated code recognizes it directly and will call into
our C module to do the encoding, bypassing this object entirely.
We inherit from TBinaryProtocol so that the normal TBinaryProtocol
encoding can happen if the fastproto module doesn't work for some
reason. (TODO(dreiss): Make this happen sanely in more cases.)
In order to take advantage of the C module, just use
TBinaryProtocolAccelerated instead of TBinaryProtocol.
NOTE: This code was contributed by an external developer.
The internal Thrift team has reviewed and tested it,
but we cannot guarantee that it is production-ready.
Please feel free to report bugs and/or success stories
to the public mailing list.
"""
pass
class TBinaryProtocolAcceleratedFactory(TBinaryProtocolFactory):
def getProtocol(self, trans):
return TBinaryProtocolAccelerated(trans, self.strictRead, self.strictWrite)
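# A minimal round-trip sketch (an illustration, not part of the original module;
# it assumes the standard TMemoryBuffer transport is available from
# thrift.transport.TTransport alongside this protocol implementation):
def _example_round_trip():
    from thrift.transport import TTransport

    write_buf = TTransport.TMemoryBuffer()
    out_proto = TBinaryProtocol(write_buf)
    out_proto.writeI32(42)
    out_proto.writeString("hello")

    read_buf = TTransport.TMemoryBuffer(write_buf.getvalue())
    in_proto = TBinaryProtocol(read_buf)
    assert in_proto.readI32() == 42
    # readString returns the raw bytes that were written
    assert in_proto.readString() == b"hello"
    return True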
| {
"content_hash": "8668f67edf2001abf232f773d3a425bd",
"timestamp": "",
"source": "github",
"line_count": 262,
"max_line_length": 83,
"avg_line_length": 26.64503816793893,
"alnum_prop": 0.5989113307549062,
"repo_name": "facebook/fbthrift",
"id": "f93caf9bf991e5dac88ac4341ff4ab0d11f53dc6",
"size": "7596",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "thrift/lib/py/protocol/TBinaryProtocol.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "15608"
},
{
"name": "C++",
"bytes": "10658844"
},
{
"name": "CMake",
"bytes": "147347"
},
{
"name": "CSS",
"bytes": "4028"
},
{
"name": "Cython",
"bytes": "339005"
},
{
"name": "Emacs Lisp",
"bytes": "11229"
},
{
"name": "Go",
"bytes": "447092"
},
{
"name": "Hack",
"bytes": "313122"
},
{
"name": "Java",
"bytes": "1990062"
},
{
"name": "JavaScript",
"bytes": "38872"
},
{
"name": "Mustache",
"bytes": "1269560"
},
{
"name": "Python",
"bytes": "1623026"
},
{
"name": "Ruby",
"bytes": "6111"
},
{
"name": "Rust",
"bytes": "283392"
},
{
"name": "Shell",
"bytes": "6615"
},
{
"name": "Thrift",
"bytes": "1859041"
},
{
"name": "Vim Script",
"bytes": "2887"
}
],
"symlink_target": ""
} |
import unittest
import time
from django.contrib.sessions.backends.base import CreateError
from django.conf import settings
## for tests, configure the settings manually
settings.configure(
SESSION_ENGINE='rethinkdb_sessions.main'
)
####
from rethinkdb_sessions import main
class TestSequenceFunctions(unittest.TestCase):
## on each test run, set up the connection
def setUp(self):
''' init the storage engine '''
self.rtdb_instance = main.SessionStore()
def test_modify_and_keys(self):
self.assertFalse(self.rtdb_instance.modified)
self.rtdb_instance['test'] = 'test_me'
self.assertTrue(self.rtdb_instance.modified)
self.assertEquals(self.rtdb_instance['test'], 'test_me')
"""
simple test to create a key
"""
def test_save_and_delete(self):
self.rtdb_instance["key"] = "value"
self.rtdb_instance.save()
## test implicit
self.assertTrue(self.rtdb_instance.exists(self.rtdb_instance.session_key))
self.rtdb_instance.delete()
self.assertFalse(self.rtdb_instance.exists(self.rtdb_instance.session_key))
def test_save_and_delete_exp(self):
self.rtdb_instance["key"] = "value"
self.rtdb_instance.save()
## test implicit
self.assertTrue(self.rtdb_instance.exists(self.rtdb_instance.session_key))
self.rtdb_instance.delete(self.rtdb_instance.session_key)
self.assertFalse(self.rtdb_instance.exists(self.rtdb_instance.session_key))
def test_save_twice(self):
self.rtdb_instance["key"] = "value"
self.rtdb_instance.save()
self.rtdb_instance["key2"] = "value2"
self.rtdb_instance.save()
def test_flush(self):
self.rtdb_instance['key'] = 'another_value'
self.rtdb_instance.save()
key = self.rtdb_instance.session_key
self.rtdb_instance.flush()
self.assertFalse(self.rtdb_instance.exists(key))
def test_load(self):
self.rtdb_instance['key'] = 'another_value'
self.rtdb_instance.save()
test_key = self.rtdb_instance.session_key
self.rtdb_instance = main.SessionStore(test_key)
self.assertTrue("key" in self.rtdb_instance)
def test_upsert_false(self):
self.rtdb_instance['key'] = 'another_value'
self.rtdb_instance.save()
self.assertRaises(CreateError,self.rtdb_instance.save,must_create=True)
def test_expire(self):
self.rtdb_instance.set_expiry(1)
# Test if the expiry age is set correctly
self.assertEquals(self.rtdb_instance.get_expiry_age(), 1)
self.rtdb_instance['key'] = 'expiring_value'
self.rtdb_instance.save()
key = self.rtdb_instance.session_key
self.assertEquals(self.rtdb_instance.exists(key), True)
time.sleep(2)
self.assertEquals(self.rtdb_instance.exists(key), False)
def test_expire_cleanup(self):
self.rtdb_instance.set_expiry(1)
# Test if the expiry age is set correctly
self.assertEquals(self.rtdb_instance.get_expiry_age(), 1)
self.rtdb_instance['key'] = 'expiring_value'
self.rtdb_instance.save()
key = self.rtdb_instance.session_key
self.assertEquals(self.rtdb_instance.exists(key), True)
time.sleep(2)
main.SessionStore.clear_expired()
self.assertEquals(self.rtdb_instance.exists(key), False)
if __name__ == '__main__':
unittest.main() | {
"content_hash": "59f47b4a3cac0a4ce8a061f2ae6007f6",
"timestamp": "",
"source": "github",
"line_count": 104,
"max_line_length": 79,
"avg_line_length": 31.08653846153846,
"alnum_prop": 0.7015156201670275,
"repo_name": "MaxPresman/django-rethinkdb-sessions",
"id": "423fbf468b270eab7feebcd4eea4dd563e04cfc6",
"size": "3233",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/tests.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "8277"
}
],
"symlink_target": ""
} |
from __future__ import division
import vtk
################################################################################
# Some fixed classes that solve a few VTK API issues
# This dictionary stores the patched class to vtk class mapping.
# This would be naturally better stored as an attribute directly on the
# patched class. VTK, however, doesn't like class attributes.
description = {}
# http://www.vtk.org/doc/nightly/html/classvtkImagePlaneWidget.html
# SetUserControlledLookupTable needs to be set before calling
# SetLookupTable. VTK should do it automatically, so let's fix it
# This fix seems to break on VTK versions larger than 5.0.3. It might also
# be because of an interaction with python 2.6, but I haven't checked that.
class vtkImagePlaneWidget_fixed(vtk.vtkImagePlaneWidget):
    def SetLookupTable(self, lookup_table):
        self.UserControlledLookupTableOn()
        vtk.vtkImagePlaneWidget.SetLookupTable(self, lookup_table)

v = vtk.vtkVersion()
version = [v.GetVTKMajorVersion(),
           v.GetVTKMinorVersion(),
           v.GetVTKBuildVersion()]
if version < [5, 0, 4]:
    description[vtkImagePlaneWidget_fixed] = vtk.vtkImagePlaneWidget
else:
    description[id(vtkImagePlaneWidget_fixed)] = vtk.vtkImagePlaneWidget

# Set docstring to wrap it correctly
vtkImagePlaneWidget_fixed.SetLookupTable.__doc__ = vtk.vtkImagePlaneWidget.SetLookupTable.__doc__
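# Minimal usage sketch (an illustration, not part of the original module; it
# assumes a working VTK build importable as above):
def _example_usage():
    widget = vtkImagePlaneWidget_fixed()
    # User control of the lookup table is now switched on automatically.
    widget.SetLookupTable(vtk.vtkLookupTable())
    return widget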
| {
"content_hash": "92d70f0a66bddb874277c2b69ccfbc93",
"timestamp": "",
"source": "github",
"line_count": 34,
"max_line_length": 97,
"avg_line_length": 40.970588235294116,
"alnum_prop": 0.7164393395549175,
"repo_name": "VisTrails/VisTrails",
"id": "b4d351965ad4b5e6268cc33af189edb53c985446",
"size": "3307",
"binary": false,
"copies": "2",
"ref": "refs/heads/v2.2",
"path": "vistrails/packages/vtk/vtk_wrapper/fix_classes.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "1129"
},
{
"name": "Makefile",
"bytes": "768"
},
{
"name": "Mako",
"bytes": "66613"
},
{
"name": "PHP",
"bytes": "49302"
},
{
"name": "Python",
"bytes": "19779006"
},
{
"name": "R",
"bytes": "782836"
},
{
"name": "Ruby",
"bytes": "875"
},
{
"name": "SQLPL",
"bytes": "2323"
},
{
"name": "Shell",
"bytes": "26542"
},
{
"name": "TeX",
"bytes": "147247"
},
{
"name": "XSLT",
"bytes": "1090"
}
],
"symlink_target": ""
} |
import re
USERS = {
"UID": 0x100000000,
"GID": 0x200000000,
"ALL": 0x400000000,
"CLUSTER": 0x800000000
}
RESOURCES = {
"VM" : 0x1000000000,
"HOST": 0x2000000000,
"NET": 0x4000000000,
"IMAGE": 0x8000000000,
"USER": 0x10000000000,
"TEMPLATE": 0x20000000000,
"GROUP": 0x40000000000,
"DATASTORE": 0x100000000000,
"CLUSTER": 0x200000000000,
"DOCUMENT": 0x400000000000,
"ZONE": 0x800000000000,
"SECGROUP": 0x1000000000000,
"VDC": 0x2000000000000,
"VROUTER": 0x4000000000000,
"MARKETPLACE": 0x8000000000000,
"MARKETPLACEAPP": 0x10000000000000,
"VMGROUP": 0x20000000000000,
"VNTEMPLATE": 0x40000000000000
}
RIGHTS = {
"USE": 0x1, # Auth. to use an object
"MANAGE": 0x2, # Auth. to perform management actions
"ADMIN": 0x4, # Auth. to perform administrative actions
"CREATE": 0x8 # Auth. to create an object
}
class OneAcl():
# Converts a string in the form [#<id>, @<id>, *] to a hex. number
#
# @param users [String] Users component string
#
# @return [String] A string containing a hex number
def parse_users(self, users):
return hex(self.calculate_ids(users))
# Converts a resources string to a hex. number
#
# @param resources [String] Resources component string
#
# @return [String] A string containing a hex number
def parse_resources(self, resources):
ret = 0
resources = resources.split("/")
if len(resources) != 2:
raise Exception("Resource '{}' malformed".format("/".join(resources)))
res = resources[0].split("+")
for resource in res:
if not resource.upper() in RESOURCES:
raise Exception("Resource '{}' does not exist".format(resource))
ret += RESOURCES[resource.upper()]
ret += self.calculate_ids(resources[1])
return hex(ret)
# Converts a rights string to a hex. number
#
# @param rights [String] Rights component string
#
# @return [String] A string containing a hex number
def parse_rights(self, rights):
ret = 0
rights = rights.split("+")
for right in rights:
if not right.upper() in RIGHTS:
raise Exception("Right '{}' does not exist".format(right))
ret += RIGHTS[right.upper()]
return hex(ret)
# Converts a string in the form [#<id>, *] to a hex. number
#
# @param zone [String] Zone component string
#
# @return [String] A string containing a hex number
def parse_zone(self, zone):
return hex(self.calculate_ids(zone))
# Parses a rule string, e.g. "#5 HOST+VM/@12 INFO+CREATE+DELETE #0"
#
# @param rule_str [String] an ACL rule in string format
#
# @return Tuple an Tuple containing 3(4) strings (hex 64b numbers)
def parse_rule(self, rule_str):
ret = []
rule_str = rule_str.split(" ")
if len(rule_str) != 3 and len(rule_str) != 4:
raise Exception("String needs three or four components: User, Resource, Rights [,Zone]")
ret.append(self.parse_users(rule_str[0]))
ret.append(self.parse_resources(rule_str[1]))
ret.append(self.parse_rights(rule_str[2]))
if len(rule_str) == 3:
return ret[0], ret[1], ret[2]
ret.append(self.parse_zone(rule_str[3]))
return ret[0], ret[1], ret[2], ret[3]
# Calculates the numeric value for a String containing an individual
# (#<id>), group (@<id>) or all (*) ID component
#
# @param id_str [String] Rule Id string
#
# @return [Integer] the numeric value for the given id_str
def calculate_ids(self, id_str):
if not re.match('^([\#@\%]\d+|\*)$', id_str):
raise Exception("ID string '{}' malformed".format(id_str))
users_value = 0
if id_str[0] == "#":
value = USERS["UID"]
users_value = int(id_str[1:]) + value
if id_str[0] == "@":
value = USERS["GID"]
users_value = int(id_str[1:]) + value
if id_str[0] == "*":
users_value = USERS["ALL"]
if id_str[0] == "%":
value = USERS["CLUSTER"]
users_value = int(id_str[1:]) + value
return users_value
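# Illustrative sketch (not part of the original module): encoding a rule with
# OneAcl.parse_rule, using only rights that exist in the RIGHTS table above.
def _example_parse_rule():
    acl = OneAcl()
    users, resources, rights, zone = acl.parse_rule("#5 HOST+VM/@12 USE+MANAGE #0")
    # users     == hex(USERS["UID"] + 5)
    # resources == hex(RESOURCES["HOST"] + RESOURCES["VM"] + USERS["GID"] + 12)
    # rights    == hex(RIGHTS["USE"] + RIGHTS["MANAGE"])
    # zone      == hex(USERS["UID"] + 0)
    return users, resources, rights, zone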
| {
"content_hash": "7237c7fc708b623e0fde54af8a6867c1",
"timestamp": "",
"source": "github",
"line_count": 147,
"max_line_length": 100,
"avg_line_length": 30.333333333333332,
"alnum_prop": 0.5608880915003364,
"repo_name": "baby-gnu/one",
"id": "bf673f4e29db29c10504fcfff822c08d739b197a",
"size": "5898",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/oca/python/pyone/acl.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Bison",
"bytes": "34829"
},
{
"name": "C",
"bytes": "177645"
},
{
"name": "C++",
"bytes": "2883899"
},
{
"name": "CSS",
"bytes": "103263"
},
{
"name": "Java",
"bytes": "384998"
},
{
"name": "JavaScript",
"bytes": "1665458"
},
{
"name": "Perl",
"bytes": "2617"
},
{
"name": "Prolog",
"bytes": "1741"
},
{
"name": "Python",
"bytes": "121964"
},
{
"name": "Ruby",
"bytes": "2190760"
},
{
"name": "Shell",
"bytes": "616311"
}
],
"symlink_target": ""
} |
"""
Created on Mon Jul 17 16:17:25 2017
@author: jorgemauricio
- Definir una clase llamada Circulo que pueda ser construida por el radio.
La clase Circulo debe de contener un método que pueda calcular el área
"""
| {
"content_hash": "39b5ab1b5902c8317ad2646abbc6b89e",
"timestamp": "",
"source": "github",
"line_count": 8,
"max_line_length": 75,
"avg_line_length": 27.5,
"alnum_prop": 0.7454545454545455,
"repo_name": "jorgemauricio/INIFAP_Course",
"id": "9cae67656dc0a8f36362ce6621e6fd461d3eaf65",
"size": "269",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Exams/ejercicio_14.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "11052861"
},
{
"name": "Python",
"bytes": "59081"
}
],
"symlink_target": ""
} |
from distutils.core import setup
setup(name='TUF',
version='0.0',
description='A secure updater framework for Python',
author='lots of people',
url='https://updateframework.com',
packages=['tuf',
'tuf.repo',
'tuf.client',
'tuf.pushtools',
'tuf.pushtools.transfer',
'simplejson'],
scripts=['quickstart.py', 'tuf/pushtools/push.py', 'tuf/pushtools/receivetools/receive.py', 'tuf/repo/signercli.py']
)
| {
"content_hash": "2e08ffa68653e1a5512fd5f7df48b185",
"timestamp": "",
"source": "github",
"line_count": 15,
"max_line_length": 122,
"avg_line_length": 30.333333333333332,
"alnum_prop": 0.643956043956044,
"repo_name": "monzum/tuf-legacy",
"id": "8a30b6be764bcc56bc1ee8c307b67d29e83deabf",
"size": "479",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/TUF/src/setup.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "109811"
},
{
"name": "Python",
"bytes": "703260"
},
{
"name": "Shell",
"bytes": "867"
}
],
"symlink_target": ""
} |
from django import template
from library.ui.models import Media
from settings import BASE_URL, MEDIA_URL, MEDIA_ROOT
import os
import string

register = template.Library()


@register.filter
def thumbnail(media):
    location = media.locationSingularString.strip(string.lowercase)
    name = MEDIA_URL + "thumbnails/" + location + ".jpg"
    path = MEDIA_ROOT + name
    if not os.path.exists(path):
        name = MEDIA_URL + "dl2/" + media.uuidString + "-256.png"
        path = MEDIA_ROOT + "/" + name
    if not os.path.exists(path):
        name = MEDIA_URL + "NoFrontCover256.png"
    return BASE_URL + name
| {
"content_hash": "02b492e2c37d724357aea67224b9b53d",
"timestamp": "",
"source": "github",
"line_count": 21,
"max_line_length": 67,
"avg_line_length": 29.19047619047619,
"alnum_prop": 0.6802610114192496,
"repo_name": "pusateri/vsd",
"id": "2f07adedded63bf22a97f31da8d84a102dc2d108",
"size": "613",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "webapp/ui/templatetags/thumbnail.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "37699"
},
{
"name": "HTML",
"bytes": "21142"
},
{
"name": "JavaScript",
"bytes": "113705"
},
{
"name": "Python",
"bytes": "49286"
}
],
"symlink_target": ""
} |
"""
@package ion.agents.platform.rsn.test.oms_simple
@file ion/agents/platform/rsn/test/oms_simple.py
@author Carlos Rueda
@brief Program that connects to the real RSN OMS endpoint to do basic
verification of the operations. Note that VPN is required.
Also, port 5000 on the localhost (via corresponding fully-qualified
domain name as returned by socket.getfqdn()) needs to be accessible
from OMS for the event notification to be received here.
For usage, call:
bin/python ion/agents/platform/rsn/test/oms_simple.py --help
@see https://confluence.oceanobservatories.org/display/CIDev/RSN+OMS+endpoint+implementation+verification
@see https://confluence.oceanobservatories.org/display/syseng/CIAD+MI+SV+CI-OMS+interface
"""
__author__ = 'Carlos Rueda'
__license__ = 'Apache 2.0'
from ion.agents.platform.rsn.oms_event_listener import OmsEventListener
from ion.agents.platform.responses import InvalidResponse
from pyon.util.breakpoint import breakpoint
import xmlrpclib
import sys
import pprint
import socket
DEFAULT_RSN_OMS_URI = "http://alice:[email protected]:9021/"
DEFAULT_MAX_WAIT = 70
INVALID_PLATFORM_ID = InvalidResponse.PLATFORM_ID
# use full-qualified domain name as the external host for the registration
HTTP_SERVER_HOST = socket.getfqdn()
HTTP_SERVER_PORT = 5000
EVENT_LISTENER_URL = "http://%s:%d/oms" % (HTTP_SERVER_HOST, HTTP_SERVER_PORT)
# max time to wait to receive the test event
max_wait = 0
# launch IPython shell?
launch_breakpoint = False
tried = {}
def launch_listener(): # pragma: no cover
def notify_driver_event(evt):
print("notify_driver_event received: %s" % str(evt.event_instance))
print 'launching listener, port=%d ...' % HTTP_SERVER_PORT
oms_event_listener = OmsEventListener("dummy_plat_id", notify_driver_event)
oms_event_listener.keep_notifications()
oms_event_listener.start_http_server(host='', port=HTTP_SERVER_PORT)
print 'listener launched'
return oms_event_listener
def main(uri): # pragma: no cover
oms_event_listener = launch_listener()
print '\nconnecting to %r ...' % uri
proxy = xmlrpclib.ServerProxy(uri, allow_none=True)
print 'connection established.'
pp = pprint.PrettyPrinter()
def show_listeners():
from datetime import datetime
from ion.agents.platform.util import ntp_2_ion_ts
event_listeners = proxy.event.get_registered_event_listeners()
print("Event listeners (%d):" % len(event_listeners))
for a, b in sorted(event_listeners.iteritems(),
lambda a, b: int(a[1] - b[1])):
time = datetime.fromtimestamp(float(ntp_2_ion_ts(b)) / 1000)
print(" %s %s" % (time, a))
print
def format_val(value):
prefix = "\t\t"
print "\n%s%s" % (prefix, pp.pformat(value).replace("\n", "\n" + prefix))
def format_err(msg):
prefix = "\t\t"
print "\n%s%s" % (prefix, msg.replace("\n", "\n" + prefix))
def get_method(handler_name, method_name):
"""
Gets the method from the proxy.
@param handler_name Name of the handler; can be None to indicate get
method directly from proxy.
@param method_name Method's name
@return callable; None if any error getting the method
"""
# get method:
if handler_name:
# get handler:
try:
handler = getattr(proxy, handler_name)
except Exception as e:
print "error getting handler %s: %s: %s" % (handler_name, type(e), str(e))
return None
try:
method = getattr(handler, method_name)
return method
except Exception as e:
print "error method %s.%s: %s: %s" % (handler_name, method_name, type(e), str(e))
return None
else:
try:
method = getattr(proxy, method_name)
return method
except Exception as e:
print "error getting proxy's method %s: %s: %s" % (method_name, type(e), str(e))
return None
def run(full_method_name, *args):
"""
Runs a method against the proxy.
@param full_method_name
@param args
"""
global tried
tried[full_method_name] = ""
handler_name, method_name = full_method_name.split(".")
# get the method
method = get_method(handler_name, method_name)
if method is None:
tried[full_method_name] = "could not get handler or method"
return
sargs = ", ".join(["%r" % a for a in args])
sys.stdout.write("\n%s(%s) -> " % (full_method_name, sargs))
sys.stdout.flush()
# run method
retval, reterr = None, None
try:
retval = method(*args)
tried[full_method_name] = "OK"
# print "%r" % retval
format_val(retval)
except xmlrpclib.Fault as e:
if e.faultCode == 8001:
reterr = "-- NOT FOUND (fault %s)" % e.faultCode
else:
reterr = "-- Fault %d: %s" % (e.faultCode, e.faultString)
# raise
# print "Exception: %s: %s" % (type(e), str(e))
# tried[full_method_name] = str(e)
tried[full_method_name] = reterr
format_err(reterr)
return retval, reterr
def verify_entry_in_dict(retval, reterr, entry):
if reterr is not None:
return retval, reterr
if not isinstance(retval, dict):
reterr = "-- expecting a dict with entry %r" % entry
elif entry not in retval:
reterr = "-- expecting a dict with entry %r" % entry
else:
retval = retval[entry]
print("full_method_name = %s" % full_method_name)
if reterr:
tried[full_method_name] = reterr
format_err(reterr)
return retval, reterr
def verify_test_event_notified(retval, reterr, event):
print("waiting for a max of %d secs for test event to be notified..." % max_wait)
import time
wait_until = time.time() + max_wait
got_it = False
while not got_it and time.time() <= wait_until:
time.sleep(1)
for evt in oms_event_listener.notifications:
if event['message'] == evt['message']:
got_it = True
break
# print("Received external events: %s" % oms_event_listener.notifications)
if not got_it:
reterr = "error: didn't get expected test event notification within %d " \
"secs. (Got %d event notifications.)" % (
max_wait, len(oms_event_listener.notifications))
print("full_method_name = %s" % full_method_name)
if reterr:
tried[full_method_name] = reterr
format_err(reterr)
return retval, reterr
show_listeners()
if launch_breakpoint:
breakpoint(locals())
print "Basic verification of the operations:\n"
#----------------------------------------------------------------------
full_method_name = "hello.ping"
retval, reterr = run(full_method_name)
if retval and retval.lower() != "pong":
error = "expecting 'pong'"
tried[full_method_name] = error
format_err(error)
#----------------------------------------------------------------------
full_method_name = "config.get_platform_types"
retval, reterr = run(full_method_name)
if retval and not isinstance(retval, dict):
error = "expecting a dict"
tried[full_method_name] = error
format_err(error)
platform_id = "dummy_platform_id"
#----------------------------------------------------------------------
full_method_name = "config.get_platform_map"
retval, reterr = run(full_method_name)
if retval is not None:
if isinstance(retval, list):
if len(retval):
if isinstance(retval[0], (tuple, list)):
platform_id = retval[0][0]
else:
reterr = "expecting a list of tuples or lists"
else:
reterr = "expecting a non-empty list"
else:
reterr = "expecting a list"
if reterr:
tried[full_method_name] = reterr
format_err(reterr)
#----------------------------------------------------------------------
full_method_name = "config.get_platform_metadata"
retval, reterr = run(full_method_name, platform_id)
retval, reterr = verify_entry_in_dict(retval, reterr, platform_id)
#----------------------------------------------------------------------
full_method_name = "attr.get_platform_attributes"
retval, reterr = run(full_method_name, platform_id)
retval, reterr = verify_entry_in_dict(retval, reterr, platform_id)
#----------------------------------------------------------------------
full_method_name = "attr.get_platform_attribute_values"
retval, reterr = run(full_method_name, platform_id, [])
retval, reterr = verify_entry_in_dict(retval, reterr, platform_id)
#----------------------------------------------------------------------
full_method_name = "attr.set_platform_attribute_values"
retval, reterr = run(full_method_name, platform_id, {})
retval, reterr = verify_entry_in_dict(retval, reterr, platform_id)
port_id = "dummy_port_id"
#----------------------------------------------------------------------
full_method_name = "port.get_platform_ports"
retval, reterr = run(full_method_name, platform_id)
retval, reterr = verify_entry_in_dict(retval, reterr, platform_id)
if retval is not None:
if isinstance(retval, dict):
if len(retval):
port_id = retval.keys()[0]
else:
reterr = "empty dict of ports for platform %r" % platform_id
else:
reterr = "expecting a dict {%r: ...}. got: %s" % (platform_id, type(retval))
if reterr:
tried[full_method_name] = reterr
format_err(reterr)
instrument_id = "dummy_instrument_id"
if reterr is None:
full_method_name = "port.get_platform_ports"
retval, reterr = run(full_method_name, "dummy_platform_id")
orig_retval = retval
retval, reterr = verify_entry_in_dict(retval, reterr, "dummy_platform_id")
if retval != INVALID_PLATFORM_ID:
reterr = "expecting dict {%r: %r}. got: %r" % (
"dummy_platform_id", INVALID_PLATFORM_ID, orig_retval)
tried[full_method_name] = reterr
format_err(reterr)
instrument_id = "dummy_instrument_id"
#----------------------------------------------------------------------
full_method_name = "instr.connect_instrument"
retval, reterr = run(full_method_name, platform_id, port_id, instrument_id, {})
retval, reterr = verify_entry_in_dict(retval, reterr, platform_id)
retval, reterr = verify_entry_in_dict(retval, reterr, port_id)
retval, reterr = verify_entry_in_dict(retval, reterr, instrument_id)
connect_instrument_error = reterr
#----------------------------------------------------------------------
full_method_name = "instr.get_connected_instruments"
retval, reterr = run(full_method_name, platform_id, port_id)
retval, reterr = verify_entry_in_dict(retval, reterr, platform_id)
retval, reterr = verify_entry_in_dict(retval, reterr, port_id)
# note, in case of error in instr.connect_instrument, don't expect the
# instrument_id to be reported:
if connect_instrument_error is None:
retval, reterr = verify_entry_in_dict(retval, reterr, instrument_id)
#----------------------------------------------------------------------
full_method_name = "instr.disconnect_instrument"
retval, reterr = run(full_method_name, platform_id, port_id, instrument_id)
retval, reterr = verify_entry_in_dict(retval, reterr, platform_id)
retval, reterr = verify_entry_in_dict(retval, reterr, port_id)
retval, reterr = verify_entry_in_dict(retval, reterr, instrument_id)
#----------------------------------------------------------------------
full_method_name = "port.turn_on_platform_port"
retval, reterr = run(full_method_name, platform_id, port_id)
#----------------------------------------------------------------------
full_method_name = "port.turn_off_platform_port"
retval, reterr = run(full_method_name, platform_id, port_id)
#----------------------------------------------------------------------
url = EVENT_LISTENER_URL
#----------------------------------------------------------------------
full_method_name = "event.register_event_listener"
retval, reterr = run(full_method_name, url)
retval, reterr = verify_entry_in_dict(retval, reterr, url)
#----------------------------------------------------------------------
full_method_name = "event.get_registered_event_listeners"
retval, reterr = run(full_method_name)
urls = retval
retval, reterr = verify_entry_in_dict(retval, reterr, url)
#----------------------------------------------------------------------
full_method_name = "event.unregister_event_listener"
if isinstance(urls, dict):
        # this part is just a convenience to unregister listeners that were
        # left registered by an error in a prior interaction.
prefix = "http://127.0.0.1:" # or some other needed prefix
for url2 in urls:
if url2.find(prefix) >= 0:
retval, reterr = run(full_method_name, url2)
retval, reterr = verify_entry_in_dict(retval, reterr, url2)
if reterr is not None:
break
if reterr is None:
retval, reterr = run(full_method_name, url)
retval, reterr = verify_entry_in_dict(retval, reterr, url)
#----------------------------------------------------------------------
full_method_name = "config.get_checksum"
retval, reterr = run(full_method_name, platform_id)
    # the following is to specifically verify reception of the test event
if max_wait:
full_method_name = "event.register_event_listener"
retval, reterr = run(full_method_name, EVENT_LISTENER_URL)
retval, reterr = verify_entry_in_dict(retval, reterr, EVENT_LISTENER_URL)
full_method_name = "event.generate_test_event"
event = {
'message' : "fake event triggered from CI using OMS' generate_test_event",
'platform_id' : "fake_platform_id",
'severity' : "3",
'group ' : "power",
}
retval, reterr = run(full_method_name, event)
if max_wait:
verify_test_event_notified(retval, reterr, event)
full_method_name = "event.unregister_event_listener"
retval, reterr = run(full_method_name, EVENT_LISTENER_URL)
retval, reterr = verify_entry_in_dict(retval, reterr, EVENT_LISTENER_URL)
elif not reterr:
ok_but = "OK (but verification of event reception was not performed)"
tried[full_method_name] = ok_but
format_err(ok_but)
show_listeners()
#######################################################################
print("\nSummary of basic verification:")
okeys = 0
for full_method_name, result in sorted(tried.iteritems()):
print("%20s %-40s: %s" % ("", full_method_name, result))
if result.startswith("OK"):
okeys += 1
print("OK methods %d out of %s" % (okeys, len(tried)))
if __name__ == "__main__": # pragma: no cover
import argparse
parser = argparse.ArgumentParser(description="Basic CI-OMS verification program")
parser.add_argument("-u", "--uri",
help="RSN OMS URI (default: %s)" % DEFAULT_RSN_OMS_URI,
default=DEFAULT_RSN_OMS_URI)
parser.add_argument("-w", "--wait",
help="Max wait time for test event (default: %d)" % DEFAULT_MAX_WAIT,
default=DEFAULT_MAX_WAIT)
parser.add_argument("-b", "--breakpoint",
help="Launch IPython shell at beginning",
action='store_const', const=True)
opts = parser.parse_args()
uri = opts.uri
max_wait = int(opts.wait)
launch_breakpoint = bool(opts.breakpoint)
main(uri)
| {
"content_hash": "4eaf30bae8b0006cc5c880586fe4587d",
"timestamp": "",
"source": "github",
"line_count": 437,
"max_line_length": 109,
"avg_line_length": 38.11212814645309,
"alnum_prop": 0.551606124287001,
"repo_name": "mikeh77/mi-instrument",
"id": "a228e7c71dd846cba1900f9140713e941e4ac2c6",
"size": "16678",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "mi/platform/rsn/test/oms_simple.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "7381630"
},
{
"name": "Shell",
"bytes": "22"
}
],
"symlink_target": ""
} |
import os
import argparse
import numpy as np
import matplotlib.pyplot as plt
from sklearn.metrics import euclidean_distances
from sklearn.manifold import MDS
from output.emission_prob import plain_posttype_txt
from sequences.label_dictionary import LabelDictionary
from readers.vocab import read_vocab
from util.util import nparr_to_str
def get_w_indices(targets, vocab):
if not targets:
return {}
w_dict = LabelDictionary(read_vocab(vocab))
return {w_dict.get_label_id(t) for t in targets if t in w_dict}
def get_w_reps(idx, w_reps, vocab):
ws = []
reps = []
if not idx:
return ws, reps
w_dict = LabelDictionary(read_vocab(vocab))
for w, rep in w_reps:
if w_dict.get_label_id(w) in idx:
assert not np.isnan(np.sum(rep))
ws.append(w)
reps.append(rep)
return ws, reps
def get_twodim_reps(reps, seed, distance=euclidean_distances):
reps = reps.astype(np.float64)
similarities = distance(reps)
mds = MDS(n_components=2, dissimilarity="precomputed", random_state=seed)
return mds.fit(similarities).embedding_
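# get_twodim_reps computes pairwise euclidean distances between the input
# vectors and embeds them in 2-D with metric MDS; dissimilarity="precomputed"
# means the distance matrix, not the raw vectors, is what fit() receives.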
def plot(scaled, ws):
fig, ax = plt.subplots()
ax.scatter(scaled[:, 0], scaled[:, 1])
for i, w in enumerate(ws):
ax.annotate(w, (scaled[i, 0], scaled[i, 1]))
return fig
def write_fig_data(reps, ws, outfile):
with open(outfile, "w") as out:
for w, arr in zip(ws, reps):
out.write("{} {}\n".format(w, nparr_to_str(arr)))
def expand_w_reps(rep_file, ws, reps):
if rep_file is not None:
with open(rep_file) as infile:
for l in infile:
w, rep = l.strip().split(" ", 1)
num_rep = np.array(rep.split()).astype("f")
assert not np.isnan(np.sum(num_rep))
ws.append(w)
reps.append(num_rep)
return ws, np.array(reps)
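# The optional incontext file is expected to hold one "word v1 v2 ... vn"
# line per entry; those vectors are appended to the posterior-type
# representations before scaling.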
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("-vocab", help="vocabulary file for the (hmm) word representations")
parser.add_argument("-posttypes", help="npy file containing posterior types")
parser.add_argument("-targets", nargs="+", help="target words to scale down")
parser.add_argument("-incontext_file",
help="incontext representations for words (optional). These will be added to posterior types")
parser.add_argument("-outfile", help="file path to write the plot to")
args = parser.parse_args()
if args.targets is None:
targets = set()
print("No targets specified, using vectors from incontext_file")
else:
targets = set(args.targets)
outfile = os.path.splitext(args.outfile)[0] if args.outfile.endswith(".pdf") else args.outfile
m, n, w_reps = plain_posttype_txt(posttype_f=args.posttypes, vocab_f=args.vocab, threedim=False, vocab_r=None)
idx = get_w_indices(targets, args.vocab)
ws, reps = expand_w_reps(args.incontext_file, *get_w_reps(idx, w_reps, args.vocab))
scaled = get_twodim_reps(reps, seed=np.random.RandomState(seed=3)) # a m*2-dim np array
assert len(ws) == scaled.shape[0]
fig = plot(scaled, ws)
fig.savefig("{}.pdf".format(outfile))
write_fig_data(scaled, ws, outfile)
| {
"content_hash": "591cc8fab420ec687655dd3f7341f4c4",
"timestamp": "",
"source": "github",
"line_count": 100,
"max_line_length": 118,
"avg_line_length": 32.53,
"alnum_prop": 0.6387949584998462,
"repo_name": "rug-compling/hmm-reps",
"id": "29bf5635b315d6d38f8bcdfeb51bf510f9ddb77e",
"size": "3253",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "output/multidim_scaling.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Perl",
"bytes": "12728"
},
{
"name": "Python",
"bytes": "341860"
}
],
"symlink_target": ""
} |
from model import TripletEmbedding
import numpy as np
import logging
logging.basicConfig(level=logging.INFO)
te = TripletEmbedding(100, 20, 5, 3, [4], learning_rate=10e-2, sigma=10e-4)
subjects = np.random.randint(0, 100, size=1000)
objects = np.random.randint(0, 100, size=1000)
predicates = np.random.randint(0, 20, size=1000)
labels = 2 * (-0.5 + np.random.randint(0, 2, size=1000))
te.fit(subjects, objects, predicates, labels)
print(te.get_entity_embedding([1,2,3,4,5,6]))
print(te.get_relation_embedding([1,2,3]))
print(te.get_estimated_embedding([1, 3, 5], [2, 4, 6]))
| {
"content_hash": "0734bc99a46535f1398acf483a08c921",
"timestamp": "",
"source": "github",
"line_count": 21,
"max_line_length": 75,
"avg_line_length": 27.80952380952381,
"alnum_prop": 0.708904109589041,
"repo_name": "nukui-s/TripletEmbedding",
"id": "f702f70a23f3467cebe8751358af95555d5e6ac1",
"size": "584",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tripletembed/test_model.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "674560"
},
{
"name": "Python",
"bytes": "16064"
}
],
"symlink_target": ""
} |
"""Load and Unload all GitGutter modules.
This module exports __all__ modules, which Sublime Text needs to know about.
The list of __all__ exported symbols is defined in modules/__init__.py.
"""
import sublime
if int(sublime.version()) < 3176:
print('GitGutter requires ST3 3176+')
else:
import sys
prefix = __package__ + '.' # don't clear the base package
for module_name in [
module_name for module_name in sys.modules
if module_name.startswith(prefix) and module_name != __name__]:
del sys.modules[module_name]
prefix = None
from .modules import *
| {
"content_hash": "13fb783cc568009bcea7b6a45832332c",
"timestamp": "",
"source": "github",
"line_count": 20,
"max_line_length": 76,
"avg_line_length": 30.6,
"alnum_prop": 0.6552287581699346,
"repo_name": "jisaacks/GitGutter",
"id": "91540dee49e5e0486dec34e573c37f391a4975db",
"size": "636",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "plugin.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "4556"
},
{
"name": "Makefile",
"bytes": "171"
},
{
"name": "Python",
"bytes": "171127"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
import readtime
AUTHOR = 'David Jenkins'
SITENAME = 'DJenkins Dev'
SITEURL = 'http://localhost:8000'
PATH = 'content'
TIMEZONE = 'America/New_York'
DEFAULT_LANG = 'en'
# Feed generation is usually not desired when developing
FEED_ALL_ATOM = None
CATEGORY_FEED_ATOM = None
TRANSLATION_FEED_ATOM = None
AUTHOR_FEED_ATOM = None
AUTHOR_FEED_RSS = None
LOAD_CONTENT_CACHE = False
# Blogroll
LINKS = ()
# Social widget
SOCIAL = ()
DEFAULT_PAGINATION = False
THEME = 'themes/djenkinsdev_theme'
PLUGINS = [readtime]
# Uncomment following line if you want document-relative URLs when developing
#RELATIVE_URLS = True
| {
"content_hash": "d64976236ddc6eb1ff7e34d0f4f24167",
"timestamp": "",
"source": "github",
"line_count": 37,
"max_line_length": 77,
"avg_line_length": 17.864864864864863,
"alnum_prop": 0.7397881996974282,
"repo_name": "JenkinsDev/DJenkinsDev",
"id": "ecbbc7c91d95285ddd7344ff5bf4f182dbbd4684",
"size": "709",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pelicanconf.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "8515"
},
{
"name": "HTML",
"bytes": "6121"
},
{
"name": "JavaScript",
"bytes": "1873"
},
{
"name": "Makefile",
"bytes": "2762"
},
{
"name": "Python",
"bytes": "1286"
}
],
"symlink_target": ""
} |
class HuluError(Exception):
pass
| {
"content_hash": "022dc1c887182fee0331eb3fa00aa150",
"timestamp": "",
"source": "github",
"line_count": 2,
"max_line_length": 27,
"avg_line_length": 18.5,
"alnum_prop": 0.7297297297297297,
"repo_name": "michaelhelmick/hulu",
"id": "a3399b5e3df750e541cc2e7ac8adf0ca72862b1b",
"size": "37",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "hulu/exceptions.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "4646"
}
],
"symlink_target": ""
} |
import os
from flask import Flask
from flask import render_template
from flask import url_for
from flask import request
from twilio import twiml
from twilio.util import TwilioCapability
# Declare and configure application
app = Flask(__name__, static_url_path='/static')
app.config.from_pyfile('local_settings.py')
# Voice Request URL
@app.route('/voice', methods=['GET', 'POST'])
def voice():
response = twiml.Response()
response.say("Congratulations! You deployed the Twilio Hackpack"
" for Heroku and Flask.")
return str(response)
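# The handler above returns serialized TwiML, roughly
# <Response><Say>Congratulations! ...</Say></Response>, which Twilio reads
# back to the caller.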
# SMS Request URL
@app.route('/sms', methods=['GET', 'POST'])
def sms():
response = twiml.Response()
response.sms("Congratulations! You deployed the Twilio Hackpack"
" for Heroku and Flask.")
return str(response)
# Twilio Client demo template
@app.route('/client')
def client():
configuration_error = None
for key in ('TWILIO_ACCOUNT_SID', 'TWILIO_AUTH_TOKEN', 'TWILIO_APP_SID',
'TWILIO_CALLER_ID'):
if not app.config[key]:
configuration_error = "Missing from local_settings.py: %s" % key
token = None
if not configuration_error:
capability = TwilioCapability(app.config['TWILIO_ACCOUNT_SID'],
app.config['TWILIO_AUTH_TOKEN'])
capability.allow_client_incoming("joey_ramone")
capability.allow_client_outgoing(app.config['TWILIO_APP_SID'])
token = capability.generate()
params = {'token': token}
return render_template('client.html', params=params,
configuration_error=configuration_error)
# Installation success page
@app.route('/')
def index():
params = {
'Voice Request URL': url_for('.voice', _external=True),
'SMS Request URL': url_for('.sms', _external=True),
'Client URL': url_for('.client', _external=True)}
return render_template('index.html', params=params,
configuration_error=None)
# If PORT not specified by environment, assume development config.
if __name__ == '__main__':
port = int(os.environ.get("PORT", 5000))
if port == 5000:
app.debug = True
app.run(host='0.0.0.0', port=port)
| {
"content_hash": "cc80762cc084978e69f52430ea0d0ae8",
"timestamp": "",
"source": "github",
"line_count": 71,
"max_line_length": 76,
"avg_line_length": 31.633802816901408,
"alnum_prop": 0.6340160284951024,
"repo_name": "jpf/Twilio-SxSW-game",
"id": "8957fd038119de886d5e04b4310c970dac3f18fd",
"size": "2246",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "old-app.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "27373"
}
],
"symlink_target": ""
} |
"""
Module for ordering errata.
"""
import datetime
import os
import copy
import time
import logging
from dateutil import parser as dateutil_parser
from dateutil import tz as dateutil_tz
from updatebot import update
from updatebot import conaryhelper
from updatebot.errors import MissingErrataError
from updatebot.errors import ErrataPackageNotFoundError
from updatebot.errors import ErrataSourceDataMissingError
from updatebot.errors import PackageNotFoundInBucketError
from updatebot.errors import AdvisoryPackageMissingFromBucketError
# update errors
from updatebot.errors import UpdateGoesBackwardsError
from updatebot.errors import UpdateRemovesPackageError
from updatebot.errors import UpdateReusesPackageError
# Fix default type of _findTrovesCache
from updatebot.lib.findtroves import FindTrovesCache
log = logging.getLogger('updatebot.errata')
def loadErrata(func):
def wrapper(self, *args, **kwargs):
if not self._order:
self._orderErrata()
return func(self, *args, **kwargs)
return wrapper
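# loadErrata is a lazy-initialization decorator: the first call to any
# decorated ErrataFilter method triggers _orderErrata(), so the ordering and
# advisory maps are built on demand rather than in __init__.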
class ErrataFilter(object):
"""
Filter data from a given errataSource in chronological order.
"""
def __init__(self, cfg, ui, pkgSource, errataSource):
self._cfg = cfg
self._ui = ui
self._pkgSource = pkgSource
self._errata = errataSource
# timestamp: srcPkg Set
self._order = {}
# timestamp: advisory info
self._advMap = {}
# advisory: nevras
self._advPkgMap = {}
# nevra: advisories
self._advPkgRevMap = {}
@loadErrata
def getInitialPackages(self):
"""
Get the initial set of packages.
"""
return self._order[0]
@loadErrata
def getUpdateDetail(self, bucketId):
"""
        Given an errata timestamp, look up the name and summary.
"""
return [ dict(x) for x in self._advMap.get(bucketId, tuple()) ]
@loadErrata
def getUpdateDetailMessage(self, bucketId):
"""
        Given an errata timestamp, create a name and summary message.
"""
if bucketId in self._advMap:
msg = ''
for adv in self._advMap[bucketId]:
msg += '(%(name)s: %(summary)s) ' % dict(adv)
return msg
else:
return '%s (no detail found)' % bucketId
@loadErrata
def getAdvisoryPackages(self, advisory):
"""
        Given a valid advisory name, return a set of applicable source package
objects.
"""
return self._advPkgMap.get(advisory, set())
@loadErrata
def getVersions(self, bucketId):
"""
Get a set of group versions that should be built for the given bucketId.
@param bucketId: identifier for a given update slice
@type bucketId: integer (unix time)
"""
versions = dict()
for advInfo in self.getUpdateDetail(bucketId):
advisory = advInfo['name']
versions[advisory] = self._errata.getGroupVersion(advisory)
return versions
@loadErrata
def getNames(self, bucketId):
"""
Get a map of group names by advisory.
"""
names = dict()
for advInfo in self.getUpdateDetail(bucketId):
advisory = advInfo['name']
names[advisory] = self._errata.getGroupName(advisory)
return names
def getBucketVersion(self, bucketId):
"""
Convert a bucketId to a conary version.
@param bucketId: identifier for a given update slice
@type bucketId: integer (unix time)
"""
version = self._cfg.upstreamVersionMap.get(bucketId, None)
if not version:
version = time.strftime('%Y.%m.%d_%H%M.%S', time.gmtime(bucketId))
return version
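    # For example, a bucketId falling at 2012-06-27 16:00:00 UTC yields the
    # version string '2012.06.27_1600.00', unless upstreamVersionMap supplies
    # an explicit value for that bucketId.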
@loadErrata
def getModifiedErrata(self, current):
"""
Get all updates that were issued before current, but have been modified
after current.
@param current: the current state, start iterating after this state has
been reached.
@type current: int
"""
# Get modified errata from the model
modified = self._errata.getModifiedErrata(current)
# Map this errata to srpms
modMap = {}
for e in modified:
advisory = e.advisory
last_modified = e.last_modified_date
issue_date = e.issue_date
pkgs = self._advPkgMap[advisory]
assert advisory not in modMap
modMap[advisory] = {
'advisory': advisory,
'last_modified': last_modified,
'issue_date': issue_date,
'srpms': pkgs,
}
return modMap
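    # The returned map is keyed by advisory name; each value bundles the
    # advisory name, its issue and last-modified dates, and the set of source
    # packages the advisory touches.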
@loadErrata
def iterByIssueDate(self, current=None):
"""
Yield sets of srcPkgs by errata release date.
@param current: the current state, start iterating after this state has
been reached.
@type current: int
"""
for stamp in sorted(self._order.keys()):
if current >= stamp:
continue
yield stamp, self._order[stamp]
@loadErrata
def sanityCheckOrder(self):
"""
Validate the update order for:
1. package revisions going backwards
2. packages being removed
3. same package in bucket multiple times
4. obsolete packages still included in groups
Raise an exception if sanity checks are not satisfied.
"""
log.info('sanity checking ordering')
def tconv(stamp):
return time.strftime('%m-%d-%Y %H:%M:%S', time.gmtime(stamp))
def rhnUrl(errataId):
errataId = errataId.replace(':', '-')
return 'http://rhn.redhat.com/errata/%s.html' % errataId
def rhnUrls(errataSet):
return ' '.join(rhnUrl(x) for x in errataSet)
# duplicate updater and pkgsource so as to not change state.
pkgSource = copy.copy(self._pkgSource)
updater = update.Updater(self._cfg, self._ui, pkgSource)
updater._conaryhelper = _ConaryHelperShim(self._cfg)
if self._cfg.platformSearchPath:
log.info('prefetching sources for parent platform labels')
for label in self._cfg.platformSearchPath:
updater._conaryhelper.cacheSources(label, latest=False)
log.info('prefetching findTroves information for parent platform '
'labels')
updater._conaryhelper.populateFindTrovesCache(
self._cfg.platformSearchPath)
        # build a mapping of srpm to bucketId for debugging purposes
srpmToBucketId = {}
for bucketId, srpms in self._order.iteritems():
for srpm in srpms:
srpmToBucketId[srpm] = bucketId
        # build a mapping of srpm to advisory for debugging purposes
srpmToAdvisory = {}
for advisory, srpms in self._advPkgMap.iteritems():
for srpm in srpms:
srpmToAdvisory.setdefault(srpm, set()).add(advisory)
# convert keepObsolete config into set of edges
keepObsolete = set(self._cfg.keepObsolete)
keepObsoleteSource = set(self._cfg.keepObsoleteSource)
errors = {}
        # Make sure there are no buckets that contain the same srpm name twice.
for updateId, srpms in sorted(self._order.iteritems()):
seen = {}
dups = {}
for srpm in srpms:
if srpm.name in seen:
thisdup = dups.setdefault(srpm.name, set())
thisdup.add(seen[srpm.name])
thisdup.add(srpm)
else:
seen[srpm.name] = srpm
continue
if dups:
log.error('found duplicates in %s' % updateId)
errors.setdefault(updateId, []).append(('duplicates', dups))
        # Play through the entire update history to check for irregularities.
current = {}
childPackages = []
parentPackages = []
removals = self._cfg.updateRemovesPackages
replaces = self._cfg.updateReplacesPackages
downgraded = self._cfg.allowPackageDowngrades
currentlyRemovedBinaryNevras = set()
foundObsoleteEdges = set()
foundObsoleteSrcs = set()
for updateId in sorted(self._order.keys()):
log.info('validating %s' % updateId)
expectedRemovals = removals.get(updateId, [])
expectedReplaces = replaces.get(updateId, [])
expectedKeepRemovals = self._cfg.keepRemoved.get(updateId, [])
explicitSourceRemovals = self._cfg.removeSource.get(updateId, set())
explicitBinaryRemovals = self._cfg.removeObsoleted.get(updateId, set())
explicitIgnoreSources = self._cfg.ignoreSourceUpdate.get(updateId, set())
if explicitIgnoreSources:
log.info('explicitly ignoring %s in update %s' %
(explicitIgnoreSources, updateId))
explicitPackageDowngrades = downgraded.get(updateId, None)
assert len(self._order[updateId])
for srpm in self._order[updateId]:
nvf = (srpm.name, None, None)
# validate updates
try:
toUpdate = updater._sanitizeTrove(nvf, srpm,
expectedRemovals=expectedRemovals + expectedReplaces,
allowPackageDowngrades=explicitPackageDowngrades,
keepRemovedPackages=expectedKeepRemovals)
# If a source was manually added to this updateId it may
# have already been part of another update, which would
# cause the manifest not to change.
if (srpm.getNevra() not in
self._cfg.addSource.get(updateId, [])):
assert toUpdate
except (UpdateGoesBackwardsError,
UpdateRemovesPackageError,
UpdateReusesPackageError), e:
errors.setdefault(updateId, []).append(e)
# apply update to checkout
if srpm.getNevra() in explicitSourceRemovals:
log.error('Removing %s in %s would cause it never to be promoted' %
(str(' '.join(srpm.getNevra())), updateId))
if srpm.getNevra() in explicitIgnoreSources:
log.warn('Ignoring %s in %s will cause it never to be promoted' %
(str(' '.join(srpm.getNevra())), updateId))
else:
current[srpm.name] = srpm
version = updater.update(nvf, srpm)
assert (not version or
not updater.isPlatformTrove(version))
if version:
parentPackages.append(((nvf, srpm), version))
else:
childPackages.append(((nvf, srpm), None))
# all package names obsoleted by packages in the current set
obsoleteNames = set()
obsoleteBinaries = set()
obsoleteSources = set()
obsoletingPkgMap = {}
pkgNames = set()
pkgNameMap = {}
srpmNameMap = {}
# Create maps for processing obsoletes
for srpm in sorted(current.itervalues()):
if srpm.getNevra() in explicitSourceRemovals:
current.pop(srpm.name, None)
continue
if srpm.getNevra() in explicitIgnoreSources:
log.info('explicitly ignoring source package update %s' % [explicitIgnoreSources])
continue
for pkg in sorted(self._pkgSource.srcPkgMap[srpm]):
if pkg.arch == 'src':
continue
pkgNames.add(pkg.name)
pkgNameMap[pkg.name] = pkg
if pkg in self._pkgSource.obsoletesMap:
pkgObsoleteNames = self._pkgSource.obsoletesMap[pkg]
for obsoleteName in pkgObsoleteNames:
obsoletingPkgMap[obsoleteName] = pkg
obsoleteNames.add(obsoleteName)
# packages that really moved from one source to another
removedShouldBeReplaced = set(expectedRemovals) & pkgNames
for obsoleteName in explicitBinaryRemovals:
# these nevra-nevra edges are already handled in config,
# do not report them in the wrong bucket
obsoletingPkg = obsoletingPkgMap[obsoleteName]
obsoletedPkg = pkgNameMap[obsoleteName]
obsoleteEdge = (obsoletingPkg, obsoletedPkg)
foundObsoleteEdges.add(obsoleteEdge)
# coalesce obsoleted packages by src package, filtering
# by explicit configs
obsoletePkgMap = {}
for obsoleteName in obsoleteNames:
if obsoleteName in pkgNames:
obsoletingPkg = obsoletingPkgMap[obsoleteName]
obsoletedPkg = pkgNameMap[obsoleteName]
obsoleteNevraEdge = (obsoletingPkg.getNevra(),
obsoletedPkg.getNevra())
if obsoleteNevraEdge in keepObsolete:
# We have configured to keep this "obsolete" package
continue
obsoleteEdge = (obsoletingPkg, obsoletedPkg)
if obsoleteEdge in foundObsoleteEdges:
# report each obsoleting relationship only once
continue
foundObsoleteEdges.add(obsoleteEdge)
obsoleteSrcPkg = self._pkgSource.binPkgMap[obsoletedPkg]
obsoletePkgMap.setdefault(obsoleteSrcPkg,
set()).add(obsoleteEdge)
# report sets of obsoleted packages inappropriately included
for srcPkg, obsoleteEdgeSet in sorted(obsoletePkgMap.iteritems()):
# determine whether bins or srcs need removal
pkgsBySrc = self._pkgSource.srcPkgMap[srcPkg]
binPkgs = tuple(sorted(set(x for x in
pkgsBySrc if x.arch != 'src')))
unremovedBinPkgs = tuple(sorted(set(x for x in
pkgsBySrc if x.arch != 'src'
and x.name not in obsoleteNames)))
if unremovedBinPkgs:
obsoleteBinaries.add(
(tuple(sorted(obsoleteEdgeSet)),
srcPkg,
binPkgs,
unremovedBinPkgs))
else:
# choose whether to include or exclude pkg sets by sources
if srcPkg not in foundObsoleteSrcs:
obsoletingSrcPkgs = tuple(sorted(set(
self._pkgSource.binPkgMap[x]
for x, y in obsoleteEdgeSet)))
newEdgeSet = set()
obsoletingSrcPkgs = set()
for obsoletingPkg, obsoletedPkg in obsoleteEdgeSet:
obsoletingSrcPkg = self._pkgSource.binPkgMap[obsoletingPkg]
if (obsoletingSrcPkg.getNevra(), srcPkg.getNevra()) in keepObsoleteSource:
continue
newEdgeSet.add((obsoletingPkg, obsoletedPkg))
obsoletingSrcPkgs.add(obsoletingSrcPkg)
if newEdgeSet:
# we exclude any source only once, not per bucket
for obsoletingPkg, obsoletedPkg in newEdgeSet:
obsoleteSources.add(
(obsoletedPkg.name,
obsoletedPkg,
tuple(obsoletingSrcPkgs),
srcPkg,
binPkgs))
foundObsoleteSrcs.add(srcPkg)
if obsoleteBinaries:
log.error('found obsolete binary packages in %s' % updateId)
errors.setdefault(updateId, []).append(('obsoleteBinaries',
obsoleteBinaries))
if obsoleteSources:
log.error('found obsolete source packages in %s' % updateId)
errors.setdefault(updateId, []).append(('obsoleteSources',
obsoleteSources))
if removedShouldBeReplaced:
log.error('found removals for replacements in %s' % updateId)
errors.setdefault(updateId, []).append(('removedShouldBeReplaced',
removedShouldBeReplaced))
# Report errors.
for updateId, error in sorted(errors.iteritems()):
for e in error:
if isinstance(e, UpdateGoesBackwardsError):
one, two = e.why
oneNevra = str(' '.join(one.getNevra()))
twoNevra = str(' '.join(two.getNevra()))
log.error('%s %s -revertsTo-> %s' % (updateId,
oneNevra, twoNevra))
if one in srpmToAdvisory and two in srpmToAdvisory:
log.info('%s -revertsTo-> %s' % (
rhnUrls(srpmToAdvisory[one]),
rhnUrls(srpmToAdvisory[two])))
if one in srpmToBucketId and two in srpmToBucketId:
log.info('%s %s (%d) -> %s (%d)' % (updateId,
tconv(srpmToBucketId[one]), srpmToBucketId[one],
tconv(srpmToBucketId[two]), srpmToBucketId[two]))
log.info('? reorderSource %s otherId<%s %s' % (
updateId, srpmToBucketId[one], twoNevra))
for errataId in srpmToAdvisory.get(two, []):
log.info('? reorderAdvisory %s otherId<%s %s' % (
updateId, srpmToBucketId[one], errataId))
elif isinstance(e, UpdateReusesPackageError):
# Note that updateObsoletesPackages not yet implemented...
log.error('%s %s reused in %s; check for obsoletes?' % (
updateId, e.pkgNames, e.newspkg))
for name in sorted(set(p.name for p in e.pkgList)):
log.info('? updateObsoletesPackages %s %s' % (
updateId, name))
elif isinstance(e, UpdateRemovesPackageError):
log.error('%s %s removed in %s' % (
updateId, e.pkgNames, e.newspkg))
for name, p in sorted(dict((p.name, p) for p in e.pkgList).items()):
log.info('? updateRemovesPackages %s %s' % (
updateId, name))
log.info('? keepRemoved %s %s' % (updateId, '%s %s %s %s %s' % p.getNevra()))
elif isinstance(e, tuple):
if e[0] == 'duplicates':
sortedOrder = sorted(self._order)
previousId = sortedOrder[sortedOrder.index(updateId)-1]
nextId = sortedOrder[sortedOrder.index(updateId)+1]
for dupName, dupSet in e[1].iteritems():
dupList = sorted(dupSet)
log.error('%s contains duplicate %s %s' %(updateId,
dupName, dupList))
# Changing to use older pkgs to make msg
for srcPkg in sorted(dupList[1:]):
srcNevra = str(' '.join(srcPkg.getNevra()))
if srcPkg in srpmToAdvisory:
log.info('%s : %s' % (
srcNevra, rhnUrls(srpmToAdvisory[srcPkg])))
log.info('? reorderSource %s earlierId> %s %s' %
(updateId, previousId, srcNevra))
log.info('? reorderSource %s laterId> %s %s' %
(updateId, nextId, srcNevra))
for errataId in srpmToAdvisory.get(srcPkg, []):
log.info(
'? reorderAdvisory %s earlierId> %s %r'
% (updateId, previousId, errataId))
log.info(
'? reorderAdvisory %s laterId> %s %r'
% (updateId, nextId, errataId))
elif e[0] == 'obsoleteBinaries':
for (obsoleteEdgeList, srcPkg, binPkgs,
unremovedBinPkgs) in e[1]:
obsoletingNevra = str(' '.join(obsoletingPkg.getNevra()))
obsoletedNevra = str(' '.join(obsoletedPkg.getNevra()))
srcNevra = srcPkg.getNevra()
srcNevraStr = str(' '.join(srcNevra))
unremovedStr = str(' '.join(sorted(
repr(x) for x in unremovedBinPkgs)))
obsoleteNames = set()
for obsoleteEdge in obsoleteEdgeList:
obsoletingPkg, obsoletedPkg = obsoleteEdge
obsoletingNevra = str(' '.join(obsoletingPkg.getNevra()))
obsoletedNevra = str(' '.join(obsoletedPkg.getNevra()))
obsoleteName = obsoletedPkg.name
obsoleteNames.add(obsoleteName)
log.error('%s %s obsoletes %s (%s)' % (
updateId, obsoletingPkg,
obsoletedPkg, obsoleteName))
log.info('? keepObsolete %s %s' %
(obsoletingNevra, obsoletedNevra))
log.info('? removeObsoleted %s %s' % (updateId,
obsoleteName))
log.error('Not "removeSource %s"; that would remove non-obsoleted %s' %
(srcNevraStr, unremovedStr))
elif e[0] == 'obsoleteSources':
for (obsoleteName, obsoletedPkg, obsoletingSrcPkgs,
srcPkg, binPkgs) in e[1]:
srcNevra = srcPkg.getNevra()
srcNevraStr = str(' '.join(srcNevra))
obsoletingSrcPkgNames = str(' '.join(sorted(set(
x.name for x in obsoletingSrcPkgs))))
pkgList = str(' '.join(repr(x) for x in binPkgs))
log.error('%s %s obsolete(s) %s (%s)' % (
updateId, obsoletingSrcPkgNames,
obsoletedPkg, obsoleteName))
log.info('? removeSource %s %s # %s' % (
updateId, srcNevraStr,
obsoletingSrcPkgNames))
for obsoletingSrcPkg in obsoletingSrcPkgs:
log.info('? keepObsoleteSource %s %s'
% (str(' '.join(obsoletingSrcPkg.getNevra())),
srcNevraStr))
log.info(' will remove the following: %s' % pkgList)
elif e[0] == 'removedShouldBeReplaced':
for pkgName in e[1]:
log.error('%s removed package %s should be replaced' % (
updateId, pkgName))
log.info('? updateReplacesPackages %s %s' % (
updateId, pkgName))
# Clear the cache since it would be dirty at this point.
updater._conaryhelper.clearCache()
# Fail if there are any errors.
assert not errors
log.info('order sanity checking complete')
return childPackages, parentPackages
def _orderErrata(self):
"""
Order errata by timestamp.
"""
# order packages by errata release
buckets, other = self._sortPackagesByErrataTimestamp()
# insert packages that did not have errata and were not in the initial
# set of packages (golden bits)
srcMap = {}
missing = set()
for pkg in other:
if pkg.getNevra() not in self._cfg.allowMissingErrata:
missing.add(pkg)
src = self._pkgSource.binPkgMap[pkg]
srcMap.setdefault(src, list()).append(pkg)
        # Raise an error if there are any packages missing an errata that are
        # not explicitly allowed by the config.
if missing:
raise MissingErrataError(packages=list(missing))
# insert bins by buildstamp
extras = {}
# Build a reverse map of broken errata so that we can match packages
# and advisories
nevraAdvMap = {}
for adv, nevras in self._cfg.brokenErrata.iteritems():
for nevra in nevras:
assert nevra not in nevraAdvMap
nevraAdvMap[nevra] = adv
# Build reverse map of advisory to bucketId.
advRevMap = {}
for bucketId, advInfoList in self._advMap.iteritems():
for advInfo in advInfoList:
advDict = dict(advInfo)
assert advDict['name'] not in advRevMap
advRevMap[advDict['name']] = bucketId
for src, bins in srcMap.iteritems():
# Pull out any package sets that look like they are incomplete.
if len(bins) != len(set([ (x.name, x.arch) for x in self._pkgSource.srcPkgMap[src] ])) - 1:
extras[src] = bins
continue
if src.getNevra() in nevraAdvMap:
advisory = nevraAdvMap[src.getNevra()]
bucketId = advRevMap[advisory]
log.info('inserting %s for advisory %s into bucket %s'
% (src, advisory, bucketId))
buckets.setdefault(bucketId, set()).add(src)
continue
buildstamp = int(sorted(bins)[0].buildTimestamp)
buckets.setdefault(buildstamp, set()).update(set(bins))
# get sources to build
srpmToBucketId = {}
for bucketId in sorted(buckets.keys()):
bucket = buckets[bucketId]
self._order[bucketId] = set()
for pkg in bucket:
src = self._pkgSource.binPkgMap[pkg]
self._order[bucketId].add(src)
srpmToBucketId[src] = bucketId
# Make sure extra packages are already included in the order.
for src, bins in extras.iteritems():
assert src in srpmToBucketId
assert src in self._order[srpmToBucketId[src]]
for bin in bins:
assert bin in self._pkgSource.srcPkgMap[src]
self._handleLastErrata()
##
# Start order munging here
##
# Remove any source packages we're deliberately ignoring:
# Note that we do this before we check for drops, as some drops
# are deliberate.
ignoredCount = 0
for source, nevras in self._cfg.ignoreSourceUpdate.iteritems():
for nevra in nevras:
self._reorderSource(source, None, nevra)
ignoredCount += 1
# Make sure we don't drop any updates
totalPkgs = sum([ len(x) for x in self._order.itervalues() ])
pkgs = set()
for pkgSet in self._order.itervalues():
pkgs.update(pkgSet)
# This assert validates that no one srpm is mentioned in more than
# one bucket. This can happen when a partial set of packages was
# released and the code tried to fill in the other packages by build
# time.
#
# This has to be commented out for SLES11e due to a reissuing
# of python-base as an update (when it was already provided in
# the base).
# Need to work around this programmatically.
#
# assert len(pkgs) == totalPkgs
# fold together updates to preserve dep closure.
for mergeList in self._cfg.mergeUpdates:
self._mergeUpdates(mergeList)
# reschedule any updates that may have been released out of order.
for source, dest in self._cfg.reorderUpdates:
self._reorderUpdates(source, dest)
# reschedule any individual advisories.
for source, dest, advisory in self._cfg.reorderAdvisory:
self._reorderAdvisory(source, dest, advisory)
# reschedule individual packages
for source, dest, nevra in self._cfg.reorderSource:
self._reorderSource(source, dest, nevra)
# add a source to a specific bucket, used to "promote" newer versions
# forward.
nevras = dict([ (x.getNevra(), x)
for x in self._pkgSource.srcPkgMap ])
diffCount = 0
for updateId, srcNevras in self._cfg.addSource.iteritems():
sources = set(nevras[x] for x in srcNevras)
self._order.setdefault(updateId, set()).update(sources)
diffCount += len(srcNevras)
# Make sure we don't drop any updates
totalPkgs2 = sum([ len(x) for x in self._order.itervalues() ])
pkgs = set()
for pkgSet in self._order.itervalues():
pkgs.update(pkgSet)
# assert len(pkgs) == totalPkgs2 - diffCount
# assert totalPkgs2 == totalPkgs + diffCount
# pop off future updates
for x in self._order.keys():
if int(x) > time.time():
self._order.pop(x)
if self._advMap.has_key(x):
self._advMap.pop(x)
def _mergeUpdates(self, mergeList):
"""
Merge a list of updates into one bucket.
"""
target = mergeList[0]
# merge remaining updates into target.
for source in mergeList[1:]:
log.info('merging errata bucket %s -> %s' % (source, target))
updateSet = self._order.pop(source)
oldNames = set([ x.name for x in self._order[target]])
newNames = set([ x.name for x in updateSet ])
# Check for overlapping updates. If there is overlap, these
# things can't be merged since all versions need to be
# represented in the repository.
if oldNames & newNames:
log.warn('merge causes package overlap')
self._order[target].update(updateSet)
# merge advisory detail.
if source in self._advMap:
advInfo = self._advMap.pop(source)
if target not in self._advMap:
self._advMap[target] = set()
self._advMap[target].update(advInfo)
def _handleLastErrata(self):
"""
Remove timestamps past the configured lastErrata to prevent
processing them.
"""
if self._cfg.lastErrata:
log.info('handling configured lastErrata (%s)' %
self._cfg.lastErrata)
updateIds = [ x for x in self._order.iterkeys() ]
for x in updateIds:
if x > self._cfg.lastErrata:
log.info('unsequencing timestamp %s (> %s)' %
(x, self._cfg.lastErrata))
del self._order[x]
def _reorderUpdates(self, source, dest):
"""
Reschedule an update from one timestamp to another.
"""
# Probably don't want to move an update into an already
# existing bucket.
assert dest not in self._order
log.info('rescheduling %s -> %s' % (source, dest))
# remove old version
bucket = self._order.pop(source)
# There will not be an entry for sources that do not have
# advisories, default to None.
adv = self._advMap.pop(source, None)
# move to new version
self._order[dest] = bucket
if adv: self._advMap[dest] = adv
def _reorderAdvisory(self, source, dest, advisory):
"""
Reschedule a single advisory.
"""
log.info('rescheduling %s %s -> %s' % (advisory, source, dest))
# Find the srpms that apply to this advisory
srpms = self._advPkgMap[advisory]
# Remove them from the source bucket Id, while making sure they are
# all in the source bucketId.
bucketNevras = dict([ (x.getNevra(), x)
for x in self._order[source] ])
for srpm in srpms:
# Make sure to only move packages if they haven't already
# been moved.
if (srpm not in self._order[source] and
dest in self._order and
srpm in self._order[dest]):
continue
nevra = srpm.getNevra()
if nevra not in bucketNevras:
raise AdvisoryPackageMissingFromBucketError(nevra=nevra)
self._order[source].remove(srpm)
if not len(self._order[source]):
del self._order[source]
# Make sure that each package that we are moving is only
# mentioned in one advisory.
advisories = self._advPkgRevMap[srpm]
if len(advisories) > 1:
                advisories = advisories.difference(set([advisory]))
# Make sure all advisories this srpm is mentioned in are also
# scheduled to be moved to the same bucket.
for adv in advisories:
assert (source, dest, adv) in self._cfg.reorderAdvisory
# Move packages to destination bucket Id.
self._order.setdefault(dest, set()).add(srpm)
# Remove the advisory information for the source bucket Id.
for advInfo in self._advMap[source]:
name = dict(advInfo)['name']
if name == advisory:
self._advMap[source].remove(advInfo)
if not len(self._advMap[source]):
del self._advMap[source]
self._advMap.setdefault(dest, set()).add(advInfo)
break
def _reorderSource(self, source, dest, nevra):
"""
Reschedule an individual srpm to another bucket.
If destination bucket is None, simply remove srpm from source bucket.
"""
if dest:
log.info('rescheduling %s %s -> %s' % (nevra, source, dest))
else:
log.info('removing ignored %s from %s' % (nevra, source))
# Remove specified source nevra from the source bucket
bucketNevras = dict([ (x.getNevra(), x)
for x in self._order[source] ])
# FIXME: the above line will fail with a KeyError exception in
# cases where a removal directive refers to a bucket that
# doesn't exist. Add an option to prevent that and silently
# ignore? (PFM-806)
if nevra not in bucketNevras:
raise PackageNotFoundInBucketError(nevra=nevra, bucketId=source)
srpm = bucketNevras[nevra]
self._order[source].remove(srpm)
if not len(self._order[source]):
del self._order[source]
# Remove all references to this srpm being part of an advisory
for advisory in self._advPkgRevMap.pop(srpm, set()):
self._advPkgMap[advisory].remove(srpm)
if not len(self._advPkgMap[advisory]):
del self._advPkgMap[advisory]
if dest:
# Move srpm to destination bucket if not a removal.
self._order.setdefault(dest, set()).add(srpm)
def _getNevra(self, pkg):
"""
Get the NEVRA of a package object and do any transformation required.
"""
# convert nevra to yum compatible nevra
nevra = list(pkg.nevra.getNevra())
if nevra[1] is None:
nevra[1] = '0'
if type(nevra[1]) == int:
nevra[1] = str(nevra[1])
nevra = tuple(nevra)
return nevra
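    # Illustrative: a package whose epoch is None or an integer comes back
    # with a string epoch, e.g. ('foo', None, '1.0', '1', 'x86_64') becomes
    # ('foo', '0', '1.0', '1', 'x86_64'), matching yum's string-epoch
    # nevra convention.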
@staticmethod
def _mktime(date_str):
"""Convert a datetime string, assumed to be in the EST5EDT zone, to a
POSIX timestamp.
"""
# Shouldn't this be in datetime or something? It's pretty awful.
ref_zone = dateutil_tz.gettz('EST5EDT')
assert ref_zone is not None
utc_zone = dateutil_tz.tzutc()
epoch = datetime.datetime.fromtimestamp(0, utc_zone)
as_local = dateutil_parser.parse(date_str).replace(tzinfo=ref_zone)
as_utc = as_local.astimezone(utc_zone)
offset = as_utc - epoch
return ((offset.days * 60*60*24) +
(offset.seconds) +
(offset.microseconds * 1e-6))
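    # Illustrative: _mktime('2012-06-27 12:00:00') treats the stamp as US
    # Eastern time (EDT, UTC-4, in June), converts it to UTC and returns the
    # corresponding POSIX timestamp as a float.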
def _sortPackagesByErrataTimestamp(self):
"""
Sort packages by errata release timestamp.
"""
# get mapping of nevra to source pkg object
sources = dict( ((x.name, x.epoch, x.version, x.release, x.arch), y)
for x, y in self._pkgSource.binPkgMap.iteritems() )
# get mapping of nevra to pkg obj
nevras = dict(((x.name, x.epoch, x.version, x.release, x.arch), x)
for x in self._pkgSource.binPkgMap.keys()
if x.arch != 'src' and '-debuginfo' not in x.name)
# pull nevras into errata sized buckets
broken = []
buckets = {}
nevraMap = {}
log.info('processing errata')
indexedChannels = set(self._errata.getChannels())
# FIXME: This should not be a hard coded set of arches.
arches = ('i386', 'i486', 'i586', 'i686', 'x86_64', 'noarch')
for e in self._errata.iterByIssueDate():
bucket = []
allocated = []
bucketId = None
log.info('processing %s' % e.advisory)
# Get unique list of nevras for which we have packages indexed and
# are of a supported arch.
errataNevras = set([ self._getNevra(x) for x in e.nevraChannels
if x.channel.label in indexedChannels and
x.nevra.arch in arches ])
for nevra in errataNevras:
# add package to advisory package map
self._advPkgMap.setdefault(e.advisory,
set()).add(sources[nevra])
self._advPkgRevMap.setdefault(sources[nevra],
set()).add(e.advisory)
# move nevra to errata buckets
if nevra in nevras:
binPkg = nevras.pop(nevra)
bucket.append(binPkg)
allocated.append(nevra)
# nevra is already part of another bucket
elif nevra in nevraMap:
bucketId = nevraMap[nevra]
# raise error if we can't find the required package
else:
raise ErrataPackageNotFoundError(pkg=nevra)
            # There should be packages in the bucket, or the packages should
            # already be in an existing bucket (bucketId != None); if not,
            # the errata store is probably broken.
if not bucket and bucketId is None:
if e.advisory in self._cfg.brokenErrata:
msg = log.warn
else:
broken.append(e.advisory)
msg = log.critical
msg('broken advisory: %s' % e.advisory)
if bucketId is None:
bucketId = int(self._mktime(e.issue_date))
if bucketId not in buckets:
buckets[bucketId] = set()
buckets[bucketId].update(bucket)
for nevra in allocated:
nevraMap[nevra] = bucketId
if bucketId not in self._advMap:
self._advMap[bucketId] = set()
self._advMap[bucketId].add((('name', e.advisory),
('summary', e.synopsis)))
if broken:
raise ErrataSourceDataMissingError(broken=broken)
# separate out golden bits
other = []
golden = []
if self._cfg.firstErrata:
firstErrata = self._cfg.firstErrata
else:
firstErrata = int(time.time())
if len(buckets):
firstErrata = sorted(buckets.keys())[0]
for nevra, pkg in nevras.iteritems():
buildtime = int(pkg.buildTimestamp)
if buildtime < firstErrata:
golden.append(pkg)
else:
other.append(pkg)
buckets[0] = golden
# Dump cached errata results once we are done with them.
self._errata.cleanup()
return buckets, other
class _ConaryHelperShim(conaryhelper.ConaryHelper):
"""
Shim class that doesn't actually change the repository.
"""
def __init__(self, cfg):
conaryhelper.ConaryHelper.__init__(self, cfg)
self._client = None
# This doesn't work... leave uninitialized
#self._findTrovesCache = FindTrovesCache(None)
@staticmethod
def _getCacheKey(nvf):
n, v, f = nvf
if v and hasattr(v, 'trailingRevision'):
v = v.trailingRevision().getVersion()
return (n, v)
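    # The cache key collapses a (name, version, flavor) request down to
    # (name, upstream version), so cached findTroves results are shared across
    # flavors and across differently-typed version objects.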
def populateFindTrovesCache(self, labels):
"""
        Pre-populate the find troves cache with all versions from all labels
listed.
"""
req = { None: dict((x, None) for x in labels) }
trvs = self._repos.getTroveVersionsByLabel(req)
for n, vd in trvs.iteritems():
for v, fs in vd.iteritems():
key = self._getCacheKey((n, v, None))
self._findTrovesCache[key] = [ (n, v, f) for f in fs ]
def findTroves(self, troveList, labels=None, *args, **kwargs):
"""
        Aggressively cache all findTroves queries as they are not likely to change
while sanity checking.
"""
if not labels:
return []
# Find any requests that are already cached.
cached = set([ x for x in troveList
if self._getCacheKey(x) in self._findTrovesCache ])
# Filter out cached requests.
needed = set(troveList) - cached
        # Query for new requests.
if needed:
#log.critical('CACHE MISS')
#log.critical('request: %s' % troveList)
trvs = conaryhelper.ConaryHelper.findTroves(self, needed,
labels=labels, *args, **kwargs)
else:
#log.info('CACHE HIT')
trvs = {}
# Cache new requests.
self._findTrovesCache.update(dict([ (self._getCacheKey(x), y)
for x, y in trvs.iteritems() ]))
# Pull results out of the cache.
res = dict([ (x, self._findTrovesCache.get(self._getCacheKey(x), []))
for x in troveList ])
return res
def getLatestSourceVersion(self, pkgname):
"""
Stub for latest version.
"""
return False
def _not_implemented(self, *args, **kwargs):
"""
        Stub for methods that this class does not implement.
"""
raise NotImplementedError
setTroveMetadata = _not_implemented
mirror = _not_implemented
promote = _not_implemented
getSourceTroves = _not_implemented
getSourceVersions = _not_implemented
def _checkout(self, pkgname, version=None):
"""
Checkout stub.
"""
if version and not self.isOnBuildLabel(version):
return conaryhelper.ConaryHelper._checkout(self, pkgname,
version=version)
recipeDir = self._getRecipeDir(pkgname)
return recipeDir
_newpkg = _checkout
@staticmethod
def _addFile(pkgDir, fileName):
"""
addFile stub.
"""
_removeFile = _addFile
def _commit(self, pkgDir, commitMessage):
"""
commit stub.
"""
log.info('committing %s' % os.path.basename(pkgDir))
| {
"content_hash": "7957d272a0b81e0b734e4af356353551",
"timestamp": "",
"source": "github",
"line_count": 1125,
"max_line_length": 103,
"avg_line_length": 40.31022222222222,
"alnum_prop": 0.5313016825067808,
"repo_name": "sassoftware/mirrorball",
"id": "1be4dcf87aabf021e0ec0cfe0579f42dc924bfa6",
"size": "45928",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "updatebot/errata.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Makefile",
"bytes": "12888"
},
{
"name": "Python",
"bytes": "918663"
},
{
"name": "Shell",
"bytes": "12243"
}
],
"symlink_target": ""
} |
import simplejson as json
import logging.config
import socket
import sys
import hpfeeds
from datetime import datetime
from logging.handlers import SocketHandler
from twisted.internet import reactor
class Singleton(type):
_instances = {}
def __call__(cls, *args, **kwargs):
if cls not in cls._instances:
cls._instances[cls] = super(Singleton, cls).__call__(*args, **kwargs)
return cls._instances[cls]
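# Classes that declare __metaclass__ = Singleton (as PyLogger does below)
# hand back the same instance on every construction, so repeated getLogger()
# calls share a single logger object.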
def getLogger(config):
try:
d = config.getVal('logger')
except Exception as e:
print >> sys.stderr, "Error: config does not have 'logger' section"
exit(1)
classname = d.get('class', None)
if classname is None:
print >> sys.stderr, "Logger section is missing the class key."
exit(1)
LoggerClass = globals().get(classname, None)
if LoggerClass is None:
print >> sys.stderr, "Logger class (%s) is not defined." % classname
exit(1)
kwargs = d.get('kwargs', None)
if kwargs is None:
print >> sys.stderr, "Logger section is missing the kwargs key."
exit(1)
try:
logger = LoggerClass(config, **kwargs)
except Exception as e:
        print >> sys.stderr, "An error occurred initialising the logger class"
print e
exit(1)
return logger
class LoggerBase(object):
LOG_BASE_BOOT = 1000
LOG_BASE_MSG = 1001
LOG_BASE_DEBUG = 1002
LOG_BASE_ERROR = 1003
LOG_BASE_PING = 1004
LOG_BASE_CONFIG_SAVE = 1005
LOG_BASE_EXAMPLE = 1006
LOG_FTP_LOGIN_ATTEMPT = 2000
LOG_HTTP_GET = 3000
LOG_HTTP_POST_LOGIN_ATTEMPT = 3001
LOG_SSH_NEW_CONNECTION = 4000
LOG_SSH_REMOTE_VERSION_SENT = 4001
LOG_SSH_LOGIN_ATTEMPT = 4002
LOG_SMB_FILE_OPEN = 5000
LOG_PORT_SYN = 5001
LOG_TELNET_LOGIN_ATTEMPT = 6001
LOG_HTTPPROXY_LOGIN_ATTEMPT = 7001
LOG_MYSQL_LOGIN_ATTEMPT = 8001
LOG_MSSQL_LOGIN_SQLAUTH = 9001
LOG_MSSQL_LOGIN_WINAUTH = 9002
LOG_TFTP = 10001
LOG_NTP_MONLIST = 11001
LOG_VNC = 12001
LOG_SNMP_CMD = 13001
LOG_RDP = 14001
LOG_SIP_REQUEST = 15001
def sanitizeLog(self, logdata):
logdata['node_id'] = self.node_id
logdata['local_time'] = datetime.utcnow().strftime("%Y-%m-%d %H:%M:%S.%f")
if not logdata.has_key('src_host'):
logdata['src_host'] = ''
if not logdata.has_key('src_port'):
logdata['src_port'] = -1
if not logdata.has_key('dst_host'):
logdata['dst_host'] = ''
if not logdata.has_key('dst_port'):
logdata['dst_port'] = -1
if not logdata.has_key('logtype'):
logdata['logtype'] = self.LOG_BASE_MSG
if not logdata.has_key('logdata'):
logdata['logdata'] = {}
return logdata
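    # sanitizeLog fills in defaults, so a minimal call such as
    # sanitizeLog({'logdata': {'HOSTNAME': 'example'}}) comes back with
    # node_id, local_time, empty src/dst host strings, -1 ports and a logtype
    # of LOG_BASE_MSG added.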
class PyLogger(LoggerBase):
"""
Generic python logging
"""
__metaclass__ = Singleton
def __init__(self, config, handlers, formatters={}):
self.node_id = config.getVal('device.node_id')
        # Build config dict to initialise logging
# Ensure all handlers don't drop logs based on severity level
for h in handlers:
handlers[h]["level"] = "NOTSET"
logconfig = {
"version": 1,
"formatters" : formatters,
"handlers": handlers,
# initialise all defined logger handlers
"loggers": {
self.node_id : {
"handlers": handlers.keys()
}
}
}
try:
logging.config.dictConfig(logconfig)
except Exception as e:
print >> sys.stderr, "Invalid logging config"
print type(e)
print e
exit(1)
self.logger = logging.getLogger(self.node_id)
def error(self, data):
data['local_time'] = datetime.utcnow().strftime("%Y-%m-%d %H:%M:%S.%f")
msg = '[ERR] %r' % json.dumps(data, sort_keys=True)
print >> sys.stderr, msg
self.logger.warn(msg)
def log(self, logdata, retry=True):
logdata = self.sanitizeLog(logdata)
self.logger.warn(json.dumps(logdata, sort_keys=True))
class SocketJSONHandler(SocketHandler):
"""Emits JSON messages over TCP delimited by newlines ('\n')"""
def makeSocket(self, timeout=1):
s = SocketHandler.makeSocket(self,timeout)
s.setsockopt(socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1)
return s
def __init__(self, *args, **kwargs):
SocketHandler.__init__(self, *args, **kwargs)
self.retryStart = 0
self.retryMax = 0
self.retryFactor = 0
def send(self, s, attempt=0):
if attempt >= 10:
print "Dropping log message due to too many failed sends"
return
if self.sock is None:
self.createSocket()
if self.sock:
try:
                # TODO: Weirdly, once the other end drops the
                # connection for the next msg, sendall still reports a
                # successful write on a disconnected socket, but then
                # subsequent writes fail correctly.
self.sock.sendall(s)
return
except socket.error:
self.sock.close()
self.sock = None
# Here, we've failed to send s so retry
reactor.callLater(1.5, lambda x: self.send(s, attempt + 1), None)
def makePickle(self, record):
return record.getMessage() + "\n"
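    # Records are shipped as newline-delimited JSON: makePickle skips the
    # usual length-prefixed pickle and sends the already-formatted message
    # followed by '\n' so the receiving end can split on newlines.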
class HpfeedsHandler(logging.Handler):
def __init__(self,host,port,ident, secret,channels):
logging.Handler.__init__(self)
self.host=str(host)
self.port=int(port)
self.ident=str(ident)
self.secret=str(secret)
self.channels=map(str,channels)
hpc=hpfeeds.new(self.host, self.port, self.ident, self.secret)
hpc.subscribe(channels)
self.hpc=hpc
def emit(self, record):
try:
msg = self.format(record)
self.hpc.publish(self.channels,msg)
except:
print "Error on publishing to server"
| {
"content_hash": "fcf3313aaadc971e2110d5323b431216",
"timestamp": "",
"source": "github",
"line_count": 198,
"max_line_length": 82,
"avg_line_length": 34.2020202020202,
"alnum_prop": 0.5234790313053751,
"repo_name": "aabed/opencanary",
"id": "22bbaeaaf784d742f413491a3eac39f5eed05bb7",
"size": "6772",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "opencanary/logger.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "38501"
},
{
"name": "HTML",
"bytes": "15302"
},
{
"name": "JavaScript",
"bytes": "1056"
},
{
"name": "Python",
"bytes": "136157"
},
{
"name": "Shell",
"bytes": "1424"
}
],
"symlink_target": ""
} |
"""Ino (http://inotool.org/) based arduino build wrapper."""
import argparse
import io
import logging
import os
import shutil
import subprocess
import sys
import tempfile
from ino.environment import Environment
from ino.commands.build import Build
from ino.exc import Abort
from b3.steps.step_base import StepBase
class _Stream(io.StringIO):
def __init__(self, logger):
io.StringIO.__init__(self)
self._logger = logger
def write(self, message):
self._logger.debug(message)
def fileno(self):
return -1
def isatty(self):
return False
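    # fileno() and isatty() are stubbed (returning -1 and False) so code that
    # probes sys.stdout while it is swapped for this logger-backed stream
    # treats it as a plain, non-interactive file object.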
class _Build(Build):
"""Ino build command wrapper that redirects STDOUT and STDERR."""
def __init__(self, *args, **kwargs):
Build.__init__(self, *args, **kwargs)
self._logger = logging.getLogger("b3")
def make(self, makefile, **kwargs):
makefile = self.render_template(makefile + '.jinja', makefile, **kwargs)
stdout = tempfile.NamedTemporaryFile()
stderr = tempfile.NamedTemporaryFile()
try:
ret = subprocess.call([self.e.make, '-f', makefile, 'all'],
stdout=stdout, stderr=stderr)
finally:
stdout.seek(0)
sys.stdout.write(stdout.read())
stdout.close()
stderr.seek(0)
sys.stderr.write(stderr.read())
stderr.close()
if ret != 0:
raise Abort("Make failed with code %s" % ret)
def run(self, args):
sys.stdout = _Stream(self._logger)
Build.run(self, args)
sys.stdout = sys.__stdout__
class InoStep(StepBase):
def __init__(self, board_model, source_dir_path, include_paths,
output_dir_path, output_file_path):
self._output_dir_path = output_dir_path
self._output_file_path = output_file_path
self._build_cmd = self._gen_ino_build_command(
lib_dir_path=source_dir_path,
src_dir_path=source_dir_path,
build_dir_path=output_dir_path)
self._build_args = self._gen_ino_build_arguments(
include_paths=include_paths,
board_model=board_model)
@staticmethod
def _gen_ino_build_command(lib_dir_path, src_dir_path, build_dir_path):
env = Environment()
env.load()
env.lib_dir = lib_dir_path
env.src_dir = src_dir_path
env.build_dir = build_dir_path
return _Build(env)
@staticmethod
def _gen_ino_build_arguments(include_paths, board_model):
args = argparse.Namespace()
args.verbose = False
for arg in ("default_make", "default_cc", "default_cxx", "default_ar",
"default_objcopy", "default_cppflags", "default_cxxflags",
"default_cflags", "default_ldflags"):
setattr(args, arg[len("default_"):], getattr(Build, arg))
if len(include_paths):
args.cppflags += " " + " ".join(["-I" + path for path in include_paths])
args.board_model = board_model
return args
def execute(self):
self._build_cmd.run(self._build_args)
output_file_path = os.path.join(self._output_dir_path, "firmware.hex")
shutil.copy(output_file_path, self._output_file_path)
return 0
def get_description(self):
return "ino"
def get_short_name(self):
return "ino"
| {
"content_hash": "95c6804cb49a0eeb9d4a4d1b0157a270",
"timestamp": "",
"source": "github",
"line_count": 111,
"max_line_length": 78,
"avg_line_length": 28.027027027027028,
"alnum_prop": 0.6470588235294118,
"repo_name": "robionica/b3",
"id": "1d1cbc5c51ef79a42a3c06e3ec93283b0568f768",
"size": "3734",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/main/python/b3/steps/ino_step.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "143712"
},
{
"name": "Shell",
"bytes": "274"
}
],
"symlink_target": ""
} |
import urllib, json
def results(parsed, original_query):
search_specs = [
["Thingiverse", "~thingquery", "http://www.thingiverse.com/search?q="],
]
for name, key, url in search_specs:
if key in parsed:
search_url = url + urllib.quote_plus(parsed[key])
return {
"title": "Search {0} for '{1}'".format(name, parsed[key]),
"run_args": [search_url],
"html": """
<script>
setTimeout(function() {
window.location = %s
}, 500);
</script>
"""%(json.dumps(search_url)),
"webview_user_agent": "Mozilla/5.0 (iPhone; CPU iPhone OS 7_0 like Mac OS X) AppleWebKit/537.51.1 (KHTML, like Gecko) Version/7.0 Mobile/11A465 Safari/9537.53",
"webview_links_open_in_browser": True
}
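# results() yields a single Flashlight result whose html payload redirects the
# embedded webview to the Thingiverse search URL after a 500 ms delay; run()
# below is what actually opens the URL when the result is invoked.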
def run(url):
import os
os.system('open "{0}"'.format(url))
| {
"content_hash": "0122e2cd0d16d38b325d3a8423db52d8",
"timestamp": "",
"source": "github",
"line_count": 26,
"max_line_length": 164,
"avg_line_length": 30.5,
"alnum_prop": 0.6216897856242118,
"repo_name": "thenewhobbyist/thingiverse-flashlight-plugin",
"id": "9f4e9ffdc69542f9b8e15a5884c6efae543b4655",
"size": "793",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "thing.bundle/plugin.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "793"
}
],
"symlink_target": ""
} |
from bot_tests import BotTests
from bot import BADLY_FORMATTED_DATA_ERROR
class InvalidPostTests(BotTests):
def test_only_one_data_set(self):
resp = self.app.post_json('/', {"data": [
[1, 2, 3],
]}, expect_errors=True)
self.assertEqual(resp.status_int, 400)
self.assertIn("You'll need to provide more than one dataset.", resp)
def test_unequal_data_sets(self):
resp = self.app.post_json('/', {"data": [
[1, 2, 3],
[4, 5],
]}, expect_errors=True)
self.assertEqual(resp.status_int, 400)
self.assertIn("Datasets were of unequal length.", resp)
def test_data_sets_have_non_numbers(self):
resp = self.app.post_json('/', {"data": [
["yo1", 2, 3],
[4, 5]
]}, expect_errors=True)
self.assertEqual(resp.status_int, 400)
self.assertIn("Posted data contains a non-number: 'yo1'", resp)
def test_bad_format(self):
resp = self.app.post_json('/', {
"hey there": 1
}, expect_errors=True)
self.assertEqual(resp.status_int, 400)
self.assertIn("Data format wasn't correct.", resp)
| {
"content_hash": "ef9b8e30178d60a978b68f8ef66ee304",
"timestamp": "",
"source": "github",
"line_count": 35,
"max_line_length": 76,
"avg_line_length": 33.857142857142854,
"alnum_prop": 0.569620253164557,
"repo_name": "skoczen/correlationbot",
"id": "2b0b71836eabb0136caf07a75b7782b90eeb6e9b",
"size": "1185",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_invalid_posts.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "3351"
},
{
"name": "Python",
"bytes": "9277"
}
],
"symlink_target": ""
} |
import pytest
from django.contrib.auth.models import User
from django.test.testcases import TestCase, TransactionTestCase
from pydjango.patches import Function, Class, Instance
def test_no_savepoints(request):
"no fixtures and no setup_module so shouldnt have savepoints"
for node in request.node.listchain():
assert not getattr(node, 'savepoints', {})
def test_function_has_savepoints(request):
assert User.objects.count() == 0
assert request.node.savepoints
for node in request.node.listchain():
if node != request.node:
assert not getattr(node, 'savepoints', {})
class TestClassLazySavepoint(object):
@classmethod
def setup_class(cls):
assert User.objects.count() == 0
User.objects.create(username='test', password='pass')
def setup_method(self, method):
assert User.objects.count() == 1
User.objects.create(username='test2', password='pass')
def test_lazy_method(self, request):
assert User.objects.count() == 2
assert request.node.savepoints
for node in request.node.listchain():
if isinstance(node, (Function, Class, Instance)):
assert node.savepoints
@pytest.fixture(scope='class')
def classrequest(request):
request.cls.request = request
@pytest.mark.usefixtures("classrequest")
class TestCaceLazySavepoint(TestCase):
def test_lazy_method(self):
# touch database
User.objects.create(username='test2', password='pass')
assert self.request.node.savepoints
for node in self.request.node.listchain():
if isinstance(node, (Function, Class, Instance)):
assert node.savepoints
@pytest.mark.usefixtures("classrequest")
class TestTransactionSavepoints(TransactionTestCase):
def test_lazy_savepoints(self):
User.objects.create(username='test2', password='pass')
for node in self.request.node.listchain():
assert not getattr(node, 'savepoints', {}), node.cls
| {
"content_hash": "fdc4f8ac1667cc8279c901c7c13f681c",
"timestamp": "",
"source": "github",
"line_count": 60,
"max_line_length": 65,
"avg_line_length": 33.63333333333333,
"alnum_prop": 0.6818632309217046,
"repo_name": "krya/pydjango",
"id": "3927b454a5ffa704a7d4c7ca12eae9ef368eec88",
"size": "2043",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_savepoints.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "33631"
}
],
"symlink_target": ""
} |
"""
Fine-tuning a 🤗 Transformers model on text translation.
"""
# You can also adapt this script to your own text translation task. Pointers for this are left as comments.
import argparse
import logging
import math
import os
import random
import datasets
import numpy as np
import torch
from datasets import load_dataset, load_metric
from torch.utils.data.dataloader import DataLoader
from tqdm.auto import tqdm
import transformers
from accelerate import Accelerator
from transformers import (
CONFIG_MAPPING,
MODEL_MAPPING,
AdamW,
AutoConfig,
AutoModelForSeq2SeqLM,
AutoTokenizer,
DataCollatorForSeq2Seq,
MBartTokenizer,
MBartTokenizerFast,
SchedulerType,
default_data_collator,
get_scheduler,
set_seed,
)
logger = logging.getLogger(__name__)
# You should update this to your particular problem to have better documentation of `model_type`
MODEL_CONFIG_CLASSES = list(MODEL_MAPPING.keys())
MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
# Parsing input arguments
def parse_args():
parser = argparse.ArgumentParser(description="Finetune a transformers model on a text classification task")
parser.add_argument(
"--dataset_name",
type=str,
default=None,
help="The name of the dataset to use (via the datasets library).",
)
parser.add_argument(
"--predict_with_generate",
type=bool,
default=True,
help="",
)
parser.add_argument(
"--dataset_config_name",
type=str,
default=None,
help="The configuration name of the dataset to use (via the datasets library).",
)
parser.add_argument(
"--train_file", type=str, default=None, help="A csv or a json file containing the training data."
)
parser.add_argument(
"--num_beams",
type=int,
default=None,
help="Number of beams to use for evaluation. This argument will be "
"passed to ``model.generate``, which is used during ``evaluate`` and ``predict``.",
)
parser.add_argument(
"--max_source_length",
type=int,
default=1024,
help="The maximum total input sequence length after "
"tokenization.Sequences longer than this will be truncated, sequences shorter will be padded.",
)
parser.add_argument(
"--max_target_length",
type=int,
default=128,
help="The maximum total sequence length for target text after "
"tokenization. Sequences longer than this will be truncated, sequences shorter will be padded."
"during ``evaluate`` and ``predict``.",
)
parser.add_argument(
"--val_max_target_length",
type=int,
default=None,
help="The maximum total sequence length for validation "
"target text after tokenization.Sequences longer than this will be truncated, sequences shorter will be "
"padded. Will default to `max_target_length`.This argument is also used to override the ``max_length`` "
"param of ``model.generate``, which is used during ``evaluate`` and ``predict``.",
)
parser.add_argument(
"--pad_to_max_length",
type=bool,
default=False,
help="Whether to pad all samples to model maximum sentence "
"length. If False, will pad the samples dynamically when batching to the maximum length in the batch. More"
"efficient on GPU but very bad for TPU.",
)
parser.add_argument(
"--validation_file", type=str, default=None, help="A csv or a json file containing the validation data."
)
parser.add_argument(
"--ignore_pad_token_for_loss",
type=bool,
default=True,
help="Whether to ignore the tokens corresponding to " "padded labels in the loss computation or not.",
)
parser.add_argument("--source_lang", type=str, default=None, help="Source language id for translation.")
parser.add_argument("--target_lang", type=str, default=None, help="Target language id for translation.")
parser.add_argument(
"--source_prefix",
type=str,
default=None,
help="A prefix to add before every source text " "(useful for T5 models).",
)
parser.add_argument(
"--preprocessing_num_workers",
type=int,
default=None,
help="The number of processes to use for the preprocessing.",
)
parser.add_argument(
"--overwrite_cache", type=bool, default=None, help="Overwrite the cached training and evaluation sets"
)
parser.add_argument(
"--max_length",
type=int,
default=128,
help=(
"The maximum total input sequence length after tokenization. Sequences longer than this will be truncated,"
" sequences shorter will be padded if `--pad_to_max_lengh` is passed."
),
)
parser.add_argument(
"--model_name_or_path",
type=str,
help="Path to pretrained model or model identifier from huggingface.co/models.",
required=True,
)
parser.add_argument(
"--config_name",
type=str,
default=None,
help="Pretrained config name or path if not the same as model_name",
)
parser.add_argument(
"--tokenizer_name",
type=str,
default=None,
help="Pretrained tokenizer name or path if not the same as model_name",
)
parser.add_argument(
"--use_slow_tokenizer",
action="store_true",
help="If passed, will use a slow tokenizer (not backed by the 🤗 Tokenizers library).",
)
parser.add_argument(
"--per_device_train_batch_size",
type=int,
default=8,
help="Batch size (per device) for the training dataloader.",
)
parser.add_argument(
"--per_device_eval_batch_size",
type=int,
default=8,
help="Batch size (per device) for the evaluation dataloader.",
)
parser.add_argument(
"--learning_rate",
type=float,
default=5e-5,
help="Initial learning rate (after the potential warmup period) to use.",
)
parser.add_argument("--weight_decay", type=float, default=0.0, help="Weight decay to use.")
parser.add_argument("--num_train_epochs", type=int, default=3, help="Total number of training epochs to perform.")
parser.add_argument(
"--max_train_steps",
type=int,
default=None,
help="Total number of training steps to perform. If provided, overrides num_train_epochs.",
)
parser.add_argument(
"--gradient_accumulation_steps",
type=int,
default=1,
help="Number of updates steps to accumulate before performing a backward/update pass.",
)
parser.add_argument(
"--lr_scheduler_type",
type=SchedulerType,
default="linear",
help="The scheduler type to use.",
choices=["linear", "cosine", "cosine_with_restarts", "polynomial", "constant", "constant_with_warmup"],
)
parser.add_argument(
"--num_warmup_steps", type=int, default=0, help="Number of steps for the warmup in the lr scheduler."
)
parser.add_argument("--output_dir", type=str, default=None, help="Where to store the final model.")
parser.add_argument("--seed", type=int, default=None, help="A seed for reproducible training.")
parser.add_argument(
"--model_type",
type=str,
default=None,
help="Model type to use if training from scratch.",
choices=MODEL_TYPES,
)
args = parser.parse_args()
# Sanity checks
if args.dataset_name is None and args.train_file is None and args.validation_file is None:
raise ValueError("Need either a task name or a training/validation file.")
if args.train_file is not None:
extension = args.train_file.split(".")[-1]
assert extension in ["csv", "json"], "`train_file` should be a csv or a json file."
if args.validation_file is not None:
extension = args.validation_file.split(".")[-1]
assert extension in ["csv", "json"], "`validation_file` should be a csv or a json file."
if args.output_dir is not None:
os.makedirs(args.output_dir, exist_ok=True)
return args
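# Hedged sketch, not part of the original script: the update-steps arithmetic
# applied later in main() when --max_train_steps is omitted, isolated here for
# clarity. Inputs are assumed to be positive integers.
def _demo_max_train_steps(num_batches_per_epoch, gradient_accumulation_steps, num_train_epochs):
    # One optimizer update happens every `gradient_accumulation_steps` batches.
    num_update_steps_per_epoch = math.ceil(num_batches_per_epoch / gradient_accumulation_steps)
    return num_train_epochs * num_update_steps_per_epoch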
def main():
# Parse the arguments
args = parse_args()
# Initialize the accelerator. We will let the accelerator handle device placement for us in this example.
accelerator = Accelerator()
# Make one log on every process with the configuration for debugging.
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
datefmt="%m/%d/%Y %H:%M:%S",
level=logging.INFO,
)
logger.info(accelerator.state)
# Setup logging, we only want one process per machine to log things on the screen.
# accelerator.is_local_main_process is only True for one process per machine.
logger.setLevel(logging.INFO if accelerator.is_local_main_process else logging.ERROR)
if accelerator.is_local_main_process:
datasets.utils.logging.set_verbosity_warning()
transformers.utils.logging.set_verbosity_info()
else:
datasets.utils.logging.set_verbosity_error()
transformers.utils.logging.set_verbosity_error()
# If passed along, set the training seed now.
if args.seed is not None:
set_seed(args.seed)
# Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
# or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
# (the dataset will be downloaded automatically from the datasets Hub).
#
# For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
# 'text' is found. You can easily tweak this behavior (see below).
#
    # In distributed training, the load_dataset function guarantees that only one local process can concurrently
# download the dataset.
if args.dataset_name is not None:
# Downloading and loading a dataset from the hub.
raw_datasets = load_dataset(args.dataset_name, args.dataset_config_name)
else:
data_files = {}
if args.train_file is not None:
data_files["train"] = args.train_file
if args.validation_file is not None:
data_files["validation"] = args.validation_file
extension = args.train_file.split(".")[-1]
raw_datasets = load_dataset(extension, data_files=data_files)
# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Load pretrained model and tokenizer
#
# In distributed training, the .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
if args.config_name:
        config = AutoConfig.from_pretrained(args.config_name)
elif args.model_name_or_path:
config = AutoConfig.from_pretrained(args.model_name_or_path)
else:
config = CONFIG_MAPPING[args.model_type]()
logger.warning("You are instantiating a new config instance from scratch.")
if args.tokenizer_name:
tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_name, use_fast=not args.use_slow_tokenizer)
elif args.model_name_or_path:
tokenizer = AutoTokenizer.from_pretrained(args.model_name_or_path, use_fast=not args.use_slow_tokenizer)
else:
raise ValueError(
"You are instantiating a new tokenizer from scratch. This is not supported by this script."
"You can do it from another script, save it, and load it from here, using --tokenizer_name."
)
if args.model_name_or_path:
model = AutoModelForSeq2SeqLM.from_pretrained(
args.model_name_or_path,
from_tf=bool(".ckpt" in args.model_name_or_path),
config=config,
)
else:
logger.info("Training new model from scratch")
model = AutoModelForSeq2SeqLM.from_config(config)
model.resize_token_embeddings(len(tokenizer))
# Set decoder_start_token_id
if model.config.decoder_start_token_id is None and isinstance(tokenizer, (MBartTokenizer, MBartTokenizerFast)):
assert (
args.target_lang is not None and args.source_lang is not None
), "mBart requires --target_lang and --source_lang"
if isinstance(tokenizer, MBartTokenizer):
model.config.decoder_start_token_id = tokenizer.lang_code_to_id[args.target_lang]
else:
model.config.decoder_start_token_id = tokenizer.convert_tokens_to_ids(args.target_lang)
if model.config.decoder_start_token_id is None:
raise ValueError("Make sure that `config.decoder_start_token_id` is correctly defined")
prefix = args.source_prefix if args.source_prefix is not None else ""
# Preprocessing the datasets.
# First we tokenize all the texts.
column_names = raw_datasets["train"].column_names
# For translation we set the codes of our source and target languages (only useful for mBART, the others will
# ignore those attributes).
if isinstance(tokenizer, (MBartTokenizer, MBartTokenizerFast)):
if args.source_lang is not None:
tokenizer.src_lang = args.source_lang
if args.target_lang is not None:
tokenizer.tgt_lang = args.target_lang
# Get the language codes for input/target.
source_lang = args.source_lang.split("_")[0]
target_lang = args.target_lang.split("_")[0]
padding = "max_length" if args.pad_to_max_length else False
# Temporarily set max_target_length for training.
max_target_length = args.max_target_length
padding = "max_length" if args.pad_to_max_length else False
def preprocess_function(examples):
inputs = [ex[source_lang] for ex in examples["translation"]]
targets = [ex[target_lang] for ex in examples["translation"]]
inputs = [prefix + inp for inp in inputs]
model_inputs = tokenizer(inputs, max_length=args.max_source_length, padding=padding, truncation=True)
# Setup the tokenizer for targets
with tokenizer.as_target_tokenizer():
labels = tokenizer(targets, max_length=max_target_length, padding=padding, truncation=True)
# If we are padding here, replace all tokenizer.pad_token_id in the labels by -100 when we want to ignore
# padding in the loss.
if padding == "max_length" and args.ignore_pad_token_for_loss:
labels["input_ids"] = [
[(l if l != tokenizer.pad_token_id else -100) for l in label] for label in labels["input_ids"]
]
model_inputs["labels"] = labels["input_ids"]
return model_inputs
processed_datasets = raw_datasets.map(
preprocess_function,
batched=True,
num_proc=args.preprocessing_num_workers,
remove_columns=column_names,
load_from_cache_file=not args.overwrite_cache,
)
train_dataset = processed_datasets["train"]
eval_dataset = processed_datasets["validation"]
# Log a few random samples from the training set:
for index in random.sample(range(len(train_dataset)), 3):
logger.info(f"Sample {index} of the training set: {train_dataset[index]}.")
# DataLoaders creation:
label_pad_token_id = -100 if args.ignore_pad_token_for_loss else tokenizer.pad_token_id
if args.pad_to_max_length:
        # If padding was already done to max length, we use the default data collator that will just convert everything
# to tensors.
data_collator = default_data_collator
else:
# Otherwise, `DataCollatorWithPadding` will apply dynamic padding for us (by padding to the maximum length of
# the samples passed). When using mixed precision, we add `pad_to_multiple_of=8` to pad all tensors to multiple
# of 8s, which will enable the use of Tensor Cores on NVIDIA hardware with compute capability >= 7.5 (Volta).
data_collator = DataCollatorForSeq2Seq(
tokenizer,
model=model,
label_pad_token_id=label_pad_token_id,
pad_to_multiple_of=8 if accelerator.use_fp16 else None,
)
train_dataloader = DataLoader(
train_dataset, shuffle=True, collate_fn=data_collator, batch_size=args.per_device_train_batch_size
)
eval_dataloader = DataLoader(eval_dataset, collate_fn=data_collator, batch_size=args.per_device_eval_batch_size)
# Optimizer
# Split weights in two groups, one with weight decay and the other not.
no_decay = ["bias", "LayerNorm.weight"]
optimizer_grouped_parameters = [
{
"params": [p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)],
"weight_decay": args.weight_decay,
},
{
"params": [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)],
"weight_decay": 0.0,
},
]
optimizer = AdamW(optimizer_grouped_parameters, lr=args.learning_rate)
# Prepare everything with our `accelerator`.
model, optimizer, train_dataloader, eval_dataloader = accelerator.prepare(
model, optimizer, train_dataloader, eval_dataloader
)
    # Note -> the training dataloader needs to be prepared before we grab its length below (because its length will be
    # shorter when using multiple processes)
# Scheduler and math around the number of training steps.
num_update_steps_per_epoch = math.ceil(len(train_dataloader) / args.gradient_accumulation_steps)
if args.max_train_steps is None:
args.max_train_steps = args.num_train_epochs * num_update_steps_per_epoch
else:
args.num_train_epochs = math.ceil(args.max_train_steps / num_update_steps_per_epoch)
lr_scheduler = get_scheduler(
name=args.lr_scheduler_type,
optimizer=optimizer,
num_warmup_steps=args.num_warmup_steps,
num_training_steps=args.max_train_steps,
)
metric = load_metric("sacrebleu")
def postprocess_text(preds, labels):
preds = [pred.strip() for pred in preds]
labels = [[label.strip()] for label in labels]
return preds, labels
# Train!
total_batch_size = args.per_device_train_batch_size * accelerator.num_processes * args.gradient_accumulation_steps
logger.info("***** Running training *****")
logger.info(f" Num examples = {len(train_dataset)}")
logger.info(f" Num Epochs = {args.num_train_epochs}")
logger.info(f" Instantaneous batch size per device = {args.per_device_train_batch_size}")
logger.info(f" Total train batch size (w. parallel, distributed & accumulation) = {total_batch_size}")
logger.info(f" Gradient Accumulation steps = {args.gradient_accumulation_steps}")
logger.info(f" Total optimization steps = {args.max_train_steps}")
# Only show the progress bar once on each machine.
progress_bar = tqdm(range(args.max_train_steps), disable=not accelerator.is_local_main_process)
completed_steps = 0
for epoch in range(args.num_train_epochs):
model.train()
for step, batch in enumerate(train_dataloader):
outputs = model(**batch)
loss = outputs.loss
loss = loss / args.gradient_accumulation_steps
accelerator.backward(loss)
if step % args.gradient_accumulation_steps == 0 or step == len(train_dataloader) - 1:
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
progress_bar.update(1)
completed_steps += 1
if completed_steps >= args.max_train_steps:
break
model.eval()
if args.val_max_target_length is None:
args.val_max_target_length = args.max_target_length
gen_kwargs = {
"max_length": args.val_max_target_length if args is not None else config.max_length,
"num_beams": args.num_beams,
}
for step, batch in enumerate(eval_dataloader):
with torch.no_grad():
generated_tokens = accelerator.unwrap_model(model).generate(
batch["input_ids"],
attention_mask=batch["attention_mask"],
**gen_kwargs,
)
generated_tokens = accelerator.pad_across_processes(
generated_tokens, dim=1, pad_index=tokenizer.pad_token_id
)
labels = batch["labels"]
if not args.pad_to_max_length:
# If we did not pad to max length, we need to pad the labels too
labels = accelerator.pad_across_processes(batch["labels"], dim=1, pad_index=tokenizer.pad_token_id)
generated_tokens = accelerator.gather(generated_tokens).cpu().numpy()
labels = accelerator.gather(labels).cpu().numpy()
if args.ignore_pad_token_for_loss:
# Replace -100 in the labels as we can't decode them.
labels = np.where(labels != -100, labels, tokenizer.pad_token_id)
decoded_preds = tokenizer.batch_decode(generated_tokens, skip_special_tokens=True)
decoded_labels = tokenizer.batch_decode(labels, skip_special_tokens=True)
decoded_preds, decoded_labels = postprocess_text(decoded_preds, decoded_labels)
metric.add_batch(predictions=decoded_preds, references=decoded_labels)
eval_metric = metric.compute()
logger.info({"bleu": eval_metric["score"]})
if args.output_dir is not None:
accelerator.wait_for_everyone()
unwrapped_model = accelerator.unwrap_model(model)
unwrapped_model.save_pretrained(args.output_dir, save_function=accelerator.save)
if __name__ == "__main__":
main()
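# Hedged illustration appended for documentation only, not part of the original
# example script: the label-masking convention used in preprocess_function,
# where pad token ids are replaced by -100 so the loss ignores padded positions.
def _demo_mask_pad_labels(label_ids, pad_token_id):
    # e.g. [12, 7, pad, pad] -> [12, 7, -100, -100]
    return [(l if l != pad_token_id else -100) for l in label_ids]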
| {
"content_hash": "8a967102c5dd48b60bef7ce3b51cc0e4",
"timestamp": "",
"source": "github",
"line_count": 545,
"max_line_length": 119,
"avg_line_length": 40.508256880733946,
"alnum_prop": 0.646827014540019,
"repo_name": "huggingface/pytorch-transformers",
"id": "4350d59b9a2ee0434d49c6d53da7c70fe291801f",
"size": "22751",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "examples/pytorch/translation/run_translation_no_trainer.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "194"
},
{
"name": "Jupyter Notebook",
"bytes": "535623"
},
{
"name": "Python",
"bytes": "897445"
}
],
"symlink_target": ""
} |
from django.conf import settings
from django.core.management.base import BaseCommand, CommandError
from django.contrib.sites.models import Site
try:
from django.apps import apps
get_model = apps.get_model
except ImportError:
from django.db.models.loading import get_model
from scoutandrove.apps.sr.models import get_test_models, TestResultSet
class Command(BaseCommand):
args = ''
help = ''
def handle(self, *args, **options):
model = get_model(settings.SR_SITE_PROFILE_MODEL.split('.')[0], settings.SR_SITE_PROFILE_MODEL.split('.')[1])
profiles = model.objects.all()
test_models = get_test_models()
for profile in profiles:
profile_settings = profile.get_settings()
profile_tests = list(set([profile_setting.test for profile_setting in profile_settings]))
tests_to_run = [test for test in profile_tests if test.needsToRun(profile)]
            if len(tests_to_run) > 0:
                print "Found %s tests to run on profile %s"%(len(tests_to_run), profile)
test_result_set = TestResultSet(profile=profile)
test_result_set.save()
for test in tests_to_run:
print "--- Running %s"%(test)
test.run(test_result_set)
| {
"content_hash": "1ba066d48814bb0b32cf3cd325ea7ff7",
"timestamp": "",
"source": "github",
"line_count": 37,
"max_line_length": 117,
"avg_line_length": 35.810810810810814,
"alnum_prop": 0.6120754716981132,
"repo_name": "ninapavlich/scout-and-rove",
"id": "2d1d980f13081ac0c0688976067e5392cdf0dc07",
"size": "1325",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "scoutandrove/apps/sr/management/commands/run_tests.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "6030"
},
{
"name": "Python",
"bytes": "53752"
}
],
"symlink_target": ""
} |
"""Tests for the NgramAnnotator"""
from __future__ import absolute_import
import unittest
from epitator.annotator import AnnoDoc
from epitator.ngram_annotator import NgramAnnotator
class NgramAnnotatorTest(unittest.TestCase):
def setUp(self):
self.annotator = NgramAnnotator()
def test_one_word_sentence(self):
doc = AnnoDoc("Hi")
doc.add_tier(self.annotator)
self.assertEqual(len(doc.tiers['ngrams'].spans), 1)
self.assertEqual(doc.tiers['ngrams'].spans[0].text, 'Hi')
def test_two_word_sentence(self):
doc = AnnoDoc("Hi there")
doc.add_tier(self.annotator)
self.assertEqual(len(doc.tiers['ngrams'].spans), 3)
self.assertEqual(doc.tiers['ngrams'].spans[0].text, 'Hi')
self.assertEqual(doc.tiers['ngrams'].spans[1].text, 'Hi there')
self.assertEqual(doc.tiers['ngrams'].spans[2].text, 'there')
def test_three_word_sentence_with_period(self):
doc = AnnoDoc("Bears eat tacos.")
doc.add_tier(self.annotator)
self.assertEqual(len(doc.tiers['ngrams'].spans), 10)
span_iter = iter(doc.tiers['ngrams'].spans)
self.assertEqual(next(span_iter).text, 'Bears')
self.assertEqual(next(span_iter).text, 'Bears eat')
self.assertEqual(next(span_iter).text, 'Bears eat tacos')
self.assertEqual(next(span_iter).text, 'Bears eat tacos.')
self.assertEqual(next(span_iter).text, 'eat')
self.assertEqual(next(span_iter).text, 'eat tacos')
self.assertEqual(next(span_iter).text, 'eat tacos.')
self.assertEqual(next(span_iter).text, 'tacos')
self.assertEqual(next(span_iter).text, 'tacos.')
self.assertEqual(next(span_iter).text, '.')
if __name__ == '__main__':
unittest.main()
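# Hedged note, not part of the original tests: a sentence tokenized into n
# tokens yields every contiguous ngram, i.e. n * (n + 1) / 2 spans, which
# matches the counts asserted above (1, 3 and 10 spans).
def _expected_ngram_count(n_tokens):
    return n_tokens * (n_tokens + 1) // 2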
| {
"content_hash": "f33481e2a18b2d1538f783dff118cd9e",
"timestamp": "",
"source": "github",
"line_count": 54,
"max_line_length": 71,
"avg_line_length": 33.166666666666664,
"alnum_prop": 0.6454494695700725,
"repo_name": "ecohealthalliance/EpiTator",
"id": "928e6066a8b447cde434dd36546ce6078c62a4a5",
"size": "1813",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/annotator/test_ngram_annotator.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "329685"
}
],
"symlink_target": ""
} |
"""Displays calibration and guide offsets
History:
2011-02-16 ROwen
2011-02-18 ROwen Removed the rotator axis since we never set rotator calib or guide offsets on the 3.5m.
"""
import Tkinter
import RO.CoordSys
import RO.StringUtil
import RO.Wdg
import TUI.TCC.TCCModel
_HelpURL = "Telescope/StatusWin.html#Offsets"
_DataWidth = 10
class AxisOffsetWdg (Tkinter.Frame):
def __init__ (self, master=None, **kargs):
"""creates a new offset display frame
Inputs:
- master master Tk widget -- typically a frame or window
"""
Tkinter.Frame.__init__(self, master, **kargs)
self.tccModel = TUI.TCC.TCCModel.getModel()
self.isArc = False
gr = RO.Wdg.Gridder(self, sticky="w")
gr.gridWdg("Calib Off")
gr.startNewCol()
# just display az and alt offsets because there's no way to measure
# rotator correction so it's never set
MountLabels = ("Az", "Alt")
# calib offset
self.calibOffWdgSet = [
RO.Wdg.DMSLabel(
master = self,
precision = 1,
width = _DataWidth,
helpText = "Calibration offset",
helpURL = _HelpURL,
)
for ii in range(len(MountLabels))
]
for ii, label in enumerate(MountLabels):
wdgSet = gr.gridWdg (
label = label,
dataWdg = self.calibOffWdgSet[ii],
units = RO.StringUtil.DMSStr,
)
wdgSet.labelWdg.configure(width=4, anchor="e")
gr.startNewCol()
gr.gridWdg(" Guide Off")
gr.startNewCol()
# guide offset
self.guideOffWdgSet = [
RO.Wdg.DMSLabel(
master = self,
precision = 1,
width = _DataWidth,
helpText = "Guide offset",
helpURL = _HelpURL,
)
for ii in range(len(MountLabels))
]
for ii, label in enumerate(MountLabels):
wdgSet = gr.gridWdg (
label = label,
dataWdg = self.guideOffWdgSet[ii],
units = RO.StringUtil.DMSStr,
)
wdgSet.labelWdg.configure(width=4, anchor="e")
# allow the last+1 column to grow to fill the available space
self.columnconfigure(gr.getMaxNextCol(), weight=1)
self.tccModel.calibOff.addROWdgSet(self.calibOffWdgSet)
self.tccModel.guideOff.addROWdgSet(self.guideOffWdgSet)
if __name__ == "__main__":
import TestData
tuiModel = TestData.tuiModel
    testFrame = AxisOffsetWdg(tuiModel.tkRoot)
testFrame.pack()
dataList = (
"ObjSys=ICRS, 0",
"ObjInstAng=30.0, 0.0, 4494436859.66000",
"ObjArcOff=-0.012, 0.0, 4494436859.66000, -0.0234, 0.000000, 4494436859.66000",
"Boresight=0.0054, 0.0, 4494436859.66000, -0.0078, 0.000000, 4494436859.66000",
"CalibOff=-0.001, 0.0, 4494436859.66000, 0.003, 0.000000, 4494436859.66000, -0.017, 0.000000, 4494436859.66000",
"GuideOff=-0.003, 0.0, 4494436859.66000, -0.002, 0.000000, 4494436859.66000, 0.023, 0.000000, 4494436859.66000",
)
TestData.testDispatcher.dispatch(dataList)
tuiModel.tkRoot.mainloop()
| {
"content_hash": "3c8ab6a2544d90da0651a32dab5daf0f",
"timestamp": "",
"source": "github",
"line_count": 102,
"max_line_length": 120,
"avg_line_length": 32.509803921568626,
"alnum_prop": 0.5729794933655006,
"repo_name": "r-owen/TUI",
"id": "b75c68f9a02595055107c8f3928cdff4aacc6163",
"size": "3338",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "TUI/TCC/StatusWdg/AxisOffsetWdg.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "HTML",
"bytes": "412255"
},
{
"name": "Python",
"bytes": "1443987"
}
],
"symlink_target": ""
} |
from commander.public import Commander
from source.public import Source
from .cloud_commanding import \
CreateTargetCommand, \
StopTargetCommand, \
DeleteBootstrapNetworkInterfaceCommand, \
DeleteBootstrapVolumeCommand, \
ConfigureBootDeviceCommand, \
StartTargetCommand
from .target_system_info_inspection import GetTargetSystemInfoCommand
from .device_identification import DeviceIdentificationCommand
from .partition_creation import CreatePartitionsCommand
from .filesystem_creation import CreateFilesystemsCommand
from .filesystem_mounting import FilesystemMountCommand
from .syncing import SyncCommand, FinalSyncCommand
from .config_adjustment import NetworkConfigAdjustmentCommand, SshConfigAdjustmentCommand, FstabAdjustmentCommand
from .bootloader_reinstallation import BootloaderReinstallationCommand
class MigrationCommander(Commander):
_COMMAND_DRIVER = {
Source.Status.CREATE_TARGET: CreateTargetCommand,
Source.Status.GET_TARGET_SYSTEM_INFORMATION: GetTargetSystemInfoCommand,
Source.Status.IDENTIFY_DEVICES: DeviceIdentificationCommand,
Source.Status.CREATE_PARTITIONS: CreatePartitionsCommand,
Source.Status.CREATE_FILESYSTEMS: CreateFilesystemsCommand,
Source.Status.MOUNT_FILESYSTEMS: FilesystemMountCommand,
Source.Status.SYNC: SyncCommand,
Source.Status.FINAL_SYNC: FinalSyncCommand,
Source.Status.ADJUST_NETWORK_CONFIG: NetworkConfigAdjustmentCommand,
Source.Status.ADJUST_SSH_CONFIG: SshConfigAdjustmentCommand,
Source.Status.ADJUST_FSTAB: FstabAdjustmentCommand,
Source.Status.REINSTALL_BOOTLOADER: BootloaderReinstallationCommand,
Source.Status.STOP_TARGET: StopTargetCommand,
Source.Status.DELETE_BOOTSTRAP_VOLUME: DeleteBootstrapVolumeCommand,
Source.Status.DELETE_BOOTSTRAP_NETWORK_INTERFACE: DeleteBootstrapNetworkInterfaceCommand,
Source.Status.CONFIGURE_BOOT_DEVICE: ConfigureBootDeviceCommand,
Source.Status.START_TARGET: StartTargetCommand,
}
@property
def _commander_driver(self):
return self._COMMAND_DRIVER
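# Hedged usage sketch, not part of the original module: the commander resolves
# the command class for a migration step by looking up the source status in
# the driver mapping above. Illustration only.
def _demo_resolve_command_class(status):
    # e.g. Source.Status.SYNC resolves to SyncCommand.
    return MigrationCommander._COMMAND_DRIVER.get(status)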
| {
"content_hash": "02b422367f083cccd64ed500b50d93e6",
"timestamp": "",
"source": "github",
"line_count": 45,
"max_line_length": 113,
"avg_line_length": 47.333333333333336,
"alnum_prop": 0.7953051643192488,
"repo_name": "jdepoix/goto_cloud",
"id": "a1c21401476470d1912e20b26d628cc286aa8ba1",
"size": "2130",
"binary": false,
"copies": "1",
"ref": "refs/heads/development",
"path": "goto_cloud/migration_commander/migration_commander.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "354421"
},
{
"name": "Shell",
"bytes": "619"
}
],
"symlink_target": ""
} |
from coapthon import defines
from coapthon.messages.option import Option
__author__ = 'Giacomo Tanganelli'
__version__ = "2.0"
class Message(object):
"""
Manage messages.
"""
def __init__(self):
"""
Initialize a CoAP Message.
"""
# The type. One of {CON, NON, ACK or RST}.
self._type = None
# The 16-bit Message Identification.
self._mid = None
# The token, a 0-8 byte array.
self.token = None
# The set of options of this message.
self._options = []
# The payload of this message.
self.payload = None
# The destination address of this message.
self.destination = None
# The source address of this message.
self.source = None
# Indicates if the message has been acknowledged.
self._acknowledged = False
# Indicates if the message has been rejected.
self._rejected = False
# Indicates if the message has timeouted.
self._timeouted = False
# Indicates if the message has been canceled.
self._canceled = False
# Indicates if the message is a duplicate.
self._duplicate = False
# The timestamp
self._timestamp = None
# The code
self.code = None
@property
def options(self):
"""
Property for retrieving the options of the message.
:return: the options
"""
return self._options
def add_option(self, option):
"""
Add an option to the message.
        :type option: coapthon.messages.option.Option
:param option: the option
:raise TypeError: if the option is not repeatable and such option is already present in the message
"""
assert isinstance(option, Option)
name, type_value, repeatable, defaults = defines.options[option.number]
if not repeatable:
try:
self._options.index(option)
raise TypeError("Option : %s is not repeatable", name)
except ValueError:
self._options.append(option)
else:
self._options.append(option)
def del_option(self, option):
"""
Delete an option from the message
        :type option: coapthon.messages.option.Option
:param option: the option
"""
assert isinstance(option, Option)
try:
while True:
self._options.remove(option)
except ValueError:
pass
def del_option_name(self, name):
"""
Delete an option from the message by name
:param name: option name
"""
for o in self._options:
assert isinstance(o, Option)
if o.number == defines.inv_options[name]:
self._options.remove(o)
@property
def mid(self):
"""
Return the mid of the message.
:return: the MID
"""
return self._mid
@mid.setter
def mid(self, m):
"""
Sets the MID of the message.
:param m: the MID
:raise AttributeError: if m is not int or cannot be represented on 16 bits.
"""
        if not isinstance(m, int) or m > 65535:
raise AttributeError
self._mid = m
@property
def type(self):
"""
Return the type of the message.
:return: the type
"""
return self._type
@type.setter
def type(self, t):
"""
Sets the type of the message.
:param t: the type
:raise AttributeError: if t is not a valid type
"""
if not isinstance(t, int) or t not in defines.types:
raise AttributeError
self._type = t
@property
def duplicated(self):
"""
Checks if this message is a duplicate.
:return: True, if is a duplicate
"""
return self._duplicate
@duplicated.setter
def duplicated(self, d):
"""
Marks this message as a duplicate.
:param d: if a duplicate
"""
assert isinstance(d, bool)
self._duplicate = d
@property
def acknowledged(self):
"""
Checks if is this message has been acknowledged.
:return: True, if is acknowledged
"""
return self._acknowledged
@acknowledged.setter
def acknowledged(self, a):
"""
Marks this message as acknowledged.
:param a: if acknowledged
"""
assert isinstance(a, bool)
self._acknowledged = a
@property
def rejected(self):
"""
Checks if this message has been rejected.
:return: True, if is rejected
"""
return self._rejected
@rejected.setter
def rejected(self, r):
"""
Marks this message as rejected.
:param r: if rejected
"""
assert isinstance(r, bool)
self._rejected = r
@property
def timeouted(self):
"""
        Checks if this message has timed out. Confirmable messages in particular
        might time out.
        :return: True, if it has timed out
"""
return self._timeouted
@timeouted.setter
def timeouted(self, t):
"""
        Marks this message as timed out. Confirmable messages in particular might
        time out.
        :param t: if it timed out
"""
assert isinstance(t, bool)
self._timeouted = t
@property
def cancelled(self):
"""
Checks if this message has been canceled.
:return: True, if is canceled
"""
return self._canceled
@cancelled.setter
def cancelled(self, c):
"""
Marks this message as canceled.
:param c: if canceled
"""
assert isinstance(c, bool)
self._canceled = c
@staticmethod
def new_ack(message):
"""
Create a new acknowledgment for the specified message.
:param message: the message to acknowledge
:return: the acknowledgment
"""
ack = Message()
types = {v: k for k, v in defines.types.iteritems()}
ack.type = types['ACK']
ack._mid = message.mid
ack.code = 0
ack.token = None
ack.destination = message.source
return ack
@staticmethod
def new_rst(message):
"""
Create a new reset message for the specified message.
:param message: the message to reject
:return: the rst message
"""
rst = Message()
types = {v: k for k, v in defines.types.iteritems()}
rst.type = types['RST']
rst._mid = message.mid
rst.token = None
rst.code = 0
rst.destination = message.source
return rst
def __str__(self):
"""
Return the message as a formatted string.
:return: the string representing the message
"""
msg = "Source: " + str(self.source) + "\n"
msg += "Destination: " + str(self.destination) + "\n"
msg += "Type: " + str(defines.types[self.type]) + "\n"
msg += "MID: " + str(self._mid) + "\n"
if self.code is None:
self.code = 0
try:
msg += "Code: " + str(defines.inv_responses[self.code]) + "\n"
except KeyError:
msg += "Code: " + str(defines.codes[self.code]) + "\n"
msg += "Token: " + str(self.token) + "\n"
for opt in self._options:
msg += str(opt)
msg += "Payload: " + "\n"
msg += str(self.payload) + "\n"
return msg
@property
def etag(self):
"""
Get the ETag option of the message.
:return: the ETag values or [] if not specified by the request
"""
value = []
for option in self.options:
if option.number == defines.inv_options['ETag']:
value.append(option.value)
return value
@etag.setter
def etag(self, etag):
"""
Add an ETag option to the message.
:param etag: the etag
"""
option = Option()
option.number = defines.inv_options['ETag']
option.value = etag
self.add_option(option)
@etag.deleter
def etag(self):
"""
Delete an ETag from a message.
"""
self.del_option_name("ETag")
@property
def content_type(self):
"""
Get the Content-Type option of a response.
:return: the Content-Type value or 0 if not specified by the response
"""
value = 0
for option in self.options:
if option.number == defines.inv_options['Content-Type']:
value = int(option.value)
return value
@content_type.setter
def content_type(self, content_type):
"""
Set the Content-Type option of a response.
:type content_type: int
:param content_type: the Content-Type
"""
option = Option()
option.number = defines.inv_options['Content-Type']
option.value = int(content_type)
self.add_option(option)
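# Hedged usage sketch, not part of the original module: new_ack mirrors the MID
# of the received message and swaps the addressing, as implemented above.
def _demo_ack_matches_request(received_message):
    ack = Message.new_ack(received_message)
    return ack.mid == received_message.mid and ack.destination == received_message.source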
| {
"content_hash": "d8f8518bba55f161282c2565ca042d60",
"timestamp": "",
"source": "github",
"line_count": 353,
"max_line_length": 107,
"avg_line_length": 26.059490084985836,
"alnum_prop": 0.5435373410153278,
"repo_name": "kuggenhoffen/CoAPthon",
"id": "b8b8d531e595045aba5d9512b358fbc905643dec",
"size": "9199",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "coapthon/messages/message.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "215810"
}
],
"symlink_target": ""
} |
import os
import sys
from setuptools import setup
# To use:
# python setup.py bdist --format=wininst
from flopy import __version__, __name__, __author__
# trap someone trying to install flopy with something other
# than python 2 or 3
if not sys.version_info[0] in [2, 3]:
print('Sorry, Flopy not supported in your Python version')
print(' Supported versions: 2 and 3')
print(' Your version of Python: {}'.format(sys.version_info[0]))
sys.exit(1) # return non-zero value for failure
long_description = ''
try:
import pypandoc
long_description = pypandoc.convert('README.md', 'rst')
except:
pass
setup(name=__name__,
description='FloPy is a Python package to create, run, and post-process MODFLOW-based models.',
long_description=long_description,
author=__author__,
author_email='[email protected], [email protected], [email protected], [email protected], ' +
'[email protected], [email protected], [email protected], [email protected], [email protected]',
url='https://github.com/modflowpy/flopy/',
license='New BSD',
platforms='Windows, Mac OS-X',
install_requires=['numpy>=1.7'],
packages=['flopy', 'flopy.modflow', 'flopy.modflowlgr', 'flopy.modpath',
'flopy.mt3d', 'flopy.seawat', 'flopy.utils', 'flopy.plot',
'flopy.pest', 'flopy.export'],
include_package_data=True, # includes files listed in MANIFEST.in
# use this version ID if .svn data cannot be found
version=__version__)
| {
"content_hash": "04d46dbb65562c61eab57ca5ad002c3e",
"timestamp": "",
"source": "github",
"line_count": 41,
"max_line_length": 115,
"avg_line_length": 39.1219512195122,
"alnum_prop": 0.6396508728179551,
"repo_name": "brclark-usgs/flopy",
"id": "fbdb73cbc6a6e48ce963349d4be06209b369105e",
"size": "1604",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "71"
},
{
"name": "Python",
"bytes": "2502771"
},
{
"name": "Shell",
"bytes": "81"
}
],
"symlink_target": ""
} |
from google.cloud import osconfig_v1alpha
def sample_update_os_policy_assignment():
# Create a client
client = osconfig_v1alpha.OsConfigZonalServiceClient()
# Initialize request argument(s)
os_policy_assignment = osconfig_v1alpha.OSPolicyAssignment()
os_policy_assignment.os_policies.id = "id_value"
os_policy_assignment.os_policies.mode = "ENFORCEMENT"
os_policy_assignment.os_policies.resource_groups.resources.pkg.apt.name = "name_value"
os_policy_assignment.os_policies.resource_groups.resources.pkg.desired_state = "REMOVED"
os_policy_assignment.os_policies.resource_groups.resources.id = "id_value"
os_policy_assignment.rollout.disruption_budget.fixed = 528
request = osconfig_v1alpha.UpdateOSPolicyAssignmentRequest(
os_policy_assignment=os_policy_assignment,
)
# Make the request
operation = client.update_os_policy_assignment(request=request)
print("Waiting for operation to complete...")
response = operation.result()
# Handle the response
print(response)
# [END osconfig_v1alpha_generated_OsConfigZonalService_UpdateOSPolicyAssignment_sync]
| {
"content_hash": "df7f0f6073991792d9dc3a4b4d18a998",
"timestamp": "",
"source": "github",
"line_count": 31,
"max_line_length": 92,
"avg_line_length": 36.806451612903224,
"alnum_prop": 0.7502191060473269,
"repo_name": "googleapis/python-os-config",
"id": "4cc1a2b74a84044a429a68029dcbb3828f73eecf",
"size": "2563",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "samples/generated_samples/osconfig_v1alpha_generated_os_config_zonal_service_update_os_policy_assignment_sync.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "2050"
},
{
"name": "Python",
"bytes": "1810720"
},
{
"name": "Shell",
"bytes": "30669"
}
],
"symlink_target": ""
} |
"""A plugin to tag events according to rules in a tag file."""
import logging
import re
import os
from efilter import ast as efilter_ast
from efilter import api as efilter_api
from efilter import errors as efilter_errors
from efilter import query as efilter_query
from plaso.analysis import interface
from plaso.analysis import manager
from plaso.containers import reports
class TaggingAnalysisPlugin(interface.AnalysisPlugin):
"""Analysis plugin that tags events according to rules in a tag file."""
NAME = u'tagging'
ENABLE_IN_EXTRACTION = True
_EVENT_TAG_COMMENT = u'Tag applied by tagging analysis plugin.'
_OS_TAG_FILES = {
u'macosx': u'tag_macosx.txt',
u'windows': u'tag_windows.txt'}
# A line with no indent is a tag name.
_TAG_LABEL_LINE = re.compile(r'^(\w+)')
# A line with leading indent is one of the rules for the preceding tag.
_TAG_RULE_LINE = re.compile(r'^\s+(.+)')
# If any of these words are in the query then it's probably objectfilter.
_OBJECTFILTER_WORDS = re.compile(
r'\s(is|isnot|equals|notequals|inset|notinset|contains|notcontains)\s')
def __init__(self):
"""Initializes a tagging analysis plugin."""
super(TaggingAnalysisPlugin, self).__init__()
self._autodetect_tag_file_attempt = False
self._number_of_event_tags = 0
self._tag_rules = None
self._tagging_file_name = None
def _AttemptAutoDetectTagFile(self, analysis_mediator):
"""Detects which tag file is most appropriate.
Args:
analysis_mediator (AnalysisMediator): analysis mediator.
Returns:
bool: True if a tag file is autodetected.
"""
self._autodetect_tag_file_attempt = True
if not analysis_mediator.data_location:
return False
platform = analysis_mediator.platform
filename = self._OS_TAG_FILES.get(platform.lower(), None)
if not filename:
return False
logging.info(u'Using auto detected tag file: {0:s}'.format(filename))
tag_file_path = os.path.join(analysis_mediator.data_location, filename)
self.SetAndLoadTagFile(tag_file_path)
return True
def _ParseDefinitions(self, tag_file_path):
"""Parses the tag file and yields tuples of label name, list of rule ASTs.
Args:
tag_file_path (str): path to the tag file.
Yields:
tuple: contains:
str: label name.
list[efilter.query.Query]: efilter queries.
"""
queries = None
tag = None
with open(tag_file_path, u'r') as tag_file:
for line in tag_file.readlines():
label_match = self._TAG_LABEL_LINE.match(line)
if label_match:
if tag and queries:
yield tag, queries
queries = []
tag = label_match.group(1)
continue
rule_match = self._TAG_RULE_LINE.match(line)
if rule_match:
rule = rule_match.group(1)
query = self._ParseRule(rule)
if query:
queries.append(query)
# Yield any remaining tags once we reach the end of the file.
if tag and queries:
yield tag, queries
def _ParseRule(self, rule):
"""Parses a single tagging rule.
This method attempts to detect whether the rule is written with objectfilter
or dottysql syntax - either is acceptable.
Example:
_ParseRule('5 + 5')
# Returns Sum(Literal(5), Literal(5))
Args:
rule (str): rule in either objectfilter or dottysql syntax.
Returns:
efilter.query.Query: efilter query of the rule or None.
"""
if self._OBJECTFILTER_WORDS.search(rule):
syntax = u'objectfilter'
else:
syntax = u'dottysql'
try:
return efilter_query.Query(rule, syntax=syntax)
except efilter_errors.EfilterParseError as exception:
stripped_rule = rule.rstrip()
logging.warning(
u'Unable to build query from rule: "{0:s}" with error: {1:s}'.format(
stripped_rule, exception.message))
def _ParseTaggingFile(self, tag_file_path):
"""Parses tag definitions from the source.
Args:
tag_file_path (str): path to the tag file.
Returns:
efilter.ast.Expression: efilter abstract syntax tree (AST), containing the
tagging rules.
"""
tags = []
for label_name, rules in self._ParseDefinitions(tag_file_path):
if not rules:
logging.warning(u'All rules for label "{0:s}" are invalid.'.format(
label_name))
continue
tag = efilter_ast.IfElse(
# Union will be true if any of the 'rules' match.
efilter_ast.Union(*[rule.root for rule in rules]),
# If so then evaluate to a string with the name of the tag.
efilter_ast.Literal(label_name),
# Otherwise don't return anything.
efilter_ast.Literal(None))
tags.append(tag)
# Generate a repeated value with all the tags (None will be skipped).
return efilter_ast.Repeat(*tags)
def CompileReport(self, mediator):
"""Compiles an analysis report.
Args:
mediator (AnalysisMediator): mediates interactions between
analysis plugins and other components, such as storage and dfvfs.
Returns:
AnalysisReport: analysis report.
"""
report_text = u'Tagging plugin produced {0:d} tags.\n'.format(
self._number_of_event_tags)
return reports.AnalysisReport(plugin_name=self.NAME, text=report_text)
def ExamineEvent(self, mediator, event):
"""Analyzes an EventObject and tags it according to rules in the tag file.
Args:
mediator (AnalysisMediator): mediates interactions between analysis
plugins and other components, such as storage and dfvfs.
event (EventObject): event to examine.
"""
if self._tag_rules is None:
if self._autodetect_tag_file_attempt:
# There's nothing to tag with, and we've already tried to find a good
# tag file, so there's nothing we can do with this event (or any other).
return
if not self._AttemptAutoDetectTagFile(mediator):
logging.info(
u'No tag definition file specified, and plaso was not able to '
u'autoselect a tagging file. As no definitions were specified, '
u'no events will be tagged.')
return
try:
matched_labels = efilter_api.apply(self._tag_rules, vars=event)
except efilter_errors.EfilterTypeError as exception:
logging.warning(u'Unable to apply efilter query with error: {0:s}'.format(
exception))
matched_labels = None
if not matched_labels:
return
labels = list(efilter_api.getvalues(matched_labels))
event_tag = self._CreateEventTag(event, self._EVENT_TAG_COMMENT, labels)
mediator.ProduceEventTag(event_tag)
def SetAndLoadTagFile(self, tagging_file_path):
"""Sets the tag file to be used by the plugin.
Args:
tagging_file_path (str): path of the tagging file.
"""
self._tagging_file_name = tagging_file_path
self._tag_rules = self._ParseTaggingFile(self._tagging_file_name)
manager.AnalysisPluginManager.RegisterPlugin(TaggingAnalysisPlugin)
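# Hedged illustration, not part of the original plugin: the tag-file layout
# parsed by _ParseDefinitions above -- an unindented label line followed by
# indented rule lines. The label and rules below are invented examples.
_EXAMPLE_TAG_DEFINITIONS = u"""\
application_execution
  data_type is 'windows:prefetch:execution'
  data_type is 'windows:evtx:record' AND event_identifier is 4688
"""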
| {
"content_hash": "e8e5075b4307d75cd0caed0990eac471",
"timestamp": "",
"source": "github",
"line_count": 221,
"max_line_length": 80,
"avg_line_length": 32.15384615384615,
"alnum_prop": 0.6611314382212216,
"repo_name": "dc3-plaso/plaso",
"id": "3104c3cd76281223f638b1df9ee38ffd4ddfe840",
"size": "7130",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "plaso/analysis/tagging.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "1683"
},
{
"name": "Makefile",
"bytes": "1151"
},
{
"name": "Python",
"bytes": "3875098"
},
{
"name": "Shell",
"bytes": "17861"
}
],
"symlink_target": ""
} |
"""implement adder quantizer."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import abc
import copy
from absl import logging
from qkeras.qtools.quantized_operators import adder_impl
from qkeras.qtools.quantized_operators import quantizer_impl
class IAdder(abc.ABC):
"""abstract class for adder."""
def __init__(self):
self.adder_impl_table = [
[
adder_impl.FixedPointAdder,
adder_impl.Po2FixedPointAdder,
adder_impl.FixedPointAdder,
adder_impl.FixedPointAdder,
adder_impl.FixedPointAdder,
adder_impl.FloatingPointAdder
],
[
adder_impl.Po2FixedPointAdder,
adder_impl.Po2Adder,
adder_impl.Po2FixedPointAdder,
adder_impl.Po2FixedPointAdder,
adder_impl.FixedPointAdder,
adder_impl.FloatingPointAdder
],
[
adder_impl.FixedPointAdder,
adder_impl.Po2FixedPointAdder,
adder_impl.FixedPointAdder,
adder_impl.FixedPointAdder,
adder_impl.FixedPointAdder,
adder_impl.FloatingPointAdder
],
[
adder_impl.FixedPointAdder,
adder_impl.Po2FixedPointAdder,
adder_impl.FixedPointAdder,
adder_impl.FixedPointAdder,
adder_impl.FixedPointAdder,
adder_impl.FloatingPointAdder
],
[
adder_impl.FixedPointAdder,
adder_impl.Po2FixedPointAdder,
adder_impl.FixedPointAdder,
adder_impl.FixedPointAdder,
adder_impl.FixedPointAdder,
adder_impl.FloatingPointAdder
],
[
adder_impl.FloatingPointAdder,
adder_impl.FloatingPointAdder,
adder_impl.FloatingPointAdder,
adder_impl.FloatingPointAdder,
adder_impl.FloatingPointAdder,
adder_impl.FloatingPointAdder
]
]
def make_quantizer(self, quantizer_1: quantizer_impl.IQuantizer,
quantizer_2: quantizer_impl.IQuantizer):
"""make adder quantizer."""
local_quantizer_1 = copy.deepcopy(quantizer_1)
local_quantizer_2 = copy.deepcopy(quantizer_2)
mode1 = local_quantizer_1.mode
mode2 = local_quantizer_2.mode
adder_impl_class = self.adder_impl_table[mode1][mode2]
logging.debug(
"qbn adder implemented as class %s",
adder_impl_class.implemented_as())
return adder_impl_class(
local_quantizer_1,
local_quantizer_2
)
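# Hedged usage sketch, not part of the original module: make_quantizer selects
# the implementation class by indexing the table with the two quantizers'
# integer mode values, exactly as shown above.
def _demo_adder_impl_class(mode1, mode2):
    # Illustration only; e.g. two fixed-point modes map to FixedPointAdder.
    return IAdder().adder_impl_table[mode1][mode2]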
| {
"content_hash": "24da91a6e46672602741baa376832f7b",
"timestamp": "",
"source": "github",
"line_count": 88,
"max_line_length": 66,
"avg_line_length": 29.852272727272727,
"alnum_prop": 0.6128663875142748,
"repo_name": "google/qkeras",
"id": "7250c1e34c98276af17c17641cf8e228f8dbd3d5",
"size": "3284",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "qkeras/qtools/quantized_operators/adder_factory.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "129705"
},
{
"name": "Python",
"bytes": "1004364"
}
],
"symlink_target": ""
} |
import re
import base64
import pathlib
import json
from cumulusci.tasks.salesforce import BaseSalesforceApiTask
from cumulusci.core.exceptions import CumulusCIException
from simple_salesforce.exceptions import SalesforceMalformedRequest
def join_errors(e: SalesforceMalformedRequest) -> str:
return "; ".join([error.get("message", "Unknown.") for error in e.content])
class UploadProfilePhoto(BaseSalesforceApiTask):
task_docs = """
Uploads a profile photo for a specified or default User.
Examples
--------
Upload a profile photo for the default user.
.. code-block:: yaml
tasks:
upload_profile_photo_default:
group: Internal storytelling data
class_path: cumulusci.tasks.salesforce.users.UploadProfilePhoto
description: Uploads a profile photo for the default user.
options:
photo: storytelling/photos/default.png
Upload a profile photo for a user whose Alias equals ``grace`` or ``walker``, is active, and created today.
.. code-block:: yaml
tasks:
upload_profile_photo_grace:
group: Internal storytelling data
class_path: cumulusci.tasks.salesforce.users.UploadProfilePhoto
description: Uploads a profile photo for Grace.
options:
photo: storytelling/photos/grace.png
where: (Alias = 'grace' OR Alias = 'walker') AND IsActive = true AND CreatedDate = TODAY
"""
task_options = {
"photo": {"description": "Path to user's profile photo.", "required": True},
"where": {
"description": """WHERE clause used querying for which User to upload the profile photo for.
* No need to prefix with ``WHERE``
* The SOQL query must return one and only one User record.
* If no "where" is supplied, uploads the photo for the org's default User.
""",
"required": False,
},
}
def _raise_cumulusci_exception(self, e: SalesforceMalformedRequest) -> None:
raise CumulusCIException(join_errors(e))
def _get_user_id_by_query(self, where: str) -> str:
        # Query for the User, removing a leading "WHERE " prefix from the clause if present.
query = "SELECT Id FROM User WHERE {} LIMIT 2".format(
re.sub(r"^WHERE ", "", where, flags=re.I)
)
self.logger.info(f"Querying User: {query}")
user_ids = []
try:
for record in self.sf.query_all(query)["records"]:
user_ids.append(record["Id"])
except SalesforceMalformedRequest as e:
# Raise an easier to digest exception.
self._raise_cumulusci_exception(e)
# Validate only 1 User found.
if len(user_ids) < 1:
raise CumulusCIException("No Users found.")
if 1 < len(user_ids):
raise CumulusCIException(
"More than one User found (at least 2): {}".format(", ".join(user_ids))
)
# Log and return User ID.
self.logger.info(f"Uploading profile photo for the User with ID {user_ids[0]}")
return user_ids[0]
def _get_default_user_id(self) -> str:
user_id = self.sf.restful("")["identity"][-18:]
self.logger.info(
f"Uploading profile photo for the default User with ID {user_id}"
)
return user_id
def _insert_content_document(self, photo_path) -> str:
path = pathlib.Path(photo_path)
if not path.exists():
raise CumulusCIException(f"No photo found at {path}")
self.logger.info(f"Setting user photo to {path}")
result = self.sf.ContentVersion.create(
{
"PathOnClient": path.name,
"Title": path.stem,
"VersionData": base64.b64encode(path.read_bytes()).decode("utf-8"),
}
)
if not result["success"]:
raise CumulusCIException(
"Failed to create photo ContentVersion: {}".format(result["errors"])
)
content_version_id = result["id"]
# Query the ContentDocumentId for our created record.
content_document_id = self.sf.query(
f"SELECT Id, ContentDocumentId FROM ContentVersion WHERE Id = '{content_version_id}'"
)["records"][0]["ContentDocumentId"]
self.logger.info(
f"Uploaded profile photo ContentDocument {content_document_id}"
)
return content_document_id
def _delete_content_document(self, content_document_id: str):
self.sf.ContentDocument.delete(content_document_id)
def _assign_user_profile_photo(self, user_id: str, content_document_id: str):
# Call the Connect API to set our user photo.
try:
self.sf.restful(
f"connect/user-profiles/{user_id}/photo",
data=json.dumps({"fileId": content_document_id}),
method="POST",
)
except SalesforceMalformedRequest as e:
# Rollback ContentDocument, and raise an easier to digest exception.
self.logger.error(
"An error occured assigning the ContentDocument as the users's profile photo."
)
self.logger.error(f"Deleting ContentDocument {content_document_id}")
self._delete_content_document(content_document_id)
self._raise_cumulusci_exception(e)
def _run_task(self):
user_id = (
self._get_user_id_by_query(self.options["where"])
if self.options.get("where")
else self._get_default_user_id()
)
content_document_id = self._insert_content_document(self.options["photo"])
self._assign_user_profile_photo(user_id, content_document_id)
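# Hedged illustration, not part of the original task module: the WHERE-clause
# normalization performed in _get_user_id_by_query, which strips an optional
# leading "WHERE " (case-insensitive) before embedding the clause in SOQL.
def _demo_normalize_where(where):
    # "WHERE Alias = 'grace'" and "Alias = 'grace'" both normalize to the latter.
    return re.sub(r"^WHERE ", "", where, flags=re.I)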
| {
"content_hash": "51a8a3b72c5c3a812c1a5c00f54a39c5",
"timestamp": "",
"source": "github",
"line_count": 161,
"max_line_length": 107,
"avg_line_length": 35.67701863354037,
"alnum_prop": 0.6119428969359332,
"repo_name": "SalesforceFoundation/CumulusCI",
"id": "177f7b0115bad62cf66cb5eff2d843ed4db4ff75",
"size": "5744",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "cumulusci/tasks/salesforce/users/photos.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Makefile",
"bytes": "2303"
},
{
"name": "Python",
"bytes": "754354"
},
{
"name": "RobotFramework",
"bytes": "9330"
},
{
"name": "Shell",
"bytes": "5555"
}
],
"symlink_target": ""
} |
import unittest
from transformers import AutoFeatureExtractor, AutoTokenizer, Speech2TextForConditionalGeneration, Wav2Vec2ForCTC
from transformers.pipelines import AutomaticSpeechRecognitionPipeline
from transformers.testing_utils import require_datasets, require_torch, require_torchaudio, slow
# from .test_pipelines_common import CustomInputPipelineCommonMixin
class AutomaticSpeechRecognitionPipelineTests(unittest.TestCase):
# pipeline_task = "automatic-speech-recognition"
# small_models = ["facebook/s2t-small-mustc-en-fr-st"] # Models tested without the @slow decorator
# large_models = [
# "facebook/wav2vec2-base-960h",
# "facebook/s2t-small-mustc-en-fr-st",
# ] # Models tested with the @slow decorator
@slow
@require_torch
@require_datasets
def test_simple_wav2vec2(self):
import numpy as np
from datasets import load_dataset
model = Wav2Vec2ForCTC.from_pretrained("facebook/wav2vec2-base-960h")
tokenizer = AutoTokenizer.from_pretrained("facebook/wav2vec2-base-960h")
feature_extractor = AutoFeatureExtractor.from_pretrained("facebook/wav2vec2-base-960h")
asr = AutomaticSpeechRecognitionPipeline(model=model, tokenizer=tokenizer, feature_extractor=feature_extractor)
waveform = np.zeros((34000,))
output = asr(waveform)
self.assertEqual(output, {"text": ""})
ds = load_dataset("patrickvonplaten/librispeech_asr_dummy", "clean", split="validation")
filename = ds[0]["file"]
output = asr(filename)
self.assertEqual(output, {"text": "A MAN SAID TO THE UNIVERSE SIR I EXIST"})
filename = ds[0]["file"]
with open(filename, "rb") as f:
data = f.read()
output = asr(data)
self.assertEqual(output, {"text": "A MAN SAID TO THE UNIVERSE SIR I EXIST"})
@slow
@require_torch
@require_torchaudio
@require_datasets
def test_simple_s2t(self):
import numpy as np
from datasets import load_dataset
model = Speech2TextForConditionalGeneration.from_pretrained("facebook/s2t-small-mustc-en-it-st")
tokenizer = AutoTokenizer.from_pretrained("facebook/s2t-small-mustc-en-it-st")
feature_extractor = AutoFeatureExtractor.from_pretrained("facebook/s2t-small-mustc-en-it-st")
asr = AutomaticSpeechRecognitionPipeline(model=model, tokenizer=tokenizer, feature_extractor=feature_extractor)
waveform = np.zeros((34000,))
output = asr(waveform)
self.assertEqual(output, {"text": "E questo è il motivo per cui non ci siamo mai incontrati."})
ds = load_dataset("patrickvonplaten/librispeech_asr_dummy", "clean", split="validation")
filename = ds[0]["file"]
output = asr(filename)
self.assertEqual(output, {"text": "Un uomo disse all'universo: \"Signore, io esisto."})
filename = ds[0]["file"]
with open(filename, "rb") as f:
data = f.read()
output = asr(data)
self.assertEqual(output, {"text": "Un uomo disse all'universo: \"Signore, io esisto."})
| {
"content_hash": "9b9ed0afdab1f19e7309cbd9ec5088fa",
"timestamp": "",
"source": "github",
"line_count": 75,
"max_line_length": 119,
"avg_line_length": 41.54666666666667,
"alnum_prop": 0.6822849807445442,
"repo_name": "huggingface/pytorch-transformers",
"id": "91dcc71de0182742124b168ffe3ab197d71b5119",
"size": "3724",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_pipelines_automatic_speech_recognition.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "194"
},
{
"name": "Jupyter Notebook",
"bytes": "535623"
},
{
"name": "Python",
"bytes": "897445"
}
],
"symlink_target": ""
} |
import El
import time
m = 4000
n = 2000
display = True
worldRank = El.mpi.WorldRank()
# Make a sparse matrix with the last column dense
def Rectang(height,width):
A = El.DistSparseMatrix()
A.Resize(height,width)
firstLocalRow = A.FirstLocalRow()
localHeight = A.LocalHeight()
A.Reserve(5*localHeight)
for sLoc in xrange(localHeight):
s = firstLocalRow + sLoc
if s < width:
A.QueueLocalUpdate( sLoc, s, 11 )
if s >= 1 and s-1 < width:
A.QueueLocalUpdate( sLoc, s-1, -1 )
if s+1 < width:
A.QueueLocalUpdate( sLoc, s+1, 2 )
if s >= height and s-height < width:
A.QueueLocalUpdate( sLoc, s-height, -3 )
if s+height < width:
A.QueueLocalUpdate( sLoc, s+height, 4 )
# The dense last column
A.QueueLocalUpdate( sLoc, width-1, -5/height );
A.MakeConsistent()
return A
A = Rectang(m,n)
b = El.DistMultiVec()
El.Gaussian( b, m, 1 )
if display:
El.Display( A, "A" )
El.Display( b, "b" )
startNNLS = time.clock()
x = El.NNLS( A, b )
endNNLS = time.clock()
if worldRank == 0:
print "NNLS time: ", endNNLS-startNNLS
if display:
El.Display( x, "x" )
e = El.DistMultiVec()
El.Copy( b, e )
El.SparseMultiply( El.NORMAL, -1., A, x, 1., e )
if display:
El.Display( e, "e" )
eTwoNorm = El.Nrm2( e )
if worldRank == 0:
print "|| A x - b ||_2 =", eTwoNorm
startLS = time.clock()
xLS = El.LeastSquares( A, b )
endLS = time.clock()
if worldRank == 0:
print "LS time: ", endLS-startLS
El.Copy( b, e )
El.SparseMultiply( El.NORMAL, -1., A, xLS, 1., e )
if display:
El.Display( e, "e" )
eTwoNorm = El.Nrm2( e )
if worldRank == 0:
print "|| A x_{LS} - b ||_2 =", eTwoNorm
# Require the user to press a button before the figures are closed
commSize = El.mpi.Size( El.mpi.COMM_WORLD() )
El.Finalize()
if commSize == 1:
raw_input('Press Enter to exit')
| {
"content_hash": "95be54c8fbb11770240ccfb7bb1e8f5b",
"timestamp": "",
"source": "github",
"line_count": 76,
"max_line_length": 66,
"avg_line_length": 24.342105263157894,
"alnum_prop": 0.6232432432432432,
"repo_name": "sg0/Elemental",
"id": "6d3978fe805fa43ee2fb40314a8d2d7bdc7c7f55",
"size": "2114",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "examples/interface/NNLS.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "436612"
},
{
"name": "C++",
"bytes": "6916029"
},
{
"name": "CMake",
"bytes": "72646"
},
{
"name": "Makefile",
"bytes": "122"
},
{
"name": "Matlab",
"bytes": "13306"
},
{
"name": "Objective-C",
"bytes": "127079"
},
{
"name": "Python",
"bytes": "948517"
},
{
"name": "Ruby",
"bytes": "1393"
},
{
"name": "Shell",
"bytes": "1374"
},
{
"name": "TeX",
"bytes": "23728"
}
],
"symlink_target": ""
} |
r"""A placer that implements coordinate descent algorithm.
The placer can start from scratch (i.e., an empty grid) or from existing node
locations specified by --init_placement.
The algorithm runs for a given number of epochs (iterations).
For each iteration, for each node (in the order given by --cd_node_order), place the node
greedily on the best grid location.
If --cd_use_stdcell_placer is True, place hard macros greedily first,
then run the stdcell placer to place all stdcells.
When --cd_epochs=1, this algorithm is equivalent to greedy algorithm.
Example usage:
python circuit_training/environment/coordinate_descent_placer_main.py
--netlist_file "/path/to/netlist.pb.txt"
--init_placement "/path/to/initial_placement.plc"
"""
import functools
from absl import app
from absl import flags
from circuit_training.environment import coordinate_descent_placer
from circuit_training.environment import environment
from circuit_training.environment import placement_util
import numpy as np
flags.DEFINE_string('netlist_file', None, 'Path to netlist file.')
flags.DEFINE_string('init_placement', None, 'Path to initial placement file.')
flags.DEFINE_string('cd_output_dir', '/tmp/cd', 'CD output dir.')
flags.DEFINE_string('cd_placement_filename', 'cd', 'CD placement filename.')
FLAGS = flags.FLAGS
def main(_):
np.random.seed(FLAGS.seed)
plc = placement_util.create_placement_cost(FLAGS.netlist_file,
FLAGS.init_placement)
if not FLAGS.cd_use_init_location:
plc.unplace_all_nodes()
def cost_fn(plc):
return environment.cost_info_function(plc=plc, done=True)
cost_fn = functools.partial(
cost_fn, wirelength_weight=1.0, density_weight=0.1, congestion_weight=0.1)
placer = coordinate_descent_placer.CoordinateDescentPlacer(plc, cost_fn)
placer.place()
placer.save_placement(FLAGS.cd_output_dir,
f'{FLAGS.cd_placement_filename}.plc')
print(f'Final CD placement can be found at {FLAGS.cd_output_dir}')
if __name__ == '__main__':
app.run(main)
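# Minimal illustrative sketch (not part of this binary) of the coordinate descent
# idea described in the module docstring: for a fixed number of epochs, visit each
# node in order and greedily move it to the grid location with the lowest cost.
# The dict-based positions, `grid_locations` and `cost_fn` are toy stand-ins, not
# the real placer API.
def _toy_coordinate_descent(node_positions, grid_locations, cost_fn, epochs=1):
    for _ in range(epochs):
        for node in list(node_positions):
            best_loc = min(
                grid_locations,
                key=lambda loc: cost_fn({**node_positions, node: loc}),
            )
            node_positions[node] = best_loc  # greedy update for this coordinate
    return node_positions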
| {
"content_hash": "314ccdd3e65a1df75327c89b6e885464",
"timestamp": "",
"source": "github",
"line_count": 62,
"max_line_length": 80,
"avg_line_length": 33.20967741935484,
"alnum_prop": 0.7294803302574066,
"repo_name": "google-research/circuit_training",
"id": "ce88fe1320b844fe1e2f972b2ee3fdec56212519",
"size": "2672",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "circuit_training/environment/coordinate_descent_placer_main.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "474797"
}
],
"symlink_target": ""
} |
import requests
import numpy as np
class ModelServiceClient(object):
def __init__(self, host, api_key):
self.host = host
self.api_key = api_key
def predict(self, X):
if type(X) is np.ndarray:
X = X.tolist()
response = requests.post(self.host + '/predict', json={'X': X},
params={'key': self.api_key})
response_json = response.json()
if 'y' in response_json:
return response_json['y']
else:
print(response_json)
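# Example usage (illustrative only: the host URL and API key are placeholders,
# and the target service must expose the /predict endpoint used above):
if __name__ == '__main__':
    client = ModelServiceClient('https://example-project.appspot.com', 'my-api-key')
    predictions = client.predict(np.array([[1.0, 2.0, 3.0]]))
    print(predictions)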
| {
"content_hash": "9d1071d82b9a344fdfd21ba2e80af2a6",
"timestamp": "",
"source": "github",
"line_count": 20,
"max_line_length": 71,
"avg_line_length": 26.35,
"alnum_prop": 0.5483870967741935,
"repo_name": "GoogleCloudPlatform/ml-on-gcp",
"id": "e022e432c78710a7b1e17cb3feafcec8716d8f03",
"size": "1124",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tutorials/sklearn/gae_serve/client.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "2829"
},
{
"name": "HTML",
"bytes": "2609"
},
{
"name": "Jupyter Notebook",
"bytes": "485638"
},
{
"name": "Python",
"bytes": "909187"
},
{
"name": "R",
"bytes": "9425"
},
{
"name": "Shell",
"bytes": "72356"
}
],
"symlink_target": ""
} |
"""
callback functions used in the training process
"""
from __future__ import annotations
import logging
import os
import re
import warnings
from collections import deque
from glob import glob
import numpy as np
from tensorflow.keras.callbacks import Callback
from tensorflow.keras.utils import Sequence
from megnet.utils.metrics import accuracy, mae
from megnet.utils.preprocessing import DummyScaler, Scaler
logger = logging.getLogger(__name__)
class ModelCheckpointMAE(Callback):
"""
Save the best MAE model with target scaler
"""
def __init__(
self,
filepath: str = "./callback/val_mae_{epoch:05d}_{val_mae:.6f}.hdf5",
monitor: str = "val_mae",
verbose: int = 0,
save_best_only: bool = True,
save_weights_only: bool = False,
val_gen: Sequence = None,
steps_per_val: int | None = None,
target_scaler: Scaler | None = None,
period: int = 1,
mode: str = "auto",
):
"""
Args:
filepath (string): path to save the model file with format. For example
`weights.{epoch:02d}-{val_mae:.6f}.hdf5` will save the corresponding epoch and
val_mae in the filename
monitor (string): quantity to monitor, default to "val_mae"
verbose (int): 0 for no training log, 1 for only epoch-level log and 2 for batch-level log
save_best_only (bool): whether to save only the best model
save_weights_only (bool): whether to save the weights only excluding model structure
val_gen (generator): validation generator
steps_per_val (int): steps per epoch for validation generator
target_scaler (object): exposing inverse_transform method to scale the output
period (int): number of epoch interval for this callback
mode: (string) choose from "min", "max" or "auto"
"""
super().__init__()
if val_gen is None:
raise ValueError("No validation data is provided!")
self.verbose = verbose
if self.verbose > 0:
logging.basicConfig(level=logging.INFO)
self.filepath = filepath
self.save_best_only = save_best_only
self.save_weights_only = save_weights_only
self.period = period
self.epochs_since_last_save = 0
self.val_gen = val_gen
self.steps_per_val = steps_per_val or len(val_gen)
self.target_scaler = target_scaler or DummyScaler()
if monitor == "val_mae":
self.metric = mae
self.monitor = "val_mae"
elif monitor == "val_acc":
self.metric = accuracy
self.filepath = self.filepath.replace("val_mae", "val_acc")
self.monitor = "val_acc"
if mode == "min":
self.monitor_op = np.less
self.best = np.Inf
elif mode == "max":
self.monitor_op = np.greater
self.best = -np.Inf
else:
if "acc" in self.monitor or self.monitor.startswith("fmeasure"):
self.monitor_op = np.greater
self.best = -np.Inf
else:
self.monitor_op = np.less
self.best = np.Inf
def on_epoch_end(self, epoch: int, logs: dict | None = None) -> None:
"""
Codes called by the callback at the end of epoch
Args:
epoch (int): epoch id
logs (dict): logs of training
Returns:
None
"""
self.epochs_since_last_save += 1
if self.epochs_since_last_save >= self.period:
self.epochs_since_last_save = 0
val_pred = []
val_y = []
for i in range(self.steps_per_val):
val_data = self.val_gen[i] # type: ignore
nb_atom = _count(np.array(val_data[0][-2]))
stop_training = self.model.stop_training # save stop_trainings state
pred_ = self.model.predict(val_data[0])
self.model.stop_training = stop_training
val_pred.append(self.target_scaler.inverse_transform(pred_[0, :, :], nb_atom[:, None]))
val_y.append(self.target_scaler.inverse_transform(val_data[1][0, :, :], nb_atom[:, None]))
current = self.metric(np.concatenate(val_y, axis=0), np.concatenate(val_pred, axis=0))
filepath = self.filepath.format(**{"epoch": epoch + 1, self.monitor: current})
if self.save_best_only:
if current is None:
warnings.warn(f"Can save best model only with {self.monitor} available, skipping.", RuntimeWarning)
else:
if self.monitor_op(current, self.best):
logger.info(
f"\nEpoch {epoch+1:05d}: {self.monitor} improved from {self.best:.5f} to {current:.5f},"
f" saving model to {filepath}"
)
self.best = current
if self.save_weights_only:
self.model.save_weights(filepath, overwrite=True)
else:
self.model.save(filepath, overwrite=True)
else:
if self.verbose > 0:
logger.info(f"\nEpoch {epoch+1:05d}: {self.monitor} did not improve from {self.best:.5f}")
else:
logger.info(f"\nEpoch {epoch+1:05d}: saving model to {filepath}")
if self.save_weights_only:
self.model.save_weights(filepath, overwrite=True)
else:
self.model.save(filepath, overwrite=True)
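# Illustrative usage sketch (comment only; `model`, `train_gen` and `val_gen` are
# assumed to be a compiled Keras model and data Sequences, not defined here):
#
#   checkpoint = ModelCheckpointMAE(
#       filepath="./callback/val_mae_{epoch:05d}_{val_mae:.6f}.hdf5",
#       monitor="val_mae",
#       val_gen=val_gen,
#       target_scaler=None,  # falls back to DummyScaler()
#   )
#   model.fit(train_gen, callbacks=[checkpoint])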
class ManualStop(Callback):
"""
Stop the training manually by putting a "STOP" file in the directory
"""
def on_batch_end(self, epoch: int, logs: dict | None = None) -> None:
"""
Codes called at the end of a batch
Args:
epoch (int): epoch id
logs (dict): log dict
Returns: None
"""
if os.path.isfile("STOP"):
self.model.stop_training = True
class EarlyStopping(Callback):
"""
Implements EarlyStopping callback using saved model files
"""
def __init__(
self,
filepath: str = "./callback/val_mae_{epoch:05d}_{val_mae:.6f}.hdf5",
patience: int = 500,
monitor: str = "val_mae",
mode: str = "auto",
):
"""
Args:
filepath (str): filepath for saved model checkpoint, should be consistent with
checkpoint callback
patience (int): number of steps that the val mae does not change.
It is a criterion for early stopping
monitor (str): target metric to monitor
mode (str): min, max or auto
"""
self.filepath = filepath
self.losses: deque = deque([], maxlen=10)
self.patience = patience
self.monitor = monitor
super().__init__()
if mode == "min":
self.monitor_op = np.argmin
elif mode == "max":
self.monitor_op = np.argmax
else:
if "acc" in self.monitor:
self.monitor_op = np.argmax
else:
self.monitor_op = np.argmin
# get variable name
variable_name_pattern = r"{(.+?)}"
self.variable_names = re.findall(variable_name_pattern, filepath)
self.variable_names = [i.split(":")[0] for i in self.variable_names]
if self.monitor not in self.variable_names:
raise ValueError("The monitored metric should be in the name pattern")
def on_epoch_end(self, epoch: int, logs: dict | None = None):
"""
Check the loss value at the end of an epoch for early stopping
Args:
epoch (int): epoch id
logs (dict): log history
Returns: None
"""
logs = logs or {}
loss = logs.get("loss")
if loss is not None:
self.losses.append(loss)
if np.isnan(loss) or np.isinf(loss):
logger.info("Nan loss found!")
self.model.stop_training = True
last_saved_epoch, last_metric, last_file = self._get_checkpoints()
if last_saved_epoch is not None:
if last_saved_epoch + self.patience <= epoch:
self.model.stop_training = True
logger.info(f"{self.monitor} does not improve after {self.patience}, stopping the fitting...")
def _get_checkpoints(self):
file_pattern = re.sub(r"{(.+?)}", r"([0-9\.]+)", self.filepath)
glob_pattern = re.sub(r"{(.+?)}", r"*", self.filepath)
all_check_points = glob(glob_pattern)
if len(all_check_points) > 0:
metric_index = self.variable_names.index(self.monitor)
epoch_index = self.variable_names.index("epoch")
metric_values = []
epochs = []
for i in all_check_points:
metrics = re.findall(file_pattern, i)[0]
metric_values.append(float(metrics[metric_index]))
epochs.append(int(metrics[epoch_index]))
ind = self.monitor_op(metric_values)
return epochs[ind], metric_values[ind], all_check_points[ind]
return None, None, None
def _count(a: np.ndarray) -> np.ndarray:
"""
count the number of consecutive appearances (run lengths) for each element in a
Args:
a: (np.array)
Returns:
(np.array) number of consecutive appearances (run lengths) of each element in a
"""
a = a.ravel()
a = np.r_[a[0], a, np.Inf]
z = np.where(np.abs(np.diff(a)) > 0)[0]
z = np.r_[0, z]
return np.diff(z)
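if __name__ == "__main__":
    # Quick illustrative check of _count (not part of the library API): the
    # consecutive runs [0, 0, 0], [1, 1] and [2] have lengths 3, 2 and 1.
    print(_count(np.array([0, 0, 0, 1, 1, 2])))  # [3 2 1]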
| {
"content_hash": "6652cecb7434db15ed2d7d67f1c90596",
"timestamp": "",
"source": "github",
"line_count": 264,
"max_line_length": 119,
"avg_line_length": 37.041666666666664,
"alnum_prop": 0.5492381634113918,
"repo_name": "materialsvirtuallab/megnet",
"id": "22e8da5907a2a4c3dcde5dab61e56c65e829198f",
"size": "9779",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "megnet/callbacks.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "5100"
},
{
"name": "CSS",
"bytes": "4156"
},
{
"name": "HTML",
"bytes": "11342"
},
{
"name": "JavaScript",
"bytes": "9573"
},
{
"name": "Jupyter Notebook",
"bytes": "1346431"
},
{
"name": "Makefile",
"bytes": "5573"
},
{
"name": "Procfile",
"bytes": "49"
},
{
"name": "Python",
"bytes": "268660"
},
{
"name": "R",
"bytes": "10398"
},
{
"name": "Shell",
"bytes": "380"
}
],
"symlink_target": ""
} |
import json
from collections import namedtuple
import os
import tables
import numpy as np
from extremefill2D.systems import ExtremeFillSystem, ConstantCurrentSystem
from extremefill2D.meshes import ExtremeFill2DMesh
def assert_close(v1, v2, **kwargs):
assert np.allclose(v1, v2, **kwargs)
def get_path():
return os.path.split(__file__)[0]
def read_params(jsonfile=None, **kwargs):
if jsonfile is None:
jsonfile = os.path.join(get_path(), 'params.json')
with open(jsonfile, 'r') as ff:
params_dict = json.load(ff)
for k, v in kwargs.items():
params_dict[k] = v
return namedtuple('ParamClass', params_dict.keys())(*params_dict.values())
def read_data(attrs):
datafile = os.path.join(get_path(), 'annular.h5')
h5file = tables.open_file(datafile, mode='r')
index = h5file.root._v_attrs.latestIndex
datadict = h5file.get_node('/ID' + str(int(index)))
data = dict()
for attr in attrs:
data[attr] = getattr(datadict, attr).read()
h5file.close()
return data
def test():
params = read_params(totalSteps=5)
system = ExtremeFillSystem(params)
system.run(print_data=False)
attrs = ['distance', 'potential', 'theta', 'suppressor', 'cupric']
data = dict((attr, getattr(system.variables, attr)) for attr in attrs)
data_other = read_data(attrs)
for k, v in data.items():
o = data_other[k]
L2 = np.sqrt(((v - o)**2).sum()) / len(o)
assert np.allclose(v, o, rtol=1e-3, atol=1e-3) or (L2 < max(abs(v - o)))
# yield assert_close, v, o
def constant_current_json():
return os.path.join(get_path(), 'constant_current.json')
def test_constant_current():
params = read_params(jsonfile=constant_current_json(), totalSteps=1, sweeps=50)
system = ConstantCurrentSystem(params)
system.run(print_data=False)
assert_close(float(system.variables.current), params.current)
def test_mesh():
params = read_params()
mesh = ExtremeFill2DMesh(params)
dx = mesh.get_nonuniform_dx(0.1, 3.0, 4.0, 10.0, 0.3, 2.0)
assert_close(np.sum(dx[:7]), 3.0)
solution = [ 2. , 0.4, 0.2, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1,
0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.2,
0.4, 0.8, 4.2]
assert_close(dx, solution)
def test_mesh_via():
params = read_params()
mesh = ExtremeFill2DMesh(params)
x2 = 3.0
dx = mesh.get_nonuniform_dx(0.2, 0.0, 1.0, x2, 0.4, 2.0)
assert_close(np.sum(dx), x2)
assert_close(len(dx), 9)
def test_geometry():
params = read_params()
mesh = ExtremeFill2DMesh(params)
spacing = mesh.geometric_spacing(1., 10., 1.1)
solution = [ 1. , 1.1 , 1.21 , 1.331 , 1.4641 , 1.61051, 2.28439]
assert_close(spacing, solution)
assert_close(np.sum(spacing), 10.0)
def test_hemispherical_cap():
params = read_params(jsonfile=constant_current_json(), totalSteps=1, sweeps=10, cap_radius=3.75e-5, dt=10.0)
system = ConstantCurrentSystem(params)
system.run(print_data=False)
mesh = system.distance.mesh
x = mesh.x.value
y = mesh.y.value
center =(0.0, max(y))
radius = np.sqrt((x - center[0])**2 + (y - center[1])**2)
mask = np.array(radius < params.cap_radius)
value = np.array(system.variables.cupric)
assert_close(value[mask], params.bulkCupric, rtol=1e-4)
# min_mask = (x > 2e-5) & (x < 4e-5) & (y < params.rinner / 2.)
# min_value = min(value[min_mask])
# assert 650. < min_value < 750.
def test_hemispherical_cap_retreat():
params = read_params(jsonfile=constant_current_json(), totalSteps=1, sweeps=10, cap_radius=3.75e-5, dt=10.0)
system = ConstantCurrentSystem(params)
system.run(print_data=False)
assert np.sum(system.variables.cap.value) == 493
phi = system.variables.distance
phi.setValue(phi.value - params.router / 2.)
assert np.sum(system.variables.cap.value) == 147
phi.setValue(-1)
assert np.sum(system.variables.cap.value) == 0
if __name__ == '__main__':
test()
| {
"content_hash": "304882f8be78e422e71dfc9a10ca30dd",
"timestamp": "",
"source": "github",
"line_count": 115,
"max_line_length": 112,
"avg_line_length": 35.321739130434786,
"alnum_prop": 0.6277695716395865,
"repo_name": "wd15/extremefill2D",
"id": "c83dcb346bd9b3e4a3d86056723864ddc6a98d34",
"size": "4062",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "scripts/test.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "36781013"
},
{
"name": "Python",
"bytes": "104267"
},
{
"name": "Shell",
"bytes": "107"
}
],
"symlink_target": ""
} |
import six
import itertools
from st2common.util.enum import Enum
from st2common.constants.types import ResourceType as SystemResourceType
__all__ = [
'SystemRole',
'PermissionType',
'ResourceType',
'RESOURCE_TYPE_TO_PERMISSION_TYPES_MAP',
'PERMISION_TYPE_TO_DESCRIPTION_MAP',
'ALL_PERMISSION_TYPES',
'GLOBAL_PERMISSION_TYPES',
'GLOBAL_PACK_PERMISSION_TYPES',
'LIST_PERMISSION_TYPES',
'get_resource_permission_types_with_descriptions'
]
class PermissionType(Enum):
"""
Available permission types.
"""
# Note: There is no create endpoint for runner types right now
RUNNER_LIST = 'runner_type_list'
RUNNER_VIEW = 'runner_type_view'
RUNNER_MODIFY = 'runner_type_modify'
RUNNER_ALL = 'runner_type_all'
PACK_LIST = 'pack_list'
PACK_VIEW = 'pack_view'
PACK_CREATE = 'pack_create'
PACK_MODIFY = 'pack_modify'
PACK_DELETE = 'pack_delete'
# Pack-management specific permissions
# Note: Right now those permissions are global and apply to all the packs.
# In the future we plan to support globs.
PACK_INSTALL = 'pack_install'
PACK_UNINSTALL = 'pack_uninstall'
PACK_REGISTER = 'pack_register'
PACK_CONFIG = 'pack_config'
PACK_SEARCH = 'pack_search'
PACK_VIEWS_INDEX_HEALTH = 'pack_views_index_health'
PACK_ALL = 'pack_all'
# Note: Right now we only have read endpoints + update for sensors types
SENSOR_LIST = 'sensor_type_list'
SENSOR_VIEW = 'sensor_type_view'
SENSOR_MODIFY = 'sensor_type_modify'
SENSOR_ALL = 'sensor_type_all'
ACTION_LIST = 'action_list'
ACTION_VIEW = 'action_view'
ACTION_CREATE = 'action_create'
ACTION_MODIFY = 'action_modify'
ACTION_DELETE = 'action_delete'
ACTION_EXECUTE = 'action_execute'
ACTION_ALL = 'action_all'
ACTION_ALIAS_LIST = 'action_alias_list'
ACTION_ALIAS_VIEW = 'action_alias_view'
ACTION_ALIAS_CREATE = 'action_alias_create'
ACTION_ALIAS_MODIFY = 'action_alias_modify'
ACTION_ALIAS_MATCH = 'action_alias_match'
ACTION_ALIAS_HELP = 'action_alias_help'
ACTION_ALIAS_DELETE = 'action_alias_delete'
ACTION_ALIAS_ALL = 'action_alias_all'
# Note: Execution create is granted with "action_execute"
EXECUTION_LIST = 'execution_list'
EXECUTION_VIEW = 'execution_view'
EXECUTION_RE_RUN = 'execution_rerun'
EXECUTION_STOP = 'execution_stop'
EXECUTION_ALL = 'execution_all'
EXECUTION_VIEWS_FILTERS_LIST = 'execution_views_filters_list'
RULE_LIST = 'rule_list'
RULE_VIEW = 'rule_view'
RULE_CREATE = 'rule_create'
RULE_MODIFY = 'rule_modify'
RULE_DELETE = 'rule_delete'
RULE_ALL = 'rule_all'
RULE_ENFORCEMENT_LIST = 'rule_enforcement_list'
RULE_ENFORCEMENT_VIEW = 'rule_enforcement_view'
# TODO - Maybe "datastore_item" / key_value_item ?
KEY_VALUE_VIEW = 'key_value_pair_view'
KEY_VALUE_SET = 'key_value_pair_set'
KEY_VALUE_DELETE = 'key_value_pair_delete'
WEBHOOK_LIST = 'webhook_list'
WEBHOOK_VIEW = 'webhook_view'
WEBHOOK_CREATE = 'webhook_create'
WEBHOOK_SEND = 'webhook_send'
WEBHOOK_DELETE = 'webhook_delete'
WEBHOOK_ALL = 'webhook_all'
TIMER_LIST = 'timer_list'
TIMER_VIEW = 'timer_view'
TIMER_ALL = 'timer_all'
API_KEY_LIST = 'api_key_list'
API_KEY_VIEW = 'api_key_view'
API_KEY_CREATE = 'api_key_create'
API_KEY_MODIFY = 'api_key_modify'
API_KEY_DELETE = 'api_key_delete'
API_KEY_ALL = 'api_key_all'
TRACE_LIST = 'trace_list'
TRACE_VIEW = 'trace_view'
TRACE_ALL = 'trace_all'
# Note: Trigger permission types are also used for the Timer API endpoint since a timer is just
# a special type of trigger
TRIGGER_LIST = 'trigger_list'
TRIGGER_VIEW = 'trigger_view'
TRIGGER_ALL = 'trigger_all'
POLICY_TYPE_LIST = 'policy_type_list'
POLICY_TYPE_VIEW = 'policy_type_view'
POLICY_TYPE_ALL = 'policy_type_all'
POLICY_LIST = 'policy_list'
POLICY_VIEW = 'policy_view'
POLICY_CREATE = 'policy_create'
POLICY_MODIFY = 'policy_modify'
POLICY_DELETE = 'policy_delete'
POLICY_ALL = 'policy_all'
@classmethod
def get_valid_permissions_for_resource_type(cls, resource_type):
"""
Return valid permissions for the provided resource type.
:rtype: ``list``
"""
valid_permissions = RESOURCE_TYPE_TO_PERMISSION_TYPES_MAP[resource_type]
return valid_permissions
@classmethod
def get_resource_type(cls, permission_type):
"""
Retrieve resource type from the provided permission type.
:rtype: ``str``
"""
# Special case for:
# * PACK_VIEWS_INDEX_HEALTH
# * EXECUTION_VIEWS_FILTERS_LIST
if permission_type == PermissionType.PACK_VIEWS_INDEX_HEALTH:
return ResourceType.PACK
elif permission_type == PermissionType.EXECUTION_VIEWS_FILTERS_LIST:
return ResourceType.EXECUTION
split = permission_type.split('_')
assert len(split) >= 2
return '_'.join(split[:-1])
@classmethod
def get_permission_name(cls, permission_type):
"""
Retrieve permission name from the provided permission type.
:rtype: ``str``
"""
split = permission_type.split('_')
assert len(split) >= 2
# Special case for PACK_VIEWS_INDEX_HEALTH
if permission_type == PermissionType.PACK_VIEWS_INDEX_HEALTH:
split = permission_type.split('_', 1)
return split[1]
return split[-1]
@classmethod
def get_permission_description(cls, permission_type):
"""
Retrieve a description for the provided permission_type.
:rtype: ``str``
"""
description = PERMISION_TYPE_TO_DESCRIPTION_MAP[permission_type]
return description
@classmethod
def get_permission_type(cls, resource_type, permission_name):
"""
Retrieve permission type enum value for the provided resource type and permission name.
:rtype: ``str``
"""
# Special case for sensor type (sensor_type -> sensor)
if resource_type == ResourceType.SENSOR:
resource_type = 'sensor'
permission_enum = '%s_%s' % (resource_type.upper(), permission_name.upper())
result = getattr(cls, permission_enum, None)
if not result:
raise ValueError('Unsupported permission type for type "%s" and name "%s"' %
(resource_type, permission_name))
return result
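# Worked examples of the name-derivation helpers above (illustrative values):
#   PermissionType.get_resource_type('action_alias_create')   -> 'action_alias'
#   PermissionType.get_permission_name('action_alias_create') -> 'create'
#   PermissionType.get_permission_name(PermissionType.PACK_VIEWS_INDEX_HEALTH)
#       -> 'views_index_health' (special-cased above)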
class ResourceType(Enum):
"""
Resource types on which permissions can be granted.
"""
RUNNER = SystemResourceType.RUNNER_TYPE
PACK = SystemResourceType.PACK
SENSOR = SystemResourceType.SENSOR_TYPE
ACTION = SystemResourceType.ACTION
ACTION_ALIAS = SystemResourceType.ACTION_ALIAS
RULE = SystemResourceType.RULE
RULE_ENFORCEMENT = SystemResourceType.RULE_ENFORCEMENT
POLICY_TYPE = SystemResourceType.POLICY_TYPE
POLICY = SystemResourceType.POLICY
EXECUTION = SystemResourceType.EXECUTION
KEY_VALUE_PAIR = SystemResourceType.KEY_VALUE_PAIR
WEBHOOK = SystemResourceType.WEBHOOK
TIMER = SystemResourceType.TIMER
API_KEY = SystemResourceType.API_KEY
TRACE = SystemResourceType.TRACE
TRIGGER = SystemResourceType.TRIGGER
class SystemRole(Enum):
"""
Default system roles which can't be manipulated (modified or removed).
"""
SYSTEM_ADMIN = 'system_admin' # Special role which can't be revoked.
ADMIN = 'admin'
OBSERVER = 'observer'
# Maps a list of available permission types for each resource
RESOURCE_TYPE_TO_PERMISSION_TYPES_MAP = {
ResourceType.RUNNER: [
PermissionType.RUNNER_LIST,
PermissionType.RUNNER_VIEW,
PermissionType.RUNNER_MODIFY,
PermissionType.RUNNER_ALL,
],
ResourceType.PACK: [
PermissionType.PACK_LIST,
PermissionType.PACK_VIEW,
PermissionType.PACK_CREATE,
PermissionType.PACK_MODIFY,
PermissionType.PACK_DELETE,
PermissionType.PACK_INSTALL,
PermissionType.PACK_UNINSTALL,
PermissionType.PACK_REGISTER,
PermissionType.PACK_CONFIG,
PermissionType.PACK_SEARCH,
PermissionType.PACK_VIEWS_INDEX_HEALTH,
PermissionType.PACK_ALL,
PermissionType.SENSOR_VIEW,
PermissionType.SENSOR_MODIFY,
PermissionType.SENSOR_ALL,
PermissionType.ACTION_VIEW,
PermissionType.ACTION_CREATE,
PermissionType.ACTION_MODIFY,
PermissionType.ACTION_DELETE,
PermissionType.ACTION_EXECUTE,
PermissionType.ACTION_ALL,
PermissionType.ACTION_ALIAS_VIEW,
PermissionType.ACTION_ALIAS_CREATE,
PermissionType.ACTION_ALIAS_MODIFY,
PermissionType.ACTION_ALIAS_DELETE,
PermissionType.ACTION_ALIAS_ALL,
PermissionType.RULE_VIEW,
PermissionType.RULE_CREATE,
PermissionType.RULE_MODIFY,
PermissionType.RULE_DELETE,
PermissionType.RULE_ALL
],
ResourceType.SENSOR: [
PermissionType.SENSOR_LIST,
PermissionType.SENSOR_VIEW,
PermissionType.SENSOR_MODIFY,
PermissionType.SENSOR_ALL
],
ResourceType.ACTION: [
PermissionType.ACTION_LIST,
PermissionType.ACTION_VIEW,
PermissionType.ACTION_CREATE,
PermissionType.ACTION_MODIFY,
PermissionType.ACTION_DELETE,
PermissionType.ACTION_EXECUTE,
PermissionType.ACTION_ALL
],
ResourceType.ACTION_ALIAS: [
PermissionType.ACTION_ALIAS_LIST,
PermissionType.ACTION_ALIAS_VIEW,
PermissionType.ACTION_ALIAS_CREATE,
PermissionType.ACTION_ALIAS_MODIFY,
PermissionType.ACTION_ALIAS_MATCH,
PermissionType.ACTION_ALIAS_HELP,
PermissionType.ACTION_ALIAS_DELETE,
PermissionType.ACTION_ALIAS_ALL
],
ResourceType.RULE: [
PermissionType.RULE_LIST,
PermissionType.RULE_VIEW,
PermissionType.RULE_CREATE,
PermissionType.RULE_MODIFY,
PermissionType.RULE_DELETE,
PermissionType.RULE_ALL
],
ResourceType.RULE_ENFORCEMENT: [
PermissionType.RULE_ENFORCEMENT_LIST,
PermissionType.RULE_ENFORCEMENT_VIEW,
],
ResourceType.EXECUTION: [
PermissionType.EXECUTION_LIST,
PermissionType.EXECUTION_VIEW,
PermissionType.EXECUTION_RE_RUN,
PermissionType.EXECUTION_STOP,
PermissionType.EXECUTION_ALL,
PermissionType.EXECUTION_VIEWS_FILTERS_LIST,
],
ResourceType.KEY_VALUE_PAIR: [
PermissionType.KEY_VALUE_VIEW,
PermissionType.KEY_VALUE_SET,
PermissionType.KEY_VALUE_DELETE
],
ResourceType.WEBHOOK: [
PermissionType.WEBHOOK_LIST,
PermissionType.WEBHOOK_VIEW,
PermissionType.WEBHOOK_CREATE,
PermissionType.WEBHOOK_SEND,
PermissionType.WEBHOOK_DELETE,
PermissionType.WEBHOOK_ALL
],
ResourceType.TIMER: [
PermissionType.TIMER_LIST,
PermissionType.TIMER_VIEW,
PermissionType.TIMER_ALL
],
ResourceType.API_KEY: [
PermissionType.API_KEY_LIST,
PermissionType.API_KEY_VIEW,
PermissionType.API_KEY_CREATE,
PermissionType.API_KEY_MODIFY,
PermissionType.API_KEY_DELETE,
PermissionType.API_KEY_ALL
],
ResourceType.TRACE: [
PermissionType.TRACE_LIST,
PermissionType.TRACE_VIEW,
PermissionType.TRACE_ALL
],
ResourceType.TRIGGER: [
PermissionType.TRIGGER_LIST,
PermissionType.TRIGGER_VIEW,
PermissionType.TRIGGER_ALL
],
ResourceType.POLICY_TYPE: [
PermissionType.POLICY_TYPE_LIST,
PermissionType.POLICY_TYPE_VIEW,
PermissionType.POLICY_TYPE_ALL,
],
ResourceType.POLICY: [
PermissionType.POLICY_LIST,
PermissionType.POLICY_VIEW,
PermissionType.POLICY_CREATE,
PermissionType.POLICY_MODIFY,
PermissionType.POLICY_DELETE,
PermissionType.POLICY_ALL,
]
}
ALL_PERMISSION_TYPES = RESOURCE_TYPE_TO_PERMISSION_TYPES_MAP.values()
ALL_PERMISSION_TYPES = list(itertools.chain(*ALL_PERMISSION_TYPES))
LIST_PERMISSION_TYPES = [permission_type for permission_type in ALL_PERMISSION_TYPES if
permission_type.endswith('_list')]
# List of global permissions (ones which don't apply to a specific resource)
GLOBAL_PERMISSION_TYPES = [
# Pack global permission types
PermissionType.PACK_INSTALL,
PermissionType.PACK_UNINSTALL,
PermissionType.PACK_CREATE,
PermissionType.PACK_REGISTER,
PermissionType.PACK_CONFIG,
PermissionType.PACK_SEARCH,
PermissionType.PACK_VIEWS_INDEX_HEALTH,
# Action alias global permission types
PermissionType.ACTION_ALIAS_MATCH,
PermissionType.ACTION_ALIAS_HELP,
# API key global permission types
PermissionType.API_KEY_CREATE,
# Policy global permission types
PermissionType.POLICY_CREATE,
# Execution
PermissionType.EXECUTION_VIEWS_FILTERS_LIST
] + LIST_PERMISSION_TYPES
GLOBAL_PACK_PERMISSION_TYPES = [permission_type for permission_type in GLOBAL_PERMISSION_TYPES if
permission_type.startswith('pack_')]
# Maps a permission type to the corresponding description
PERMISION_TYPE_TO_DESCRIPTION_MAP = {
PermissionType.PACK_LIST: 'Ability to list (view all) packs.',
PermissionType.PACK_VIEW: 'Ability to view a pack.',
PermissionType.PACK_CREATE: 'Ability to create a new pack.',
PermissionType.PACK_MODIFY: 'Ability to modify (update) an existing pack.',
PermissionType.PACK_DELETE: 'Ability to delete an existing pack.',
PermissionType.PACK_INSTALL: 'Ability to install packs.',
PermissionType.PACK_UNINSTALL: 'Ability to uninstall packs.',
PermissionType.PACK_REGISTER: 'Ability to register packs and corresponding resources.',
PermissionType.PACK_CONFIG: 'Ability to configure a pack.',
PermissionType.PACK_SEARCH: 'Ability to query registry and search packs.',
PermissionType.PACK_VIEWS_INDEX_HEALTH: 'Ability to query health of pack registries.',
PermissionType.PACK_ALL: ('Ability to perform all the supported operations on a particular '
'pack.'),
PermissionType.SENSOR_LIST: 'Ability to list (view all) sensors.',
PermissionType.SENSOR_VIEW: 'Ability to view a sensor',
PermissionType.SENSOR_MODIFY: ('Ability to modify (update) an existing sensor. Also implies '
'"sensor_type_view" permission.'),
PermissionType.SENSOR_ALL: ('Ability to perform all the supported operations on a particular '
'sensor.'),
PermissionType.ACTION_LIST: 'Ability to list (view all) actions.',
PermissionType.ACTION_VIEW: 'Ability to view an action.',
PermissionType.ACTION_CREATE: ('Ability to create a new action. Also implies "action_view" '
'permission.'),
PermissionType.ACTION_MODIFY: ('Ability to modify (update) an existing action. Also implies '
'"action_view" permission.'),
PermissionType.ACTION_DELETE: ('Ability to delete an existing action. Also implies '
'"action_view" permission.'),
PermissionType.ACTION_EXECUTE: ('Ability to execute (run) an action. Also implies '
'"action_view" permission.'),
PermissionType.ACTION_ALL: ('Ability to perform all the supported operations on a particular '
'action.'),
PermissionType.ACTION_ALIAS_LIST: 'Ability to list (view all) action aliases.',
PermissionType.ACTION_ALIAS_VIEW: 'Ability to view an action alias.',
PermissionType.ACTION_ALIAS_CREATE: ('Ability to create a new action alias. Also implies'
' "action_alias_view" permission.'),
PermissionType.ACTION_ALIAS_MODIFY: ('Ability to modify (update) an existing action alias. '
'Also implies "action_alias_view" permission.'),
PermissionType.ACTION_ALIAS_MATCH: ('Ability to use action alias match API endpoint.'),
PermissionType.ACTION_ALIAS_HELP: ('Ability to use action alias help API endpoint.'),
PermissionType.ACTION_ALIAS_DELETE: ('Ability to delete an existing action alias. Also '
'implies "action_alias_view" permission.'),
PermissionType.ACTION_ALIAS_ALL: ('Ability to perform all the supported operations on a '
'particular action alias.'),
PermissionType.EXECUTION_LIST: 'Ability to list (view all) executions.',
PermissionType.EXECUTION_VIEW: 'Ability to view an execution.',
PermissionType.EXECUTION_RE_RUN: 'Ability to re-run (create a new copy of) an existing execution.',
PermissionType.EXECUTION_STOP: 'Ability to stop (cancel) a running execution.',
PermissionType.EXECUTION_ALL: ('Ability to perform all the supported operations on a '
'particular execution.'),
PermissionType.EXECUTION_VIEWS_FILTERS_LIST: ('Ability to view all the distinct execution '
'filters.'),
PermissionType.RULE_LIST: 'Ability to list (view all) rules.',
PermissionType.RULE_VIEW: 'Ability to view a rule.',
PermissionType.RULE_CREATE: ('Ability to create a new rule. Also implies "rule_view" '
'permission'),
PermissionType.RULE_MODIFY: ('Ability to modify (update) an existing rule. Also implies '
'"rule_view" permission.'),
PermissionType.RULE_DELETE: ('Ability to delete an existing rule. Also implies "rule_view" '
'permission.'),
PermissionType.RULE_ALL: ('Ability to perform all the supported operations on a particular '
'rule.'),
PermissionType.RULE_ENFORCEMENT_LIST: 'Ability to list (view all) rule enforcements.',
PermissionType.RULE_ENFORCEMENT_VIEW: 'Ability to view a rule enforcement.',
PermissionType.RUNNER_LIST: 'Ability to list (view all) runners.',
PermissionType.RUNNER_VIEW: 'Ability to view a runner.',
PermissionType.RUNNER_MODIFY: ('Ability to modify (update) an existing runner. Also implies '
'"runner_type_view" permission.'),
PermissionType.RUNNER_ALL: ('Ability to perform all the supported operations on a particular '
'runner.'),
PermissionType.WEBHOOK_LIST: 'Ability to list (view all) webhooks.',
PermissionType.WEBHOOK_VIEW: ('Ability to view a webhook.'),
PermissionType.WEBHOOK_CREATE: ('Ability to create a new webhook.'),
PermissionType.WEBHOOK_SEND: ('Ability to send / POST data to an existing webhook.'),
PermissionType.WEBHOOK_DELETE: ('Ability to delete an existing webhook.'),
PermissionType.WEBHOOK_ALL: ('Ability to perform all the supported operations on a particular '
'webhook.'),
PermissionType.TIMER_LIST: 'Ability to list (view all) timers.',
PermissionType.TIMER_VIEW: ('Ability to view a timer.'),
PermissionType.TIMER_ALL: ('Ability to perform all the supported operations on timers'),
PermissionType.API_KEY_LIST: 'Ability to list (view all) API keys.',
PermissionType.API_KEY_VIEW: ('Ability to view an API Key.'),
PermissionType.API_KEY_CREATE: ('Ability to create a new API Key.'),
PermissionType.API_KEY_MODIFY: ('Ability to modify (update) an existing API key. Also implies '
'"api_key_view" permission.'),
PermissionType.API_KEY_DELETE: ('Ability to delete an existing API Keys.'),
PermissionType.API_KEY_ALL: ('Ability to perform all the supported operations on an API Key.'),
PermissionType.KEY_VALUE_VIEW: ('Ability to view Key-Value Pairs.'),
PermissionType.KEY_VALUE_SET: ('Ability to set a Key-Value Pair.'),
PermissionType.KEY_VALUE_DELETE: ('Ability to delete an existing Key-Value Pair.'),
PermissionType.TRACE_LIST: ('Ability to list (view all) traces.'),
PermissionType.TRACE_VIEW: ('Ability to view a trace.'),
PermissionType.TRACE_ALL: ('Ability to perform all the supported operations on traces.'),
PermissionType.TRIGGER_LIST: ('Ability to list (view all) triggers.'),
PermissionType.TRIGGER_VIEW: ('Ability to view a trigger.'),
PermissionType.TRIGGER_ALL: ('Ability to perform all the supported operations on triggers.'),
PermissionType.POLICY_TYPE_LIST: ('Ability to list (view all) policy types.'),
PermissionType.POLICY_TYPE_VIEW: ('Ability to view a policy type.'),
PermissionType.POLICY_TYPE_ALL: ('Ability to perform all the supported operations on policy'
' types.'),
PermissionType.POLICY_LIST: 'Ability to list (view all) policies.',
PermissionType.POLICY_VIEW: ('Ability to view a policy.'),
PermissionType.POLICY_CREATE: ('Ability to create a new policy.'),
PermissionType.POLICY_MODIFY: ('Ability to modify an existing policy.'),
PermissionType.POLICY_DELETE: ('Ability to delete an existing policy.'),
PermissionType.POLICY_ALL: ('Ability to perform all the supported operations on a particular '
'policy.')
}
def get_resource_permission_types_with_descriptions():
"""
Return available permission types for each resource type with corresponding descriptions.
:rtype: ``dict``
"""
result = {}
for resource_type, permission_types in six.iteritems(RESOURCE_TYPE_TO_PERMISSION_TYPES_MAP):
result[resource_type] = {}
for permission_type in permission_types:
result[resource_type][permission_type] = \
PERMISION_TYPE_TO_DESCRIPTION_MAP[permission_type]
return result
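# Illustrative usage sketch (standalone; assumes this module is importable as-is):
if __name__ == '__main__':
    types_map = get_resource_permission_types_with_descriptions()
    for resource_type, permission_types in six.iteritems(types_map):
        print('%s: %d permission types' % (resource_type, len(permission_types)))
    # Resolve a permission type back into its resource type and permission name
    print(PermissionType.get_resource_type(PermissionType.RULE_CREATE))    # 'rule'
    print(PermissionType.get_permission_name(PermissionType.RULE_CREATE))  # 'create'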
| {
"content_hash": "cce5db5052c29fe52795ed1856beeb99",
"timestamp": "",
"source": "github",
"line_count": 564,
"max_line_length": 99,
"avg_line_length": 39.001773049645394,
"alnum_prop": 0.660499158976224,
"repo_name": "tonybaloney/st2",
"id": "5b3b4f947962957957f140aa97430ca0fc3bd380",
"size": "22777",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "st2common/st2common/rbac/types.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "198"
},
{
"name": "Makefile",
"bytes": "46066"
},
{
"name": "PowerShell",
"bytes": "299"
},
{
"name": "Python",
"bytes": "4278891"
},
{
"name": "Shell",
"bytes": "47687"
},
{
"name": "Slash",
"bytes": "677"
}
],
"symlink_target": ""
} |
'''Package initialisation for asq.
'''
from .version import __version__
from .initiators import query # noqa
__author__ = 'Sixty North'
| {
"content_hash": "4cbbc7335ea9314bd121a74c3382ce6c",
"timestamp": "",
"source": "github",
"line_count": 8,
"max_line_length": 37,
"avg_line_length": 17.5,
"alnum_prop": 0.6857142857142857,
"repo_name": "rob-smallshire/asq",
"id": "faa28105cb9378f2cf10dda37e6175261aeca12e",
"size": "140",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "asq/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "284574"
}
],
"symlink_target": ""
} |
"""
CalDAV methods.
Modules in this package are imported by twistedcaldav.resource in order to
bind methods to CalDAVResource.
"""
__all__ = [
"acl",
"get",
"mkcalendar",
"mkcol",
"post",
"propfind",
"report",
"report_freebusy",
"report_calendar_multiget",
"report_calendar_query",
"report_addressbook_multiget",
"report_addressbook_query",
"report_sync_collection",
]
| {
"content_hash": "b965c75285fa6b9e0727f4aeb27b6fd6",
"timestamp": "",
"source": "github",
"line_count": 22,
"max_line_length": 74,
"avg_line_length": 19.227272727272727,
"alnum_prop": 0.6312056737588653,
"repo_name": "red-hood/calendarserver",
"id": "a58b53c0417e545c43826e8c7f79ae9ec6217056",
"size": "1030",
"binary": false,
"copies": "1",
"ref": "refs/heads/trunk",
"path": "twistedcaldav/method/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "1482"
},
{
"name": "CSS",
"bytes": "4214"
},
{
"name": "DIGITAL Command Language",
"bytes": "1234"
},
{
"name": "DTrace",
"bytes": "13143"
},
{
"name": "HTML",
"bytes": "36120"
},
{
"name": "JavaScript",
"bytes": "80248"
},
{
"name": "Makefile",
"bytes": "14429"
},
{
"name": "PLSQL",
"bytes": "12719"
},
{
"name": "PLpgSQL",
"bytes": "291431"
},
{
"name": "Python",
"bytes": "10537612"
},
{
"name": "R",
"bytes": "1091"
},
{
"name": "SQLPL",
"bytes": "6430"
},
{
"name": "Shell",
"bytes": "96975"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from django.db import migrations
def set_default_display_values(apps, schema_editor):
Question = apps.get_model('surveys', 'Question')
SubQuestion = apps.get_model('surveys', 'SubQuestion')
for question in Question.objects.all():
try:
question.left_label = question.properties['left_label']
except KeyError:
pass
try:
question.right_label = question.properties['right_label']
except KeyError:
pass
question.save()
for sub_question in SubQuestion.objects.all():
sub_question.display_title = sub_question.title
sub_question.save()
class Migration(migrations.Migration):
dependencies = [
('surveys', '0028_auto_20160929_0849'),
]
operations = [
migrations.RunPython(set_default_display_values)
]
| {
"content_hash": "9ee87663e37406c5ed10d8da367e9fdc",
"timestamp": "",
"source": "github",
"line_count": 33,
"max_line_length": 69,
"avg_line_length": 26.96969696969697,
"alnum_prop": 0.6337078651685393,
"repo_name": "onepercentclub/bluebottle",
"id": "cde699988637b04877463f87380b3c327901c60c",
"size": "962",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "bluebottle/surveys/migrations/0029_auto_20160929_0932.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "41694"
},
{
"name": "HTML",
"bytes": "246695"
},
{
"name": "Handlebars",
"bytes": "63"
},
{
"name": "JavaScript",
"bytes": "139123"
},
{
"name": "PHP",
"bytes": "35"
},
{
"name": "PLpgSQL",
"bytes": "1369882"
},
{
"name": "PostScript",
"bytes": "2927"
},
{
"name": "Python",
"bytes": "4983116"
},
{
"name": "Rich Text Format",
"bytes": "39109"
},
{
"name": "SCSS",
"bytes": "99555"
},
{
"name": "Shell",
"bytes": "3068"
},
{
"name": "Smarty",
"bytes": "3814"
}
],
"symlink_target": ""
} |
from __future__ import print_function
import re
from core.layers import *
import sys, os
import Levenshtein
import codecs
###############################################################################
# Unofficial p0f3 implementation in Python + plugins for other protos
# Python passive fingerprinter with scapy and based on p0f v3 database
#------------------------------------------------------------------------------
# This script applies the p0f v3 database on packets using different methods
# to calculate the distance between signatures, but also implements some 'push-ack'
# (data) processing methods that can reveal more information, for example
# when looking at HTTP headers.
#
# Compared to the initial p0f3 project, this script is way slower! But it could be
# interesting for native Python implementations to be entirely independent of
# the p0f3 binary. Moreover, the p0f implementation in scapy doesn't support
# the new p0f3 database, which is why this piece of code was developed.
#------------------------------------------------------------------------------
# The module and script supports Python 2 and Python 3
# (Scapy in Python3 version is available at
# https://pypi.python.org/pypi/scapy-python3/0.18)
#------------------------------------------------------------------------------
# Author: @FlUxIuS (sebastien.dudek_A|T_synacktiv.com)
# Contributors: needed!
#------------------------------------------------------------------------------
# Tasks to complete =>
# TODO: + complete signature generation for IPv6
# + complete push-ack signatures --> infinite work
# + update p0f3 database --> infinite work
# + implement other distance calculation methods
###############################################################################
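#------------------------------------------------------------------------------
# Illustrative usage sketch (assumes a live capture interface and scapy; shown
# as comments only, values are placeholders):
#
#   from scapy.all import sniff
#   fp = p0f3p()
#   fp.parse_p0fconf()                    # load the p0f v3 KB (data/p0f.fp)
#   def handle(pkt):
#       pkt_sign = fp.pkt2sig(pkt)        # build a p0f-style signature
#       if pkt_sign is not None:
#           print(fp.matchsig(pkt_sign))  # closest KB label(s) + distance
#   sniff(filter="tcp", prn=handle)
#------------------------------------------------------------------------------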
dir_ = os.path.dirname(__file__)
default_fp = os.path.abspath(dir_ + "/../data/p0f.fp")
class ClassifiedSignature(object):
"""
Structure for matched signatures by p0f KB
"""
top3 = None
computedsig = None
signature = None
label = None
distance = None
orig = None
class PacketSignature(object):
"""
Structure for generated signature
"""
flag = None
protocol = None
signature = None
packet_type = None
extra = None
class p0f3p(object):
"""
p0f3+
Passive OS and application fingerprinting.
Two techniques used:
- p0f3 knowledge base matching
- payload analysis (extended to other protocols)
"""
__fpobj = None
__fppath = None
def __init__(self, fpfile=None):
"""
Init p0f python object.
optional in(1): fpfile path.
"""
self.__fppath = default_fp
if fpfile is not None:
self.__fppath = fpfile
def parse_p0fconf(self, fpfile=None):
"""
Parse p0f3 configuration file.
optional in(1): (string) fpfile path.
"""
if fpfile is not None:
self.__fppath = fpfile
dicts = {}
p0fcon = open(self.__fppath,"r").read().split("\n")
cursection = None
curlabel = None
cursigs = []
for i in p0fcon:
section = re.match(r"\[(\S+)\]", i)
sig = re.match(r"sig\s+\=\s+(.*)", i)
label = re.match(r"label\s+\=\s+(.*)", i)
if section is not None:
cursection = section.group(1)
curlabel = None
dicts[cursection] = {}
elif label is not None:
if curlabel is not None and cursection is not None:
dicts[cursection][curlabel] = cursigs
curlabel = label.group(1)
cursigs = []
elif sig is not None:
cursigs.append(sig.group(1))
self.__fpobj = dicts
return dicts
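# For reference, this parser expects entries shaped like the stock p0f.fp
# knowledge base (a [section] header, then label/sig pairs), e.g.:
#
#   [tcp:request]
#   label = s:unix:Linux:3.x
#   sig   = *:64:0:*:mss*20,10:mss,sok,ts,nop,ws:df,id+:0
#
# The values above are illustrative; the real entries live in data/p0f.fp.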
def getsigs(self):
"""
Get signature dictionary.
out: dict signatures.
"""
return self.__fpobj
def reaghttp(self, sig_base, sig_pkt):
"""
Compute HTTP signature base's wildcard with the signature's packet.
in(1): (string) p0f signature to be compared.
in(2): (string) pkt's signature to be compared with p0f's one.
out: adapted signature.
"""
tsig1 = sig_base.split(":")
tsig2 = sig_pkt.split(":")
for x in range(len(tsig1)):
if x >= len(tsig2):
break
tsig1c = tsig1[x].split(",")
ntsig1c = []
tsig2c = tsig2[x].split(",")
for y in range(len(tsig1c)):
if tsig1c[y] == "":
break
if tsig1c[y][0] == "?":
if tsig1c[y][1:] in tsig2c:
ntsig1c.append(tsig1c[y][1:])
else:
ntsig1c.append(tsig1c[y])
tsig1[x] = ",".join(ntsig1c)
if tsig2[x] == "*":
tsig2[x] = tsig1[x]
sig_base = ":".join(tsig1)
sig_pkt = ":".join(tsig2)
return (sig_base, sig_pkt)
def http2sig(self, strload):
"""
Generates HTTP p0f signature.
in(1) : (String) strload - pkt load
out: (Tuple) (visual prints of headers, p0f signature)
"""
sig = ""
server = "*"
servervalues = {}
if type(strload).__name__ == "bytes":
strload = strload.decode("utf-8", "ignore")
loadtab = strload.split("\r\n")
valuestopush = ["Accept",
"Accept-Encoding",
"Accept-Ranges",
"Keep-Alive",
"Transfer-Encoding",
"Connection"]
query = loadtab[0]
version = None
name = ""
try:
version = re.match(r".*HTTP\/([\d\.]+)", query).group(1)
sig += version[-1] # keep the last digit of the HTTP version / TODO: look other cases
except:
pass
sig += ":"
headers = []
for line in loadtab[1:]:
if line != "":
try:
header, value = re.match(r"(\S+):\s+(.*)", line).groups()
topush = "%s" % header
if header in valuestopush:
topush = "%s=[%s]" % (header, value)
headers.append(topush)
if "Server" in header:
# Started to work on a parser, but servers don't have a conventional name, version format :/
server = value
name, version, os = re.match(r"^([\w\d]+)?/?([\d.]+)?\s?\(?([\d\w\_]+)?", value).groups() # ServerName?/?version ?(distribution)
if version is None: # Servername?space?(version)
name2, version2 = re.match(r"^([\w\d]+)?\s?\(?([\d\.\w\_]+)?", value).groups()
if name2 is not None and version2 is not None:
name = name2
version = version2
os = None
# TODO: other corner cases
servervalues["application"] = name
if name == "":
name = re.match(r"^(\w+)", value).groups()
servervalues["version"] = version
servervalues["os"] = os
server = name
elif "User-Agent" in header:
agents = value.split(" ")
selected = agents[-1]
for agent in agents:
if "Chrome" in agent:
selected = agent
name, version = re.match(r"^([\w\d]+)?/?([\d.]+)?", selected).groups()
servervalues["application"] = name
servervalues["version"] = version
if "linux" in value.lower(): # os simple match. TODO: Add other OS like the vegetarian one and others...
servervalues["os"] = "Linux"
elif "windows" in value.lower():
servervalues["os"] = "Windows"
elif "Mac OS X" in value.lower():
servervalues["os"] = "Mac OS X"
if "Access" in header or "Allow" in header:
servervalues[header.lower()] = value
except:
pass
else:
break
sig += ",".join(headers)
sig += ":*:%s" % name
return (servervalues, sig)
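# Worked example (illustrative): for a response starting with
#   HTTP/1.1 200 OK\r\nDate: ...\r\nServer: Apache/2.4.18 (Ubuntu)\r\nConnection: close\r\n...
# the method keeps the trailing HTTP version digit, the ordered header names
# (with values only for the headers listed in `valuestopush`), and the parsed
# server name, producing a signature shaped roughly like:
#   1:Date,Server,Connection=[close]:*:Apache
# plus a dict such as {'application': 'Apache', 'version': '2.4.18', 'os': 'Ubuntu'}.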
def pktloadid(self, pkt, pkt_sign):
"""
Payload identification
in(1): scapy packet 'pkt'.
in(2): PacketSignature object
out: PacketSignature object completed with extra data.
"""
# HTTP RESPONSES AND REQUESTS
if b"HTTP/" in pkt.load[:5] or b"GET" in pkt.load[:3]:
othervalues, sig = self.http2sig(pkt.load)
pkttype = None
if pkt.load[:4] == b"HTTP":
pkttype = "http-response"
if pkt.load[:3] == b"GET":
pkttype = "http-request"
pkt_sign.flag = pkt["TCP"].flags
pkt_sign.protocol = pkt["IP"].proto
pkt_sign.signature = sig
pkt_sign.packet_type = pkttype
pkt_sign.extra = othervalues
pkt_sign.extra["apptype"] = 'http'
# NetBIOS SMB fingerprint processing
elif b"SMB" in pkt.load[:10]:# and pkt.sport == 139:
nbios = NetBIOS(pkt.load)
if nbios.haslayer("SMBHead"):
try:
pkt_sign.extra = {}
pkt_sign.extra["application"] = "NetBIOS"
pkt_sign.extra["apptype"] = 'smb'
pkt_sign.extra["version"] = None
pkt_sign.extra["os"] = nbios[SMBHead].SSAXP[SessionSetupAndXResponse].NativeOS.decode('utf-16')
pkt_sign.extra["NativeLANManager"] = nbios[SMBHead].SSAXP[SessionSetupAndXResponse].NativeOS.decode('utf-16')
except Exception:
pass
# SSH fingerprint processing
elif b"SSH" in pkt.load[:3] and b"\r\n" in pkt.load:
strload = pkt.load
pkt_sign.extra = {}
if type(strload).__name__ == "bytes":
strload = strload.decode("utf-8", "ignore")
sshheader = ""
sshheader = strload.split("\r\n")[0]
application = None
version = None
distribution = None
try:
application, version, distribution = re.match(r"^SSH\-[\d\w.]+\-([a-zA-Z0-9]+)?\_?([\w\d.]+)?\s?\(?([\d\w\_]+)?", strload).groups()
pkt_sign.extra["application"] = application
pkt_sign.extra["version"] = version
pkt_sign.extra["os"] = distribution
pkt_sign.extra["apptype"] = 'ssh'
except:
pkt_sign.extra["application"] = sshheader.split("-")[2]
pkt_sign.extra["version"] = None
pkt_sign.extra["os"] = None
pkt_sign.extra["apptype"] = 'ssh'
# FTP fingerprint processing
elif b"220" in pkt.load[:3]:
strload = pkt.load[4:]
pkt_sign.extra = {}
if type(strload).__name__ == "bytes":
strload = strload.decode("utf-8", "ignore")
match = re.match(r"([\w]+) ([\d\w\.]+)", strload)
if match is not None:
pkt_sign.extra["application"] = match.group(1)
pkt_sign.extra["version"] = match.group(2)
pkt_sign.extra["apptype"] = 'ftp'
return pkt_sign
def pkt2sig(self, pkt):
"""
Packet2sig - Generate a signature from a packet.
in(1): (Scapy Packet) pkt.
out: PacketSignature object.
Signatures are computed respecting the p0f3 specs: http://lcamtuf.coredump.cx/p0f3/README.
"""
sig = ""
flag = 0x2 # SYN by default
proto = None
pkttype = None # pkttype if ack-push
pkt_sign = PacketSignature()
if pkt.haslayer("IP") or pkt.haslayer("IPv6"):
if pkt.haslayer("IP"):
proto = pkt["IP"].proto
else:
proto = pkt["IPv6"].nh
sig += str(pkt.version)
if pkt.haslayer("IP"):
sig += ":"+str(pkt.ttl)
# TODO: Olen for IPV6
sig += ":0"
if pkt.haslayer("TCP"):
flag = pkt["TCP"].flags
if hasattr(pkt["TCP"], "load"): # process the payload to get extra information
if len(pkt.load) > 5:
# we use a dedicated method to process the signature
if b"HTTP/" in pkt.load[:5] or b"GET" in pkt.load[:3]:
return self.pktloadid(pkt, pkt_sign)
else:
pkt_sign = self.pktloadid(pkt, pkt_sign)
if pkt.haslayer("IPv6"):
return pkt_sign
optiondict = {}
for option in pkt["TCP"].options:
optiondict[option[0].lower()] = option[1]
sig += ":"
if "mss" in optiondict:
sig += str(optiondict["mss"])
sig += ":"
sig += str(pkt["TCP"].window)
if "wscale" in optiondict:
sig += ","
sig += str(optiondict["wscale"])
diffopt = 0
if len(pkt["TCP"].options) > 1:
temppkt = TCP(bytes(pkt["TCP"]))
temppkt.options = []
diffopt = len(pkt[TCP])-len(temppkt)
optionscl = [x.lower() for x,y in pkt[TCP].options]
noptions = []
sig += ":"
"""
olayout's part
"""
optionsize = 0
for ocl in optionscl:
if ocl == "sackok":
optionsize += 2
noptions.append("sok")
elif ocl == "timestamp":
optionsize += 10
noptions.append("ts")
elif ocl == "wscale":
optionsize += 3
noptions.append("ws")
elif ocl == "mss":
optionsize += 4
noptions.append("mss")
elif ocl == "eol":
optionsize += 1
eol_string = "eol+"
zdiff = diffopt - optionsize
if zdiff > 0:
eol_string += str(zdiff)
else:
eol_string += "0"
noptions.append(eol_string)
else: # TODO: do more tests and see if a '?n' is required instead
optionsize += 1
noptions.append(ocl)
sig += ",".join(noptions)
sig += ":"
opt2 = []
"""
quirks' part
"""
if pkt["IP"].version == 4: # TODO: sig for IPv6 packets
if pkt["IP"].flags == 0x2:
opt2.append("df")
if pkt["IP"].id != 0:
opt2.append("id+")
else:
if pkt["IP"].id == 0:
opt2.append("id-")
if pkt["TCP"].flags & 0b1000000 > 0:
opt2.append("ecn")
if (pkt["TCP"].flags >> 12) > 0:
opt2.append("0+")
if pkt["TCP"].seq == 0:
opt2.append("seq-")
if pkt["TCP"].ack != 0 and (pkt["TCP"].flags & 0b10000 == 0):
opt2.append("ack+")
elif pkt["TCP"].ack == 0 and (pkt["TCP"].flags & 0b10000 > 0):
opt2.append("ack-")
if pkt["TCP"].flags & 0b100000 == 0 and hasattr(pkt["TCP"], "urgptr"):
if pkt["TCP"].urgptr > 0:
opt2.append("uptr+")
elif pkt["TCP"].flags & 0b100000 > 0:
opt2.append("urgf+")
if pkt["TCP"].flags & 0b1000 > 0:
opt2.append("pushf+")
if "timestamp" in optiondict:
if int(optiondict["timestamp"][0]) == 0:
opt2.append("ts1-")
if int(optiondict["timestamp"][1]) != 0 and pkt["TCP"].flags == 0x2:
opt2.append("ts2+")
hexlitcp = int(codecs.encode(bytes(pkt[TCP]), "hex"), 16)
if hexlitcp & 0x04000000 > 0:
opt2.append("opt+")
if "wscale" in optiondict:
if int(optiondict["wscale"]) > 14:
opt2.append("exws")
#TODO: bad (malformed TCP option)
sig += ",".join(opt2)
else:
sig += "*"
sig += ":0" # TODO: look for packet classes to implement other cases
pkt_sign.flag = flag
pkt_sign.protocol = proto
pkt_sign.signature = sig
pkt_sign.packet_type = pkttype
return pkt_sign
def reasig(self, sig_base, sig_pkt):
"""
        Resolve the wildcards of the base signature using the packet's signature.
        in(1): (string) p0f base signature to be compared.
        in(2): (string) packet signature to compare against the p0f one.
out: adapted signature.
"""
sig1 = sig_base.split(":")
sig2 = sig_pkt.split(":")
for i in range(len(sig1)):
if i >= len(sig2):
break
if sig1[i] == "*":
sig1[i] = sig2[i]
elif "mss*" in sig1[i] and sig2[3] !="":
cols = sig1[i].split(",")
cols2 = sig2[i].split(",")
operand = int(cols[0].strip("mss*"))
result = int(sig2[3]) * operand
sig1[i] = str(result)
if len(cols) == 2:
if cols[1] == "*" and len(cols2) == 2:
cols[1] = cols2[1]
sig1[i] += "," + cols[1]
elif sig1[i] == "64-":
if int(sig1[i][:2]) > int(sig2[i]):
sig1[i] = sig2[i]
commas1 = sig1[i].split(",")
commas2 = sig2[i].split(",")
rcstr = []
for j in range(len(commas1)):
if j >= len(commas2):
break
                if commas1[j] == "*":
rcstr.append(commas2[j])
else:
rcstr.append(commas1[j])
sig1[i] = ",".join(rcstr)
return ":".join(sig1)
def calcpktdist(self, sig_base, sig_pkt, method=0):
"""
        Generic method to calculate distances between p0f conventional signatures.
        in(1): (string) p0f signature to be compared.
        in(2): (string) pkt's signature to be compared.
        optional in(3): (int) method - by default => 0.
        out: distance calculated by the chosen method (default: Levenshtein -> beta)
"""
result = None
oldsig = sig_base
sig_base = self.reasig(sig_base, sig_pkt)
        if method == 0: # use Levenshtein by default (a beta, lazy comparison on the whole signature)
result = Levenshtein.distance(sig_base, sig_pkt)
#TODO: other methods
return result, sig_base
def calchttptdist(self, sig_base, sig_pkt, method=0):
result = None
nsig_base, nsig_pkt = self.reaghttp(sig_base, sig_pkt)
if method == 0:
result = Levenshtein.distance(nsig_base, nsig_pkt)
return result, nsig_base
def matchsig(self, pkt_sign):
"""
Find the best match.
        in(1): (PacketSignature) packet signature to match.
        out: ClassifiedSignature holding distance, label, wildcarded sig, original base sig and the top 3 candidate sigs (useful if the distance is too far).
"""
sigtuple = None
if pkt_sign is not None:
sigtuple = (pkt_sign.flag, pkt_sign.protocol, pkt_sign.signature, pkt_sign.packet_type)
if self.__fpobj is None:
self.parse_p0fconf()
bestsig = None
top3sig = []
if sigtuple is not None:
sigtype=None # by default
distfunc = self.calcpktdist
if sigtuple[0] == 0x18: # TODO: more processing methods
if sigtuple[3] == "http-response":
sigtype = "http:response"
distfunc = self.calchttptdist
elif sigtuple[3] == "http-request":
sigtype = "http:request"
distfunc = self.calchttptdist
else:
if (sigtuple[0] & 0x2 > 0) and (sigtuple[0] & 0b10000 > 0) and sigtuple[1] == 0x6: # TCP response packet
sigtype = "tcp:response"
elif (sigtuple[0] & 0x2 > 0) and (sigtuple[0] & 0b10000 == 0) and sigtuple[1] == 0x6: # TCP request packet
sigtype="tcp:request" # by default
sig = sigtuple[2]
if sigtype is not None:
for label in self.__fpobj[sigtype]:
for s in self.__fpobj[sigtype][label]:
curdist, oldsig = distfunc(s, sig)
if bestsig is None or bestsig.distance >= curdist:
if bestsig is None:
bestsig = ClassifiedSignature()
bestsig.orig = sig
if len(top3sig) >= 3:
del top3sig[2]
top3sig.insert(0, {"sig":s, "label":label, "distance":curdist})
else:
top3sig.append({"sig":s, "label":label, "distance":curdist})
bestsig.distance = curdist
bestsig.label = label
bestsig.signature = s
bestsig.computedsig = oldsig
bestsig.top3 = top3sig
return bestsig
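# Illustrative usage sketch (not part of the original module). It relies only on
# the pkt2sig()/matchsig() methods defined above; the engine instance and the
# Scapy packet are assumed to be supplied by the caller.
def _match_packet(engine, pkt):
    """Return (label, distance) of the closest known signature, or None."""
    sign = engine.pkt2sig(pkt)    # build the p0f-style signature for the packet
    best = engine.matchsig(sign)  # nearest catalogued signature (Levenshtein)
    if best is None:
        return None
    return best.label, best.distance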
| {
"content_hash": "e7bfa26a1d7bac9a6fb8c90ef5b8e442",
"timestamp": "",
"source": "github",
"line_count": 540,
"max_line_length": 152,
"avg_line_length": 42.62592592592593,
"alnum_prop": 0.4473455556520984,
"repo_name": "FlUxIuS/p0f3plus",
"id": "b0cefa6c2409bff79774289d7ac92bbd2dc11fec",
"size": "23078",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "core/p0f3p.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "GLSL",
"bytes": "36289"
},
{
"name": "Python",
"bytes": "68519"
},
{
"name": "Shell",
"bytes": "129"
}
],
"symlink_target": ""
} |
"""
Package for characters
"""
from pyherc.test.builders import CharacterBuilder
from pyherc.test.cutesy.dictionary import add_history_value
def strong(character):
"""
Modifies character to be strong
:param character: character to modify
:type character: Character
"""
character.body = 10
character.hit_points = 20
character.maximum_hit_points = 20
return character
def weak(character):
"""
Modifies character to be weak
:param character: character to modify
:type character: Character
"""
character.body = 2
character.hit_points = 5
character.maximum_hit_points = 5
return character
def Adventurer():
"""
    Creates an adventurer character
:returns: fully initialised adventurer
:rtype: Character
"""
character = (CharacterBuilder()
.with_hit_points(10)
.with_max_hp(10)
.with_speed(5)
.with_body(5)
.with_mind(5)
.with_attack(1)
.with_name('Adventurer')
.build())
add_history_value(character, 'hit_points')
return character
def Wizard():
"""
    Creates a wizard character
:returns: fully initialised wizard
:rtype: Character
"""
character = (CharacterBuilder()
.with_hit_points(5)
.with_max_hp(5)
.with_spirit(20)
.with_max_spirit(20)
.with_speed(4)
.with_body(4)
.with_mind(8)
.with_attack(1)
.with_name('Wizard')
.build())
return character
def Goblin(action=None):
"""
Creates a goblin
:returns: fully initialised goblin
:rtype: Character
"""
character = (CharacterBuilder()
.with_hit_points(5)
.with_max_hp(5)
.with_speed(3)
.with_body(3)
.with_attack(1)
.with_name('Goblin')
.build())
if action is not None:
action(character)
return character
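# Illustrative example (not part of the original module): the factories above
# compose with the modifier functions when setting up a test scene.
def _example_party():
    """Return a default hero and a goblin toughened by strong()."""
    hero = Adventurer()
    tough_goblin = Goblin(action=strong)  # strong() raises body and hit points
    return hero, tough_goblin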
| {
"content_hash": "c35eaf1e084ffe667ca18070fd673d0f",
"timestamp": "",
"source": "github",
"line_count": 96,
"max_line_length": 59,
"avg_line_length": 22.46875,
"alnum_prop": 0.5336114974501622,
"repo_name": "tuturto/pyherc",
"id": "eed895cfefe79e03891b041ff6780ed8f1a26469",
"size": "3283",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/pyherc/test/cutesy/characters.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Gherkin",
"bytes": "8825"
},
{
"name": "HTML",
"bytes": "529"
},
{
"name": "Hy",
"bytes": "603756"
},
{
"name": "Python",
"bytes": "975380"
}
],
"symlink_target": ""
} |
'''
Created on 09/08/2011
@author: mikel
'''
__all__ = ["fonts", "includes"]
import os
from os import listdir
from os.path import isdir, isfile, dirname, basename
import sys
import time
import xbmc, xbmcgui
import shutil
import re
from datetime import datetime
import xml.etree.ElementTree as ET
import logging
class SkinUtilsError(Exception):
pass
def reload_skin():
xbmc.executebuiltin("XBMC.ReloadSkin()")
def setup_logging():
    #Keep compatibility with Python 2.6
if hasattr(logging, 'NullHandler'):
logger = logging.getLogger('skinutils')
logger.addHandler(logging.NullHandler())
def get_logger():
return logging.getLogger('skinutils')
def debug_log(msg):
get_logger().debug(msg)
def get_sha1_obj():
#SHA1 lib 2.4 compatibility
try:
from hashlib import sha1
return sha1()
except:
import sha
return sha.new()
def sha1_file(file, block_size=2**20):
f = open(file, 'rb')
sha1 = get_sha1_obj()
while True:
data = f.read(block_size)
if not data:
break
sha1.update(data)
f.close()
return sha1.hexdigest()
def try_remove_file(file, wait=0.5, tries=10):
removed = False
num_try = 0
while num_try < tries and not removed:
try:
os.remove(file)
return True
except OSError:
num_try += 1
time.sleep(wait)
return False
def case_file_exists(file):
if not os.path.isfile(file):
return False
else:
file_dir = dirname(file)
if not isdir(file_dir):
return False
else:
dir_contents = listdir(file_dir)
return basename(file) in dir_contents
def get_current_skin_path():
return os.path.normpath(xbmc.translatePath("special://skin/"))
def get_skin_name():
return os.path.basename(get_current_skin_path())
def get_local_skin_path():
user_addons_path = xbmc.translatePath("special://home/addons")
return os.path.normpath(
os.path.join(user_addons_path, get_skin_name())
)
def copy_skin_to_userdata(ask_user=True):
#Warn user before doing this weird thing
d = xbmcgui.Dialog()
msg1 = "This addon needs to install some extra resources."
msg2 = "This installation requires a manual XBMC restart."
msg3 = "Begin installation now? After that it will exit."
make_copy = (
not ask_user or
d.yesno("Notice", msg1, msg2, msg3)
)
if make_copy:
#Get skin dest name
local_skin_path = get_local_skin_path()
#If it was not copied before...
if not os.path.exists(local_skin_path):
shutil.copytree(get_current_skin_path(), local_skin_path)
return make_copy
def is_invalid_local_skin():
#Get skin paths
current_skin_path = get_current_skin_path()
local_skin_path = get_local_skin_path()
#If the local path does not exist
if not os.path.isdir(local_skin_path):
return False
else:
#Get addon xml paths
current_xml = os.path.join(current_skin_path, 'addon.xml')
local_xml = os.path.join(local_skin_path, 'addon.xml')
#Both files must exist
if not os.path.isfile(current_xml) or not os.path.isfile(local_xml):
return True
#If sum of both files mismatch, got it!
elif sha1_file(current_xml) != sha1_file(local_xml):
return True
#Otherwise everything is ok
else:
return False
def fix_invalid_local_skin():
local_skin_path = get_local_skin_path()
time_suffix = datetime.now().strftime('%Y%m%d%H%M%S')
backup_skin_path = local_skin_path + '-skinutils-' + time_suffix
#Just move the skin, if it already exists someone is trolling us...
shutil.move(local_skin_path, backup_skin_path)
#And now do the real copy
copy_skin_to_userdata(ask_user=False)
#Inform the user about the operation...
d = xbmcgui.Dialog()
l1 = "Your local skin is not in use (probably outdated)."
l2 = "Press OK to apply a fix (archiving the old skin)."
l3 = "You will need to restart XBMC once more."
d.ok("Notice", l1, l2, l3)
sys.exit()
#Skin was copied but XBMC was not restarted
def check_needs_restart():
#Get skin paths
current_skin_path = get_current_skin_path()
local_skin_path = get_local_skin_path()
#Local skin exists and does not match current skin path
if os.path.isdir(local_skin_path) and current_skin_path != local_skin_path:
#Check if the local skin is a leftover from a previous XBMC install
if is_invalid_local_skin():
fix_invalid_local_skin()
#Local skin is correct, a restart is needed
else:
d = xbmcgui.Dialog()
d.ok("Notice", "Restart XBMC to complete the installation.")
sys.exit()
def do_write_test(path):
test_file = os.path.join(path, 'write_test.txt')
get_logger().debug('performing write test: %s' % test_file)
try:
#Open and cleanup
open(test_file,'w').close()
os.remove(test_file)
return True
except Exception:
return False
def skin_is_local():
return get_current_skin_path() == get_local_skin_path()
def check_skin_writability():
#Some debug info
debug_log("-- skinutils debug info --")
debug_log("current skin path: %s\n" % get_current_skin_path())
debug_log("local path should be: %s" % get_local_skin_path())
#Check if XBMC needs a restart
check_needs_restart()
#Get the current skin's path
skin_path = get_local_skin_path()
#Check if it's local or not (contained in userdata)
if not skin_is_local():
copy_skin_to_userdata()
sys.exit()
#Check if this path is writable
elif not os.access(skin_path, os.W_OK) or not do_write_test(skin_path):
raise IOError("Skin directory is not writable.")
def make_backup(path):
backup_path = path + '-skinutilsbackup'
#If the backup already exists, don't overwrite it
if not os.path.exists(backup_path):
shutil.copy(path, backup_path)
def restore_backup(path):
backup_path = path + '-skinutilsbackup'
#Do nothing if no backup exists
if os.path.exists(backup_path):
#os.rename is atomic on unix, and it will overwrite silently
if os.name != 'nt':
os.rename(backup_path, path)
#Windows will complain if the file exists
else:
os.remove(path)
os.rename(backup_path, path)
def is_invalid_xml(file):
contents = open(file, 'r').read()
#Check for invalid comments
pattern = re.compile('<!--(.*?)-->', re.MULTILINE | re.DOTALL)
group_pattern = re.compile('^-|--|-$')
for match in re.finditer(pattern, contents):
if re.match(group_pattern, match.group(1)) is not None:
return True
#Check also for whitespace prior to declaration
whitespace_pattern = re.compile('^\s+', re.MULTILINE)
return whitespace_pattern.match(contents) is not None
def sanitize_xml(file):
contents = open(file, 'r').read()
#Remove leading whitespace to declaration
contents = contents.lstrip()
#Strip invalid comments
p = re.compile('<!--.*?-->', re.MULTILINE | re.DOTALL)
clean_contents, num_repl = re.subn(p, '', contents)
open(file, 'w').write(clean_contents)
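# Note (illustrative, not part of the original module): is_invalid_xml() treats a
# file as invalid when it has whitespace before the XML declaration or comments
# that look malformed (e.g. starting with an extra '-'); sanitize_xml() then
# lstrips the content and removes the XML comments altogether before parsing.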
def install_resources():
pass
class DocumentCache:
__cached_docs = None
def __init__(self):
self.__cached_docs = {}
def _check_file_exists(self, file):
if not os.path.isfile(file):
raise IOError('File not found: %s' % file)
def contains(self, file):
return file in self.__cached_docs
def _check_file_known(self, file):
if not self.contains(file):
raise KeyError('Unknown file: %s' % file)
def list_files(self):
return self.__cached_docs.keys()
def items(self):
return self.__cached_docs.items()
def add(self, file):
self._check_file_exists(file)
self.__cached_docs[file] = None
def read(self, file):
self._check_file_exists(file)
#If there is no cached data...
if not self.contains(file) or self.__cached_docs[file] is None:
#Check if the file about to load is sane
if is_invalid_xml(file):
make_backup(file)
sanitize_xml(file)
#Parse the document
self.__cached_docs[file] = ET.parse(file)
return self.__cached_docs[file]
def write(self, file):
self._check_file_known(file)
#If there is a document in cache it may contain modifications
if self.__cached_docs[file] is not None:
make_backup(file)
self.__cached_docs[file].write(file)
def write_all(self):
for item in self.__cached_docs:
self.write(item)
def clear(self, file):
self._check_file_known(file)
self.__cached_docs[file] = None
def clear_all(self):
for item in self.__cached_docs:
self.clear(item)
def rollback(self, file):
self._check_file_known(file)
restore_backup(file)
self.clear(file)
def rollback_all(self):
for item in self.__cached_docs:
self.rollback(item)
setup_logging()
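# Illustrative DocumentCache usage (not part of the original module); the XML
# path argument is an assumption, any skin file would do.
def _example_edit(xml_path):
    """Parse, modify and write back a skin XML file through the cache."""
    cache = DocumentCache()
    tree = cache.read(xml_path)   # sanitizes invalid XML, then parses it
    # ... mutate `tree` (an ElementTree) here ...
    cache.write(xml_path)         # makes a backup, then writes the cached tree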
| {
"content_hash": "c13b07a8f8ced6142ca3120db912124b",
"timestamp": "",
"source": "github",
"line_count": 387,
"max_line_length": 79,
"avg_line_length": 26.232558139534884,
"alnum_prop": 0.5689519306540584,
"repo_name": "mazkolain/xbmc-skinutils",
"id": "3c156dc041b7ec3c73269a5e44a3cfb8a2b5b1f2",
"size": "10152",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/skinutils/__init__.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "31420"
}
],
"symlink_target": ""
} |
from azure.identity import DefaultAzureCredential
from azure.mgmt.frontdoor import FrontDoorManagementClient
"""
# PREREQUISITES
pip install azure-identity
pip install azure-mgmt-frontdoor
# USAGE
python network_experiment_update_profile.py
    Before running the sample, please set the values of the client ID, tenant ID and client secret
of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID,
AZURE_CLIENT_SECRET. For more info about how to get the value, please see:
https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal
"""
def main():
client = FrontDoorManagementClient(
credential=DefaultAzureCredential(),
subscription_id="subid",
)
response = client.network_experiment_profiles.begin_update(
resource_group_name="MyResourceGroup",
profile_name="MyProfile",
parameters={"properties": {"enabledState": "Enabled"}, "tags": {"key1": "value1", "key2": "value2"}},
).result()
print(response)
# x-ms-original-file: specification/frontdoor/resource-manager/Microsoft.Network/stable/2019-11-01/examples/NetworkExperimentUpdateProfile.json
if __name__ == "__main__":
main()
| {
"content_hash": "565a4f01d5d490e7fbdc7a8d80f1f1af",
"timestamp": "",
"source": "github",
"line_count": 34,
"max_line_length": 143,
"avg_line_length": 36.26470588235294,
"alnum_prop": 0.7250608272506083,
"repo_name": "Azure/azure-sdk-for-python",
"id": "f1c4c2fd400a605d264ad97fefe3fdbb64845b92",
"size": "1701",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "sdk/network/azure-mgmt-frontdoor/generated_samples/network_experiment_update_profile.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "1224"
},
{
"name": "Bicep",
"bytes": "24196"
},
{
"name": "CSS",
"bytes": "6089"
},
{
"name": "Dockerfile",
"bytes": "4892"
},
{
"name": "HTML",
"bytes": "12058"
},
{
"name": "JavaScript",
"bytes": "8137"
},
{
"name": "Jinja",
"bytes": "10377"
},
{
"name": "Jupyter Notebook",
"bytes": "272022"
},
{
"name": "PowerShell",
"bytes": "518535"
},
{
"name": "Python",
"bytes": "715484989"
},
{
"name": "Shell",
"bytes": "3631"
}
],
"symlink_target": ""
} |
"""
Custom managers for Django models registered with the tagging
application.
"""
from django.contrib.contenttypes.models import ContentType
from django.db import models
class ModelTagManager(models.Manager):
"""
A manager for retrieving tags for a particular model.
"""
def __init__(self, tag_model):
super(ModelTagManager, self).__init__()
self.tag_model = tag_model
def get_query_set(self):
content_type = ContentType.objects.get_for_model(self.model)
return self.tag_model.objects.filter(
items__content_type__pk=content_type.pk).distinct()
def related(self, tags, *args, **kwargs):
return self.tag_model.objects.related_for_model(tags, self.model, *args, **kwargs)
def usage(self, *args, **kwargs):
return self.tag_model.objects.usage_for_model(self.model, *args, **kwargs)
class ModelTaggedItemManager(models.Manager):
"""
A manager for retrieving model instances based on their tags.
"""
def __init__(self, tag_model):
super(ModelTaggedItemManager, self).__init__()
self.intermediary_table_model = tag_model.objects.intermediary_table_model
def related_to(self, obj, queryset=None, num=None):
if queryset is None:
return self.intermediary_table_model.objects.get_related(obj, self.model, num=num)
else:
return self.intermediary_table_model.objects.get_related(obj, queryset, num=num)
def with_all(self, tags, queryset=None):
if queryset is None:
return self.intermediary_table_model.objects.get_by_model(self.model, tags)
else:
return self.intermediary_table_model.objects.get_by_model(queryset, tags)
def with_any(self, tags, queryset=None):
if queryset is None:
return self.intermediary_table_model.objects.get_union_by_model(self.model, tags)
else:
return self.intermediary_table_model.objects.get_union_by_model(queryset, tags)
class TagDescriptor(object):
"""
A descriptor which provides access to a ``ModelTagManager`` for
model classes and simple retrieval, updating and deletion of tags
for model instances.
"""
def __init__(self, tag_model):
self.tag_model = tag_model
def __get__(self, instance, owner):
if not instance:
tag_manager = ModelTagManager(self.tag_model)
tag_manager.model = owner
return tag_manager
else:
return self.tag_model.objects.get_for_object(instance)
def __set__(self, instance, value):
self.tag_model.objects.update_tags(instance, value)
    def __delete__(self, instance):
self.tag_model.objects.update_tags(instance, [])
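# Illustrative usage (not part of this module). Assuming a model declares
# ``tags = TagDescriptor(Tag)`` for some tag model ``Tag`` (both hypothetical):
#
#     Article.tags.usage()             # ModelTagManager on the class
#     article.tags                     # tags of a single instance
#     article.tags = 'django tagging'  # update tags through the descriptor
#     del article.tags                 # clear them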
| {
"content_hash": "e962089b762f6456458c608035675759",
"timestamp": "",
"source": "github",
"line_count": 78,
"max_line_length": 94,
"avg_line_length": 35.44871794871795,
"alnum_prop": 0.6546112115732369,
"repo_name": "mstepniowski/django-newtagging",
"id": "1dbcb2999b6a281f624777d32c20b3df30f0182d",
"size": "2765",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "newtagging/managers.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "28649"
}
],
"symlink_target": ""
} |
import urllib
import urllib2
import json
from BeautifulSoup import BeautifulSoup
from odm.catalogs.utils import metautils
from odm.catalogs.CatalogReader import CatalogReader
def berlin_to_odm(group):
# One dataset about WLAN locations...
if group == 'oeffentlich':
return [u'Infrastruktur, Bauen und Wohnen']
if group in (u'demographie', u'jugend'):
return [u'Bevölkerung']
if group == u'bildung':
return [u'Bildung und Wissenschaft']
if group == u'gesundheit':
return [u'Gesundheit']
if group in (u'transport', u'verkehr'):
return [u'Transport und Verkehr']
if group == u'wahl':
return [u'Politik und Wahlen']
if group == u'justiz':
return [u'Gesetze und Justiz']
if group == u'geo':
return [u'Infrastruktur, Bauen und Wohnen', u'Geographie, Geologie und Geobasisdaten']
if group in (u'wohnen', u'verentsorgung'):
return [u'Infrastruktur, Bauen und Wohnen']
if group in (u'kultur', u'tourismus', u'erholung'):
return [u'Kultur, Freizeit, Sport, Tourismus']
if group == u'sozial':
return [u'Soziales']
if group == u'umwelt':
return [u'Umwelt und Klima']
if group == u'verbraucher':
return [u'Verbraucherschutz']
if group in (u'verwaltung', u'sicherheit'):
return [u'Öffentliche Verwaltung, Haushalt und Steuern']
if group in (u'wirtschaft', u'arbeit'):
return [u'Wirtschaft und Arbeit']
if group in (u'sonstiges', u'protokolle'):
return [u'Sonstiges']
else:
print 'WARNING: Found no category or categories for ' + group
return []
offenesdatenportal = ("moers", "krefeld", "stadt-bottrop", "stadt-geldern", "stadt-kleve", "stadt-wesel", "kreis-wesel", "kreis-viersen", "kreis-kleve", "gemeinde-wachtendonk")
v3cities = offenesdatenportal + ("hamburg", "koeln", "bonn", "muenchen", "aachen", "frankfurt", "rostock")
def gatherCity(cityname, url, apikey):
if cityname in v3cities:
if cityname == 'bonn':
jsonurl = urllib.urlopen(url + "/data.json")
elif cityname in offenesdatenportal:
jsonurl = urllib.urlopen(url + "/api/action/organization_show?include_datasets=true&id=" + cityname)
else:
jsonurl = urllib.urlopen(url + "/api/3/action/package_list")
listpackages = json.loads(jsonurl.read())
if cityname in offenesdatenportal:
listpackages = listpackages['result']['packages']
elif cityname == 'bonn':
listpackages = listpackages[1:]
else:
listpackages = listpackages['result']
groups = []
print 'INFO: the names that follow have had special characters removed'
for item in listpackages:
if cityname == 'bonn':
urltoread = url + "/api/3/action/package_show?id=" + item['identifier']
elif cityname in offenesdatenportal:
urltoread = url + "/api/action/package_show?id=" + item['name']
else:
urltoread = url + "/api/3/action/package_show?id=" + item
print 'Downloading ' + metautils.findLcGermanCharsAndReplace(urltoread)
            trycount = 0
            urldata = None
            while urldata is None:  # retry the download until it succeeds
                try:
                    req = urllib2.Request(urltoread.encode('utf8'))
                    resp = urllib2.urlopen(req)
                    urldata = resp.read()
                except IOError:
                    if trycount == 100:
                        print 'Download failed 100 times, giving up...'
                        exit()
                    print 'Something went wrong, retrying...'
                    trycount += 1
pdata = json.loads(urldata)
if 'success' in pdata and pdata['success']:
if cityname in ["koeln", "bonn"]:
groups.append(pdata['result'][0])
else:
groups.append(pdata['result'])
else:
print 'WARNING: No result - access denied?\n' + metautils.findLcGermanCharsAndReplace(item)
else:
print 'Downloading ' + url + "/api/3/action/current_package_list_with_resources..."
if cityname == "berlin":
# Berlin is special, it is CKAN 1.8 with V3 API in beta. We have to *post* with an empty dict. And we have to authenticate!
request = urllib2.Request(url + '/api/3/action/current_package_list_with_resources')
request.add_header('Authorization', apikey)
jsonurl = urllib2.urlopen(request, "{}")
else:
jsonurl = urllib.urlopen(url + "/api/3/action/current_package_list_with_resources")
groups = json.loads(jsonurl.read())
groups = groups['result']
#Our CKAN doesn't like owner_org=null even though this occasionally happens. Its also confusing as we use owner_org for our own purposes later.
for group in groups:
group.pop('owner_org', None)
return groups
def importCity(cityname, url, package):
if cityname == 'hamburg':
# Only take 'open data'
if package['type'] != 'dataset' or 'forward-reference' in package['title']:
return {}
#There is a version of CKAN that can output private datasets!
if package['private']:
return {}
resources = []
formats = set()
files = []
# Key for the file link in the resource
urlkeys = ['url']
formatkey = 'format'
if ('resources' in package):
resources = package['resources']
for file in resources:
for urlkey in urlkeys:
if (file[urlkey] not in [None, '']):
if '://' not in file[urlkey]:
files.append(url + file[urlkey])
else:
files.append(file[urlkey])
break
if formatkey in file and file[formatkey] not in [None, '']:
format = file[formatkey]
formats.add(format.upper())
row = {}
row[u'Stadt'] = cityname
row[u'Dateibezeichnung'] = package['title']
row[u'URL PARENT'] = url + '/dataset/' + package['name']
if cityname in (offenesdatenportal + ('hamburg', 'koeln', 'frankfurt', 'aachen', 'berlin', 'muenchen', 'rostock')):
if cityname in (offenesdatenportal + ('hamburg', 'frankfurt', 'aachen', 'rostock')):
licensekey = 'license_id'
vstellekey = 'author'
catskey = 'groups'
catssubkey = 'title'
elif cityname == 'muenchen':
licensekey = 'license_id'
vstellekey = 'maintainer'
catskey = 'groups'
catssubkey = 'title'
elif cityname in ('koeln', 'berlin'):
licensekey = 'license_title'
vstellekey = 'maintainer'
if cityname == 'koeln':
catskey = 'tags'
elif cityname == 'berlin':
catskey = 'groups'
catssubkey = 'name'
# Generate URL for the catalog page
if 'notes' in package and package['notes'] != None:
row[u'Beschreibung'] = package['notes']
if cityname == 'koeln':
soup = BeautifulSoup(row[u'Beschreibung'])
row[u'Beschreibung'] = soup.getText('\n')
else:
row[u'Beschreibung'] = ''
row[u'Zeitlicher Bezug'] = ''
if licensekey in package and package[licensekey] != None:
row[u'Lizenz'] = package[licensekey]
# if not already short, try to convert
            if metautils.isopen(row[u'Lizenz']) == 'Unbekannt':
row[u'Lizenz'] = metautils.long_license_to_short(row[u'Lizenz'])
else:
row[u'Lizenz'] = 'nicht bekannt'
if vstellekey in package and package[vstellekey] != None:
row[u'Veröffentlichende Stelle'] = package[vstellekey]
else:
row[u'Veröffentlichende Stelle'] = ''
if 'extras' in package:
print 'WARNING: No author/maintainer/publisher, checking extras'
for extra in package['extras']:
if extra['key'] == 'contacts':
print 'WARNING: No author, but amazingly there is possibly data in the contacts: ' + extra['value']
for group in metautils.setofvaluesasarray(package[catskey], catssubkey):
if cityname != 'berlin':
odm_cats = metautils.govDataLongToODM(group)
else:
odm_cats = berlin_to_odm(group)
row[u'categories'] = odm_cats
# Bonn is just different enough to do it separately. TODO: Consider combining into above.
elif cityname == 'bonn':
row[u'Beschreibung'] = package.get('description', '')
for timeattempt in ['temporal', 'modified']:
if timeattempt in package and package[timeattempt] not in [None, '']:
row[u'Zeitlicher Bezug'] = package[timeattempt]
break
row[u'Zeitlicher Bezug'] = row.get(u'Zeitlicher Bezug', '')
row[u'Lizenz'] = package.get('license', False)
if not row[u'Lizenz']:
row[u'Lizenz'] = package['license_title']
row[u'Veröffentlichende Stelle'] = package.get('publisher', '')
cats = package.get('keyword', [])
        odm_cats = map(lambda x: metautils.govDataLongToODM(x, checkAll=True), cats)
        row[u'categories'] = odm_cats
resources = package.get(u'distribution', [])
for r in resources:
files.append(r[u'accessURL'])
            formats.add(r[u'format'])
row[u'Format'] = formats
row[u'files'] = files
row['metadata'] = package
return row
class CkanReader(CatalogReader):
city = None
url = None
portalname = None
def __init__(self, cityname):
self.city = cityname
if cityname == "koeln":
self.url = "http://offenedaten-koeln.de"
self.portalname = "offenedaten-koeln.de"
elif cityname == "bonn":
self.url = "http://opendata.bonn.de"
self.portalname = "opendata.bonn.de"
elif cityname == "hamburg":
self.url = "http://suche.transparenz.hamburg.de"
self.portalname = "transparenz.hamburg.de"
elif cityname == "frankfurt":
self.url = "http://www.offenedaten.frankfurt.de"
self.portalname = "offenedaten.frankfurt.de"
elif cityname == "aachen":
self.url = "http://daten.aachen.de"
self.portalname = "daten.aachen.de"
elif cityname == "berlin":
self.url = "http://datenregister.berlin.de"
self.portalname = "datenregister.berlin.de"
elif cityname == "muenchen":
self.url = "http://www.opengov-muenchen.de"
self.portalname = "opengov-muenchen.de"
elif cityname == "rostock":
self.url = "http://opendata-hro.de"
self.portalname = "opendata-hro.de"
elif cityname in offenesdatenportal:
self.url = "https://www.offenesdatenportal.de"
self.portalname = "www.offenesdatenportal.de/organization/" + cityname
else:
print 'First argument must be an city; unsupported city'
exit()
def info(self):
return {
'name': self.city + '_harvester',
'title': str(self.portalname),
'description': ''
}
def gather(self, apikey = None):
data = gatherCity(self.city, self.url, apikey)
return data
def fetch(self, d):
return d
def import_data(self, d):
d = importCity(self.city, self.url, d)
if d != {}:
d = metautils.gerToEngKeys(d)
d = dict(d)
d['city'] = self.city
d['originating_portal'] = self.portalname
d['accepted'] = True
d['costs'] = None
d['spatial'] = None
d['source'] = 'd'
d['metadata_xml'] = None
d['formats'] = list(d['formats'])
d['open'] = metautils.isopen(d['licenseshort'].strip())
            if 'categories' not in d:
d['categories'] = []
d['filelist'] = d['files']
return d
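# Illustrative end-to-end harvest (not part of the original module); the default
# city name is just one of the supported portal ids.
def _harvest_city(cityname='koeln', apikey=None):
    """Return the normalised records of one portal."""
    reader = CkanReader(cityname)
    return [reader.import_data(reader.fetch(p)) for p in reader.gather(apikey)]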
| {
"content_hash": "c32a3188b3c92d95ad3ab6755ad32591",
"timestamp": "",
"source": "github",
"line_count": 301,
"max_line_length": 176,
"avg_line_length": 40.3421926910299,
"alnum_prop": 0.5668286255455818,
"repo_name": "mattfullerton/odm-catalogreaders",
"id": "5c66a2e79d29b5e760a05e9cb396b85db1b6e714",
"size": "12172",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "odm/catalogs/portals/ckanApiV3.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "108180"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('delibere', '0029_auto_20170616_0033'),
]
operations = [
migrations.AlterModelOptions(
name='amministrazione',
options={'ordering': ('posizione',), 'verbose_name_plural': 'amministrazioni'},
),
migrations.AddField(
model_name='amministrazione',
name='posizione',
field=models.PositiveIntegerField(default=0),
),
]
| {
"content_hash": "dfa9edb59846374d3c2a6e2025baf987",
"timestamp": "",
"source": "github",
"line_count": 22,
"max_line_length": 91,
"avg_line_length": 26.045454545454547,
"alnum_prop": 0.6003490401396161,
"repo_name": "guglielmo/mosic2-db-delibere",
"id": "4a5896152269237c9d4ce056ffb0f90fd75b584c",
"size": "646",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "project/delibere/migrations/0030_auto_20170616_0040.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "132694"
},
{
"name": "HTML",
"bytes": "31797"
},
{
"name": "JavaScript",
"bytes": "16111"
},
{
"name": "Python",
"bytes": "135060"
}
],
"symlink_target": ""
} |
__author__ = 'rolandh'
INC = "http://id.incommon.org/category/research-and-scholarship"
RELEASE = {
"": ["eduPersonTargetedID"],
INC: ["eduPersonPrincipalName", "eduPersonScopedAffiliation", "mail",
"givenName", "sn", "displayName"]
}
| {
"content_hash": "434509f4844187d2239a9244b33c8374",
"timestamp": "",
"source": "github",
"line_count": 10,
"max_line_length": 73,
"avg_line_length": 25.6,
"alnum_prop": 0.6484375,
"repo_name": "arbn/pysaml2",
"id": "2956ff2b78b15a20d8b854abde0dc63ff2d6631a",
"size": "256",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/saml2/entity_category/incommon.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "2404671"
},
{
"name": "Shell",
"bytes": "3398"
}
],
"symlink_target": ""
} |
import tempfile
import unittest
from telemetry.internal import story_runner
from telemetry.page import page
from telemetry.page import page_test
from telemetry.page import shared_page_state
from telemetry import story as story_module
from telemetry.testing import fakes
from telemetry.util import wpr_modes
def SetUpPageRunnerArguments(options):
parser = options.CreateParser()
story_runner.AddCommandLineArgs(parser)
options.MergeDefaultValues(parser.get_default_values())
story_runner.ProcessCommandLineArgs(parser, options)
class DummyTest(page_test.PageTest):
def ValidateAndMeasurePage(self, *_):
pass
class FakeNetworkController(object):
def __init__(self):
self.archive_path = None
self.wpr_mode = None
def SetReplayArgs(self, archive_path, wpr_mode, _netsim, _extra_wpr_args,
_make_javascript_deterministic=False):
self.archive_path = archive_path
self.wpr_mode = wpr_mode
class SharedPageStateTests(unittest.TestCase):
def setUp(self):
self.options = fakes.CreateBrowserFinderOptions()
self.options.use_live_sites = False
self.options.output_formats = ['none']
self.options.suppress_gtest_report = True
# pylint: disable=W0212
def TestUseLiveSitesFlag(self, expected_wpr_mode):
with tempfile.NamedTemporaryFile() as f:
run_state = shared_page_state.SharedPageState(
DummyTest(), self.options, story_module.StorySet())
fake_network_controller = FakeNetworkController()
run_state._PrepareWpr(fake_network_controller, f.name, None)
self.assertEquals(fake_network_controller.wpr_mode, expected_wpr_mode)
self.assertEquals(fake_network_controller.archive_path, f.name)
def testUseLiveSitesFlagSet(self):
self.options.use_live_sites = True
self.TestUseLiveSitesFlag(expected_wpr_mode=wpr_modes.WPR_OFF)
def testUseLiveSitesFlagUnset(self):
self.TestUseLiveSitesFlag(expected_wpr_mode=wpr_modes.WPR_REPLAY)
def testConstructorCallsSetOptions(self):
test = DummyTest()
shared_page_state.SharedPageState(
test, self.options, story_module.StorySet())
self.assertEqual(test.options, self.options)
def assertUserAgentSetCorrectly(
self, shared_page_state_class, expected_user_agent):
story = page.Page(
'http://www.google.com',
shared_page_state_class=shared_page_state_class)
test = DummyTest()
story_set = story_module.StorySet()
story_set.AddStory(story)
story.shared_state_class(test, self.options, story_set)
browser_options = self.options.browser_options
actual_user_agent = browser_options.browser_user_agent_type
self.assertEqual(expected_user_agent, actual_user_agent)
def testPageStatesUserAgentType(self):
self.assertUserAgentSetCorrectly(
shared_page_state.SharedMobilePageState, 'mobile')
self.assertUserAgentSetCorrectly(
shared_page_state.SharedDesktopPageState, 'desktop')
self.assertUserAgentSetCorrectly(
shared_page_state.SharedTabletPageState, 'tablet')
self.assertUserAgentSetCorrectly(
shared_page_state.Shared10InchTabletPageState, 'tablet_10_inch')
self.assertUserAgentSetCorrectly(
shared_page_state.SharedPageState, None)
def testBrowserStartupURLSetCorrectly(self):
story_set = story_module.StorySet()
google_page = page.Page(
'http://www.google.com',
startup_url='http://www.google.com', page_set=story_set)
example_page = page.Page(
'https://www.example.com',
startup_url='https://www.example.com', page_set=story_set)
gmail_page = page.Page(
'https://www.gmail.com',
startup_url='https://www.gmail.com', page_set=story_set)
for p in (google_page, example_page, gmail_page):
story_set.AddStory(p)
shared_state = shared_page_state.SharedPageState(
DummyTest(), self.options, story_set)
for p in (google_page, example_page, gmail_page):
shared_state.WillRunStory(p)
self.assertEquals(
p.startup_url, self.options.browser_options.startup_url)
| {
"content_hash": "33fc50a73e0e74b953c9d08f2e417c7f",
"timestamp": "",
"source": "github",
"line_count": 113,
"max_line_length": 76,
"avg_line_length": 36.06194690265487,
"alnum_prop": 0.7241717791411043,
"repo_name": "Bysmyyr/chromium-crosswalk",
"id": "323f6c3dbe5235b4e07b5e77958a87c5b4b79cc1",
"size": "4238",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tools/telemetry/telemetry/page/shared_page_state_unittest.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [],
"symlink_target": ""
} |
"""Tests the script accuss to the basis solver.
"""
import pytest
import os
# The virtual, pseudorandom port is setup as a session fixture in conftest.py
def get_sargs(args):
"""Returns the list of arguments parsed from sys.argv.
"""
import sys
sys.argv = args
from basis.solve import _parser_options
return _parser_options()
def test_examples():
"""Makes sure the script examples work properly.
"""
argv = ["py.test", "-examples"]
assert get_sargs(argv) is None
def test_run(capfd):
"""Test that a default solve works properly.
"""
from basis.solve import run
f_name ='test_output.dat'
argv = ["py.test", "2", "-potential", "potentials/bump_2.cfg","-outfile",'test_output.dat',"-solutions","2"]
args = get_sargs(argv)
run(args)
model = open("tests/model_output/inf_square.out","r")
temp = model.read()
temp2 = open(f_name,"r").read()
model.close()
assert temp2 == temp
os.system("rm test_output.dat")
from basis.solve import run
f_name ='test_output.dat'
argv = ["py.test", "2", "-potential", "potentials/bump.cfg","-outfile",'test_output.dat',"-solutions","2"]
args = get_sargs(argv)
run(args)
model = open("tests/model_output/bump_1.out","r")
temp = model.read()
temp2 = open(f_name,"r").read()
model.close()
assert temp2 == temp
os.system("rm test_output.dat")
# argv = ["py.test", "2", "-potential", "potentials/bump_2.cfg","-outfile",'test_output.dat',"-solutions","2","-plot","pot"]
# args = get_sargs(argv)
# run(args)
# os.system("rm test_output.dat")
argv = ["py.test", "2"]
args = get_sargs(argv)
with pytest.raises(KeyError):
run(args)
| {
"content_hash": "8f046a692e14a93dc147cd0a006d9f3f",
"timestamp": "",
"source": "github",
"line_count": 56,
"max_line_length": 128,
"avg_line_length": 31.071428571428573,
"alnum_prop": 0.6120689655172413,
"repo_name": "wsmorgan/782",
"id": "fdcc42f048deb835b0d945d78f061097a800f45d",
"size": "1740",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_solve.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "22891"
},
{
"name": "Python",
"bytes": "31319"
}
],
"symlink_target": ""
} |
from scrapy.item import Item, Field
class REIVAuctionItem(Item):
src_domains = Field()
src_url = Field()
prop_address = Field()
prop_suburb = Field()
prop_bedr = Field()
prop_price = Field()
prop_type = Field()
prop_url = Field()
auc_method = Field()
auc_saledate = Field()
auc_agent = Field()
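# Illustrative population of the item inside a spider callback (not part of the
# original module); the field values are made up.
#
#     item = REIVAuctionItem()
#     item['prop_suburb'] = 'Richmond'
#     item['auc_method'] = 'auction'
#     yield item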
| {
"content_hash": "a851ce2020ab28d13730e5c978fb0622",
"timestamp": "",
"source": "github",
"line_count": 14,
"max_line_length": 35,
"avg_line_length": 24.214285714285715,
"alnum_prop": 0.6047197640117994,
"repo_name": "johnconnelly75/RealEstateSpider",
"id": "deda6a7bee0b9b31968b65801bd796f139fde21a",
"size": "466",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "RealEstateSpider/items.py",
"mode": "33188",
"license": "apache-2.0",
"language": [],
"symlink_target": ""
} |
"""Platform for cover integration."""
from boschshcpy import SHCSession, SHCShutterControl
from homeassistant.components.cover import (
ATTR_POSITION,
SUPPORT_CLOSE,
SUPPORT_OPEN,
SUPPORT_SET_POSITION,
SUPPORT_STOP,
CoverDeviceClass,
CoverEntity,
)
from .const import DATA_SESSION, DOMAIN
from .entity import SHCEntity
async def async_setup_entry(hass, config_entry, async_add_entities):
"""Set up the SHC cover platform."""
entities = []
session: SHCSession = hass.data[DOMAIN][config_entry.entry_id][DATA_SESSION]
for cover in session.device_helper.shutter_controls:
entities.append(
ShutterControlCover(
device=cover,
parent_id=session.information.unique_id,
entry_id=config_entry.entry_id,
)
)
if entities:
async_add_entities(entities)
class ShutterControlCover(SHCEntity, CoverEntity):
"""Representation of a SHC shutter control device."""
_attr_device_class = CoverDeviceClass.SHUTTER
_attr_supported_features = (
SUPPORT_OPEN | SUPPORT_CLOSE | SUPPORT_STOP | SUPPORT_SET_POSITION
)
@property
def current_cover_position(self):
"""Return the current cover position."""
return round(self._device.level * 100.0)
def stop_cover(self, **kwargs):
"""Stop the cover."""
self._device.stop()
@property
def is_closed(self):
"""Return if the cover is closed or not."""
return self.current_cover_position == 0
@property
def is_opening(self):
"""Return if the cover is opening or not."""
return (
self._device.operation_state
== SHCShutterControl.ShutterControlService.State.OPENING
)
@property
def is_closing(self):
"""Return if the cover is closing or not."""
return (
self._device.operation_state
== SHCShutterControl.ShutterControlService.State.CLOSING
)
def open_cover(self, **kwargs):
"""Open the cover."""
self._device.level = 1.0
def close_cover(self, **kwargs):
"""Close cover."""
self._device.level = 0.0
def set_cover_position(self, **kwargs):
"""Move the cover to a specific position."""
position = kwargs[ATTR_POSITION]
self._device.level = position / 100.0
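# Note (illustrative, not part of the original module): Home Assistant works
# with positions 0-100 while the SHC device exposes a level of 0.0-1.0, so
# set_cover_position(position=50) drives the shutter to level 0.5 and a device
# level of 0.25 is reported back as current_cover_position == 25.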
| {
"content_hash": "0700fa60fb39209381e060f449a1f496",
"timestamp": "",
"source": "github",
"line_count": 86,
"max_line_length": 80,
"avg_line_length": 27.88372093023256,
"alnum_prop": 0.6209341117597998,
"repo_name": "home-assistant/home-assistant",
"id": "c08984ca0c222903cdaae4659935db1117efc997",
"size": "2398",
"binary": false,
"copies": "1",
"ref": "refs/heads/dev",
"path": "homeassistant/components/bosch_shc/cover.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "20557383"
},
{
"name": "Shell",
"bytes": "6671"
}
],
"symlink_target": ""
} |
from typing import List, Optional
import httpx
import networkx as nx
from qcs_api_client.models import InstructionSetArchitecture
from qcs_api_client.operations.sync import get_instruction_set_architecture
from pyquil.api import QCSClientConfiguration
from pyquil.api._qcs_client import qcs_client
from pyquil.external.rpcq import CompilerISA
from pyquil.noise import NoiseModel
from pyquil.quantum_processor import AbstractQuantumProcessor
from pyquil.quantum_processor.transformers import qcs_isa_to_compiler_isa, qcs_isa_to_graph
class QCSQuantumProcessor(AbstractQuantumProcessor):
"""
An AbstractQuantumProcessor initialized with an ``InstructionSetArchitecture`` returned
from the QCS API. Notably, this class is able to serialize a ``CompilerISA`` based
on the architecture instructions.
"""
quantum_processor_id: str
_isa: InstructionSetArchitecture
noise_model: Optional[NoiseModel]
def __init__(
self,
quantum_processor_id: str,
isa: InstructionSetArchitecture,
noise_model: Optional[NoiseModel] = None,
):
"""
Initialize a new QCSQuantumProcessor.
:param quantum_processor_id: The id of the quantum processor.
:param isa: The QCS API ``InstructionSetArchitecture``.
:param noise_model: An optional ``NoiseModel`` for configuring a noisy quantum_processor on the ``QVM``.
"""
self.quantum_processor_id = quantum_processor_id
self._isa = isa
self.noise_model = noise_model
def qubits(self) -> List[int]:
return sorted(node.node_id for node in self._isa.architecture.nodes)
def qubit_topology(self) -> nx.Graph:
return qcs_isa_to_graph(self._isa)
def to_compiler_isa(self) -> CompilerISA:
return qcs_isa_to_compiler_isa(self._isa)
def __str__(self) -> str:
return "<QCSQuantumProcessor {}>".format(self.quantum_processor_id)
def __repr__(self) -> str:
return str(self)
def get_qcs_quantum_processor(
quantum_processor_id: str,
client_configuration: Optional[QCSClientConfiguration] = None,
timeout: float = 10.0,
) -> QCSQuantumProcessor:
"""
Retrieve an instruction set architecture for the specified ``quantum_processor_id`` and initialize a
``QCSQuantumProcessor`` with it.
:param quantum_processor_id: QCS ID for the quantum processor.
:param timeout: Time limit for request, in seconds.
:param client_configuration: Optional client configuration. If none is provided, a default one will
be loaded.
:return: A ``QCSQuantumProcessor`` with the requested ISA.
"""
client_configuration = client_configuration or QCSClientConfiguration.load()
with qcs_client(client_configuration=client_configuration, request_timeout=timeout) as client: # type: httpx.Client
isa = get_instruction_set_architecture(client=client, quantum_processor_id=quantum_processor_id).parsed
return QCSQuantumProcessor(quantum_processor_id=quantum_processor_id, isa=isa)
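# Illustrative usage (not part of the original module); the processor id below
# is an assumption, any QCS quantum processor id would work:
#
#     qpu = get_qcs_quantum_processor("Aspen-M-3")
#     qpu.qubits()            # sorted qubit ids
#     qpu.qubit_topology()    # networkx Graph of the qubit couplings
#     qpu.to_compiler_isa()   # CompilerISA representation for the compiler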
| {
"content_hash": "19965c0fa0509b9c7097187904e0fd97",
"timestamp": "",
"source": "github",
"line_count": 81,
"max_line_length": 120,
"avg_line_length": 37.629629629629626,
"alnum_prop": 0.7171916010498688,
"repo_name": "rigetticomputing/pyquil",
"id": "49c96290b5b477e4648e214bc372259beff8e2e7",
"size": "3048",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pyquil/quantum_processor/qcs.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "ANTLR",
"bytes": "7178"
},
{
"name": "Python",
"bytes": "741472"
}
],
"symlink_target": ""
} |
import sys
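# Advent of Code 2019 day 1: fuel for a module of mass m is m // 3 - 2, summed
# over all masses passed as newline-separated values in argv[1].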
print(sum([(int((int(line) // 3)) - 2) for line in sys.argv[1].splitlines()]))
| {
"content_hash": "b27bb61b38a964a8d911d95ab1736e05",
"timestamp": "",
"source": "github",
"line_count": 3,
"max_line_length": 78,
"avg_line_length": 30.333333333333332,
"alnum_prop": 0.6153846153846154,
"repo_name": "mre/the-coding-interview",
"id": "c39133f8fa8f41ef1d69a5365246a1da7996ad71",
"size": "114",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "problems/advent-of-code/2019/01/01/solution.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "4745"
},
{
"name": "C#",
"bytes": "14295"
},
{
"name": "C++",
"bytes": "16563"
},
{
"name": "CoffeeScript",
"bytes": "16079"
},
{
"name": "Dart",
"bytes": "1025"
},
{
"name": "F#",
"bytes": "2318"
},
{
"name": "Go",
"bytes": "8067"
},
{
"name": "Haskell",
"bytes": "1585"
},
{
"name": "Java",
"bytes": "18047"
},
{
"name": "JavaScript",
"bytes": "52163"
},
{
"name": "Julia",
"bytes": "559"
},
{
"name": "Kotlin",
"bytes": "17919"
},
{
"name": "LOLCODE",
"bytes": "647"
},
{
"name": "Makefile",
"bytes": "521"
},
{
"name": "PHP",
"bytes": "14313"
},
{
"name": "Python",
"bytes": "149850"
},
{
"name": "Ruby",
"bytes": "5883"
},
{
"name": "Rust",
"bytes": "22731"
},
{
"name": "Scala",
"bytes": "154"
},
{
"name": "Shell",
"bytes": "11341"
},
{
"name": "Swift",
"bytes": "1811"
},
{
"name": "TypeScript",
"bytes": "1405"
}
],
"symlink_target": ""
} |
import os
import logging
from emonitor.extensions import events
from emonitor.modules.settings.settings import Settings
logger = logging.getLogger(__name__)
BEFORE = AFTER = {}
events.addEvent('file_added', handlers=[], parameters=['out.incomepath', 'out.filename'])
events.addEvent('file_removed', handlers=[], parameters=['out.incomepath', 'out.filename'])
OBSERVERACTIVE = 1
ERROR_RAISED = 0
FILES = []
INPUTFORMAT = Settings.get('ocr.inputformat', ['pdf']) + Settings.get('ocr.inputtextformat', []) + Settings.get('xml.inputformat', ['xml'])
#INPUTFORMAT = Settings.get('ocr.inputformat', ['pdf']) + Settings.get('ocr.inputtextformat', [])
def observeFolder(**kwargs):
"""
    Observer method that watches the given folder for added or removed files
    :param kwargs: expects ``path`` - the folder to observe
"""
global BEFORE, AFTER, FILES, ERROR_RAISED
if OBSERVERACTIVE == 0:
return
if 'path' in kwargs:
path = kwargs['path']
else:
return
if not os.path.exists(path):
if ERROR_RAISED == 0:
ERROR_RAISED = 1
logger.error(u"observer path {} not found".format(path))
return # error delivered
elif ERROR_RAISED == 1: # path found again
ERROR_RAISED = 0
logger.info(u"observer path {} present again".format(path))
if ERROR_RAISED == 1:
ERROR_RAISED = 0 # reset errorstate
AFTER = dict([(f, None) for f in os.listdir(path)])
for a in [f for f in AFTER if f not in BEFORE and os.path.splitext(f)[-1][1:] in INPUTFORMAT]: # new files added
if a not in FILES:
events.raiseEvent('file_added', incomepath=path, filename=a)
logger.info(u"file_added: {}{}".format(path, a))
FILES.append(a)
for r in [f for f in BEFORE if f not in AFTER and os.path.splitext(f)[-1][1:] in INPUTFORMAT]:
if r in FILES:
events.raiseEvent('file_removed', incomepath=path, filename=r)
logger.info(u"file_removed: {}{}".format(path, r))
FILES.remove(r)
BEFORE = AFTER
return
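# Illustrative wiring (not part of the original module): a scheduler might call
# the observer periodically with the monitored income folder, e.g.
#
#     observeFolder(path='/path/to/income')
#
# Files that newly appear with an accepted extension raise 'file_added'; files
# that disappear again raise 'file_removed'.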
| {
"content_hash": "9ad8d8e71ee00dd983ef0f7c75cf289f",
"timestamp": "",
"source": "github",
"line_count": 61,
"max_line_length": 139,
"avg_line_length": 33.47540983606557,
"alnum_prop": 0.6214495592556317,
"repo_name": "digifant/eMonitor",
"id": "9073bf65b0ee99ed9ed3f4d566d84cfaaee3903b",
"size": "2042",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "emonitor/observer.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "2392"
},
{
"name": "CSS",
"bytes": "73342"
},
{
"name": "HTML",
"bytes": "527895"
},
{
"name": "JavaScript",
"bytes": "282583"
},
{
"name": "Mako",
"bytes": "413"
},
{
"name": "Python",
"bytes": "788596"
}
],
"symlink_target": ""
} |
import unittest
import imath
import IECore
import IECoreScene
import Gaffer
import GafferScene
import GafferSceneTest
class PointsTypeTest( GafferSceneTest.SceneTestCase ) :
def test( self ) :
points = IECoreScene.PointsPrimitive( 1 )
points["P"] = IECoreScene.PrimitiveVariable(
IECoreScene.PrimitiveVariable.Interpolation.Vertex,
IECore.V3fVectorData( [ imath.V3f( 1, 2, 3 ) ] ),
)
objectToScene = GafferScene.ObjectToScene()
objectToScene["object"].setValue( points )
group = GafferScene.Group()
group["in"][0].setInput( objectToScene["out"] )
filter = GafferScene.PathFilter()
filter["paths"].setValue( IECore.StringVectorData( [ "/group/object" ] ) )
pointsType = GafferScene.PointsType()
pointsType["in"].setInput( group["out"] )
pointsType["filter"].setInput( filter["out"] )
def assertExpectedOutput( type, unchanged ) :
self.assertSceneValid( pointsType["out"] )
if type is not None :
self.assertEqual( pointsType["out"].object( "/group/object" )["type"].data.value, type )
else :
self.assertFalse( "type" in pointsType["out"].object( "/group/object" ) )
if unchanged :
self.assertScenesEqual( pointsType["out"], group["out"] )
self.assertEqual( pointsType["out"].object( "/group/object" ), group["out"].object( "/group/object" ) )
self.assertTrue(
pointsType["out"].object( "/group/object", _copy = False ).isSame( group["out"].object( "/group/object", _copy = False ) )
)
# Test unchanged settings (no type on input points).
assertExpectedOutput( type = None, unchanged = True )
# Test unchanged settings.
points["type"] = IECoreScene.PrimitiveVariable( IECoreScene.PrimitiveVariable.Interpolation.Constant, IECore.StringData( "particles" ) )
objectToScene["object"].setValue( points )
assertExpectedOutput( type = "particles", unchanged = True )
# Test converting particles to particles ( shouldn't do anything )
pointsType["type"].setValue( "particles" )
assertExpectedOutput( type = "particles", unchanged = True )
# Test converting particles to sphere
pointsType["type"].setValue( "sphere" )
assertExpectedOutput( type = "sphere", unchanged = False )
# Test converting particles to patches. The bound should change at this point.
pointsType["type"].setValue( "patch" )
assertExpectedOutput( type = "patch", unchanged = False )
def testNonPrimitiveObject( self ) :
c = GafferScene.Camera()
p = GafferScene.PointsType()
p["in"].setInput( c["out"] )
self.assertSceneValid( p["out"] )
self.failUnless( isinstance( p["out"].object( "/camera" ), IECoreScene.Camera ) )
self.assertEqual( p["out"].object( "/camera" ), c["out"].object( "/camera" ) )
self.assertTrue(
p["out"].object( "/camera", _copy = False ).isSame( c["out"].object( "/camera", _copy = False ) )
)
if __name__ == "__main__":
unittest.main()
| {
"content_hash": "e729b120f903272dbfbe0b2a7786c784",
"timestamp": "",
"source": "github",
"line_count": 91,
"max_line_length": 138,
"avg_line_length": 31.54945054945055,
"alnum_prop": 0.6896551724137931,
"repo_name": "ivanimanishi/gaffer",
"id": "bbbf9d45bba12bbce153d15443cd65d17050251b",
"size": "4674",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "python/GafferSceneTest/PointsTypeTest.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "39753"
},
{
"name": "C++",
"bytes": "6086015"
},
{
"name": "CMake",
"bytes": "83446"
},
{
"name": "CSS",
"bytes": "28027"
},
{
"name": "GLSL",
"bytes": "6236"
},
{
"name": "Python",
"bytes": "6120483"
},
{
"name": "Shell",
"bytes": "13049"
},
{
"name": "Slash",
"bytes": "2870"
}
],
"symlink_target": ""
} |
import os
import sys
import unittest
import jinja2
import mock
import six
from timid import context
from timid import environment
from timid import utils
class ContextTest(unittest.TestCase):
@mock.patch.object(environment, 'Environment')
def test_init_base(self, mock_Environment):
result = context.Context()
self.assertEqual(result.verbose, 1)
self.assertEqual(result.debug, False)
self.assertTrue(isinstance(result.variables, utils.SensitiveDict))
self.assertEqual(result.variables, {})
self.assertEqual(result.environment, mock_Environment.return_value)
self.assertEqual(result.steps, [])
self.assertTrue(isinstance(result._jinja, jinja2.Environment))
self.assertEqual(id(result._jinja.globals['env']),
id(result.environment))
mock_Environment.assert_called_once_with(cwd=None)
@mock.patch.object(environment, 'Environment')
def test_init_alt(self, mock_Environment):
result = context.Context(5, True, 'some/dir/ectory')
self.assertEqual(result.verbose, 5)
self.assertEqual(result.debug, True)
self.assertTrue(isinstance(result.variables, utils.SensitiveDict))
self.assertEqual(result.variables, {})
self.assertEqual(result.environment, mock_Environment.return_value)
self.assertEqual(result.steps, [])
self.assertTrue(isinstance(result._jinja, jinja2.Environment))
self.assertEqual(id(result._jinja.globals['env']),
id(result.environment))
mock_Environment.assert_called_once_with(cwd='some/dir/ectory')
@mock.patch.object(environment, 'Environment')
@mock.patch.object(sys, 'stdout', six.StringIO())
@mock.patch.object(sys, 'stderr', six.StringIO())
def test_emit_debug_true(self, mock_Environment):
obj = context.Context(5, True, 'some/dir/ectory')
obj.emit('test message', level=10, debug=True)
self.assertEqual(sys.stdout.getvalue(), '')
self.assertEqual(sys.stderr.getvalue(), 'test message\n')
@mock.patch.object(environment, 'Environment')
@mock.patch.object(sys, 'stdout', six.StringIO())
@mock.patch.object(sys, 'stderr', six.StringIO())
def test_emit_debug_false(self, mock_Environment):
obj = context.Context(5, False, 'some/dir/ectory')
obj.emit('test message', level=10, debug=True)
self.assertEqual(sys.stdout.getvalue(), '')
self.assertEqual(sys.stderr.getvalue(), '')
@mock.patch.object(environment, 'Environment')
@mock.patch.object(sys, 'stdout', six.StringIO())
@mock.patch.object(sys, 'stderr', six.StringIO())
def test_emit_verbosity_low(self, mock_Environment):
obj = context.Context(1, False, 'some/dir/ectory')
obj.emit('test message', level=3)
self.assertEqual(sys.stdout.getvalue(), '')
self.assertEqual(sys.stderr.getvalue(), '')
@mock.patch.object(environment, 'Environment')
@mock.patch.object(sys, 'stdout', six.StringIO())
@mock.patch.object(sys, 'stderr', six.StringIO())
def test_emit_verbosity_high(self, mock_Environment):
obj = context.Context(5, False, 'some/dir/ectory')
obj.emit('test message', level=3)
self.assertEqual(sys.stdout.getvalue(), 'test message\n')
self.assertEqual(sys.stderr.getvalue(), '')
@mock.patch.object(jinja2, 'Environment', return_value=mock.Mock(**{
'globals': {},
'from_string.return_value': mock.Mock(**{
'render.return_value': 'rendered',
}),
}))
def test_template_nonstr(self, mock_Environment):
jinja_env = mock_Environment.return_value
tmpl = jinja_env.from_string.return_value
obj = context.Context()
result = obj.template(1234)
self.assertTrue(callable(result))
self.assertFalse(jinja_env.from_string.called)
self.assertFalse(tmpl.render.called)
rendered = result(obj)
self.assertEqual(rendered, 1234)
self.assertFalse(tmpl.render.called)
@mock.patch.object(jinja2, 'Environment', return_value=mock.Mock(**{
'globals': {},
'from_string.return_value': mock.Mock(**{
'render.return_value': 'rendered',
}),
}))
def test_template_str(self, mock_Environment):
jinja_env = mock_Environment.return_value
tmpl = jinja_env.from_string.return_value
obj = context.Context()
result = obj.template('spam')
self.assertTrue(callable(result))
jinja_env.from_string.assert_called_once_with('spam')
self.assertFalse(tmpl.render.called)
rendered = result(obj)
self.assertEqual(rendered, 'rendered')
tmpl.render.assert_called_once_with(obj.variables)
@mock.patch.object(jinja2, 'Environment', return_value=mock.Mock(**{
'globals': {},
'compile_expression.return_value': mock.Mock(**{
'return_value': 'rendered',
}),
}))
def test_expression_nonstr(self, mock_Environment):
jinja_env = mock_Environment.return_value
expr = jinja_env.compile_expression.return_value
obj = context.Context()
result = obj.expression(1234)
self.assertTrue(callable(result))
self.assertFalse(jinja_env.compile_expression.called)
self.assertFalse(expr.called)
rendered = result(obj)
self.assertEqual(rendered, 1234)
self.assertFalse(expr.called)
@mock.patch.object(jinja2, 'Environment', return_value=mock.Mock(**{
'globals': {},
'compile_expression.return_value': mock.Mock(**{
'return_value': 'rendered',
}),
}))
def test_expression_str(self, mock_Environment):
jinja_env = mock_Environment.return_value
expr = jinja_env.compile_expression.return_value
obj = context.Context()
result = obj.expression('spam')
self.assertTrue(callable(result))
jinja_env.compile_expression.assert_called_once_with('spam')
self.assertFalse(expr.called)
rendered = result(obj)
self.assertEqual(rendered, 'rendered')
expr.assert_called_once_with(obj.variables)
| {
"content_hash": "aa49d10e234e03fc505821335d44c11a",
"timestamp": "",
"source": "github",
"line_count": 175,
"max_line_length": 75,
"avg_line_length": 35.674285714285716,
"alnum_prop": 0.6452026269421752,
"repo_name": "rackerlabs/timid",
"id": "844b0b65dd75c06677b746d13974f82e03589297",
"size": "6868",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/unit/test_context.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "270430"
},
{
"name": "Shell",
"bytes": "5704"
}
],
"symlink_target": ""
} |
import argparse
import logging
def main():
parser = argparse.ArgumentParser(description="Boilerplate example script")
parser.add_argument('--level', '-l', default='info',
choices=['debug', 'info', 'warning', 'error', 'critical', ],
help="Set the log level")
args = parser.parse_args()
logging.basicConfig(level=args.level.upper())
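# Usage sketch (assuming this module is exposed as a console entry point or run
# through an `if __name__ == '__main__': main()` guard, neither of which is
# shown in this file):
#
#   $ python -m boilerplate.script --level debug
#   # configures the root logger at DEBUG level before the rest of the app runs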
| {
"content_hash": "65655815569bac6ade75ea295dcacbf7",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 84,
"avg_line_length": 30.692307692307693,
"alnum_prop": 0.6040100250626567,
"repo_name": "westphahl/python-boilerplate",
"id": "3293fed612d26fbacedc8196e9fa7e938dcf53fd",
"size": "399",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "boilerplate/script.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "1425"
}
],
"symlink_target": ""
} |
from django.shortcuts import render
from django.shortcuts import render_to_response
from django.template import RequestContext
from django.http import HttpResponseRedirect, HttpResponse
from django.core.urlresolvers import reverse
from django.conf import settings
from django.contrib.auth.decorators import login_required
from django.contrib.auth import authenticate, login as auth_login
from django.views.decorators.csrf import csrf_exempt
import requests
def home(request):
return render(request, "home.html")
def handle_404(request):
return render(request, "404.html")
def handle_500(request):
return render(request, "500.html")
| {
"content_hash": "8b357fd99b05127ef0648b2983f20a72",
"timestamp": "",
"source": "github",
"line_count": 22,
"max_line_length": 65,
"avg_line_length": 29.545454545454547,
"alnum_prop": 0.8061538461538461,
"repo_name": "MagicWishMonkey/ronweb",
"id": "863bd2f18106a4a3f02ebe36f6bb34be8e637c6a",
"size": "650",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ronweb/views/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "4096"
}
],
"symlink_target": ""
} |
__author__ = 'gzs2478'
from PyQt4.QtCore import *
from PyQt4.QtGui import *
from Service import Service
class LoginService(Service):
serviceID = 1001
def __init__(self,parent):
Service.__init__(self,self.serviceID,parent)
self.initData()
def initData(self):
self.registers({\
1001 : self.loginSuccessHandler,\
1002 : self.loginFailedHandler\
})
self.director = self.parent.parent.parent.parent
print "Director is"
print type(self.director)
def loginSuccessHandler(self,msg,owner):
print "LoginSuccessHandler"
data = msg['data']
self.emit(\
SIGNAL('goToHallFromLoginWindow(bool,int,int)'),\
data['is_first_login'],data['table_col_num'],\
data['table_row_num'])
def loginFailedHandler(self,msg,owner):
print "LoginFailedHandler"
self.emit(SIGNAL('loginFailed(QString)'),u"密码错误,或已经登陆!") | {
"content_hash": "52acf1196af3f66cff7d655e57a07541",
"timestamp": "",
"source": "github",
"line_count": 29,
"max_line_length": 64,
"avg_line_length": 33.13793103448276,
"alnum_prop": 0.619146722164412,
"repo_name": "kelvict/Online-GoBang-Center",
"id": "15ffb2c3982a9afb99ea30f03b9957f33a17c855",
"size": "1007",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Client/LoginService.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "3724765"
}
],
"symlink_target": ""
} |
"""Unit tests for the :mod:`networkx.algorithms.community.quality`
module.
"""
import networkx as nx
from networkx import barbell_graph
from networkx.algorithms.community import coverage
from networkx.algorithms.community import modularity
from networkx.algorithms.community import performance
from networkx.algorithms.community.quality import inter_community_edges
from networkx.testing import almost_equal
class TestPerformance(object):
"""Unit tests for the :func:`performance` function."""
def test_bad_partition(self):
"""Tests that a poor partition has a low performance measure."""
G = barbell_graph(3, 0)
partition = [{0, 1, 4}, {2, 3, 5}]
assert almost_equal(8 / 15, performance(G, partition))
def test_good_partition(self):
"""Tests that a good partition has a high performance measure.
"""
G = barbell_graph(3, 0)
partition = [{0, 1, 2}, {3, 4, 5}]
assert almost_equal(14 / 15, performance(G, partition))
class TestCoverage(object):
"""Unit tests for the :func:`coverage` function."""
def test_bad_partition(self):
"""Tests that a poor partition has a low coverage measure."""
G = barbell_graph(3, 0)
partition = [{0, 1, 4}, {2, 3, 5}]
assert almost_equal(3 / 7, coverage(G, partition))
def test_good_partition(self):
"""Tests that a good partition has a high coverage measure."""
G = barbell_graph(3, 0)
partition = [{0, 1, 2}, {3, 4, 5}]
assert almost_equal(6 / 7, coverage(G, partition))
def test_modularity():
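    # Sketch of the expected values, using the standard modularity
    # Q = sum_c [L_c/m - (d_c/(2m))**2] with m = 7 edges (2m = 14):
    #  - bad split {0,1,4}/{2,3,5}: L_c = 1 and 2, degree sums 6 and 8,
    #    so Q = 3/7 - (36 + 64)/14**2 = -16/14**2
    #  - good split {0,1,2}/{3,4,5}: L_c = 3 and 3, degree sums 7 and 7,
    #    so Q = 6/7 - (49 + 49)/14**2 = 70/14**2 = (35 * 2)/14**2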
G = nx.barbell_graph(3, 0)
C = [{0, 1, 4}, {2, 3, 5}]
assert almost_equal(-16 / (14 ** 2), modularity(G, C))
C = [{0, 1, 2}, {3, 4, 5}]
assert almost_equal((35 * 2) / (14 ** 2), modularity(G, C))
def test_inter_community_edges_with_digraphs():
G = nx.complete_graph(2, create_using=nx.DiGraph())
partition = [{0}, {1}]
assert inter_community_edges(G, partition) == 2
G = nx.complete_graph(10, create_using=nx.DiGraph())
partition = [{0}, {1, 2}, {3, 4, 5}, {6, 7, 8, 9}]
assert inter_community_edges(G, partition) == 70
G = nx.cycle_graph(4, create_using=nx.DiGraph())
partition = [{0, 1}, {2, 3}]
assert inter_community_edges(G, partition) == 2
| {
"content_hash": "46a182cc5665a4eaa85140f3b7bb407b",
"timestamp": "",
"source": "github",
"line_count": 67,
"max_line_length": 72,
"avg_line_length": 34.37313432835821,
"alnum_prop": 0.6261398176291794,
"repo_name": "sserrot/champion_relationships",
"id": "eda7bb3538b679172e1461202e7a42195c1ced80",
"size": "2521",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "venv/Lib/site-packages/networkx/algorithms/community/tests/test_quality.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "128"
},
{
"name": "HTML",
"bytes": "18324224"
},
{
"name": "Jupyter Notebook",
"bytes": "9131072"
},
{
"name": "Python",
"bytes": "10702"
}
],
"symlink_target": ""
} |
from django.contrib import admin
from django.contrib.auth.admin import UserAdmin
from django.contrib.auth.models import User
from django.contrib.auth.models import Group
from django.utils.translation import ugettext_lazy as _
from accounts.forms import EmailUserChangeForm, EmailUserCreationForm
from accounts.models import MyUser
class MyUserAdmin(UserAdmin):
form = EmailUserChangeForm
add_form = EmailUserCreationForm
fieldsets = (
(None, {'fields': ('email', 'name', 'slug', 'password')}),
(_('Permissions'), {'fields': ('is_active', 'is_staff', 'is_superuser',
'user_permissions')}), # , 'groups'
(_('Important dates'), {'fields': ('last_login', 'date_joined')}),
)
add_fieldsets = ((
None, {
'classes': ('wide',),
'fields': ('email', 'name', 'password1', 'password2')
}
),
)
# The fields to be used in displaying the User model.
# These override the definitions on the base UserAdmin
# that reference specific fields on auth.User.
list_display = ('email', 'name', 'slug', 'is_staff')
list_filter = ('is_staff', 'is_superuser', 'is_active') # , 'groups'
search_fields = ('email', 'name', 'slug')
ordering = ('email', 'name')
filter_horizontal = ('user_permissions',) # 'groups',
admin.site.register(MyUser, MyUserAdmin)
admin.site.unregister(Group)
| {
"content_hash": "da9963cceff5cd67b08657023481f367",
"timestamp": "",
"source": "github",
"line_count": 42,
"max_line_length": 79,
"avg_line_length": 33.95238095238095,
"alnum_prop": 0.6276297335203366,
"repo_name": "Sound-Colour-Space/sound-colour-space",
"id": "489cba73c677ced3dbf8d053934f0a6957bed361",
"size": "1426",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "website/apps/accounts/admin.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "1553375"
},
{
"name": "HTML",
"bytes": "34822"
},
{
"name": "JavaScript",
"bytes": "2851313"
},
{
"name": "Jupyter Notebook",
"bytes": "138364"
},
{
"name": "Python",
"bytes": "162565"
},
{
"name": "Shell",
"bytes": "270"
}
],
"symlink_target": ""
} |
import paddle
import math
import numpy as np
import paddle.fluid as fluid
import paddle.fluid.layers as layers
def network(batch_size, items_num, hidden_size, step):
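    # Overview: embed the session items, run `step` rounds of message passing
    # (adj_in / adj_out matmuls feeding a GRU cell), then apply a soft-attention
    # readout that combines the per-node states with the last clicked item and
    # scores the result against every item embedding with softmax cross-entropy
    # (top-20 accuracy is also reported).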
stdv = 1.0 / math.sqrt(hidden_size)
items = layers.data(
name="items",
shape=[batch_size, items_num, 1],
dtype="int64",
append_batch_size=False) #[bs, uniq_max, 1]
seq_index = layers.data(
name="seq_index",
shape=[batch_size, items_num],
dtype="int32",
append_batch_size=False) #[-1(seq_max)*batch_size, 1]
last_index = layers.data(
name="last_index",
shape=[batch_size],
dtype="int32",
append_batch_size=False) #[batch_size, 1]
adj_in = layers.data(
name="adj_in",
shape=[batch_size, items_num, items_num],
dtype="float32",
append_batch_size=False)
adj_out = layers.data(
name="adj_out",
shape=[batch_size, items_num, items_num],
dtype="float32",
append_batch_size=False)
mask = layers.data(
name="mask",
shape=[batch_size, -1, 1],
dtype="float32",
append_batch_size=False)
label = layers.data(
name="label",
shape=[batch_size, 1],
dtype="int64",
append_batch_size=False)
items_emb = layers.embedding(
input=items,
param_attr=fluid.ParamAttr(
name="emb",
initializer=fluid.initializer.Uniform(
low=-stdv, high=stdv)),
size=[items_num, hidden_size]) #[batch_size, uniq_max, h]
pre_state = items_emb
for i in range(step):
pre_state = layers.reshape(
x=pre_state, shape=[batch_size, -1, hidden_size])
state_in = layers.fc(
input=pre_state,
name="state_in",
size=hidden_size,
act=None,
num_flatten_dims=2,
param_attr=fluid.ParamAttr(initializer=fluid.initializer.Uniform(
low=-stdv, high=stdv)),
bias_attr=fluid.ParamAttr(initializer=fluid.initializer.Uniform(
low=-stdv, high=stdv))) #[batch_size, uniq_max, h]
state_out = layers.fc(
input=pre_state,
name="state_out",
size=hidden_size,
act=None,
num_flatten_dims=2,
param_attr=fluid.ParamAttr(initializer=fluid.initializer.Uniform(
low=-stdv, high=stdv)),
bias_attr=fluid.ParamAttr(initializer=fluid.initializer.Uniform(
low=-stdv, high=stdv))) #[batch_size, uniq_max, h]
state_adj_in = layers.matmul(adj_in, state_in) #[batch_size, uniq_max, h]
state_adj_out = layers.matmul(adj_out, state_out) #[batch_size, uniq_max, h]
gru_input = layers.concat([state_adj_in, state_adj_out], axis=2)
gru_input = layers.reshape(x=gru_input, shape=[-1, hidden_size * 2])
gru_fc = layers.fc(
input=gru_input,
name="gru_fc",
size=3 * hidden_size,
bias_attr=False)
pre_state, _, _ = fluid.layers.gru_unit(
input=gru_fc,
hidden=layers.reshape(
x=pre_state, shape=[-1, hidden_size]),
size=3 * hidden_size)
final_state = pre_state
seq_index = layers.reshape(seq_index, shape=[-1])
seq = layers.gather(final_state, seq_index) #[batch_size*-1(seq_max), h]
last = layers.gather(final_state, last_index) #[batch_size, h]
seq = layers.reshape(
seq, shape=[batch_size, -1, hidden_size]) #[batch_size, -1(seq_max), h]
last = layers.reshape(
last, shape=[batch_size, hidden_size]) #[batch_size, h]
seq_fc = layers.fc(
input=seq,
name="seq_fc",
size=hidden_size,
bias_attr=False,
act=None,
num_flatten_dims=2,
param_attr=fluid.ParamAttr(
initializer=fluid.initializer.Uniform(
low=-stdv, high=stdv))) #[batch_size, -1(seq_max), h]
last_fc = layers.fc(
input=last,
name="last_fc",
size=hidden_size,
bias_attr=False,
act=None,
num_flatten_dims=1,
param_attr=fluid.ParamAttr(
initializer=fluid.initializer.Uniform(
                low=-stdv, high=stdv))) #[batch_size, h]
seq_fc_t = layers.transpose(
seq_fc, perm=[1, 0, 2]) #[-1(seq_max), batch_size, h]
add = layers.elementwise_add(
seq_fc_t, last_fc) #[-1(seq_max), batch_size, h]
b = layers.create_parameter(
shape=[hidden_size],
dtype='float32',
default_initializer=fluid.initializer.Constant(value=0.0)) #[h]
add = layers.elementwise_add(add, b) #[-1(seq_max), batch_size, h]
add_sigmoid = layers.sigmoid(add) #[-1(seq_max), batch_size, h]
add_sigmoid = layers.transpose(
add_sigmoid, perm=[1, 0, 2]) #[batch_size, -1(seq_max), h]
weight = layers.fc(
input=add_sigmoid,
name="weight_fc",
size=1,
act=None,
num_flatten_dims=2,
bias_attr=False,
param_attr=fluid.ParamAttr(
initializer=fluid.initializer.Uniform(
low=-stdv, high=stdv))) #[batch_size, -1, 1]
weight *= mask
weight_mask = layers.elementwise_mul(seq, weight, axis=0)
global_attention = layers.reduce_sum(weight_mask, dim=1)
final_attention = layers.concat(
[global_attention, last], axis=1) #[batch_size, 2*h]
final_attention_fc = layers.fc(
input=final_attention,
name="fina_attention_fc",
size=hidden_size,
bias_attr=False,
act=None,
param_attr=fluid.ParamAttr(initializer=fluid.initializer.Uniform(
low=-stdv, high=stdv))) #[batch_size, h]
all_vocab = layers.create_global_var(
shape=[items_num - 1, 1],
value=0,
dtype="int64",
persistable=True,
name="all_vocab")
all_emb = layers.embedding(
input=all_vocab,
param_attr=fluid.ParamAttr(
name="emb",
initializer=fluid.initializer.Uniform(
low=-stdv, high=stdv)),
size=[items_num, hidden_size]) #[all_vocab, h]
logits = layers.matmul(
x=final_attention_fc, y=all_emb,
transpose_y=True) #[batch_size, all_vocab]
softmax = layers.softmax_with_cross_entropy(
logits=logits, label=label) #[batch_size, 1]
loss = layers.reduce_mean(softmax) # [1]
acc = layers.accuracy(input=logits, label=label, k=20)
return loss, acc
| {
"content_hash": "80b22c1e0b70005dee8f9eac121ae7e6",
"timestamp": "",
"source": "github",
"line_count": 189,
"max_line_length": 86,
"avg_line_length": 34.682539682539684,
"alnum_prop": 0.5652173913043478,
"repo_name": "kuke/models",
"id": "1cd1af9243603a9fff0554e9f2c22734e1bee217",
"size": "7157",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "fluid/PaddleRec/gnn/network.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C++",
"bytes": "15149"
},
{
"name": "Perl",
"bytes": "2072"
},
{
"name": "Python",
"bytes": "2905007"
},
{
"name": "Shell",
"bytes": "2506531"
}
],
"symlink_target": ""
} |
import tensorflow as tf
import tensorlayer as tl
from tensorlayer import logging
from tensorlayer.decorators import deprecated_alias
from tensorlayer.layers.core import Layer
__all__ = [
'GaussianNoise',
]
class GaussianNoise(Layer):
"""
    The :class:`GaussianNoise` class is a noise layer that adds noise with a
    Gaussian distribution to the activation.
Parameters
------------
mean : float
The mean. Default is 0.0.
stddev : float
The standard deviation. Default is 1.0.
is_always : boolean
        If True, add noise in both train and eval modes. If False, skip this layer in eval mode.
seed : int or None
The seed for random noise.
name : str
A unique layer name.
Examples
--------
With TensorLayer
>>> net = tl.layers.Input([64, 200], name='input')
>>> net = tl.layers.Dense(n_units=100, act=tf.nn.relu, name='dense')(net)
>>> gaussianlayer = tl.layers.GaussianNoise(name='gaussian')(net)
>>> print(gaussianlayer)
>>> output shape : (64, 100)
"""
def __init__(
self,
mean=0.0,
stddev=1.0,
is_always=True,
seed=None,
name=None, # 'gaussian_noise',
):
super().__init__(name)
self.mean = mean
self.stddev = stddev
self.seed = seed
self.is_always = is_always
self.build()
self._built = True
logging.info("GaussianNoise %s: mean: %f stddev: %f" % (self.name, self.mean, self.stddev))
def __repr__(self):
s = '{classname}(mean={mean}, stddev={stddev}'
if self.name is not None:
s += ', name=\'{name}\''
s += ')'
return s.format(classname=self.__class__.__name__, **self.__dict__)
def build(self, inputs=None):
pass
def forward(self, inputs):
if (self.is_train or self.is_always) is False:
return inputs
else:
# noise = np.random.normal(0.0 , sigma , tf.to_int64(self.inputs).get_shape())
noise = tf.random.normal(shape=inputs.get_shape(), mean=self.mean, stddev=self.stddev, seed=self.seed)
outputs = inputs + noise
return outputs
| {
"content_hash": "80b2d29e754d87461652e8c4c1854547",
"timestamp": "",
"source": "github",
"line_count": 79,
"max_line_length": 114,
"avg_line_length": 27.88607594936709,
"alnum_prop": 0.5787562414888788,
"repo_name": "zsdonghao/tensorlayer",
"id": "1a6e8546353af8e6a5051dfc50d1bb801c40b2af",
"size": "2247",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tensorlayer/layers/noise.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "172"
},
{
"name": "Makefile",
"bytes": "1310"
},
{
"name": "Python",
"bytes": "1252514"
},
{
"name": "Shell",
"bytes": "502"
}
],
"symlink_target": ""
} |
"""Base class for RPC testing."""
import configparser
from enum import Enum
import logging
import argparse
import os
import pdb
import shutil
import sys
import tempfile
import time
from .authproxy import JSONRPCException
from . import coverage
from .test_node import TestNode
from .mininode import NetworkThread
from .util import (
MAX_NODES,
PortSeed,
assert_equal,
check_json_precision,
connect_nodes_bi,
disconnect_nodes,
get_datadir_path,
initialize_datadir,
p2p_port,
set_node_times,
sync_blocks,
sync_mempools,
)
class TestStatus(Enum):
PASSED = 1
FAILED = 2
SKIPPED = 3
TEST_EXIT_PASSED = 0
TEST_EXIT_FAILED = 1
TEST_EXIT_SKIPPED = 77
class SkipTest(Exception):
"""This exception is raised to skip a test"""
def __init__(self, message):
self.message = message
class DigiByteTestMetaClass(type):
"""Metaclass for DigiByteTestFramework.
Ensures that any attempt to register a subclass of `DigiByteTestFramework`
adheres to a standard whereby the subclass overrides `set_test_params` and
`run_test` but DOES NOT override either `__init__` or `main`. If any of
those standards are violated, a ``TypeError`` is raised."""
def __new__(cls, clsname, bases, dct):
if not clsname == 'DigiByteTestFramework':
if not ('run_test' in dct and 'set_test_params' in dct):
raise TypeError("DigiByteTestFramework subclasses must override "
"'run_test' and 'set_test_params'")
if '__init__' in dct or 'main' in dct:
raise TypeError("DigiByteTestFramework subclasses may not override "
"'__init__' or 'main'")
return super().__new__(cls, clsname, bases, dct)
class DigiByteTestFramework(metaclass=DigiByteTestMetaClass):
"""Base class for a digibyte test script.
Individual digibyte test scripts should subclass this class and override the set_test_params() and run_test() methods.
Individual tests can also override the following methods to customize the test setup:
- add_options()
- setup_chain()
- setup_network()
- setup_nodes()
The __init__() and main() methods should not be overridden.
This class also contains various public and private helper methods."""
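    # Minimal subclass sketch (illustration only; ExampleTest and its body are
    # hypothetical, not part of this framework):
    #
    #   class ExampleTest(DigiByteTestFramework):
    #       def set_test_params(self):
    #           self.num_nodes = 2
    #           self.setup_clean_chain = True
    #
    #       def run_test(self):
    #           self.nodes[0].generatetoaddress(
    #               1, self.nodes[0].get_deterministic_priv_key()[0])
    #           self.sync_all()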
def __init__(self):
"""Sets test framework defaults. Do not override this method. Instead, override the set_test_params() method"""
self.setup_clean_chain = False
self.nodes = []
self.network_thread = None
self.mocktime = 0
self.rpc_timewait = 60 # Wait for up to 60 seconds for the RPC server to respond
self.supports_cli = False
self.bind_to_localhost_only = True
self.set_test_params()
assert hasattr(self, "num_nodes"), "Test must set self.num_nodes in set_test_params()"
def main(self):
"""Main function. This should not be overridden by the subclass test scripts."""
parser = argparse.ArgumentParser(usage="%(prog)s [options]")
parser.add_argument("--nocleanup", dest="nocleanup", default=False, action="store_true",
help="Leave digibyteds and test.* datadir on exit or error")
parser.add_argument("--noshutdown", dest="noshutdown", default=False, action="store_true",
help="Don't stop digibyteds after the test execution")
parser.add_argument("--cachedir", dest="cachedir", default=os.path.abspath(os.path.dirname(os.path.realpath(__file__)) + "/../../cache"),
help="Directory for caching pregenerated datadirs (default: %(default)s)")
parser.add_argument("--tmpdir", dest="tmpdir", help="Root directory for datadirs")
parser.add_argument("-l", "--loglevel", dest="loglevel", default="INFO",
help="log events at this level and higher to the console. Can be set to DEBUG, INFO, WARNING, ERROR or CRITICAL. Passing --loglevel DEBUG will output all logs to console. Note that logs at all levels are always written to the test_framework.log file in the temporary test directory.")
parser.add_argument("--tracerpc", dest="trace_rpc", default=False, action="store_true",
help="Print out all RPC calls as they are made")
parser.add_argument("--portseed", dest="port_seed", default=os.getpid(), type=int,
help="The seed to use for assigning port numbers (default: current process id)")
parser.add_argument("--coveragedir", dest="coveragedir",
help="Write tested RPC commands into this directory")
parser.add_argument("--configfile", dest="configfile",
default=os.path.abspath(os.path.dirname(os.path.realpath(__file__)) + "/../../config.ini"),
help="Location of the test framework config file (default: %(default)s)")
parser.add_argument("--pdbonfailure", dest="pdbonfailure", default=False, action="store_true",
help="Attach a python debugger if test fails")
parser.add_argument("--usecli", dest="usecli", default=False, action="store_true",
help="use digibyte-cli instead of RPC for all commands")
self.add_options(parser)
self.options = parser.parse_args()
PortSeed.n = self.options.port_seed
check_json_precision()
self.options.cachedir = os.path.abspath(self.options.cachedir)
config = configparser.ConfigParser()
config.read_file(open(self.options.configfile))
self.options.digibyted = os.getenv("DIGIBYTED", default=config["environment"]["BUILDDIR"] + '/src/digibyted' + config["environment"]["EXEEXT"])
self.options.digibytecli = os.getenv("DIGIBYTECLI", default=config["environment"]["BUILDDIR"] + '/src/digibyte-cli' + config["environment"]["EXEEXT"])
os.environ['PATH'] = os.pathsep.join([
os.path.join(config['environment']['BUILDDIR'], 'src'),
os.path.join(config['environment']['BUILDDIR'], 'src', 'qt'),
os.environ['PATH']
])
# Set up temp directory and start logging
if self.options.tmpdir:
self.options.tmpdir = os.path.abspath(self.options.tmpdir)
os.makedirs(self.options.tmpdir, exist_ok=False)
else:
self.options.tmpdir = tempfile.mkdtemp(prefix="test")
self._start_logging()
self.log.debug('Setting up network thread')
self.network_thread = NetworkThread()
self.network_thread.start()
success = TestStatus.FAILED
try:
if self.options.usecli and not self.supports_cli:
raise SkipTest("--usecli specified but test does not support using CLI")
self.skip_test_if_missing_module()
self.setup_chain()
self.setup_network()
self.import_deterministic_coinbase_privkeys()
self.run_test()
success = TestStatus.PASSED
except JSONRPCException as e:
self.log.exception("JSONRPC error")
except SkipTest as e:
self.log.warning("Test Skipped: %s" % e.message)
success = TestStatus.SKIPPED
except AssertionError as e:
self.log.exception("Assertion failed")
except KeyError as e:
self.log.exception("Key error")
except Exception as e:
self.log.exception("Unexpected exception caught during testing")
except KeyboardInterrupt as e:
self.log.warning("Exiting after keyboard interrupt")
if success == TestStatus.FAILED and self.options.pdbonfailure:
print("Testcase failed. Attaching python debugger. Enter ? for help")
pdb.set_trace()
self.log.debug('Closing down network thread')
self.network_thread.close()
if not self.options.noshutdown:
self.log.info("Stopping nodes")
if self.nodes:
self.stop_nodes()
else:
for node in self.nodes:
node.cleanup_on_exit = False
self.log.info("Note: digibyteds were not stopped and may still be running")
if not self.options.nocleanup and not self.options.noshutdown and success != TestStatus.FAILED:
self.log.info("Cleaning up {} on exit".format(self.options.tmpdir))
cleanup_tree_on_exit = True
else:
self.log.warning("Not cleaning up dir %s" % self.options.tmpdir)
cleanup_tree_on_exit = False
if success == TestStatus.PASSED:
self.log.info("Tests successful")
exit_code = TEST_EXIT_PASSED
elif success == TestStatus.SKIPPED:
self.log.info("Test skipped")
exit_code = TEST_EXIT_SKIPPED
else:
self.log.error("Test failed. Test logging available at %s/test_framework.log", self.options.tmpdir)
self.log.error("Hint: Call {} '{}' to consolidate all logs".format(os.path.normpath(os.path.dirname(os.path.realpath(__file__)) + "/../combine_logs.py"), self.options.tmpdir))
exit_code = TEST_EXIT_FAILED
logging.shutdown()
if cleanup_tree_on_exit:
shutil.rmtree(self.options.tmpdir)
sys.exit(exit_code)
# Methods to override in subclass test scripts.
def set_test_params(self):
"""Tests must this method to change default values for number of nodes, topology, etc"""
raise NotImplementedError
def add_options(self, parser):
"""Override this method to add command-line options to the test"""
pass
def skip_test_if_missing_module(self):
"""Override this method to skip a test if a module is not compiled"""
pass
def setup_chain(self):
"""Override this method to customize blockchain setup"""
self.log.info("Initializing test directory " + self.options.tmpdir)
if self.setup_clean_chain:
self._initialize_chain_clean()
else:
self._initialize_chain()
def setup_network(self):
"""Override this method to customize test network topology"""
self.setup_nodes()
# Connect the nodes as a "chain". This allows us
# to split the network between nodes 1 and 2 to get
# two halves that can work on competing chains.
for i in range(self.num_nodes - 1):
connect_nodes_bi(self.nodes, i, i + 1)
self.sync_all()
def setup_nodes(self):
"""Override this method to customize test node setup"""
extra_args = None
if hasattr(self, "extra_args"):
extra_args = self.extra_args
self.add_nodes(self.num_nodes, extra_args)
self.start_nodes()
def import_deterministic_coinbase_privkeys(self):
if self.setup_clean_chain:
return
for n in self.nodes:
try:
n.getwalletinfo()
except JSONRPCException as e:
assert str(e).startswith('Method not found')
continue
n.importprivkey(n.get_deterministic_priv_key()[1])
def run_test(self):
"""Tests must override this method to define test logic"""
raise NotImplementedError
# Public helper methods. These can be accessed by the subclass test scripts.
def add_nodes(self, num_nodes, extra_args=None, *, rpchost=None, binary=None):
"""Instantiate TestNode objects"""
if self.bind_to_localhost_only:
extra_confs = [["bind=127.0.0.1"]] * num_nodes
else:
extra_confs = [[]] * num_nodes
if extra_args is None:
extra_args = [[]] * num_nodes
if binary is None:
binary = [self.options.digibyted] * num_nodes
assert_equal(len(extra_confs), num_nodes)
assert_equal(len(extra_args), num_nodes)
assert_equal(len(binary), num_nodes)
for i in range(num_nodes):
self.nodes.append(TestNode(i, get_datadir_path(self.options.tmpdir, i), rpchost=rpchost, timewait=self.rpc_timewait, digibyted=binary[i], digibyte_cli=self.options.digibytecli, mocktime=self.mocktime, coverage_dir=self.options.coveragedir, extra_conf=extra_confs[i], extra_args=extra_args[i], use_cli=self.options.usecli))
def start_node(self, i, *args, **kwargs):
"""Start a digibyted"""
node = self.nodes[i]
node.start(*args, **kwargs)
node.wait_for_rpc_connection()
if self.options.coveragedir is not None:
coverage.write_all_rpc_commands(self.options.coveragedir, node.rpc)
def start_nodes(self, extra_args=None, *args, **kwargs):
"""Start multiple digibyteds"""
if extra_args is None:
extra_args = [None] * self.num_nodes
assert_equal(len(extra_args), self.num_nodes)
try:
for i, node in enumerate(self.nodes):
node.start(extra_args[i], *args, **kwargs)
for node in self.nodes:
node.wait_for_rpc_connection()
except:
# If one node failed to start, stop the others
self.stop_nodes()
raise
if self.options.coveragedir is not None:
for node in self.nodes:
coverage.write_all_rpc_commands(self.options.coveragedir, node.rpc)
def stop_node(self, i, expected_stderr=''):
"""Stop a digibyted test node"""
self.nodes[i].stop_node(expected_stderr)
self.nodes[i].wait_until_stopped()
def stop_nodes(self):
"""Stop multiple digibyted test nodes"""
for node in self.nodes:
# Issue RPC to stop nodes
node.stop_node()
for node in self.nodes:
# Wait for nodes to stop
node.wait_until_stopped()
def restart_node(self, i, extra_args=None):
"""Stop and start a test node"""
self.stop_node(i)
self.start_node(i, extra_args)
def wait_for_node_exit(self, i, timeout):
self.nodes[i].process.wait(timeout)
def split_network(self):
"""
Split the network of four nodes into nodes 0/1 and 2/3.
"""
disconnect_nodes(self.nodes[1], 2)
disconnect_nodes(self.nodes[2], 1)
self.sync_all([self.nodes[:2], self.nodes[2:]])
def join_network(self):
"""
Join the (previously split) network halves together.
"""
connect_nodes_bi(self.nodes, 1, 2)
self.sync_all()
def sync_all(self, node_groups=None):
if not node_groups:
node_groups = [self.nodes]
for group in node_groups:
sync_blocks(group)
sync_mempools(group)
def enable_mocktime(self):
"""Enable mocktime for the script.
mocktime may be needed for scripts that use the cached version of the
blockchain. If the cached version of the blockchain is used without
mocktime then the mempools will not sync due to IBD.
For backward compatibility of the python scripts with previous
versions of the cache, this helper function sets mocktime to Jan 1,
2014 + (201 * 10 * 60)"""
self.mocktime = 1388534400 + (201 * 10 * 60)
def disable_mocktime(self):
self.mocktime = 0
# Private helper methods. These should not be accessed by the subclass test scripts.
def _start_logging(self):
# Add logger and logging handlers
self.log = logging.getLogger('TestFramework')
self.log.setLevel(logging.DEBUG)
# Create file handler to log all messages
fh = logging.FileHandler(self.options.tmpdir + '/test_framework.log', encoding='utf-8')
fh.setLevel(logging.DEBUG)
        # Create console handler to log messages to stdout. By default this logs messages at INFO level and higher, but that can be configured with --loglevel.
ch = logging.StreamHandler(sys.stdout)
# User can provide log level as a number or string (eg DEBUG). loglevel was caught as a string, so try to convert it to an int
ll = int(self.options.loglevel) if self.options.loglevel.isdigit() else self.options.loglevel.upper()
ch.setLevel(ll)
# Format logs the same as digibyted's debug.log with microprecision (so log files can be concatenated and sorted)
formatter = logging.Formatter(fmt='%(asctime)s.%(msecs)03d000Z %(name)s (%(levelname)s): %(message)s', datefmt='%Y-%m-%dT%H:%M:%S')
formatter.converter = time.gmtime
fh.setFormatter(formatter)
ch.setFormatter(formatter)
# add the handlers to the logger
self.log.addHandler(fh)
self.log.addHandler(ch)
if self.options.trace_rpc:
rpc_logger = logging.getLogger("DigiByteRPC")
rpc_logger.setLevel(logging.DEBUG)
rpc_handler = logging.StreamHandler(sys.stdout)
rpc_handler.setLevel(logging.DEBUG)
rpc_logger.addHandler(rpc_handler)
def _initialize_chain(self):
"""Initialize a pre-mined blockchain for use by the test.
Create a cache of a 200-block-long chain (with wallet) for MAX_NODES
Afterward, create num_nodes copies from the cache."""
assert self.num_nodes <= MAX_NODES
create_cache = False
for i in range(MAX_NODES):
if not os.path.isdir(get_datadir_path(self.options.cachedir, i)):
create_cache = True
break
if create_cache:
self.log.debug("Creating data directories from cached datadir")
# find and delete old cache directories if any exist
for i in range(MAX_NODES):
if os.path.isdir(get_datadir_path(self.options.cachedir, i)):
shutil.rmtree(get_datadir_path(self.options.cachedir, i))
# Create cache directories, run digibyteds:
for i in range(MAX_NODES):
datadir = initialize_datadir(self.options.cachedir, i)
args = [self.options.digibyted, "-datadir=" + datadir, '-disablewallet']
if i > 0:
args.append("-connect=127.0.0.1:" + str(p2p_port(0)))
self.nodes.append(TestNode(i, get_datadir_path(self.options.cachedir, i), extra_conf=["bind=127.0.0.1"], extra_args=[], rpchost=None, timewait=self.rpc_timewait, digibyted=self.options.digibyted, digibyte_cli=self.options.digibytecli, mocktime=self.mocktime, coverage_dir=None))
self.nodes[i].args = args
self.start_node(i)
# Wait for RPC connections to be ready
for node in self.nodes:
node.wait_for_rpc_connection()
# Create a 200-block-long chain; each of the 4 first nodes
# gets 25 mature blocks and 25 immature.
# Note: To preserve compatibility with older versions of
# initialize_chain, only 4 nodes will generate coins.
#
# blocks are created with timestamps 10 minutes apart
# starting from 2010 minutes in the past
self.enable_mocktime()
block_time = self.mocktime - (201 * 10 * 60)
for i in range(2):
for peer in range(4):
for j in range(25):
set_node_times(self.nodes, block_time)
self.nodes[peer].generatetoaddress(1, self.nodes[peer].get_deterministic_priv_key()[0])
block_time += 10 * 60
# Must sync before next peer starts generating blocks
sync_blocks(self.nodes)
# Shut them down, and clean up cache directories:
self.stop_nodes()
self.nodes = []
self.disable_mocktime()
def cache_path(n, *paths):
return os.path.join(get_datadir_path(self.options.cachedir, n), "regtest", *paths)
for i in range(MAX_NODES):
os.rmdir(cache_path(i, 'wallets')) # Remove empty wallets dir
for entry in os.listdir(cache_path(i)):
if entry not in ['chainstate', 'blocks']:
os.remove(cache_path(i, entry))
for i in range(self.num_nodes):
from_dir = get_datadir_path(self.options.cachedir, i)
to_dir = get_datadir_path(self.options.tmpdir, i)
shutil.copytree(from_dir, to_dir)
initialize_datadir(self.options.tmpdir, i) # Overwrite port/rpcport in digibyte.conf
def _initialize_chain_clean(self):
"""Initialize empty blockchain for use by the test.
Create an empty blockchain and num_nodes wallets.
Useful if a test case wants complete control over initialization."""
for i in range(self.num_nodes):
initialize_datadir(self.options.tmpdir, i)
def skip_if_no_py3_zmq(self):
"""Attempt to import the zmq package and skip the test if the import fails."""
try:
import zmq # noqa
except ImportError:
raise SkipTest("python3-zmq module not available.")
def skip_if_no_digibyted_zmq(self):
"""Skip the running test if digibyted has not been compiled with zmq support."""
if not self.is_zmq_compiled():
raise SkipTest("digibyted has not been built with zmq enabled.")
def skip_if_no_wallet(self):
"""Skip the running test if wallet has not been compiled."""
if not self.is_wallet_compiled():
raise SkipTest("wallet has not been compiled.")
def skip_if_no_cli(self):
"""Skip the running test if digibyte-cli has not been compiled."""
if not self.is_cli_compiled():
raise SkipTest("digibyte-cli has not been compiled.")
def is_cli_compiled(self):
"""Checks whether digibyte-cli was compiled."""
config = configparser.ConfigParser()
config.read_file(open(self.options.configfile))
return config["components"].getboolean("ENABLE_UTILS")
def is_wallet_compiled(self):
"""Checks whether the wallet module was compiled."""
config = configparser.ConfigParser()
config.read_file(open(self.options.configfile))
return config["components"].getboolean("ENABLE_WALLET")
def is_zmq_compiled(self):
"""Checks whether the zmq module was compiled."""
config = configparser.ConfigParser()
config.read_file(open(self.options.configfile))
return config["components"].getboolean("ENABLE_ZMQ")
| {
"content_hash": "82d94b4b4799ad3a85680681bfd958fe",
"timestamp": "",
"source": "github",
"line_count": 538,
"max_line_length": 334,
"avg_line_length": 42.11710037174721,
"alnum_prop": 0.6147667593450726,
"repo_name": "digibyte/digibyte",
"id": "ea011cd235f0c1d421ec647457ed62cc33c261ad",
"size": "22928",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test/functional/test_framework/test_framework.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Assembly",
"bytes": "28453"
},
{
"name": "C",
"bytes": "1383691"
},
{
"name": "C++",
"bytes": "6176996"
},
{
"name": "CSS",
"bytes": "126479"
},
{
"name": "HTML",
"bytes": "21860"
},
{
"name": "Java",
"bytes": "30298"
},
{
"name": "M4",
"bytes": "196094"
},
{
"name": "Makefile",
"bytes": "118253"
},
{
"name": "Objective-C",
"bytes": "6742"
},
{
"name": "Objective-C++",
"bytes": "6587"
},
{
"name": "Python",
"bytes": "1705099"
},
{
"name": "QMake",
"bytes": "757"
},
{
"name": "Shell",
"bytes": "98920"
}
],
"symlink_target": ""
} |
import typing
if typing.TYPE_CHECKING: # pragma: no cover
import importlib.metadata as importlib_metadata
else:
try:
import importlib.metadata as importlib_metadata
except ImportError:
import importlib_metadata
__version__ = importlib_metadata.version(__name__.split(".", 1)[0])
__all__ = ["__version__"]
| {
"content_hash": "743f19a168a043cab79fbee7f964b336",
"timestamp": "",
"source": "github",
"line_count": 15,
"max_line_length": 67,
"avg_line_length": 22.6,
"alnum_prop": 0.672566371681416,
"repo_name": "futursolo/destination",
"id": "8b4268be6becc4f8bb71a0f61093def3d2c41fe5",
"size": "1472",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "destination/_version.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "22545"
}
],
"symlink_target": ""
} |
import keystoneauth1.identity.generic as auth_plugins
from keystoneauth1 import loading
from keystoneauth1 import session as ks_session
from keystoneauth1.token_endpoint import Token
from keystoneclient import service_catalog as ks_service_catalog
from keystoneclient.v3 import client as ks_client
from keystoneclient.v3 import endpoints as ks_endpoints
from oslo_config import cfg
from oslo_utils import timeutils
import six
from mistral import context
from mistral import exceptions
CONF = cfg.CONF
CONF.register_opt(cfg.IntOpt('timeout'), group='keystone_authtoken')
def client():
ctx = context.ctx()
auth_url = ctx.auth_uri or CONF.keystone_authtoken.auth_uri
cl = ks_client.Client(
user_id=ctx.user_id,
token=ctx.auth_token,
tenant_id=ctx.project_id,
auth_url=auth_url
)
cl.management_url = auth_url
return cl
def _determine_verify(ctx):
if ctx.insecure:
return False
elif ctx.auth_cacert:
return ctx.auth_cacert
else:
return True
def get_session_and_auth(context, **kwargs):
"""Get session and auth parameters
:param context: action context
    :return: dict to be used as kwargs for client service initialization
"""
if not context:
raise AssertionError('context is mandatory')
project_endpoint = get_endpoint_for_project(**kwargs)
endpoint = format_url(
project_endpoint.url,
{
'tenant_id': context.project_id,
'project_id': context.project_id
}
)
auth = Token(endpoint=endpoint, token=context.auth_token)
auth_uri = context.auth_uri or CONF.keystone_authtoken.auth_uri
ks_auth = Token(
endpoint=auth_uri,
token=context.auth_token
)
session = ks_session.Session(
auth=ks_auth,
verify=_determine_verify(context)
)
return {
"session": session,
"auth": auth
}
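# Usage sketch: the returned dict is meant to be unpacked into a client
# constructor (SomeClient below is a hypothetical placeholder, not a real API):
#
#   kwargs = get_session_and_auth(ctx, service_type='orchestration')
#   client = SomeClient(**kwargs)   # i.e. SomeClient(session=..., auth=...)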
def _admin_client(trust_id=None):
if CONF.keystone_authtoken.auth_type is None:
auth_url = CONF.keystone_authtoken.auth_uri
project_name = CONF.keystone_authtoken.admin_tenant_name
# You can't use trust and project together
if trust_id:
project_name = None
cl = ks_client.Client(
username=CONF.keystone_authtoken.admin_user,
password=CONF.keystone_authtoken.admin_password,
project_name=project_name,
auth_url=auth_url,
trust_id=trust_id
)
cl.management_url = auth_url
return cl
else:
kwargs = {}
if trust_id:
# Remove project_name and project_id, since we need a trust scoped
# auth object
kwargs['project_name'] = None
kwargs['project_domain_name'] = None
kwargs['project_id'] = None
kwargs['trust_id'] = trust_id
auth = loading.load_auth_from_conf_options(
CONF,
'keystone_authtoken',
**kwargs
)
sess = loading.load_session_from_conf_options(
CONF,
'keystone_authtoken',
auth=auth
)
return ks_client.Client(session=sess)
def client_for_admin():
return _admin_client()
def client_for_trusts(trust_id):
return _admin_client(trust_id=trust_id)
def get_endpoint_for_project(service_name=None, service_type=None,
region_name=None):
if service_name is None and service_type is None:
raise exceptions.MistralException(
"Either 'service_name' or 'service_type' must be provided."
)
ctx = context.ctx()
service_catalog = obtain_service_catalog(ctx)
# When region_name is not passed, first get from context as region_name
# could be passed to rest api in http header ('X-Region-Name'). Otherwise,
# just get region from mistral configuration.
region = (region_name or ctx.region_name)
if service_name == 'keystone':
# Determining keystone endpoint should be done using
# keystone_authtoken section as this option is special for keystone.
region = region or CONF.keystone_authtoken.region_name
else:
region = region or CONF.openstack_actions.default_region
service_endpoints = service_catalog.get_endpoints(
service_name=service_name,
service_type=service_type,
region_name=region
)
endpoint = None
os_actions_endpoint_type = CONF.openstack_actions.os_actions_endpoint_type
for endpoints in six.itervalues(service_endpoints):
for ep in endpoints:
# is V3 interface?
if 'interface' in ep:
interface_type = ep['interface']
if os_actions_endpoint_type in interface_type:
endpoint = ks_endpoints.Endpoint(
None,
ep,
loaded=True
)
break
# is V2 interface?
if 'publicURL' in ep:
endpoint_data = {
'url': ep['publicURL'],
'region': ep['region']
}
endpoint = ks_endpoints.Endpoint(
None,
endpoint_data,
loaded=True
)
break
if not endpoint:
raise exceptions.MistralException(
"No endpoints found [service_name=%s, service_type=%s,"
" region_name=%s]"
% (service_name, service_type, region)
)
else:
return endpoint
def obtain_service_catalog(ctx):
token = ctx.auth_token
if ctx.is_trust_scoped and is_token_trust_scoped(token):
if ctx.trust_id is None:
raise Exception(
"'trust_id' must be provided in the admin context."
)
trust_client = client_for_trusts(ctx.trust_id)
token_data = trust_client.tokens.get_token_data(
token,
include_catalog=True
)
response = token_data['token']
else:
response = ctx.service_catalog
# Target service catalog may not be passed via API.
# If we don't have the catalog yet, it should be requested.
if not response:
response = client().tokens.get_token_data(
token,
include_catalog=True
)['token']
if not response:
raise exceptions.UnauthorizedException()
service_catalog = ks_service_catalog.ServiceCatalog.factory(response)
return service_catalog
def get_keystone_endpoint_v2():
return get_endpoint_for_project('keystone', service_type='identity')
def get_keystone_url_v2():
return get_endpoint_for_project('keystone', service_type='identity').url
def format_url(url_template, values):
    # Since we can't use the keystone module, we do a similar thing:
# see https://github.com/openstack/keystone/blob/master/keystone/
# catalog/core.py#L42-L60
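    # For example (illustrative values):
    #   format_url('http://host:8004/v1/$(tenant_id)s', {'tenant_id': 'abc'})
    #   returns 'http://host:8004/v1/abc'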
return url_template.replace('$(', '%(') % values
def is_token_trust_scoped(auth_token):
return 'OS-TRUST:trust' in client_for_admin().tokens.validate(auth_token)
def get_admin_session():
"""Returns a keystone session from Mistral's service credentials."""
if CONF.keystone_authtoken.auth_type is None:
auth = auth_plugins.Password(
CONF.keystone_authtoken.auth_uri,
username=CONF.keystone_authtoken.admin_user,
password=CONF.keystone_authtoken.admin_password,
project_name=CONF.keystone_authtoken.admin_tenant_name,
# NOTE(jaosorior): Once mistral supports keystone v3 properly, we
# can fetch the following values from the configuration.
user_domain_name='Default',
project_domain_name='Default')
return ks_session.Session(auth=auth)
else:
auth = loading.load_auth_from_conf_options(
CONF,
'keystone_authtoken'
)
return loading.load_session_from_conf_options(
CONF,
'keystone_authtoken',
auth=auth
)
def will_expire_soon(expires_at):
if not expires_at:
return False
stale_duration = CONF.expiration_token_duration
assert stale_duration, "expiration_token_duration must be specified"
expires = timeutils.parse_isotime(expires_at)
return timeutils.is_soon(expires, stale_duration)
| {
"content_hash": "6f36539b711b1cfb22747f4816167e24",
"timestamp": "",
"source": "github",
"line_count": 287,
"max_line_length": 78,
"avg_line_length": 29.627177700348433,
"alnum_prop": 0.6097847818417029,
"repo_name": "StackStorm/mistral",
"id": "088bf1c065073bc316eb6a469f5f5b521c8a0586",
"size": "9086",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "mistral/utils/openstack/keystone.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "1494"
},
{
"name": "Mako",
"bytes": "951"
},
{
"name": "Python",
"bytes": "2249335"
},
{
"name": "Shell",
"bytes": "31326"
}
],
"symlink_target": ""
} |
import itertools
from copy import copy
from django import forms
from django.conf import settings
from django.contrib.admin.widgets import AdminSplitDateTime, AdminDateWidget
from django.db.models import ObjectDoesNotExist
from django.template.loader import render_to_string
from django.utils.translation import get_language
from django.utils.translation import ugettext as _
from autoreports.forms import BaseReportForm
from autoreports.model_forms import modelform_factory
from autoreports.utils import (is_iterable, get_fields_from_model, get_field_from_model,
parsed_field_name, transmeta_field_name, SEPARATED_FIELD,
get_class_from_path)
from autoreports.wizards import ModelFieldForm, WizardField, WizardAdminField
class BaseReportField(object):
def __init__(self, model, field, field_name=None, instance=None, treatment_transmeta=True, *args, **kwargs):
super(BaseReportField, self).__init__(*args, **kwargs)
self.model = model
self.field = field
self.field_name = field_name or self.field.name
if treatment_transmeta:
self._treatment_transmeta()
self.field_name_parsed = self.field_name.replace(SEPARATED_FIELD, '__')
self.instance = instance
def get_verbose_name(self):
return self.field.verbose_name
def get_help_text(self):
return self.field.help_text
def get_filter_default(self):
return 'icontains'
@classmethod
def get_filters(self):
return (('exact', _('Exact (case-sensitive)')),
('iexact', _('Exact (case-insensitive)')),
('contains', _('Contains (case-sensitive)')),
('icontains', _('Contains (case-insensitive)')),
('startswith', _('Starts with (case-sensitive)')),
('istartswith', _('Starts with (case-insensitive)')),
('endswith', _('Ends with (case-sensitive)')),
('iendswith', _('Ends with (case-insensitive)')),
('lt', _('Less than')),
('lte', _('Less than or equal')),
('gt', _('Greater than')),
('gte', _('Greater than or equal')),
('in', _('In (coma separated list)')),
#('isnull', _('Is empty')),)
)
@classmethod
def get_widgets_initial(self):
return ('', '---------')
@classmethod
def get_widgets_available(self):
return tuple()
def get_change_filter(self, fil, opts):
return (fil, dict(self.get_filters())[fil])
def _get_widget_from_opts(self, opts):
return opts and opts.get('widget', None) or None
def change_widget(self, field, opts=None):
return field
def change_value(self, value, key, request_get):
return (value, request_get)
def get_value(self, obj, field_name=None):
field_name = field_name or self.field_name_parsed
return getattr(obj, field_name)
def get_label_to_opts(self, opts):
autoreports_i18n = getattr(settings, 'AUTOREPORTS_I18N', False)
if not autoreports_i18n:
return opts.get('label', None)
lang = get_language()
return opts.get('label_%s' % lang, None)
def get_help_text_to_opts(self, opts):
autoreports_i18n = getattr(settings, 'AUTOREPORTS_I18N', False)
if not autoreports_i18n:
return opts.get('help_text', None)
lang = get_language()
return opts.get('help_text_%s' % lang, None)
def get_basic_field_form(self, form, field_name):
return form.base_fields[field_name]
def get_field_form(self, opts=None, default=True,
fields_form_filter=None, fields_form_display=None):
prefix, field_name = parsed_field_name(self.field_name)
form = modelform_factory(self.model, form=BaseReportForm, fields=[field_name])
field = self.get_basic_field_form(form, field_name)
autoreports_initial = getattr(settings, 'AUTOREPORTS_INITIAL', True)
autoreports_subfix = getattr(settings, 'AUTOREPORTS_SUBFIX', True)
if not autoreports_initial:
field.initial = None
if opts:
help_text = self.get_help_text_to_opts(opts)
display = opts.get('display', None)
filters = opts.get('filters', [])
label = self.get_label_to_opts(opts)
if label:
field.label = label
if help_text:
field.help_text = help_text
if display:
fields_form_display[self.field_name] = copy(field)
for fil in filters:
field_copy = copy(field)
fil, verbose_fil = self.get_change_filter(fil, opts)
field_name_subfix = "%s__%s" % (self.field_name_parsed, fil)
if autoreports_subfix:
field_label = u"%s (%s)" % (field_copy.label, verbose_fil)
field_copy.label = field_label
fields_form_filter[field_name_subfix] = self.change_widget(field_copy, opts)
else:
if default:
fil = self.get_filter_default()
if fil is None:
return (fields_form_filter, fields_form_display)
field_name_subfix = "%s__%s" % (self.field_name_parsed, fil)
if autoreports_subfix:
field_label = u"%s (%s)" % (field.label, dict(self.get_filters())[fil])
field.label = field_label
fields_form_filter[field_name_subfix] = self.change_widget(field)
else:
fields_form_display[self.field_name] = field
return (fields_form_filter, fields_form_display)
def get_class_form(self, is_admin=True):
if is_admin:
wizard_admin_path = getattr(settings, 'AUTOREPORTS_WIZARDADMINFIELD', None)
if not wizard_admin_path:
return WizardAdminField
else:
return get_class_from_path(wizard_admin_path)
wizard_path = getattr(settings, 'AUTOREPORTS_WIZARDFIELD', None)
if not wizard_path:
return WizardField
return get_class_from_path(wizard_path)
def get_form(self, is_admin=True):
wizard_class = self.get_class_form(is_admin)
return wizard_class(self,
instance=self.instance,
prefix=id(self))
def render_model_field(self, form, model, is_admin=True):
modelfieldform = ModelFieldForm(initial={'app_label': model._meta.app_label,
'module_name': model._meta.module_name,
'field_name': self.field_name},
instance=self.instance,
prefix=form.prefix)
return unicode(modelfieldform)
def extra_wizard_fields(self):
return {}
def render_wizard(self, is_admin=True):
return unicode(self.get_form(is_admin))
def render_api(self, modelfieldform, wizard):
return self.render_admin(modelfieldform, wizard)
def render_admin(self, modelfieldform, wizard):
return "<div class='adaptor'>%s %s <h2 class='removeAdaptor'>%s</h2></div>" % (modelfieldform,
wizard,
_("Remove"))
def render(self, form, model, is_admin=True):
modelfieldform = self.render_model_field(form, model)
wizard = self.render_wizard(is_admin)
if is_admin:
return self.render_admin(modelfieldform, wizard)
return self.render_api(modelfieldform, wizard)
def render_instance(self, is_admin=True):
wizard = self.get_form(is_admin)
content_type = self.instance.content_type
model, field = get_field_from_model(content_type.model_class(), self.field_name)
return self.render(wizard,
model,
is_admin)
def _treatment_transmeta(self):
self.field_name = transmeta_field_name(self.field, self.field_name)
class TextFieldReportField(BaseReportField):
@classmethod
def get_widgets_available(self):
return (self.get_widgets_initial(), ('textarea', _('Text Area')),)
def get_filter_default(self):
return 'icontains'
def change_value(self, value, key, request_get):
if len(value) <= 0 or not value[0]:
del request_get[key]
return (value, request_get)
def change_widget(self, field, opts=None):
widget = self._get_widget_from_opts(opts)
if widget == 'textarea':
field.widget = forms.Textarea()
else:
field.widget = forms.TextInput()
return field
def extra_wizard_fields(self):
prefix, field_name = parsed_field_name(self.field_name)
prefix = SEPARATED_FIELD.join(prefix)
fields = get_fields_from_model(self.model, adaptors=(TextFieldReportField,))
current_field_name = self.field_name.split(SEPARATED_FIELD)[-1]
choices = [(f['name'], f['verbose_name']) for f in fields[0] if f['name'] != current_field_name]
if not choices:
return {}
initial = None
if self.instance:
field_options = self.instance.options.get(self.field_name, None)
if field_options:
initial = field_options.get('other_fields', None)
return {'other_fields': forms.MultipleChoiceField(label=_('Other fields to filter'),
required=False,
choices=choices,
widget=forms.CheckboxSelectMultiple,
initial=initial,
help_text=_('Choose other fields, when you filter with this field, you will search in these also'))}
@classmethod
def get_filters(self):
return (('exact', _('Exact (case-sensitive)')),
('iexact', _('Exact (case-insensitive)')),
('contains', _('Contains (case-sensitive)')),
('icontains', _('Contains (case-insensitive)')),
('startswith', _('Starts with (case-sensitive)')),
('istartswith', _('Starts with (case-insensitive)')),
('endswith', _('Ends with (case-sensitive)')),
('iendswith', _('Ends with (case-insensitive)')),
#('isnull', _('Is empty')),
)
class ProviderSelectSigle(object):
slug_single = 'single'
@classmethod
def get_widgets_available(self):
return (self.get_widgets_initial(),) + self.get_widgets_available_single()
@classmethod
def get_widgets_available_single(self):
return (('single__select', _('Select')),
('single__radiobuttons', _('Radio buttons')))
def get_change_filter(self, fil, opts):
widget = self._get_widget_from_opts(opts)
if widget and widget.startswith(self.slug_single):
return ('exact', _('Exact'))
return super(ProviderSelectSigle, self).get_change_filter(fil, opts)
def change_widget_sigle(self, field, choices, widget):
field_initial = field.initial and field.initial[0] or None
field = forms.ChoiceField(label=field.label,
choices=choices,
help_text=field.help_text,
initial=field_initial)
if widget == 'single__radiobuttons':
field.widget = forms.RadioSelect(choices=field.widget.choices)
return field
def change_widget(self, field, opts=None):
widget = self._get_widget_from_opts(opts)
choices = field.widget.choices
choice_empty = [self.get_widgets_initial()]
if isinstance(choices, list):
new_choices = choice_empty + choices
else:
new_choices = itertools.chain(choice_empty, choices)
if widget and widget.startswith(self.slug_single):
field = self.change_widget_sigle(field, new_choices, widget)
return field
class ProviderSelectMultiple(object):
slug_multiple = 'multiple'
@classmethod
def get_widgets_available(self):
return (self.get_widgets_initial(),) + self.get_widgets_available_multiple()
@classmethod
def get_widgets_available_multiple(self):
return (('multiple__select', _('Select Multiple')),
('multiple__checkboxes', _('CheckBox Multiple')),)
def get_change_filter(self, fil, opts):
widget = self._get_widget_from_opts(opts)
if widget and widget.startswith(self.slug_multiple):
return ('in', _('In'))
return super(ProviderSelectMultiple, self).get_change_filter(fil, opts)
def change_widget_multiple(self, field, choices, widget):
field = forms.MultipleChoiceField(label=field.label,
choices=choices,
help_text=field.help_text,
initial=(field.initial,))
if widget == 'multiple__checkboxes':
field.widget = forms.CheckboxSelectMultiple(choices=choices)
return field
def change_widget(self, field, opts=None):
widget = self._get_widget_from_opts(opts)
choices = field.widget.choices
if isinstance(choices, list):
new_choices = choices
else:
new_choices = itertools.islice(choices, 1, None)
if widget and widget.startswith(self.slug_multiple):
field = self.change_widget_multiple(field, new_choices, widget)
elif not widget:
choice_empty = [self.get_widgets_initial()]
if isinstance(choices, list):
new_choices = choice_empty + choices
else:
new_choices = choices
field.choices = new_choices
return field
class ChoicesFieldReportField(ProviderSelectMultiple, TextFieldReportField):
def get_value(self, obj, field_name=None):
field_name = field_name or self.field_name_parsed
choice_display = getattr(obj, 'get_%s_display' % field_name, None)
if choice_display and callable(choice_display):
return choice_display()
return super(ChoicesFieldReportField, self).get_value(obj, field_name)
def get_filter_default(self):
return 'exact'
def change_value(self, value, key, request_get):
if not value[0]:
del request_get[key]
return (value, request_get)
@classmethod
def get_filters(self):
return (('exact', _('Exact')),
#('isnull', _('Is empty')),)
)
def extra_wizard_fields(self):
return super(TextFieldReportField, self).extra_wizard_fields()
class NumberFieldReportField(BaseReportField):
def get_filter_default(self):
return 'exact'
def change_value(self, value, key, request_get):
if value and len(value) > 0 and value[0].isnumeric():
return (value, request_get)
del request_get[key]
return (value, request_get)
@classmethod
def get_filters(self):
return (('exact', _('Exact (case-sensitive)')),
('lt', _('Less than')),
('lte', _('Less than or equal')),
('gt', _('Greater than')),
('gte', _('Greater than or equal')),)
class AutoNumberFieldReportField(NumberFieldReportField):
def get_basic_field_form(self, form, field_name):
return forms.IntegerField(label=self.get_verbose_name())
class BaseDateFieldReportField(BaseReportField):
def change_value_date_widget(self, value, key, request_get, field=None):
if len(value) <= 0 or not value[0]:
del request_get[key]
if (key.endswith('__day') or
key.endswith('__month') or
key.endswith('__year')):
return (self.parser_date(value, field), request_get)
return ([unicode(self.parser_date(value, field))], request_get)
def change_value_datetime_widget(self, value, key, request_get, field=None):
if key.endswith('_0'):
key_1 = key.replace('_0', '_1')
if not key_1 in request_get:
return value
key_without_prefix = key.replace('_0', '')
if request_get[key] and request_get[key_1]:
value = "%s %s" % (request_get[key],
request_get[key_1])
value = [unicode(self.parser_date([value], field))]
request_get.setlist(key_without_prefix, value)
initial_date = 'initial-%s' % key_without_prefix
if request_get.get(initial_date, None):
del request_get['initial-%s' % key_without_prefix]
del request_get[key]
del request_get[key_1]
return (value, request_get)
class DateFieldReportField(BaseDateFieldReportField):
def get_filter_default(self):
return 'exact'
def change_widget(self, field, opts=None):
field.widget = AdminDateWidget()
return field
def parser_date(self, value, field=None):
try:
field = field or self.field.formfield()
return field.clean(value[0])
except forms.ValidationError:
return value
def change_value(self, value, key, request_get):
return self.change_value_date_widget(value, key, request_get)
@classmethod
def get_filters(self):
return (('exact', _('Exact')),
('lt', _('Less than')),
('lte', _('Less than or equal')),
('gt', _('Greater than')),
('gte', _('Greater than or equal')),
#('isnull', _('Is empty')),
)
class DateTimeFieldReportField(DateFieldReportField):
@classmethod
def get_widgets_available(self):
return (self.get_widgets_initial(), ('date', _('Date Widget')),)
def change_widget(self, field, opts=None):
widget = self._get_widget_from_opts(opts)
if widget == 'date':
field = forms.DateField(label=field.label,
help_text=field.help_text)
field.widget = AdminDateWidget()
else:
field.widget = AdminSplitDateTime()
return field
def change_value(self, value, key, request_get):
if key.endswith('_0') or key.endswith('_1'):
return self.change_value_datetime_widget(value, key, request_get, field=forms.DateTimeField())
return self.change_value_date_widget(value, key, request_get, field=forms.DateField())
class BooleanFieldReportField(BaseReportField):
def change_widget(self, field, opts=None):
choices = (self.get_widgets_initial(),
('0', _('No')),
('1', _('Yes')),)
field.widget = forms.Select(choices=choices)
return field
def change_value(self, value, key, request_get):
if len(value) > 0:
if value[0] == '0':
return ([False], request_get)
elif value[0] == '1':
return ([True], request_get)
if key in request_get:
del request_get[key]
return (value, request_get)
def get_filter_default(self):
return 'exact'
@classmethod
def get_filters(self):
return (('exact', _('Exact')),
#('isnull', _('Is empty'))
)
class RelatedReportField(BaseReportField):
def _treatment_transmeta(self):
pass
def _post_preccessing_get_value(self, value):
if is_iterable(value):
if len(value) == 0:
return None
elif len(value) == 1:
return value[0]
return value
def get_filter_default(self):
return 'in'
def change_value(self, value, key, request_get):
if len(value) <= 0 or not value[0]:
del request_get[key]
return (value, request_get)
@classmethod
def get_filters(self):
return (('in', _('In')),
#('isnull', _('Is empty')),
)
class RelatedReverseField(ProviderSelectSigle, RelatedReportField):
def get_basic_field_form(self, form, field_name):
return forms.ModelMultipleChoiceField(label=self.get_verbose_name(),
queryset=self.field.model.objects.all())
def get_value(self, obj, field_name=None):
field_name = self.field.get_accessor_name()
return self._post_preccessing_get_value(getattr(obj, field_name).all())
def get_verbose_name(self):
return self.field.field.verbose_name
def get_help_text(self):
return self.field.field.help_text
class RelatedDirectField(RelatedReportField):
pass
class ForeingKeyReportField(ProviderSelectMultiple, RelatedDirectField):
@classmethod
def get_filters(self):
return (('exact', _('Exact')),
#('isnull', _('Is empty')),
)
def get_filter_default(self):
return 'exact'
def get_value(self, obj, field_name=None):
try:
return super(ForeingKeyReportField, self).get_value(obj, field_name)
except ObjectDoesNotExist:
            return None  # Integrity Error
class M2MReportField(ProviderSelectSigle, RelatedDirectField):
def get_value(self, obj, field_name=None):
return self._post_preccessing_get_value(
super(RelatedDirectField, self).get_value(obj, field_name).all())
class FuncField(BaseReportField):
from autoreports.utils import add_domain
middleware_value = {'get_absolute_url': add_domain}
def get_basic_field_form(self, form, field_name):
class FakeFuncFieldForm(object):
def __init__(self, label, help_text):
self.label = label
self.help_text = help_text
return FakeFuncFieldForm(label=self.get_verbose_name(),
help_text=self.get_help_text())
def get_verbose_name(self):
label = getattr(self.field, 'label', '')
if label:
return label
prefix, field_name = parsed_field_name(self.field_name)
return field_name
def get_help_text(self):
return getattr(self.field, 'short_description', '')
def get_value(self, obj, field_name=None):
func_args = self.field.im_func.func_code.co_argcount
if func_args == 1:
value = super(FuncField, self).get_value(obj, field_name)()
elif func_args == 2:
value = self.field(obj)
else:
value = 'error'
if field_name in self.middleware_value:
value = self.middleware_value[field_name](value)
return value
def _treatment_transmeta(self):
pass
def get_filter_default(self):
return None
@classmethod
def get_filters(self):
return tuple()
def render_admin(self, modelfieldform, wizard):
try:
from inspect import getsource
from pygments import highlight
from pygments.lexers import PythonLexer
from pygments.formatters import HtmlFormatter
code = getsource(self.field)
code = highlight(code, PythonLexer(), HtmlFormatter(cssclass="syntax hiddenElement"))
except TypeError:
code = ""
except ImportError:
code = ""
adaptor_help = render_to_string("autoreports/fields/func_field_wizard.html", {'code': code})
return "<div class='adaptor'>%s %s %s<h2 class='removeAdaptor'>%s</h2></div>" % (modelfieldform,
adaptor_help,
wizard,
_("Remove"))
class PropertyField(FuncField):
def get_value(self, obj, field_name=None):
return super(FuncField, self).get_value(obj, field_name)
class GenericFKField(PropertyField):
pass
| {
"content_hash": "7a871d44de278725f0bfffb4f19fcaaf",
"timestamp": "",
"source": "github",
"line_count": 659,
"max_line_length": 152,
"avg_line_length": 37.4051593323217,
"alnum_prop": 0.5675456389452332,
"repo_name": "fatihzkaratana/intranet",
"id": "bdf879c6ed382c4ccc9db74b3ab3d924a74ef8f7",
"size": "24650",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "backend/autoreports/fields.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "602"
},
{
"name": "CSS",
"bytes": "92307"
},
{
"name": "CoffeeScript",
"bytes": "65346"
},
{
"name": "HTML",
"bytes": "75018"
},
{
"name": "JavaScript",
"bytes": "300050"
},
{
"name": "PHP",
"bytes": "1052"
},
{
"name": "Python",
"bytes": "753185"
},
{
"name": "Ruby",
"bytes": "503"
},
{
"name": "Shell",
"bytes": "787"
}
],
"symlink_target": ""
} |
import pyro
from pyro.infer.tracegraph_elbo import TraceGraph_ELBO
from tests.common import assert_equal
from tests.integration_tests.test_conjugate_gaussian_models import GaussianChain
class ConjugateChainGradientTests(GaussianChain):
def test_gradients(self):
for N in [3, 5]:
for reparameterized in [True, False]:
self.do_test_gradients(N, reparameterized)
def do_test_gradients(self, N, reparameterized):
pyro.clear_param_store()
self.setup_chain(N)
elbo = TraceGraph_ELBO(
num_particles=100000, vectorize_particles=True, max_plate_nesting=1
)
elbo.loss_and_grads(self.model, self.guide, reparameterized=reparameterized)
for i in range(1, N + 1):
for param_prefix in ["loc_q_%d", "log_sig_q_%d", "kappa_q_%d"]:
if i == N and param_prefix == "kappa_q_%d":
continue
actual_grad = pyro.param(param_prefix % i).grad
assert_equal(
actual_grad,
0.0 * actual_grad,
prec=0.10,
msg="".join(
[
"parameter %s%d" % (param_prefix[:-2], i),
"\nexpected = zero vector",
"\n actual = {}".format(
actual_grad.detach().cpu().numpy()
),
]
),
)
| {
"content_hash": "bb6b6500e0da5b26602109c67135ade2",
"timestamp": "",
"source": "github",
"line_count": 40,
"max_line_length": 84,
"avg_line_length": 38.175,
"alnum_prop": 0.49246889325474785,
"repo_name": "uber/pyro",
"id": "4d5ef116b8670fe22e120731ba2c774e131ce885",
"size": "1616",
"binary": false,
"copies": "1",
"ref": "refs/heads/dev",
"path": "tests/infer/test_conjugate_gradients.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C++",
"bytes": "6121"
},
{
"name": "CSS",
"bytes": "478"
},
{
"name": "Dockerfile",
"bytes": "1635"
},
{
"name": "Makefile",
"bytes": "6857"
},
{
"name": "Python",
"bytes": "3388193"
},
{
"name": "Shell",
"bytes": "6465"
},
{
"name": "TeX",
"bytes": "3649"
}
],
"symlink_target": ""
} |
"""Handler for data backup operation.
Generic datastore admin console transfers control to ConfirmBackupHandler
after selection of entities. The ConfirmBackupHandler confirms with user
his choice, enters a backup name and transfers control to
DoBackupHandler. DoBackupHandler starts backup mappers and displays confirmation
page.
This module also contains actual mapper code for backing data over.
"""
from __future__ import with_statement
import cStringIO
import datetime
import itertools
import logging
import os
import random
import re
import time
import urllib
import xml.dom.minidom
from google.appengine.datastore import entity_pb
from google.appengine.api import apiproxy_stub_map
from google.appengine.api import app_identity
from google.appengine.api import blobstore as blobstore_api
from google.appengine.api import capabilities
from google.appengine.api import datastore
from google.appengine.api import datastore_types
from google.appengine.api import files
from google.appengine.api import taskqueue
from google.appengine.api import urlfetch
from google.appengine.api.files import records
from google.appengine.api.taskqueue import taskqueue_service_pb
from google.appengine.datastore import datastore_query
from google.appengine.datastore import datastore_rpc
from google.appengine.ext import blobstore
from google.appengine.ext import db
from google.appengine.ext import deferred
from google.appengine.ext import webapp
from google.appengine.ext.datastore_admin import backup_pb2
from google.appengine.ext.datastore_admin import config
from google.appengine.ext.datastore_admin import utils
from google.appengine.runtime import apiproxy_errors
try:
from google.appengine.ext.mapreduce import context
from google.appengine.ext.mapreduce import datastore_range_iterators as db_iters
from google.appengine.ext.mapreduce import input_readers
from google.appengine.ext.mapreduce import json_util
from google.appengine.ext.mapreduce import operation as op
from google.appengine.ext.mapreduce import output_writers
except ImportError:
from google.appengine._internal.mapreduce import context
from google.appengine._internal.mapreduce import datastore_range_iterators as db_iters
from google.appengine._internal.mapreduce import input_readers
from google.appengine._internal.mapreduce import json_util
from google.appengine._internal.mapreduce import operation as op
from google.appengine._internal.mapreduce import output_writers
try:
from google.appengine.ext.datastore_admin import services_client
except ImportError:
pass
XSRF_ACTION = 'backup'
BUCKET_PATTERN = (r'^([a-zA-Z0-9]+([\-_]+[a-zA-Z0-9]+)*)'
r'(\.([a-zA-Z0-9]+([\-_]+[a-zA-Z0-9]+)*))*$')
MAX_BUCKET_LEN = 222
MIN_BUCKET_LEN = 3
MAX_BUCKET_SEGMENT_LEN = 63
NUM_KINDS_DEFERRED_THRESHOLD = 10
MAX_BLOBS_PER_DELETE = 500
TEST_WRITE_FILENAME_PREFIX = 'datastore_backup_write_test'
MAX_KEYS_LIST_SIZE = 100
MAX_TEST_FILENAME_TRIES = 10
MEANING_TO_PRIMITIVE_TYPE = {
entity_pb.Property.GD_WHEN: backup_pb2.EntitySchema.DATE_TIME,
entity_pb.Property.GD_RATING: backup_pb2.EntitySchema.RATING,
entity_pb.Property.ATOM_LINK: backup_pb2.EntitySchema.LINK,
entity_pb.Property.ATOM_CATEGORY: backup_pb2.EntitySchema.CATEGORY,
entity_pb.Property.GD_PHONENUMBER: backup_pb2.EntitySchema.PHONE_NUMBER,
entity_pb.Property.GD_POSTALADDRESS: backup_pb2.EntitySchema.POSTAL_ADDRESS,
entity_pb.Property.GD_EMAIL: backup_pb2.EntitySchema.EMAIL,
entity_pb.Property.GD_IM: backup_pb2.EntitySchema.IM_HANDLE,
entity_pb.Property.BLOBKEY: backup_pb2.EntitySchema.BLOB_KEY,
entity_pb.Property.TEXT: backup_pb2.EntitySchema.TEXT,
entity_pb.Property.BLOB: backup_pb2.EntitySchema.BLOB,
entity_pb.Property.BYTESTRING: backup_pb2.EntitySchema.SHORT_BLOB
}
class ConfirmBackupHandler(webapp.RequestHandler):
"""Handler to deal with requests from the admin console to backup data."""
SUFFIX = 'confirm_backup'
@classmethod
def Render(cls, handler):
"""Rendering method that can be called by main.py.
Args:
handler: the webapp.RequestHandler invoking the method
"""
kinds = handler.request.get_all('kind')
sizes_known, size_total, remainder = utils.ParseKindsAndSizes(kinds)
notreadonly_warning = capabilities.CapabilitySet(
'datastore_v3', capabilities=['write']).is_enabled()
blob_warning = bool(blobstore.BlobInfo.all().count(1))
template_params = {
'run_as_a_service': handler.request.get('run_as_a_service'),
'form_target': DoBackupHandler.SUFFIX,
'kind_list': kinds,
'remainder': remainder,
'sizes_known': sizes_known,
'size_total': size_total,
'queues': None,
'datastore_admin_home': utils.GenerateHomeUrl(handler.request),
'namespaces': get_namespaces(handler.request.get('namespace', None)),
'xsrf_token': utils.CreateXsrfToken(XSRF_ACTION),
'notreadonly_warning': notreadonly_warning,
'blob_warning': blob_warning,
'backup_name': 'datastore_backup_%s' % time.strftime('%Y_%m_%d')
}
utils.RenderToResponse(handler, 'confirm_backup.html', template_params)
def get_namespaces(selected_namespace):
namespaces = [('--All--', '*', selected_namespace is None)]
for ns in datastore.Query('__namespace__', keys_only=True).Run():
ns_name = ns.name() or ''
namespaces.append((ns_name or '--Default--',
ns_name,
ns_name == selected_namespace))
return namespaces
class ConfirmDeleteBackupHandler(webapp.RequestHandler):
"""Handler to confirm admin console requests to delete a backup copy."""
SUFFIX = 'confirm_delete_backup'
@classmethod
def Render(cls, handler):
"""Rendering method that can be called by main.py.
Args:
handler: the webapp.RequestHandler invoking the method
"""
requested_backup_ids = handler.request.get_all('backup_id')
backups = []
gs_warning = False
if requested_backup_ids:
for backup in db.get(requested_backup_ids):
if backup:
backups.append(backup)
gs_warning |= backup.filesystem == files.GS_FILESYSTEM
template_params = {
'form_target': DoBackupDeleteHandler.SUFFIX,
'datastore_admin_home': utils.GenerateHomeUrl(handler.request),
'backups': backups,
'xsrf_token': utils.CreateXsrfToken(XSRF_ACTION),
'gs_warning': gs_warning,
'run_as_a_service': handler.request.get('run_as_a_service'),
}
utils.RenderToResponse(handler, 'confirm_delete_backup.html',
template_params)
class ConfirmAbortBackupHandler(webapp.RequestHandler):
"""Handler to confirm admin console requests to abort a backup copy."""
SUFFIX = 'confirm_abort_backup'
@classmethod
def Render(cls, handler):
"""Rendering method that can be called by main.py.
Args:
handler: the webapp.RequestHandler invoking the method
"""
requested_backup_ids = handler.request.get_all('backup_id')
backups = []
if requested_backup_ids:
for backup in db.get(requested_backup_ids):
if backup:
backups.append(backup)
template_params = {
'form_target': DoBackupAbortHandler.SUFFIX,
'datastore_admin_home': utils.GenerateHomeUrl(handler.request),
'backups': backups,
'xsrf_token': utils.CreateXsrfToken(XSRF_ACTION),
'run_as_a_service': handler.request.get('run_as_a_service'),
}
utils.RenderToResponse(handler, 'confirm_abort_backup.html',
template_params)
class ConfirmRestoreFromBackupHandler(webapp.RequestHandler):
"""Handler to confirm admin console requests to restore from backup."""
SUFFIX = 'confirm_restore_from_backup'
@classmethod
def Render(cls, handler, default_backup_id=None,
default_delete_backup_after_restore=False):
"""Rendering method that can be called by main.py.
Args:
handler: the webapp.RequestHandler invoking the method
default_backup_id: default value for handler.request
default_delete_backup_after_restore: default value for handler.request
"""
backup_id = handler.request.get('backup_id', default_backup_id)
backup = db.get(backup_id) if backup_id else None
notreadonly_warning = capabilities.CapabilitySet(
'datastore_v3', capabilities=['write']).is_enabled()
original_app_warning = backup.original_app
if os.getenv('APPLICATION_ID') == original_app_warning:
original_app_warning = None
template_params = {
'form_target': DoBackupRestoreHandler.SUFFIX,
'queues': None,
'datastore_admin_home': utils.GenerateHomeUrl(handler.request),
'backup': backup,
'delete_backup_after_restore': handler.request.get(
'delete_backup_after_restore', default_delete_backup_after_restore),
'xsrf_token': utils.CreateXsrfToken(XSRF_ACTION),
'notreadonly_warning': notreadonly_warning,
'original_app_warning': original_app_warning,
'run_as_a_service': handler.request.get('run_as_a_service'),
}
utils.RenderToResponse(handler, 'confirm_restore_from_backup.html',
template_params)
class ConfirmBackupImportHandler(webapp.RequestHandler):
"""Handler to import backup information."""
SUFFIX = 'backup_information'
@classmethod
def Render(cls, handler):
"""Rendering method that can be called by main.py.
Args:
handler: the webapp.RequestHandler invoking the method
"""
gs_handle = handler.request.get('gs_handle')
error = None if gs_handle else 'Google Cloud Storage path is missing'
other_backup_info_files = []
selected_backup_info_file = None
backup_info_specified = False
if not error:
try:
gs_handle = gs_handle.rstrip()
bucket_name, prefix = parse_gs_handle(gs_handle)
validate_gs_bucket_name(bucket_name)
if not is_accessible_bucket_name(bucket_name):
raise BackupValidationError(
'Bucket "%s" is not accessible' % bucket_name)
if prefix.endswith('.backup_info'):
prefix = prefix[0:prefix.rfind('/')]
backup_info_specified = True
elif prefix and not prefix.endswith('/'):
prefix += '/'
for backup_info_file in list_bucket_files(bucket_name, prefix):
backup_info_path = '/gs/%s/%s' % (bucket_name, backup_info_file)
if backup_info_specified and backup_info_path == gs_handle:
selected_backup_info_file = backup_info_path
elif (backup_info_file.endswith('.backup_info')
and backup_info_file.count('.') == 1):
other_backup_info_files.append(backup_info_path)
except Exception, ex:
error = 'Failed to read bucket: %s' % ex.message
logging.exception(ex.message)
template_params = {
'error': error,
'form_target': DoBackupImportHandler.SUFFIX,
'datastore_admin_home': utils.GenerateHomeUrl(handler.request),
'selected_backup_info_file': selected_backup_info_file,
'other_backup_info_files': other_backup_info_files,
'backup_info_specified': backup_info_specified,
'xsrf_token': utils.CreateXsrfToken(XSRF_ACTION),
'run_as_a_service': handler.request.get('run_as_a_service'),
}
utils.RenderToResponse(handler, 'confirm_backup_import.html',
template_params)
class BackupInformationHandler(webapp.RequestHandler):
"""Handler to display backup information."""
SUFFIX = 'backup_information'
@classmethod
def Render(cls, handler):
"""Rendering method that can be called by main.py.
Args:
handler: the webapp.RequestHandler invoking the method
"""
backup_ids = handler.request.get_all('backup_id')
template_params = {
'backups': db.get(backup_ids),
'datastore_admin_home': utils.GenerateHomeUrl(handler.request),
'run_as_a_service': handler.request.get('run_as_a_service'),
}
utils.RenderToResponse(handler, 'backup_information.html', template_params)
class BaseDoHandler(webapp.RequestHandler):
"""Base class for all Do*Handlers."""
MAPREDUCE_DETAIL = config.MAPREDUCE_PATH + '/detail?mapreduce_id='
def get(self):
"""Handler for get requests to datastore_admin backup operations.
Status of executed jobs is displayed.
"""
jobs = self.request.get_all('job')
remote_job = self.request.get('remote_job')
tasks = self.request.get_all('task')
error = self.request.get('error', '')
xsrf_error = self.request.get('xsrf_error', '')
template_params = {
'job_list': jobs,
'remote_job': remote_job,
'task_list': tasks,
'mapreduce_detail': self.MAPREDUCE_DETAIL,
'error': error,
'xsrf_error': xsrf_error,
'datastore_admin_home': utils.GenerateHomeUrl(self.request),
}
utils.RenderToResponse(self, self._get_html_page, template_params)
@property
def _get_html_page(self):
"""Return the name of the HTML page for HTTP/GET requests."""
raise NotImplementedError
@property
def _get_post_html_page(self):
"""Return the name of the HTML page for HTTP/POST requests."""
raise NotImplementedError
def _ProcessPostRequest(self):
"""Process the HTTP/POST request and return the result as parametrs."""
raise NotImplementedError
def _GetBasicMapperParams(self):
namespace = self.request.get('namespace', None)
if namespace == '*':
namespace = None
return {'namespace': namespace}
def SendRedirect(self, path=None, params=()):
"""Send a redirect response."""
run_as_a_service = self.request.get('run_as_a_service')
if run_as_a_service:
params = list(params)
params.append(('run_as_a_service', True))
dest = config.BASE_PATH
if path:
dest = '%s/%s' % (dest, path)
if params:
dest = '%s?%s' % (dest, urllib.urlencode(params))
self.redirect(dest)
def post(self):
"""Handler for post requests to datastore_admin/backup.do.
Redirects to the get handler after processing the request.
"""
token = self.request.get('xsrf_token')
if not utils.ValidateXsrfToken(token, XSRF_ACTION):
parameters = [('xsrf_error', '1')]
else:
try:
parameters = self._ProcessPostRequest()
except Exception, e:
error = self._HandleException(e)
parameters = [('error', error)]
self.SendRedirect(self._get_post_html_page, parameters)
def _HandleException(self, e):
"""Make exception handling overridable by tests.
Args:
e: The exception to handle.
Returns:
The exception error string.
"""
logging.exception(e.message)
return '%s: %s' % (type(e), e.message)
class BackupValidationError(utils.Error):
"""Raised upon backup request validation."""
def _perform_backup(run_as_a_service, kinds, selected_namespace,
filesystem, gs_bucket_name, backup,
queue, mapper_params, max_jobs):
"""Triggers backup mapper jobs.
Args:
run_as_a_service: True if backup should be done via admin-jobs
kinds: a sequence of kind names
selected_namespace: The selected namespace or None for all
filesystem: files.BLOBSTORE_FILESYSTEM or files.GS_FILESYSTEM
or None to default to blobstore
gs_bucket_name: the GS file system bucket in which to store the backup
when using the GS file system, and otherwise ignored
backup: the backup name
queue: the task queue for the backup task
mapper_params: the mapper parameters
max_jobs: if backup needs more jobs than this, defer them
Returns:
The job or task ids.
Raises:
BackupValidationError: On validation error.
Exception: On other error.
"""
BACKUP_COMPLETE_HANDLER = __name__ + '.BackupCompleteHandler'
BACKUP_HANDLER = __name__ + '.BackupEntity.map'
INPUT_READER = __name__ + '.DatastoreEntityProtoInputReader'
OUTPUT_WRITER = output_writers.__name__ + '.FileRecordsOutputWriter'
if run_as_a_service:
if not gs_bucket_name:
raise BackupValidationError('Bucket name missing.')
gs_bucket_name = validate_and_canonicalize_gs_bucket(gs_bucket_name)
datastore_admin_service = services_client.DatastoreAdminClient()
description = 'Remote backup job: %s' % backup
remote_job_id = datastore_admin_service.create_backup(
description, backup, gs_bucket_name, selected_namespace, kinds)
return [('remote_job', remote_job_id)]
queue = queue or os.environ.get('HTTP_X_APPENGINE_QUEUENAME', 'default')
if queue[0] == '_':
queue = 'default'
if not filesystem:
filesystem = files.BLOBSTORE_FILESYSTEM
if filesystem == files.GS_FILESYSTEM:
if not gs_bucket_name:
raise BackupValidationError('Bucket name missing.')
gs_bucket_name = validate_and_canonicalize_gs_bucket(gs_bucket_name)
elif filesystem == files.BLOBSTORE_FILESYSTEM:
pass
else:
raise BackupValidationError('Unknown filesystem "%s".' % filesystem)
  job_name = 'datastore_backup_%s_%%(kind)s' % re.sub(r'[^\w]', '_', backup)
  # Initialized up front so the cleanup in the except clause below cannot
  # raise a NameError if StartOperation itself fails.
  backup_info = None
  job_operation = None
  try:
job_operation = utils.StartOperation('Backup: %s' % backup)
backup_info = BackupInformation(parent=job_operation)
backup_info.filesystem = filesystem
backup_info.name = backup
backup_info.kinds = kinds
if selected_namespace is not None:
backup_info.namespaces = [selected_namespace]
backup_info.put(force_writes=True)
mapreduce_params = {
'done_callback_handler': BACKUP_COMPLETE_HANDLER,
'backup_info_pk': str(backup_info.key()),
'force_ops_writes': True,
}
mapper_params = dict(mapper_params)
mapper_params['filesystem'] = filesystem
if filesystem == files.GS_FILESYSTEM:
mapper_params['gs_bucket_name'] = gs_bucket_name
if len(kinds) <= max_jobs:
return [('job', job) for job in _run_map_jobs(
job_operation.key(), backup_info.key(), kinds, job_name,
BACKUP_HANDLER, INPUT_READER, OUTPUT_WRITER,
mapper_params, mapreduce_params, queue)]
else:
retry_options = taskqueue.TaskRetryOptions(task_retry_limit=1)
deferred_task = deferred.defer(_run_map_jobs_deferred,
backup, job_operation.key(),
backup_info.key(), kinds, job_name,
BACKUP_HANDLER, INPUT_READER,
OUTPUT_WRITER, mapper_params,
mapreduce_params, queue, _queue=queue,
_url=config.DEFERRED_PATH,
_retry_options=retry_options)
return [('task', deferred_task.name)]
except Exception:
logging.exception('Failed to start a datastore backup job[s] for "%s".',
backup)
if backup_info:
delete_backup_info(backup_info)
if job_operation:
job_operation.status = utils.DatastoreAdminOperation.STATUS_FAILED
job_operation.put(force_writes=True)
raise
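# Editor-added sketch (hypothetical, not part of the original module): a
# minimal direct call to _perform_backup for a blobstore-backed backup of a
# single kind.  It assumes it runs inside a task-queue request so the default
# queue fallback applies; the kind and backup names are made up.
def _example_perform_blobstore_backup():
  return _perform_backup(run_as_a_service=False,
                         kinds=['Greeting'],
                         selected_namespace=None,
                         filesystem=files.BLOBSTORE_FILESYSTEM,
                         gs_bucket_name=None,
                         backup='example_backup_2013_01_01',
                         queue='default',
                         mapper_params={'namespace': None},
                         max_jobs=10)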
class BackupLinkHandler(webapp.RequestHandler):
"""Handler to deal with requests to the backup link to backup data."""
SUFFIX = 'backup.create'
def get(self):
"""Handler for get requests to datastore_admin/backup.create."""
self.post()
def post(self):
"""Handler for post requests to datastore_admin/backup.create."""
try:
if ('X-AppEngine-TaskName' not in self.request.headers and
'X-AppEngine-Cron' not in self.request.headers):
logging.critical('Scheduled backups must be started via task queue or '
'cron.')
self.response.set_status(403)
return
backup_prefix = self.request.get('name')
if not backup_prefix:
if self.request.headers.get('X-AppEngine-Cron'):
backup_prefix = 'cron-'
else:
backup_prefix = 'link-'
backup_prefix_with_date = backup_prefix + time.strftime('%Y_%m_%d')
backup_name = backup_prefix_with_date
backup_suffix_counter = 1
while BackupInformation.name_exists(backup_name):
backup_suffix_counter += 1
backup_name = backup_prefix_with_date + '-' + str(backup_suffix_counter)
kinds = self.request.get_all('kind')
if not kinds:
self.errorResponse('Backup must include at least one kind.')
return
for kind in kinds:
if not utils.IsKindNameVisible(kind):
self.errorResponse('Invalid kind %s.' % kind)
return
namespace = self.request.get('namespace', None)
if namespace == '*':
namespace = None
mapper_params = {'namespace': namespace}
_perform_backup(self.request.get('run_as_a_service', False),
kinds,
namespace,
self.request.get('filesystem'),
self.request.get('gs_bucket_name'),
backup_name,
self.request.get('queue'),
mapper_params,
1000000)
except Exception, e:
self.errorResponse(e.message)
def errorResponse(self, message):
logging.error('Could not create backup via link: %s', message)
self.response.set_status(400, message)
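# Editor-added sketch (hypothetical, not part of the original module): one way
# to trigger BackupLinkHandler programmatically.  Push-queue tasks carry the
# X-AppEngine-TaskName header the handler checks for; the URL below assumes
# the handler is mounted under config.BASE_PATH, and the kind and bucket
# values are made up.
def _example_schedule_backup_task():
  return taskqueue.add(
      url='%s/%s' % (config.BASE_PATH, BackupLinkHandler.SUFFIX),
      method='GET',
      params={'name': 'nightly',
              'kind': 'Greeting',
              'filesystem': files.GS_FILESYSTEM,
              'gs_bucket_name': 'my-backup-bucket'},
      queue_name='default')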
class DatastoreEntityProtoInputReader(input_readers.RawDatastoreInputReader):
"""An input reader which yields datastore entity proto for a kind."""
_KEY_RANGE_ITER_CLS = db_iters.KeyRangeEntityProtoIterator
class DoBackupHandler(BaseDoHandler):
"""Handler to deal with requests from the admin console to backup data."""
SUFFIX = 'backup.do'
_get_html_page = 'do_backup.html'
_get_post_html_page = SUFFIX
def _ProcessPostRequest(self):
"""Triggers backup mapper jobs and returns their ids."""
try:
backup = self.request.get('backup_name').strip()
if not backup:
raise BackupValidationError('Unspecified backup name.')
if BackupInformation.name_exists(backup):
raise BackupValidationError('Backup "%s" already exists.' % backup)
mapper_params = self._GetBasicMapperParams()
backup_result = _perform_backup(self.request.get('run_as_a_service',
False),
self.request.get_all('kind'),
mapper_params.get('namespace'),
self.request.get('filesystem'),
self.request.get('gs_bucket_name'),
backup,
self.request.get('queue'),
mapper_params,
10)
return backup_result
except Exception, e:
logging.exception(e.message)
return [('error', e.message)]
def _run_map_jobs_deferred(backup_name, job_operation_key, backup_info_key,
kinds, job_name, backup_handler, input_reader,
output_writer, mapper_params, mapreduce_params,
queue):
backup_info = BackupInformation.get(backup_info_key)
if backup_info:
try:
_run_map_jobs(job_operation_key, backup_info_key, kinds, job_name,
backup_handler, input_reader, output_writer, mapper_params,
mapreduce_params, queue)
except BaseException:
logging.exception('Failed to start a datastore backup job[s] for "%s".',
backup_name)
delete_backup_info(backup_info)
else:
logging.info('Missing backup info, can not start backup jobs for "%s"',
backup_name)
def _run_map_jobs(job_operation_key, backup_info_key, kinds, job_name,
backup_handler, input_reader, output_writer, mapper_params,
mapreduce_params, queue):
"""Creates backup/restore MR jobs for the given operation.
Args:
job_operation_key: a key of utils.DatastoreAdminOperation entity.
backup_info_key: a key of BackupInformation entity.
kinds: a list of kinds to run the M/R for.
job_name: the M/R job name prefix.
backup_handler: M/R job completion handler.
input_reader: M/R input reader.
output_writer: M/R output writer.
mapper_params: custom parameters to pass to mapper.
mapreduce_params: dictionary parameters relevant to the whole job.
queue: the name of the queue that will be used by the M/R.
Returns:
Ids of all started mapper jobs as list of strings.
"""
backup_info = BackupInformation.get(backup_info_key)
if not backup_info:
return []
jobs = utils.RunMapForKinds(
job_operation_key,
kinds,
job_name,
backup_handler,
input_reader,
output_writer,
mapper_params,
mapreduce_params,
queue_name=queue)
backup_info.active_jobs = jobs
backup_info.put(force_writes=True)
return jobs
def get_backup_files(backup_info, selected_kinds=None):
"""Returns the backup filenames for selected kinds or all if None/Empty."""
if backup_info.blob_files:
return backup_info.blob_files
else:
kinds_backup_files = backup_info.get_kind_backup_files(selected_kinds)
return list(itertools.chain(*(
kind_backup_files.files for kind_backup_files in kinds_backup_files)))
def delete_backup_files(filesystem, backup_files):
if backup_files:
if filesystem == files.BLOBSTORE_FILESYSTEM:
blob_keys = []
for fname in backup_files:
blob_key = files.blobstore.get_blob_key(fname)
if blob_key:
blob_keys.append(blob_key)
if len(blob_keys) == MAX_BLOBS_PER_DELETE:
blobstore_api.delete(blob_keys)
blob_keys = []
if blob_keys:
blobstore_api.delete(blob_keys)
def delete_backup_info(backup_info, delete_files=True):
"""Deletes a backup including its associated files and other metadata."""
if backup_info.blob_files:
delete_backup_files(backup_info.filesystem, backup_info.blob_files)
backup_info.delete(force_writes=True)
else:
kinds_backup_files = tuple(backup_info.get_kind_backup_files())
if delete_files:
delete_backup_files(backup_info.filesystem, itertools.chain(*(
kind_backup_files.files for kind_backup_files in kinds_backup_files)))
db.delete(kinds_backup_files + (backup_info,), force_writes=True)
class DoBackupDeleteHandler(BaseDoHandler):
"""Handler to deal with datastore admin requests to delete backup data."""
SUFFIX = 'backup_delete.do'
def get(self):
self.post()
def post(self):
"""Handler for post requests to datastore_admin/backup_delete.do.
    Deletes are executed and the user is redirected to the base-path handler.
"""
backup_ids = self.request.get_all('backup_id')
token = self.request.get('xsrf_token')
params = ()
if backup_ids and utils.ValidateXsrfToken(token, XSRF_ACTION):
try:
for backup_info in db.get(backup_ids):
if backup_info:
delete_backup_info(backup_info)
except Exception, e:
logging.exception('Failed to delete datastore backup.')
params = [('error', e.message)]
self.SendRedirect(params=params)
class DoBackupAbortHandler(BaseDoHandler):
"""Handler to deal with datastore admin requests to abort pending backups."""
SUFFIX = 'backup_abort.do'
def get(self):
self.post()
def post(self):
"""Handler for post requests to datastore_admin/backup_abort.do.
    The abort is executed and the user is redirected to the base-path handler.
"""
backup_ids = self.request.get_all('backup_id')
token = self.request.get('xsrf_token')
params = ()
if backup_ids and utils.ValidateXsrfToken(token, XSRF_ACTION):
try:
for backup_info in db.get(backup_ids):
if backup_info:
operation = backup_info.parent()
if operation.parent_key():
job_id = str(operation.parent_key())
datastore_admin_service = services_client.DatastoreAdminClient()
datastore_admin_service.abort_backup(job_id)
else:
utils.AbortAdminOperation(operation.key())
delete_backup_info(backup_info)
except Exception, e:
logging.exception('Failed to abort pending datastore backup.')
params = [('error', e.message)]
self.SendRedirect(params=params)
class DoBackupRestoreHandler(BaseDoHandler):
"""Handler to restore backup data.
Deals with requests from the admin console.
"""
SUFFIX = 'backup_restore.do'
BACKUP_RESTORE_HANDLER = __name__ + '.RestoreEntity.map'
RESTORE_COMPLETE_HANDLER = __name__ + '.RestoreCompleteHandler'
INPUT_READER = input_readers.__name__ + '.RecordsReader'
_get_html_page = 'do_restore_from_backup.html'
_get_post_html_page = SUFFIX
def _ProcessPostRequest(self):
"""Triggers backup restore mapper jobs and returns their ids."""
backup_id = self.request.get('backup_id')
if not backup_id:
return [('error', 'Unspecified Backup.')]
backup = db.get(db.Key(backup_id))
if not backup:
return [('error', 'Invalid Backup id.')]
if backup.gs_handle:
if not is_readable_gs_handle(backup.gs_handle):
return [('error', 'Backup not readable')]
kinds = set(self.request.get_all('kind'))
if not (backup.blob_files or kinds):
return [('error', 'No kinds were selected')]
backup_kinds = set(backup.kinds)
difference = kinds.difference(backup_kinds)
if difference:
return [('error', 'Backup does not have kind[s] %s' %
', '.join(difference))]
if self.request.get('run_as_a_service', False):
if backup.filesystem != files.GS_FILESYSTEM:
return [('error',
'Restore as a service is only available for GS backups')]
datastore_admin_service = services_client.DatastoreAdminClient()
description = 'Remote restore job: %s' % backup.name
remote_job_id = datastore_admin_service.restore_from_backup(
description, backup_id, list(kinds))
return [('remote_job', remote_job_id)]
queue = self.request.get('queue')
job_name = 'datastore_backup_restore_%s' % re.sub(r'[^\w]', '_',
backup.name)
job_operation = None
try:
operation_name = 'Restoring %s from backup: %s' % (
', '.join(kinds) if kinds else 'all', backup.name)
job_operation = utils.StartOperation(operation_name)
mapper_params = self._GetBasicMapperParams()
kinds = list(kinds) if len(backup_kinds) != len(kinds) else []
mapper_params['files'] = get_backup_files(backup, kinds)
mapper_params['kind_filter'] = kinds
mapper_params['original_app'] = backup.original_app
mapreduce_params = {
'backup_name': backup.name,
'force_ops_writes': True,
}
shard_count = min(max(utils.MAPREDUCE_MIN_SHARDS,
len(mapper_params['files'])),
utils.MAPREDUCE_MAX_SHARDS)
job = utils.StartMap(job_operation.key(), job_name,
self.BACKUP_RESTORE_HANDLER, self.INPUT_READER, None,
mapper_params, mapreduce_params, queue_name=queue,
shard_count=shard_count)
return [('job', job)]
except Exception:
logging.exception('Failed to start a restore from backup job "%s".',
job_name)
if job_operation:
job_operation.status = utils.DatastoreAdminOperation.STATUS_FAILED
job_operation.put(force_writes=True)
raise
class DoBackupImportHandler(BaseDoHandler):
"""Handler to deal with datastore admin requests to import backup info."""
SUFFIX = 'import_backup.do'
def get(self):
self.post()
def post(self):
"""Handler for post requests to datastore_admin/import_backup.do.
    The import is executed and the user is redirected to the base-path handler.
"""
gs_handle = self.request.get('gs_handle')
token = self.request.get('xsrf_token')
    error = None
    backup_id = None  # Set on success below; checked before rendering restore.
if gs_handle and utils.ValidateXsrfToken(token, XSRF_ACTION):
try:
bucket_name, path = parse_gs_handle(gs_handle)
file_content = get_gs_object(bucket_name, path)
entities = parse_backup_info_file(file_content)
original_backup_info = entities.next()
entity = datastore.Entity(BackupInformation.kind())
entity.update(original_backup_info)
backup_info = BackupInformation.from_entity(entity)
if original_backup_info.key().app() != os.getenv('APPLICATION_ID'):
backup_info.original_app = original_backup_info.key().app()
def tx():
backup_info.put(force_writes=True)
kind_files_models = []
for entity in entities:
kind_files = backup_info.create_kind_backup_files(
entity.key().name(), entity['files'])
kind_files_models.append(kind_files)
db.put(kind_files_models, force_writes=True)
db.run_in_transaction(tx)
backup_id = str(backup_info.key())
except Exception, e:
logging.exception('Failed to Import datastore backup information.')
error = e.message
if error:
self.SendRedirect(params=[('error', error)])
elif self.request.get('Restore'):
ConfirmRestoreFromBackupHandler.Render(
self, default_backup_id=backup_id,
default_delete_backup_after_restore=True)
else:
self.SendRedirect()
class BackupInformation(db.Model):
"""An entity to keep information on a datastore backup."""
name = db.StringProperty()
kinds = db.StringListProperty()
namespaces = db.StringListProperty()
filesystem = db.StringProperty(default=files.BLOBSTORE_FILESYSTEM)
start_time = db.DateTimeProperty(auto_now_add=True)
active_jobs = db.StringListProperty()
completed_jobs = db.StringListProperty()
complete_time = db.DateTimeProperty(default=None)
blob_files = db.StringListProperty()
original_app = db.StringProperty(default=None)
gs_handle = db.TextProperty(default=None)
destination = db.StringProperty()
@classmethod
def kind(cls):
return utils.BACKUP_INFORMATION_KIND
@classmethod
def name_exists(cls, backup_name):
query = BackupInformation.all(keys_only=True)
query.filter('name =', backup_name)
return query.get() is not None
def create_kind_backup_files_key(self, kind):
return db.Key.from_path(KindBackupFiles.kind(), kind, parent=self.key())
def create_kind_backup_files(self, kind, kind_files):
return KindBackupFiles(key=self.create_kind_backup_files_key(kind),
files=kind_files)
def get_kind_backup_files(self, kinds=None):
if kinds:
return db.get([self.create_kind_backup_files_key(kind) for kind in kinds])
else:
return KindBackupFiles.all().ancestor(self).run()
class KindBackupFiles(db.Model):
"""An entity to keep files information per kind for a backup.
A key for this model should created using kind as a name and the associated
BackupInformation as a parent.
"""
files = db.StringListProperty(indexed=False)
@property
def backup_kind(self):
return self.key().name()
@classmethod
def kind(cls):
return utils.BACKUP_INFORMATION_FILES_KIND
def BackupCompleteHandler(operation, job_id, mapreduce_state):
"""Updates BackupInformation record for a completed mapper job."""
mapreduce_spec = mapreduce_state.mapreduce_spec
filenames = mapreduce_spec.mapper.output_writer_class().get_filenames(
mapreduce_state)
_perform_backup_complete(operation,
job_id,
mapreduce_spec.mapper.params['entity_kind'],
mapreduce_spec.params['backup_info_pk'],
mapreduce_spec.mapper.params.get('gs_bucket_name'),
filenames,
mapreduce_spec.params.get('done_callback_queue'))
@db.transactional
def _perform_backup_complete(
operation, job_id, kind, backup_info_pk, gs_bucket_name, filenames, queue):
backup_info = BackupInformation.get(backup_info_pk)
if backup_info:
if job_id in backup_info.active_jobs:
backup_info.active_jobs.remove(job_id)
backup_info.completed_jobs = list(
set(backup_info.completed_jobs + [job_id]))
if backup_info.filesystem == files.BLOBSTORE_FILESYSTEM:
filenames = drop_empty_files(filenames)
kind_backup_files = backup_info.get_kind_backup_files([kind])[0]
if kind_backup_files:
kind_backup_files.files = list(set(kind_backup_files.files + filenames))
else:
kind_backup_files = backup_info.create_kind_backup_files(kind, filenames)
db.put((backup_info, kind_backup_files), force_writes=True)
if operation.status == utils.DatastoreAdminOperation.STATUS_COMPLETED:
deferred.defer(finalize_backup_info, backup_info.key(),
gs_bucket_name,
_url=config.DEFERRED_PATH,
_queue=queue,
_transactional=True)
else:
logging.warn('BackupInfo was not found for %s', backup_info_pk)
def finalize_backup_info(backup_info_pk, gs_bucket):
"""Finalize the state of BackupInformation and creates info file for GS."""
def get_backup_info():
return BackupInformation.get(backup_info_pk)
backup_info = db.run_in_transaction(get_backup_info)
if backup_info:
complete_time = datetime.datetime.now()
backup_info.complete_time = complete_time
gs_handle = None
if backup_info.filesystem == files.GS_FILESYSTEM:
gs_handle = BackupInfoWriter(gs_bucket).write(backup_info)[0]
def set_backup_info_with_finalize_info():
backup_info = get_backup_info()
backup_info.complete_time = complete_time
backup_info.gs_handle = gs_handle
backup_info.put(force_writes=True)
db.run_in_transaction(set_backup_info_with_finalize_info)
logging.info('Backup %s completed', backup_info.name)
else:
logging.warn('Backup %s could not be found', backup_info_pk)
def parse_backup_info_file(content):
"""Returns entities iterator from a backup_info file content."""
reader = records.RecordsReader(cStringIO.StringIO(content))
version = reader.read()
if version != '1':
raise IOError('Unsupported version')
return (datastore.Entity.FromPb(record) for record in reader)
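# Editor-added sketch (hypothetical, not part of the original module): fetches
# a *.backup_info object with the module's get_gs_object helper and walks the
# records that follow the version record.  The first entity is the
# BackupInformation record itself; subsequent entities describe the per-kind
# backup files.
def _example_read_backup_info(bucket_name, backup_info_path):
  content = get_gs_object(bucket_name, backup_info_path)
  entities = parse_backup_info_file(content)
  backup_info_entity = entities.next()
  kind_file_entities = list(entities)
  return backup_info_entity, kind_file_entities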
@db.non_transactional
def drop_empty_files(filenames):
"""Deletes empty files and returns filenames minus the deleted ones."""
non_empty_filenames = []
empty_file_keys = []
blobs_info = blobstore.BlobInfo.get(
[files.blobstore.get_blob_key(fn) for fn in filenames])
for filename, blob_info in itertools.izip(filenames, blobs_info):
if blob_info:
if blob_info.size > 0:
non_empty_filenames.append(filename)
else:
empty_file_keys.append(blob_info.key())
blobstore_api.delete(empty_file_keys)
return non_empty_filenames
class BackupInfoWriter(object):
"""A class for writing Datastore backup metadata files."""
def __init__(self, gs_bucket):
"""Construct a BackupInfoWriter.
Args:
gs_bucket: Required string for the target GS bucket.
"""
self.__gs_bucket = gs_bucket
def write(self, backup_info):
"""Write the metadata files for the given backup_info.
    As a side effect, updates the in-memory backup_info entity, setting its
    gs_handle to the backup info filename. This change is not saved to the
    datastore.
Args:
backup_info: Required BackupInformation.
Returns:
A list with Backup info filename followed by Kind info filenames.
"""
fn = self._write_backup_info(backup_info)
return [fn] + self._write_kind_info(backup_info)
def _generate_filename(self, backup_info, suffix):
key_str = str(backup_info.key()).replace('/', '_')
return '/gs/%s/%s%s' % (self.__gs_bucket, key_str, suffix)
def _write_backup_info(self, backup_info):
"""Writes a backup_info_file.
Args:
backup_info: Required BackupInformation.
Returns:
Backup info filename.
"""
filename = self._generate_filename(backup_info, '.backup_info')
backup_info.gs_handle = filename
info_file = files.open(files.gs.create(filename), 'a', exclusive_lock=True)
try:
with records.RecordsWriter(info_file) as writer:
writer.write('1')
writer.write(db.model_to_protobuf(backup_info).SerializeToString())
for kind_files in backup_info.get_kind_backup_files():
writer.write(db.model_to_protobuf(kind_files).SerializeToString())
finally:
info_file.close(finalize=True)
return filename
def _write_kind_info(self, backup_info):
"""Writes type information schema for each kind in backup_info.
Args:
backup_info: Required BackupInformation.
Returns:
A list with all created filenames.
"""
def get_backup_files_tx():
kind_backup_files_list = []
for kind_backup_files in backup_info.get_kind_backup_files():
kind_backup_files_list.append(kind_backup_files)
return kind_backup_files_list
kind_backup_files_list = db.run_in_transaction(get_backup_files_tx)
filenames = []
for kind_backup_files in kind_backup_files_list:
backup = self._create_kind_backup(backup_info, kind_backup_files)
filename = self._generate_filename(
backup_info, '.%s.backup_info' % kind_backup_files.backup_kind)
self._write_kind_backup_info_file(filename, backup)
filenames.append(filename)
return filenames
def _create_kind_backup(self, backup_info, kind_backup_files):
"""Creates and populate a backup_pb2.Backup."""
backup = backup_pb2.Backup()
backup.backup_info.backup_name = backup_info.name
backup.backup_info.start_timestamp = datastore_types.DatetimeToTimestamp(
backup_info.start_time)
backup.backup_info.end_timestamp = datastore_types.DatetimeToTimestamp(
backup_info.complete_time)
kind = kind_backup_files.backup_kind
kind_info = backup.kind_info.add()
kind_info.kind = kind
kind_info.entity_schema.kind = kind
kind_info.file.extend(kind_backup_files.files)
entity_type_info = EntityTypeInfo(kind=kind)
for sharded_aggregation in SchemaAggregationResult.load(
backup_info.key(), kind):
if sharded_aggregation.is_partial:
kind_info.is_partial = True
if sharded_aggregation.entity_type_info:
entity_type_info.merge(sharded_aggregation.entity_type_info)
entity_type_info.populate_entity_schema(kind_info.entity_schema)
return backup
@classmethod
def _write_kind_backup_info_file(cls, filename, backup):
"""Writes a kind backup_info.
Args:
filename: The name of the file to be created as string.
backup: apphosting.ext.datastore_admin.Backup proto.
"""
f = files.open(files.gs.create(filename), 'a', exclusive_lock=True)
try:
f.write(backup.SerializeToString())
finally:
f.close(finalize=True)
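# Editor-added sketch (hypothetical, not part of the original module): typical
# use of BackupInfoWriter once a backup has completed.  The first returned
# filename is the top-level .backup_info file; the rest are the per-kind
# schema files.
def _example_write_backup_metadata(backup_info_key, gs_bucket):
  backup_info = BackupInformation.get(backup_info_key)
  filenames = BackupInfoWriter(gs_bucket).write(backup_info)
  return filenames[0], filenames[1:]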
class PropertyTypeInfo(json_util.JsonMixin):
"""Type information for an entity property."""
def __init__(self, name, is_repeated=False, primitive_types=None,
embedded_entities=None):
"""Construct a PropertyTypeInfo instance.
Args:
name: The name of the property as a string.
is_repeated: A boolean that indicates if the property is repeated.
primitive_types: Optional list of PrimitiveType integer values.
embedded_entities: Optional list of EntityTypeInfo.
"""
self.__name = name
self.__is_repeated = is_repeated
self.__primitive_types = set(primitive_types) if primitive_types else set()
self.__embedded_entities = {}
for entity in embedded_entities or ():
if entity.kind in self.__embedded_entities:
self.__embedded_entities[entity.kind].merge(entity)
else:
self.__embedded_entities[entity.kind] = entity
@property
def name(self):
return self.__name
@property
def is_repeated(self):
return self.__is_repeated
@property
def primitive_types(self):
return self.__primitive_types
def embedded_entities_kind_iter(self):
return self.__embedded_entities.iterkeys()
def get_embedded_entity(self, kind):
return self.__embedded_entities.get(kind)
def merge(self, other):
"""Merge a PropertyTypeInfo with this instance.
Args:
other: Required PropertyTypeInfo to merge.
Returns:
True if anything was changed. False otherwise.
Raises:
ValueError: if property names do not match.
TypeError: if other is not instance of PropertyTypeInfo.
"""
if not isinstance(other, PropertyTypeInfo):
raise TypeError('Expected PropertyTypeInfo, was %r' % (other,))
if other.__name != self.__name:
raise ValueError('Property names mismatch (%s, %s)' %
(self.__name, other.__name))
changed = False
if other.__is_repeated and not self.__is_repeated:
self.__is_repeated = True
changed = True
if not other.__primitive_types.issubset(self.__primitive_types):
self.__primitive_types = self.__primitive_types.union(
other.__primitive_types)
changed = True
for kind, other_embedded_entity in other.__embedded_entities.iteritems():
embedded_entity = self.__embedded_entities.get(kind)
if embedded_entity:
changed = embedded_entity.merge(other_embedded_entity) or changed
else:
self.__embedded_entities[kind] = other_embedded_entity
changed = True
return changed
def populate_entity_schema_field(self, entity_schema):
"""Add an populate a Field to the given entity_schema.
Args:
entity_schema: apphosting.ext.datastore_admin.EntitySchema proto.
"""
if not (self.__primitive_types or self.__embedded_entities):
return
field = entity_schema.field.add()
field.name = self.__name
field_type = field.type.add()
field_type.is_list = self.__is_repeated
field_type.primitive_type.extend(self.__primitive_types)
for embedded_entity in self.__embedded_entities.itervalues():
embedded_entity_schema = field_type.embedded_schema.add()
embedded_entity.populate_entity_schema(embedded_entity_schema)
def to_json(self):
json = dict()
json['name'] = self.__name
json['is_repeated'] = self.__is_repeated
json['primitive_types'] = list(self.__primitive_types)
json['embedded_entities'] = [e.to_json() for e in
self.__embedded_entities.itervalues()]
return json
@classmethod
def from_json(cls, json):
return cls(json['name'], json['is_repeated'], json.get('primitive_types'),
[EntityTypeInfo.from_json(entity_json) for entity_json
in json.get('embedded_entities')])
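# Editor-added sketch (hypothetical, not part of the original module): merging
# two observations of the same property.  After the merge the property is
# marked repeated and carries the union of the observed primitive types.
def _example_merge_property_type_info():
  seen_as_string = PropertyTypeInfo(
      'tags', is_repeated=False,
      primitive_types=[backup_pb2.EntitySchema.STRING])
  seen_as_repeated_text = PropertyTypeInfo(
      'tags', is_repeated=True,
      primitive_types=[backup_pb2.EntitySchema.TEXT])
  changed = seen_as_string.merge(seen_as_repeated_text)
  # changed is True; seen_as_string.is_repeated is now True and
  # seen_as_string.primitive_types is the union of STRING and TEXT.
  return changed, seen_as_string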
class EntityTypeInfo(json_util.JsonMixin):
"""Type information for an entity."""
def __init__(self, kind=None, properties=None):
"""Construct an EntityTypeInfo instance.
Args:
kind: An optional kind name as string.
properties: An optional list of PropertyTypeInfo.
"""
self.__kind = kind
self.__properties = {}
for property_type_info in properties or ():
if property_type_info.name in self.__properties:
self.__properties[property_type_info.name].merge(property_type_info)
else:
self.__properties[property_type_info.name] = property_type_info
@property
def kind(self):
return self.__kind
def properties_name_iter(self):
return self.__properties.iterkeys()
def get_property(self, name):
return self.__properties.get(name)
def merge(self, other):
"""Merge an EntityTypeInfo with this instance.
Args:
other: Required EntityTypeInfo to merge.
Returns:
True if anything was changed. False otherwise.
Raises:
ValueError: if kinds do not match.
TypeError: if other is not instance of EntityTypeInfo.
"""
if not isinstance(other, EntityTypeInfo):
raise TypeError('Expected EntityTypeInfo, was %r' % (other,))
if other.__kind != self.__kind:
raise ValueError('Kinds mismatch (%s, %s)' % (self.__kind, other.__kind))
changed = False
for name, other_property in other.__properties.iteritems():
self_property = self.__properties.get(name)
if self_property:
changed = self_property.merge(other_property) or changed
else:
self.__properties[name] = other_property
changed = True
return changed
def populate_entity_schema(self, entity_schema):
"""Populates the given entity_schema with values from this instance.
Args:
entity_schema: apphosting.ext.datastore_admin.EntitySchema proto.
"""
if self.__kind:
entity_schema.kind = self.__kind
for property_type_info in self.__properties.itervalues():
property_type_info.populate_entity_schema_field(entity_schema)
def to_json(self):
return {
'kind': self.__kind,
'properties': [p.to_json() for p in self.__properties.itervalues()]
}
@classmethod
def from_json(cls, json):
kind = json.get('kind')
properties_json = json.get('properties')
if properties_json:
return cls(kind, [PropertyTypeInfo.from_json(p) for p in properties_json])
else:
return cls(kind)
@classmethod
def create_from_entity_proto(cls, entity_proto):
"""Creates and populates an EntityTypeInfo from an EntityProto."""
properties = [cls.__get_property_type_info(property_proto) for
property_proto in itertools.chain(
entity_proto.property_list(),
entity_proto.raw_property_list())]
kind = utils.get_kind_from_entity_pb(entity_proto)
return cls(kind, properties)
@classmethod
def __get_property_type_info(cls, property_proto):
"""Returns the type mapping for the provided property."""
name = property_proto.name()
is_repeated = bool(property_proto.multiple())
primitive_type = None
entity_type = None
if property_proto.has_meaning():
primitive_type = MEANING_TO_PRIMITIVE_TYPE.get(property_proto.meaning())
if primitive_type is None:
value = property_proto.value()
if value.has_int64value():
primitive_type = backup_pb2.EntitySchema.INTEGER
elif value.has_booleanvalue():
primitive_type = backup_pb2.EntitySchema.BOOLEAN
elif value.has_stringvalue():
if property_proto.meaning() == entity_pb.Property.ENTITY_PROTO:
entity_proto = entity_pb.EntityProto()
try:
entity_proto.ParsePartialFromString(value.stringvalue())
except Exception:
pass
else:
entity_type = EntityTypeInfo.create_from_entity_proto(entity_proto)
else:
primitive_type = backup_pb2.EntitySchema.STRING
elif value.has_doublevalue():
primitive_type = backup_pb2.EntitySchema.FLOAT
elif value.has_pointvalue():
primitive_type = backup_pb2.EntitySchema.GEO_POINT
elif value.has_uservalue():
primitive_type = backup_pb2.EntitySchema.USER
elif value.has_referencevalue():
primitive_type = backup_pb2.EntitySchema.REFERENCE
return PropertyTypeInfo(
name, is_repeated,
(primitive_type,) if primitive_type is not None else None,
(entity_type,) if entity_type else None)
class SchemaAggregationResult(db.Model):
"""Persistent aggregated type information for a kind.
  An instance can be retrieved via the load method or created using the
  create method. An instance aggregates all type information for all seen
  embedded_entities via the merge method and is persisted when needed using
  the model's put method.
"""
entity_type_info = json_util.JsonProperty(
EntityTypeInfo, default=EntityTypeInfo(), indexed=False)
is_partial = db.BooleanProperty(default=False)
def merge(self, other):
"""Merge a SchemaAggregationResult or an EntityTypeInfo with this instance.
Args:
other: Required SchemaAggregationResult or EntityTypeInfo to merge.
Returns:
True if anything was changed. False otherwise.
"""
if self.is_partial:
return False
if isinstance(other, SchemaAggregationResult):
other = other.entity_type_info
return self.entity_type_info.merge(other)
@classmethod
def _get_parent_key(cls, backup_id, kind_name):
return datastore_types.Key.from_path('Kind', kind_name, parent=backup_id)
@classmethod
def create(cls, backup_id, kind_name, shard_id):
"""Create SchemaAggregationResult instance.
Args:
backup_id: Required BackupInformation Key.
kind_name: Required kind name as string.
shard_id: Required shard id as string.
Returns:
A new SchemaAggregationResult instance.
"""
parent = cls._get_parent_key(backup_id, kind_name)
return SchemaAggregationResult(
key_name=shard_id, parent=parent,
entity_type_info=EntityTypeInfo(kind=kind_name))
@classmethod
def load(cls, backup_id, kind_name, shard_id=None):
"""Retrieve SchemaAggregationResult from the Datastore.
Args:
backup_id: Required BackupInformation Key.
kind_name: Required kind name as string.
shard_id: Optional shard id as string.
Returns:
      SchemaAggregationResult iterator, or a single entity if shard_id is not None.
"""
parent = cls._get_parent_key(backup_id, kind_name)
if shard_id:
key = datastore_types.Key.from_path(cls.kind(), shard_id, parent=parent)
return SchemaAggregationResult.get(key)
else:
return db.Query(cls).ancestor(parent).run()
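  # Usage sketch (the kind name is hypothetical): load(backup_key, 'Greeting')
  # iterates every per-shard aggregation for the kind, while
  # load(backup_key, 'Greeting', shard_id='shard-0') fetches the single
  # aggregation entity written by that shard.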
@classmethod
def kind(cls):
return utils.BACKUP_INFORMATION_KIND_TYPE_INFO
class SchemaAggregationPool(object):
"""An MR pool to aggregation type information per kind."""
def __init__(self, backup_id, kind, shard_id):
"""Construct SchemaAggregationPool instance.
Args:
backup_id: Required BackupInformation Key.
kind: Required kind name as string.
shard_id: Required shard id as string.
"""
self.__backup_id = backup_id
self.__kind = kind
self.__shard_id = shard_id
self.__aggregation = SchemaAggregationResult.load(backup_id, kind, shard_id)
if not self.__aggregation:
self.__aggregation = SchemaAggregationResult.create(backup_id, kind,
shard_id)
self.__needs_save = True
else:
self.__needs_save = False
def merge(self, entity_type_info):
"""Merge EntityTypeInfo into aggregated type information."""
if self.__aggregation.merge(entity_type_info):
self.__needs_save = True
def flush(self):
"""Save aggregated type information to the datastore if changed."""
if self.__needs_save:
def update_aggregation_tx():
aggregation = SchemaAggregationResult.load(
self.__backup_id, self.__kind, self.__shard_id)
if aggregation:
if aggregation.merge(self.__aggregation):
aggregation.put(force_writes=True)
self.__aggregation = aggregation
else:
self.__aggregation.put(force_writes=True)
def mark_aggregation_as_partial_tx():
aggregation = SchemaAggregationResult.load(
self.__backup_id, self.__kind, self.__shard_id)
if aggregation is None:
aggregation = SchemaAggregationResult.create(
self.__backup_id, self.__kind, self.__shard_id)
aggregation.is_partial = True
aggregation.put(force_writes=True)
self.__aggregation = aggregation
try:
db.run_in_transaction(update_aggregation_tx)
except apiproxy_errors.RequestTooLargeError:
db.run_in_transaction(mark_aggregation_as_partial_tx)
self.__needs_save = False
class AggregateSchema(op.Operation):
"""An MR Operation to aggregation type information for a kind.
This operation will register an MR pool, SchemaAggregationPool, if
one is not already registered and will invoke the pool's merge operation
per entity. The pool is responsible for keeping a persistent state of
type aggregation using the sharded db model, SchemaAggregationResult.
"""
def __init__(self, entity_proto):
self.__entity_info = EntityTypeInfo.create_from_entity_proto(entity_proto)
def __call__(self, ctx):
pool = ctx.get_pool('schema_aggregation_pool')
if not pool:
backup_id = datastore_types.Key(
context.get().mapreduce_spec.params['backup_info_pk'])
pool = SchemaAggregationPool(
backup_id, self.__entity_info.kind, ctx.shard_id)
ctx.register_pool('schema_aggregation_pool', pool)
pool.merge(self.__entity_info)
class BackupEntity(object):
"""A class which dumps the entity to the writer."""
def map(self, entity_proto):
"""Backup entity map handler.
Args:
entity_proto: An instance of entity_pb.EntityProto.
Yields:
A serialized entity_pb.EntityProto as a string
"""
yield entity_proto.SerializeToString()
yield AggregateSchema(entity_proto)
class RestoreEntity(object):
"""A class which restore the entity to datastore."""
def __init__(self):
self.initialized = False
self.kind_filter = None
self.app_id = None
def initialize(self):
"""Initialize a restore mapper instance."""
if self.initialized:
return
mapper_params = get_mapper_params_from_context()
kind_filter = mapper_params.get('kind_filter')
self.kind_filter = set(kind_filter) if kind_filter else None
original_app = mapper_params.get('original_app')
target_app = os.getenv('APPLICATION_ID')
if original_app and target_app != original_app:
self.app_id = target_app
self.initialized = True
def map(self, record):
"""Restore entity map handler.
Args:
record: A serialized entity_pb.EntityProto.
Yields:
      An operation.db.Put for the mapped entity
"""
self.initialize()
pb = entity_pb.EntityProto(contents=record)
if self.app_id:
utils.FixKeys(pb, self.app_id)
if not self.kind_filter or (
utils.get_kind_from_entity_pb(pb) in self.kind_filter):
yield utils.Put(pb)
if self.app_id:
yield utils.ReserveKey(datastore_types.Key._FromPb(pb.key()))
def get_mapper_params_from_context():
"""Get mapper params from MR context. Split out for ease of testing."""
return context.get().mapreduce_spec.mapper.params
def validate_gs_bucket_name(bucket_name):
"""Validate the format of the given bucket_name.
  Validation rules are based on:
https://developers.google.com/storage/docs/bucketnaming#requirements
Args:
bucket_name: The bucket name to validate.
Raises:
BackupValidationError: If the bucket name is invalid.
"""
if len(bucket_name) > MAX_BUCKET_LEN:
raise BackupValidationError(
'Bucket name length should not be longer than %d' % MAX_BUCKET_LEN)
if len(bucket_name) < MIN_BUCKET_LEN:
raise BackupValidationError(
        'Bucket name length should not be shorter than %d' % MIN_BUCKET_LEN)
if bucket_name.lower().startswith('goog'):
raise BackupValidationError(
'Bucket name should not start with a "goog" prefix')
bucket_elements = bucket_name.split('.')
for bucket_element in bucket_elements:
if len(bucket_element) > MAX_BUCKET_SEGMENT_LEN:
raise BackupValidationError(
'Segment length of bucket name should not be longer than %d' %
MAX_BUCKET_SEGMENT_LEN)
if not re.match(BUCKET_PATTERN, bucket_name):
raise BackupValidationError('Invalid bucket name "%s"' % bucket_name)
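# Illustrative behaviour of the checks above (bucket names are hypothetical):
#   validate_gs_bucket_name('my-backup-bucket')         # passes if it matches BUCKET_PATTERN
#   validate_gs_bucket_name('goog-archive')             # raises: reserved "goog" prefix
#   validate_gs_bucket_name('x' * (MAX_BUCKET_LEN + 1)) # raises: name too long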
def is_accessible_bucket_name(bucket_name):
"""Returns True if the application has access to the specified bucket."""
scope = config.GoogleApiScope('devstorage.read_write')
bucket_url = config.GsBucketURL(bucket_name)
auth_token, _ = app_identity.get_access_token(scope)
result = urlfetch.fetch(bucket_url, method=urlfetch.HEAD, headers={
'Authorization': 'OAuth %s' % auth_token,
'x-goog-api-version': '2'})
return result and result.status_code == 200
def verify_bucket_writable(bucket_name):
"""Verify the application can write to the specified bucket.
Args:
bucket_name: The bucket to verify.
Raises:
BackupValidationError: If the bucket is not writable.
"""
path = '/gs/%s' % bucket_name
try:
file_names = files.gs.listdir(path,
{'prefix': TEST_WRITE_FILENAME_PREFIX,
'max_keys': MAX_KEYS_LIST_SIZE})
except (files.InvalidParameterError, files.PermissionDeniedError):
raise BackupValidationError('Bucket "%s" not accessible' % bucket_name)
except files.InvalidFileNameError:
raise BackupValidationError('Bucket "%s" does not exist' % bucket_name)
file_name = '%s/%s.tmp' % (path, TEST_WRITE_FILENAME_PREFIX)
file_name_try = 0
while True:
if file_name_try >= MAX_TEST_FILENAME_TRIES:
return
if file_name not in file_names:
break
gen = random.randint(0, 9999)
file_name = '%s/%s_%s.tmp' % (path, TEST_WRITE_FILENAME_PREFIX, gen)
file_name_try += 1
try:
test_file = files.open(files.gs.create(file_name), 'a', exclusive_lock=True)
try:
test_file.write('test')
finally:
test_file.close(finalize=True)
except files.PermissionDeniedError:
raise BackupValidationError('Bucket "%s" is not writable' % bucket_name)
try:
files.delete(file_name)
except (files.InvalidArgumentError, files.InvalidFileNameError, IOError):
logging.warn('Failed to delete test file %s', file_name)
def is_readable_gs_handle(gs_handle):
"""Return True if the application can read the specified gs_handle."""
try:
with files.open(gs_handle) as bak_file:
bak_file.read(1)
except files.PermissionDeniedError:
return False
return True
def parse_gs_handle(gs_handle):
"""Splits [/gs/]?bucket_name[/folder]*[/file]? to (bucket_name, path | '')."""
if gs_handle.startswith('/'):
filesystem = gs_handle[1:].split('/', 1)[0]
if filesystem == 'gs':
gs_handle = gs_handle[4:]
else:
raise BackupValidationError('Unsupported filesystem: %s' % filesystem)
tokens = gs_handle.split('/', 1)
return (tokens[0], '') if len(tokens) == 1 else tuple(tokens)
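# Examples of the parsing above (paths are illustrative):
#   parse_gs_handle('/gs/my_bucket/backups/file')  ->  ('my_bucket', 'backups/file')
#   parse_gs_handle('my_bucket')                   ->  ('my_bucket', '')
#   parse_gs_handle('/nfs/my_bucket')              ->  raises BackupValidationError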
def validate_and_canonicalize_gs_bucket(gs_bucket_name):
bucket_name, path = parse_gs_handle(gs_bucket_name)
gs_bucket_name = ('%s/%s' % (bucket_name, path)).rstrip('/')
validate_gs_bucket_name(bucket_name)
verify_bucket_writable(bucket_name)
return gs_bucket_name
def list_bucket_files(bucket_name, prefix, max_keys=1000):
"""Returns a listing of of a bucket that matches the given prefix."""
scope = config.GoogleApiScope('devstorage.read_only')
bucket_url = config.GsBucketURL(bucket_name)
url = bucket_url + '?'
query = [('max-keys', max_keys)]
if prefix:
query.append(('prefix', prefix))
url += urllib.urlencode(query)
auth_token, _ = app_identity.get_access_token(scope)
result = urlfetch.fetch(url, method=urlfetch.GET, headers={
'Authorization': 'OAuth %s' % auth_token,
'x-goog-api-version': '2'})
if result and result.status_code == 200:
doc = xml.dom.minidom.parseString(result.content)
return [node.childNodes[0].data for node in doc.getElementsByTagName('Key')]
raise BackupValidationError('Request to Google Cloud Storage failed')
def get_gs_object(bucket_name, path):
"""Returns a listing of of a bucket that matches the given prefix."""
scope = config.GoogleApiScope('devstorage.read_only')
bucket_url = config.GsBucketURL(bucket_name)
url = bucket_url + path
auth_token, _ = app_identity.get_access_token(scope)
result = urlfetch.fetch(url, method=urlfetch.GET, headers={
'Authorization': 'OAuth %s' % auth_token,
'x-goog-api-version': '2'})
if result and result.status_code == 200:
return result.content
if result and result.status_code == 403:
raise BackupValidationError(
'Requested path %s is not accessible/access denied' % url)
if result and result.status_code == 404:
raise BackupValidationError('Requested path %s was not found' % url)
raise BackupValidationError('Error encountered accessing requested path %s' %
url)
def get_queue_names(app_id=None, max_rows=100):
"""Returns a list with all non-special queue names for app_id."""
rpc = apiproxy_stub_map.UserRPC('taskqueue')
request = taskqueue_service_pb.TaskQueueFetchQueuesRequest()
response = taskqueue_service_pb.TaskQueueFetchQueuesResponse()
if app_id:
request.set_app_id(app_id)
request.set_max_rows(max_rows)
queues = ['default']
try:
rpc.make_call('FetchQueues', request, response)
rpc.check_success()
for queue in response.queue_list():
if (queue.mode() == taskqueue_service_pb.TaskQueueMode.PUSH and
not queue.queue_name().startswith('__') and
queue.queue_name() != 'default'):
queues.append(queue.queue_name())
except Exception:
logging.exception('Failed to get queue names.')
return queues
def handlers_list(base_path):
return [
(r'%s/%s' % (base_path, BackupLinkHandler.SUFFIX),
BackupLinkHandler),
(r'%s/%s' % (base_path, ConfirmBackupHandler.SUFFIX),
ConfirmBackupHandler),
(r'%s/%s' % (base_path, DoBackupHandler.SUFFIX), DoBackupHandler),
(r'%s/%s' % (base_path, DoBackupRestoreHandler.SUFFIX),
DoBackupRestoreHandler),
(r'%s/%s' % (base_path, DoBackupDeleteHandler.SUFFIX),
DoBackupDeleteHandler),
(r'%s/%s' % (base_path, DoBackupAbortHandler.SUFFIX),
DoBackupAbortHandler),
(r'%s/%s' % (base_path, DoBackupImportHandler.SUFFIX),
DoBackupImportHandler),
]
| {
"content_hash": "a07e6feaa83bd6513b4de7a2ca78e259",
"timestamp": "",
"source": "github",
"line_count": 1862,
"max_line_length": 88,
"avg_line_length": 35.5171858216971,
"alnum_prop": 0.6635114088276655,
"repo_name": "levibostian/myBlanky",
"id": "849d1de19ae235cb9e09654e657030af1524fa6e",
"size": "66739",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "googleAppEngine/google/appengine/ext/datastore_admin/backup_handler.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "29352"
},
{
"name": "JavaScript",
"bytes": "305206"
},
{
"name": "PHP",
"bytes": "4350"
},
{
"name": "Python",
"bytes": "11679977"
}
],
"symlink_target": ""
} |
"""
Base class for gdb-remote test cases.
"""
from __future__ import print_function
import errno
import os
import os.path
import platform
import random
import re
import select
import signal
import socket
import subprocess
import sys
import tempfile
import time
from lldbsuite.test import configuration
from lldbsuite.test.lldbtest import *
from lldbgdbserverutils import *
import logging
class _ConnectionRefused(IOError):
pass
class GdbRemoteTestCaseBase(TestBase):
NO_DEBUG_INFO_TESTCASE = True
_TIMEOUT_SECONDS = 120
_GDBREMOTE_KILL_PACKET = "$k#6b"
# Start the inferior separately, attach to the inferior on the stub
# command line.
_STARTUP_ATTACH = "attach"
# Start the inferior separately, start the stub without attaching, allow
# the test to attach to the inferior however it wants (e.g. $vAttach;pid).
_STARTUP_ATTACH_MANUALLY = "attach_manually"
# Start the stub, and launch the inferior with an $A packet via the
# initial packet stream.
_STARTUP_LAUNCH = "launch"
# GDB Signal numbers that are not target-specific used for common
# exceptions
TARGET_EXC_BAD_ACCESS = 0x91
TARGET_EXC_BAD_INSTRUCTION = 0x92
TARGET_EXC_ARITHMETIC = 0x93
TARGET_EXC_EMULATION = 0x94
TARGET_EXC_SOFTWARE = 0x95
TARGET_EXC_BREAKPOINT = 0x96
_verbose_log_handler = None
_log_formatter = logging.Formatter(
fmt='%(asctime)-15s %(levelname)-8s %(message)s')
def setUpBaseLogging(self):
self.logger = logging.getLogger(__name__)
if len(self.logger.handlers) > 0:
return # We have set up this handler already
self.logger.propagate = False
self.logger.setLevel(logging.DEBUG)
# log all warnings to stderr
handler = logging.StreamHandler()
handler.setLevel(logging.WARNING)
handler.setFormatter(self._log_formatter)
self.logger.addHandler(handler)
def isVerboseLoggingRequested(self):
        # We will report our detailed logs if the user requested that the
        # "gdb-remote" channel be logged.
return any(("gdb-remote" in channel)
for channel in lldbtest_config.channels)
def setUp(self):
TestBase.setUp(self)
self.setUpBaseLogging()
self.debug_monitor_extra_args = []
self._pump_queues = socket_packet_pump.PumpQueues()
if self.isVerboseLoggingRequested():
# If requested, full logs go to a log file
self._verbose_log_handler = logging.FileHandler(
self.log_basename + "-host.log")
self._verbose_log_handler.setFormatter(self._log_formatter)
self._verbose_log_handler.setLevel(logging.DEBUG)
self.logger.addHandler(self._verbose_log_handler)
self.test_sequence = GdbRemoteTestSequence(self.logger)
self.set_inferior_startup_launch()
self.port = self.get_next_port()
self.named_pipe_path = None
self.named_pipe = None
self.named_pipe_fd = None
self.stub_sends_two_stop_notifications_on_kill = False
if configuration.lldb_platform_url:
if configuration.lldb_platform_url.startswith('unix-'):
url_pattern = '(.+)://\[?(.+?)\]?/.*'
else:
url_pattern = '(.+)://(.+):\d+'
scheme, host = re.match(
url_pattern, configuration.lldb_platform_url).groups()
if configuration.lldb_platform_name == 'remote-android' and host != 'localhost':
self.stub_device = host
self.stub_hostname = 'localhost'
else:
self.stub_device = None
self.stub_hostname = host
else:
self.stub_hostname = "localhost"
def tearDown(self):
self._pump_queues.verify_queues_empty()
self.logger.removeHandler(self._verbose_log_handler)
self._verbose_log_handler = None
TestBase.tearDown(self)
def getLocalServerLogFile(self):
return self.log_basename + "-server.log"
def setUpServerLogging(self, is_llgs):
if len(lldbtest_config.channels) == 0:
return # No logging requested
if lldb.remote_platform:
log_file = lldbutil.join_remote_paths(
lldb.remote_platform.GetWorkingDirectory(), "server.log")
else:
log_file = self.getLocalServerLogFile()
if is_llgs:
self.debug_monitor_extra_args.append("--log-file=" + log_file)
self.debug_monitor_extra_args.append(
"--log-channels={}".format(":".join(lldbtest_config.channels)))
else:
self.debug_monitor_extra_args = [
"--log-file=" + log_file, "--log-flags=0x800000"]
def get_next_port(self):
return 12000 + random.randint(0, 3999)
def reset_test_sequence(self):
self.test_sequence = GdbRemoteTestSequence(self.logger)
def create_named_pipe(self):
# Create a temp dir and name for a pipe.
temp_dir = tempfile.mkdtemp()
named_pipe_path = os.path.join(temp_dir, "stub_port_number")
# Create the named pipe.
os.mkfifo(named_pipe_path)
# Open the read side of the pipe in non-blocking mode. This will
# return right away, ready or not.
named_pipe_fd = os.open(named_pipe_path, os.O_RDONLY | os.O_NONBLOCK)
        # Wrap the read side in a file object. Because the underlying fd was
        # opened with O_NONBLOCK, reads return immediately (possibly with no
        # data) rather than blocking until the write side produces something.
named_pipe = os.fdopen(named_pipe_fd, "r")
self.assertIsNotNone(named_pipe)
def shutdown_named_pipe():
# Close the pipe.
try:
named_pipe.close()
except:
print("failed to close named pipe")
# Delete the pipe.
try:
os.remove(named_pipe_path)
except:
print("failed to delete named pipe: {}".format(named_pipe_path))
# Delete the temp directory.
try:
os.rmdir(temp_dir)
except:
print(
"failed to delete temp dir: {}, directory contents: '{}'".format(
temp_dir, os.listdir(temp_dir)))
# Add the shutdown hook to clean up the named pipe.
self.addTearDownHook(shutdown_named_pipe)
# Clear the port so the stub selects a port number.
self.port = 0
return (named_pipe_path, named_pipe, named_pipe_fd)
def get_stub_port_from_named_socket(self, read_timeout_seconds=5):
# Wait for something to read with a max timeout.
(ready_readers, _, _) = select.select(
[self.named_pipe_fd], [], [], read_timeout_seconds)
self.assertIsNotNone(
ready_readers,
"write side of pipe has not written anything - stub isn't writing to pipe.")
self.assertNotEqual(
len(ready_readers),
0,
"write side of pipe has not written anything - stub isn't writing to pipe.")
# Read the port from the named pipe.
stub_port_raw = self.named_pipe.read()
self.assertIsNotNone(stub_port_raw)
self.assertNotEqual(
len(stub_port_raw),
0,
"no content to read on pipe")
# Trim null byte, convert to int.
stub_port_raw = stub_port_raw[:-1]
stub_port = int(stub_port_raw)
self.assertTrue(stub_port > 0)
return stub_port
def init_llgs_test(self, use_named_pipe=True):
if lldb.remote_platform:
# Remote platforms don't support named pipe based port negotiation
use_named_pipe = False
# Grab the ppid from /proc/[shell pid]/stat
err, retcode, shell_stat = self.run_platform_command(
"cat /proc/$$/stat")
self.assertTrue(
err.Success() and retcode == 0,
"Failed to read file /proc/$$/stat: %s, retcode: %d" %
(err.GetCString(),
retcode))
# [pid] ([executable]) [state] [*ppid*]
pid = re.match(r"^\d+ \(.+\) . (\d+)", shell_stat).group(1)
err, retcode, ls_output = self.run_platform_command(
"ls -l /proc/%s/exe" % pid)
self.assertTrue(
err.Success() and retcode == 0,
"Failed to read file /proc/%s/exe: %s, retcode: %d" %
(pid,
err.GetCString(),
retcode))
exe = ls_output.split()[-1]
# If the binary has been deleted, the link name has " (deleted)" appended.
# Remove if it's there.
self.debug_monitor_exe = re.sub(r' \(deleted\)$', '', exe)
else:
self.debug_monitor_exe = get_lldb_server_exe()
if not self.debug_monitor_exe:
self.skipTest("lldb-server exe not found")
self.debug_monitor_extra_args = ["gdbserver"]
self.setUpServerLogging(is_llgs=True)
if use_named_pipe:
(self.named_pipe_path, self.named_pipe,
self.named_pipe_fd) = self.create_named_pipe()
def init_debugserver_test(self, use_named_pipe=True):
self.debug_monitor_exe = get_debugserver_exe()
if not self.debug_monitor_exe:
self.skipTest("debugserver exe not found")
self.setUpServerLogging(is_llgs=False)
if use_named_pipe:
(self.named_pipe_path, self.named_pipe,
self.named_pipe_fd) = self.create_named_pipe()
# The debugserver stub has a race on handling the 'k' command, so it sends an X09 right away, then sends the real X notification
# when the process truly dies.
self.stub_sends_two_stop_notifications_on_kill = True
def forward_adb_port(self, source, target, direction, device):
adb = ['adb'] + (['-s', device] if device else []) + [direction]
def remove_port_forward():
subprocess.call(adb + ["--remove", "tcp:%d" % source])
subprocess.call(adb + ["tcp:%d" % source, "tcp:%d" % target])
self.addTearDownHook(remove_port_forward)
def _verify_socket(self, sock):
# Normally, when the remote stub is not ready, we will get ECONNREFUSED during the
# connect() attempt. However, due to the way how ADB forwarding works, on android targets
# the connect() will always be successful, but the connection will be immediately dropped
# if ADB could not connect on the remote side. This function tries to detect this
# situation, and report it as "connection refused" so that the upper layers attempt the
# connection again.
triple = self.dbg.GetSelectedPlatform().GetTriple()
if not re.match(".*-.*-.*-android", triple):
return # Not android.
can_read, _, _ = select.select([sock], [], [], 0.1)
if sock not in can_read:
return # Data is not available, but the connection is alive.
if len(sock.recv(1, socket.MSG_PEEK)) == 0:
raise _ConnectionRefused() # Got EOF, connection dropped.
def create_socket(self):
sock = socket.socket()
logger = self.logger
triple = self.dbg.GetSelectedPlatform().GetTriple()
if re.match(".*-.*-.*-android", triple):
self.forward_adb_port(
self.port,
self.port,
"forward",
self.stub_device)
logger.info(
"Connecting to debug monitor on %s:%d",
self.stub_hostname,
self.port)
connect_info = (self.stub_hostname, self.port)
try:
sock.connect(connect_info)
except socket.error as serr:
if serr.errno == errno.ECONNREFUSED:
raise _ConnectionRefused()
raise serr
def shutdown_socket():
if sock:
try:
# send the kill packet so lldb-server shuts down gracefully
sock.sendall(GdbRemoteTestCaseBase._GDBREMOTE_KILL_PACKET)
except:
logger.warning(
"failed to send kill packet to debug monitor: {}; ignoring".format(
sys.exc_info()[0]))
try:
sock.close()
except:
logger.warning(
"failed to close socket to debug monitor: {}; ignoring".format(
sys.exc_info()[0]))
self.addTearDownHook(shutdown_socket)
self._verify_socket(sock)
return sock
def set_inferior_startup_launch(self):
self._inferior_startup = self._STARTUP_LAUNCH
def set_inferior_startup_attach(self):
self._inferior_startup = self._STARTUP_ATTACH
def set_inferior_startup_attach_manually(self):
self._inferior_startup = self._STARTUP_ATTACH_MANUALLY
def get_debug_monitor_command_line_args(self, attach_pid=None):
if lldb.remote_platform:
commandline_args = self.debug_monitor_extra_args + \
["*:{}".format(self.port)]
else:
commandline_args = self.debug_monitor_extra_args + \
["127.0.0.1:{}".format(self.port)]
if attach_pid:
commandline_args += ["--attach=%d" % attach_pid]
if self.named_pipe_path:
commandline_args += ["--named-pipe", self.named_pipe_path]
return commandline_args
def launch_debug_monitor(self, attach_pid=None, logfile=None):
# Create the command line.
commandline_args = self.get_debug_monitor_command_line_args(
attach_pid=attach_pid)
# Start the server.
server = self.spawnSubprocess(
self.debug_monitor_exe,
commandline_args,
install_remote=False)
self.addTearDownHook(self.cleanupSubprocesses)
self.assertIsNotNone(server)
# If we're receiving the stub's listening port from the named pipe, do
# that here.
if self.named_pipe:
self.port = self.get_stub_port_from_named_socket()
return server
def connect_to_debug_monitor(self, attach_pid=None):
if self.named_pipe:
# Create the stub.
server = self.launch_debug_monitor(attach_pid=attach_pid)
self.assertIsNotNone(server)
def shutdown_debug_monitor():
try:
server.terminate()
except:
logger.warning(
"failed to terminate server for debug monitor: {}; ignoring".format(
sys.exc_info()[0]))
self.addTearDownHook(shutdown_debug_monitor)
# Schedule debug monitor to be shut down during teardown.
logger = self.logger
# Attach to the stub and return a socket opened to it.
self.sock = self.create_socket()
return server
        # We're using a random port algorithm to try not to collide with other
        # ports, and retry a maximum number of times.
attempts = 0
MAX_ATTEMPTS = 20
while attempts < MAX_ATTEMPTS:
server = self.launch_debug_monitor(attach_pid=attach_pid)
# Schedule debug monitor to be shut down during teardown.
logger = self.logger
def shutdown_debug_monitor():
try:
server.terminate()
except:
logger.warning(
"failed to terminate server for debug monitor: {}; ignoring".format(
sys.exc_info()[0]))
self.addTearDownHook(shutdown_debug_monitor)
connect_attemps = 0
MAX_CONNECT_ATTEMPTS = 10
while connect_attemps < MAX_CONNECT_ATTEMPTS:
# Create a socket to talk to the server
try:
logger.info("Connect attempt %d", connect_attemps + 1)
self.sock = self.create_socket()
return server
except _ConnectionRefused as serr:
# Ignore, and try again.
pass
time.sleep(0.5)
connect_attemps += 1
# We should close the server here to be safe.
server.terminate()
# Increment attempts.
print(
"connect to debug monitor on port %d failed, attempt #%d of %d" %
(self.port, attempts + 1, MAX_ATTEMPTS))
attempts += 1
# And wait a random length of time before next attempt, to avoid
# collisions.
time.sleep(random.randint(1, 5))
# Now grab a new port number.
self.port = self.get_next_port()
raise Exception(
"failed to create a socket to the launched debug monitor after %d tries" %
attempts)
def launch_process_for_attach(
self,
inferior_args=None,
sleep_seconds=3,
exe_path=None):
# We're going to start a child process that the debug monitor stub can later attach to.
# This process needs to be started so that it just hangs around for a while. We'll
# have it sleep.
if not exe_path:
exe_path = self.getBuildArtifact("a.out")
args = []
if inferior_args:
args.extend(inferior_args)
if sleep_seconds:
args.append("sleep:%d" % sleep_seconds)
inferior = self.spawnSubprocess(exe_path, args)
def shutdown_process_for_attach():
try:
inferior.terminate()
except:
logger.warning(
"failed to terminate inferior process for attach: {}; ignoring".format(
sys.exc_info()[0]))
self.addTearDownHook(shutdown_process_for_attach)
return inferior
def prep_debug_monitor_and_inferior(
self,
inferior_args=None,
inferior_sleep_seconds=3,
inferior_exe_path=None):
"""Prep the debug monitor, the inferior, and the expected packet stream.
Handle the separate cases of using the debug monitor in attach-to-inferior mode
and in launch-inferior mode.
For attach-to-inferior mode, the inferior process is first started, then
the debug monitor is started in attach to pid mode (using --attach on the
stub command line), and the no-ack-mode setup is appended to the packet
stream. The packet stream is not yet executed, ready to have more expected
packet entries added to it.
        For launch-inferior mode, the stub is first started, then no-ack mode is
        set up on the expected packet stream, then the verified launch packets are added
to the expected socket stream. The packet stream is not yet executed, ready
to have more expected packet entries added to it.
The return value is:
{inferior:<inferior>, server:<server>}
"""
inferior = None
attach_pid = None
if self._inferior_startup == self._STARTUP_ATTACH or self._inferior_startup == self._STARTUP_ATTACH_MANUALLY:
# Launch the process that we'll use as the inferior.
inferior = self.launch_process_for_attach(
inferior_args=inferior_args,
sleep_seconds=inferior_sleep_seconds,
exe_path=inferior_exe_path)
self.assertIsNotNone(inferior)
self.assertTrue(inferior.pid > 0)
if self._inferior_startup == self._STARTUP_ATTACH:
# In this case, we want the stub to attach via the command
# line, so set the command line attach pid here.
attach_pid = inferior.pid
if self._inferior_startup == self._STARTUP_LAUNCH:
# Build launch args
if not inferior_exe_path:
inferior_exe_path = self.getBuildArtifact("a.out")
if lldb.remote_platform:
remote_path = lldbutil.append_to_process_working_directory(self,
os.path.basename(inferior_exe_path))
remote_file_spec = lldb.SBFileSpec(remote_path, False)
err = lldb.remote_platform.Install(lldb.SBFileSpec(
inferior_exe_path, True), remote_file_spec)
if err.Fail():
raise Exception(
"remote_platform.Install('%s', '%s') failed: %s" %
(inferior_exe_path, remote_path, err))
inferior_exe_path = remote_path
launch_args = [inferior_exe_path]
if inferior_args:
launch_args.extend(inferior_args)
# Launch the debug monitor stub, attaching to the inferior.
server = self.connect_to_debug_monitor(attach_pid=attach_pid)
self.assertIsNotNone(server)
# Build the expected protocol stream
self.add_no_ack_remote_stream()
if self._inferior_startup == self._STARTUP_LAUNCH:
self.add_verified_launch_packets(launch_args)
return {"inferior": inferior, "server": server}
def expect_socket_recv(
self,
sock,
expected_content_regex,
timeout_seconds):
response = ""
timeout_time = time.time() + timeout_seconds
while not expected_content_regex.match(
response) and time.time() < timeout_time:
can_read, _, _ = select.select([sock], [], [], timeout_seconds)
if can_read and sock in can_read:
recv_bytes = sock.recv(4096)
if recv_bytes:
response += recv_bytes
self.assertTrue(expected_content_regex.match(response))
def expect_socket_send(self, sock, content, timeout_seconds):
request_bytes_remaining = content
timeout_time = time.time() + timeout_seconds
while len(request_bytes_remaining) > 0 and time.time() < timeout_time:
_, can_write, _ = select.select([], [sock], [], timeout_seconds)
if can_write and sock in can_write:
written_byte_count = sock.send(request_bytes_remaining)
request_bytes_remaining = request_bytes_remaining[
written_byte_count:]
self.assertEqual(len(request_bytes_remaining), 0)
def do_handshake(self, stub_socket, timeout_seconds=5):
# Write the ack.
self.expect_socket_send(stub_socket, "+", timeout_seconds)
# Send the start no ack mode packet.
NO_ACK_MODE_REQUEST = "$QStartNoAckMode#b0"
bytes_sent = stub_socket.send(NO_ACK_MODE_REQUEST)
self.assertEqual(bytes_sent, len(NO_ACK_MODE_REQUEST))
# Receive the ack and "OK"
self.expect_socket_recv(stub_socket, re.compile(
r"^\+\$OK#[0-9a-fA-F]{2}$"), timeout_seconds)
# Send the final ack.
self.expect_socket_send(stub_socket, "+", timeout_seconds)
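    # The handshake above, as raw bytes on the socket ("9a" is the checksum of "OK"):
    #   send: "+"                     ack any pending response
    #   send: "$QStartNoAckMode#b0"   request no-ack mode
    #   recv: "+$OK#9a"               stub acks and agrees
    #   send: "+"                     final ack before no-ack mode takes effect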
def add_no_ack_remote_stream(self):
self.test_sequence.add_log_lines(
["read packet: +",
"read packet: $QStartNoAckMode#b0",
"send packet: +",
"send packet: $OK#9a",
"read packet: +"],
True)
def add_verified_launch_packets(self, launch_args):
self.test_sequence.add_log_lines(
["read packet: %s" % build_gdbremote_A_packet(launch_args),
"send packet: $OK#00",
"read packet: $qLaunchSuccess#a5",
"send packet: $OK#00"],
True)
def add_thread_suffix_request_packets(self):
self.test_sequence.add_log_lines(
["read packet: $QThreadSuffixSupported#e4",
"send packet: $OK#00",
], True)
def add_process_info_collection_packets(self):
self.test_sequence.add_log_lines(
["read packet: $qProcessInfo#dc",
{"direction": "send", "regex": r"^\$(.+)#[0-9a-fA-F]{2}$", "capture": {1: "process_info_raw"}}],
True)
_KNOWN_PROCESS_INFO_KEYS = [
"pid",
"parent-pid",
"real-uid",
"real-gid",
"effective-uid",
"effective-gid",
"cputype",
"cpusubtype",
"ostype",
"triple",
"vendor",
"endian",
"elf_abi",
"ptrsize"
]
def parse_process_info_response(self, context):
# Ensure we have a process info response.
self.assertIsNotNone(context)
process_info_raw = context.get("process_info_raw")
self.assertIsNotNone(process_info_raw)
# Pull out key:value; pairs.
process_info_dict = {
match.group(1): match.group(2) for match in re.finditer(
r"([^:]+):([^;]+);", process_info_raw)}
# Validate keys are known.
for (key, val) in list(process_info_dict.items()):
self.assertTrue(key in self._KNOWN_PROCESS_INFO_KEYS)
self.assertIsNotNone(val)
return process_info_dict
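    # Illustrative qProcessInfo payload (values are made up):
    #   "pid:3e9;ptrsize:8;endian:little;ostype:linux;"
    # parses to {'pid': '3e9', 'ptrsize': '8', 'endian': 'little', 'ostype': 'linux'};
    # every key must appear in _KNOWN_PROCESS_INFO_KEYS.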
def add_register_info_collection_packets(self):
self.test_sequence.add_log_lines(
[{"type": "multi_response", "query": "qRegisterInfo", "append_iteration_suffix": True,
"end_regex": re.compile(r"^\$(E\d+)?#[0-9a-fA-F]{2}$"),
"save_key": "reg_info_responses"}],
True)
def parse_register_info_packets(self, context):
"""Return an array of register info dictionaries, one per register info."""
reg_info_responses = context.get("reg_info_responses")
self.assertIsNotNone(reg_info_responses)
# Parse register infos.
return [parse_reg_info_response(reg_info_response)
for reg_info_response in reg_info_responses]
def expect_gdbremote_sequence(self, timeout_seconds=None):
if not timeout_seconds:
timeout_seconds = self._TIMEOUT_SECONDS
return expect_lldb_gdbserver_replay(
self,
self.sock,
self.test_sequence,
self._pump_queues,
timeout_seconds,
self.logger)
_KNOWN_REGINFO_KEYS = [
"name",
"alt-name",
"bitsize",
"offset",
"encoding",
"format",
"set",
"gcc",
"ehframe",
"dwarf",
"generic",
"container-regs",
"invalidate-regs",
"dynamic_size_dwarf_expr_bytes",
"dynamic_size_dwarf_len"
]
def assert_valid_reg_info(self, reg_info):
# Assert we know about all the reginfo keys parsed.
for key in reg_info:
self.assertTrue(key in self._KNOWN_REGINFO_KEYS)
# Check the bare-minimum expected set of register info keys.
self.assertTrue("name" in reg_info)
self.assertTrue("bitsize" in reg_info)
self.assertTrue("offset" in reg_info)
self.assertTrue("encoding" in reg_info)
self.assertTrue("format" in reg_info)
def find_pc_reg_info(self, reg_infos):
lldb_reg_index = 0
for reg_info in reg_infos:
if ("generic" in reg_info) and (reg_info["generic"] == "pc"):
return (lldb_reg_index, reg_info)
lldb_reg_index += 1
return (None, None)
def add_lldb_register_index(self, reg_infos):
"""Add a "lldb_register_index" key containing the 0-baed index of each reg_infos entry.
We'll use this when we want to call packets like P/p with a register index but do so
on only a subset of the full register info set.
"""
self.assertIsNotNone(reg_infos)
reg_index = 0
for reg_info in reg_infos:
reg_info["lldb_register_index"] = reg_index
reg_index += 1
def add_query_memory_region_packets(self, address):
self.test_sequence.add_log_lines(
["read packet: $qMemoryRegionInfo:{0:x}#00".format(address),
{"direction": "send", "regex": r"^\$(.+)#[0-9a-fA-F]{2}$", "capture": {1: "memory_region_response"}}],
True)
def parse_key_val_dict(self, key_val_text, allow_dupes=True):
self.assertIsNotNone(key_val_text)
kv_dict = {}
for match in re.finditer(r";?([^:]+):([^;]+)", key_val_text):
key = match.group(1)
val = match.group(2)
if key in kv_dict:
if allow_dupes:
if isinstance(kv_dict[key], list):
kv_dict[key].append(val)
else:
# Promote to list
kv_dict[key] = [kv_dict[key], val]
else:
self.fail(
"key '{}' already present when attempting to add value '{}' (text='{}', dict={})".format(
key, val, key_val_text, kv_dict))
else:
kv_dict[key] = val
return kv_dict
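    # Example (text is illustrative): parse_key_val_dict("name:rax;bitsize:64;set:GPR;")
    # yields {'name': 'rax', 'bitsize': '64', 'set': 'GPR'}; with allow_dupes=True a key
    # seen more than once is promoted to a list of its values.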
def parse_memory_region_packet(self, context):
# Ensure we have a context.
self.assertIsNotNone(context.get("memory_region_response"))
# Pull out key:value; pairs.
mem_region_dict = self.parse_key_val_dict(
context.get("memory_region_response"))
# Validate keys are known.
for (key, val) in list(mem_region_dict.items()):
self.assertTrue(
key in [
"start",
"size",
"permissions",
"name",
"error"])
self.assertIsNotNone(val)
# Return the dictionary of key-value pairs for the memory region.
return mem_region_dict
def assert_address_within_memory_region(
self, test_address, mem_region_dict):
self.assertIsNotNone(mem_region_dict)
self.assertTrue("start" in mem_region_dict)
self.assertTrue("size" in mem_region_dict)
range_start = int(mem_region_dict["start"], 16)
range_size = int(mem_region_dict["size"], 16)
range_end = range_start + range_size
if test_address < range_start:
self.fail(
"address 0x{0:x} comes before range 0x{1:x} - 0x{2:x} (size 0x{3:x})".format(
test_address,
range_start,
range_end,
range_size))
elif test_address >= range_end:
self.fail(
"address 0x{0:x} comes after range 0x{1:x} - 0x{2:x} (size 0x{3:x})".format(
test_address,
range_start,
range_end,
range_size))
def add_threadinfo_collection_packets(self):
self.test_sequence.add_log_lines(
[{"type": "multi_response", "first_query": "qfThreadInfo", "next_query": "qsThreadInfo",
"append_iteration_suffix": False, "end_regex": re.compile(r"^\$(l)?#[0-9a-fA-F]{2}$"),
"save_key": "threadinfo_responses"}],
True)
def parse_threadinfo_packets(self, context):
"""Return an array of thread ids (decimal ints), one per thread."""
threadinfo_responses = context.get("threadinfo_responses")
self.assertIsNotNone(threadinfo_responses)
thread_ids = []
for threadinfo_response in threadinfo_responses:
new_thread_infos = parse_threadinfo_response(threadinfo_response)
thread_ids.extend(new_thread_infos)
return thread_ids
def wait_for_thread_count(self, thread_count, timeout_seconds=3):
start_time = time.time()
timeout_time = start_time + timeout_seconds
actual_thread_count = 0
while actual_thread_count < thread_count:
self.reset_test_sequence()
self.add_threadinfo_collection_packets()
context = self.expect_gdbremote_sequence()
self.assertIsNotNone(context)
threads = self.parse_threadinfo_packets(context)
self.assertIsNotNone(threads)
actual_thread_count = len(threads)
if time.time() > timeout_time:
raise Exception(
                    'timed out after {} seconds while waiting for threads: waiting for at least {} threads, found {}'.format(
timeout_seconds, thread_count, actual_thread_count))
return threads
def add_set_breakpoint_packets(
self,
address,
z_packet_type=0,
do_continue=True,
breakpoint_kind=1):
self.test_sequence.add_log_lines(
[ # Set the breakpoint.
"read packet: $Z{2},{0:x},{1}#00".format(
address, breakpoint_kind, z_packet_type),
# Verify the stub could set it.
"send packet: $OK#00",
], True)
if (do_continue):
self.test_sequence.add_log_lines(
[ # Continue the inferior.
"read packet: $c#63",
# Expect a breakpoint stop report.
{"direction": "send",
"regex": r"^\$T([0-9a-fA-F]{2})thread:([0-9a-fA-F]+);",
"capture": {1: "stop_signo",
2: "stop_thread_id"}},
], True)
def add_remove_breakpoint_packets(
self,
address,
z_packet_type=0,
breakpoint_kind=1):
self.test_sequence.add_log_lines(
[ # Remove the breakpoint.
"read packet: $z{2},{0:x},{1}#00".format(
address, breakpoint_kind, z_packet_type),
# Verify the stub could unset it.
"send packet: $OK#00",
], True)
def add_qSupported_packets(self):
self.test_sequence.add_log_lines(
["read packet: $qSupported#00",
{"direction": "send", "regex": r"^\$(.*)#[0-9a-fA-F]{2}", "capture": {1: "qSupported_response"}},
], True)
_KNOWN_QSUPPORTED_STUB_FEATURES = [
"augmented-libraries-svr4-read",
"PacketSize",
"QStartNoAckMode",
"QThreadSuffixSupported",
"QListThreadsInStopReply",
"qXfer:auxv:read",
"qXfer:libraries:read",
"qXfer:libraries-svr4:read",
"qXfer:features:read",
"qEcho",
"QPassSignals"
]
def parse_qSupported_response(self, context):
self.assertIsNotNone(context)
raw_response = context.get("qSupported_response")
self.assertIsNotNone(raw_response)
        # For values of the form key=val, the dict key and value are set as expected.
        # For feature+, feature- and feature?, the trailing +, - or ? is stripped from
        # the key and stored as the value.
supported_dict = {}
for match in re.finditer(r";?([^=;]+)(=([^;]+))?", raw_response):
key = match.group(1)
val = match.group(3)
# key=val: store as is
if val and len(val) > 0:
supported_dict[key] = val
else:
if len(key) < 2:
raise Exception(
"singular stub feature is too short: must be stub_feature{+,-,?}")
supported_type = key[-1]
key = key[:-1]
if not supported_type in ["+", "-", "?"]:
raise Exception(
"malformed stub feature: final character {} not in expected set (+,-,?)".format(supported_type))
supported_dict[key] = supported_type
# Ensure we know the supported element
if key not in self._KNOWN_QSUPPORTED_STUB_FEATURES:
raise Exception(
"unknown qSupported stub feature reported: %s" %
key)
return supported_dict
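    # Illustrative qSupported response text: "PacketSize=20000;QStartNoAckMode+;qEcho-"
    # parses to {'PacketSize': '20000', 'QStartNoAckMode': '+', 'qEcho': '-'}; a feature
    # name outside _KNOWN_QSUPPORTED_STUB_FEATURES raises instead.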
def run_process_then_stop(self, run_seconds=1):
# Tell the stub to continue.
self.test_sequence.add_log_lines(
["read packet: $vCont;c#a8"],
True)
context = self.expect_gdbremote_sequence()
# Wait for run_seconds.
time.sleep(run_seconds)
# Send an interrupt, capture a T response.
self.reset_test_sequence()
self.test_sequence.add_log_lines(
["read packet: {}".format(chr(3)),
{"direction": "send", "regex": r"^\$T([0-9a-fA-F]+)([^#]+)#[0-9a-fA-F]{2}$", "capture": {1: "stop_result"}}],
True)
context = self.expect_gdbremote_sequence()
self.assertIsNotNone(context)
self.assertIsNotNone(context.get("stop_result"))
return context
def select_modifiable_register(self, reg_infos):
"""Find a register that can be read/written freely."""
PREFERRED_REGISTER_NAMES = set(["rax", ])
# First check for the first register from the preferred register name
# set.
alternative_register_index = None
self.assertIsNotNone(reg_infos)
for reg_info in reg_infos:
if ("name" in reg_info) and (
reg_info["name"] in PREFERRED_REGISTER_NAMES):
# We found a preferred register. Use it.
return reg_info["lldb_register_index"]
if ("generic" in reg_info) and (reg_info["generic"] == "fp" or
reg_info["generic"] == "arg1"):
# A frame pointer or first arg register will do as a
# register to modify temporarily.
alternative_register_index = reg_info["lldb_register_index"]
# We didn't find a preferred register. Return whatever alternative register
# we found, if any.
return alternative_register_index
def extract_registers_from_stop_notification(self, stop_key_vals_text):
self.assertIsNotNone(stop_key_vals_text)
kv_dict = self.parse_key_val_dict(stop_key_vals_text)
registers = {}
for (key, val) in list(kv_dict.items()):
if re.match(r"^[0-9a-fA-F]+$", key):
registers[int(key, 16)] = val
return registers
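    # Example stop-notification text (register values are made up):
    #   "thread:1c03;00:78563412;01:00000000;"
    # yields {0: '78563412', 1: '00000000'}; non-hex keys such as "thread" are skipped.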
def gather_register_infos(self):
self.reset_test_sequence()
self.add_register_info_collection_packets()
context = self.expect_gdbremote_sequence()
self.assertIsNotNone(context)
reg_infos = self.parse_register_info_packets(context)
self.assertIsNotNone(reg_infos)
self.add_lldb_register_index(reg_infos)
return reg_infos
def find_generic_register_with_name(self, reg_infos, generic_name):
self.assertIsNotNone(reg_infos)
for reg_info in reg_infos:
if ("generic" in reg_info) and (
reg_info["generic"] == generic_name):
return reg_info
return None
def decode_gdbremote_binary(self, encoded_bytes):
decoded_bytes = ""
i = 0
while i < len(encoded_bytes):
if encoded_bytes[i] == "}":
# Handle escaped char.
self.assertTrue(i + 1 < len(encoded_bytes))
decoded_bytes += chr(ord(encoded_bytes[i + 1]) ^ 0x20)
i += 2
elif encoded_bytes[i] == "*":
# Handle run length encoding.
self.assertTrue(len(decoded_bytes) > 0)
self.assertTrue(i + 1 < len(encoded_bytes))
repeat_count = ord(encoded_bytes[i + 1]) - 29
decoded_bytes += decoded_bytes[-1] * repeat_count
i += 2
else:
decoded_bytes += encoded_bytes[i]
i += 1
return decoded_bytes
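    # Examples of the two encodings handled above (inputs are illustrative):
    #   "}]"  decodes to "}"        ('}' escapes the next byte, which is XORed with 0x20)
    #   "0*&" decodes to "0" * 10   ('*' run-length: ord('&') - 29 == 9 extra repeats)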
def build_auxv_dict(self, endian, word_size, auxv_data):
self.assertIsNotNone(endian)
self.assertIsNotNone(word_size)
self.assertIsNotNone(auxv_data)
auxv_dict = {}
# PowerPC64le's auxvec has a special key that must be ignored.
# This special key may be used multiple times, resulting in
# multiple key/value pairs with the same key, which would otherwise
# break this test check for repeated keys.
#
# AT_IGNOREPPC = 22
ignored_keys_for_arch = { 'powerpc64le' : [22] }
arch = self.getArchitecture()
ignore_keys = None
if arch in ignored_keys_for_arch:
ignore_keys = ignored_keys_for_arch[arch]
while len(auxv_data) > 0:
# Chop off key.
raw_key = auxv_data[:word_size]
auxv_data = auxv_data[word_size:]
            # Chop off the value.
raw_value = auxv_data[:word_size]
auxv_data = auxv_data[word_size:]
# Convert raw text from target endian.
key = unpack_endian_binary_string(endian, raw_key)
value = unpack_endian_binary_string(endian, raw_value)
if ignore_keys and key in ignore_keys:
continue
# Handle ending entry.
if key == 0:
self.assertEqual(value, 0)
return auxv_dict
# The key should not already be present.
self.assertFalse(key in auxv_dict)
auxv_dict[key] = value
self.fail(
"should not reach here - implies required double zero entry not found")
return auxv_dict
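    # Illustrative layout (little-endian, word_size=8, values made up): auxv_data is a
    # flat sequence of (key, value) machine words, so key 6 with value 4096 followed by
    # the terminating zero pair decodes to {6: 4096}.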
def read_binary_data_in_chunks(self, command_prefix, chunk_length):
"""Collect command_prefix{offset:x},{chunk_length:x} until a single 'l' or 'l' with data is returned."""
offset = 0
done = False
decoded_data = ""
while not done:
# Grab the next iteration of data.
self.reset_test_sequence()
self.test_sequence.add_log_lines(
[
"read packet: ${}{:x},{:x}:#00".format(
command_prefix,
offset,
chunk_length),
{
"direction": "send",
"regex": re.compile(
r"^\$([^E])(.*)#[0-9a-fA-F]{2}$",
re.MULTILINE | re.DOTALL),
"capture": {
1: "response_type",
2: "content_raw"}}],
True)
context = self.expect_gdbremote_sequence()
self.assertIsNotNone(context)
response_type = context.get("response_type")
self.assertIsNotNone(response_type)
self.assertTrue(response_type in ["l", "m"])
# Move offset along.
offset += chunk_length
# Figure out if we're done. We're done if the response type is l.
done = response_type == "l"
# Decode binary data.
content_raw = context.get("content_raw")
if content_raw and len(content_raw) > 0:
self.assertIsNotNone(content_raw)
decoded_data += self.decode_gdbremote_binary(content_raw)
return decoded_data
def add_interrupt_packets(self):
self.test_sequence.add_log_lines([
            # Send the interrupt.
"read packet: {}".format(chr(3)),
# And wait for the stop notification.
{"direction": "send",
"regex": r"^\$T([0-9a-fA-F]{2})(.*)#[0-9a-fA-F]{2}$",
"capture": {1: "stop_signo",
2: "stop_key_val_text"}},
], True)
def parse_interrupt_packets(self, context):
self.assertIsNotNone(context.get("stop_signo"))
self.assertIsNotNone(context.get("stop_key_val_text"))
return (int(context["stop_signo"], 16), self.parse_key_val_dict(
context["stop_key_val_text"]))
def add_QSaveRegisterState_packets(self, thread_id):
if thread_id:
# Use the thread suffix form.
request = "read packet: $QSaveRegisterState;thread:{:x}#00".format(
thread_id)
else:
request = "read packet: $QSaveRegisterState#00"
self.test_sequence.add_log_lines([request,
{"direction": "send",
"regex": r"^\$(E?.*)#[0-9a-fA-F]{2}$",
"capture": {1: "save_response"}},
],
True)
def parse_QSaveRegisterState_response(self, context):
self.assertIsNotNone(context)
save_response = context.get("save_response")
self.assertIsNotNone(save_response)
if len(save_response) < 1 or save_response[0] == "E":
# error received
return (False, None)
else:
return (True, int(save_response))
def add_QRestoreRegisterState_packets(self, save_id, thread_id=None):
if thread_id:
# Use the thread suffix form.
request = "read packet: $QRestoreRegisterState:{};thread:{:x}#00".format(
save_id, thread_id)
else:
request = "read packet: $QRestoreRegisterState:{}#00".format(
save_id)
self.test_sequence.add_log_lines([
request,
"send packet: $OK#00"
], True)
def flip_all_bits_in_each_register_value(
self, reg_infos, endian, thread_id=None):
self.assertIsNotNone(reg_infos)
successful_writes = 0
failed_writes = 0
for reg_info in reg_infos:
# Use the lldb register index added to the reg info. We're not necessarily
# working off a full set of register infos, so an inferred register
# index could be wrong.
reg_index = reg_info["lldb_register_index"]
self.assertIsNotNone(reg_index)
reg_byte_size = int(reg_info["bitsize"]) / 8
self.assertTrue(reg_byte_size > 0)
# Handle thread suffix.
if thread_id:
p_request = "read packet: $p{:x};thread:{:x}#00".format(
reg_index, thread_id)
else:
p_request = "read packet: $p{:x}#00".format(reg_index)
# Read the existing value.
self.reset_test_sequence()
self.test_sequence.add_log_lines([
p_request,
{"direction": "send", "regex": r"^\$([0-9a-fA-F]+)#", "capture": {1: "p_response"}},
], True)
context = self.expect_gdbremote_sequence()
self.assertIsNotNone(context)
# Verify the response length.
p_response = context.get("p_response")
self.assertIsNotNone(p_response)
initial_reg_value = unpack_register_hex_unsigned(
endian, p_response)
# Flip the value by xoring with all 1s
all_one_bits_raw = "ff" * (int(reg_info["bitsize"]) / 8)
flipped_bits_int = initial_reg_value ^ int(all_one_bits_raw, 16)
# print("reg (index={}, name={}): val={}, flipped bits (int={}, hex={:x})".format(reg_index, reg_info["name"], initial_reg_value, flipped_bits_int, flipped_bits_int))
# Handle thread suffix for P.
if thread_id:
P_request = "read packet: $P{:x}={};thread:{:x}#00".format(
reg_index, pack_register_hex(
endian, flipped_bits_int, byte_size=reg_byte_size), thread_id)
else:
P_request = "read packet: $P{:x}={}#00".format(
reg_index, pack_register_hex(
endian, flipped_bits_int, byte_size=reg_byte_size))
# Write the flipped value to the register.
self.reset_test_sequence()
self.test_sequence.add_log_lines([P_request,
{"direction": "send",
"regex": r"^\$(OK|E[0-9a-fA-F]+)#[0-9a-fA-F]{2}",
"capture": {1: "P_response"}},
],
True)
context = self.expect_gdbremote_sequence()
self.assertIsNotNone(context)
# Determine if the write succeeded. There are a handful of registers that can fail, or partially fail
# (e.g. flags, segment selectors, etc.) due to register value restrictions. Don't worry about them
# all flipping perfectly.
P_response = context.get("P_response")
self.assertIsNotNone(P_response)
if P_response == "OK":
successful_writes += 1
else:
failed_writes += 1
# print("reg (index={}, name={}) write FAILED (error: {})".format(reg_index, reg_info["name"], P_response))
# Read back the register value, ensure it matches the flipped
# value.
if P_response == "OK":
self.reset_test_sequence()
self.test_sequence.add_log_lines([
p_request,
{"direction": "send", "regex": r"^\$([0-9a-fA-F]+)#", "capture": {1: "p_response"}},
], True)
context = self.expect_gdbremote_sequence()
self.assertIsNotNone(context)
verify_p_response_raw = context.get("p_response")
self.assertIsNotNone(verify_p_response_raw)
verify_bits = unpack_register_hex_unsigned(
endian, verify_p_response_raw)
if verify_bits != flipped_bits_int:
# Some registers, like mxcsrmask and others, will permute what's written. Adjust succeed/fail counts.
# print("reg (index={}, name={}): read verify FAILED: wrote {:x}, verify read back {:x}".format(reg_index, reg_info["name"], flipped_bits_int, verify_bits))
successful_writes -= 1
failed_writes += 1
return (successful_writes, failed_writes)
def is_bit_flippable_register(self, reg_info):
if not reg_info:
return False
if not "set" in reg_info:
return False
if reg_info["set"] != "General Purpose Registers":
return False
if ("container-regs" in reg_info) and (
len(reg_info["container-regs"]) > 0):
# Don't try to bit flip registers contained in another register.
return False
if re.match("^.s$", reg_info["name"]):
# This is a 2-letter register name that ends in "s", like a segment register.
# Don't try to bit flip these.
return False
if re.match("^(c|)psr$", reg_info["name"]):
# This is an ARM program status register; don't flip it.
return False
# Okay, this looks fine-enough.
return True
def read_register_values(self, reg_infos, endian, thread_id=None):
self.assertIsNotNone(reg_infos)
values = {}
for reg_info in reg_infos:
            # We append a register index when loading reg infos so we can work
            # with subsets.
reg_index = reg_info.get("lldb_register_index")
self.assertIsNotNone(reg_index)
# Handle thread suffix.
if thread_id:
p_request = "read packet: $p{:x};thread:{:x}#00".format(
reg_index, thread_id)
else:
p_request = "read packet: $p{:x}#00".format(reg_index)
# Read it with p.
self.reset_test_sequence()
self.test_sequence.add_log_lines([
p_request,
{"direction": "send", "regex": r"^\$([0-9a-fA-F]+)#", "capture": {1: "p_response"}},
], True)
context = self.expect_gdbremote_sequence()
self.assertIsNotNone(context)
# Convert value from target endian to integral.
p_response = context.get("p_response")
self.assertIsNotNone(p_response)
self.assertTrue(len(p_response) > 0)
self.assertFalse(p_response[0] == "E")
values[reg_index] = unpack_register_hex_unsigned(
endian, p_response)
return values
def add_vCont_query_packets(self):
self.test_sequence.add_log_lines(["read packet: $vCont?#49",
{"direction": "send",
"regex": r"^\$(vCont)?(.*)#[0-9a-fA-F]{2}$",
"capture": {2: "vCont_query_response"}},
],
True)
def parse_vCont_query_response(self, context):
self.assertIsNotNone(context)
vCont_query_response = context.get("vCont_query_response")
# Handle case of no vCont support at all - in which case the capture
# group will be none or zero length.
if not vCont_query_response or len(vCont_query_response) == 0:
return {}
return {key: 1 for key in vCont_query_response.split(
";") if key and len(key) > 0}
def count_single_steps_until_true(
self,
thread_id,
predicate,
args,
max_step_count=100,
use_Hc_packet=True,
step_instruction="s"):
"""Used by single step test that appears in a few different contexts."""
single_step_count = 0
while single_step_count < max_step_count:
self.assertIsNotNone(thread_id)
# Build the packet for the single step instruction. We replace
# {thread}, if present, with the thread_id.
step_packet = "read packet: ${}#00".format(
re.sub(r"{thread}", "{:x}".format(thread_id), step_instruction))
# print("\nstep_packet created: {}\n".format(step_packet))
# Single step.
self.reset_test_sequence()
if use_Hc_packet:
self.test_sequence.add_log_lines(
[ # Set the continue thread.
"read packet: $Hc{0:x}#00".format(thread_id),
"send packet: $OK#00",
], True)
self.test_sequence.add_log_lines([
# Single step.
step_packet,
# "read packet: $vCont;s:{0:x}#00".format(thread_id),
# Expect a breakpoint stop report.
{"direction": "send",
"regex": r"^\$T([0-9a-fA-F]{2})thread:([0-9a-fA-F]+);",
"capture": {1: "stop_signo",
2: "stop_thread_id"}},
], True)
context = self.expect_gdbremote_sequence()
self.assertIsNotNone(context)
self.assertIsNotNone(context.get("stop_signo"))
self.assertEqual(int(context.get("stop_signo"), 16),
lldbutil.get_signal_number('SIGTRAP'))
single_step_count += 1
# See if the predicate is true. If so, we're done.
if predicate(args):
return (True, single_step_count)
# The predicate didn't return true within the runaway step count.
return (False, single_step_count)
def g_c1_c2_contents_are(self, args):
"""Used by single step test that appears in a few different contexts."""
g_c1_address = args["g_c1_address"]
g_c2_address = args["g_c2_address"]
expected_g_c1 = args["expected_g_c1"]
expected_g_c2 = args["expected_g_c2"]
# Read g_c1 and g_c2 contents.
self.reset_test_sequence()
self.test_sequence.add_log_lines(
["read packet: $m{0:x},{1:x}#00".format(g_c1_address, 1),
{"direction": "send", "regex": r"^\$(.+)#[0-9a-fA-F]{2}$", "capture": {1: "g_c1_contents"}},
"read packet: $m{0:x},{1:x}#00".format(g_c2_address, 1),
{"direction": "send", "regex": r"^\$(.+)#[0-9a-fA-F]{2}$", "capture": {1: "g_c2_contents"}}],
True)
# Run the packet stream.
context = self.expect_gdbremote_sequence()
self.assertIsNotNone(context)
# Check if what we read from inferior memory is what we are expecting.
self.assertIsNotNone(context.get("g_c1_contents"))
self.assertIsNotNone(context.get("g_c2_contents"))
return (context.get("g_c1_contents").decode("hex") == expected_g_c1) and (
context.get("g_c2_contents").decode("hex") == expected_g_c2)
def single_step_only_steps_one_instruction(
self, use_Hc_packet=True, step_instruction="s"):
"""Used by single step test that appears in a few different contexts."""
# Start up the inferior.
procs = self.prep_debug_monitor_and_inferior(
inferior_args=[
"get-code-address-hex:swap_chars",
"get-data-address-hex:g_c1",
"get-data-address-hex:g_c2",
"sleep:1",
"call-function:swap_chars",
"sleep:5"])
# Run the process
self.test_sequence.add_log_lines(
[ # Start running after initial stop.
"read packet: $c#63",
# Match output line that prints the memory address of the function call entry point.
                # Note we require launch-only testing so we can get inferior output.
{"type": "output_match", "regex": r"^code address: 0x([0-9a-fA-F]+)\r\ndata address: 0x([0-9a-fA-F]+)\r\ndata address: 0x([0-9a-fA-F]+)\r\n$",
"capture": {1: "function_address", 2: "g_c1_address", 3: "g_c2_address"}},
# Now stop the inferior.
"read packet: {}".format(chr(3)),
# And wait for the stop notification.
{"direction": "send", "regex": r"^\$T([0-9a-fA-F]{2})thread:([0-9a-fA-F]+);", "capture": {1: "stop_signo", 2: "stop_thread_id"}}],
True)
# Run the packet stream.
context = self.expect_gdbremote_sequence()
self.assertIsNotNone(context)
# Grab the main thread id.
self.assertIsNotNone(context.get("stop_thread_id"))
main_thread_id = int(context.get("stop_thread_id"), 16)
# Grab the function address.
self.assertIsNotNone(context.get("function_address"))
function_address = int(context.get("function_address"), 16)
# Grab the data addresses.
self.assertIsNotNone(context.get("g_c1_address"))
g_c1_address = int(context.get("g_c1_address"), 16)
self.assertIsNotNone(context.get("g_c2_address"))
g_c2_address = int(context.get("g_c2_address"), 16)
# Set a breakpoint at the given address.
if self.getArchitecture() == "arm":
# TODO: Handle case when setting breakpoint in thumb code
BREAKPOINT_KIND = 4
else:
BREAKPOINT_KIND = 1
self.reset_test_sequence()
self.add_set_breakpoint_packets(
function_address,
do_continue=True,
breakpoint_kind=BREAKPOINT_KIND)
context = self.expect_gdbremote_sequence()
self.assertIsNotNone(context)
# Remove the breakpoint.
self.reset_test_sequence()
self.add_remove_breakpoint_packets(
function_address, breakpoint_kind=BREAKPOINT_KIND)
context = self.expect_gdbremote_sequence()
self.assertIsNotNone(context)
# Verify g_c1 and g_c2 match expected initial state.
args = {}
args["g_c1_address"] = g_c1_address
args["g_c2_address"] = g_c2_address
args["expected_g_c1"] = "0"
args["expected_g_c2"] = "1"
self.assertTrue(self.g_c1_c2_contents_are(args))
# Verify we take only a small number of steps to hit the first state.
# Might need to work through function entry prologue code.
args["expected_g_c1"] = "1"
args["expected_g_c2"] = "1"
(state_reached,
step_count) = self.count_single_steps_until_true(main_thread_id,
self.g_c1_c2_contents_are,
args,
max_step_count=25,
use_Hc_packet=use_Hc_packet,
step_instruction=step_instruction)
self.assertTrue(state_reached)
# Verify we hit the next state.
args["expected_g_c1"] = "1"
args["expected_g_c2"] = "0"
(state_reached,
step_count) = self.count_single_steps_until_true(main_thread_id,
self.g_c1_c2_contents_are,
args,
max_step_count=5,
use_Hc_packet=use_Hc_packet,
step_instruction=step_instruction)
self.assertTrue(state_reached)
expected_step_count = 1
arch = self.getArchitecture()
        # MIPS requires 3 machine instructions (ADDIU, SB, LD) to update the
        # variable value.
if re.match("mips", arch):
expected_step_count = 3
        # S390X requires 2 machine instructions (LARL, MVI) to update the
        # variable value.
if re.match("s390x", arch):
expected_step_count = 2
self.assertEqual(step_count, expected_step_count)
# Verify we hit the next state.
args["expected_g_c1"] = "0"
args["expected_g_c2"] = "0"
(state_reached,
step_count) = self.count_single_steps_until_true(main_thread_id,
self.g_c1_c2_contents_are,
args,
max_step_count=5,
use_Hc_packet=use_Hc_packet,
step_instruction=step_instruction)
self.assertTrue(state_reached)
self.assertEqual(step_count, expected_step_count)
# Verify we hit the next state.
args["expected_g_c1"] = "0"
args["expected_g_c2"] = "1"
(state_reached,
step_count) = self.count_single_steps_until_true(main_thread_id,
self.g_c1_c2_contents_are,
args,
max_step_count=5,
use_Hc_packet=use_Hc_packet,
step_instruction=step_instruction)
self.assertTrue(state_reached)
self.assertEqual(step_count, expected_step_count)
def maybe_strict_output_regex(self, regex):
return '.*' + regex + \
'.*' if lldbplatformutil.hasChattyStderr(self) else '^' + regex + '$'
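    # Illustrative behaviour (not an additional API): given r"hello", this
    # helper returns r"^hello$" on platforms with quiet stderr and
    # r".*hello.*" when lldbplatformutil reports chatty stderr, so output
    # matching stays tolerant of interleaved noise.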
def install_and_create_launch_args(self):
exe_path = self.getBuildArtifact("a.out")
if not lldb.remote_platform:
return [exe_path]
remote_path = lldbutil.append_to_process_working_directory(self,
os.path.basename(exe_path))
remote_file_spec = lldb.SBFileSpec(remote_path, False)
err = lldb.remote_platform.Install(lldb.SBFileSpec(exe_path, True),
remote_file_spec)
if err.Fail():
raise Exception("remote_platform.Install('%s', '%s') failed: %s" %
(exe_path, remote_path, err))
return [remote_path]
| {
"content_hash": "94f337ba6d6f7b58b9dfbb238aaf17f7",
"timestamp": "",
"source": "github",
"line_count": 1637,
"max_line_length": 178,
"avg_line_length": 39.42516799022602,
"alnum_prop": 0.5398441252575962,
"repo_name": "youtube/cobalt",
"id": "c5d21a9c9b7a0e13da54850a7f0747211cbc5c00",
"size": "64539",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "third_party/llvm-project/lldb/packages/Python/lldbsuite/test/tools/lldb-server/gdbremote_testcase.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [],
"symlink_target": ""
} |
try:
import os
import sys
import shutil
import sqlite3
import fnmatch
except ImportError,e:
print "[f] Required module missing. %s" % e.args[0]
sys.exit(-1)
COLUMNS = ['key_remote_jid','key_from_me','key_id','status','needs_push','data','timestamp','media_url','media_mime_type','media_wa_type','media_size','media_name','latitude','longitude','thumb_image','remote_resource','received_timestamp','send_timestamp','receipt_server_timestamp','receipt_device_timestamp','raw_data']
tmessages = 0
tcontacts = 0
def merge(path,pattern,dest):
"""
    Reads the files in 'path' matching 'pattern' and merges their contents into 'dest'
"""
global COLUMNS
global tmessages
global tcontacts
first = 0
output = None
mtableid = 0
aux = []
# find and store files
for root, dirs, files in os.walk(path):
for file in files:
if fnmatch.fnmatch(file,pattern):
aux.append(os.path.join(root,file))
filenames = sorted(aux)
for filename in filenames:
print "\n+ Merging: %s" % filename ,
sys.stdout.flush()
if os.path.isdir(dest):
dest += '/' + os.path.basename(filename)
if first == 0:
shutil.copy2 (filename, dest)
first += 1
continue
elif output is None:
output = sqlite3.connect(dest)
wcursor = output.cursor()
ccontacts = 0
cmessages = 0
# get all remote_key_jid values from messages table
orig = sqlite3.connect(filename)
rcursor = orig.cursor()
if mtableid == 0:
# get biggest message_table_id value (what is this column for? :-/ )
wcursor.execute("SELECT MAX(message_table_id) FROM chat_list")
try:
mtableid = wcursor.fetchone()[0]
except:
print "\n\t- Error getting MAX(message_table_id), skipping file..."
continue
# get all key_remote_jid from the current file
rcursor.execute("SELECT DISTINCT key_remote_jid FROM chat_list")
        # insert each item from the above query if it does not already exist
for krjid in rcursor:
wcursor.execute("SELECT key_remote_jid FROM chat_list WHERE key_remote_jid=?",krjid)
try:
if len(wcursor.fetchone()[0]) > 0:
continue
except:
try:
mtableid += 1 # increments message_table_id
data = (krjid[0], mtableid)
wcursor.execute("INSERT INTO chat_list (key_remote_jid,message_table_id) VALUES (?,?)", data)
ccontacts += 1
except Exception,e:
print "\n[e] Error merging contact: %s" % str(e)
tcontacts += ccontacts
# check if the column 'raw_data' exists (WhatsApp versions compatibility issue)
try:
rcursor.execute("SELECT COUNT(%s) FROM messages" % COLUMNS[len(COLUMNS) - 1])
ncols = len(COLUMNS)
except sqlite3.OperationalError,e:
if COLUMNS[len(COLUMNS)-1] in e.message:
ncols = len(COLUMNS) - 1
else:
print "\n[e] Undefined error: %s" % e.message
continue
# get all messages from messages table
rcursor.execute("SELECT %s FROM messages" % ','.join(COLUMNS[:ncols]))
messages = rcursor.fetchall()
for msg in messages:
try:
wcursor.execute("INSERT INTO messages(%s) VALUES (%s)" % (','.join(COLUMNS[:ncols]),','.join('?' for x in range(0,ncols))),msg)
cmessages += 1
except Exception,e:
pass
tmessages += cmessages
output.commit()
print " (Merged %d contacts and %d messages)" % (ccontacts,cmessages) ,
sys.stdout.flush()
orig.close()
if output is not None:
output.close()
return
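# Illustrative invocation (hypothetical paths and pattern, mirroring the
# command-line usage below):
#
#   merge('/tmp/whatsapp_backups/', 'msgstore*.db', '/tmp/merged/msgstore.db')
#
# The first matching file is copied verbatim to the destination; every later
# file only contributes chat_list rows and messages that are not already there.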
if __name__ == "__main__":
print """
    ######################################
    #  WhatsApp Msgstore Merge Tool 0.3  #
    #------------------------------------#
    # Merges WhatsApp message files into #
    # a single one.                      #
    # This tool is part of WForensic     #
    # http://sch3m4.github.com/wforensic #
    ######################################
"""
if len(sys.argv) != 4:
print "Usage: %s /path/to/databases/to/be/merged/ files_pattern /path/to/output\n" % sys.argv[0]
sys.exit(-1)
if sys.argv[1][-1:] != '/':
sys.argv[1] += '/'
dir = os.path.dirname(sys.argv[3])
if len(dir) > 0 and not os.path.isdir(dir):
        print "[e] Error: Directory \"%s\" does not exist\n" % sys.argv[3]
sys.exit(-2)
if not os.path.isdir(sys.argv[1]):
print "[e] Error: \"%s\" is not a directory\n" % sys.argv[1]
sys.exit(-3)
print "[i] Origin: %s%s" % ( sys.argv[1] , sys.argv[2] )
print "[i] Output file: %s" % sys.argv[3]
merge(sys.argv[1],sys.argv[2], sys.argv[3])
print "\n\n[i] Merged %d contacts and %d messages!\n" % (tcontacts,tmessages)
sys.exit(0)
| {
"content_hash": "cc62ffdc71353e79414dd88bf6a4d68b",
"timestamp": "",
"source": "github",
"line_count": 156,
"max_line_length": 322,
"avg_line_length": 33.56410256410256,
"alnum_prop": 0.5324675324675324,
"repo_name": "Abhikos/wforensic",
"id": "6c6032c69eb744644cd75ae1edf20c3277d5c2ad",
"size": "7370",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tools/merge.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [],
"symlink_target": ""
} |
from django.conf.urls import patterns, include, url
from django.contrib import admin
from django.views.defaults import *
urlpatterns = patterns('',
# Examples:
# url(r'^$', 'django_abm.views.home', name='home'),
# url(r'^blog/', include('blog.urls')),
url(r'^admin/', include(admin.site.urls)),
url(r'^', include('apps.home.urls', namespace='home', app_name='home')),
url(r'^persona/', include('apps.persona.urls', namespace='persona', app_name='persona')),
)
| {
"content_hash": "9636b06881b181be42c8f0e79334a03f",
"timestamp": "",
"source": "github",
"line_count": 16,
"max_line_length": 93,
"avg_line_length": 30.8125,
"alnum_prop": 0.6450304259634888,
"repo_name": "MDA2014/django-abm",
"id": "0ecffdd82d4aebba31390d08957f711daf185128",
"size": "493",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "django_abm/urls.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "10189"
},
{
"name": "JavaScript",
"bytes": "13696"
},
{
"name": "Python",
"bytes": "18052"
},
{
"name": "Shell",
"bytes": "3745"
}
],
"symlink_target": ""
} |
import gettext
import os
_localedir = os.environ.get('heat-translator'.upper() + '_LOCALEDIR')
_t = gettext.translation('heat-translator', localedir=_localedir,
fallback=True)
def _(msg):
return _t.gettext(msg)
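# Typical use (illustrative only; the message text is hypothetical): modules
# import the marker function and wrap user-facing strings so they can be
# translated via the 'heat-translator' gettext domain, e.g.
#
#   from parser.utils.gettextutils import _
#   raise ValueError(_('unsupported TOSCA version'))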
| {
"content_hash": "cdc416b1403408c5fd7ca6e698cc5af7",
"timestamp": "",
"source": "github",
"line_count": 10,
"max_line_length": 69,
"avg_line_length": 24.3,
"alnum_prop": 0.6419753086419753,
"repo_name": "spzala/tosca-parser",
"id": "63db495196e28e2be3e9b442e2528ef2023cdb56",
"size": "816",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "parser/utils/gettextutils.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "254621"
},
{
"name": "Shell",
"bytes": "14080"
}
],
"symlink_target": ""
} |
import re
import codecs
from os import path
from glob import glob
from subprocess import Popen, PIPE, STDOUT
import misaka
from misaka import Markdown, BaseRenderer, HtmlRenderer, SmartyPants, \
EXT_NO_INTRA_EMPHASIS, EXT_TABLES, EXT_FENCED_CODE, EXT_AUTOLINK, \
EXT_STRIKETHROUGH, EXT_LAX_SPACING, EXT_SPACE_HEADERS, \
EXT_SUPERSCRIPT, \
HTML_SKIP_HTML, HTML_SKIP_STYLE, HTML_SKIP_IMAGES, HTML_SKIP_LINKS, \
HTML_EXPAND_TABS, HTML_SAFELINK, HTML_TOC, HTML_HARD_WRAP, \
HTML_USE_XHTML, HTML_ESCAPE, \
HTML_SMARTYPANTS
from minitest import TestCase, ok, runner
def clean_html(dirty_html):
input_html = dirty_html.encode('utf-8')
p = Popen(['tidy', '--show-body-only', '1', '--quiet', '1', '--show-warnings', '0', '-utf8'],
stdout=PIPE, stdin=PIPE, stderr=STDOUT)
stdout, stderr = p.communicate(input=input_html)
return stdout.decode('utf-8')
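# Note: clean_html() depends on the external `tidy` binary being on PATH; it
# is used only to normalise both the expected and the generated HTML (body
# only) so the conformance tests compare equivalent markup rather than exact
# byte-for-byte strings.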
class SmartyPantsTest(TestCase):
name = 'SmartyPants'
def setup(self):
self.r = lambda html: misaka.html(html, render_flags=HTML_SMARTYPANTS)
def test_single_quotes_re(self):
html = self.r('<p>They\'re not for sale.</p>\n')
ok(html).diff('<p>They’re not for sale.</p>\n')
def test_single_quotes_ll(self):
html = self.r('<p>Well that\'ll be the day</p>\n')
ok(html).diff('<p>Well that’ll be the day</p>\n')
def test_double_quotes_to_curly_quotes(self):
html = self.r('<p>"Quoted text"</p>\n')
ok(html).diff('<p>“Quoted text”</p>\n')
def test_single_quotes_ve(self):
html = self.r('<p>I\'ve been meaning to tell you ..</p>\n')
ok(html).diff('<p>I’ve been meaning to tell you ..</p>\n')
def test_single_quotes_m(self):
html = self.r('<p>I\'m not kidding</p>\n')
ok(html).diff('<p>I’m not kidding</p>\n')
def test_single_quotes_d(self):
html = self.r('<p>what\'d you say?</p>\n')
ok(html).diff('<p>what’d you say?</p>\n')
class HtmlRenderTest(TestCase):
name = 'Html Renderer'
def setup(self):
pants = SmartyPants()
self.r = {
HTML_SKIP_HTML: HtmlRenderer(HTML_SKIP_HTML),
HTML_SKIP_IMAGES: HtmlRenderer(HTML_SKIP_IMAGES),
HTML_SKIP_LINKS: HtmlRenderer(HTML_SKIP_LINKS),
HTML_SAFELINK: HtmlRenderer(HTML_SAFELINK),
HTML_ESCAPE: HtmlRenderer(HTML_ESCAPE),
HTML_HARD_WRAP: HtmlRenderer(HTML_HARD_WRAP)
}
def render_with(self, flag, text):
return Markdown(self.r[flag]).render(text)
# Hint: overrides HTML_SKIP_HTML, HTML_SKIP_IMAGES and HTML_SKIP_LINKS
def test_escape_html(self):
source = '''
Through <em>NO</em> <script>DOUBLE NO</script>
<script>BAD</script>
<img src="/favicon.ico" />
'''
expected = clean_html('''
<p>Through <em>NO</em> <script>DOUBLE NO</script></p>
<p><script>BAD</script></p>
<p><img src="/favicon.ico" /></p>
''')
markdown = clean_html(self.render_with(HTML_ESCAPE, source))
ok(markdown).diff(expected)
def test_skip_html(self):
markdown = self.render_with(HTML_SKIP_HTML, 'Through <em>NO</em> <script>DOUBLE NO</script>')
ok(markdown).diff('<p>Through NO DOUBLE NO</p>\n')
def test_skip_html_two_space_hard_break(self):
markdown = self.render_with(HTML_SKIP_HTML, 'Lorem, \nipsum\n')
ok(markdown).diff('<p>Lorem,<br>\nipsum</p>\n')
def test_skip_image(self):
markdown = self.render_with(HTML_SKIP_IMAGES, ' <img src="image.png" />')
ok(markdown).not_contains('<img')
def test_skip_links(self):
markdown = self.render_with(HTML_SKIP_LINKS, '[This link](http://example.net/) <a href="links.html">links</a>')
ok(markdown).not_contains('<a ')
def test_safelink(self):
markdown = self.render_with(HTML_SAFELINK, '[IRC](irc://chat.freenode.org/#freenode)')
ok(markdown).diff('<p>[IRC](irc://chat.freenode.org/#freenode)</p>\n')
def test_hard_wrap(self):
markdown = self.render_with(HTML_HARD_WRAP, '''
Hello world,
this is just a simple test
With hard wraps
and other *things*.''')
ok(markdown).contains('<br>')
class MarkdownParserTest(TestCase):
name = 'Markdown Parser'
def setup(self):
self.r = Markdown(HtmlRenderer()).render
def render_with(self, text, flags=0, extensions=0):
return Markdown(HtmlRenderer(), extensions).render(text)
def test_one_liner_to_html(self):
markdown = self.r('Hello World.')
ok(markdown).diff('<p>Hello World.</p>\n')
def test_inline_markdown_to_html(self):
markdown = self.r('_Hello World_!')
ok(markdown).diff('<p><em>Hello World</em>!</p>\n')
def test_inline_markdown_start_end(self):
markdown = self.render_with('_start _ foo_bar bar_baz _ end_ *italic* **bold** <a>_blah_</a>',
extensions=EXT_NO_INTRA_EMPHASIS)
ok(markdown).diff('<p><em>start _ foo_bar bar_baz _ end</em> <em>italic</em> <strong>bold</strong> <a><em>blah</em></a></p>\n')
markdown = self.r('Run \'rake radiant:extensions:rbac_base:migrate\'')
ok(markdown).diff('<p>Run 'rake radiant:extensions:rbac_base:migrate'</p>\n')
def test_urls_not_doubly_escaped(self):
markdown = self.r('[Page 2](/search?query=Markdown+Test&page=2)')
ok(markdown).diff('<p><a href="/search?query=Markdown+Test&page=2">Page 2</a></p>\n')
def test_inline_html(self):
markdown = self.r('before\n\n<div>\n foo\n</div>\n\nafter')
ok(markdown).diff('<p>before</p>\n\n<div>\n foo\n</div>\n\n<p>after</p>\n')
def test_html_block_end_tag_on_same_line(self):
markdown = self.r('Para 1\n\n<div><pre>HTML block\n</pre></div>\n\nPara 2 [Link](#anchor)')
ok(markdown).diff('<p>Para 1</p>\n\n<div><pre>HTML block\n</pre></div>\n\n<p>Para 2 <a href=\"#anchor\">Link</a></p>\n')
# This isn't in the spec but is Markdown.pl behavior.
def test_block_quotes_preceded_by_spaces(self):
markdown = self.r(
'A wise man once said:\n\n' \
' > Isn\'t it wonderful just to be alive.\n')
ok(markdown).diff(
'<p>A wise man once said:</p>\n\n' \
'<blockquote>\n<p>Isn't it wonderful just to be alive.</p>\n</blockquote>\n')
def test_html_block_not_wrapped_in_p(self):
markdown = self.render_with(
'Things to watch out for\n\n' \
'<ul>\n<li>Blah</li>\n</ul>\n',
extensions=EXT_LAX_SPACING)
ok(markdown).diff(
'<p>Things to watch out for</p>\n\n' \
'<ul>\n<li>Blah</li>\n</ul>\n')
# http://github.com/rtomayko/rdiscount/issues/#issue/13
def test_headings_with_trailing_space(self):
markdown = self.render_with(
'The Ant-Sugar Tales \n' \
'=================== \n\n' \
'By Candice Yellowflower \n')
ok(markdown).diff('<h1>The Ant-Sugar Tales </h1>\n\n<p>By Candice Yellowflower </p>\n')
def test_intra_emphasis(self):
markdown = self.r('foo_bar_baz')
ok(markdown).diff('<p>foo<em>bar</em>baz</p>\n')
markdown = self.render_with('foo_bar_baz', extensions=EXT_NO_INTRA_EMPHASIS)
ok(markdown).diff('<p>foo_bar_baz</p>\n')
def test_autolink(self):
markdown = self.render_with('http://axr.vg/', extensions=EXT_AUTOLINK)
ok(markdown).diff('<p><a href=\"http://axr.vg/\">http://axr.vg/</a></p>\n')
def test_tags_with_dashes_and_underscored(self):
markdown = self.r('foo <asdf-qwerty>bar</asdf-qwerty> and <a_b>baz</a_b>')
ok(markdown).diff('<p>foo <asdf-qwerty>bar</asdf-qwerty> and <a_b>baz</a_b></p>\n')
def test_no_link_in_code_blocks(self):
markdown = self.r(' This is a code block\n This is a link [[1]] inside\n')
ok(markdown).diff('<pre><code>This is a code block\nThis is a link [[1]] inside\n</code></pre>\n')
def test_whitespace_after_urls(self):
markdown = self.render_with('Japan: http://www.abc.net.au/news/events/japan-quake-2011/beforeafter.htm (yes, japan)',
extensions=EXT_AUTOLINK)
ok(markdown).diff('<p>Japan: <a href="http://www.abc.net.au/news/events/japan-quake-2011/beforeafter.htm">http://www.abc.net.au/news/events/japan-quake-2011/beforeafter.htm</a> (yes, japan)</p>\n')
def test_infinite_loop_in_header(self):
markdown = self.render_with(
'######\n' \
'#Body#\n' \
'######\n')
ok(markdown).diff('<h1>Body</h1>\n')
def test_tables(self):
text = ' aaa | bbbb\n' \
'-----|------\n' \
'hello|sailor\n'
ok(self.render_with(text)).not_contains('<table')
ok(self.render_with(text, extensions=EXT_TABLES)).contains('<table')
def test_strikethrough(self):
text = 'this is ~some~ striked ~~text~~'
ok(self.render_with(text)).not_contains('<del')
ok(self.render_with(text, extensions=EXT_STRIKETHROUGH)).contains('<del')
def test_fenced_code_blocks(self):
text = '''
This is a simple test
~~~~~
This is some awesome code
with tabs and shit
~~~
'''
ok(self.render_with(text)).not_contains('<code')
ok(self.render_with(text, extensions=EXT_FENCED_CODE)).contains('<code')
def test_fenced_code_blocks_without_space(self):
text = 'foo\nbar\n```\nsome\ncode\n```\nbaz'
ok(self.render_with(text)).not_contains('<pre><code>')
ok(self.render_with(text, extensions=EXT_FENCED_CODE | EXT_LAX_SPACING)).contains('<pre><code>')
def test_linkable_headers(self):
markdown = self.r('### Hello [GitHub](http://github.com)')
ok(markdown).diff('<h3>Hello <a href=\"http://github.com\">GitHub</a></h3>\n')
def test_autolinking_with_ent_chars(self):
markdown = self.render_with('This a stupid link: https://github.com/rtomayko/tilt/issues?milestone=1&state=open',
extensions=EXT_AUTOLINK)
ok(markdown).diff('<p>This a stupid link: <a href=\"https://github.com/rtomayko/tilt/issues?milestone=1&state=open\">https://github.com/rtomayko/tilt/issues?milestone=1&state=open</a></p>\n')
def test_spaced_headers(self):
text = '#123 a header yes\n'
ok(self.render_with(text, extensions=EXT_SPACE_HEADERS)).not_contains('<h1>')
class MarkdownConformanceTest_10(TestCase):
name = 'Markdown Conformance 1.0'
suite = 'MarkdownTest_1.0'
def setup(self):
self.r = Markdown(HtmlRenderer()).render
tests_dir = path.dirname(__file__)
for text_path in glob(path.join(tests_dir, self.suite, '*.text')):
html_path = '%s.html' % path.splitext(text_path)[0]
self._create_test(text_path, html_path)
def _create_test(self, text_path, html_path):
def test():
with codecs.open(text_path, 'r', encoding='utf-8') as fd:
text = fd.read()
with codecs.open(html_path, 'r', encoding='utf-8') as fd:
expected_html = fd.read()
actual_html = self.r(text)
expected_result = clean_html(expected_html)
actual_result = clean_html(actual_html)
ok(actual_result).diff(expected_result)
test.__name__ = self._test_name(text_path)
self.add_test(test)
def _test_name(self, text_path):
name = path.splitext(path.basename(text_path))[0]
name = name.replace(' - ', '_')
name = name.replace(' ', '_')
name = re.sub('[(),]', '', name)
return 'test_%s' % name.lower()
class MarkdownConformanceTest_103(MarkdownConformanceTest_10):
name = 'Markdown Conformance 1.0.3'
suite = 'MarkdownTest_1.0.3'
class UnicodeTest(TestCase):
name = 'Unicode'
def setup(self):
self.r = Markdown(HtmlRenderer()).render
def test_unicode(self):
tests_dir = path.dirname(__file__)
with codecs.open(path.join(tests_dir, 'unicode.txt'), 'r', encoding='utf-8') as fd:
text = fd.read()
with codecs.open(path.join(tests_dir, 'unicode.html'), 'r', encoding='utf-8') as fd:
html = fd.read()
markdown = self.r(text)
ok(markdown).diff(html)
def run_tests():
runner([
SmartyPantsTest,
HtmlRenderTest,
MarkdownParserTest,
MarkdownConformanceTest_10,
MarkdownConformanceTest_103,
UnicodeTest
])
if __name__ == '__main__':
run_tests()
| {
"content_hash": "fecc8cad5c6a068b7f175ccc3a854261",
"timestamp": "",
"source": "github",
"line_count": 342,
"max_line_length": 207,
"avg_line_length": 36.95029239766082,
"alnum_prop": 0.6004589696921738,
"repo_name": "hepochen/misaka",
"id": "5ae45c498f3e6df8b7550a9af8456ceddc3b5885",
"size": "12720",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/misaka_test.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "312530"
},
{
"name": "CSS",
"bytes": "14704"
},
{
"name": "Python",
"bytes": "36252"
},
{
"name": "Shell",
"bytes": "126"
}
],
"symlink_target": ""
} |
import unittest
from quickbooks.objects.detailline import SalesItemLineDetail, DiscountOverride, DetailLine, SubtotalLineDetail, \
DiscountLineDetail, SubtotalLine, DescriptionLineDetail, DescriptionLine, SalesItemLine, DiscountLine, GroupLine, \
AccountBasedExpenseLineDetail, ItemBasedExpenseLineDetail, DescriptionOnlyLine, ItemBasedExpenseLine
class DetailLineTests(unittest.TestCase):
def test_unicode(self):
detail = DetailLine()
detail.LineNum = 1
detail.Description = "Product Description"
detail.Amount = 100
self.assertEquals(str(detail), "[1] Product Description 100")
class SalesItemLineDetailTests(unittest.TestCase):
def test_unicode(self):
sales_detail = SalesItemLineDetail()
sales_detail.UnitPrice = 10
self.assertEquals(str(sales_detail), "10")
class DiscountOverrideTests(unittest.TestCase):
def test_init(self):
discount_override = DiscountOverride()
self.assertEquals(discount_override.DiscountPercent, 0)
self.assertEquals(discount_override.DiscountRef, None)
self.assertEquals(discount_override.DiscountAccountRef, None)
self.assertFalse(discount_override.PercentBased)
class DiscountLineDetailTesets(unittest.TestCase):
def test_init(self):
discount_detail = DiscountLineDetail()
self.assertEquals(discount_detail.Discount, None)
self.assertEquals(discount_detail.ClassRef, None)
self.assertEquals(discount_detail.TaxCodeRef, None)
class SubtotalLineDetailTest(unittest.TestCase):
def test_init(self):
detail = SubtotalLineDetail()
self.assertEquals(detail.ItemRef, None)
class SubtotalLineTest(unittest.TestCase):
def test_init(self):
subtotal_line = SubtotalLine()
self.assertEquals(subtotal_line.DetailType, "SubtotalLineDetail")
self.assertEquals(subtotal_line.SubtotalLineDetail, None)
class DescriptionLineDetailTest(unittest.TestCase):
def test_init(self):
description_detail = DescriptionLineDetail()
self.assertEquals(description_detail.ServiceDate, "")
self.assertEquals(description_detail.TaxCodeRef, None)
class DescriptionLineTest(unittest.TestCase):
def test_init(self):
line = DescriptionLine()
self.assertEquals(line.DetailType, "DescriptionOnly")
self.assertEquals(line.DescriptionLineDetail, None)
class SalesItemLineTest(unittest.TestCase):
def test_init(self):
line = SalesItemLine()
self.assertEquals(line.DetailType, "SalesItemLineDetail")
self.assertEquals(line.SalesItemLineDetail, None)
class DiscountLineTest(unittest.TestCase):
def test_init(self):
line = DiscountLine()
self.assertEquals(line.DetailType, "DiscountLineDetail")
self.assertEquals(line.DiscountLineDetail, None)
class GroupLineTest(unittest.TestCase):
def test_init(self):
line = GroupLine()
self.assertEquals(line.DetailType, "SalesItemLineDetail")
self.assertEquals(line.SalesItemLineDetail, None)
class ItemBasedExpenseLineDetailTest(unittest.TestCase):
def test_init(self):
detail = ItemBasedExpenseLineDetail()
self.assertEquals(detail.BillableStatus, "")
self.assertEquals(detail.UnitPrice, 0)
self.assertEquals(detail.TaxInclusiveAmt, 0)
self.assertEquals(detail.Qty, 0)
self.assertEquals(detail.ItemRef, None)
self.assertEquals(detail.ClassRef, None)
self.assertEquals(detail.PriceLevelRef, None)
self.assertEquals(detail.TaxCodeRef, None)
self.assertEquals(detail.MarkupInfo, None)
self.assertEquals(detail.CustomerRef, None)
class ItemBasedExpenseLineTests(unittest.TestCase):
def test_unicode(self):
line = ItemBasedExpenseLine()
self.assertEquals(line.DetailType, "ItemBasedExpenseLineDetail")
self.assertEquals(line.ItemBasedExpenseLineDetail, None)
class AccountBasedExpenseLineDetailTests(unittest.TestCase):
def test_unicode(self):
acct_detail = AccountBasedExpenseLineDetail()
acct_detail.BillableStatus = "test"
self.assertEquals(str(acct_detail), "test")
class DescriptionOnlyLineTests(unittest.TestCase):
def test_unicode(self):
line = DescriptionOnlyLine()
self.assertEquals(line.DetailType, "DescriptionLineDetail")
self.assertEquals(line.DescriptionLineDetail, None)
| {
"content_hash": "1ddf35186cf17896051eb09b3e5dd606",
"timestamp": "",
"source": "github",
"line_count": 136,
"max_line_length": 119,
"avg_line_length": 32.904411764705884,
"alnum_prop": 0.7275977653631285,
"repo_name": "porn/python-quickbooks",
"id": "5a7dd009d432c3dbca0d90416a91aee210b92f0d",
"size": "4475",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/unit/objects/test_detailline.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "164"
},
{
"name": "Python",
"bytes": "208523"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
import os
import logging
from multiprocessing.pool import Pool
from traceback import print_exc
from lxml import etree
from django.db import transaction
from django.conf import settings
from docutil.str_util import clean_breaks, normalize
from docutil.etree_util import clean_tree, get_word_count, XPathList,\
SingleXPath, get_word_count_text, get_text_context, get_sentence,\
get_complex_text
from docutil.url_util import get_relative_url, get_path
from docutil.commands_util import chunk_it, import_clazz, get_encoding
from docutil.progress_monitor import NullProgressMonitor
from codebase.models import DOCUMENT_SOURCE
from codebase.actions import get_project_code_words, get_default_kind_dict,\
parse_single_code_references, get_java_strategies,\
get_default_filters, classify_code_snippet
from doc.models import Document, Page, Section
DEFAULT_POOL_SIZE = 4
logger = logging.getLogger("recodoc.doc.parser.generic")
@transaction.autocommit
def sub_process_parse(pinput):
try:
# Unecessary if already closed by parent process.
# But it's ok to be sure.
#print('In subprocess')
from django.db import connection
connection.close()
from django.core.cache import cache
cache.close()
(parser_clazz, doc_pk, parse_refs, pages) = pinput
parser = import_clazz(parser_clazz)(doc_pk)
#print('Got input')
#print('Input: {0}'.format(pinput))
for page_input in pages:
#print('Considering page {0}'.format(page_input))
if page_input is not None:
(local_path, page_url) = page_input
# Do not parse gifs...
if local_path.endswith('.html'):
parser.parse_page(local_path, page_url, parse_refs)
return True
except Exception:
print_exc()
return False
finally:
# Manually close this connection
connection.close()
cache.close()
@transaction.autocommit
def parse(document, pages, parse_refs=True,
progress_monitor=NullProgressMonitor(),
pool_size=DEFAULT_POOL_SIZE):
progress_monitor.start('Parsing Pages', pool_size + 1)
progress_monitor.info('Pages: {0}'.format(len(pages)))
progress_monitor.info('Building code words cache')
get_project_code_words(document.project_release.project)
# Prepare input
pages = [(page.local_url, page.url) for page in
pages.values() if page.local_url is not None]
pages_chunks = chunk_it(pages, pool_size)
inputs = []
for pages_chunk in pages_chunks:
inputs.append((document.parser, document.pk, parse_refs, pages_chunk))
# Close connection to allow the new processes to create their own.
from django.db import connection
connection.close()
from django.core.cache import cache
cache.close()
# Split work
progress_monitor.info('Sending {0} chunks to worker pool'
.format(len(inputs)))
pool = Pool(pool_size)
for result in pool.imap_unordered(sub_process_parse, inputs, 1):
progress_monitor.work('Parsed 1/{0} of the pages'.\
format(pool_size), 1)
# Word Count
word_count = 0
for page in document.pages.all():
word_count += page.word_count
document.word_count = word_count
document.save()
progress_monitor.work('Counted Total Words', 1)
pool.close()
progress_monitor.done()
class ParserLoad(object):
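    '''Mutable per-page parsing state passed between the parsing steps.
    Holds the lxml tree of the page, the project code words, the collected
    sections and the parsing flags (parse_refs, mix_mode).
    '''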
def __init__(self):
self.tree = None
self.code_words = None
self.sections = None
self.parse_refs = True
self.mix_mode = False
class GenericParser(object):
POOL_SIZE = 4
xbody = SingleXPath("//body[1]")
'''Page body'''
xtitles = XPathList(['//h1[1]', '//title[1]', '//h2[1]'])
'''Page title'''
xtopsection = None
'''XPath to find the top-level section. Optional'''
xsections = None
'''XPath to find the sections in a page. Required'''
xsectiontitle = None
'''XPath to find a section title. Required'''
xcoderef = None
'''XPath to find single code references. Required'''
xcoderef_url = None
'''Text that must be found in a url to be considered a coderef.
Optional'''
xsnippet = None
'''XPath to find code snippets. Required'''
xparagraphs = None
'''XPath to find the text minus the snippets in a section. Required only
if mix_mode=True'''
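    # Illustrative sketch only (the XPath expressions and class name are
    # hypothetical, not taken from a shipped parser): a concrete subclass is
    # expected to bind the attributes above to the markup it handles, e.g.
    #
    #   class ExampleHtmlParser(GenericParser):
    #       xsections = SingleXPath("//div[@class='section']")
    #       xsectiontitle = SingleXPath("//div[@class='section']/h2[1]")
    #       xcoderef = SingleXPath("//tt | //a[@class='reference']")
    #       xsnippet = SingleXPath("//pre")
    #       xparagraphs = SingleXPath("//div[@class='section']/p")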
def __init__(self, document_pk):
self.document = Document.objects.get(pk=document_pk)
self.kinds = get_default_kind_dict()
self.kind_strategies = get_java_strategies()
def parse_page(self, page_local_path, page_url, parse_refs=True):
try:
#print('Starting parser {0}'.format(page_url))
relative_url = get_relative_url(get_path(page_local_path))
page = Page(url=page_url,
file_path=relative_url,
document=self.document)
page.save()
#print('After save {0}'.format(page_url))
load = ParserLoad()
load.parse_refs = parse_refs
self._build_code_words(load)
#print('Parser will be processing page {0}'.format(page_url))
self._process_page(page, load)
#print('Parser processed page {0}'.format(page_url))
except Exception:
print('An exception occurred in the parser {0}'.format(page_url))
print_exc()
def _build_code_words(self, load):
        # Build the project code words and store them on the parser load
load.code_words = \
get_project_code_words(self.document.project_release.project)
def _process_page(self, page, load):
load.tree = self.get_page_etree(page)
clean_tree(load.tree)
page.title = self._process_page_title(page, load)
page.title = clean_breaks(page.title, True)
body = self.xbody.get_element(load.tree)
body_elements = self.xbody.get_element_as_list(body)
page.word_count = get_word_count(body_elements)
page.xpath = load.tree.getpath(body)
page.save()
self._process_init_page(page, load)
check = self._check_parser(page, load)
if not check:
return
self._process_sections(page, load)
def get_page_text(self, page, complex_text=False):
tree = self.get_page_etree(page)
clean_tree(tree)
body = self.xbody.get_element(tree)
if complex_text:
text = normalize(get_complex_text(body))
else:
text = self.xbody.get_text(body)
return text
def get_page_etree(self, page):
page_path = os.path.join(settings.PROJECT_FS_ROOT, page.file_path)
page_file = open(page_path)
content = page_file.read()
page_file.close()
encoding = get_encoding(content)
parser = etree.HTMLParser(remove_comments=True, encoding=encoding)
tree = etree.fromstring(content, parser).getroottree()
return tree
def get_section_text(self, section, tree=None, complex_text=False):
if tree is None:
tree = self.get_page_etree(section.page)
element = tree.xpath(section.xpath)[0]
text = self.xsections.get_text(element, complex_text)
return text
def _process_page_title(self, page, load):
title = self.xtitles.get_text_from_parent(load.tree)
if title is None or title == '':
title = 'Default Title'
logger.warning('No title for page {0}'.format(page.file_path))
return title
def _process_init_page(self, page, load):
pass
def _check_parser(self, page, load):
check = True
if self.xsections is None:
logger.error('xsections field needs to be defined.')
return False
elif self.xsectiontitle is None:
logger.error('xsectiontitle field needs to be defined.')
return False
        elif self.xparagraphs is None and load.mix_mode:
            logger.error('xparagraphs needs to be defined if mix_mode is on')
            return False
        return check
def _process_sections(self, page, load):
sections = []
sections_number = {}
if self.xtopsection is not None:
section_element = self.xtopsection.get_element(load.tree)
if section_element is not None and self._is_top_section(page, load,
section_element):
text = self.xtopsection.get_text(section_element)
section = self._create_section(page, load, section_element,
text)
if section is not None:
sections.append(section)
sections_number[section.number] = section
section_elements = self.xsections.get_elements(load.tree)
for section_element in section_elements:
if self._is_section(page, load, section_element):
text = self.xsections.get_text(section_element)
section = self._create_section(page, load, section_element,
text)
if section is not None:
sections.append(section)
sections_number[section.number] = section
self._find_section_parent(page, load, sections, sections_number)
if load.parse_refs:
self._parse_section_references(page, load, sections)
def _is_top_section(self, page, load, section_element):
'''Indicates whether or not an element is a top section.'''
return True
def _is_section(self, page, load, section_element):
'''Indicates whether or not an element is a section.'''
return True
def _create_section(self, page, load, section_element, text):
tree = load.tree
title = \
self.xsectiontitle.get_text_from_parent(section_element).strip()
title = title.replace('\n', ' ').replace('\t', ' ').replace('\r', '')
xpath = tree.getpath(section_element)
number = \
self._get_section_number(page, load, section_element, title,
xpath).strip()
word_count = get_word_count_text(text)
if (len(title) > 500):
title = title[:497] + '...'
section = Section(
page=page,
title=title,
xpath=xpath,
file_path=page.file_path,
url=page.url,
number=number,
word_count=word_count)
if settings.SAVE_SECTION_TEXT:
section.text_content = text
section.save()
return section
def _get_section_number(self, page, load, section_element, title, xpath):
'''Returns the section number (e.g., 1.2.3.) of a section.'''
return '1'
def _find_section_parent(self, page, load, sections, sections_number):
pass
def _parse_section_references(self, page, load, sections):
s_code_references = []
snippets = []
# get code references
code_ref_elements = self.xcoderef.get_elements(load.tree)
for i, code_ref_element in enumerate(code_ref_elements):
self._add_code_ref(i, code_ref_element, page, load,
s_code_references)
# get snippets
snippet_elements = self.xsnippet.get_elements(load.tree)
for i, snippet_element in enumerate(snippet_elements):
self._add_code_snippet(i, snippet_element, page, load, snippets)
# Find section for each code reference
for code_reference in s_code_references:
self._find_section(code_reference, sections, page, load)
# Find snippet for each code reference
for snippet in snippets:
self._find_section(snippet, sections, page, load)
# Process sections' title
for section in sections:
self._process_title_references(page, load, section)
# If mix mode, analyze the text of each section.
if load.mix_mode:
for section in sections:
if self._process_mix_mode_section(page, load, section):
self._process_mix_mode(page, load, section)
def _is_valid_code_ref(self, code_ref_element, load):
if code_ref_element.tag == 'a':
if code_ref_element.getparent().tag == 'pre':
return False
if 'href' not in code_ref_element.attrib:
return False
elif len(code_ref_element) > 0 and code_ref_element[0].tag in \
{'tt', 'code', 'pre', 'span', 'em', 'b', 'strong', 'i'}:
return False
elif self.xcoderef_url is not None and \
code_ref_element.attrib['href'].\
find(self.xcoderef_url) == -1:
return False
return True
def _add_code_ref(self, index, code_ref_element, page, load,
s_code_references):
# If the code ref is a link, filter the link to ensure that
# it is a real code ref.
if not self._is_valid_code_ref(code_ref_element, load):
return
text = self.xcoderef.get_text(code_ref_element)
text = clean_breaks(text).strip()
# Not significant
if len(text) < 2 or text.isdigit():
return
text_context = get_text_context(code_ref_element)
sentence = get_sentence(code_ref_element, text, text_context)
(text, kind_hint) = self._get_code_ref_kind(code_ref_element, text)
xpath = load.tree.getpath(code_ref_element)
for code in parse_single_code_references(text, kind_hint,
self.kind_strategies, self.kinds):
code.xpath = xpath
code.file_path = page.file_path
code.source = DOCUMENT_SOURCE
code.index = index
code.sentence = sentence
code.paragraph = text_context
code.project = self.document.project_release.project
code.project_release = self.document.project_release
code.resource = self.document
code.save()
s_code_references.append(code)
def _add_code_snippet(self, index, snippet_element, page, load, snippets):
text = self.xsnippet.get_text(snippet_element)
xpath = load.tree.getpath(snippet_element)
snippet = classify_code_snippet(text, get_default_filters())
if snippet is None:
return
snippet.xpath = xpath
snippet.file_path = page.file_path
snippet.source = DOCUMENT_SOURCE
snippet.index = index
snippet.project = self.document.project_release.project
snippet.project_release = self.document.project_release
snippet.resource = self.document
snippet.save()
snippets.append(snippet)
def _find_section(self, reference, sections, page, load):
parent_section = None
max_len = 0
for section in sections:
section_len = len(section.xpath)
if reference.xpath.startswith(section.xpath) and \
section_len > max_len:
parent_section = section
max_len = section_len
        if parent_section is not None:
reference.local_context = parent_section
reference.mid_context = self._get_mid_context(parent_section)
reference.global_context = parent_section.page
reference.resource = self.document
reference.save()
else:
content = None
try:
content = reference.content
except Exception:
content = 'SNIPPET'
logger.debug('orphan ref {0}, path {1}, page {2}'
.format(content, reference.xpath, page.title))
            # Delete now, otherwise it won't be deleted when clearing the document.
reference.delete()
def _process_title_references(self, page, load, section):
text_context = section.title
sentence = section.title
kind_hint = self.kinds['unknown']
xpath = section.xpath
for code in parse_single_code_references(sentence, kind_hint,
self.kind_strategies, self.kinds, strict=True):
code.xpath = xpath
code.file_path = page.file_path
code.source = DOCUMENT_SOURCE
code.index = -100000
code.sentence = sentence
code.paragraph = text_context
code.project = self.document.project_release.project
code.project_release = self.document.project_release
code.title_context = section
code.local_context = section
code.mid_context = self._get_mid_context(section)
code.global_context = page
code.resource = self.document
code.save()
def _process_mix_mode_section(self, page, load, section):
return True
def _process_mix_mode(self, page, load, section):
section_element = load.tree.xpath(section.xpath)[0]
section_text = self.xparagraphs.get_text(section_element)
#print('\n\nDEBUG: {0}\n{1}\n\n'.format(section.title,
#section_text).encode('utf8'))
section_refs = section.code_references.all()
existing_refs = [code_ref.content for code_ref in section_refs]
kind_hint = self.kinds['unknown']
mid_context = self._get_mid_context(section)
for i, code in enumerate(
parse_single_code_references(
section_text, kind_hint, self.kind_strategies,
self.kinds, find_context=True, strict=True,
existing_refs=existing_refs)):
code.xpath = section.xpath
code.file_path = page.file_path
code.index = 1000 + i
code.project = self.document.project_release.project
code.project_release = self.document.project_release
code.local_context = section
code.mid_context = mid_context
code.global_context = page
code.resource = self.document
code.save()
def _get_code_ref_kind(self, code_ref_tag, text):
kind_hint = self.kinds['unknown']
if 'class' in code_ref_tag.attrib:
clazz = code_ref_tag.attrib['class']
if clazz.find('method') > -1:
kind_hint = self.kinds['method']
elif clazz.find('class') > -1 or clazz.find('interface') > -1 or \
clazz.find('type') > -1:
kind_hint = self.kinds['class']
return (text, kind_hint)
def _get_mid_context(self, section):
        '''Returns the mid-level section.
        e.g., for {3., 3.1, 3.1.1} this returns 3.1
        '''
if section is None:
return None
elif section.parent is None:
return None
if section.parent.parent is None:
return section.parent
else:
return self._get_mid_context(section.parent)
| {
"content_hash": "c28907d16d5c672a9c271136f6a13089",
"timestamp": "",
"source": "github",
"line_count": 521,
"max_line_length": 79,
"avg_line_length": 36.82341650671785,
"alnum_prop": 0.5946833463643472,
"repo_name": "bartdag/recodoc2",
"id": "63101675103f44a4c09c4df9e6567e662a7cdcca",
"size": "19185",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "recodoc2/apps/doc/parser/generic_parser.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "5581"
},
{
"name": "HTML",
"bytes": "32211467"
},
{
"name": "Java",
"bytes": "13646"
},
{
"name": "Perl",
"bytes": "503"
},
{
"name": "Python",
"bytes": "717834"
}
],
"symlink_target": ""
} |
'''
Created on 20 Apr 2010
logging options : INFO, DEBUG
hash_method : content, timestamp
@author: Chris Filo Gorgolewski
'''
import ConfigParser
from json import load, dump
import os
import shutil
from StringIO import StringIO
from warnings import warn
from ..external import portalocker
homedir = os.environ['HOME']
default_cfg = """
[logging]
workflow_level = INFO
filemanip_level = INFO
interface_level = INFO
log_to_file = false
log_directory = %s
log_size = 16384000
log_rotate = 4
[execution]
create_report = true
crashdump_dir = %s
display_variable = :1
hash_method = timestamp
job_finished_timeout = 5
keep_inputs = false
local_hash_check = true
matplotlib_backend = Agg
plugin = Linear
remove_node_directories = false
remove_unnecessary_outputs = true
single_thread_matlab = true
stop_on_first_crash = false
stop_on_first_rerun = false
use_relative_paths = false
stop_on_unknown_version = false
write_provenance = false
parameterize_dirs = true
poll_sleep_duration = 60
[check]
interval = 1209600
""" % (homedir, os.getcwd())
class NipypeConfig(object):
"""Base nipype config class
"""
def __init__(self, *args, **kwargs):
self._config = ConfigParser.ConfigParser()
config_dir = os.path.expanduser('~/.nipype')
if not os.path.exists(config_dir):
os.makedirs(config_dir)
old_config_file = os.path.expanduser('~/.nipype.cfg')
new_config_file = os.path.join(config_dir, 'nipype.cfg')
# To be deprecated in two releases
if os.path.exists(old_config_file):
if os.path.exists(new_config_file):
                msg=("Detected presence of both old (%s, used by versions "
                     "< 0.5.2) and new (%s) config files. This version will "
                     "proceed with the new one. We advise merging the settings "
                     "and removing the old config file if you are not planning to "
"use previous releases of nipype.") % (old_config_file,
new_config_file)
warn(msg)
else:
warn("Moving old config file from: %s to %s" % (old_config_file,
new_config_file))
shutil.move(old_config_file, new_config_file)
self.data_file = os.path.join(config_dir, 'nipype.json')
self._config.readfp(StringIO(default_cfg))
self._config.read([new_config_file, old_config_file, 'nipype.cfg'])
def set_default_config(self):
self._config.readfp(StringIO(default_cfg))
def enable_debug_mode(self):
"""Enables debug configuration
"""
self._config.set('execution', 'stop_on_first_crash', 'true')
self._config.set('execution', 'remove_unnecessary_outputs', 'false')
self._config.set('execution', 'keep_inputs', 'true')
self._config.set('logging', 'workflow_level', 'DEBUG')
self._config.set('logging', 'interface_level', 'DEBUG')
def set_log_dir(self, log_dir):
"""Sets logging directory
This should be the first thing that is done before any nipype class
with logging is imported.
"""
self._config.set('logging', 'log_directory', log_dir)
def get(self, section, option):
return self._config.get(section, option)
def set(self, section, option, value):
return self._config.set(section, option, value)
def getboolean(self, section, option):
return self._config.getboolean(section, option)
def has_option(self, section, option):
return self._config.has_option(section, option)
@property
def _sections(self):
return self._config._sections
def get_data(self, key):
if not os.path.exists(self.data_file):
return None
with open(self.data_file, 'rt') as file:
portalocker.lock(file, portalocker.LOCK_EX)
datadict = load(file)
if key in datadict:
return datadict[key]
return None
def save_data(self, key, value):
datadict = {}
if os.path.exists(self.data_file):
with open(self.data_file, 'rt') as file:
portalocker.lock(file, portalocker.LOCK_EX)
datadict = load(file)
with open(self.data_file, 'wt') as file:
portalocker.lock(file, portalocker.LOCK_EX)
datadict[key] = value
dump(datadict, file)
def update_config(self, config_dict):
for section in ['execution', 'logging', 'check']:
if section in config_dict:
for key, val in config_dict[section].items():
if not key.startswith('__'):
self._config.set(section, key, str(val))
def update_matplotlib(self):
import matplotlib
matplotlib.use(self.get('execution', 'matplotlib_backend'))
def enable_provenance(self):
self._config.set('execution', 'write_provenance', 'true')
self._config.set('execution', 'hash_method', 'content')
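# Minimal usage sketch (directory value is illustrative): the config object is
# instantiated once and then queried or tweaked through the accessors above.
#
#   config = NipypeConfig()
#   config.set_log_dir('/tmp/nipype_logs')
#   config.enable_debug_mode()
#   if config.getboolean('execution', 'create_report'):
#       backend = config.get('execution', 'matplotlib_backend')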
| {
"content_hash": "1fa292d56c21873f648f26ff3bac5f62",
"timestamp": "",
"source": "github",
"line_count": 155,
"max_line_length": 81,
"avg_line_length": 32.94193548387097,
"alnum_prop": 0.6069330199764983,
"repo_name": "Leoniela/nipype",
"id": "ef83094b0ee31f1abaeee62b5f463862bbbd6815",
"size": "5220",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "nipype/utils/config.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "HTML",
"bytes": "9823"
},
{
"name": "KiCad",
"bytes": "3797"
},
{
"name": "Makefile",
"bytes": "1854"
},
{
"name": "Matlab",
"bytes": "5018"
},
{
"name": "Python",
"bytes": "3767360"
},
{
"name": "Tcl",
"bytes": "43408"
}
],
"symlink_target": ""
} |
"""
MINDBODY Public API
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) # noqa: E501
OpenAPI spec version: v6
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class RemoveFromWaitlistRequest(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'waitlist_entry_ids': 'list[int]'
}
attribute_map = {
'waitlist_entry_ids': 'WaitlistEntryIds'
}
def __init__(self, waitlist_entry_ids=None): # noqa: E501
"""RemoveFromWaitlistRequest - a model defined in Swagger""" # noqa: E501
self._waitlist_entry_ids = None
self.discriminator = None
self.waitlist_entry_ids = waitlist_entry_ids
@property
def waitlist_entry_ids(self):
"""Gets the waitlist_entry_ids of this RemoveFromWaitlistRequest. # noqa: E501
A list of waiting list IDs to remove from waiting lists. # noqa: E501
:return: The waitlist_entry_ids of this RemoveFromWaitlistRequest. # noqa: E501
:rtype: list[int]
"""
return self._waitlist_entry_ids
@waitlist_entry_ids.setter
def waitlist_entry_ids(self, waitlist_entry_ids):
"""Sets the waitlist_entry_ids of this RemoveFromWaitlistRequest.
A list of waiting list IDs to remove from waiting lists. # noqa: E501
:param waitlist_entry_ids: The waitlist_entry_ids of this RemoveFromWaitlistRequest. # noqa: E501
:type: list[int]
"""
if waitlist_entry_ids is None:
raise ValueError("Invalid value for `waitlist_entry_ids`, must not be `None`") # noqa: E501
self._waitlist_entry_ids = waitlist_entry_ids
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(RemoveFromWaitlistRequest, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, RemoveFromWaitlistRequest):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
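# Illustrative usage (the IDs are hypothetical): build the request body and
# convert it to the plain dict the generated API client serialises to JSON.
#
#   request = RemoveFromWaitlistRequest(waitlist_entry_ids=[101, 102])
#   payload = request.to_dict()   # {'waitlist_entry_ids': [101, 102]}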
| {
"content_hash": "1fd05f9345244e7f7fdacfb29d3f088f",
"timestamp": "",
"source": "github",
"line_count": 116,
"max_line_length": 119,
"avg_line_length": 31.586206896551722,
"alnum_prop": 0.579967248908297,
"repo_name": "mindbody/API-Examples",
"id": "cb5a7a0e9c2947bb5a0c4579dc4144e9a82c0f8f",
"size": "3681",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "SDKs/Python/swagger_client/models/remove_from_waitlist_request.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "PHP",
"bytes": "3610259"
},
{
"name": "Python",
"bytes": "2338642"
},
{
"name": "Ruby",
"bytes": "2284441"
},
{
"name": "Shell",
"bytes": "5058"
}
],
"symlink_target": ""
} |
import pickle
import pytest
from pyrsistent import CheckedPSet, PSet, InvariantException, CheckedType, CheckedPVector, CheckedValueTypeError
class Naturals(CheckedPSet):
__type__ = int
__invariant__ = lambda value: (value >= 0, 'Negative value')
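# Naturals is the shared fixture for these tests: a CheckedPSet restricted to
# non-negative ints, rejecting other value types with CheckedValueTypeError
# and negative values with an InvariantException whose error message is
# 'Negative value'.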
def test_instantiate():
x = Naturals([1, 2, 3, 3])
assert list(x) == [1, 2, 3]
assert isinstance(x, Naturals)
assert isinstance(x, PSet)
assert isinstance(x, CheckedType)
def test_add():
x = Naturals()
x2 = x.add(1)
assert list(x2) == [1]
assert isinstance(x2, Naturals)
def test_invalid_type():
with pytest.raises(CheckedValueTypeError):
Naturals([1, 2.0])
def test_breaking_invariant():
try:
Naturals([1, -1])
assert False
except InvariantException as e:
assert e.invariant_errors == ['Negative value']
def test_repr():
x = Naturals([1, 2])
assert str(x) == 'Naturals([1, 2])'
def test_default_serialization():
x = Naturals([1, 2])
assert x.serialize() == set([1, 2])
class StringNaturals(Naturals):
@staticmethod
def __serializer__(format, value):
return format.format(value)
def test_custom_serialization():
x = StringNaturals([1, 2])
assert x.serialize("{0}") == set(["1", "2"])
class NaturalsVector(CheckedPVector):
__type__ = Naturals
def test_multi_level_serialization():
x = NaturalsVector.create([[1, 2], [3, 4]])
assert str(x) == "NaturalsVector([Naturals([1, 2]), Naturals([3, 4])])"
sx = x.serialize()
assert sx == [set([1, 2]), set([3, 4])]
assert isinstance(sx[0], set)
def test_create():
assert Naturals.create([1, 2]) == Naturals([1, 2])
def test_evolver_returns_same_instance_when_no_updates():
x = Naturals([1, 2])
assert x.evolver().persistent() is x
def test_pickling():
x = Naturals([1, 2])
y = pickle.loads(pickle.dumps(x, -1))
assert x == y
assert isinstance(y, Naturals) | {
"content_hash": "1b8f3610c1b2d30f17f46057e1648d32",
"timestamp": "",
"source": "github",
"line_count": 80,
"max_line_length": 112,
"avg_line_length": 24.3,
"alnum_prop": 0.6244855967078189,
"repo_name": "jml/pyrsistent",
"id": "c48c69cb1c81f9239d58ec53367d44e7a631494a",
"size": "1944",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/checked_set_test.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "47761"
},
{
"name": "Python",
"bytes": "214597"
},
{
"name": "Shell",
"bytes": "247"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import
from nark import *
class IDb():
def connect(self):
""" Make a connection to the db engine """
pass
def disconnect(self):
""" Close the connection to the db """
pass
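# Minimal sketch of a concrete implementation (assumes the standard sqlite3
# module; the class name and default path are illustrative, not part of this
# package):
#
#   import sqlite3
#
#   class SqliteDb(IDb):
#       def __init__(self, path=':memory:'):
#           self._path = path
#           self._conn = None
#       def connect(self):
#           self._conn = sqlite3.connect(self._path)
#       def disconnect(self):
#           if self._conn is not None:
#               self._conn.close()
#               self._conn = None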
| {
"content_hash": "d791b727c5e99c5059a4a090aa08fd2e",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 46,
"avg_line_length": 17.53846153846154,
"alnum_prop": 0.631578947368421,
"repo_name": "shadowmint/py-test-watcher",
"id": "c26cfecce88d39daace52efe046d13a8a8a6f98d",
"size": "804",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/base/idb.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "JavaScript",
"bytes": "109878"
},
{
"name": "Python",
"bytes": "488664"
},
{
"name": "Shell",
"bytes": "544"
}
],
"symlink_target": ""
} |
import datetime
import json
from flask.ext.jwt import current_identity, jwt_required
from flask_restplus import Namespace, Resource, fields, reqparse
from packr.models import Order, OrderStatus, StatusType
api = Namespace('update',
description='Operations related to updating an order')
update_status = api.model('UpdateStatus', {
'con_number': fields.Integer(readOnly=True,
description='The consignment number'),
'status': fields.String(readOnly=True,
description='The new status')
})
update_driver = api.model('UpdateDriver', {
'con_number': fields.Integer(readOnly=True,
description='The consignment number'),
'adminComments': fields.String(readOnly=True,
description='The admin comments')
})
update_admin = api.model('UpdateAdmin', {
'con_number': fields.Integer(readOnly=True,
description='The consignment number'),
'driver': fields.String(readOnly=True,
description='The driver'),
'eta': fields.String(readOnly=True,
description='The eta'),
'cost': fields.String(readOnly=True,
description='The cost')
})
@api.route('/status')
class UpdateStatus(Resource):
@api.expect(update_status)
@api.response(204, 'Successfully updated status.')
@jwt_required()
def post(self):
req_parse = reqparse.RequestParser(bundle_errors=True)
req_parse.add_argument('con_number', type=int, required=True,
location='json')
req_parse.add_argument('status', type=str, required=True,
location='json')
args = req_parse.parse_args()
con_number = args.get('con_number', -1)
status = json.loads(args.get('status'))
if con_number == -1:
return {'message': {'con_number':
'No consignment number provided'}}, 400
# Find the consignment note information.
order = Order.query.filter_by(id=con_number).first()
if not order:
return {'description': 'Unknown consignment number.'}, 404
if current_identity.role.role_name != 'admin':
if current_identity.role.role_name == 'driver':
if order.driver_id != current_identity.id:
return {'description': 'Access denied.'}, 401
else:
return {'description': 'Access denied.'}, 401
status_type = StatusType.query.filter_by(name=status['status']).first()
order_status = OrderStatus(status=status_type,
address=status['address'],
time=datetime.datetime.utcnow(),
order_id=order.id)
order_status.save()
order.status.append(order_status)
order.save()
return {'message': {'description': 'Updated status'}}, 201
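# Illustrative request body for POST /update/status (all values hypothetical).
# Note that 'status' is itself a JSON-encoded string, matching the
# json.loads() call above:
#
#   {
#     "con_number": 42,
#     "status": "{\"status\": \"In Transit\", \"address\": \"12 Example St\"}"
#   }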
@api.route("/admin")
class UpdateAdmin(Resource):
@api.expect(update_admin)
@api.response(204, 'Successfully updated admin details.')
@jwt_required()
def post(self):
req_parse = reqparse.RequestParser(bundle_errors=True)
req_parse.add_argument('con_number', type=int, required=True,
location='json')
req_parse.add_argument('driver', type=str, required=True,
location='json')
req_parse.add_argument('eta', type=str, required=True,
location='json')
req_parse.add_argument('cost', type=int, required=True,
location='json')
args = req_parse.parse_args()
con_number = args.get('con_number', -1)
driver = args.get('driver', None)
eta = args.get('eta')
cost = args.get('cost', 0)
if con_number == -1:
return {'message': {'con_number':
'No consignment number provided'}}, 400
if current_identity.role.role_name != 'admin':
return {'description': 'Access denied.'}, 401
# Find the consignment note information.
order = Order.query.filter_by(id=con_number).first()
if not order:
return {'description': 'Unknown consignment number.'}, 404
order.driver_id = driver
order.cost = cost
order.eta = datetime.datetime.strptime(eta, "%Y-%m-%dT%H:%M:%S.%fZ") \
.date()
order.save()
return {'message': {'description': 'Updated consignment'}}, 201
@api.route("/driver")
class UpdateDriver(Resource):
@api.expect(update_driver)
@api.response(204, 'Successfully updated driver details.')
@jwt_required()
def post(self):
req_parse = reqparse.RequestParser(bundle_errors=True)
req_parse.add_argument('con_number', type=int, required=True,
location='json')
req_parse.add_argument('adminComments', type=str, required=True,
location='json')
args = req_parse.parse_args()
con_number = args.get('con_number', -1)
admin_comments = args.get('adminComments')
if con_number == -1:
return {'message': {'con_number':
'No consignment number provided'}}, 400
# Find the consignment note information.
order = Order.query.filter_by(id=con_number).first()
if not order:
return {'description': 'Unknown consignment number.'}, 404
if current_identity.role.role_name != 'admin':
if current_identity.role.role_name == 'driver':
if order.driver_id != current_identity.id:
return {'description': 'Access denied.'}, 401
else:
return {'description': 'Access denied.'}, 401
order.driver_notes = admin_comments
order.save()
return {'message': {'description': 'Updated consignment'}}, 201
| {
"content_hash": "93897a839bd57efc26e94ea85c0151e5",
"timestamp": "",
"source": "github",
"line_count": 168,
"max_line_length": 79,
"avg_line_length": 36.49404761904762,
"alnum_prop": 0.5605937041265698,
"repo_name": "KnightHawk3/packr",
"id": "8e1d242a8fb27986cc9c5bfa295eb180cb23395a",
"size": "6131",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "packr/api/update.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "551"
},
{
"name": "HTML",
"bytes": "67549"
},
{
"name": "JavaScript",
"bytes": "44675"
},
{
"name": "Perl",
"bytes": "4351"
},
{
"name": "Python",
"bytes": "68604"
},
{
"name": "Ruby",
"bytes": "279"
},
{
"name": "Shell",
"bytes": "1049"
}
],
"symlink_target": ""
} |
"""
SDoc
Copyright 2016 Set Based IT Consultancy
Licence MIT
"""
# ----------------------------------------------------------------------------------------------------------------------
from sdoc.sdoc2.NodeStore import NodeStore
from sdoc.sdoc2.formatter.html.HtmlFormatter import HtmlFormatter
class ItemizeHtmlFormatter(HtmlFormatter):
"""
HtmlFormatter for generating HTML code for itemize.
"""
# ------------------------------------------------------------------------------------------------------------------
def generate(self, node, file):
"""
Generates the HTML code for an itemize node.
:param sdoc.sdoc2.node.ItemizeNode.ItemizeNode node: The itemize node.
:param file file: The output file.
"""
file.write('<ul>')
HtmlFormatter.generate(self, node, file)
file.write('</ul>')
# ----------------------------------------------------------------------------------------------------------------------
NodeStore.register_formatter('itemize', 'html', ItemizeHtmlFormatter)
| {
"content_hash": "91f8ef83b1207ee6de643ffe22a85b43",
"timestamp": "",
"source": "github",
"line_count": 32,
"max_line_length": 120,
"avg_line_length": 33.40625,
"alnum_prop": 0.4490177736202058,
"repo_name": "OlegKlimenko/py-sdoc",
"id": "1d2175185aa2e2c9f4c3722c055c60d3c3d9c03c",
"size": "1069",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "sdoc/sdoc2/formatter/html/ItemizeHtmlFormatter.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "ANTLR",
"bytes": "7975"
},
{
"name": "Python",
"bytes": "405351"
}
],
"symlink_target": ""
} |
from configurator import config_manager
from configurator.config import Option
opts = []
opts.append(Option('host', 'host', 'h', False, 'localhost', 'help...'))
opts.append(Option('vcip', 'vcip', 'v', True, True, 'help...'))
opts.append(Option('Virgo Directory', 'virgoDir', 'h', False, 'localhost', 'help...'))
opts.append(Option('Virgo Dir 1', 'virgoDir1', 'h', False, 'localhost', 'help...'))
opts.append(Option('NVM bin', 'NVM_BIN', 'h', False, 'localhost', 'help...'))
config_manager.setup(opts)
res = config_manager.config
print('=====')
print(res)
| {
"content_hash": "7f4545309681db2f3ca1cc915645d7ca",
"timestamp": "",
"source": "github",
"line_count": 15,
"max_line_length": 86,
"avg_line_length": 37.06666666666667,
"alnum_prop": 0.6600719424460432,
"repo_name": "kaleksandrov/python-configurator",
"id": "e8b65ec6ea83ccf6de3cf5ff69e1ed80c51ac56f",
"size": "556",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test-config.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "6680"
}
],
"symlink_target": ""
} |
"""
Update packages and product definition source troves managed by Conary
"""
from rbuild import pluginapi
from rbuild.productstore.decorators import requiresStage
from rbuild.pluginapi import command
class PromoteCommand(pluginapi.command.BaseCommand):
"""Promote groups and packages to the next stage"""
commands = ['promote']
help = 'Promote groups and packages to next stage'
docs = {
'info' : 'Show what would be done but do not actually promote',
}
def addLocalParameters(self, argDef):
argDef['info'] = command.NO_PARAM
def runCommand(self, handle, argSet, args):
"""
Process the command line provided for this plugin
@param handle: context handle
@type handle: rbuild.handle.RbuildHandle
@param args: command-line arguments
@type args: iterable
"""
self.requireParameters(args)
handle.Promote.promoteAll(infoOnly=argSet.get('info', False))
class Promote(pluginapi.Plugin):
"""
Promote plugin
"""
name = 'promote'
def registerCommands(self):
"""
Register the command-line handling portion of the promote plugin.
"""
self.handle.Commands.registerCommand(PromoteCommand)
@requiresStage
def promoteAll(self, infoOnly=False):
"""
Promote all appropriate troves from the currently active stage
to the next stage.
"""
store, product = self.handle.productStore, self.handle.product
ui = self.handle.ui
cny = self.handle.facade.conary
activeStage = store.getActiveStageName()
activeLabel = product.getLabelForStage(activeStage)
nextStage = store.getNextStageName(activeStage)
nextLabel = product.getLabelForStage(nextStage)
# Collect a list of groups to promote.
groupSpecs = [ '%s[%s]' % x for x in store.getGroupFlavors() ]
ui.progress('Preparing to promote %d troves', len(groupSpecs))
allTroves = cny._findTrovesFlattened(groupSpecs, activeLabel)
# Get a list of all labels that are in the product's search
# path (including subtroves).
platformLabels = set()
platformTroves = []
for searchElement in product.getGroupSearchPaths():
if searchElement.troveName:
version = searchElement.label
if searchElement.version:
version += '/' + searchElement.version
platformTroves.append((searchElement.troveName, version, None))
elif searchElement.label:
platformLabels.add(searchElement.label)
platformLabels.update(cny.getAllLabelsFromTroves(platformTroves))
# Now get a list of all labels that are referenced by the
# groups to be promoted but are not in the platform. These will
# be "flattened" to the target label.
flattenLabels = cny.getAllLabelsFromTroves(allTroves) - platformLabels
fromTo = product.getPromoteMapsForStages(activeStage, nextStage,
flattenLabels=flattenLabels)
ui.info("The following promote map will be used:")
for fromLabel, toBranch in sorted(fromTo.iteritems()):
ui.info(" %s -- %s", fromLabel, toBranch)
# Now promote.
ui.progress('Promoting %d troves', len(groupSpecs))
promotedList = cny.promoteGroups(allTroves, fromTo, infoOnly=infoOnly)
promotedList = [ x for x in promotedList
if (':' not in x[0]
or x[0].split(':')[-1] == 'source') ]
promotedList = [ '%s=%s[%s]' % (x[0], x[1].split('/')[-1], x[2])
for x in promotedList ]
promotedList.sort()
promotedTroveList = '\n '.join(promotedList)
if infoOnly:
ui.write('The following would be promoted to %s:\n %s',
nextStage, promotedTroveList)
else:
ui.write('Promoted to %s:\n %s', nextStage, promotedTroveList)
return promotedList, nextStage
| {
"content_hash": "1f23ea0e4c3e20546c4ba5bb54ec8b1b",
"timestamp": "",
"source": "github",
"line_count": 104,
"max_line_length": 79,
"avg_line_length": 39.32692307692308,
"alnum_prop": 0.6256723716381418,
"repo_name": "sassoftware/rbuild",
"id": "de117581f8624e7dedd372f1023c2ef21c496436",
"size": "4676",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "plugins/promote.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Cucumber",
"bytes": "355"
},
{
"name": "Makefile",
"bytes": "17287"
},
{
"name": "Python",
"bytes": "997556"
},
{
"name": "Shell",
"bytes": "4530"
}
],
"symlink_target": ""
} |
"""Learn a Markov chain with an RNN and sample from it."""
from __future__ import print_function
import argparse
import logging
import pprint
import sys
import dill
import numpy
import theano
from theano import tensor
from blocks.bricks import Tanh
from blocks.bricks.recurrent import GatedRecurrent
from blocks.bricks.sequence_generators import (
SequenceGenerator, LinearReadout, SoftmaxEmitter, LookupFeedback)
from blocks.graph import ComputationGraph
from blocks.datasets import DataStream
from blocks.datasets.schemes import ConstantScheme
from blocks.algorithms import GradientDescent, SteepestDescent
from blocks.initialization import Orthogonal, IsotropicGaussian, Constant
from blocks.monitoring import aggregation
from blocks.extensions import FinishAfter, Printing
from blocks.extensions.saveload import SerializeMainLoop
from blocks.extensions.monitoring import TrainingDataMonitoring
from blocks.main_loop import MainLoop
from blocks.select import Selector
from examples.markov_chain.dataset import MarkovChainDataset
sys.setrecursionlimit(10000)
floatX = theano.config.floatX
logger = logging.getLogger(__name__)
def main(mode, save_path, steps, num_batches):
num_states = MarkovChainDataset.num_states
if mode == "train":
# Experiment configuration
rng = numpy.random.RandomState(1)
batch_size = 50
seq_len = 100
dim = 10
feedback_dim = 8
# Build the bricks and initialize them
transition = GatedRecurrent(name="transition", activation=Tanh(),
dim=dim)
generator = SequenceGenerator(
LinearReadout(readout_dim=num_states, source_names=["states"],
emitter=SoftmaxEmitter(name="emitter"),
feedbacker=LookupFeedback(
num_states, feedback_dim, name='feedback'),
name="readout"),
transition,
weights_init=IsotropicGaussian(0.01), biases_init=Constant(0),
name="generator")
generator.push_initialization_config()
transition.weights_init = Orthogonal()
generator.initialize()
# Give an idea of what's going on.
logger.info("Parameters:\n" +
pprint.pformat(
[(key, value.get_value().shape) for key, value
in Selector(generator).get_params().items()],
width=120))
logger.info("Markov chain entropy: {}".format(
MarkovChainDataset.entropy))
logger.info("Expected min error: {}".format(
-MarkovChainDataset.entropy * seq_len))
# Build the cost computation graph.
x = tensor.lmatrix('data')
cost = aggregation.mean(generator.cost(x[:, :]).sum(),
x.shape[1])
cost.name = "sequence_log_likelihood"
algorithm = GradientDescent(
cost=cost, params=list(Selector(generator).get_params().values()),
step_rule=SteepestDescent(0.001))
main_loop = MainLoop(
model=generator,
data_stream=DataStream(
MarkovChainDataset(rng, seq_len),
iteration_scheme=ConstantScheme(batch_size)),
algorithm=algorithm,
extensions=[FinishAfter(after_n_batches=num_batches),
TrainingDataMonitoring([cost], prefix="this_step",
after_every_batch=True),
TrainingDataMonitoring([cost], prefix="average",
every_n_batches=100),
SerializeMainLoop(save_path, every_n_batches=500),
Printing(every_n_batches=100)])
main_loop.run()
elif mode == "sample":
main_loop = dill.load(open(save_path, "rb"))
generator = main_loop.model
sample = ComputationGraph(generator.generate(
n_steps=steps, batch_size=1, iterate=True)).get_theano_function()
states, outputs, costs = [data[:, 0] for data in sample()]
numpy.set_printoptions(precision=3, suppress=True)
print("Generation cost:\n{}".format(costs.sum()))
freqs = numpy.bincount(outputs).astype(floatX)
freqs /= freqs.sum()
print("Frequencies:\n {} vs {}".format(freqs,
MarkovChainDataset.equilibrium))
trans_freqs = numpy.zeros((num_states, num_states), dtype=floatX)
for a, b in zip(outputs, outputs[1:]):
trans_freqs[a, b] += 1
trans_freqs /= trans_freqs.sum(axis=1)[:, None]
print("Transition frequencies:\n{}\nvs\n{}".format(
trans_freqs, MarkovChainDataset.trans_prob))
else:
assert False
if __name__ == "__main__":
logging.basicConfig(
level=logging.INFO,
format="%(asctime)s: %(name)s: %(levelname)s: %(message)s")
parser = argparse.ArgumentParser(
"Case study of generating a Markov chain with RNN.",
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument(
"mode", choices=["train", "sample"],
help="The mode to run. Use `train` to train a new model"
" and `sample` to sample a sequence generated by an"
" existing one.")
parser.add_argument(
"save_path", default="chain",
help="The path to save the training process.")
parser.add_argument(
"--steps", type=int, default=1000,
help="Number of steps to samples.")
parser.add_argument(
"--num-batches", default=1000, type=int,
help="Train on this many batches.")
args = parser.parse_args()
main(**vars(args))
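# Example invocations (the script and save paths are illustrative, not fixed
# by the code above):
#
#   python main.py train chain_model.pkl --num-batches 1000
#   python main.py sample chain_model.pkl --steps 1000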
| {
"content_hash": "a57ad850af0dc232edef3d282cc8f8fb",
"timestamp": "",
"source": "github",
"line_count": 146,
"max_line_length": 79,
"avg_line_length": 39.71232876712329,
"alnum_prop": 0.6105553639185927,
"repo_name": "vdumoulin/blocks-contrib",
"id": "4257d07bdd34f9074068eef4553595f699692b6e",
"size": "5820",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "examples/markov_chain/main.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "40131"
}
],
"symlink_target": ""
} |
from pytac.exceptions import PvException
import pytac.device
import pytest
import mock
def create_device(readback, setpoint):
    """Plain factory helper (not a pytest fixture: the tests call it directly)."""
    return pytac.device.Device(rb_pv=readback, sp_pv=setpoint,
                               cs=mock.MagicMock())
def test_set_device_value():
rb_pv = 'SR01A-PC-SQUAD-01:I'
sp_pv = 'SR01A-PC-SQUAD-01:SETI'
device1 = create_device(rb_pv, sp_pv)
device1.put_value(40)
device1._cs.put.assert_called_with(sp_pv, 40)
device2 = create_device(rb_pv, None)
with pytest.raises(PvException):
device2.put_value(40)
def test_get_device_value():
sp_pv = 'SR01A-PC-SQUAD-01:SETI'
device = create_device(None, sp_pv)
with pytest.raises(PvException):
device.get_value('non_existent')
with pytest.raises(PvException):
create_device(None, None)
| {
"content_hash": "d4073156bdf70d82f6cc14afceaf8001",
"timestamp": "",
"source": "github",
"line_count": 36,
"max_line_length": 75,
"avg_line_length": 24.02777777777778,
"alnum_prop": 0.6705202312138728,
"repo_name": "willrogers/pml",
"id": "ec3a766632cf7dd91bc8bfc8344958b6c5417b36",
"size": "865",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test/test_device.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Matlab",
"bytes": "12522"
},
{
"name": "Python",
"bytes": "45764"
}
],
"symlink_target": ""
} |
import math
import numbers
import re
import textwrap
from collections.abc import Iterator, Mapping
import sys
import traceback
from contextlib import contextmanager
import numpy as np
import pandas as pd
from pandas.api.types import (
is_categorical_dtype,
is_scalar,
is_sparse,
is_period_dtype,
is_datetime64tz_dtype,
is_interval_dtype,
)
# include these here for compat
from ._compat import ( # noqa: F401
PANDAS_VERSION,
PANDAS_GT_0240,
PANDAS_GT_0250,
PANDAS_GT_100,
PANDAS_GT_110,
HAS_INT_NA,
tm,
)
from .extensions import make_array_nonempty, make_scalar
from ..base import is_dask_collection
from ..core import get_deps
from ..local import get_sync
from ..utils import asciitable, is_arraylike, Dispatch, typename
from ..utils import is_dataframe_like as dask_is_dataframe_like
from ..utils import is_series_like as dask_is_series_like
from ..utils import is_index_like as dask_is_index_like
# register pandas extension types
from . import _dtypes # noqa: F401
def is_integer_na_dtype(t):
dtype = getattr(t, "dtype", t)
if HAS_INT_NA:
types = (
pd.Int8Dtype,
pd.Int16Dtype,
pd.Int32Dtype,
pd.Int64Dtype,
pd.UInt8Dtype,
pd.UInt16Dtype,
pd.UInt32Dtype,
pd.UInt64Dtype,
)
else:
types = ()
return isinstance(dtype, types)
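# For reference (assumes pandas >= 0.24, where the nullable integer dtypes
# exist): ``is_integer_na_dtype(pd.Int64Dtype())`` is True, while
# ``is_integer_na_dtype(np.dtype("int64"))`` is False.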
def shard_df_on_index(df, divisions):
""" Shard a DataFrame by ranges on its index
Examples
--------
>>> df = pd.DataFrame({'a': [0, 10, 20, 30, 40], 'b': [5, 4 ,3, 2, 1]})
>>> df
a b
0 0 5
1 10 4
2 20 3
3 30 2
4 40 1
>>> shards = list(shard_df_on_index(df, [2, 4]))
>>> shards[0]
a b
0 0 5
1 10 4
>>> shards[1]
a b
2 20 3
3 30 2
>>> shards[2]
a b
4 40 1
>>> list(shard_df_on_index(df, []))[0] # empty case
a b
0 0 5
1 10 4
2 20 3
3 30 2
4 40 1
"""
if isinstance(divisions, Iterator):
divisions = list(divisions)
if not len(divisions):
yield df
else:
divisions = np.array(divisions)
df = df.sort_index()
index = df.index
if is_categorical_dtype(index):
index = index.as_ordered()
indices = index.searchsorted(divisions)
yield df.iloc[: indices[0]]
for i in range(len(indices) - 1):
yield df.iloc[indices[i] : indices[i + 1]]
yield df.iloc[indices[-1] :]
_META_TYPES = "meta : pd.DataFrame, pd.Series, dict, iterable, tuple, optional"
_META_DESCRIPTION = """\
An empty ``pd.DataFrame`` or ``pd.Series`` that matches the dtypes and
column names of the output. This metadata is necessary for many algorithms
in dask dataframe to work. For ease of use, some alternative inputs are
also available. Instead of a ``DataFrame``, a ``dict`` of ``{name: dtype}``
or iterable of ``(name, dtype)`` can be provided (note that the order of
the names should match the order of the columns). Instead of a series, a
tuple of ``(name, dtype)`` can be used. If not provided, dask will try to
infer the metadata. This may lead to unexpected results, so providing
``meta`` is recommended. For more information, see
``dask.dataframe.utils.make_meta``.
"""
def insert_meta_param_description(*args, **kwargs):
"""Replace `$META` in docstring with param description.
If pad keyword is provided, will pad description by that number of
spaces (default is 8)."""
if not args:
return lambda f: insert_meta_param_description(f, **kwargs)
f = args[0]
indent = " " * kwargs.get("pad", 8)
body = textwrap.wrap(
_META_DESCRIPTION, initial_indent=indent, subsequent_indent=indent, width=78
)
descr = "{0}\n{1}".format(_META_TYPES, "\n".join(body))
if f.__doc__:
if "$META" in f.__doc__:
f.__doc__ = f.__doc__.replace("$META", descr)
else:
# Put it at the end of the parameters section
parameter_header = "Parameters\n%s----------" % indent[4:]
first, last = re.split("Parameters\\n[ ]*----------", f.__doc__)
parameters, rest = last.split("\n\n", 1)
f.__doc__ = "{0}{1}{2}\n{3}{4}\n\n{5}".format(
first, parameter_header, parameters, indent[4:], descr, rest
)
return f
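# A minimal usage sketch for the decorator above (the function name here is
# illustrative and not part of this module's API): the ``$META`` marker in the
# docstring is replaced with the shared ``meta`` parameter description at
# import time.
@insert_meta_param_description(pad=4)
def _meta_param_doc_example(df, meta=None):
    """Toy function demonstrating ``$META`` substitution.

    Parameters
    ----------
    df : DataFrame
    $META
    """
    return df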
@contextmanager
def raise_on_meta_error(funcname=None, udf=False):
"""Reraise errors in this block to show metadata inference failure.
Parameters
----------
funcname : str, optional
If provided, will be added to the error message to indicate the
name of the method that failed.
"""
try:
yield
except Exception as e:
exc_type, exc_value, exc_traceback = sys.exc_info()
tb = "".join(traceback.format_tb(exc_traceback))
msg = "Metadata inference failed{0}.\n\n"
if udf:
msg += (
"You have supplied a custom function and Dask is unable to \n"
"determine the type of output that that function returns. \n\n"
"To resolve this please provide a meta= keyword.\n"
"The docstring of the Dask function you ran should have more information.\n\n"
)
msg += (
"Original error is below:\n"
"------------------------\n"
"{1}\n\n"
"Traceback:\n"
"---------\n"
"{2}"
)
msg = msg.format(" in `{0}`".format(funcname) if funcname else "", repr(e), tb)
raise ValueError(msg) from e
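# A minimal sketch (hypothetical helper, not used elsewhere in this module)
# showing the intended use of ``raise_on_meta_error``: wrap the call to a
# user-supplied function on empty data so any failure is re-raised with a
# metadata-inference hint.
def _infer_meta_example(func, empty):
    with raise_on_meta_error(funcname=getattr(func, "__name__", None), udf=True):
        return func(empty)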
UNKNOWN_CATEGORIES = "__UNKNOWN_CATEGORIES__"
def has_known_categories(x):
"""Returns whether the categories in `x` are known.
Parameters
----------
x : Series or CategoricalIndex
"""
x = getattr(x, "_meta", x)
if is_series_like(x):
return UNKNOWN_CATEGORIES not in x.cat.categories
elif is_index_like(x) and hasattr(x, "categories"):
return UNKNOWN_CATEGORIES not in x.categories
raise TypeError("Expected Series or CategoricalIndex")
def strip_unknown_categories(x, just_drop_unknown=False):
"""Replace any unknown categoricals with empty categoricals.
Useful for preventing ``UNKNOWN_CATEGORIES`` from leaking into results.
"""
if isinstance(x, (pd.Series, pd.DataFrame)):
x = x.copy()
if isinstance(x, pd.DataFrame):
cat_mask = x.dtypes == "category"
if cat_mask.any():
cats = cat_mask[cat_mask].index
for c in cats:
if not has_known_categories(x[c]):
if just_drop_unknown:
x[c].cat.remove_categories(UNKNOWN_CATEGORIES, inplace=True)
else:
x[c].cat.set_categories([], inplace=True)
elif isinstance(x, pd.Series):
if is_categorical_dtype(x.dtype) and not has_known_categories(x):
x.cat.set_categories([], inplace=True)
if isinstance(x.index, pd.CategoricalIndex) and not has_known_categories(
x.index
):
x.index = x.index.set_categories([])
elif isinstance(x, pd.CategoricalIndex) and not has_known_categories(x):
x = x.set_categories([])
return x
def clear_known_categories(x, cols=None, index=True):
"""Set categories to be unknown.
Parameters
----------
x : DataFrame, Series, Index
cols : iterable, optional
If x is a DataFrame, set only categoricals in these columns to unknown.
By default, all categorical columns are set to unknown categoricals
index : bool, optional
If True and x is a Series or DataFrame, set the clear known categories
in the index as well.
"""
if isinstance(x, (pd.Series, pd.DataFrame)):
x = x.copy()
if isinstance(x, pd.DataFrame):
mask = x.dtypes == "category"
if cols is None:
cols = mask[mask].index
elif not mask.loc[cols].all():
raise ValueError("Not all columns are categoricals")
for c in cols:
x[c].cat.set_categories([UNKNOWN_CATEGORIES], inplace=True)
elif isinstance(x, pd.Series):
if is_categorical_dtype(x.dtype):
x.cat.set_categories([UNKNOWN_CATEGORIES], inplace=True)
if index and isinstance(x.index, pd.CategoricalIndex):
x.index = x.index.set_categories([UNKNOWN_CATEGORIES])
elif isinstance(x, pd.CategoricalIndex):
x = x.set_categories([UNKNOWN_CATEGORIES])
return x
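# A small sketch (hypothetical helper, never called) pairing
# ``clear_known_categories`` with ``has_known_categories``: clearing replaces
# the categories with the UNKNOWN_CATEGORIES sentinel, so the result reports
# its categories as unknown while the original series is untouched.
def _unknown_categories_example():
    s = pd.Series(["a", "b"], dtype="category")
    cleared = clear_known_categories(s)
    assert has_known_categories(s)
    assert not has_known_categories(cleared)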
def _empty_series(name, dtype, index=None):
if isinstance(dtype, str) and dtype == "category":
return pd.Series(
pd.Categorical([UNKNOWN_CATEGORIES]), name=name, index=index
).iloc[:0]
return pd.Series([], dtype=dtype, name=name, index=index)
make_meta = Dispatch("make_meta")
@make_meta.register((pd.Series, pd.DataFrame))
def make_meta_pandas(x, index=None):
return x.iloc[:0]
@make_meta.register(pd.Index)
def make_meta_index(x, index=None):
return x[0:0]
@make_meta.register(object)
def make_meta_object(x, index=None):
"""Create an empty pandas object containing the desired metadata.
Parameters
----------
x : dict, tuple, list, pd.Series, pd.DataFrame, pd.Index, dtype, scalar
To create a DataFrame, provide a `dict` mapping of `{name: dtype}`, or
an iterable of `(name, dtype)` tuples. To create a `Series`, provide a
tuple of `(name, dtype)`. If a pandas object, names, dtypes, and index
should match the desired output. If a dtype or scalar, a scalar of the
same dtype is returned.
index : pd.Index, optional
Any pandas index to use in the metadata. If none provided, a
`RangeIndex` will be used.
Examples
--------
>>> make_meta([('a', 'i8'), ('b', 'O')])
Empty DataFrame
Columns: [a, b]
Index: []
>>> make_meta(('a', 'f8'))
Series([], Name: a, dtype: float64)
>>> make_meta('i8')
1
"""
if hasattr(x, "_meta"):
return x._meta
elif is_arraylike(x) and x.shape:
return x[:0]
if index is not None:
index = make_meta(index)
if isinstance(x, dict):
return pd.DataFrame(
{c: _empty_series(c, d, index=index) for (c, d) in x.items()}, index=index
)
if isinstance(x, tuple) and len(x) == 2:
return _empty_series(x[0], x[1], index=index)
elif isinstance(x, (list, tuple)):
if not all(isinstance(i, tuple) and len(i) == 2 for i in x):
raise ValueError(
"Expected iterable of tuples of (name, dtype), got {0}".format(x)
)
return pd.DataFrame(
{c: _empty_series(c, d, index=index) for (c, d) in x},
columns=[c for c, d in x],
index=index,
)
elif not hasattr(x, "dtype") and x is not None:
# could be a string, a dtype object, or a python type. Skip `None`,
        # because it is implicitly converted to `dtype('f8')`, which we don't
# want here.
try:
dtype = np.dtype(x)
return _scalar_from_dtype(dtype)
except Exception:
# Continue on to next check
pass
if is_scalar(x):
return _nonempty_scalar(x)
raise TypeError("Don't know how to create metadata from {0}".format(x))
_numeric_index_types = (pd.Int64Index, pd.Float64Index, pd.UInt64Index)
meta_nonempty = Dispatch("meta_nonempty")
@meta_nonempty.register(object)
def meta_nonempty_object(x):
"""Create a nonempty pandas object from the given metadata.
Returns a pandas DataFrame, Series, or Index that contains two rows
of fake data.
"""
if is_scalar(x):
return _nonempty_scalar(x)
else:
raise TypeError(
"Expected Pandas-like Index, Series, DataFrame, or scalar, "
"got {0}".format(typename(type(x)))
)
@meta_nonempty.register(pd.DataFrame)
def meta_nonempty_dataframe(x):
idx = meta_nonempty(x.index)
dt_s_dict = dict()
data = dict()
for i, c in enumerate(x.columns):
series = x.iloc[:, i]
dt = series.dtype
if dt not in dt_s_dict:
dt_s_dict[dt] = _nonempty_series(x.iloc[:, i], idx=idx)
data[i] = dt_s_dict[dt]
res = pd.DataFrame(data, index=idx, columns=np.arange(len(x.columns)))
res.columns = x.columns
return res
@meta_nonempty.register(pd.Index)
def _nonempty_index(idx):
typ = type(idx)
if typ is pd.RangeIndex:
return pd.RangeIndex(2, name=idx.name)
elif typ in _numeric_index_types:
return typ([1, 2], name=idx.name)
elif typ is pd.Index:
return pd.Index(["a", "b"], name=idx.name)
elif typ is pd.DatetimeIndex:
start = "1970-01-01"
# Need a non-monotonic decreasing index to avoid issues with
# partial string indexing see https://github.com/dask/dask/issues/2389
# and https://github.com/pandas-dev/pandas/issues/16515
# This doesn't mean `_meta_nonempty` should ever rely on
# `self.monotonic_increasing` or `self.monotonic_decreasing`
try:
return pd.date_range(
start=start, periods=2, freq=idx.freq, tz=idx.tz, name=idx.name
)
except ValueError: # older pandas versions
data = [start, "1970-01-02"] if idx.freq is None else None
return pd.DatetimeIndex(
data, start=start, periods=2, freq=idx.freq, tz=idx.tz, name=idx.name
)
elif typ is pd.PeriodIndex:
return pd.period_range(
start="1970-01-01", periods=2, freq=idx.freq, name=idx.name
)
elif typ is pd.TimedeltaIndex:
start = np.timedelta64(1, "D")
try:
return pd.timedelta_range(
start=start, periods=2, freq=idx.freq, name=idx.name
)
except ValueError: # older pandas versions
start = np.timedelta64(1, "D")
data = [start, start + 1] if idx.freq is None else None
return pd.TimedeltaIndex(
data, start=start, periods=2, freq=idx.freq, name=idx.name
)
elif typ is pd.CategoricalIndex:
if len(idx.categories) == 0:
data = pd.Categorical(_nonempty_index(idx.categories), ordered=idx.ordered)
else:
data = pd.Categorical.from_codes(
[-1, 0], categories=idx.categories, ordered=idx.ordered
)
return pd.CategoricalIndex(data, name=idx.name)
elif typ is pd.MultiIndex:
levels = [_nonempty_index(l) for l in idx.levels]
codes = [[0, 0] for i in idx.levels]
try:
return pd.MultiIndex(levels=levels, codes=codes, names=idx.names)
except TypeError: # older pandas versions
return pd.MultiIndex(levels=levels, labels=codes, names=idx.names)
raise TypeError(
"Don't know how to handle index of type {0}".format(typename(type(idx)))
)
hash_object_dispatch = Dispatch("hash_object_dispatch")
@hash_object_dispatch.register((pd.DataFrame, pd.Series, pd.Index))
def hash_object_pandas(
obj, index=True, encoding="utf8", hash_key=None, categorize=True
):
return pd.util.hash_pandas_object(
obj, index=index, encoding=encoding, hash_key=hash_key, categorize=categorize
)
group_split_dispatch = Dispatch("group_split_dispatch")
@group_split_dispatch.register((pd.DataFrame, pd.Series, pd.Index))
def group_split_pandas(df, c, k, ignore_index=False):
indexer, locations = pd._libs.algos.groupsort_indexer(
c.astype(np.int64, copy=False), k
)
df2 = df.take(indexer)
locations = locations.cumsum()
parts = [df2.iloc[a:b] for a, b in zip(locations[:-1], locations[1:])]
return dict(zip(range(k), parts))
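# A small sketch (toy data) of ``group_split_pandas``: each row is routed to
# one of ``k`` output partitions according to the integer code in ``c``, so
# rows with code 0 end up in part 0 and rows with code 1 in part 1.
def _group_split_example():
    df = pd.DataFrame({"x": [10, 20, 30, 40]})
    codes = np.array([1, 0, 1, 0])
    return group_split_pandas(df, codes, 2)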
_simple_fake_mapping = {
"b": np.bool_(True),
"V": np.void(b" "),
"M": np.datetime64("1970-01-01"),
"m": np.timedelta64(1),
"S": np.str_("foo"),
"a": np.str_("foo"),
"U": np.unicode_("foo"),
"O": "foo",
}
def _scalar_from_dtype(dtype):
if dtype.kind in ("i", "f", "u"):
return dtype.type(1)
elif dtype.kind == "c":
return dtype.type(complex(1, 0))
elif dtype.kind in _simple_fake_mapping:
o = _simple_fake_mapping[dtype.kind]
return o.astype(dtype) if dtype.kind in ("m", "M") else o
else:
raise TypeError("Can't handle dtype: {0}".format(dtype))
@make_scalar.register(np.dtype)
def _(dtype):
return _scalar_from_dtype(dtype)
@make_scalar.register(pd.Timestamp)
@make_scalar.register(pd.Timedelta)
@make_scalar.register(pd.Period)
@make_scalar.register(pd.Interval)
def _(x):
return x
def _nonempty_scalar(x):
if type(x) in make_scalar._lookup:
return make_scalar(x)
if np.isscalar(x):
dtype = x.dtype if hasattr(x, "dtype") else np.dtype(type(x))
return make_scalar(dtype)
raise TypeError("Can't handle meta of type '{0}'".format(typename(type(x))))
@meta_nonempty.register(pd.Series)
def _nonempty_series(s, idx=None):
# TODO: Use register dtypes with make_array_nonempty
if idx is None:
idx = _nonempty_index(s.index)
dtype = s.dtype
if is_datetime64tz_dtype(dtype):
entry = pd.Timestamp("1970-01-01", tz=dtype.tz)
data = [entry, entry]
elif is_categorical_dtype(dtype):
if len(s.cat.categories):
data = [s.cat.categories[0]] * 2
cats = s.cat.categories
else:
data = _nonempty_index(s.cat.categories)
cats = None
data = pd.Categorical(data, categories=cats, ordered=s.cat.ordered)
elif is_integer_na_dtype(dtype):
data = pd.array([1, None], dtype=dtype)
elif is_period_dtype(dtype):
# pandas 0.24.0+ should infer this to be Series[Period[freq]]
freq = dtype.freq
data = [pd.Period("2000", freq), pd.Period("2001", freq)]
elif is_sparse(dtype):
# TODO: pandas <0.24
# Pandas <= 0.23.4:
if PANDAS_GT_0240:
entry = _scalar_from_dtype(dtype.subtype)
else:
entry = _scalar_from_dtype(dtype.subtype)
data = pd.SparseArray([entry, entry], dtype=dtype)
elif is_interval_dtype(dtype):
entry = _scalar_from_dtype(dtype.subtype)
if PANDAS_GT_0240:
data = pd.array([entry, entry], dtype=dtype)
else:
data = np.array([entry, entry], dtype=dtype)
elif type(dtype) in make_array_nonempty._lookup:
data = make_array_nonempty(dtype)
else:
entry = _scalar_from_dtype(dtype)
data = np.array([entry, entry], dtype=dtype)
return pd.Series(data, name=s.name, index=idx)
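# For reference: ``_nonempty_series(pd.Series([], dtype="f8"))`` returns a
# two-row float series filled with the dtype's dummy scalar (1.0 here),
# indexed by a matching two-element nonempty index.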
def is_dataframe_like(df):
return dask_is_dataframe_like(df)
def is_series_like(s):
return dask_is_series_like(s)
def is_index_like(s):
return dask_is_index_like(s)
def check_meta(x, meta, funcname=None, numeric_equal=True):
"""Check that the dask metadata matches the result.
If metadata matches, ``x`` is passed through unchanged. A nice error is
raised if metadata doesn't match.
Parameters
----------
x : DataFrame, Series, or Index
meta : DataFrame, Series, or Index
The expected metadata that ``x`` should match
funcname : str, optional
The name of the function in which the metadata was specified. If
provided, the function name will be included in the error message to be
more helpful to users.
    numeric_equal : bool, optional
If True, integer and floating dtypes compare equal. This is useful due
to panda's implicit conversion of integer to floating upon encountering
missingness, which is hard to infer statically.
"""
eq_types = {"i", "f", "u"} if numeric_equal else set()
def equal_dtypes(a, b):
if is_categorical_dtype(a) != is_categorical_dtype(b):
return False
if isinstance(a, str) and a == "-" or isinstance(b, str) and b == "-":
return False
if is_categorical_dtype(a) and is_categorical_dtype(b):
if UNKNOWN_CATEGORIES in a.categories or UNKNOWN_CATEGORIES in b.categories:
return True
return a == b
return (a.kind in eq_types and b.kind in eq_types) or (a == b)
if not (
is_dataframe_like(meta) or is_series_like(meta) or is_index_like(meta)
) or is_dask_collection(meta):
raise TypeError(
"Expected partition to be DataFrame, Series, or "
"Index, got `%s`" % typename(type(meta))
)
if type(x) != type(meta):
errmsg = "Expected partition of type `%s` but got `%s`" % (
typename(type(meta)),
typename(type(x)),
)
elif is_dataframe_like(meta):
dtypes = pd.concat([x.dtypes, meta.dtypes], axis=1, sort=True)
bad_dtypes = [
(col, a, b)
for col, a, b in dtypes.fillna("-").itertuples()
if not equal_dtypes(a, b)
]
if bad_dtypes:
errmsg = "Partition type: `%s`\n%s" % (
typename(type(meta)),
asciitable(["Column", "Found", "Expected"], bad_dtypes),
)
else:
check_matching_columns(meta, x)
return x
else:
if equal_dtypes(x.dtype, meta.dtype):
return x
errmsg = "Partition type: `%s`\n%s" % (
typename(type(meta)),
asciitable(["", "dtype"], [("Found", x.dtype), ("Expected", meta.dtype)]),
)
raise ValueError(
"Metadata mismatch found%s.\n\n"
"%s" % ((" in `%s`" % funcname if funcname else ""), errmsg)
)
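# A minimal sketch (toy frames) of ``check_meta``: a matching partition passes
# through unchanged, while a dtype mismatch raises a ValueError whose message
# tabulates the offending columns.
def _check_meta_example():
    meta = pd.DataFrame({"a": pd.Series([], dtype="i8")})
    check_meta(pd.DataFrame({"a": [1, 2]}), meta)  # returned unchanged
    try:
        check_meta(pd.DataFrame({"a": ["x"]}), meta, funcname="example")
    except ValueError:
        pass  # object vs int64 is reported here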
def check_matching_columns(meta, actual):
# Need nan_to_num otherwise nan comparison gives False
if not np.array_equal(np.nan_to_num(meta.columns), np.nan_to_num(actual.columns)):
extra = actual.columns.difference(meta.columns).tolist()
missing = meta.columns.difference(actual.columns).tolist()
if extra or missing:
extra_info = f" Extra: {extra}\n Missing: {missing}"
else:
extra_info = "Order of columns does not match"
raise ValueError(
"The columns in the computed data do not match"
" the columns in the provided metadata\n"
f"{extra_info}"
)
def index_summary(idx, name=None):
"""Summarized representation of an Index.
"""
n = len(idx)
if name is None:
name = idx.__class__.__name__
if n:
head = idx[0]
tail = idx[-1]
summary = ", {} to {}".format(head, tail)
else:
summary = ""
return "{}: {} entries{}".format(name, n, summary)
###############################################################
# Testing
###############################################################
def _check_dask(dsk, check_names=True, check_dtypes=True, result=None):
import dask.dataframe as dd
if hasattr(dsk, "dask"):
if result is None:
result = dsk.compute(scheduler="sync")
if isinstance(dsk, dd.Index):
assert "Index" in type(result).__name__, type(result)
# assert type(dsk._meta) == type(result), type(dsk._meta)
if check_names:
assert dsk.name == result.name
assert dsk._meta.name == result.name
if isinstance(result, pd.MultiIndex):
assert result.names == dsk._meta.names
if check_dtypes:
assert_dask_dtypes(dsk, result)
elif isinstance(dsk, dd.Series):
assert "Series" in type(result).__name__, type(result)
assert type(dsk._meta) == type(result), type(dsk._meta)
if check_names:
assert dsk.name == result.name, (dsk.name, result.name)
assert dsk._meta.name == result.name
if check_dtypes:
assert_dask_dtypes(dsk, result)
_check_dask(
dsk.index,
check_names=check_names,
check_dtypes=check_dtypes,
result=result.index,
)
elif isinstance(dsk, dd.DataFrame):
assert "DataFrame" in type(result).__name__, type(result)
assert isinstance(dsk.columns, pd.Index), type(dsk.columns)
assert type(dsk._meta) == type(result), type(dsk._meta)
if check_names:
tm.assert_index_equal(dsk.columns, result.columns)
tm.assert_index_equal(dsk._meta.columns, result.columns)
if check_dtypes:
assert_dask_dtypes(dsk, result)
_check_dask(
dsk.index,
check_names=check_names,
check_dtypes=check_dtypes,
result=result.index,
)
elif isinstance(dsk, dd.core.Scalar):
assert np.isscalar(result) or isinstance(
result, (pd.Timestamp, pd.Timedelta)
)
if check_dtypes:
assert_dask_dtypes(dsk, result)
else:
msg = "Unsupported dask instance {0} found".format(type(dsk))
raise AssertionError(msg)
return result
return dsk
def _maybe_sort(a):
# sort by value, then index
try:
if is_dataframe_like(a):
if set(a.index.names) & set(a.columns):
a.index.names = [
"-overlapped-index-name-%d" % i for i in range(len(a.index.names))
]
a = a.sort_values(by=a.columns.tolist())
else:
a = a.sort_values()
except (TypeError, IndexError, ValueError):
pass
return a.sort_index()
def assert_eq(
a,
b,
check_names=True,
check_dtypes=True,
check_divisions=True,
check_index=True,
**kwargs,
):
if check_divisions:
assert_divisions(a)
assert_divisions(b)
if hasattr(a, "divisions") and hasattr(b, "divisions"):
at = type(np.asarray(a.divisions).tolist()[0]) # numpy to python
bt = type(np.asarray(b.divisions).tolist()[0]) # scalar conversion
assert at == bt, (at, bt)
assert_sane_keynames(a)
assert_sane_keynames(b)
a = _check_dask(a, check_names=check_names, check_dtypes=check_dtypes)
b = _check_dask(b, check_names=check_names, check_dtypes=check_dtypes)
if not check_index:
a = a.reset_index(drop=True)
b = b.reset_index(drop=True)
if hasattr(a, "to_pandas"):
a = a.to_pandas()
if hasattr(b, "to_pandas"):
b = b.to_pandas()
if isinstance(a, pd.DataFrame):
a = _maybe_sort(a)
b = _maybe_sort(b)
tm.assert_frame_equal(a, b, **kwargs)
elif isinstance(a, pd.Series):
a = _maybe_sort(a)
b = _maybe_sort(b)
tm.assert_series_equal(a, b, check_names=check_names, **kwargs)
elif isinstance(a, pd.Index):
tm.assert_index_equal(a, b, **kwargs)
else:
if a == b:
return True
else:
if np.isnan(a):
assert np.isnan(b)
else:
assert np.allclose(a, b)
return True
def assert_dask_graph(dask, label):
if hasattr(dask, "dask"):
dask = dask.dask
assert isinstance(dask, Mapping)
for k in dask:
if isinstance(k, tuple):
k = k[0]
if k.startswith(label):
return True
raise AssertionError(
"given dask graph doesn't contain label: {label}".format(label=label)
)
def assert_divisions(ddf):
if not hasattr(ddf, "divisions"):
return
if not getattr(ddf, "known_divisions", False):
return
def index(x):
if is_index_like(x):
return x
try:
return x.index.get_level_values(0)
except AttributeError:
return x.index
results = get_sync(ddf.dask, ddf.__dask_keys__())
for i, df in enumerate(results[:-1]):
if len(df):
assert index(df).min() >= ddf.divisions[i]
assert index(df).max() < ddf.divisions[i + 1]
if len(results[-1]):
assert index(results[-1]).min() >= ddf.divisions[-2]
assert index(results[-1]).max() <= ddf.divisions[-1]
def assert_sane_keynames(ddf):
if not hasattr(ddf, "dask"):
return
for k in ddf.dask.keys():
while isinstance(k, tuple):
k = k[0]
assert isinstance(k, (str, bytes))
assert len(k) < 100
assert " " not in k
assert k.split("-")[0].isidentifier()
def assert_dask_dtypes(ddf, res, numeric_equal=True):
"""Check that the dask metadata matches the result.
If `numeric_equal`, integer and floating dtypes compare equal. This is
useful due to the implicit conversion of integer to floating upon
encountering missingness, which is hard to infer statically."""
eq_type_sets = [{"O", "S", "U", "a"}] # treat object and strings alike
if numeric_equal:
eq_type_sets.append({"i", "f", "u"})
def eq_dtypes(a, b):
return any(
a.kind in eq_types and b.kind in eq_types for eq_types in eq_type_sets
) or (a == b)
if not is_dask_collection(res) and is_dataframe_like(res):
for col, a, b in pd.concat([ddf._meta.dtypes, res.dtypes], axis=1).itertuples():
assert eq_dtypes(a, b)
elif not is_dask_collection(res) and (is_index_like(res) or is_series_like(res)):
a = ddf._meta.dtype
b = res.dtype
assert eq_dtypes(a, b)
else:
if hasattr(ddf._meta, "dtype"):
a = ddf._meta.dtype
if not hasattr(res, "dtype"):
assert np.isscalar(res)
b = np.dtype(type(res))
else:
b = res.dtype
assert eq_dtypes(a, b)
else:
assert type(ddf._meta) == type(res)
def assert_max_deps(x, n, eq=True):
dependencies, dependents = get_deps(x.dask)
if eq:
assert max(map(len, dependencies.values())) == n
else:
assert max(map(len, dependencies.values())) <= n
def valid_divisions(divisions):
""" Are the provided divisions valid?
Examples
--------
>>> valid_divisions([1, 2, 3])
True
>>> valid_divisions([3, 2, 1])
False
>>> valid_divisions([1, 1, 1])
False
>>> valid_divisions([0, 1, 1])
True
>>> valid_divisions(123)
False
>>> valid_divisions([0, float('nan'), 1])
False
"""
if not isinstance(divisions, (tuple, list)):
return False
for i, x in enumerate(divisions[:-2]):
if x >= divisions[i + 1]:
return False
if isinstance(x, numbers.Number) and math.isnan(x):
return False
for x in divisions[-2:]:
if isinstance(x, numbers.Number) and math.isnan(x):
return False
if divisions[-2] > divisions[-1]:
return False
return True
def drop_by_shallow_copy(df, columns, errors="raise"):
""" Use shallow copy to drop columns in place
"""
df2 = df.copy(deep=False)
if not pd.api.types.is_list_like(columns):
columns = [columns]
df2.drop(columns=columns, inplace=True, errors=errors)
return df2
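# A small sketch (toy frame, hypothetical helper) of ``drop_by_shallow_copy``:
# the returned frame lacks the dropped column while the original is left
# untouched, because only the shallow copy is modified in place.
def _drop_by_shallow_copy_example():
    df = pd.DataFrame({"a": [1], "b": [2]})
    df2 = drop_by_shallow_copy(df, "b")
    assert list(df.columns) == ["a", "b"]
    assert list(df2.columns) == ["a"]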
| {
"content_hash": "5b20c2225e2944deaf95910a5ceabad4",
"timestamp": "",
"source": "github",
"line_count": 970,
"max_line_length": 94,
"avg_line_length": 32.48969072164948,
"alnum_prop": 0.5764873869585911,
"repo_name": "ContinuumIO/dask",
"id": "fb56a18882687899bb252a594f5b0e32beb2d9c4",
"size": "31515",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "dask/dataframe/utils.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "51"
},
{
"name": "Python",
"bytes": "1033885"
},
{
"name": "Shell",
"bytes": "36"
}
],
"symlink_target": ""
} |
"""Debug code for testing module is functioning properly.
"""
import time
from pivim import temperature as t
import ptvsd # pylint: disable=unused-import
def main():
"""
    Debug code for testing that the module is functioning properly.
    Uncomment the ptvsd lines to enable remote debugging from Visual Studio.
    Use the format tcp://pi@<hostname-or-ip>:5678 (e.g. the Pi's IP address)
    when attaching to the debugger.
"""
# ptvsd.enable_attach(secret='pi')
# ptvsd.wait_for_attach()
while True:
try:
            latest_temp = round(t.read_temp() - 0.5)
print(latest_temp)
time.sleep(5)
except KeyboardInterrupt:
            break
if __name__ == '__main__':
main()
| {
"content_hash": "598283b0407c643225d7eeb191750f0a",
"timestamp": "",
"source": "github",
"line_count": 28,
"max_line_length": 71,
"avg_line_length": 25.75,
"alnum_prop": 0.6324549237170597,
"repo_name": "pleasereleaseme/PiVIM-py",
"id": "2dc62d82a9ef35bb1e7d5b59a9bc21db7fc1e322",
"size": "721",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "temperature_debug.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "15922"
}
],
"symlink_target": ""
} |
from flask_login import login_required
from flask_restful import Resource
from app.decorators import wx_required
class BusinessResource(Resource):
method_decorators = [login_required, wx_required]
| {
"content_hash": "de1a7f38e5f033c087977af8d87f7759",
"timestamp": "",
"source": "github",
"line_count": 8,
"max_line_length": 53,
"avg_line_length": 26.5,
"alnum_prop": 0.7735849056603774,
"repo_name": "by46/coffee",
"id": "4cd3a3d0cddd08a8b1dc6d0a690682298742ce3d",
"size": "212",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "app/main/views/resource.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "7825"
},
{
"name": "Makefile",
"bytes": "234"
},
{
"name": "Mako",
"bytes": "494"
},
{
"name": "Python",
"bytes": "79856"
},
{
"name": "Shell",
"bytes": "2693"
}
],
"symlink_target": ""
} |