text stringlengths 4 to 1.02M | meta dict |
---|---|
"""Generated class for configuration_endpoint.json"""
class ObjectA90DCC28:
"""Generated schema class"""
def __init__(self):
self.basic = None
self.jwt = None
@staticmethod
def from_dict(source):
if not source:
return None
result = ObjectA90DCC28()
result.basic = source.get('basic')
result.jwt = source.get('jwt')
return result
@staticmethod
def map_from(source):
if not source:
return None
result = {}
for key in source:
result[key] = ObjectA90DCC28.from_dict(source[key])
return result
@staticmethod
def expand_dict(input):
result = {}
for property in input:
result[property] = input[property].to_dict() if input[property] else {}
return result
def to_dict(self):
result = {}
if self.basic:
result['basic'] = self.basic # 5
if self.jwt:
result['jwt'] = self.jwt # 5
return result
class EndpointConfiguration:
"""Generated schema class"""
def __init__(self):
self.protocol = None
self.transport = None
self.hostname = None
self.port = None
self.config_sync_sec = None
self.client_id = None
self.topic_prefix = None
self.auth_provider = None
self.nonce = None
@staticmethod
def from_dict(source):
if not source:
return None
result = EndpointConfiguration()
result.protocol = source.get('protocol')
result.transport = source.get('transport')
result.hostname = source.get('hostname')
result.port = source.get('port')
result.config_sync_sec = source.get('config_sync_sec')
result.client_id = source.get('client_id')
result.topic_prefix = source.get('topic_prefix')
result.auth_provider = ObjectA90DCC28.from_dict(source.get('auth_provider'))
result.nonce = source.get('nonce')
return result
@staticmethod
def map_from(source):
if not source:
return None
result = {}
for key in source:
result[key] = EndpointConfiguration.from_dict(source[key])
return result
@staticmethod
def expand_dict(input):
result = {}
for property in input:
result[property] = input[property].to_dict() if input[property] else {}
return result
def to_dict(self):
result = {}
if self.protocol:
result['protocol'] = self.protocol # 5
if self.transport:
result['transport'] = self.transport # 5
if self.hostname:
result['hostname'] = self.hostname # 5
if self.port:
result['port'] = self.port # 5
if self.config_sync_sec:
result['config_sync_sec'] = self.config_sync_sec # 5
if self.client_id:
result['client_id'] = self.client_id # 5
if self.topic_prefix:
result['topic_prefix'] = self.topic_prefix # 5
if self.auth_provider:
result['auth_provider'] = self.auth_provider.to_dict() # 4
if self.nonce:
result['nonce'] = self.nonce # 5
return result
| {
"content_hash": "16c701462041215ed66e2930ae4edf2f",
"timestamp": "",
"source": "github",
"line_count": 111,
"max_line_length": 80,
"avg_line_length": 26.1981981981982,
"alnum_prop": 0.6365199449793673,
"repo_name": "faucetsdn/udmi",
"id": "b5f1e96f5a45deaed7e9f450f9c1536caaea31ba",
"size": "2908",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "gencode/python/udmi/schema/configuration_endpoint.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "1294"
},
{
"name": "HCL",
"bytes": "16883"
},
{
"name": "HTML",
"bytes": "15862"
},
{
"name": "Java",
"bytes": "495172"
},
{
"name": "JavaScript",
"bytes": "35643"
},
{
"name": "Python",
"bytes": "58201"
},
{
"name": "SCSS",
"bytes": "6256"
},
{
"name": "Shell",
"bytes": "79835"
},
{
"name": "Smarty",
"bytes": "4333"
},
{
"name": "TypeScript",
"bytes": "233396"
}
],
"symlink_target": ""
} |
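A minimal usage sketch for the generated classes above (illustrative values only; the import path is assumed from the file's location under gencode/python):
from udmi.schema.configuration_endpoint import EndpointConfiguration
source = {
    'protocol': 'mqtt',
    'hostname': 'mqtt.example.com',
    'port': 8883,
    'client_id': 'device-1',
    'auth_provider': {'basic': {'username': 'u', 'password': 'p'}},
}
config = EndpointConfiguration.from_dict(source)
assert config.hostname == 'mqtt.example.com'
# to_dict() emits only the fields that are set (truthy).
assert config.to_dict()['auth_provider'] == {'basic': {'username': 'u', 'password': 'p'}}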
import logging
import os
import sys
import yaml
from os.path import abspath, dirname, exists, join
from ob2.config.assignment import Assignment
from ob2.config.cli import parse_args
LOG = logging.getLogger(__name__)
class _ConfigModule(object):
def __init__(self, mode):
# A dictionary of configuration values.
self._config = {}
# Should be "server" or "ipython"
self.mode = mode
# A list of "functions.py" files that will be executed after the configuration values and
# the runtime system have finished loading, but before the services are started. These files
# can contain hooks (see ob2/util/hooks.py) and other code to run.
self._custom_functions_files = []
def exec_custom_functions(self):
for f in self._custom_functions_files:
execfile(f, {"__file__": f})
def _load_from_directory(self, directory):
"""
Loads configuration from DIRECTORY. The directory can have:
config.yaml -- Contains key-value pairs
functions.py -- Contains arbitrary Python code (useful for registering hooks)
"""
LOG.info("Loading configuration from %s" % repr(abspath(directory)))
try:
with open(join(directory, "config.yaml")) as f:
config_dict = yaml.load(f.read())
except IOError:
LOG.info(" -> No config.yaml found (skipping)")
else:
for key, value in config_dict.items():
# We support the special suffixes "_APPEND" and "_UPDATE" for advanced users who
# need to modify (rather than replace) a configuration value.
is_append = is_update = False
if key.endswith("_APPEND"):
key = key[:-len("_APPEND")]
is_append = True
elif key.endswith("_UPDATE"):
key = key[:-len("_UPDATE")]
is_update = True
if key == "assignments":
# The "assignments" dictionary is special. We turn the assignments into objects
# first, because they're so important and so often used.
assert isinstance(value, list)
value = [Assignment(**kwargs) for kwargs in value]
elif key.endswith("_path"):
# Configuration options that end in "_path" are treated specially.
# Paths are relative to the config directory root.
assert isinstance(value, basestring)
value = abspath(join(directory, value))
if is_append:
assert isinstance(value, list)
if key in self._config:
assert isinstance(self._config[key], list)
self._config[key].extend(value)
else:
self._config[key] = value
                elif is_update:
                    assert isinstance(value, dict)
                    assert key not in self._config or isinstance(self._config[key], dict)
                    # Merge the new values into any previously loaded dict.
                    self._config.setdefault(key, {}).update(value)
else:
self._config[key] = value
LOG.info(" -> config.yaml loaded")
# Supports an optional "functions.py" script, which can register hooks.
functions_script = join(directory, "functions.py")
if exists(functions_script):
self._custom_functions_files.append(functions_script)
LOG.info(" -> functions.py loaded")
else:
LOG.info(" -> No functions.py found (skipping)")
def _lookup(self, key):
try:
return self._config[key]
except KeyError:
raise KeyError("No such configuration key: %s" % repr(key))
def __getattr__(self, key):
return self._lookup(key)
args = parse_args()
config_mode = "ipython" if args.ipython else "server"
config_module = _ConfigModule(mode=config_mode)
# Step 1: Load from the defaults that are bundled with ob2
config_module._load_from_directory(join(dirname(__file__), "..", "..", "config"))
# Step 2: Load from paths in OB2_CONFIG_PATHS environment variable, if any
for directory in os.environ.get("OB2_CONFIG_PATHS", "").split(":"):
directory = directory.strip()
if directory:
config_module._load_from_directory(directory)
# Step 3: Load from paths specified on the command line, if any
for directory in args.config:
config_module._load_from_directory(directory)
sys.modules[__name__] = config_module
| {
"content_hash": "8f90639b2e083263b88f7caefb811b08",
"timestamp": "",
"source": "github",
"line_count": 117,
"max_line_length": 100,
"avg_line_length": 38.98290598290598,
"alnum_prop": 0.5792589344442008,
"repo_name": "octobear2/ob2",
"id": "bae80ca280bb69e497364c1944535e525dd4abee",
"size": "4561",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ob2/config/__init__.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "CSS",
"bytes": "3196"
},
{
"name": "CoffeeScript",
"bytes": "6053"
},
{
"name": "HTML",
"bytes": "109099"
},
{
"name": "JavaScript",
"bytes": "13577"
},
{
"name": "Puppet",
"bytes": "2366"
},
{
"name": "Python",
"bytes": "183885"
},
{
"name": "Shell",
"bytes": "4137"
}
],
"symlink_target": ""
} |
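An illustrative config.yaml fragment (hypothetical keys, not part of the ob2 source) for the loader above, showing the special suffixes handled in _load_from_directory:
example_config_yaml = """
some_list_option_APPEND:      # _APPEND: extends a list set by an earlier config directory
  - extra-item
some_dict_option_UPDATE:      # _UPDATE: merges into a dict set by an earlier config directory
  nested_key: value
template_path: ./templates    # keys ending in _path resolve relative to this directory
"""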
"""
Tests for utility
"""
import unittest
if __name__ == '__main__':
unittest.main()
| {
"content_hash": "a1be4fa089dfaa21490d63d1d463d704",
"timestamp": "",
"source": "github",
"line_count": 9,
"max_line_length": 26,
"avg_line_length": 11,
"alnum_prop": 0.5151515151515151,
"repo_name": "systempuntoout/buckethandle",
"id": "8340544e96af5df6114c9c38d95087ac9fe53a93",
"size": "124",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "app/tests/test_worker.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "83166"
},
{
"name": "JavaScript",
"bytes": "27319"
},
{
"name": "Python",
"bytes": "454765"
}
],
"symlink_target": ""
} |
import proto # type: ignore
from google.cloud.automl_v1beta1.types import text_segment as gca_text_segment
__protobuf__ = proto.module(
package="google.cloud.automl.v1beta1",
manifest={
"TextExtractionAnnotation",
"TextExtractionEvaluationMetrics",
},
)
class TextExtractionAnnotation(proto.Message):
r"""Annotation for identifying spans of text.
.. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields
Attributes:
text_segment (google.cloud.automl_v1beta1.types.TextSegment):
An entity annotation will set this, which is
the part of the original text to which the
annotation pertains.
This field is a member of `oneof`_ ``annotation``.
score (float):
Output only. A confidence estimate between
0.0 and 1.0. A higher value means greater
confidence in correctness of the annotation.
"""
text_segment = proto.Field(
proto.MESSAGE,
number=3,
oneof="annotation",
message=gca_text_segment.TextSegment,
)
score = proto.Field(
proto.FLOAT,
number=1,
)
class TextExtractionEvaluationMetrics(proto.Message):
r"""Model evaluation metrics for text extraction problems.
Attributes:
au_prc (float):
Output only. The Area under precision recall
curve metric.
confidence_metrics_entries (Sequence[google.cloud.automl_v1beta1.types.TextExtractionEvaluationMetrics.ConfidenceMetricsEntry]):
Output only. Metrics that have confidence
thresholds. Precision-recall curve can be
derived from it.
"""
class ConfidenceMetricsEntry(proto.Message):
r"""Metrics for a single confidence threshold.
Attributes:
confidence_threshold (float):
Output only. The confidence threshold value
used to compute the metrics. Only annotations
with score of at least this threshold are
considered to be ones the model would return.
recall (float):
Output only. Recall under the given
confidence threshold.
precision (float):
Output only. Precision under the given
confidence threshold.
f1_score (float):
Output only. The harmonic mean of recall and
precision.
"""
confidence_threshold = proto.Field(
proto.FLOAT,
number=1,
)
recall = proto.Field(
proto.FLOAT,
number=3,
)
precision = proto.Field(
proto.FLOAT,
number=4,
)
f1_score = proto.Field(
proto.FLOAT,
number=5,
)
au_prc = proto.Field(
proto.FLOAT,
number=1,
)
confidence_metrics_entries = proto.RepeatedField(
proto.MESSAGE,
number=2,
message=ConfidenceMetricsEntry,
)
__all__ = tuple(sorted(__protobuf__.manifest))
| {
"content_hash": "db3fe195fb0c4ad05f38724ecd39661b",
"timestamp": "",
"source": "github",
"line_count": 106,
"max_line_length": 136,
"avg_line_length": 29.72641509433962,
"alnum_prop": 0.5963186290066645,
"repo_name": "googleapis/python-automl",
"id": "ca2e13df18d5ee3d8b0896cfd29c749554f93ba8",
"size": "3751",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "google/cloud/automl_v1beta1/types/text_extraction.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "2050"
},
{
"name": "Python",
"bytes": "2347989"
},
{
"name": "Shell",
"bytes": "30660"
}
],
"symlink_target": ""
} |
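A brief construction sketch (hypothetical offsets and scores), assuming the automl_v1beta1 types package above is installed:
from google.cloud.automl_v1beta1.types import text_extraction, text_segment
annotation = text_extraction.TextExtractionAnnotation(
    text_segment=text_segment.TextSegment(start_offset=0, end_offset=4),
    score=0.87,
)
entry = text_extraction.TextExtractionEvaluationMetrics.ConfidenceMetricsEntry(
    confidence_threshold=0.5, recall=0.8, precision=0.9, f1_score=0.847,
)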
import sys
def note_error(strE):
return ["Bad start: "+strE]
def check_compiler(strC):
lstErrors = list()
lstC = strC[1:].strip().split("=")
lstKnownCompilers = ["file_title","gdbot_syntax_version","log_file","log_email"]
if lstC[0].strip() in lstKnownCompilers:
# gdbot version
if lstC[0].strip() == "gdbot_syntax_version":
if not lstC[1].strip().replace('.','',1).isdigit():
lstErrors.append("Bad compiler. Bad gdbot ver.: "+lstC[1].strip())
# log file
if lstC[0].strip() == "log_file":
if len(lstC[1].strip().replace('.','')) < 1:
lstErrors.append("Bad compiler. Short logfile name: "+lstC[1].strip())
# # send log
# if lstC[0].strip() == "send_log_file":
# if lstC[1].strip().lower() not in ("true","false"):
# lstErrors.append("Bad compiler. Invalid send_log_boolean: "+lstC[1].strip())
# email address
if lstC[0].strip() == "log_email":
if not "@" in lstC[1]:
lstErrors.append("Bad compiler. No valid email address: "+lstC[1].strip())
else:
lstErrors.append("Bad compiler: "+lstC[0].strip())
return lstErrors
def check_gdbotrule(strR):
lstErrors = list()
lstR = strR[1:].strip().split(":")
if len(lstR) == 9: # Valid length
if lstR[0].strip().isdigit(): # Valid ID number
lstLegalModes = ["SQL"]
if lstR[2].strip() in lstLegalModes: # Valid Mode
if True: # FC is valid
if True: # FCS is valid
if True: # SQL is valid
if lstR[6].strip().upper() in ("LOG","FIX"): # Action is valid
if True: # Action-SQL / Log-message is valid
pass
else:
lstErrors.append("Bad rule. Unknown Action-SQL / Log-message: "+strR)
else:
lstErrors.append("Bad rule. Unknown Action: "+strR)
else:
lstErrors.append("Bad rule. Invalid SQL: "+strR)
else:
lstErrors.append("Bad rule. Unknown Feature Class Subtype: "+strR)
else:
lstErrors.append("Bad rule. Unknown Feature Class: "+strR)
else:
lstErrors.append("Bad rule. Unknown rule mode: "+strR)
else:
lstErrors.append("Bad rule. Rule ID not a number: "+strR)
else:
lstErrors.append("Bad rule. Not 9 tokens: "+strR)
return lstErrors
def syntax_check(lstLines):
# strip empty- and comment-lines, and EOL-marks
lstRL = list()
for strRL in lstLines:
strRule = strRL[:strRL.find("#")].strip()
if len(strRule) > 1:
lstRL.append(strRule)
# analyze lines
lstErrors = list()
for strRule in lstRL:
if strRule[0] == "%":
lstErrors.extend(check_compiler(strRule))
elif strRule[0] == ":":
lstErrors.extend(check_gdbotrule(strRule))
else:
lstErrors.extend(note_error(strRule))
lstErrors = [x for x in lstErrors if x != ""]
return lstErrors
if __name__ == "__main__":
    if len(sys.argv) > 1:
print "Checking file(s)..."
for fil in sys.argv[1:]: # Don't check the .py itself...
print " Syntax check : ",fil
# Open file
filR = open(fil,"r")
lstR = filR.readlines()
# Run Syntax check
x = syntax_check(lstR)
print x
else:
print "Usage : gdbot_systax_check input.gdbot"
| {
"content_hash": "1c4278da3c685e964dd19d3c118da1a1",
"timestamp": "",
"source": "github",
"line_count": 96,
"max_line_length": 105,
"avg_line_length": 39.25,
"alnum_prop": 0.5069002123142251,
"repo_name": "MartinHvidberg/gdbot",
"id": "fd7c188ce3eba0cb34679cfdcbee56f0f2808aae",
"size": "3768",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "gdbot_syntax_check.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "77764"
}
],
"symlink_target": ""
} |
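A minimal sketch of driving the checker above (hypothetical rule content; lines are newline-terminated, as readlines() would produce them):
sample_lines = [
    "% gdbot_syntax_version = 1.0\n",
    "% log_email = gis@example.com\n",
    "# comment lines are stripped before checking\n",
    ": 1 : note : SQL : roads : * : SELECT 1 : LOG : bad geometry :\n",
]
assert syntax_check(sample_lines) == []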
"""This file defines functions to generated different splits of
the data for federated simulations.
"""
from collections import OrderedDict, defaultdict
import numpy as np
import tensorflow as tf
from data_utils import create_masks
def gather_indices(idxs, in_data, slot_inputs, padding_masks, look_ahead_masks,
intent_masks, slot_targets, intent_data):
"""Gathes the given indices from the given data and returns it as a OrderedDict
"""
data = OrderedDict(x=(tf.gather(in_data, idxs, axis=0),
tf.gather(slot_inputs, idxs, axis=0),
tf.gather(padding_masks, idxs, axis=0),
tf.gather(look_ahead_masks, idxs, axis=0),
tf.gather(slot_targets, idxs, axis=0)),
y=(tf.gather(slot_targets, idxs, axis=0),
tf.gather(intent_data, idxs, axis=0)))
return data
def create_tff_dataset(client_idxs, in_data, slot_data, intent_data,
num_clients):
"""Generates the dataset format required for tensorflow federated.
Args:
        client_idxs: A list of data indices assigned to each client.
in_data: The input data.
slot_data: The slot labels.
intent_data: The intent labels.
num_clients: Number of clients.
Returns:
A dictionary of client ids mapped to the client dataset and an
additional validation dataset in case of personalization.
"""
train_fed_data = OrderedDict()
slot_inputs, slot_targets = slot_data[:, :-1], slot_data[:, 1:]
padding_masks, look_ahead_masks, intent_masks = create_masks(
in_data, slot_targets)
for i in range(num_clients):
client_idx = np.array(client_idxs[i])
train_idxs = client_idx
client_data = gather_indices(train_idxs, in_data, slot_inputs,
padding_masks, look_ahead_masks,
intent_masks, slot_targets, intent_data)
train_fed_data[str(i)] = client_data
return train_fed_data
def generate_iid_splits(in_data, slot_data, intent_data, num_clients=10):
"""Creates IID splits of the dataset by randomly partitioning the data
into multiple splits.
"""
instances_per_client = len(in_data) // num_clients
shuffled_idxs = np.arange(len(in_data))
np.random.shuffle(shuffled_idxs)
client_idxs = defaultdict()
for i in range(num_clients):
        # Assign each client its own contiguous block of the shuffled indices.
        client_idxs[i] = shuffled_idxs[i * instances_per_client:(i + 1) *
                                       instances_per_client]
fed_data = create_tff_dataset(client_idxs, in_data, slot_data, intent_data,
num_clients)
return fed_data
def generate_splits_type1(in_data, slot_data, intent_data, num_clients=10):
"""Creates non-IID splits of the dataset. Each intent type is distributed
among the clients according to a random multinomial distribution.
"""
unique_intents = np.unique(intent_data)
client_idxs = defaultdict(list)
for intent_id in unique_intents:
# generate random multinomial distribution over clients
intent_client_distribution = np.random.randint(
low=0, high=1000, size=num_clients).astype(np.float)
intent_client_distribution /= np.sum(intent_client_distribution)
intent_idxs = np.where(np.array(intent_data).squeeze() == intent_id)[0]
# Assign each intent instance to a client based on the previously
# generated distribution
client_idx_distribution = np.random.multinomial(
1, intent_client_distribution, size=len(intent_idxs))
client_idx_distribution = np.argmax(client_idx_distribution, axis=1)
for client_id in range(num_clients):
client_idxs[client_id] += intent_idxs[(
client_idx_distribution == client_id)].tolist()
fed_data = create_tff_dataset(client_idxs, in_data, slot_data, intent_data,
num_clients)
return fed_data
def generate_splits_type2(in_data,
slot_data,
intent_data,
instance_types_per_client=1):
"""Creates non-IID splits of the dataset. Each client is given only a fixed number
of intent types.
"""
unique_intents = np.unique(intent_data)
np.random.shuffle(unique_intents)
num_clients = int(
np.ceil(len(unique_intents) / float(instance_types_per_client)))
client_idxs = defaultdict(list)
for client_id in range(num_clients):
intent_ids = unique_intents[client_id *
instance_types_per_client:(client_id + 1) *
instance_types_per_client]
for intent_id in intent_ids:
intent_idxs = np.where(
np.array(intent_data).squeeze() == intent_id)[0]
client_idxs[client_id] += intent_idxs.tolist()
fed_data = create_tff_dataset(client_idxs, in_data, slot_data, intent_data,
num_clients)
return fed_data, num_clients
def generate_splits_type3(in_data,
slot_data,
intent_data,
instance_types_per_client=3,
clients_per_instance_type=3):
"""Creates non-IID splits of the dataset. Each client is given only a fixed number
of intent types. This is different from type2 since in type 2 each intent type belongs
exclusively to a certain user but in type 3 the instances having the same intent type can
belong to different users.
"""
unique_intents = np.unique(intent_data)
np.random.shuffle(unique_intents)
num_clients = int(
np.ceil(len(unique_intents) /
float(instance_types_per_client))) * clients_per_instance_type
client_list = []
# Create a list of shuffled client ids
for _ in range(
int(
np.ceil(clients_per_instance_type * len(unique_intents) /
num_clients))):
client_shuffled = np.arange(num_clients)
np.random.shuffle(client_shuffled)
client_list.append(client_shuffled)
client_list = np.concatenate(client_list)
client_idxs = defaultdict(list)
for idx, intent_id in enumerate(unique_intents):
# select a subset of clients for each instance
client_ids = client_list[idx * clients_per_instance_type:(idx + 1) *
clients_per_instance_type]
# generate a random multinomial distribution
intent_client_distribution = np.random.randint(
low=0, high=1000, size=clients_per_instance_type).astype(np.float)
intent_client_distribution /= np.sum(intent_client_distribution)
intent_idxs = np.where(np.array(intent_data).squeeze() == intent_id)[0]
# sample from the distribution
client_idx_distribution = np.random.multinomial(
1, intent_client_distribution, size=len(intent_idxs))
client_idx_distribution = np.argmax(client_idx_distribution, axis=1)
for i, client_id in enumerate(client_ids):
client_idxs[client_id] += intent_idxs[(
client_idx_distribution == i)].tolist()
fed_data = create_tff_dataset(client_idxs, in_data, slot_data, intent_data,
num_clients)
return fed_data, num_clients
| {
"content_hash": "7f50968b073118d89ec81b69be1cca0d",
"timestamp": "",
"source": "github",
"line_count": 206,
"max_line_length": 93,
"avg_line_length": 36.41747572815534,
"alnum_prop": 0.6095707811250334,
"repo_name": "googleinterns/fednsp",
"id": "2db8344c07a173d4caf43bcffa80ce4e672d0f86",
"size": "7502",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Federated Model (seq-to-seq)/generate_splits.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "270154"
}
],
"symlink_target": ""
} |
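A self-contained sketch (toy sizes, random data) of the per-intent assignment used by generate_splits_type1 above: draw a random distribution over clients, then sample one client per instance from it.
import numpy as np
num_clients = 4
intent_idxs = np.arange(20)                      # indices of one intent type
dist = np.random.randint(1, 1000, size=num_clients).astype(float)
dist /= dist.sum()                               # normalize to a distribution over clients
assignment = np.argmax(np.random.multinomial(1, dist, size=len(intent_idxs)), axis=1)
per_client = {c: intent_idxs[assignment == c].tolist() for c in range(num_clients)}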
import proto # type: ignore
__protobuf__ = proto.module(
package="google.ads.googleads.v12.enums",
marshal="google.ads.googleads.v12",
manifest={"ListingGroupFilterProductTypeLevelEnum",},
)
class ListingGroupFilterProductTypeLevelEnum(proto.Message):
r"""Level of the type of a product offer.
"""
class ListingGroupFilterProductTypeLevel(proto.Enum):
r"""Enum describing the level of the type of a product offer."""
UNSPECIFIED = 0
UNKNOWN = 1
LEVEL1 = 2
LEVEL2 = 3
LEVEL3 = 4
LEVEL4 = 5
LEVEL5 = 6
__all__ = tuple(sorted(__protobuf__.manifest))
| {
"content_hash": "d6bcafc5cd87991c35962d690b567778",
"timestamp": "",
"source": "github",
"line_count": 26,
"max_line_length": 72,
"avg_line_length": 24.73076923076923,
"alnum_prop": 0.640746500777605,
"repo_name": "googleads/google-ads-python",
"id": "e6bff861dacc02cf65066443fde3639980812573",
"size": "1243",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "google/ads/googleads/v12/enums/types/listing_group_filter_product_type_level.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "23399881"
}
],
"symlink_target": ""
} |
from ..utils import _make_pseudo_id
from .base import BaseModel, SourceMixin, AssociatedLinkMixin, LinkMixin
from .schemas.event import schema
class EventAgendaItem(dict, AssociatedLinkMixin):
event = None
def __init__(self, description, event):
super(EventAgendaItem, self).__init__({
"description": description,
"classification": [],
"related_entities": [],
"subjects": [],
"media": [],
"notes": [],
"order": str(len(event.agenda)),
"extras": {},
})
self.event = event
def add_subject(self, what):
self['subjects'].append(what)
def add_classification(self, what):
self['classification'].append(what)
def add_vote_event(self, vote_event, *, id=None, note='consideration'):
self.add_entity(name=vote_event, entity_type='vote_event', id=id, note=note)
def add_committee(self, committee, *, id=None, note='participant'):
self.add_entity(name=committee, entity_type='organization', id=id, note=note)
def add_bill(self, bill, *, id=None, note='consideration'):
self.add_entity(name=bill, entity_type='bill', id=id, note=note)
def add_person(self, person, *, id=None, note='participant'):
self.add_entity(name=person, entity_type='person', id=id, note=note)
def add_media_link(self, note, url, media_type, *, text='', type='media',
on_duplicate='error'):
return self._add_associated_link(collection='media', note=note, url=url, text=text,
media_type=media_type, on_duplicate=on_duplicate)
def add_entity(self, name, entity_type, *, id, note):
ret = {
"name": name,
"entity_type": entity_type,
"note": note
}
if id:
ret['id'] = id
elif entity_type:
if entity_type in ('organization', 'person'):
id = _make_pseudo_id(name=name)
elif entity_type == 'bill':
id = _make_pseudo_id(identifier=name)
elif entity_type == 'vote_event':
id = _make_pseudo_id(identifier=name)
else:
raise NotImplementedError('{} entity type not implemented'.format(entity_type))
ret[entity_type + '_id'] = id
self['related_entities'].append(ret)
class Event(BaseModel, SourceMixin, AssociatedLinkMixin, LinkMixin):
"""
Details for an event in .format
"""
_type = 'event'
_schema = schema
def __init__(self, name, start_date, location_name, *,
all_day=False, description="", end_date="",
status="confirmed", classification="event"
):
super(Event, self).__init__()
self.start_date = start_date
self.all_day = all_day
self.end_date = end_date
self.name = name
self.description = description
self.status = status
self.classification = classification
self.location = {"name": location_name, "note": "", "coordinates": None}
self.documents = []
self.participants = []
self.media = []
self.agenda = []
def __str__(self):
return '{} {}'.format(self.start_date, self.name.strip())
def set_location(self, name, *, note="", url="", coordinates=None):
self.location = {"name": name, "note": note, "url": url, "coordinates": coordinates}
def add_participant(self, name, type, *, id=None, note='participant'):
p = {
"name": name,
"entity_type": type,
"note": note
}
if id:
p['id'] = id
elif type:
id = _make_pseudo_id(name=name)
p[type + '_id'] = id
self.participants.append(p)
def add_person(self, name, *, id=None, note='participant'):
return self.add_participant(name=name, type='person', id=id, note=note)
def add_committee(self, name, *, id=None, note='participant'):
return self.add_participant(name=name, type='organization', id=id, note=note)
def add_agenda_item(self, description):
obj = EventAgendaItem(description, self)
self.agenda.append(obj)
return obj
def add_media_link(self, note, url, media_type, *, text='', type='media',
on_duplicate='error'):
return self._add_associated_link(collection='media', note=note, url=url, text=text,
media_type=media_type, on_duplicate=on_duplicate)
def add_document(self, note, url, *, text='', media_type='', on_duplicate='error'):
return self._add_associated_link(collection='documents', note=note, url=url, text=text,
media_type=media_type, on_duplicate=on_duplicate)
| {
"content_hash": "e1ee903d3dd4ada83350bed709be5af4",
"timestamp": "",
"source": "github",
"line_count": 130,
"max_line_length": 95,
"avg_line_length": 37.6,
"alnum_prop": 0.5613747954173486,
"repo_name": "datamade/pupa",
"id": "451d7357764dc2bef46bd94f917bea26d83fa160",
"size": "4888",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pupa/scrape/event.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "258752"
},
{
"name": "Shell",
"bytes": "99"
}
],
"symlink_target": ""
} |
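A short usage sketch (hypothetical event data), assuming the pupa.scrape package above is importable; add_source is assumed to come from the SourceMixin defined elsewhere in the package:
from pupa.scrape.event import Event
event = Event(name="Transportation Committee",
              start_date="2016-01-19T14:00:00",
              location_name="City Hall, Room 201")
event.add_source("http://example.com/agenda")
event.add_person("Jane Roe", note="chair")
item = event.add_agenda_item("Discussion of bike lane ordinance")
item.add_bill("ORD 2016-12")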
"""Tests for google3.third_party.py.madi.datasets.forestcover_dataset."""
import os
from absl import logging
from madi.datasets import forestcover_dataset
import tensorflow as tf
_DATA_FILE_IN = os.path.join(
os.path.dirname(__file__), 'test_data/covtype.test.data')
_DATA_FILE_TEST = 'covtype.data'
_COL_NAMES_SELECT = [
'Elevation', 'Aspect', 'Slope', 'Horizontal_Distance_To_Hydrology',
'Vertical_Distance_To_Hydrology', 'Horizontal_Distance_To_Roadways',
'Hillshade_9am', 'Hillshade_Noon', 'Hillshade_3pm',
'Horizontal_Distance_To_Fire_Points', 'class_label'
]
class TestForestCoverDataset:
def test_forsetcover_dataset(self, tmpdir):
datadir = tmpdir
datafile_test = os.path.join(datadir, _DATA_FILE_TEST)
logging.info(_DATA_FILE_IN)
tf.io.gfile.copy(_DATA_FILE_IN, datafile_test, overwrite=True)
ds = forestcover_dataset.ForestCoverDataset(datadir)
assert len(ds.sample) == 139
assert sorted(ds.sample.columns) == sorted(_COL_NAMES_SELECT)
assert set(ds.sample.columns) == set(_COL_NAMES_SELECT)
| {
"content_hash": "d5e6f5bdc220ba27297544b9e40a9f41",
"timestamp": "",
"source": "github",
"line_count": 33,
"max_line_length": 73,
"avg_line_length": 32.27272727272727,
"alnum_prop": 0.7154929577464789,
"repo_name": "google/madi",
"id": "1595f44bea46415805ec63c03b655f5254207e42",
"size": "1699",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/forestcover_dataset_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "67773"
},
{
"name": "Python",
"bytes": "96142"
},
{
"name": "Starlark",
"bytes": "11824"
}
],
"symlink_target": ""
} |
import os
import urlparse
from redis import Redis
from rq import Worker, Queue, Connection
listen = ['default']
redis_url = os.getenv('REDIS_URL')
if not redis_url:
raise RuntimeError('Set up Redis To Go first.')
urlparse.uses_netloc.append('redis')
url = urlparse.urlparse(redis_url)
conn = Redis(host=url.hostname, port=url.port, db=0, password=url.password)
with Connection(conn):
worker = Worker(map(Queue, listen))
worker.work()
| {
"content_hash": "56bea96a081150e76bb75c2adafd5ae8",
"timestamp": "",
"source": "github",
"line_count": 18,
"max_line_length": 75,
"avg_line_length": 25,
"alnum_prop": 0.7288888888888889,
"repo_name": "jms/compress_service",
"id": "719e1a858193eb478f328d31085003028b968aa5",
"size": "450",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "run-worker.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "7918"
},
{
"name": "Shell",
"bytes": "423"
}
],
"symlink_target": ""
} |
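A companion sketch (hypothetical function path): jobs for the worker above are enqueued onto the same 'default' queue it listens on.
from redis import Redis
from rq import Queue
q = Queue('default', connection=Redis())
q.enqueue('myapp.tasks.compress_file', '/tmp/input.bin')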
import gc
import easydict
import numpy as np
import tensorflow as tf
from luminoth.models.base.base_network import (
BaseNetwork, _R_MEAN, _G_MEAN, _B_MEAN, VALID_ARCHITECTURES
)
class BaseNetworkTest(tf.test.TestCase):
def setUp(self):
self.config = easydict.EasyDict({
'architecture': 'vgg_16',
})
tf.reset_default_graph()
def testDefaultImageSize(self):
m = BaseNetwork(easydict.EasyDict({'architecture': 'vgg_16'}))
self.assertEqual(m.default_image_size, 224)
m = BaseNetwork(easydict.EasyDict({'architecture': 'resnet_v1_50'}))
self.assertEqual(m.default_image_size, 224)
def testSubtractChannels(self):
m = BaseNetwork(self.config)
inputs = tf.placeholder(tf.float32, [1, 2, 2, 3])
subtracted_inputs = m._subtract_channels(inputs)
# White image
r = 255. - _R_MEAN
g = 255. - _G_MEAN
b = 255. - _B_MEAN
with self.test_session() as sess:
res = sess.run(subtracted_inputs, feed_dict={
inputs: np.ones([1, 2, 2, 3]) * 255
})
# Assert close and not equals because of floating point
# differences between TF and numpy
self.assertAllClose(
res,
# numpy broadcast multiplication
np.ones([1, 2, 2, 3]) * [r, g, b]
)
def testAllArchitectures(self):
for architecture in VALID_ARCHITECTURES:
self.config.architecture = architecture
m = BaseNetwork(self.config)
inputs = tf.placeholder(tf.float32, [1, None, None, 3])
# Should not fail.
m(inputs)
# Free up memory for Travis
tf.reset_default_graph()
gc.collect(generation=2)
def testTrainableVariables(self):
inputs = tf.placeholder(tf.float32, [1, 224, 224, 3])
model = BaseNetwork(easydict.EasyDict({'architecture': 'vgg_16'}))
model(inputs)
# Variables in VGG16:
# 0 conv1/conv1_1/weights:0
# 1 conv1/conv1_1/biases:0
# (...)
# 30 fc8/weights:0
# 31 fc8/biases:0
self.assertEqual(len(model.get_trainable_vars()), 32)
model = BaseNetwork(
easydict.EasyDict(
{'architecture': 'vgg_16', 'fine_tune_from': 'conv5/conv5_3'}
)
)
model(inputs)
# Variables from `conv5/conv5_3` to the end:
# conv5/conv5_3/weights:0
# conv5/conv5_3/biases:0
# fc6/weights:0
# fc6/biases:0
# fc7/weights:0
# fc7/biases:0
# fc8/weights:0
# fc8/biases:0
self.assertEqual(len(model.get_trainable_vars()), 8)
#
# Check invalid fine_tune_from raises proper exception
#
model = BaseNetwork(
easydict.EasyDict(
{'architecture': 'vgg_16', 'fine_tune_from': 'conv5/conv99'}
)
)
model(inputs)
with self.assertRaises(ValueError):
model.get_trainable_vars()
if __name__ == '__main__':
tf.test.main()
| {
"content_hash": "3174b98655b88e81c7a518fe0b83fb7e",
"timestamp": "",
"source": "github",
"line_count": 102,
"max_line_length": 77,
"avg_line_length": 31.186274509803923,
"alnum_prop": 0.5488839987425338,
"repo_name": "tryolabs/luminoth",
"id": "cf5ee29167dd797e4745a51b17df1c22bad01979",
"size": "3181",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "luminoth/models/base/base_network_test.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "4588"
},
{
"name": "HTML",
"bytes": "2203"
},
{
"name": "JavaScript",
"bytes": "5293"
},
{
"name": "Python",
"bytes": "624708"
}
],
"symlink_target": ""
} |
import sys, time
# numpy and scipy
import numpy as np
from scipy.ndimage import filters
# OpenCV
import cv2, cv
# Ros libraries
import roslib
import rospy
# Ros Messages
from sensor_msgs.msg import Image, CompressedImage
from cv_bridge import CvBridge, CvBridgeError
class image_converter:
def __init__(self):
'''Initialize ros publisher, ros subscriber'''
# topic where we publish
self.bridge = CvBridge()
out_image = rospy.get_param(rospy.search_param('out'))
image = rospy.get_param(rospy.search_param('image'))
# subscribed Topic
self.image_pub = rospy.Publisher(out_image, Image)
self.subscriber = rospy.Subscriber(image, CompressedImage, self.callback, queue_size = 1000)
def callback(self, ros_data):
'''Callback function of subscribed topic.
Here images get converted and features detected'''
#### direct conversion to CV2 ####
np_arr = np.fromstring(ros_data.data, np.uint8)
image_np = cv2.imdecode(np_arr, cv2.CV_LOAD_IMAGE_COLOR)
image_cv = cv.fromarray(image_np)
# Publish new image
try:
self.image_pub.publish(self.bridge.cv_to_imgmsg(image_cv, "bgr8"))
except CvBridgeError, e:
print e
def main(args):
'''Initializes and cleanup ros node'''
rospy.init_node('image_converter', anonymous=True)
ic = image_converter()
try:
rospy.spin()
except KeyboardInterrupt:
print "Shutting down ROS Image feature detector module"
cv2.destroyAllWindows()
if __name__ == '__main__':
main(sys.argv)
| {
"content_hash": "fdb7bc073b5bcc7c6f0c94640cf4a465",
"timestamp": "",
"source": "github",
"line_count": 63,
"max_line_length": 100,
"avg_line_length": 26.03174603174603,
"alnum_prop": 0.6426829268292683,
"repo_name": "sameeptandon/sail-car-log",
"id": "404e59b801faa4cf58ced1efc515ce6601cdf503",
"size": "1676",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ros_drivers/src/ov_camera_driver/src/ov_image_converter.py",
"mode": "33261",
"license": "bsd-2-clause",
"language": [
{
"name": "C",
"bytes": "283486"
},
{
"name": "C++",
"bytes": "428270"
},
{
"name": "CMake",
"bytes": "75122"
},
{
"name": "CSS",
"bytes": "1110"
},
{
"name": "Cuda",
"bytes": "31989"
},
{
"name": "HTML",
"bytes": "2414"
},
{
"name": "JavaScript",
"bytes": "12886"
},
{
"name": "Matlab",
"bytes": "14794"
},
{
"name": "Protocol Buffer",
"bytes": "4913"
},
{
"name": "Python",
"bytes": "870911"
},
{
"name": "Shell",
"bytes": "2144"
}
],
"symlink_target": ""
} |
"""
Copydog syncs Redmine and Trello.
Usage:
runner.py --config=<yaml> [options]
runner.py (start|stop|restart) --config=<yaml> [options]
runner.py (debug_storage|flush_storage) [options]
Options:
--config=<yaml> Config file.
-f --fullsync Full sync (all issues, opposed to date range delta sync). Needed only once.
-v --verbose Make verbose output.
-q --quiet Make no output.
-h --help Show this screen.
--version Show version.
"""
import logging
import logging.config
from daemon.runner import DaemonRunner
from docopt import docopt
import copydog
from ..storage.factory import StorageFactory
from ..utils.config import Config
from ..utils import storage_browser
from ..watcher import Watch
def setup_logging(arguments):
logging.config.fileConfig('logging.cfg', disable_existing_loggers=True)
if arguments['--verbose']:
level = logging.DEBUG
elif arguments['--quiet']:
level = logging.CRITICAL
else:
level = logging.INFO
logging.getLogger('copydog').setLevel(level)
def setup_config(config_path):
config = Config()
try:
config = Config(file=config_path)
except Exception as e:
exit(str(e))
return config
class DeamonApp(object):
stdin_path='/tmp/copydog.in.log'
stdout_path='/tmp/copydog.out.log'
stderr_path='/tmp/copydog.err.log'
pidfile_path='/tmp/copydog.pid'
pidfile_timeout=100
run=lambda: None
def execute(config, full_sync=False, daemonize=False):
if not any(map(lambda item: item[1].get('write'), config.clients)):
exit('Allow at least one client to write')
watch = Watch(config, full_sync=full_sync)
if daemonize:
app = DeamonApp()
app.run = watch.run
DaemonRunner(app).do_action()
else:
watch.run()
def flush_storage(storage):
""" Erase all copydog keys and values from storage"""
msg = 'This is irreversible operation, please confirm you want to empty copydog storage.'
shall = True if raw_input("%s (y/N) " % msg).lower() == 'y' else False
if shall:
storage.flush()
else:
print 'No action taken'
def main():
arguments = docopt(__doc__, version='Copydog %s' % copydog.__version__)
setup_logging(arguments)
config = setup_config(arguments['--config'])
storage = StorageFactory.get(config.get('storage'))
if arguments['debug_storage']:
storage_browser.browse(storage)
exit()
if arguments['flush_storage']:
flush_storage(storage)
exit()
daemonize = bool(arguments.get('start') or arguments.get('stop') or arguments.get('restart'))
execute(config, full_sync=arguments['--fullsync'], daemonize=daemonize)
if __name__ == '__main__':
try:
main()
except KeyboardInterrupt:
pass
| {
"content_hash": "9c8f01dc9bcbda53e75808b316c5da96",
"timestamp": "",
"source": "github",
"line_count": 104,
"max_line_length": 97,
"avg_line_length": 27.23076923076923,
"alnum_prop": 0.6528954802259888,
"repo_name": "coagulant/copydog",
"id": "21b778cc9bfbd49a16fc63634113d07473ec03fa",
"size": "2856",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "copydog/bin/runner.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "7254"
},
{
"name": "Python",
"bytes": "61933"
}
],
"symlink_target": ""
} |
import threading
import Queue
from multiprocessing import Manager
from multiprocessing import Process
import os
from splunktalib.common import log
logger = log.Logs().get_logger("main")
import splunktalib.event_writer as ew
import splunktalib.timer_queue as tq
import splunktalib.common.util as scutil
import splunktalib.orphan_process_monitor as opm
import pubsub_mod.google_pubsub_data_loader as gpdl
import cloud_monitor_mod.google_cloud_monitor_data_loader as gmdl
import pubsub_mod.google_pubsub_consts as gpc
import google_ta_common.google_consts as ggc
def create_data_loader(config):
service_2_data_loader = {
ggc.google_pubsub: gpdl.GooglePubSubDataLoader,
ggc.google_cloud_monitor: gmdl.GoogleCloudMonitorDataLoader,
}
assert config.get(ggc.google_service)
assert config[ggc.google_service] in service_2_data_loader
return service_2_data_loader[config[ggc.google_service]](config)
def _wait_for_tear_down(tear_down_q, loader):
checker = opm.OrphanProcessChecker()
while 1:
try:
go_exit = tear_down_q.get(block=True, timeout=2)
except Queue.Empty:
go_exit = checker.is_orphan()
if go_exit:
logger.info("%s becomes orphan, going to exit", os.getpid())
if go_exit:
break
if loader is not None:
loader.stop()
logger.info("End of waiting for tear down signal")
def _load_data(tear_down_q, task_config):
loader = create_data_loader(task_config)
thr = threading.Thread(
target=_wait_for_tear_down, args=(tear_down_q, loader))
thr.daemon = True
thr.start()
loader.index_data()
thr.join()
logger.info("End of load data")
class GoogleConcurrentDataLoader(object):
def __init__(self, task_config, tear_down_q, process_safe):
if process_safe:
self._worker = Process(
target=_load_data, args=(tear_down_q, task_config))
else:
self._worker = threading.Thread(
target=_load_data, args=(tear_down_q, task_config))
self._worker.daemon = True
self._started = False
self._tear_down_q = tear_down_q
self.name = task_config[ggc.name]
def start(self):
if self._started:
return
self._started = True
self._worker.start()
logger.info("GoogleConcurrentDataLoader started.")
def tear_down(self):
self.stop()
def stop(self):
if not self._started:
return
self._started = False
self._tear_down_q.put(True)
logger.info("GoogleConcurrentDataLoader is going to exit.")
class GoogleDataLoaderManager(object):
def __init__(self, task_configs):
self._task_configs = task_configs
self._wakeup_queue = Queue.Queue()
self._timer_queue = tq.TimerQueue()
self._mgr = None
self._started = False
self._stop_signaled = False
def start(self):
if self._started:
return
self._started = True
self._timer_queue.start()
process_safe = self._use_multiprocess()
logger.info("Use multiprocessing=%s", process_safe)
event_writer = ew.create_event_writer(
self._task_configs[0], process_safe)
event_writer.start()
tear_down_q = self._create_tear_down_queue(process_safe)
loaders = []
for task in self._task_configs:
task[ggc.event_writer] = event_writer
loader = GoogleConcurrentDataLoader(
task, tear_down_q, process_safe)
loader.start()
loaders.append(loader)
logger.info("GoogleDataLoaderManager started")
_wait_for_tear_down(self._wakeup_queue, None)
logger.info("GoogleDataLoaderManager got stop signal")
for loader in loaders:
logger.info("Notify loader=%s", loader.name)
loader.stop()
event_writer.tear_down()
self._timer_queue.tear_down()
if self._mgr is not None:
self._mgr.shutdown()
logger.info("GoogleDataLoaderManager stopped")
def tear_down(self):
self.stop()
def stop(self):
self._stop_signaled = True
self._wakeup_queue.put(True)
logger.info("GoogleDataLoaderManager is going to stop.")
def stopped(self):
return not self._started
def received_stop_signal(self):
return self._stop_signaled
def add_timer(self, callback, when, interval):
return self._timer_queue.add_timer(callback, when, interval)
def remove_timer(self, timer):
self._timer_queue.remove_timer(timer)
def _use_multiprocess(self):
if not self._task_configs:
return False
return scutil.is_true(self._task_configs[0].get(ggc.use_multiprocess))
def _create_tear_down_queue(self, process_safe):
if process_safe:
self._mgr = Manager()
tear_down_q = self._mgr.Queue()
else:
tear_down_q = Queue.Queue()
return tear_down_q
if __name__ == "__main__":
import time
import sys
import logging
import google_wrapper.pubsub_wrapper as gpw
class O(object):
def write_events(self, index, source, sourcetype, events):
for event in events:
sys.stdout.write(event)
sys.stdout.write("\n")
logger = logging.getLogger("google")
ch = logging.StreamHandler()
logger.addHandler(ch)
os.environ["GOOGLE_APPLICATION_CREDENTIALS"] = "zlchenken-78c88c5c115b.json"
config = {
ggc.name: "test_pubsub",
ggc.google_service: ggc.google_pubsub,
ggc.checkpoint_dir: ".",
ggc.server_uri: "https://localhost:8089",
ggc.server_host: "localhost",
ggc.index: "main",
gpc.google_project: "zlchenken",
gpc.google_topic: "test_topic",
gpc.google_subscription: "sub_test_topic",
gpc.batch_count: 10,
gpc.base64encoded: True,
}
def pub():
ps = gpw.GooglePubSub(logger, config)
for i in range(10):
messages = ["i am counting {} {}".format(i, j) for j in range(10)]
ps.publish_messages(messages)
time.sleep(1)
pubthr = threading.Thread(target=pub)
pubthr.start()
l = GoogleDataLoaderManager([config])
def _tear_down():
time.sleep(30)
l.stop()
threading.Thread(target=_tear_down).start()
l.start()
pubthr.join()
time.sleep(1)
| {
"content_hash": "320e634f485cd504b767f8570dc3de10",
"timestamp": "",
"source": "github",
"line_count": 232,
"max_line_length": 80,
"avg_line_length": 28.28448275862069,
"alnum_prop": 0.6132276744894849,
"repo_name": "chenziliang/google",
"id": "8e84954e13367b7ed4a23654330cd1303dbeb7d3",
"size": "6562",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Splunk_TA_google/bin/google_concurrent_data_loader.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "5822"
},
{
"name": "JavaScript",
"bytes": "34015"
},
{
"name": "Python",
"bytes": "1245611"
},
{
"name": "Shell",
"bytes": "122"
}
],
"symlink_target": ""
} |
__author__ = 'Neil Butcher'
from PyQt4 import QtCore, QtGui
from PyQt4.QtCore import pyqtSignal
from Rota_System.UI.Roles.commands_role_compatibility import CommandChangeCompatibilityRole
class RoleCompatibilitiesSettingModel(QtCore.QAbstractTableModel):
"""
used to manipulate the relative compatibilities between roles
"""
commandIssued = pyqtSignal(QtGui.QUndoCommand)
def __init__(self, all_roles_model):
QtCore.QAbstractTableModel.__init__(self)
self._allRolesModel = all_roles_model
all_roles_model.rolelist.roleAdded.connect(self.roles_changed)
all_roles_model.rolelist.roleRemoved.connect(self.roles_changed)
all_roles_model.rolelist.rolesChanged.connect(self.roles_changed)
for r in all_roles_model.rolelist.roles:
r.compatibilitiesChanged.connect(self.role_changed)
@QtCore.pyqtSlot()
def roles_changed(self):
self.reset()
for r in self._allRolesModel.rolelist.roles:
r.compatibilitiesChanged.connect(self.role_changed)
@QtCore.pyqtSlot()
def role_changed(self):
self.reset()
def columnCount(self, index):
return len(self._allRolesModel.rolelist.roles)
def rowCount(self, index):
return len(self._allRolesModel.rolelist.roles)
def flags(self, index):
return QtCore.Qt.ItemIsUserCheckable | QtCore.Qt.ItemIsEnabled | QtCore.Qt.ItemIsSelectable
def data(self, index, role=QtCore.Qt.DisplayRole):
if not index.isValid():
return None
if not (role == QtCore.Qt.CheckStateRole):
return None
role1 = self._allRolesModel.rolelist.roles[index.row()]
role2 = self._allRolesModel.rolelist.roles[index.column()]
return role1.compatible_with(role2)
def headerData(self, section, orientation, role=QtCore.Qt.DisplayRole):
if not (role == QtCore.Qt.DisplayRole):
return None
return self._allRolesModel.rolelist.roles[section].description
def setData(self, index, value, role=QtCore.Qt.EditRole):
if not index.isValid():
return False
if not (role == QtCore.Qt.CheckStateRole):
return False
role1 = self._allRolesModel.rolelist.roles[index.row()]
role2 = self._allRolesModel.rolelist.roles[index.column()]
compatible = role1.compatible_with(role2)
command = CommandChangeCompatibilityRole(role1, role2, not compatible)
self.commandIssued.emit(command)
return True
| {
"content_hash": "ab4b9b402437b2ce2f9857ae8d9604d1",
"timestamp": "",
"source": "github",
"line_count": 68,
"max_line_length": 99,
"avg_line_length": 37.220588235294116,
"alnum_prop": 0.6835242986961675,
"repo_name": "ergoregion/Rota-Program",
"id": "c7e1e3a995817853c9fec4849acbc59997a98225",
"size": "2531",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Rota_System/UI/Roles/model_role_compatibility.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "254617"
}
],
"symlink_target": ""
} |
from pecan import rest
from wsme import types as wtypes
import wsmeext.pecan as wsme_pecan
from mistral.openstack.common import log as logging
from mistral.api.controllers import resource
from mistral.db import api as db_api
from mistral.utils import rest_utils
LOG = logging.getLogger(__name__)
class Event(resource.Resource):
"""Event descriptor."""
pass
class TaskEvent(Event):
type = "TASK_STATE"
task = wtypes.text
class ExecutionEvent(Event):
type = "EXECUTION_STATE"
workbook_name = wtypes.text
class Listener(resource.Resource):
"""Listener resource."""
id = wtypes.text
description = wtypes.text
workbook_name = wtypes.text
webhook = wtypes.text
events = [Event]
class Listeners(resource.Resource):
"""A collection of Listener resources."""
listeners = [Listener]
class ListenersController(rest.RestController):
@rest_utils.wrap_wsme_controller_exception
@wsme_pecan.wsexpose(Listener, wtypes.text, wtypes.text)
def get(self, workbook_name, id):
LOG.debug("Fetch listener [workbook_name=%s, id=%s]" %
(workbook_name, id))
values = db_api.listener_get(workbook_name, id)
return Listener.from_dict(values)
@wsme_pecan.wsexpose(Listener, wtypes.text, wtypes.text, body=Listener)
def put(self, workbook_name, id, listener):
LOG.debug("Update listener [workbook_name=%s, id=%s, listener=%s]" %
(workbook_name, id, listener))
values = db_api.listener_update(workbook_name, id, listener.to_dict())
return Listener.from_dict(values)
@rest_utils.wrap_wsme_controller_exception
@wsme_pecan.wsexpose(Listener, wtypes.text, body=Listener, status_code=201)
def post(self, workbook_name, listener):
LOG.debug("Create listener [workbook_name=%s, listener=%s]" %
(workbook_name, listener))
values = db_api.listener_create(workbook_name, listener.to_dict())
return Listener.from_dict(values)
@rest_utils.wrap_wsme_controller_exception
@wsme_pecan.wsexpose(None, wtypes.text, wtypes.text, status_code=204)
def delete(self, workbook_name, id):
LOG.debug("Delete listener [workbook_name=%s, id=%s]" %
(workbook_name, id))
db_api.listener_delete(workbook_name, id)
@wsme_pecan.wsexpose(Listeners, wtypes.text)
def get_all(self, workbook_name):
LOG.debug("Fetch listeners [workbook_name=%s]" % workbook_name)
listeners = [Listener.from_dict(values)
for values in db_api.listeners_get(workbook_name)]
return Listeners(listeners=listeners)
| {
"content_hash": "72c2945dd3c57c63041410c91450e80c",
"timestamp": "",
"source": "github",
"line_count": 88,
"max_line_length": 79,
"avg_line_length": 30.295454545454547,
"alnum_prop": 0.6699174793698425,
"repo_name": "dmitryilyin/mistral",
"id": "2bd12a80a2321ad19e7c705546978ab3533d7f8f",
"size": "3300",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "mistral/api/controllers/v1/listener.py",
"mode": "33188",
"license": "apache-2.0",
"language": [],
"symlink_target": ""
} |
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('blog', '0004_comment_excerpt'),
]
operations = [
migrations.AlterModelOptions(
name='comment',
options={'ordering': ['-updated', '-created'], 'verbose_name': 'comment', 'verbose_name_plural': 'comments'},
),
]
| {
"content_hash": "27e2d33895a27309bad9fa517fb4dab8",
"timestamp": "",
"source": "github",
"line_count": 17,
"max_line_length": 121,
"avg_line_length": 24.11764705882353,
"alnum_prop": 0.6,
"repo_name": "amaozhao/blogular",
"id": "7922a8ee8d1623c6707e68e697c859a6b4065597",
"size": "482",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "blog/migrations/0005_auto_20160103_1929.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "630768"
},
{
"name": "HTML",
"bytes": "152862"
},
{
"name": "JavaScript",
"bytes": "855396"
},
{
"name": "Python",
"bytes": "4112262"
}
],
"symlink_target": ""
} |
"""In this file tests for Crawler scripts are gathered."""
import pytest
from crawler import Crawler
class TestCrawler:
"""Crawler class unit tests."""
@pytest.mark.parametrize('url,expected,server_name', [
('www.test.com', 'http://www.test.com', 'www.test.com'),
('http://www.test.com', 'http://www.test.com', 'www.test.com'),
('https://www.test.com', 'https://www.test.com', 'www.test.com'),
('https://www.test.co.uk', 'https://www.test.co.uk', 'www.test.co.uk'),
])
def test_extract_url_properly(self, url, expected, server_name):
"""Test Crawler can extract url properly"""
crawler = Crawler(url)
assert crawler.base_url == expected
assert crawler.server == server_name
def test_raise_exception_when_no_url(self):
"""Test Crawler raises exception when no url were passed."""
with pytest.raises(AttributeError):
Crawler(None)
@pytest.mark.usefixtures('mocker', 'correct_response')
def test_analyzing_page_different_style(self, mocker, correct_response):
"""Test getting links from page content."""
requests_mck = mocker.patch('requests.get')
requests_mck.return_value = correct_response
url = 'test1.com'
crawler = Crawler(url)
crawler.analyze()
expected = {
'http://test1.com',
'http://test1.com/help.html',
}
assert crawler.links == expected
@pytest.mark.usefixtures('mocker', 'correct_response')
def test_crawling_over_page_with_limit(self, mocker, correct_response):
"""Test that crawling will stop when limit value were met."""
requests_mck = mocker.patch('requests.get')
requests_mck.return_value = correct_response
url = 'test1.com'
crawler = Crawler(url, 1)
crawler.analyze()
expected = {'http://test1.com'}
assert crawler.links == expected
@pytest.mark.usefixtures('mocker', 'response_with_mail', 'not_found_response')
def test_analyzing_page_with_mailto(self, mocker, response_with_mail, not_found_response):
requests_mck = mocker.patch('requests.get')
requests_mck.side_effect = [response_with_mail, not_found_response]
url = 'test2.com'
crawler = Crawler(url)
crawler.analyze()
# http://test2.com/help.html doesn't exists
# that's why only base page is visible
expected = {'http://test2.com'}
assert crawler.links == expected
| {
"content_hash": "542169683910cd712448f2e805b7bb8a",
"timestamp": "",
"source": "github",
"line_count": 68,
"max_line_length": 94,
"avg_line_length": 37.10294117647059,
"alnum_prop": 0.6214823622671423,
"repo_name": "jkaluzka/siteMapGen",
"id": "9b7efeda557ce75cf7c0b019c73d9bb174d9aee1",
"size": "2523",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_crawler.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "15893"
}
],
"symlink_target": ""
} |
"""
Wrappers for doing binary IO on file-like objects
"""
import numpy
import struct
import sys
## Standard size:
## short is 16 bits
## int and long are 32 bits
## long long is 64 bits
class BadMagicNumber( IOError ):
pass
class BinaryFileReader( object ):
"""
Wrapper for doing binary reads on any file like object.
Currently this is not heavily optimized (it uses the `struct` module to
unpack)
"""
def __init__( self, file, magic = None, is_little_endian = False ):
self.is_little_endian = is_little_endian
self.file = file
if magic is not None:
# Attempt to read magic number and chuck endianess
bytes = file.read( 4 )
if struct.unpack( ">I", bytes )[0] == magic:
pass
elif struct.unpack( "<I", bytes )[0] == magic:
self.is_little_endian = True
else:
raise BadMagicNumber( "File does not have expected magic number: %x != %x or %x" \
% ( magic, struct.unpack( ">I", bytes )[0], struct.unpack( "<I", bytes )[0] ) )
# Set endian code
if self.is_little_endian:
self.endian_code = "<"
self.byteswap_needed = ( sys.byteorder != "little" )
else:
self.endian_code = ">"
self.byteswap_needed = ( sys.byteorder != "big" )
def unpack( self, format, buffer, byte_count=None ):
pattern = "%s%s" % ( self.endian_code, format )
if byte_count is None:
byte_count = struct.calcsize( pattern )
return struct.unpack( pattern, buffer )
def read_and_unpack( self, format, byte_count=None ):
"""
Read enough bytes to unpack according to `format` and return the
tuple of unpacked values.
"""
pattern = "%s%s" % ( self.endian_code, format )
if byte_count is None:
byte_count = struct.calcsize( pattern )
return struct.unpack( pattern, self.file.read( byte_count ) )
def read_c_string( self ):
"""
Read a zero terminated (C style) string
"""
rval = []
while 1:
ch = self.file.read(1)
assert len( ch ) == 1, "Unexpected end of file"
if ch == '\0':
break
rval.append( ch )
return ''.join( rval )
def read_raw_array( self, dtype, size ):
a = numpy.fromfile( self.file, dtype=dtype, count=size )
if self.byteswap_needed:
a.byteswap()
return a
def read( self, byte_count=1 ):
return self.file.read( byte_count )
def tell( self ):
return self.file.tell()
def skip( self, count ):
self.file.seek( count, 1 )
def seek( self, pos, whence=0 ):
return self.file.seek( pos, whence )
def read_uint8( self ):
return self.read_and_unpack( "B", 1 )[0]
def read_uint16( self ):
return self.read_and_unpack( "H", 2 )[0]
def read_uint32( self ):
return self.read_and_unpack( "L", 4 )[0]
def read_uint64( self ):
return self.read_and_unpack( "Q", 8 )[0]
def read_float( self ):
return self.read_and_unpack( "f", 4 )[0]
class BinaryFileWriter( object ):
"""
Wrapper for doing binary writes on any file like object.
Currently this is not heavily optimized (it uses the `struct` module to
unpack)
"""
def __init__( self, file, magic = None, is_little_endian = False ):
self.is_little_endian = is_little_endian
if self.is_little_endian:
self.endian_code = "<"
else:
self.endian_code = ">"
self.file = file
if magic is not None:
self.write_uint32( magic )
def pack( self, format, buffer ):
pattern = "%s%s" % ( self.endian_code, format )
return struct.pack( pattern, buffer )
def pack_and_write( self, format, value ):
"""
        Pack `value` according to `format` and write the resulting bytes to
        the file.
"""
pattern = "%s%s" % ( self.endian_code, format )
return self.file.write( struct.pack( pattern, value ) )
def write_c_string( self, value ):
"""
        Write a zero terminated (C style) string
"""
self.file.write( value )
self.file.write( '\0' )
def write_raw_array( self, value ):
value.tofile( self.file )
def write( self, value ):
return self.file.write( value )
def skip( self, count ):
self.file.seek( count, 1 )
def tell( self ):
return self.file.tell()
def seek( self, pos, whence=0 ):
return self.file.seek( pos, whence )
def write_uint8( self, value ):
return self.pack_and_write( "B", value )
def write_uint16( self, value ):
return self.pack_and_write( "H", value )
def write_uint32( self, value ):
return self.pack_and_write( "L", value )
def write_uint64( self, value ):
return self.pack_and_write( "Q", value ) | {
"content_hash": "a532fb5510537b04a2e4b8d409b0da9c",
"timestamp": "",
"source": "github",
"line_count": 169,
"max_line_length": 103,
"avg_line_length": 31.05325443786982,
"alnum_prop": 0.5377286585365854,
"repo_name": "kensugino/jGEM",
"id": "7d7eed332cc89208263feabc4161992b9c4e131e",
"size": "5248",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "jgem/bxbbi/binary_file.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "1119090"
}
],
"symlink_target": ""
} |
import os
import subprocess
import shutil
import sys
try:
import argparse as ap
except ImportError:
import pyne._argparse as ap
absexpanduser = lambda x: os.path.abspath(os.path.expanduser(x))
def check_windows_cmake(cmake_cmd):
if os.name == 'nt':
files_on_path = set()
for p in os.environ['PATH'].split(';')[::-1]:
if os.path.exists(p):
files_on_path.update(os.listdir(p))
if 'cl.exe' in files_on_path:
pass
elif 'sh.exe' in files_on_path:
cmake_cmd += ['-G "MSYS Makefiles"']
elif 'gcc.exe' in files_on_path:
cmake_cmd += ['-G "MinGW Makefiles"']
cmake_cmd = ' '.join(cmake_cmd)
def install_cycamore(args):
if not os.path.exists(args.build_dir):
os.mkdir(args.build_dir)
elif args.replace:
shutil.rmtree(args.build_dir)
os.mkdir(args.build_dir)
root_dir = os.path.split(__file__)[0]
src_dir = os.path.join(root_dir, 'src')
makefile = os.path.join(args.build_dir, 'Makefile')
if not os.path.exists(makefile):
cmake_cmd = ['cmake', absexpanduser(root_dir)]
if args.prefix:
cmake_cmd += ['-DCMAKE_INSTALL_PREFIX=' + absexpanduser(args.prefix)]
if args.cmake_prefix_path:
cmake_cmd += ['-DCMAKE_PREFIX_PATH=' + absexpanduser(args.cmake_prefix_path)]
if args.coin_root:
cmake_cmd += ['-DCOIN_ROOT_DIR=' + absexpanduser(args.coin_root)]
if args.boost_root:
cmake_cmd += ['-DBOOST_ROOT=' + absexpanduser(args.boost_root)]
if args.cyclus_root:
cmake_cmd += ['-DCYCLUS_ROOT_DIR='+absexpanduser(args.cyclus_root)]
if args.build_type:
cmake_cmd += ['-DCMAKE_BUILD_TYPE=' + args.build_type]
check_windows_cmake(cmake_cmd)
rtn = subprocess.check_call(cmake_cmd, cwd=absexpanduser(args.build_dir), shell=(os.name=='nt'))
make_cmd = ['make']
if args.threads:
make_cmd += ['-j' + str(args.threads)]
rtn = subprocess.call(make_cmd, cwd=args.build_dir,
shell=(os.name == 'nt'))
if args.test:
make_cmd += ['test']
elif not args.build_only:
make_cmd += ['install']
rtn = subprocess.check_call(make_cmd, cwd=args.build_dir,
shell=(os.name == 'nt'))
def uninstall_cycamore(args):
makefile = os.path.join(args.build_dir, 'Makefile')
if not os.path.exists(args.build_dir) or not os.path.exists(makefile):
        sys.exit("Cannot uninstall cycamore since it has not yet been built.")
rtn = subprocess.check_call(['make', 'uninstall'], cwd=args.build_dir,
shell=(os.name == 'nt'))
def main():
localdir = absexpanduser('~/.local')
description = "A Cycamore installation helper script. "+\
"For more information, please see cyclus.github.com."
parser = ap.ArgumentParser(description=description)
build_dir = 'where to place the build directory'
parser.add_argument('--build_dir', help=build_dir, default='build')
uninst = 'uninstall'
parser.add_argument('--uninstall', action='store_true', help=uninst, default=False)
replace = 'whether or not to remove the build directory if it exists'
    parser.add_argument('--replace', action='store_true', help=replace, default=False)
threads = "the number of threads to use in the make step"
parser.add_argument('-j', '--threads', type=int, help=threads)
install = "the relative path to the installation directory"
parser.add_argument('--prefix', help=install, default=localdir)
test = 'run tests after building'
parser.add_argument('--test', action='store_true', help=test)
build_only = 'only build the package, do not install'
parser.add_argument('--build-only', action='store_true', help=build_only)
coin = "the relative path to the Coin-OR libraries directory"
parser.add_argument('--coin_root', help=coin)
cyclus = "the relative path to Cyclus installation directory"
parser.add_argument('--cyclus_root',help=cyclus, default=localdir)
boost = "the relative path to the Boost libraries directory"
parser.add_argument('--boost_root', help=boost)
cmake_prefix_path = "the cmake prefix path for use with FIND_PACKAGE, " + \
"FIND_PATH, FIND_PROGRAM, or FIND_LIBRARY macros"
parser.add_argument('--cmake_prefix_path', help=cmake_prefix_path)
build_type = "the CMAKE_BUILD_TYPE"
parser.add_argument('--build_type', help=build_type)
args = parser.parse_args()
if args.uninstall:
uninstall_cycamore(args)
else:
install_cycamore(args)
if __name__ == "__main__":
main()
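# Example invocations (editor's addition, for illustration only; every flag
# below corresponds to an option defined in main() above):
#
#   python install.py --prefix=$HOME/.local -j 4
#   python install.py --build_type=Release --coin_root=/path/to/coin --test
#   python install.py --uninstall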
| {
"content_hash": "74a7adca1d199d26557bccb64bbe3b8f",
"timestamp": "",
"source": "github",
"line_count": 127,
"max_line_length": 104,
"avg_line_length": 37.118110236220474,
"alnum_prop": 0.6224013576580398,
"repo_name": "Baaaaam/cyBaM",
"id": "baa29a2891765df1fd1834c253864995aac962e8",
"size": "4738",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "install.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "731"
},
{
"name": "C++",
"bytes": "258733"
},
{
"name": "CMake",
"bytes": "67239"
},
{
"name": "Python",
"bytes": "37083"
},
{
"name": "Shell",
"bytes": "1959"
}
],
"symlink_target": ""
} |
from django.db import models
from apps.dc_algorithm.models import Area, Compositor, Satellite
from apps.dc_algorithm.models import (Query as BaseQuery, Metadata as BaseMetadata, Result as BaseResult, ResultType as
BaseResultType, UserHistory as BaseUserHistory, AnimationType as
BaseAnimationType, ToolInfo as BaseToolInfo)
from utils.data_cube_utilities.dc_mosaic import (create_mosaic, create_mean_mosaic)
import numpy as np
class UserHistory(BaseUserHistory):
"""
Extends the base user history adding additional fields
See the dc_algorithm.UserHistory docstring for more information
"""
pass
class ToolInfo(BaseToolInfo):
"""
Extends the base ToolInfo adding additional fields
See the dc_algorithm.ToolInfo docstring for more information
"""
pass
class BaselineMethod(models.Model):
"""
acts like result type, but for baseline selection.
"""
id = models.CharField(unique=True, primary_key=True, max_length=100)
name = models.CharField(max_length=100)
def __str__(self):
return self.name
class Query(BaseQuery):
"""
Extends base query, adds app specific elements. See the dc_algorithm.Query docstring for more information
Defines the get_or_create_query_from_post as required, adds new fields, recreates the unique together
field, and resets the abstract property. Functions are added to get human readable names for various properties,
foreign keys should define __str__ for a human readable name.
"""
baseline_method = models.ForeignKey(BaselineMethod)
baseline_length = models.IntegerField(default=10)
base_result_dir = '/datacube/ui_results/slip'
class Meta(BaseQuery.Meta):
unique_together = (
('satellite', 'area_id', 'time_start', 'time_end', 'latitude_max', 'latitude_min', 'longitude_max',
'longitude_min', 'title', 'description', 'baseline_method', 'baseline_length'))
abstract = True
def get_fields_with_labels(self, labels, field_names):
for idx, label in enumerate(labels):
yield [label, getattr(self, field_names[idx])]
def get_chunk_size(self):
"""Implements get_chunk_size as required by the base class
See the base query class docstring for more information.
"""
return {'time': None, 'geographic': 0.005}
def get_iterative(self):
"""implements get_iterative as required by the base class
See the base query class docstring for more information.
"""
return False
def get_reverse_time(self):
"""implements get_reverse_time as required by the base class
See the base query class docstring for more information.
"""
return True
def get_processing_method(self):
"""implements get_processing_method as required by the base class
See the base query class docstring for more information.
"""
processing_methods = {'composite': create_mosaic, 'average': create_mean_mosaic}
return processing_methods.get(self.baseline_method.id, create_mosaic)
@classmethod
def get_or_create_query_from_post(cls, form_data, pixel_drill=False):
"""Implements the get_or_create_query_from_post func required by base class
See the get_or_create_query_from_post docstring for more information.
Parses out the time start/end, creates the product, and formats the title/description
Args:
form_data: python dict containing either a single obj or a list formatted with post_data_to_dict
Returns:
Tuple containing the query model and a boolean value signifying if it was created or loaded.
"""
query_data = form_data
query_data['title'] = "SLIP Query" if 'title' not in form_data or form_data['title'] == '' else form_data[
'title']
query_data['description'] = "None" if 'description' not in form_data or form_data[
'description'] == '' else form_data['description']
valid_query_fields = [field.name for field in cls._meta.get_fields()]
query_data = {key: query_data[key] for key in valid_query_fields if key in query_data}
try:
query = cls.objects.get(pixel_drill_task=pixel_drill, **query_data)
return query, False
except cls.DoesNotExist:
query = cls(pixel_drill_task=pixel_drill, **query_data)
query.save()
return query, True
class Metadata(BaseMetadata):
"""
Extends base metadata, adding additional fields and adding abstract=True.
zipped_metadata_fields is required.
See the dc_algorithm.Metadata docstring for more information
"""
slip_pixels_per_acquisition = models.CharField(max_length=100000, default="")
zipped_metadata_fields = [
'acquisition_list', 'clean_pixels_per_acquisition', 'clean_pixel_percentages_per_acquisition',
'slip_pixels_per_acquisition'
]
class Meta(BaseMetadata.Meta):
abstract = True
def metadata_from_dataset(self, metadata, dataset, clear_mask, parameters, time):
"""implements metadata_from_dataset as required by the base class
See the base metadata class docstring for more information.
"""
clean_pixels = np.sum(clear_mask == True)
slip_slice = dataset.slip.values
if time not in metadata:
metadata[time] = {}
metadata[time]['clean_pixels'] = 0
metadata[time]['slip_pixels'] = 0
metadata[time]['clean_pixels'] += clean_pixels
metadata[time]['slip_pixels'] += len(slip_slice[slip_slice > 0])
return metadata
def combine_metadata(self, old, new):
"""implements combine_metadata as required by the base class
See the base metadata class docstring for more information.
"""
for key in new:
if key in old:
old[key]['clean_pixels'] += new[key]['clean_pixels']
old[key]['slip_pixels'] += new[key]['slip_pixels']
continue
old[key] = new[key]
return old
def final_metadata_from_dataset(self, dataset):
"""implements final_metadata_from_dataset as required by the base class
See the base metadata class docstring for more information.
"""
self.pixel_count = len(dataset.latitude) * len(dataset.longitude)
self.clean_pixel_count = np.sum(dataset[list(dataset.data_vars)[0]].values != -9999)
self.percentage_clean_pixels = (self.clean_pixel_count / self.pixel_count) * 100
self.save()
def metadata_from_dict(self, metadata_dict):
"""implements metadata_from_dict as required by the base class
See the base metadata class docstring for more information.
"""
dates = list(metadata_dict.keys())
dates.sort(reverse=True)
self.total_scenes = len(dates)
self.scenes_processed = len(dates)
self.acquisition_list = ",".join([date.strftime("%m/%d/%Y") for date in dates])
self.slip_pixels_per_acquisition = ",".join([str(metadata_dict[date]['slip_pixels']) for date in dates])
self.clean_pixels_per_acquisition = ",".join([str(metadata_dict[date]['clean_pixels']) for date in dates])
self.clean_pixel_percentages_per_acquisition = ",".join(
[str((metadata_dict[date]['clean_pixels'] * 100) / self.pixel_count) for date in dates])
self.save()
class Result(BaseResult):
"""
Extends base result, adding additional fields and adding abstract=True
See the dc_algorithm.Result docstring for more information
"""
result_mosaic_path = models.CharField(max_length=250, default="")
plot_path = models.CharField(max_length=250, default="")
data_path = models.CharField(max_length=250, default="")
data_netcdf_path = models.CharField(max_length=250, default="")
class Meta(BaseResult.Meta):
abstract = True
class SlipTask(Query, Metadata, Result):
"""
Combines the Query, Metadata, and Result abstract models
"""
pass
| {
"content_hash": "e55e82034110317f9bae2fcc61b0ddcb",
"timestamp": "",
"source": "github",
"line_count": 213,
"max_line_length": 119,
"avg_line_length": 38.51173708920188,
"alnum_prop": 0.6523223211020358,
"repo_name": "ceos-seo/data_cube_ui",
"id": "167d3838ad681424b185b251a78af5bb504e87f2",
"size": "9208",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "apps/slip/models.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "402543"
},
{
"name": "GLSL",
"bytes": "167522"
},
{
"name": "HTML",
"bytes": "8002318"
},
{
"name": "JavaScript",
"bytes": "46178533"
},
{
"name": "PHP",
"bytes": "28128"
},
{
"name": "PLSQL",
"bytes": "14578"
},
{
"name": "Python",
"bytes": "908507"
},
{
"name": "Shell",
"bytes": "21979"
},
{
"name": "TSQL",
"bytes": "31758"
}
],
"symlink_target": ""
} |
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "cvmfsweb.settings")
from django.core.management import execute_from_command_line
execute_from_command_line(sys.argv)
| {
"content_hash": "fab1035045c135e332d80332508c079b",
"timestamp": "",
"source": "github",
"line_count": 9,
"max_line_length": 72,
"avg_line_length": 25.444444444444443,
"alnum_prop": 0.7117903930131004,
"repo_name": "cernvm/cvmfs-monitor",
"id": "923525e2e1c0dda81efa53c89c258a5956677568",
"size": "251",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "cvmfsweb/manage.py",
"mode": "33261",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "11765"
}
],
"symlink_target": ""
} |
import tensorflow as tf
class VBN(object):
"""
Virtual Batch Normalization
(modified from https://github.com/openai/improved-gan/ definition)
"""
def __init__(self, x, name, epsilon=1e-5):
"""
x is the reference batch
"""
assert isinstance(epsilon, float)
shape = x.get_shape().as_list()
assert len(shape) == 3, shape
with tf.variable_scope(name) as scope:
assert name.startswith("d_") or name.startswith("g_")
self.epsilon = epsilon
self.name = name
self.mean = tf.reduce_mean(x, [0, 1], keep_dims=True)
self.mean_sq = tf.reduce_mean(tf.square(x), [0, 1], keep_dims=True)
self.batch_size = int(x.get_shape()[0])
assert x is not None
assert self.mean is not None
assert self.mean_sq is not None
out = self._normalize(x, self.mean, self.mean_sq, "reference")
self.reference_output = out
def __call__(self, x):
shape = x.get_shape().as_list()
with tf.variable_scope(self.name) as scope:
new_coeff = 1. / (self.batch_size + 1.)
old_coeff = 1. - new_coeff
new_mean = tf.reduce_mean(x, [0, 1], keep_dims=True)
new_mean_sq = tf.reduce_mean(tf.square(x), [0, 1], keep_dims=True)
mean = new_coeff * new_mean + old_coeff * self.mean
mean_sq = new_coeff * new_mean_sq + old_coeff * self.mean_sq
out = self._normalize(x, mean, mean_sq, "live")
return out
def _normalize(self, x, mean, mean_sq, message):
# make sure this is called with a variable scope
shape = x.get_shape().as_list()
assert len(shape) == 3
self.gamma = tf.get_variable("gamma", [shape[-1]],
initializer=tf.random_normal_initializer(1., 0.02))
gamma = tf.reshape(self.gamma, [1, 1, -1])
self.beta = tf.get_variable("beta", [shape[-1]],
initializer=tf.constant_initializer(0.))
beta = tf.reshape(self.beta, [1, 1, -1])
assert self.epsilon is not None
assert mean_sq is not None
assert mean is not None
std = tf.sqrt(self.epsilon + mean_sq - tf.square(mean))
out = x - mean
out = out / std
out = out * gamma
out = out + beta
return out
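# --- Illustrative sketch (editor's addition, not part of the original file) ---
# How the VBN wrapper above is typically wired up: statistics are frozen on a
# fixed reference batch at construction time and blended with each live batch
# on later calls. The shapes, the scope name "g_vbn_example" and the reuse
# handling are assumptions made for the sake of the example.
def _example_vbn_usage():
    ref_batch = tf.placeholder(tf.float32, [16, 100, 32])
    live_batch = tf.placeholder(tf.float32, [16, 100, 32])
    vbn = VBN(ref_batch, "g_vbn_example")  # creates gamma/beta and reference stats
    # Reuse the gamma/beta variables created for the reference batch when
    # normalizing a live batch.
    with tf.variable_scope(tf.get_variable_scope(), reuse=True):
        normalized_live = vbn(live_batch)
    return vbn.reference_output, normalized_live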
| {
"content_hash": "b88d83e1478e90e5b48874a0ab060994",
"timestamp": "",
"source": "github",
"line_count": 62,
"max_line_length": 83,
"avg_line_length": 38.983870967741936,
"alnum_prop": 0.5399255275134465,
"repo_name": "santi-pdp/segan",
"id": "a659c5b588f931fa6dea3bdb8f4f87035bc312b1",
"size": "2417",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "bnorm.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "82457"
},
{
"name": "Shell",
"bytes": "3669"
}
],
"symlink_target": ""
} |
__version__ = "1.1.0"
"""
:mod:`spaghetti` --- Spatial Graphs: Networks, Topology, & Inference
====================================================================
"""
from .network import Network, PointPattern, SimulatedPointPattern, SortedEdges
from .analysis import NetworkBase, NetworkG, NetworkK, NetworkF
from .analysis import gfunction, kfunction, ffunction
from .util import compute_length, get_neighbor_distances, generatetree
from .util import dijkstra, dijkstra_mp
from .util import squared_distance_point_segment, snap_points_on_segments
| {
"content_hash": "c5d35087a9469cabcb4592463fd15f18",
"timestamp": "",
"source": "github",
"line_count": 12,
"max_line_length": 78,
"avg_line_length": 45.916666666666664,
"alnum_prop": 0.6860254083484574,
"repo_name": "lixun910/pysal",
"id": "5a247790d1abda7352a586486da98a8cf91b626f",
"size": "551",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pysal/explore/spaghetti/__init__.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "HTML",
"bytes": "1315254"
},
{
"name": "Jupyter Notebook",
"bytes": "1407521"
},
{
"name": "Makefile",
"bytes": "526"
},
{
"name": "OpenEdge ABL",
"bytes": "595378"
},
{
"name": "Python",
"bytes": "3994938"
},
{
"name": "Shell",
"bytes": "3743"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import
from datetime import timedelta
from django.db import models
from django.utils import timezone
from sentry.utils.cache import cache
from sentry.db.models import BoundedPositiveIntegerField, FlexibleForeignKey, Model, sane_repr
class ReleaseProjectEnvironment(Model):
__core__ = False
release = FlexibleForeignKey("sentry.Release")
project = FlexibleForeignKey("sentry.Project")
environment = FlexibleForeignKey("sentry.Environment")
new_issues_count = BoundedPositiveIntegerField(default=0)
first_seen = models.DateTimeField(default=timezone.now)
last_seen = models.DateTimeField(default=timezone.now, db_index=True)
last_deploy_id = BoundedPositiveIntegerField(null=True, db_index=True)
class Meta:
app_label = "sentry"
db_table = "sentry_releaseprojectenvironment"
unique_together = (("project", "release", "environment"),)
__repr__ = sane_repr("project", "release", "environment")
@classmethod
def get_cache_key(cls, release_id, project_id, environment_id):
return u"releaseprojectenv:{}:{}:{}".format(release_id, project_id, environment_id)
@classmethod
def get_or_create(cls, release, project, environment, datetime, **kwargs):
        cache_key = cls.get_cache_key(release.id, project.id, environment.id)
instance = cache.get(cache_key)
if instance is None:
instance, created = cls.objects.get_or_create(
release=release,
project=project,
environment=environment,
defaults={"first_seen": datetime, "last_seen": datetime},
)
cache.set(cache_key, instance, 3600)
else:
created = False
# Same as releaseenvironment model. Minimizes last_seen updates to once a minute
if not created and instance.last_seen < datetime - timedelta(seconds=60):
cls.objects.filter(
id=instance.id, last_seen__lt=datetime - timedelta(seconds=60)
).update(last_seen=datetime)
instance.last_seen = datetime
cache.set(cache_key, instance, 3600)
return instance
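# Example call (editor's addition, illustrative only; the variable names are
# placeholders for existing Release/Project/Environment instances):
#
#   rpe = ReleaseProjectEnvironment.get_or_create(
#       release=release, project=project, environment=environment,
#       datetime=timezone.now())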
| {
"content_hash": "37d78b1f09ed86318910a4ee896e8111",
"timestamp": "",
"source": "github",
"line_count": 56,
"max_line_length": 94,
"avg_line_length": 39.285714285714285,
"alnum_prop": 0.6640909090909091,
"repo_name": "mvaled/sentry",
"id": "4620a84bc73621b61deab5811ae22417b3773394",
"size": "2200",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "src/sentry/models/releaseprojectenvironment.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "226439"
},
{
"name": "Dockerfile",
"bytes": "6431"
},
{
"name": "HTML",
"bytes": "173429"
},
{
"name": "JavaScript",
"bytes": "9314175"
},
{
"name": "Lua",
"bytes": "65885"
},
{
"name": "Makefile",
"bytes": "9225"
},
{
"name": "Python",
"bytes": "50385401"
},
{
"name": "Ruby",
"bytes": "168"
},
{
"name": "Shell",
"bytes": "5685"
},
{
"name": "TypeScript",
"bytes": "773664"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import
import sys
import json
from r5d4.mapping_functions import MEASURING_FUNCTIONS_MAP,\
DIMENSION_PARSERS_MAP, CONDITION_KEYS
TOP_KEYS = ["name", "description", "query_dimensions", "slice_dimensions",
"data_db", "measures", "mapping"]
class Analytics:
def __init__(self, analytics_definition):
"""
Constructor for Analytics class.
Deserializes analytics from a json format definition
Throws ValueError if the JSON is not parseable
Throws AssertError if the Analytics definition is invalid
"""
self.definition = json.loads(analytics_definition)
self.validate()
def json_serialize(self, fp=None, indent=2):
"""
Serialize Analytics definition and return json
"""
if fp is None:
return json.dumps(self.definition, indent=indent, sort_keys=True)
else:
return json.dump(self.definition,
fp,
indent=indent,
sort_keys=True)
def validate(self):
"""
Validates Analytics definition and raises AssertError if invalid.
"""
# Checking for top-level keys
assert "name" in self.definition, \
"Definition doesn't have 'name'"
assert ':' not in self.definition['name'], \
"Analytics name cannot contain ':'"
assert "measures" in self.definition, \
"Definition doesn't contain 'measures' array"
assert "query_dimensions" in self.definition, \
"Definition doesn't contain 'query_dimensions' array"
assert "slice_dimensions" in self.definition, \
"Definition doesn't contain 'slice_dimensions' array"
assert "mapping" in self.definition, \
"Definition doesn't contain 'mapping' dictionary"
for top_key in self.definition.keys():
assert top_key in TOP_KEYS, \
"Definition has unexpected key '%(top_key)s'" % locals()
mapping = self.definition["mapping"]
mapped_measures = set()
mapped_dimensions = set()
        # Checking if at least one measure is present
        assert len(self.definition["measures"]) > 0, \
            "Definition should contain at least one measure"
for measure in self.definition["measures"]:
# Checking if measure has mapping
assert measure in mapping, \
"Measure '%s' doesn't have a mapping" % measure
mapped_measures.add(measure)
# Checking if resource is present
assert "resource" in mapping[measure], \
"Measure '%s' is missing 'resource'" % measure
# Checking type of measure
assert "type" in mapping[measure], \
"Measure '%s' is missing 'type'" % measure
assert mapping[measure]["type"] in MEASURING_FUNCTIONS_MAP, \
"Measure '%s' type '%s' is not a valid measure type" % (
measure,
mapping[measure]["type"])
if mapping[measure]["type"] == "score":
assert "field" in mapping[measure], \
"Measure '%s' has type 'score' but missing 'field'" % \
measure
if mapping[measure]["type"] == "unique":
assert "field" in mapping[measure], \
"Measure '%s' has type 'unique' but missing 'field'" % \
measure
if "conditions" in mapping[measure]:
for condition in mapping[measure]["conditions"]:
assert "field" in condition, \
"Conditional measure '%s' missing 'field' in one of " \
"the conditions" % measure
filter_count = 0
for condition_key in CONDITION_KEYS:
if condition_key in condition:
filter_count += 1
assert filter_count > 0, \
"Conditional measure '%s' field '%s' has no " \
"conditions" % (measure, condition["field"])
assert filter_count <= 1, \
"Conditional measure '%s' field '%s' has " \
"> 1 conditions" % (measure, condition["field"])
for dimension in self.definition["query_dimensions"] \
+ self.definition["slice_dimensions"]:
# Checking if all dimensions are mapped
assert dimension in mapping, \
"Dimension '%s' doesn't have a mapping" % dimension
mapped_dimensions.add(dimension)
# Checking type of dimension
assert "type" in mapping[dimension], \
"Dimension '%s' is missing 'type'" % dimension
assert mapping[dimension]["type"] in DIMENSION_PARSERS_MAP, \
"Dimension '%s' type '%s' is not valid dimension type" % (
dimension,
mapping[dimension]["type"])
# Checking if field is present
assert "field" in mapping[dimension], \
"Dimension '%s' is missing 'field'" % dimension
unmapped = set(mapping.keys()) - (mapped_measures | mapped_dimensions)
assert unmapped == set(), \
"Unmapped keys in mapping: [%s]" % ",".join(unmapped)
def set_data_db(self, data_db):
self.definition["data_db"] = data_db
def __getitem__(self, name):
if name in self.definition:
return self.definition[name]
return None
def __getattr__(self, name):
return self.definition[name]
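# --- Illustrative example (editor's addition, not part of the original module) ---
# A sketch of an analytics definition that follows the structure enforced by
# validate() above. The "type" values shown ("count", "simple") are
# placeholders: real definitions must use types present in
# MEASURING_FUNCTIONS_MAP and DIMENSION_PARSERS_MAP.
# It could be parsed with Analytics(EXAMPLE_DEFINITION), given valid types.
EXAMPLE_DEFINITION = """
{
    "name": "daily_sales",
    "description": "Transaction counts by date and city",
    "data_db": "analytics_db",
    "measures": ["txn_count"],
    "query_dimensions": ["date"],
    "slice_dimensions": ["city"],
    "mapping": {
        "txn_count": {"resource": "transaction", "type": "count"},
        "date": {"type": "simple", "field": "txn_date"},
        "city": {"type": "simple", "field": "city"}
    }
}
"""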
if __name__ == "__main__":
if len(sys.argv) >= 2:
for filepath in sys.argv[1:]:
with open(filepath, 'r') as f:
try:
a = Analytics(f.read())
except AssertionError, e:
sys.stderr.write("%s: %s\n" % (filepath, e))
except ValueError, e:
sys.stderr.write("%s: %s\n" % (filepath, e))
| {
"content_hash": "73676df0a5f6a7ab19d658f36d3e4915",
"timestamp": "",
"source": "github",
"line_count": 149,
"max_line_length": 79,
"avg_line_length": 41.38255033557047,
"alnum_prop": 0.5376256892637041,
"repo_name": "practo/r5d4",
"id": "28c0c2d98eb48c4c1574830473c04ea696e8c464",
"size": "6188",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "r5d4/analytics.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "52496"
}
],
"symlink_target": ""
} |
from django import template
from django.conf import settings
from pCMS.fpg.models import Flatpage, Menu
register = template.Library()
class FpgNode(template.Node):
def __init__(self, context_name, starts_with=None, user=None):
self.context_name = context_name
if starts_with:
self.starts_with = template.Variable(starts_with)
else:
self.starts_with = None
if user:
self.user = template.Variable(user)
else:
self.user = None
def render(self, context):
flatpages = Flatpage.objects.filter(published=True,sites__id=settings.SITE_ID)
# If a prefix was specified, add a filter
if self.starts_with:
flatpages = flatpages.filter(
url__startswith=self.starts_with.resolve(context))
# If the provided user is not authenticated, or no user
# was provided, filter the list to only public flatpages.
if self.user:
user = self.user.resolve(context)
if not user.is_authenticated():
flatpages = flatpages.filter(registration_required=False)
else:
flatpages = flatpages.filter(registration_required=False)
context[self.context_name] = flatpages
return ''
def getflatpages(parser, token):
"""
Retrieves all flatpage objects available for the current site and
visible to the specific user (or visible to all users if no user is
specified). Populates the template context with them in a variable
whose name is defined by the ``as`` clause.
An optional ``for`` clause can be used to control the user whose
permissions are to be used in determining which flatpages are visible.
An optional argument, ``starts_with``, can be applied to limit the
returned flatpages to those beginning with a particular base URL.
This argument can be passed as a variable or a string, as it resolves
from the template context.
Syntax::
        {% getflatpages ['url_starts_with'] [for user] as context_name %}
    Example usage::
        {% getflatpages as flatpages %}
        {% getflatpages for someuser as flatpages %}
        {% getflatpages '/about/' as about_pages %}
        {% getflatpages prefix as about_pages %}
        {% getflatpages '/about/' for someuser as about_pages %}
"""
bits = token.split_contents()
syntax_message = ("%(tag_name)s expects a syntax of %(tag_name)s "
"['url_starts_with'] [for user] as context_name" %
dict(tag_name=bits[0]))
    # Must have 3-6 bits in the tag
if len(bits) >= 3 and len(bits) <= 6:
# If there's an even number of bits, there's no prefix
if len(bits) % 2 == 0:
prefix = bits[1]
else:
prefix = None
# The very last bit must be the context name
if bits[-2] != 'as':
raise template.TemplateSyntaxError(syntax_message)
context_name = bits[-1]
# If there are 5 or 6 bits, there is a user defined
if len(bits) >= 5:
if bits[-4] != 'for':
raise template.TemplateSyntaxError(syntax_message)
user = bits[-3]
else:
user = None
return FpgNode(context_name, starts_with=prefix, user=user)
else:
raise template.TemplateSyntaxError(syntax_message)
register.tag('getflatpages', getflatpages)
class MenuNode(template.Node):
def __init__(self, context_name,scope):
self.context_name = context_name
self.scope = scope
def render(self, context):
if self.scope == 'public':
menus = Menu.objects.filter(registration_required=False)
else:
menus = Menu.objects.filter(registration_required=True)
context[self.context_name] = menus
return ''
def getmenu(parser, token):
bits = token.split_contents()
syntax_message = ("%(tag_name)s expects a syntax of %(tag_name)s public/private as context_name" % dict(tag_name=bits[0]))
if len(bits) == 4:
# The very last bit must be the context name
if bits[-2] != 'as':
raise template.TemplateSyntaxError(syntax_message)
context_name = bits[-1]
scope = bits[1]
if scope not in ['public','private']:
raise template.TemplateSyntaxError(syntax_message)
return MenuNode(context_name,scope)
else:
raise template.TemplateSyntaxError(syntax_message)
register.tag('getmenu', getmenu)
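# Template usage for the getmenu tag (editor's addition, for illustration;
# the context variable names are arbitrary):
#
#   {% getmenu public as public_menus %}
#   {% getmenu private as member_menus %}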
| {
"content_hash": "315481f22d9737d7b2369fd96c1cedf9",
"timestamp": "",
"source": "github",
"line_count": 129,
"max_line_length": 126,
"avg_line_length": 35.29457364341085,
"alnum_prop": 0.6202503843619591,
"repo_name": "sv1jsb/pCMS",
"id": "b863f3bd3ba3d2eaf99443b25cfafddbf0ed7b05",
"size": "4553",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pCMS/fpg/templatetags/fpg_flatpages.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "JavaScript",
"bytes": "582047"
},
{
"name": "Python",
"bytes": "74399"
}
],
"symlink_target": ""
} |
from django.db import models
from OctaHomeCore.models import *
class ButtonInputDevice(InputDevice):
ButtonOneAction = models.ForeignKey('OctaHomeCore.TriggerEvent', related_name="oneButtonActions", blank=True, null=True, on_delete=models.SET_NULL)
ButtonTwoAction = models.ForeignKey('OctaHomeCore.TriggerEvent', related_name="twoButtonActions", blank=True, null=True, on_delete=models.SET_NULL)
class Meta:
db_table = u'ButtonInputDevice'
class MotionInputDevice(InputDevice):
TriggerAction = models.ForeignKey('OctaHomeCore.TriggerEvent', related_name="motionActions", blank=True, null=True, on_delete=models.SET_NULL)
WaitTime = models.IntegerField() # in seconds
TimeOutTaskCeleryId = models.TextField()
TimeOutAction = models.ForeignKey('OctaHomeCore.TriggerEvent', related_name="motionTimeoutActions", blank=True, null=True, on_delete=models.SET_NULL)
Activated = models.BooleanField(default=False)
Armed = models.BooleanField(default=False)
class Meta:
db_table = u'MotionInputDevice'
| {
"content_hash": "4aec94ba6202dbcc1ad36a74e6d83c39",
"timestamp": "",
"source": "github",
"line_count": 21,
"max_line_length": 150,
"avg_line_length": 48.285714285714285,
"alnum_prop": 0.7948717948717948,
"repo_name": "Tomcuzz/OctaHomeAutomation",
"id": "d0e08368eb5eb7a5935068a5572685d80c1baedb",
"size": "1014",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "OctaHomeDeviceInput/models.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "110234"
},
{
"name": "JavaScript",
"bytes": "2966773"
},
{
"name": "PHP",
"bytes": "3467"
},
{
"name": "Python",
"bytes": "113535"
},
{
"name": "Shell",
"bytes": "4340"
}
],
"symlink_target": ""
} |
"""Utility functions of the cli."""
import datetime
import json
import os
import re
import random
import string
import sys
import time
import click
import requests
import tabulate
from keep import about
# Directory for Keep files
dir_path = os.path.join(os.path.expanduser('~'), '.keep')
# URL for the API
api_url = 'https://keep-cli.herokuapp.com'
def check_update(forced=False):
update_check_file = os.path.join(dir_path, 'update_check.json')
today = datetime.date.today().strftime("%m/%d/%Y")
if os.path.exists(update_check_file):
dates = json.loads(open(update_check_file, 'r').read())
else:
dates = []
if today not in dates or forced:
dates.append(today)
if os.path.exists(update_check_file):
with open(update_check_file, 'w') as f:
f.write(json.dumps(dates))
r = requests.get("https://pypi.python.org/pypi/keep/json").json()
version = r['info']['version']
curr_version = about.__version__
if version != curr_version:
click.secho("Keep seems to be outdated. Current version = "
"{}, Latest version = {}".format(curr_version, version) +
"\n\nPlease update with ", bold=True, fg='red')
click.secho("\tpip --no-cache-dir install -U keep==" + str(version), fg='green')
sys.exit(0)
def first_time_use(ctx):
click.secho("Initializing environment in ~/.keep directory", fg='green')
for i in range(2):
click.echo('.', nl=False)
time.sleep(0.5)
click.echo('.OK', nl=True)
os.mkdir(dir_path)
register()
sys.exit(0)
def list_commands(ctx):
    commands = read_commands() or {}  # guard against a missing commands.json
table = []
for cmd, desc in commands.items():
table.append(['$ ' + cmd, desc])
print(tabulate.tabulate(table, headers=['Command', 'Description']))
def log(ctx, message):
"""Prints log when verbose set to True."""
if ctx.verbose:
ctx.log(message)
def push(ctx):
credentials_file = os.path.join(dir_path, '.credentials')
credentials = json.loads(open(credentials_file, 'r').read())
json_path = os.path.join(dir_path, 'commands.json')
credentials['commands'] = open(json_path).read()
url = api_url + '/push'
if click.confirm("This will overwrite the saved "
"commands on the server. Proceed?", default=True):
r = requests.post(url, json=credentials)
if r.status_code == 200:
click.echo("Server successfully updated.")
def pull(ctx, overwrite):
credentials_file = os.path.join(dir_path, '.credentials')
credentials = json.loads(open(credentials_file, 'r').read())
json_path = os.path.join(dir_path, 'commands.json')
url = api_url + '/pull'
r = requests.post(url, json=credentials)
if r.status_code == 200:
commands = json.loads(r.json()['commands'])
if not overwrite:
my_commands = read_commands()
commands.update(my_commands)
if not overwrite or (
overwrite and click.confirm(
"This will overwrite the locally saved commands. Proceed?", default=True)):
with open(json_path, 'w') as f:
f.write(json.dumps(commands))
click.echo("Local database successfully updated.")
def register():
if not os.path.exists(dir_path):
click.secho("\n[CRITICAL] {0} does not exits.\nPlease run 'keep init',"
" and try registering again.\n".format(dir_path),
fg="red", err=True)
sys.exit(1)
# User may not choose to register and work locally.
# Registration is required to push the commands to server
if click.confirm('Proceed to register?', abort=True, default=True):
# Verify for existing user
click.echo("Your credentials will be saved in the ~/.keep directory.")
email = click.prompt('Email', confirmation_prompt=True)
json_res = {'email': email}
click.echo('Verifying with existing users...')
r = requests.post('https://keep-cli.herokuapp.com/check-user', json=json_res)
if r.json()['exists']:
click.secho('User already exists !', fg='red')
email = click.prompt('Email', confirmation_prompt=True)
json_res = {'email': email}
r = requests.post('https://keep-cli.herokuapp.com/check-user', json=json_res)
# Generate password for the user
chars = string.ascii_letters + string.digits
password = ''.join(random.choice(chars) for _ in range(255))
credentials_file = os.path.join(dir_path, '.credentials')
credentials = {
'email': email,
'password': password
}
click.secho("Generated password for " + email, fg='cyan')
# Register over the server
click.echo("Registering new user ...")
json_res = {
'email': email,
'password': password
}
r = requests.post('https://keep-cli.herokuapp.com/register', json=json_res)
if r.status_code == 200:
click.secho("User successfully registered !", fg='green')
# Save the credentials into a file
with open(credentials_file, 'w+') as f:
f.write(json.dumps(credentials))
click.secho(password, fg='cyan')
click.secho("Credentials file saved at ~/.keep/.credentials.json", fg='green')
sys.exit(0)
def remove_command(cmd):
commands = read_commands()
if cmd in commands:
del commands[cmd]
write_commands(commands)
else:
click.echo('Command - {} - does not exist.'.format(cmd))
def save_command(cmd, desc):
json_path = os.path.join(dir_path, 'commands.json')
commands = {}
if os.path.exists(json_path):
commands = json.loads(open(json_path, 'r').read())
commands[cmd] = desc
with open(json_path, 'w') as f:
f.write(json.dumps(commands))
def read_commands():
json_path = os.path.join(dir_path, 'commands.json')
if not os.path.exists(json_path):
return None
commands = json.loads(open(json_path, 'r').read())
return commands
def write_commands(commands):
json_path = os.path.join(dir_path, 'commands.json')
with open(json_path, 'w') as f:
f.write(json.dumps(commands))
def grep_commands(pattern):
commands = read_commands()
result = None
if commands:
result = []
for cmd, desc in commands.items():
if re.search(pattern, cmd + " :: " + desc):
result.append((cmd, desc))
continue
# Show if all the parts of the pattern are in one command/desc
keywords_len = len(pattern.split())
i_keyword = 0
for keyword in pattern.split():
if keyword.lower() in cmd.lower() or keyword.lower() in desc.lower():
i_keyword += 1
if i_keyword == keywords_len:
result.append((cmd, desc))
return result
def select_command(commands):
click.echo("\n\n")
for idx, command in enumerate(commands):
cmd, desc = command
click.secho(" " + str(idx + 1) + "\t", nl=False, fg='yellow')
click.secho("$ {} :: {}".format(cmd, desc), fg='green')
click.echo("\n\n")
selection = 1
    while len(commands) > 1:
selection = click.prompt(
"Select a command [1-{}] (0 to cancel)"
.format(len(commands)), type=int)
if selection in range(len(commands) + 1):
break
click.echo("Number is not in range")
return selection - 1
def edit_commands(commands, editor=None, edit_header=""):
edit_msg = [edit_header]
for cmd, desc in commands.items():
cmd = json.dumps(cmd)
desc = json.dumps(desc)
edit_msg.append("{} :: {}".format(cmd, desc))
edited = click.edit('\n'.join(edit_msg), editor=editor)
command_regex = re.compile(r'(\".*\")\s*::\s*(\".*\")')
new_commands = {}
if edited:
for line in edited.split('\n'):
if (line.startswith('#') or line == ""):
continue
re_match = command_regex.search(line)
if re_match and len(re_match.groups()) == 2:
cmd, desc = re_match.groups()
try:
cmd = json.loads(cmd)
desc = json.loads(desc)
except ValueError:
click.echo("Error parsing json from edit file.")
return None
new_commands[cmd] = desc
else:
click.echo("Could not read line '{}'".format(line))
return new_commands
def format_commands(commands):
res = []
for cmd, desc in commands.items():
res.append("$ {} :: {}".format(cmd, desc))
return res
def create_pcmd(command):
class KeepCommandTemplate(string.Template):
default_sep = '='
idpattern = r'[_a-z][_a-z0-9{}]*'.format(default_sep)
def __init__(self, template):
super().__init__(template)
return KeepCommandTemplate(command)
def get_params_in_pcmd(pcmd):
patt = pcmd.pattern
params = []
defaults = []
raw = []
for match in re.findall(patt, pcmd.template):
var = match[1] or match[2]
svar = var.split(pcmd.default_sep)
p, d = svar[0], pcmd.default_sep.join(svar[1:])
if p and p not in params:
raw.append(var)
params.append(p)
defaults.append(d)
return raw, params, defaults
def substitute_pcmd(pcmd, kargs, safe=False):
if safe:
return pcmd.safe_substitute(**kargs)
else:
return pcmd.substitute(**kargs)
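# --- Illustrative sketch (editor's addition, not part of the original module) ---
# How the parameterized-command helpers above fit together. The command string
# "ping -c $count=3 $host" is purely an example; `$name=default` placeholders
# are extracted by get_params_in_pcmd() and filled in by substitute_pcmd()
# using the raw placeholder names as keys.
def _example_pcmd_usage():
    pcmd = create_pcmd("ping -c $count=3 $host")
    raw, params, defaults = get_params_in_pcmd(pcmd)
    # raw == ['count=3', 'host'], params == ['count', 'host'], defaults == ['3', '']
    filled = substitute_pcmd(pcmd, {raw[0]: "5", raw[1]: "example.com"})
    # filled == 'ping -c 5 example.com'
    return filled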
| {
"content_hash": "5ef4a433917a6fb90e2cce4ac46f57f4",
"timestamp": "",
"source": "github",
"line_count": 293,
"max_line_length": 92,
"avg_line_length": 33.29692832764505,
"alnum_prop": 0.5788232882328823,
"repo_name": "paci4416/keep",
"id": "6a8b713676e473069e2071c2ccc3fc8f827853c3",
"size": "9756",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "keep/utils.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "19864"
},
{
"name": "Shell",
"bytes": "100"
}
],
"symlink_target": ""
} |
from twisted.internet import defer
from ipd.libvirt import error, constants
from structlog import get_logger
logger = get_logger()
class ProcedureBase(object):
id = None
name = None
args = None
ret = None
def __init__(self, program):
self._program = program
self._pending = defer.Deferred()
self._log = program._log.bind(procedure=self.name)
def __call__(self, *args, **kwargs):
return self._program.call(self, args, kwargs)
def handle_CALL(self, status, payload):
self._log.msg('libvirt.recv.call')
raise error.FeatureNotSupported(
'Remote procedure calls are not supported.')
def handle_REPLY(self, status, payload):
self._log.msg('libvirt.recv.reply', status=constants.status[status])
if status == constants.status.OK:
response = self.unpack_ret(payload)
self._pending.callback(response)
else:
response = self.unpack_err(payload)
self._pending.errback(response)
def handle_EVENT(self, status, payload):
self._log.msg('libvirt.recv.event')
raise error.FeatureNotSupported('Remote events are not supported.')
def handle_STREAM(self, status, payload):
self._log.msg('libvirt.recv.stream', status=constants.status[status])
raise error.FeatureNotSupported('Remote streaming is not supported.')
@classmethod
def pack_args(cls, stream, args, kwargs):
if cls.args is not None:
value = cls.args.model(*args, **kwargs)
cls.args.pack(stream, value)
@classmethod
def unpack_ret(cls, stream):
if cls.ret is not None:
return cls.ret.unpack(stream)
@classmethod
def unpack_err(cls, stream):
from ipd.libvirt import remote
return error.RemoteError(remote.error.unpack(stream))
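# --- Hypothetical sketch (editor's addition, not part of the original module) ---
# Concrete procedures are expected to subclass ProcedureBase and fill in the
# class attributes; `args`/`ret` would normally point at packer models defined
# elsewhere in ipd.libvirt. The values below are placeholders only.
class _ExampleProcedure(ProcedureBase):
    id = 1            # placeholder procedure number
    name = 'example'  # placeholder procedure name
    args = None       # nothing to pack in this sketch
    ret = None        # nothing to unpack in this sketch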
| {
"content_hash": "f33dd269ccd5540ae46c95deae59f4c3",
"timestamp": "",
"source": "github",
"line_count": 58,
"max_line_length": 77,
"avg_line_length": 32.3448275862069,
"alnum_prop": 0.6375266524520256,
"repo_name": "GaretJax/ipd",
"id": "fea942ed665fd0917c687edb769a48c4e2e11964",
"size": "1876",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ipd/libvirt/procedures.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "239274"
}
],
"symlink_target": ""
} |
import roslib
import rospy
import math
import tf
from geometry_msgs.msg import Pose
if __name__ == '__main__':
rospy.init_node('tf_box_pose_footprint')
pub = rospy.Publisher('/box_pose_footprint', Pose, queue_size=10)
listener = tf.TransformListener()
pose = Pose()
rate = rospy.Rate(10.0)
while not rospy.is_shutdown():
try:
(trans,rot) = listener.lookupTransform('/base_footprint', '/box', rospy.Time(0))
#print(trans,rot)
pose.position.x = trans[0]
pose.position.y = trans[1]
pose.position.z = trans[2]
pose.orientation.x = rot[0]
pose.orientation.y = rot[1]
pose.orientation.z = rot[2]
pose.orientation.w = rot[3]
print(pose)
pub.publish(pose)
except (tf.LookupException, tf.ConnectivityException, tf.ExtrapolationException):
continue
rate.sleep()
| {
"content_hash": "49ff277cc08a40e18bf4b99dc1189112",
"timestamp": "",
"source": "github",
"line_count": 33,
"max_line_length": 92,
"avg_line_length": 26.666666666666668,
"alnum_prop": 0.6284090909090909,
"repo_name": "matteopantano/youbot-thesis",
"id": "bd337824c30c9bb717c518d0f0cedfee48a395f2",
"size": "905",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "teleop_kuka/src/tf_listener_BOX_footprint.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "C++",
"bytes": "8600"
},
{
"name": "CMake",
"bytes": "26570"
},
{
"name": "Python",
"bytes": "303558"
}
],
"symlink_target": ""
} |
def extractGlitchditchHomeBlog(item):
'''
Parser for 'glitchditch.home.blog'
'''
vol, chp, frag, postfix = extractVolChapterFragmentPostfix(item['title'])
if not (chp or vol) or "preview" in item['title'].lower():
return None
tagmap = [
('Lonely Attack on the Different World', 'Lonely Attack on the Different World', 'translated'),
('PRC', 'PRC', 'translated'),
('Loiterous', 'Loiterous', 'oel'),
]
for tagname, name, tl_type in tagmap:
if tagname in item['tags']:
return buildReleaseMessageWithType(item, name, vol, chp, frag=frag, postfix=postfix, tl_type=tl_type)
    return False
| {
"content_hash": "182d223fdf26fad3e5dd2b7a74b26ca0",
"timestamp": "",
"source": "github",
"line_count": 21,
"max_line_length": 124,
"avg_line_length": 32.19047619047619,
"alnum_prop": 0.6227810650887574,
"repo_name": "fake-name/ReadableWebProxy",
"id": "7a7ce87a8c112fa946b4c3e527514aab11be8184",
"size": "676",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "WebMirror/management/rss_parser_funcs/feed_parse_extractGlitchditchHomeBlog.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "105811"
},
{
"name": "Dockerfile",
"bytes": "1178"
},
{
"name": "HTML",
"bytes": "119737"
},
{
"name": "JavaScript",
"bytes": "3006524"
},
{
"name": "Jupyter Notebook",
"bytes": "148075"
},
{
"name": "Mako",
"bytes": "1454"
},
{
"name": "Python",
"bytes": "5264346"
},
{
"name": "Shell",
"bytes": "1059"
}
],
"symlink_target": ""
} |
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
from pants.backend.codegen.register import build_file_aliases as register_codegen
from pants.backend.codegen.targets.java_wire_library import JavaWireLibrary
from pants.backend.codegen.tasks.wire_gen import WireGen
from pants.backend.core.register import build_file_aliases as register_core
from pants.backend.jvm.targets.exclude import Exclude
from pants.backend.jvm.targets.jar_dependency import JarDependency
from pants.backend.jvm.targets.jar_library import JarLibrary
from pants.base.exceptions import TaskError
from pants.base.revision import Revision
from pants.build_graph.target import Target
from pants_test.tasks.task_test_base import TaskTestBase
class WireGenTest(TaskTestBase):
# A bogus target workdir.
TARGET_WORKDIR = ".pants.d/bogus/workdir"
@classmethod
def task_type(cls):
return WireGen
@property
def alias_groups(self):
return register_core().merge(register_codegen())
def _create_fake_wire_tool(self, version='1.8.0'):
self.make_target(':wire-compiler', JarLibrary, jars=[
JarDependency(org='com.squareup.wire', name='wire-compiler', rev=version),
])
def test_compiler_args(self):
self._create_fake_wire_tool()
simple_wire_target = self.make_target('src/wire:simple-wire-target', JavaWireLibrary,
sources=['foo.proto'])
context = self.context(target_roots=[simple_wire_target])
task = self.create_task(context)
self.assertEquals([
'--java_out={}'.format(self.TARGET_WORKDIR),
'--proto_path={}/src/wire'.format(self.build_root),
'foo.proto'],
task.format_args_for_target(simple_wire_target, self.TARGET_WORKDIR))
def test_compiler_args_wirev1(self):
self._create_fake_wire_tool()
wire_targetv1 = self.make_target('src/wire:wire-targetv1', JavaWireLibrary,
sources=['bar.proto'],
service_writer='org.pantsbuild.DummyServiceWriter',
service_writer_options=['opt1', 'opt2'])
task = self.create_task(self.context(target_roots=[wire_targetv1]))
self.assertEquals([
'--java_out={}'.format(self.TARGET_WORKDIR),
'--service_writer=org.pantsbuild.DummyServiceWriter',
'--service_writer_opt', 'opt1',
'--service_writer_opt', 'opt2',
'--proto_path={}/src/wire'.format(self.build_root),
'bar.proto'],
task.format_args_for_target(wire_targetv1, self.TARGET_WORKDIR))
def test_compiler_args_all(self):
self._create_fake_wire_tool(version='1.8.0')
kitchen_sink = self.make_target('src/wire:kitchen-sink', JavaWireLibrary,
sources=['foo.proto', 'bar.proto', 'baz.proto'],
registry_class='org.pantsbuild.Registry',
service_writer='org.pantsbuild.DummyServiceWriter',
no_options=True,
roots=['root1', 'root2', 'root3'],
enum_options=['enum1', 'enum2', 'enum3'],)
task = self.create_task(self.context(target_roots=[kitchen_sink]))
self.assertEquals([
'--java_out={}'.format(self.TARGET_WORKDIR),
'--no_options',
'--service_writer=org.pantsbuild.DummyServiceWriter',
'--registry_class=org.pantsbuild.Registry',
'--roots=root1,root2,root3',
'--enum_options=enum1,enum2,enum3',
'--proto_path={}/src/wire'.format(self.build_root),
'foo.proto',
'bar.proto',
'baz.proto'],
task.format_args_for_target(kitchen_sink, self.TARGET_WORKDIR))
def test_compiler_args_proto_paths(self):
self._create_fake_wire_tool()
parent_target = self.make_target('src/main/wire:parent-target', JavaWireLibrary,
sources=['bar.proto'])
simple_wire_target = self.make_target('src/wire:simple-wire-target', JavaWireLibrary,
sources=['foo.proto'], dependencies=[parent_target])
context = self.context(target_roots=[parent_target, simple_wire_target])
task = self.create_task(context)
self.assertEquals([
'--java_out={}'.format(self.TARGET_WORKDIR),
'--proto_path={}/src/wire'.format(self.build_root),
'foo.proto'],
task.format_args_for_target(simple_wire_target, self.TARGET_WORKDIR))
| {
"content_hash": "6025772eb5780458f7af280a01f3c16a",
"timestamp": "",
"source": "github",
"line_count": 98,
"max_line_length": 94,
"avg_line_length": 46.357142857142854,
"alnum_prop": 0.6350429231785164,
"repo_name": "jtrobec/pants",
"id": "321ecf38050aa0931541c8b618dc9fb304829e0a",
"size": "4690",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/python/pants_test/backend/codegen/tasks/test_wire_gen.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C++",
"bytes": "781"
},
{
"name": "CSS",
"bytes": "11572"
},
{
"name": "GAP",
"bytes": "2459"
},
{
"name": "Gherkin",
"bytes": "919"
},
{
"name": "Go",
"bytes": "1569"
},
{
"name": "HTML",
"bytes": "64616"
},
{
"name": "Java",
"bytes": "281600"
},
{
"name": "JavaScript",
"bytes": "31040"
},
{
"name": "Protocol Buffer",
"bytes": "3783"
},
{
"name": "Python",
"bytes": "4241664"
},
{
"name": "Scala",
"bytes": "84066"
},
{
"name": "Shell",
"bytes": "50645"
},
{
"name": "Thrift",
"bytes": "2898"
}
],
"symlink_target": ""
} |
"""
Created on Jun 01, 2012
@author: Bilel Msekni
@contact: [email protected]
@author: Houssem Medhioub
@contact: [email protected]
@organization: Institut Mines-Telecom - Telecom SudParis
@license: Apache License, Version 2.0
"""
import pyocni.pyocni_tools.config as config
import pyocni.pyocni_tools.occi_Joker as joker
import pyocni.pyocni_tools.uuid_Generator as uuid_Generator
try:
import simplejson as json
except ImportError:
import json
from pyocni.pyocni_tools.config import return_code
# getting the Logger
logger = config.logger
class ResourceManager(object):
"""
Manager of resource documents
"""
def register_resources(self, occi_descriptions, url_path, db_occi_ids_locs, default_attributes):
"""
Add new resources to the database
Args:
@param occi_descriptions: the OCCI description of the new resources
@param db_occi_ids_locs: OCCI IDs and OCCI Location extracted from the database
@param url_path: URL path of the request
@param default_attributes: the default attributes extracted from kind
"""
loc_res = list()
kind_occi_id = None
#Step[1]: Get the kind on which the request was sent
for elem in db_occi_ids_locs:
if elem['OCCI_Location'] == url_path:
kind_occi_id = elem['OCCI_ID']
break
if kind_occi_id is not None:
for desc in occi_descriptions:
#Note: Verify if the kind to which this request is sent is the same as the one in the link description
if desc['kind'] == kind_occi_id:
#Note: create the url of the id based on the id provided in the request
loc = joker.make_entity_location_from_url(url_path, desc['id'])
exist_same = joker.verify_existences_teta([loc], db_occi_ids_locs)
#Step[2]: Create the new resource
if exist_same is False:
jData = dict()
jData['_id'] = uuid_Generator.get_UUID()
jData['OCCI_Location'] = loc
full_att = joker.complete_occi_description_with_default_attributes(desc['attributes'],
default_attributes)
desc['attributes'] = full_att
jData['OCCI_Description'] = desc
jData['Type'] = "Resource"
loc_res.append(jData)
else:
logger.error(" ===== Register_resources : Bad Resource id ===== ")
return list(), return_code['Conflict']
else:
mesg = "Kind description and kind location don't match"
logger.error("===== Register_resources: " + mesg + " ===== ")
return list(), return_code['Conflict']
#Step[3]: return the list of resources for creation
logger.debug("===== Register_resources: Resources sent for creation =====")
return loc_res, return_code['OK, and location returned']
else:
mesg = "No kind corresponding to this location was found"
logger.error("===== Register_resources: " + mesg + " =====")
return list(), return_code['Not Found']
def get_filtered_resources(self, filters, descriptions_res):
"""
Retrieve the resources that match the filters provided
Args:
@param filters: Filters
@param descriptions_res: Resource descriptions
"""
var = list()
try:
for desc in descriptions_res:
for filter in filters:
#Step[1]: Check if descriptions match the filters
checks = joker.filter_occi_description(desc['OCCI_Description'], filter)
if checks is True:
                        #Step[2]: Keep a record of the descriptions matching the filter
var.append(desc['OCCI_ID'])
logger.debug("===== Get_filtered_resources: A resource document is found =====")
return var, return_code['OK']
except Exception as e:
logger.error("===== Get_filtered_resources : " + e.message + " =====")
return list(), return_code['Internal Server Error']
def register_custom_resource(self, occi_description, path_url, db_occi_ids_locs):
"""
Add a new resource with a custom URL to the database
Args:
@param occi_description: Resource description
@param path_url: Custom URL of the resource
@param db_occi_ids_locs: Ids and locations from the database
"""
#Step[1]: Verify if the kind of the new resource exists
ok_k = joker.verify_existences_beta([occi_description['kind']], db_occi_ids_locs)
#Step[2]: create the resource
if ok_k is True:
jData = dict()
jData['_id'] = uuid_Generator.get_UUID()
jData['OCCI_Location'] = path_url
jData['OCCI_Description'] = occi_description
jData['Type'] = "Resource"
else:
mesg = "This kind does not exist"
logger.error(" ===== Register_custom_resource : " + mesg + " =====")
return list(), return_code['Not Found']
#Step[3]: send resource for creation
logger.debug("===== Register_custom_resource : Resources sent for creation")
return jData, return_code['OK, and location returned']
def update_resource(self, old_doc, occi_new_description):
"""
Fully update the resource's old description
Args:
@param old_doc: Old resource document
@param occi_new_description: New resource description
"""
try:
logger.debug("===== Update_resource: Resource sent for update =====")
#Step[1]: Replace the old occi description with the new occi description
old_doc['OCCI_Description'] = occi_new_description
            #Step[2]: Return the whole document for update
return old_doc, return_code['OK, and location returned']
except Exception as e:
logger.error("===== Update_resource: Resource couldn't be updated =====")
return {}, return_code['Internal Server Error']
def partial_resource_update(self, old_data, occi_description):
"""
Partially update the resource's old occi description
Args:
@param occi_description: Resource description
@param old_data: Old resource description
"""
#Step[1]: try a partial resource update
problems, updated_data = joker.update_occi_entity_description(old_data, occi_description)
#Step[2]: if no problem then return the new data for update else return the old data with conflict status code
if problems is False:
logger.debug("===== Update_partial_resource: Resource sent for update =====")
return updated_data, return_code['OK, and location returned']
else:
logger.error("===== Update_partial_resource: Resource couldn't have been fully updated =====")
return old_data, return_code['Conflict']
| {
"content_hash": "887c9a28d7ae49430635484310381d61",
"timestamp": "",
"source": "github",
"line_count": 183,
"max_line_length": 118,
"avg_line_length": 40.43715846994535,
"alnum_prop": 0.5781081081081081,
"repo_name": "jordan-developer/pyOCNI",
"id": "5c152b110a7b496a25169276fdad1629429ab206",
"size": "7998",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pyocni/junglers/managers/resourceManager.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "JavaScript",
"bytes": "10581"
},
{
"name": "Python",
"bytes": "332210"
}
],
"symlink_target": ""
} |
"""
Collect gene statistics based on gff file:
Exon length, Intron length, Gene length, Exon count
"""
import os.path as op
import sys
import logging
from jcvi.utils.cbook import SummaryStats, percentage, human_size
from jcvi.utils.range import range_interleave
from jcvi.utils.table import tabulate
from jcvi.formats.fasta import Fasta
from jcvi.formats.gff import make_index
from jcvi.formats.base import DictFile
from jcvi.apps.base import OptionParser, ActionDispatcher, mkdir, need_update
metrics = ("Exon_Length", "Intron_Length", "Gene_Length", "Exon_Count")
class GeneStats (object):
def __init__(self, feat, conf_class, transcript_sizes, exons):
self.fid = feat.id
self.conf_class = conf_class
self.num_exons = len(exons)
self.num_transcripts = len(transcript_sizes)
self.locus_size = feat.stop - feat.start + 1
self.cum_transcript_size = sum(transcript_sizes)
self.cum_exon_size = sum((stop - start + 1) \
for (c, start, stop) in exons)
def __str__(self):
return "\t".join(str(x) for x in (self.fid, self.conf_class,
self.num_exons, self.num_transcripts,
self.locus_size,
self.cum_transcript_size,
self.cum_exon_size))
def main():
actions = (
('stats', 'collect gene statistics based on gff file'),
('statstable', 'print gene statistics table based on output of stats'),
('histogram', 'plot gene statistics based on output of stats'),
# summary tables of various styles
('genestats', 'print detailed gene statistics'),
('summary', 'print detailed gene/exon/intron statistics'),
)
p = ActionDispatcher(actions)
p.dispatch(globals())
def gc(seqs):
gc = total = 0
for s in seqs:
s = s.upper()
gc += s.count('G') + s.count('C')
total += sum(s.count(x) for x in 'ACGT')
return percentage(gc, total, precision=0, mode=-1)
def summary(args):
"""
%prog summary gffile fastafile
Print summary stats, including:
- Gene/Exon/Intron
- Number
- Average size (bp)
- Median size (bp)
- Total length (Mb)
- % of genome
- % GC
"""
p = OptionParser(summary.__doc__)
opts, args = p.parse_args(args)
if len(args) != 2:
sys.exit(not p.print_help())
gff_file, ref = args
s = Fasta(ref)
g = make_index(gff_file)
geneseqs, exonseqs, intronseqs = [], [], [] # Calc % GC
for f in g.features_of_type("gene"):
fid = f.id
fseq = s.sequence({'chr': f.chrom, 'start': f.start, 'stop': f.stop})
geneseqs.append(fseq)
exons = set((c.chrom, c.start, c.stop) for c in g.children(fid, 2) \
if c.featuretype == "exon")
exons = list(exons)
for chrom, start, stop in exons:
fseq = s.sequence({'chr': chrom, 'start': start, 'stop': stop})
exonseqs.append(fseq)
introns = range_interleave(exons)
for chrom, start, stop in introns:
fseq = s.sequence({'chr': chrom, 'start': start, 'stop': stop})
intronseqs.append(fseq)
r = {} # Report
for t, tseqs in zip(("Gene", "Exon", "Intron"), (geneseqs, exonseqs, intronseqs)):
tsizes = [len(x) for x in tseqs]
tsummary = SummaryStats(tsizes, dtype="int")
r[t, "Number"] = tsummary.size
r[t, "Average size (bp)"] = tsummary.mean
r[t, "Median size (bp)"] = tsummary.median
r[t, "Total length (Mb)"] = human_size(tsummary.sum, precision=0, target="Mb")
r[t, "% of genome"] = percentage(tsummary.sum, s.totalsize, precision=0, mode=-1)
r[t, "% GC"] = gc(tseqs)
print >> sys.stderr, tabulate(r)
def genestats(args):
"""
%prog genestats gffile
Print summary stats, including:
- Number of genes
- Number of single-exon genes
- Number of multi-exon genes
- Number of distinct exons
- Number of genes with alternative transcript variants
- Number of predicted transcripts
- Mean number of distinct exons per gene
- Mean number of transcripts per gene
- Mean gene locus size (first to last exon)
- Mean transcript size (UTR, CDS)
- Mean exon size
Stats modeled after barley genome paper Table 1.
A physical, genetic and functional sequence assembly of the barley genome
"""
p = OptionParser(genestats.__doc__)
p.add_option("--groupby", default="conf_class",
help="Print separate stats groupby")
opts, args = p.parse_args(args)
if len(args) != 1:
sys.exit(not p.print_help())
gff_file, = args
gb = opts.groupby
g = make_index(gff_file)
tf = "transcript.sizes"
if need_update(gff_file, tf):
fw = open(tf, "w")
for feat in g.features_of_type("mRNA"):
fid = feat.id
conf_class = feat.attributes.get(gb, "all")
tsize = sum((c.stop - c.start + 1) for c in g.children(fid, 1) \
if c.featuretype == "exon")
print >> fw, "\t".join((fid, str(tsize), conf_class))
fw.close()
tsizes = DictFile(tf, cast=int)
conf_classes = DictFile(tf, valuepos=2)
logging.debug("A total of {0} transcripts populated.".format(len(tsizes)))
genes = []
for feat in g.features_of_type("gene"):
fid = feat.id
transcripts = [c.id for c in g.children(fid, 1) \
if c.featuretype == "mRNA"]
transcript_sizes = [tsizes[x] for x in transcripts]
exons = set((c.chrom, c.start, c.stop) for c in g.children(fid, 2) \
if c.featuretype == "exon")
conf_class = conf_classes[transcripts[0]]
gs = GeneStats(feat, conf_class, transcript_sizes, exons)
genes.append(gs)
r = {} # Report
distinct_groups = set(conf_classes.values())
for g in distinct_groups:
num_genes = num_single_exon_genes = num_multi_exon_genes = 0
num_genes_with_alts = num_transcripts = num_exons = max_transcripts = 0
cum_locus_size = cum_transcript_size = cum_exon_size = 0
for gs in genes:
if gs.conf_class != g:
continue
num_genes += 1
if gs.num_exons == 1:
num_single_exon_genes += 1
else:
num_multi_exon_genes += 1
num_exons += gs.num_exons
if gs.num_transcripts > 1:
num_genes_with_alts += 1
if gs.num_transcripts > max_transcripts:
max_transcripts = gs.num_transcripts
num_transcripts += gs.num_transcripts
cum_locus_size += gs.locus_size
cum_transcript_size += gs.cum_transcript_size
cum_exon_size += gs.cum_exon_size
mean_num_exons = num_exons * 1. / num_genes
mean_num_transcripts = num_transcripts * 1. / num_genes
mean_locus_size = cum_locus_size * 1. / num_genes
mean_transcript_size = cum_transcript_size * 1. / num_transcripts
mean_exon_size = cum_exon_size * 1. / num_exons
r[("Number of genes", g)] = num_genes
r[("Number of single-exon genes", g)] = \
percentage(num_single_exon_genes, num_genes, mode=1)
r[("Number of multi-exon genes", g)] = \
percentage(num_multi_exon_genes, num_genes, mode=1)
r[("Number of distinct exons", g)] = num_exons
r[("Number of genes with alternative transcript variants", g)] = \
percentage(num_genes_with_alts, num_genes, mode=1)
r[("Number of predicted transcripts", g)] = num_transcripts
r[("Mean number of distinct exons per gene", g)] = mean_num_exons
r[("Mean number of transcripts per gene", g)] = mean_num_transcripts
r[("Max number of transcripts per gene", g)] = max_transcripts
r[("Mean gene locus size (first to last exon)", g)] = mean_locus_size
r[("Mean transcript size (UTR, CDS)", g)] = mean_transcript_size
r[("Mean exon size", g)] = mean_exon_size
print >> sys.stderr, tabulate(r)
def statstable(args):
"""
%prog statstable *.gff
Print gene statistics table.
"""
p = OptionParser(statstable.__doc__)
opts, args = p.parse_args(args)
if len(args) < 1:
sys.exit(not p.print_help())
gff_files = args
for metric in metrics:
logging.debug("Parsing files in `{0}`..".format(metric))
table = {}
for x in gff_files:
pf = op.basename(x).split(".")[0]
numberfile = op.join(metric, pf + ".txt")
ar = [int(row.strip()) for row in open(numberfile)]
summary_items = SummaryStats(ar).todict().items()
keys, vals = zip(*summary_items)
keys = [(pf, x) for x in keys]
table.update(dict(zip(keys, vals)))
print >> sys.stderr, tabulate(table)
def histogram(args):
"""
%prog histogram *.gff
Plot gene statistics based on output of stats. For each gff file, look to
see if the metric folder (e.g. Exon_Length) contains the data and plot
them.
"""
from jcvi.graphics.histogram import histogram_multiple
p = OptionParser(histogram.__doc__)
p.add_option("--bins", dest="bins", default=40, type="int",
help="number of bins to plot in the histogram [default: %default]")
opts, args = p.parse_args(args)
if len(args) < 1:
sys.exit(not p.print_help())
gff_files = args
# metrics = ("Exon_Length", "Intron_Length", "Gene_Length", "Exon_Count")
colors = ("red", "green", "blue", "black")
vmaxes = (1000, 1000, 4000, 20)
xlabels = ("bp", "bp", "bp", "number")
for metric, color, vmax, xlabel in zip(metrics, colors, vmaxes, xlabels):
logging.debug("Parsing files in `{0}`..".format(metric))
numberfiles = [op.join(metric, op.basename(x).split(".")[0] + ".txt") \
for x in gff_files]
histogram_multiple(numberfiles, 0, vmax, xlabel, metric,
bins=opts.bins, facet=True, fill=color,
prefix=metric + ".")
def stats(args):
"""
%prog stats infile.gff
Collect gene statistics based on gff file. There is a terminology caveat:
what we normally call a "gene" here is actually an mRNA feature, and an
"exon" is often actually a CDS, but both feature types are configurable.
Use --txt to send the numbers to text file in four separate folders,
corresponding to the four metrics:
Exon length, Intron length, Gene length, Exon count
With the data written to disk, you can then run %prog histogram
"""
p = OptionParser(stats.__doc__)
p.add_option("--gene", default="mRNA",
help="The gene type [default: %default]")
p.add_option("--exon", default="CDS",
help="The exon type [default: %default]")
p.add_option("--txt", default=False, action="store_true",
help="Print out numbers for further analyses [default: %default]")
opts, args = p.parse_args(args)
if len(args) != 1:
sys.exit(not p.print_help())
gff_file, = args
g = make_index(gff_file)
exon_lengths = []
intron_lengths = []
gene_lengths = []
exon_counts = []
for feat in g.features_of_type(opts.gene):
exons = []
for c in g.children(feat.id, 1):
if c.featuretype != opts.exon:
continue
exons.append((c.chrom, c.start, c.stop))
introns = range_interleave(exons)
feat_exon_lengths = [(stop - start + 1) for (chrom, start, stop) in exons]
feat_intron_lengths = [(stop - start + 1) for (chrom, start, stop) in introns]
exon_lengths += feat_exon_lengths
intron_lengths += feat_intron_lengths
gene_lengths.append(sum(feat_exon_lengths))
exon_counts.append(len(feat_exon_lengths))
a = SummaryStats(exon_lengths)
b = SummaryStats(intron_lengths)
c = SummaryStats(gene_lengths)
d = SummaryStats(exon_counts)
for x, title in zip((a, b, c, d), metrics):
x.title = title
print >> sys.stderr, x
if not opts.txt:
return
prefix = gff_file.split(".")[0]
for x in (a, b, c, d):
dirname = x.title
mkdir(dirname)
txtfile = op.join(dirname, prefix + ".txt")
x.tofile(txtfile)
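# Illustrative sketch only: the two-step workflow described in the stats()
# docstring, driven from Python instead of the shell. The gff file name is
# hypothetical; stats() with --txt writes the per-metric folders of numbers
# that histogram() then reads and plots.
def _example_stats_then_histogram(gff_file="genes.gff"):
    stats([gff_file, "--txt"])
    histogram([gff_file])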
if __name__ == '__main__':
main()
| {
"content_hash": "2892faddb5266815acf7973196aacd0c",
"timestamp": "",
"source": "github",
"line_count": 354,
"max_line_length": 89,
"avg_line_length": 35.26553672316384,
"alnum_prop": 0.5819448894585069,
"repo_name": "sgordon007/jcvi_062915",
"id": "56925cc1b19d035e53a65549be9e23037e8f7fdd",
"size": "12531",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "annotation/stats.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "1981838"
}
],
"symlink_target": ""
} |
"""Component to configure Home Assistant via an API."""
import asyncio
import importlib
import os
import voluptuous as vol
from homeassistant.components.http import HomeAssistantView
from homeassistant.const import (
CONF_ID,
EVENT_COMPONENT_LOADED,
HTTP_BAD_REQUEST,
HTTP_NOT_FOUND,
)
from homeassistant.core import callback
from homeassistant.exceptions import HomeAssistantError
from homeassistant.setup import ATTR_COMPONENT
from homeassistant.util.yaml import dump, load_yaml
DOMAIN = "config"
SECTIONS = (
"area_registry",
"auth",
"auth_provider_homeassistant",
"automation",
"config_entries",
"core",
"customize",
"device_registry",
"entity_registry",
"group",
"script",
"scene",
)
ON_DEMAND = ("zwave",)
ACTION_CREATE_UPDATE = "create_update"
ACTION_DELETE = "delete"
async def async_setup(hass, config):
"""Set up the config component."""
hass.components.frontend.async_register_built_in_panel(
"config", "config", "hass:settings", require_admin=True
)
async def setup_panel(panel_name):
"""Set up a panel."""
panel = importlib.import_module(f".{panel_name}", __name__)
if not panel:
return
success = await panel.async_setup(hass)
if success:
key = f"{DOMAIN}.{panel_name}"
hass.bus.async_fire(EVENT_COMPONENT_LOADED, {ATTR_COMPONENT: key})
@callback
def component_loaded(event):
"""Respond to components being loaded."""
panel_name = event.data.get(ATTR_COMPONENT)
if panel_name in ON_DEMAND:
hass.async_create_task(setup_panel(panel_name))
hass.bus.async_listen(EVENT_COMPONENT_LOADED, component_loaded)
tasks = [setup_panel(panel_name) for panel_name in SECTIONS]
for panel_name in ON_DEMAND:
if panel_name in hass.config.components:
tasks.append(setup_panel(panel_name))
if tasks:
await asyncio.wait(tasks)
return True
class BaseEditConfigView(HomeAssistantView):
"""Configure a Group endpoint."""
def __init__(
self,
component,
config_type,
path,
key_schema,
data_schema,
*,
post_write_hook=None,
data_validator=None,
):
"""Initialize a config view."""
self.url = f"/api/config/{component}/{config_type}/{{config_key}}"
self.name = f"api:config:{component}:{config_type}"
self.path = path
self.key_schema = key_schema
self.data_schema = data_schema
self.post_write_hook = post_write_hook
self.data_validator = data_validator
self.mutation_lock = asyncio.Lock()
def _empty_config(self):
"""Empty config if file not found."""
raise NotImplementedError
def _get_value(self, hass, data, config_key):
"""Get value."""
raise NotImplementedError
def _write_value(self, hass, data, config_key, new_value):
"""Set value."""
raise NotImplementedError
def _delete_value(self, hass, data, config_key):
"""Delete value."""
raise NotImplementedError
async def get(self, request, config_key):
"""Fetch device specific config."""
hass = request.app["hass"]
async with self.mutation_lock:
current = await self.read_config(hass)
value = self._get_value(hass, current, config_key)
if value is None:
return self.json_message("Resource not found", HTTP_NOT_FOUND)
return self.json(value)
async def post(self, request, config_key):
"""Validate config and return results."""
try:
data = await request.json()
except ValueError:
return self.json_message("Invalid JSON specified", HTTP_BAD_REQUEST)
try:
self.key_schema(config_key)
except vol.Invalid as err:
return self.json_message(f"Key malformed: {err}", HTTP_BAD_REQUEST)
hass = request.app["hass"]
try:
# We just validate, we don't store that data because
# we don't want to store the defaults.
if self.data_validator:
await self.data_validator(hass, data)
else:
self.data_schema(data)
except (vol.Invalid, HomeAssistantError) as err:
return self.json_message(f"Message malformed: {err}", HTTP_BAD_REQUEST)
path = hass.config.path(self.path)
async with self.mutation_lock:
current = await self.read_config(hass)
self._write_value(hass, current, config_key, data)
await hass.async_add_executor_job(_write, path, current)
if self.post_write_hook is not None:
hass.async_create_task(
self.post_write_hook(ACTION_CREATE_UPDATE, config_key)
)
return self.json({"result": "ok"})
async def delete(self, request, config_key):
"""Remove an entry."""
hass = request.app["hass"]
async with self.mutation_lock:
current = await self.read_config(hass)
value = self._get_value(hass, current, config_key)
path = hass.config.path(self.path)
if value is None:
return self.json_message("Resource not found", HTTP_NOT_FOUND)
self._delete_value(hass, current, config_key)
await hass.async_add_executor_job(_write, path, current)
if self.post_write_hook is not None:
hass.async_create_task(self.post_write_hook(ACTION_DELETE, config_key))
return self.json({"result": "ok"})
async def read_config(self, hass):
"""Read the config."""
current = await hass.async_add_executor_job(_read, hass.config.path(self.path))
if not current:
current = self._empty_config()
return current
class EditKeyBasedConfigView(BaseEditConfigView):
"""Configure a list of entries."""
def _empty_config(self):
"""Return an empty config."""
return {}
def _get_value(self, hass, data, config_key):
"""Get value."""
return data.get(config_key)
def _write_value(self, hass, data, config_key, new_value):
"""Set value."""
data.setdefault(config_key, {}).update(new_value)
def _delete_value(self, hass, data, config_key):
"""Delete value."""
return data.pop(config_key)
class EditIdBasedConfigView(BaseEditConfigView):
"""Configure key based config entries."""
def _empty_config(self):
"""Return an empty config."""
return []
def _get_value(self, hass, data, config_key):
"""Get value."""
return next((val for val in data if val.get(CONF_ID) == config_key), None)
def _write_value(self, hass, data, config_key, new_value):
"""Set value."""
value = self._get_value(hass, data, config_key)
if value is None:
value = {CONF_ID: config_key}
data.append(value)
value.update(new_value)
def _delete_value(self, hass, data, config_key):
"""Delete value."""
index = next(
idx for idx, val in enumerate(data) if val.get(CONF_ID) == config_key
)
data.pop(index)
def _read(path):
"""Read YAML helper."""
if not os.path.isfile(path):
return None
return load_yaml(path)
def _write(path, data):
"""Write YAML helper."""
# Do it before opening file. If dump causes error it will now not
# truncate the file.
data = dump(data)
with open(path, "w", encoding="utf-8") as outfile:
outfile.write(data)
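# Minimal sketch, not taken from Home Assistant itself: how one of the views
# above is assumed to be mounted. The component name, storage path and schemas
# below are hypothetical; hass.http.register_view() is the normal way a
# HomeAssistantView is exposed, typically from a panel module's async_setup().
def _example_register_view(hass):
    hass.http.register_view(
        EditIdBasedConfigView(
            "example",                  # hypothetical component name (URL segment)
            "config",                   # config type (second URL segment)
            "example.yaml",             # hypothetical YAML file under the config dir
            key_schema=vol.Schema(str),
            data_schema=vol.Schema({}, extra=vol.ALLOW_EXTRA),
        )
    )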
| {
"content_hash": "cc6f3383cb69f51f5aaf690c0bc323a8",
"timestamp": "",
"source": "github",
"line_count": 261,
"max_line_length": 83,
"avg_line_length": 29.440613026819925,
"alnum_prop": 0.6029411764705882,
"repo_name": "robbiet480/home-assistant",
"id": "d7a257b1d9b5af44df3c8b5fee29e07340edf840",
"size": "7684",
"binary": false,
"copies": "2",
"ref": "refs/heads/dev",
"path": "homeassistant/components/config/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "18837456"
},
{
"name": "Shell",
"bytes": "6846"
}
],
"symlink_target": ""
} |
import tweetpony
import Event.models as ev
def send_tweet(event_id):
CONSUMER_KEY = "vbu0rPw39tCEtIEDfZK5g"
CONSUMER_SECRET = "gB235sqx1GrJZKHde5y4ZaNyVawQ0oaGolQTRVEz0"
ACCESS_TOKEN = "137592952-RGzgPZu4lRUegsMZEHv6DzXQGOuXtZ8SCqLRUS3m"
ACCESS_TOKEN_SECRET = "RX51tm1UihA889TtW4rARKROuYV4sPgoInnB3QwqxCg"
api = tweetpony.API(consumer_key=CONSUMER_KEY,
consumer_secret=CONSUMER_SECRET,
access_token=ACCESS_TOKEN,
access_token_secret=ACCESS_TOKEN_SECRET
)
event = ev.Event.objects.get(id=event_id)
url = "http://localhost:8000/event/"+str(event.id)
users = ev.GeneralUser.objects.filter(subscription__location=event.location, subscription__event_type=event.event_type)
msg = event.title[0:60]+ " at " + event.location.name +" on " + str(event.date) + " at " + str(event.time) + " " + url
for usr in users:
try:
api.send_message(text=msg, screen_name=usr.twitter)
except tweetpony.APIError as err:
print "Oops, something went wrong! Twitter returned error #%i and said: %s" % (err.code, err.description)
else:
print "Yay! Your tweet has been sent!"
# user = api.user
# print "Hello, @%s!" % user.screen_name
# text = "Tweet via API"
# try:
# # api.update_status(status=text)
# api.send_message(text="hello pandey", screen_name="errajsubit")
# except tweetpony.APIError as err:
# print "Oops, something went wrong! Twitter returned error #%i and said: %s" % (err.code, err.description)
# else:
# print "Yay! Your tweet has been sent!"
# # send_message': {
# 'endpoint': "direct_messages/new.json",
# 'post': True,
# 'url_params': [],
# 'required_params': ['text'],
# 'optional_params': ['user_id', 'screen_name'],
# 'model': Message,
# }, | {
"content_hash": "9422d2bbd1d579b43c1faed31be4b63a",
"timestamp": "",
"source": "github",
"line_count": 51,
"max_line_length": 123,
"avg_line_length": 37.254901960784316,
"alnum_prop": 0.6263157894736842,
"repo_name": "kaflesudip/jootau",
"id": "56b4eff9f732b8e2a0d6cbb981acceb9c119cc2c",
"size": "1900",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "jootau/Event/SendTweet.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "2692"
},
{
"name": "HTML",
"bytes": "21101"
},
{
"name": "Python",
"bytes": "27118"
}
],
"symlink_target": ""
} |
__author__ = 'Neil Butcher'
from PyQt4 import QtGui
class CommandChangeRole(QtGui.QUndoCommand):
def __init__(self, role, key, value, *__args):
QtGui.QUndoCommand.__init__(self, *__args)
self.role = role
self.key = key
self.setText('Changed the ' + key + ' of a role')
self.value = value
if key == 'description':
self.startingValue = role.description
elif key == 'priority':
self.startingValue = role.priority
def redo(self):
if self.key == 'description':
self.role.description = self.value
elif self.key == 'priority':
self.role.priority = self.value
def undo(self):
if self.key == 'description':
self.role.description = self.startingValue
elif self.key == 'priority':
self.role.priority = self.startingValue | {
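# Illustrative sketch only (the undo stack and role object are assumptions, not
# part of this module): commands like the one above are meant to be pushed onto a
# QtGui.QUndoStack, which executes redo() immediately and calls undo()/redo()
# again as the user steps backwards and forwards through the history.
def _example_change_description(undo_stack, role, new_description):
    undo_stack.push(CommandChangeRole(role, 'description', new_description))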
"content_hash": "1f31479d01224a7e4fad2c3d1a730cab",
"timestamp": "",
"source": "github",
"line_count": 28,
"max_line_length": 57,
"avg_line_length": 31.642857142857142,
"alnum_prop": 0.5790067720090294,
"repo_name": "ergoregion/Rota-Program",
"id": "6aa87e842edb54f9bd99f3f226f05d0d406416a7",
"size": "886",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Rota_System/UI/Roles/commands_role.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "254617"
}
],
"symlink_target": ""
} |
import uuid
import tornado.escape
import tornado.web
class ListScenariosHandler(tornado.web.RequestHandler):
#web server instance
_ws = None
def initialize(self, ws):
self._ws = ws
def get(self):
self.finish({"list": [name for name, scenario in self._ws.action_processor.list_all() if scenario.published]})
class ExecuteScenarioHandler(tornado.web.RequestHandler):
#web server instance
_ws = None
def initialize(self, ws):
self._ws = ws
def post(self):
data = tornado.escape.json_decode(self.request.body)
scenario = data['scenario'] if 'scenario' in data else None
parameters = data['parameters'] if 'parameters' in data else {}
if scenario:
res = self._ws.action_processor.execute(scenario, parameters)
if res:
self.finish(res)
else:
raise tornado.web.HTTPError(405, "Scenario is not defined")
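# Illustrative sketch only: the URL patterns and the "ws" web-server object below
# are assumptions, not taken from the shome project. It shows how handlers that
# expect initialize(ws=...) are normally mounted in a tornado Application.
def _example_make_app(ws):
    return tornado.web.Application([
        (r"/scenarios", ListScenariosHandler, dict(ws=ws)),
        (r"/scenarios/execute", ExecuteScenarioHandler, dict(ws=ws)),
    ])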
"content_hash": "9498b6a02183127e5d6c5e6600729788",
"timestamp": "",
"source": "github",
"line_count": 35,
"max_line_length": 118,
"avg_line_length": 26.4,
"alnum_prop": 0.6309523809523809,
"repo_name": "Cirreth/shome",
"id": "bfffefdab87b82358815f14953440891f8e7b776",
"size": "924",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "web/handlers/ClientApplicationHandler.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "125643"
},
{
"name": "HTML",
"bytes": "31880"
},
{
"name": "JavaScript",
"bytes": "317583"
},
{
"name": "Python",
"bytes": "91654"
},
{
"name": "Smarty",
"bytes": "197"
}
],
"symlink_target": ""
} |
"""Test segwit transactions and blocks on P2P network."""
from binascii import hexlify
import math
import random
import struct
import time
from test_framework.blocktools import create_block, create_coinbase, add_witness_commitment, get_witness_script, WITNESS_COMMITMENT_HEADER
from test_framework.key import CECKey, CPubKey
from test_framework.messages import (
BIP125_SEQUENCE_NUMBER,
CBlock,
CBlockHeader,
CInv,
COutPoint,
CTransaction,
CTxIn,
CTxInWitness,
CTxOut,
CTxWitness,
MAX_BLOCK_BASE_SIZE,
MSG_WITNESS_FLAG,
NODE_NETWORK,
NODE_WITNESS,
msg_block,
msg_getdata,
msg_headers,
msg_inv,
msg_tx,
msg_witness_block,
msg_witness_tx,
ser_uint256,
ser_vector,
sha256,
uint256_from_str,
)
from test_framework.mininode import (
P2PInterface,
mininode_lock,
wait_until,
)
from test_framework.script import (
CScript,
CScriptNum,
CScriptOp,
MAX_SCRIPT_ELEMENT_SIZE,
OP_0,
OP_1,
OP_16,
OP_2DROP,
OP_CHECKMULTISIG,
OP_CHECKSIG,
OP_DROP,
OP_DUP,
OP_ELSE,
OP_ENDIF,
OP_EQUAL,
OP_EQUALVERIFY,
OP_HASH160,
OP_IF,
OP_RETURN,
OP_TRUE,
SIGHASH_ALL,
SIGHASH_ANYONECANPAY,
SIGHASH_NONE,
SIGHASH_SINGLE,
SegwitVersion1SignatureHash,
SignatureHash,
hash160,
)
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import (
assert_equal,
bytes_to_hex_str,
connect_nodes,
disconnect_nodes,
get_bip9_status,
hex_str_to_bytes,
sync_blocks,
sync_mempools,
)
# The versionbit bit used to signal activation of SegWit
VB_WITNESS_BIT = 1
VB_PERIOD = 144
VB_TOP_BITS = 0x20000000
MAX_SIGOP_COST = 80000
class UTXO():
"""Used to keep track of anyone-can-spend outputs that we can use in the tests."""
def __init__(self, sha256, n, value):
self.sha256 = sha256
self.n = n
self.nValue = value
def get_p2pkh_script(pubkeyhash):
"""Get the script associated with a P2PKH."""
return CScript([CScriptOp(OP_DUP), CScriptOp(OP_HASH160), pubkeyhash, CScriptOp(OP_EQUALVERIFY), CScriptOp(OP_CHECKSIG)])
def sign_p2pk_witness_input(script, tx_to, in_idx, hashtype, value, key):
"""Add signature for a P2PK witness program."""
tx_hash = SegwitVersion1SignatureHash(script, tx_to, in_idx, hashtype, value)
signature = key.sign(tx_hash) + chr(hashtype).encode('latin-1')
tx_to.wit.vtxinwit[in_idx].scriptWitness.stack = [signature, script]
tx_to.rehash()
def get_virtual_size(witness_block):
"""Calculate the virtual size of a witness block.
Virtual size is base + witness/4."""
base_size = len(witness_block.serialize(with_witness=False))
total_size = len(witness_block.serialize(with_witness=True))
# the "+3" is so we round up
vsize = int((3 * base_size + total_size + 3) / 4)
return vsize
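# Worked example, not used by the test: mirrors the get_virtual_size() arithmetic
# on assumed byte counts. A block that serializes to 900,000 bytes without
# witnesses and 1,100,000 bytes with them has weight 3*900000 + 1100000 = 3,800,000,
# so its virtual size is 950,000; the "+3" only matters when the weight is not a
# multiple of 4, where it rounds the division up.
def _example_virtual_size():
    base_size, total_size = 900000, 1100000  # hypothetical serialized sizes
    return int((3 * base_size + total_size + 3) / 4)  # == 950000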
def test_transaction_acceptance(rpc, p2p, tx, with_witness, accepted, reason=None):
"""Send a transaction to the node and check that it's accepted to the mempool
- Submit the transaction over the p2p interface
- use the getrawmempool rpc to check for acceptance."""
tx_message = msg_tx(tx)
if with_witness:
tx_message = msg_witness_tx(tx)
p2p.send_message(tx_message)
p2p.sync_with_ping()
assert_equal(tx.hash in rpc.getrawmempool(), accepted)
if (reason is not None and not accepted):
# Check the rejection reason as well.
with mininode_lock:
assert_equal(p2p.last_message["reject"].reason, reason)
def test_witness_block(rpc, p2p, block, accepted, with_witness=True, reason=None):
"""Send a block to the node and check that it's accepted
- Submit the block over the p2p interface
- use the getbestblockhash rpc to check for acceptance."""
if with_witness:
p2p.send_message(msg_witness_block(block))
else:
p2p.send_message(msg_block(block))
p2p.sync_with_ping()
assert_equal(rpc.getbestblockhash() == block.hash, accepted)
if (reason is not None and not accepted):
# Check the rejection reason as well.
with mininode_lock:
assert_equal(p2p.last_message["reject"].reason, reason)
class TestP2PConn(P2PInterface):
def __init__(self):
super().__init__()
self.getdataset = set()
def on_getdata(self, message):
for inv in message.inv:
self.getdataset.add(inv.hash)
def announce_tx_and_wait_for_getdata(self, tx, timeout=60, success=True):
with mininode_lock:
self.last_message.pop("getdata", None)
self.send_message(msg_inv(inv=[CInv(1, tx.sha256)]))
if success:
self.wait_for_getdata(timeout)
else:
time.sleep(timeout)
assert not self.last_message.get("getdata")
def announce_block_and_wait_for_getdata(self, block, use_header, timeout=60):
with mininode_lock:
self.last_message.pop("getdata", None)
self.last_message.pop("getheaders", None)
msg = msg_headers()
msg.headers = [CBlockHeader(block)]
if use_header:
self.send_message(msg)
else:
self.send_message(msg_inv(inv=[CInv(2, block.sha256)]))
self.wait_for_getheaders()
self.send_message(msg)
self.wait_for_getdata()
def request_block(self, blockhash, inv_type, timeout=60):
with mininode_lock:
self.last_message.pop("block", None)
self.send_message(msg_getdata(inv=[CInv(inv_type, blockhash)]))
self.wait_for_block(blockhash, timeout)
return self.last_message["block"].block
class SegWitTest(BitcoinTestFramework):
def set_test_params(self):
self.setup_clean_chain = True
self.num_nodes = 3
# This test tests SegWit both pre and post-activation, so use the normal BIP9 activation.
self.extra_args = [["-whitelist=127.0.0.1", "-vbparams=segwit:0:999999999999"], ["-whitelist=127.0.0.1", "-acceptnonstdtxn=0", "-vbparams=segwit:0:999999999999"], ["-whitelist=127.0.0.1", "-vbparams=segwit:0:0"]]
def setup_network(self):
self.setup_nodes()
connect_nodes(self.nodes[0], 1)
connect_nodes(self.nodes[0], 2)
self.sync_all()
# Helper functions
def build_next_block(self, version=4):
"""Build a block on top of node0's tip."""
tip = self.nodes[0].getbestblockhash()
height = self.nodes[0].getblockcount() + 1
block_time = self.nodes[0].getblockheader(tip)["mediantime"] + 1
block = create_block(int(tip, 16), create_coinbase(height), block_time)
block.version = version
block.rehash()
return block
def update_witness_block_with_transactions(self, block, tx_list, nonce=0):
"""Add list of transactions to block, adds witness commitment, then solves."""
block.vtx.extend(tx_list)
add_witness_commitment(block, nonce)
block.solve()
def run_test(self):
# Setup the p2p connections
# self.test_node sets NODE_WITNESS|NODE_NETWORK
self.test_node = self.nodes[0].add_p2p_connection(TestP2PConn(), services=NODE_NETWORK | NODE_WITNESS)
# self.old_node sets only NODE_NETWORK
self.old_node = self.nodes[0].add_p2p_connection(TestP2PConn(), services=NODE_NETWORK)
# self.std_node is for testing node1 (fRequireStandard=true)
self.std_node = self.nodes[1].add_p2p_connection(TestP2PConn(), services=NODE_NETWORK | NODE_WITNESS)
for conn in (self.test_node, self.old_node, self.std_node):
conn.wait_for_verack()
assert self.test_node.nServices & NODE_WITNESS != 0
# Keep a place to store utxo's that can be used in later tests
self.utxo = []
# Segwit status 'defined'
self.segwit_status = 'defined'
self.test_non_witness_transaction()
self.test_unnecessary_witness_before_segwit_activation()
self.test_v0_outputs_arent_spendable()
self.test_block_relay()
self.advance_to_segwit_started()
# Segwit status 'started'
self.test_getblocktemplate_before_lockin()
self.advance_to_segwit_lockin()
# Segwit status 'locked_in'
self.test_unnecessary_witness_before_segwit_activation()
self.test_witness_tx_relay_before_segwit_activation()
self.test_block_relay()
self.test_standardness_v0()
self.advance_to_segwit_active()
# Segwit status 'active'
self.test_p2sh_witness()
self.test_witness_commitments()
self.test_block_malleability()
self.test_witness_block_size()
self.test_submit_block()
self.test_extra_witness_data()
self.test_max_witness_push_length()
self.test_max_witness_program_length()
self.test_witness_input_length()
self.test_block_relay()
self.test_tx_relay_after_segwit_activation()
self.test_standardness_v0()
self.test_segwit_versions()
self.test_premature_coinbase_witness_spend()
self.test_uncompressed_pubkey()
self.test_signature_version_1()
self.test_non_standard_witness_blinding()
self.test_non_standard_witness()
self.test_upgrade_after_activation()
self.test_witness_sigops()
# Individual tests
def subtest(func): # noqa: N805
"""Wraps the subtests for logging and state assertions."""
def func_wrapper(self, *args, **kwargs):
self.log.info("Subtest: {} (Segwit status = {})".format(func.__name__, self.segwit_status))
# Assert segwit status is as expected
assert_equal(get_bip9_status(self.nodes[0], 'segwit')['status'], self.segwit_status)
func(self, *args, **kwargs)
# Each subtest should leave some utxos for the next subtest
assert self.utxo
sync_blocks(self.nodes)
# Assert segwit status is as expected at end of subtest
assert_equal(get_bip9_status(self.nodes[0], 'segwit')['status'], self.segwit_status)
return func_wrapper
@subtest
def test_non_witness_transaction(self):
"""See if sending a regular transaction works, and create a utxo to use in later tests."""
# Mine a block with an anyone-can-spend coinbase,
# let it mature, then try to spend it.
block = self.build_next_block(version=1)
block.solve()
self.test_node.send_message(msg_block(block))
self.test_node.sync_with_ping() # make sure the block was processed
txid = block.vtx[0].sha256
self.nodes[0].generate(99) # let the block mature
# Create a transaction that spends the coinbase
tx = CTransaction()
tx.vin.append(CTxIn(COutPoint(txid, 0), b""))
tx.vout.append(CTxOut(49 * 100000000, CScript([OP_TRUE, OP_DROP] * 15 + [OP_TRUE])))
tx.calc_sha256()
# Check that serializing it with or without witness is the same
# This is a sanity check of our testing framework.
assert_equal(msg_tx(tx).serialize(), msg_witness_tx(tx).serialize())
self.test_node.send_message(msg_witness_tx(tx))
self.test_node.sync_with_ping() # make sure the tx was processed
assert(tx.hash in self.nodes[0].getrawmempool())
# Save this transaction for later
self.utxo.append(UTXO(tx.sha256, 0, 49 * 100000000))
self.nodes[0].generate(1)
@subtest
def test_unnecessary_witness_before_segwit_activation(self):
"""Verify that blocks with witnesses are rejected before activation."""
tx = CTransaction()
tx.vin.append(CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b""))
tx.vout.append(CTxOut(self.utxo[0].nValue - 1000, CScript([OP_TRUE])))
tx.wit.vtxinwit.append(CTxInWitness())
tx.wit.vtxinwit[0].scriptWitness.stack = [CScript([CScriptNum(1)])]
# Verify the hash with witness differs from the txid
# (otherwise our testing framework must be broken!)
tx.rehash()
assert(tx.sha256 != tx.calc_sha256(with_witness=True))
# Construct a segwit-signaling block that includes the transaction.
block = self.build_next_block(version=(VB_TOP_BITS | (1 << VB_WITNESS_BIT)))
self.update_witness_block_with_transactions(block, [tx])
# Sending witness data before activation is not allowed (anti-spam
# rule).
test_witness_block(self.nodes[0].rpc, self.test_node, block, accepted=False)
wait_until(lambda: 'reject' in self.test_node.last_message and self.test_node.last_message["reject"].reason == b"unexpected-witness")
# But it should not be permanently marked bad...
# Resend without witness information.
self.test_node.send_message(msg_block(block))
self.test_node.sync_with_ping()
assert_equal(self.nodes[0].getbestblockhash(), block.hash)
# Update our utxo list; we spent the first entry.
self.utxo.pop(0)
self.utxo.append(UTXO(tx.sha256, 0, tx.vout[0].nValue))
@subtest
def test_block_relay(self):
"""Test that block requests to NODE_WITNESS peer are with MSG_WITNESS_FLAG.
This is true regardless of segwit activation.
Also test that we don't ask for blocks from unupgraded peers."""
blocktype = 2 | MSG_WITNESS_FLAG
# test_node has set NODE_WITNESS, so all getdata requests should be for
# witness blocks.
# Test announcing a block via inv results in a getdata, and that
# announcing a version 4 or random VB block with a header results in a getdata
block1 = self.build_next_block()
block1.solve()
self.test_node.announce_block_and_wait_for_getdata(block1, use_header=False)
assert(self.test_node.last_message["getdata"].inv[0].type == blocktype)
test_witness_block(self.nodes[0].rpc, self.test_node, block1, True)
block2 = self.build_next_block(version=4)
block2.solve()
self.test_node.announce_block_and_wait_for_getdata(block2, use_header=True)
assert(self.test_node.last_message["getdata"].inv[0].type == blocktype)
test_witness_block(self.nodes[0].rpc, self.test_node, block2, True)
block3 = self.build_next_block(version=(VB_TOP_BITS | (1 << 15)))
block3.solve()
self.test_node.announce_block_and_wait_for_getdata(block3, use_header=True)
assert(self.test_node.last_message["getdata"].inv[0].type == blocktype)
test_witness_block(self.nodes[0].rpc, self.test_node, block3, True)
# Check that we can getdata for witness blocks or regular blocks,
# and the right thing happens.
if self.segwit_status != 'active':
# Before activation, we should be able to request old blocks with
# or without witness, and they should be the same.
chain_height = self.nodes[0].getblockcount()
# Pick 10 random blocks on main chain, and verify that getdata's
# for MSG_BLOCK, MSG_WITNESS_BLOCK, and rpc getblock() are equal.
all_heights = list(range(chain_height + 1))
random.shuffle(all_heights)
all_heights = all_heights[0:10]
for height in all_heights:
block_hash = self.nodes[0].getblockhash(height)
rpc_block = self.nodes[0].getblock(block_hash, False)
block_hash = int(block_hash, 16)
block = self.test_node.request_block(block_hash, 2)
wit_block = self.test_node.request_block(block_hash, 2 | MSG_WITNESS_FLAG)
assert_equal(block.serialize(True), wit_block.serialize(True))
assert_equal(block.serialize(), hex_str_to_bytes(rpc_block))
else:
# After activation, witness blocks and non-witness blocks should
# be different. Verify rpc getblock() returns witness blocks, while
# getdata respects the requested type.
block = self.build_next_block()
self.update_witness_block_with_transactions(block, [])
# This gives us a witness commitment.
assert(len(block.vtx[0].wit.vtxinwit) == 1)
assert(len(block.vtx[0].wit.vtxinwit[0].scriptWitness.stack) == 1)
test_witness_block(self.nodes[0].rpc, self.test_node, block, accepted=True)
# Now try to retrieve it...
rpc_block = self.nodes[0].getblock(block.hash, False)
non_wit_block = self.test_node.request_block(block.sha256, 2)
wit_block = self.test_node.request_block(block.sha256, 2 | MSG_WITNESS_FLAG)
assert_equal(wit_block.serialize(True), hex_str_to_bytes(rpc_block))
assert_equal(wit_block.serialize(False), non_wit_block.serialize())
assert_equal(wit_block.serialize(True), block.serialize(True))
# Test size, vsize, weight
rpc_details = self.nodes[0].getblock(block.hash, True)
assert_equal(rpc_details["size"], len(block.serialize(True)))
assert_equal(rpc_details["strippedsize"], len(block.serialize(False)))
weight = 3 * len(block.serialize(False)) + len(block.serialize(True))
assert_equal(rpc_details["weight"], weight)
# Upgraded node should not ask for blocks from unupgraded
block4 = self.build_next_block(version=4)
block4.solve()
self.old_node.getdataset = set()
# Blocks can be requested via direct-fetch (immediately upon processing the announcement)
# or via parallel download (with an indeterminate delay from processing the announcement)
# so to test that a block is NOT requested, we could guess a time period to sleep for,
# and then check. We can avoid the sleep() by taking advantage of transaction getdata's
# being processed after block getdata's, and announce a transaction as well,
# and then check to see if that particular getdata has been received.
# Since 0.14, inv's will only be responded to with a getheaders, so send a header
# to announce this block.
msg = msg_headers()
msg.headers = [CBlockHeader(block4)]
self.old_node.send_message(msg)
self.old_node.announce_tx_and_wait_for_getdata(block4.vtx[0])
assert(block4.sha256 not in self.old_node.getdataset)
@subtest
def test_v0_outputs_arent_spendable(self):
"""Test that v0 outputs aren't spendable before segwit activation.
~6 months after segwit activation, the SCRIPT_VERIFY_WITNESS flag was
backdated so that it applies to all blocks, going back to the genesis
block.
Consequently, version 0 witness outputs are never spendable without
witness, and so can't be spent before segwit activation (the point at which
blocks are permitted to contain witnesses)."""
# node2 doesn't need to be connected for this test.
# (If it's connected, node0 may propagate an invalid block to it over
# compact blocks and the nodes would have inconsistent tips.)
disconnect_nodes(self.nodes[0], 2)
# Create two outputs, a p2wsh and p2sh-p2wsh
witness_program = CScript([OP_TRUE])
witness_hash = sha256(witness_program)
script_pubkey = CScript([OP_0, witness_hash])
p2sh_pubkey = hash160(script_pubkey)
p2sh_script_pubkey = CScript([OP_HASH160, p2sh_pubkey, OP_EQUAL])
value = self.utxo[0].nValue // 3
tx = CTransaction()
tx.vin = [CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b'')]
tx.vout = [CTxOut(value, script_pubkey), CTxOut(value, p2sh_script_pubkey)]
tx.vout.append(CTxOut(value, CScript([OP_TRUE])))
tx.rehash()
txid = tx.sha256
# Add it to a block
block = self.build_next_block()
self.update_witness_block_with_transactions(block, [tx])
# Verify that segwit isn't activated. A block serialized with witness
# should be rejected prior to activation.
test_witness_block(self.nodes[0], self.test_node, block, accepted=False, with_witness=True, reason=b'unexpected-witness')
# Now send the block without witness. It should be accepted
test_witness_block(self.nodes[0], self.test_node, block, accepted=True, with_witness=False)
# Now try to spend the outputs. This should fail since SCRIPT_VERIFY_WITNESS is always enabled.
p2wsh_tx = CTransaction()
p2wsh_tx.vin = [CTxIn(COutPoint(txid, 0), b'')]
p2wsh_tx.vout = [CTxOut(value, CScript([OP_TRUE]))]
p2wsh_tx.wit.vtxinwit.append(CTxInWitness())
p2wsh_tx.wit.vtxinwit[0].scriptWitness.stack = [CScript([OP_TRUE])]
p2wsh_tx.rehash()
p2sh_p2wsh_tx = CTransaction()
p2sh_p2wsh_tx.vin = [CTxIn(COutPoint(txid, 1), CScript([script_pubkey]))]
p2sh_p2wsh_tx.vout = [CTxOut(value, CScript([OP_TRUE]))]
p2sh_p2wsh_tx.wit.vtxinwit.append(CTxInWitness())
p2sh_p2wsh_tx.wit.vtxinwit[0].scriptWitness.stack = [CScript([OP_TRUE])]
p2sh_p2wsh_tx.rehash()
for tx in [p2wsh_tx, p2sh_p2wsh_tx]:
block = self.build_next_block()
self.update_witness_block_with_transactions(block, [tx])
# When the block is serialized with a witness, the block will be rejected because witness
# data isn't allowed in blocks that don't commit to witness data.
test_witness_block(self.nodes[0], self.test_node, block, accepted=False, with_witness=True, reason=b'unexpected-witness')
# When the block is serialized without witness, validation fails because the transaction is
# invalid (transactions are always validated with SCRIPT_VERIFY_WITNESS so a segwit v0 transaction
# without a witness is invalid).
# Note: The reject reason for this failure could be
# 'block-validation-failed' (if script check threads > 1) or
# 'non-mandatory-script-verify-flag (Witness program was passed an
# empty witness)' (otherwise).
# TODO: support multiple acceptable reject reasons.
test_witness_block(self.nodes[0], self.test_node, block, accepted=False, with_witness=False)
connect_nodes(self.nodes[0], 2)
self.utxo.pop(0)
self.utxo.append(UTXO(txid, 2, value))
@subtest
def advance_to_segwit_started(self):
"""Mine enough blocks for segwit's vb state to be 'started'."""
height = self.nodes[0].getblockcount()
# Will need to rewrite the tests here if we are past the first period
assert(height < VB_PERIOD - 1)
# Advance to end of period, status should now be 'started'
self.nodes[0].generate(VB_PERIOD - height - 1)
assert_equal(get_bip9_status(self.nodes[0], 'segwit')['status'], 'started')
self.segwit_status = 'started'
@subtest
def test_getblocktemplate_before_lockin(self):
# Node0 is segwit aware, node2 is not.
for node in [self.nodes[0], self.nodes[2]]:
gbt_results = node.getblocktemplate()
block_version = gbt_results['version']
# If we're not indicating segwit support, we will still be
# signalling for segwit activation.
assert_equal((block_version & (1 << VB_WITNESS_BIT) != 0), node == self.nodes[0])
# If we don't specify the segwit rule, then we won't get a default
# commitment.
assert('default_witness_commitment' not in gbt_results)
# Workaround:
# Can either change the tip, or change the mempool and wait 5 seconds
# to trigger a recomputation of getblocktemplate.
txid = int(self.nodes[0].sendtoaddress(self.nodes[0].getnewaddress(), 1), 16)
# Using mocktime lets us avoid sleep()
sync_mempools(self.nodes)
self.nodes[0].setmocktime(int(time.time()) + 10)
self.nodes[2].setmocktime(int(time.time()) + 10)
for node in [self.nodes[0], self.nodes[2]]:
gbt_results = node.getblocktemplate({"rules": ["segwit"]})
block_version = gbt_results['version']
if node == self.nodes[2]:
# If this is a non-segwit node, we should still not get a witness
# commitment, nor a version bit signalling segwit.
assert_equal(block_version & (1 << VB_WITNESS_BIT), 0)
assert('default_witness_commitment' not in gbt_results)
else:
# For segwit-aware nodes, check the version bit and the witness
# commitment are correct.
assert(block_version & (1 << VB_WITNESS_BIT) != 0)
assert('default_witness_commitment' in gbt_results)
witness_commitment = gbt_results['default_witness_commitment']
# Check that default_witness_commitment is present.
witness_root = CBlock.get_merkle_root([ser_uint256(0),
ser_uint256(txid)])
script = get_witness_script(witness_root, 0)
assert_equal(witness_commitment, bytes_to_hex_str(script))
# undo mocktime
self.nodes[0].setmocktime(0)
self.nodes[2].setmocktime(0)
@subtest
def advance_to_segwit_lockin(self):
"""Mine enough blocks to lock in segwit, but don't activate."""
height = self.nodes[0].getblockcount()
# Advance to end of period, and verify lock-in happens at the end
self.nodes[0].generate(VB_PERIOD - 1)
height = self.nodes[0].getblockcount()
assert((height % VB_PERIOD) == VB_PERIOD - 2)
assert_equal(get_bip9_status(self.nodes[0], 'segwit')['status'], 'started')
self.nodes[0].generate(1)
assert_equal(get_bip9_status(self.nodes[0], 'segwit')['status'], 'locked_in')
self.segwit_status = 'locked_in'
@subtest
def test_witness_tx_relay_before_segwit_activation(self):
# Generate a transaction that doesn't require a witness, but send it
# with a witness. Should be rejected for premature-witness, but should
# not be added to recently rejected list.
tx = CTransaction()
tx.vin.append(CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b""))
tx.vout.append(CTxOut(self.utxo[0].nValue - 1000, CScript([OP_TRUE, OP_DROP] * 15 + [OP_TRUE])))
tx.wit.vtxinwit.append(CTxInWitness())
tx.wit.vtxinwit[0].scriptWitness.stack = [b'a']
tx.rehash()
tx_hash = tx.sha256
tx_value = tx.vout[0].nValue
# Verify that if a peer doesn't set nServices to include NODE_WITNESS,
# the getdata is just for the non-witness portion.
self.old_node.announce_tx_and_wait_for_getdata(tx)
assert(self.old_node.last_message["getdata"].inv[0].type == 1)
# Since we haven't delivered the tx yet, inv'ing the same tx from
# a witness transaction ought not result in a getdata.
self.test_node.announce_tx_and_wait_for_getdata(tx, timeout=2, success=False)
# Delivering this transaction with witness should fail (no matter who
# its from)
assert_equal(len(self.nodes[0].getrawmempool()), 0)
assert_equal(len(self.nodes[1].getrawmempool()), 0)
test_transaction_acceptance(self.nodes[0].rpc, self.old_node, tx, with_witness=True, accepted=False)
test_transaction_acceptance(self.nodes[0].rpc, self.test_node, tx, with_witness=True, accepted=False)
# But eliminating the witness should fix it
test_transaction_acceptance(self.nodes[0].rpc, self.test_node, tx, with_witness=False, accepted=True)
# Cleanup: mine the first transaction and update utxo
self.nodes[0].generate(1)
assert_equal(len(self.nodes[0].getrawmempool()), 0)
self.utxo.pop(0)
self.utxo.append(UTXO(tx_hash, 0, tx_value))
@subtest
def test_standardness_v0(self):
"""Test V0 txout standardness.
V0 segwit outputs and inputs are always standard.
V0 segwit inputs may only be mined after activation, but not before."""
witness_program = CScript([OP_TRUE])
witness_hash = sha256(witness_program)
script_pubkey = CScript([OP_0, witness_hash])
p2sh_pubkey = hash160(witness_program)
p2sh_script_pubkey = CScript([OP_HASH160, p2sh_pubkey, OP_EQUAL])
# First prepare a p2sh output (so that spending it will pass standardness)
p2sh_tx = CTransaction()
p2sh_tx.vin = [CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b"")]
p2sh_tx.vout = [CTxOut(self.utxo[0].nValue - 1000, p2sh_script_pubkey)]
p2sh_tx.rehash()
# Mine it on test_node to create the confirmed output.
test_transaction_acceptance(self.nodes[0].rpc, self.test_node, p2sh_tx, with_witness=True, accepted=True)
self.nodes[0].generate(1)
sync_blocks(self.nodes)
# Now test standardness of v0 P2WSH outputs.
# Start by creating a transaction with two outputs.
tx = CTransaction()
tx.vin = [CTxIn(COutPoint(p2sh_tx.sha256, 0), CScript([witness_program]))]
tx.vout = [CTxOut(p2sh_tx.vout[0].nValue - 10000, script_pubkey)]
tx.vout.append(CTxOut(8000, script_pubkey)) # Might burn this later
tx.vin[0].nSequence = BIP125_SEQUENCE_NUMBER # Just to have the option to bump this tx from the mempool
tx.rehash()
# This is always accepted, since the mempool policy is to consider segwit as always active
# and thus allow segwit outputs
test_transaction_acceptance(self.nodes[1].rpc, self.std_node, tx, with_witness=True, accepted=True)
# Now create something that looks like a P2PKH output. This won't be spendable.
script_pubkey = CScript([OP_0, hash160(witness_hash)])
tx2 = CTransaction()
# tx was accepted, so we spend the second output.
tx2.vin = [CTxIn(COutPoint(tx.sha256, 1), b"")]
tx2.vout = [CTxOut(7000, script_pubkey)]
tx2.wit.vtxinwit.append(CTxInWitness())
tx2.wit.vtxinwit[0].scriptWitness.stack = [witness_program]
tx2.rehash()
test_transaction_acceptance(self.nodes[1].rpc, self.std_node, tx2, with_witness=True, accepted=True)
# Now update self.utxo for later tests.
tx3 = CTransaction()
# tx and tx2 were both accepted. Don't bother trying to reclaim the
# P2PKH output; just send tx's first output back to an anyone-can-spend.
sync_mempools([self.nodes[0], self.nodes[1]])
tx3.vin = [CTxIn(COutPoint(tx.sha256, 0), b"")]
tx3.vout = [CTxOut(tx.vout[0].nValue - 1000, CScript([OP_TRUE, OP_DROP] * 15 + [OP_TRUE]))]
tx3.wit.vtxinwit.append(CTxInWitness())
tx3.wit.vtxinwit[0].scriptWitness.stack = [witness_program]
tx3.rehash()
if self.segwit_status != 'active':
# Just check mempool acceptance, but don't add the transaction to the mempool, since witness is disallowed
# in blocks and the tx is impossible to mine right now.
assert_equal(self.nodes[0].testmempoolaccept([bytes_to_hex_str(tx3.serialize_with_witness())]), [{'txid': tx3.hash, 'allowed': True}])
# Create the same output as tx3, but by replacing tx
tx3_out = tx3.vout[0]
tx3 = tx
tx3.vout = [tx3_out]
tx3.rehash()
assert_equal(self.nodes[0].testmempoolaccept([bytes_to_hex_str(tx3.serialize_with_witness())]), [{'txid': tx3.hash, 'allowed': True}])
test_transaction_acceptance(self.nodes[0].rpc, self.test_node, tx3, with_witness=True, accepted=True)
self.nodes[0].generate(1)
sync_blocks(self.nodes)
self.utxo.pop(0)
self.utxo.append(UTXO(tx3.sha256, 0, tx3.vout[0].nValue))
assert_equal(len(self.nodes[1].getrawmempool()), 0)
@subtest
def advance_to_segwit_active(self):
"""Mine enough blocks to activate segwit."""
height = self.nodes[0].getblockcount()
self.nodes[0].generate(VB_PERIOD - (height % VB_PERIOD) - 2)
assert_equal(get_bip9_status(self.nodes[0], 'segwit')['status'], 'locked_in')
self.nodes[0].generate(1)
assert_equal(get_bip9_status(self.nodes[0], 'segwit')['status'], 'active')
self.segwit_status = 'active'
@subtest
def test_p2sh_witness(self):
"""Test P2SH wrapped witness programs."""
# Prepare the p2sh-wrapped witness output
witness_program = CScript([OP_DROP, OP_TRUE])
witness_hash = sha256(witness_program)
p2wsh_pubkey = CScript([OP_0, witness_hash])
p2sh_witness_hash = hash160(p2wsh_pubkey)
script_pubkey = CScript([OP_HASH160, p2sh_witness_hash, OP_EQUAL])
script_sig = CScript([p2wsh_pubkey]) # a push of the redeem script
# Fund the P2SH output
tx = CTransaction()
tx.vin.append(CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b""))
tx.vout.append(CTxOut(self.utxo[0].nValue - 1000, script_pubkey))
tx.rehash()
# Verify mempool acceptance and block validity
test_transaction_acceptance(self.nodes[0].rpc, self.test_node, tx, with_witness=False, accepted=True)
block = self.build_next_block()
self.update_witness_block_with_transactions(block, [tx])
test_witness_block(self.nodes[0].rpc, self.test_node, block, accepted=True, with_witness=True)
sync_blocks(self.nodes)
# Now test attempts to spend the output.
spend_tx = CTransaction()
spend_tx.vin.append(CTxIn(COutPoint(tx.sha256, 0), script_sig))
spend_tx.vout.append(CTxOut(tx.vout[0].nValue - 1000, CScript([OP_TRUE])))
spend_tx.rehash()
# This transaction should not be accepted into the mempool pre- or
# post-segwit. Mempool acceptance will use SCRIPT_VERIFY_WITNESS which
# will require a witness to spend a witness program regardless of
# segwit activation. Note that older bitcoind versions that are not
# segwit-aware would also reject this for failing CLEANSTACK.
test_transaction_acceptance(self.nodes[0].rpc, self.test_node, spend_tx, with_witness=False, accepted=False)
# Try to put the witness script in the script_sig, should also fail.
spend_tx.vin[0].scriptSig = CScript([p2wsh_pubkey, b'a'])
spend_tx.rehash()
test_transaction_acceptance(self.nodes[0].rpc, self.test_node, spend_tx, with_witness=False, accepted=False)
# Now put the witness script in the witness, should succeed after
# segwit activates.
spend_tx.vin[0].scriptSig = script_sig
spend_tx.rehash()
spend_tx.wit.vtxinwit.append(CTxInWitness())
spend_tx.wit.vtxinwit[0].scriptWitness.stack = [b'a', witness_program]
# Verify mempool acceptance
test_transaction_acceptance(self.nodes[0].rpc, self.test_node, spend_tx, with_witness=True, accepted=True)
block = self.build_next_block()
self.update_witness_block_with_transactions(block, [spend_tx])
# If we're after activation, then sending this with witnesses should be valid.
# This no longer works before activation, because SCRIPT_VERIFY_WITNESS
# is always set.
# TODO: rewrite this test to make clear that it only works after activation.
test_witness_block(self.nodes[0].rpc, self.test_node, block, accepted=True)
# Update self.utxo
self.utxo.pop(0)
self.utxo.append(UTXO(spend_tx.sha256, 0, spend_tx.vout[0].nValue))
@subtest
def test_witness_commitments(self):
"""Test witness commitments.
This test can only be run after segwit has activated."""
# First try a correct witness commitment.
block = self.build_next_block()
add_witness_commitment(block)
block.solve()
# Test the test -- witness serialization should be different
assert(msg_witness_block(block).serialize() != msg_block(block).serialize())
# This empty block should be valid.
test_witness_block(self.nodes[0].rpc, self.test_node, block, accepted=True)
# Try to tweak the nonce
block_2 = self.build_next_block()
add_witness_commitment(block_2, nonce=28)
block_2.solve()
# The commitment should have changed!
assert(block_2.vtx[0].vout[-1] != block.vtx[0].vout[-1])
# This should also be valid.
test_witness_block(self.nodes[0].rpc, self.test_node, block_2, accepted=True)
# Now test commitments with actual transactions
tx = CTransaction()
tx.vin.append(CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b""))
# Let's construct a witness program
witness_program = CScript([OP_TRUE])
witness_hash = sha256(witness_program)
script_pubkey = CScript([OP_0, witness_hash])
tx.vout.append(CTxOut(self.utxo[0].nValue - 1000, script_pubkey))
tx.rehash()
# tx2 will spend tx1, and send back to a regular anyone-can-spend address
tx2 = CTransaction()
tx2.vin.append(CTxIn(COutPoint(tx.sha256, 0), b""))
tx2.vout.append(CTxOut(tx.vout[0].nValue - 1000, witness_program))
tx2.wit.vtxinwit.append(CTxInWitness())
tx2.wit.vtxinwit[0].scriptWitness.stack = [witness_program]
tx2.rehash()
block_3 = self.build_next_block()
self.update_witness_block_with_transactions(block_3, [tx, tx2], nonce=1)
# Add an extra OP_RETURN output that matches the witness commitment template,
# even though it has extra data after the incorrect commitment.
# This block should fail.
block_3.vtx[0].vout.append(CTxOut(0, CScript([OP_RETURN, WITNESS_COMMITMENT_HEADER + ser_uint256(2), 10])))
block_3.vtx[0].rehash()
block_3.hashMerkleRoot = block_3.calc_merkle_root()
block_3.rehash()
block_3.solve()
test_witness_block(self.nodes[0].rpc, self.test_node, block_3, accepted=False)
# Add a different commitment with different nonce, but in the
# right location, and with some funds burned(!).
# This should succeed (nValue shouldn't affect finding the
# witness commitment).
add_witness_commitment(block_3, nonce=0)
block_3.vtx[0].vout[0].nValue -= 1
block_3.vtx[0].vout[-1].nValue += 1
block_3.vtx[0].rehash()
block_3.hashMerkleRoot = block_3.calc_merkle_root()
block_3.rehash()
assert(len(block_3.vtx[0].vout) == 4) # 3 OP_returns
block_3.solve()
test_witness_block(self.nodes[0].rpc, self.test_node, block_3, accepted=True)
# Finally test that a block with no witness transactions can
# omit the commitment.
block_4 = self.build_next_block()
tx3 = CTransaction()
tx3.vin.append(CTxIn(COutPoint(tx2.sha256, 0), b""))
tx3.vout.append(CTxOut(tx.vout[0].nValue - 1000, witness_program))
tx3.rehash()
block_4.vtx.append(tx3)
block_4.hashMerkleRoot = block_4.calc_merkle_root()
block_4.solve()
test_witness_block(self.nodes[0].rpc, self.test_node, block_4, with_witness=False, accepted=True)
# Update available utxo's for use in later test.
self.utxo.pop(0)
self.utxo.append(UTXO(tx3.sha256, 0, tx3.vout[0].nValue))
@subtest
def test_block_malleability(self):
# Make sure that a block that has too big a virtual size
# because of a too-large coinbase witness is not permanently
# marked bad.
block = self.build_next_block()
add_witness_commitment(block)
block.solve()
block.vtx[0].wit.vtxinwit[0].scriptWitness.stack.append(b'a' * 5000000)
assert(get_virtual_size(block) > MAX_BLOCK_BASE_SIZE)
# We can't send over the p2p network, because this is too big to relay
# TODO: repeat this test with a block that can be relayed
self.nodes[0].submitblock(bytes_to_hex_str(block.serialize(True)))
assert(self.nodes[0].getbestblockhash() != block.hash)
block.vtx[0].wit.vtxinwit[0].scriptWitness.stack.pop()
assert(get_virtual_size(block) < MAX_BLOCK_BASE_SIZE)
self.nodes[0].submitblock(bytes_to_hex_str(block.serialize(True)))
assert(self.nodes[0].getbestblockhash() == block.hash)
# Now make sure that malleating the witness reserved value doesn't
# result in a block permanently marked bad.
block = self.build_next_block()
add_witness_commitment(block)
block.solve()
# Change the nonce -- should not cause the block to be permanently
# failed
block.vtx[0].wit.vtxinwit[0].scriptWitness.stack = [ser_uint256(1)]
test_witness_block(self.nodes[0].rpc, self.test_node, block, accepted=False)
# Changing the witness reserved value doesn't change the block hash
block.vtx[0].wit.vtxinwit[0].scriptWitness.stack = [ser_uint256(0)]
test_witness_block(self.nodes[0].rpc, self.test_node, block, accepted=True)
@subtest
def test_witness_block_size(self):
# TODO: Test that non-witness carrying blocks can't exceed 1MB
# Skipping this test for now; this is covered in p2p-fullblocktest.py
# Test that witness-bearing blocks are limited at ceil(base + wit/4) <= 1MB.
block = self.build_next_block()
assert(len(self.utxo) > 0)
# Create a P2WSH transaction.
# The witness program will be a bunch of OP_2DROP's, followed by OP_TRUE.
# This should give us plenty of room to tweak the spending tx's
# virtual size.
NUM_DROPS = 200 # 201 max ops per script!
NUM_OUTPUTS = 50
witness_program = CScript([OP_2DROP] * NUM_DROPS + [OP_TRUE])
witness_hash = uint256_from_str(sha256(witness_program))
script_pubkey = CScript([OP_0, ser_uint256(witness_hash)])
prevout = COutPoint(self.utxo[0].sha256, self.utxo[0].n)
value = self.utxo[0].nValue
parent_tx = CTransaction()
parent_tx.vin.append(CTxIn(prevout, b""))
child_value = int(value / NUM_OUTPUTS)
for i in range(NUM_OUTPUTS):
parent_tx.vout.append(CTxOut(child_value, script_pubkey))
parent_tx.vout[0].nValue -= 50000
assert(parent_tx.vout[0].nValue > 0)
parent_tx.rehash()
child_tx = CTransaction()
for i in range(NUM_OUTPUTS):
child_tx.vin.append(CTxIn(COutPoint(parent_tx.sha256, i), b""))
child_tx.vout = [CTxOut(value - 100000, CScript([OP_TRUE]))]
for i in range(NUM_OUTPUTS):
child_tx.wit.vtxinwit.append(CTxInWitness())
child_tx.wit.vtxinwit[-1].scriptWitness.stack = [b'a' * 195] * (2 * NUM_DROPS) + [witness_program]
child_tx.rehash()
self.update_witness_block_with_transactions(block, [parent_tx, child_tx])
vsize = get_virtual_size(block)
additional_bytes = (MAX_BLOCK_BASE_SIZE - vsize) * 4
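        # Witness bytes count one weight unit each, and vsize = ceil(weight / 4),
        # so roughly four extra witness bytes are needed per additional vbyte.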
i = 0
while additional_bytes > 0:
# Add some more bytes to each input until we hit MAX_BLOCK_BASE_SIZE+1
extra_bytes = min(additional_bytes + 1, 55)
block.vtx[-1].wit.vtxinwit[int(i / (2 * NUM_DROPS))].scriptWitness.stack[i % (2 * NUM_DROPS)] = b'a' * (195 + extra_bytes)
additional_bytes -= extra_bytes
i += 1
block.vtx[0].vout.pop() # Remove old commitment
add_witness_commitment(block)
block.solve()
vsize = get_virtual_size(block)
assert_equal(vsize, MAX_BLOCK_BASE_SIZE + 1)
# Make sure that our test case would exceed the old max-network-message
# limit
assert(len(block.serialize(True)) > 2 * 1024 * 1024)
test_witness_block(self.nodes[0].rpc, self.test_node, block, accepted=False)
# Now resize the second transaction to make the block fit.
cur_length = len(block.vtx[-1].wit.vtxinwit[0].scriptWitness.stack[0])
block.vtx[-1].wit.vtxinwit[0].scriptWitness.stack[0] = b'a' * (cur_length - 1)
block.vtx[0].vout.pop()
add_witness_commitment(block)
block.solve()
assert(get_virtual_size(block) == MAX_BLOCK_BASE_SIZE)
test_witness_block(self.nodes[0].rpc, self.test_node, block, accepted=True)
# Update available utxo's
self.utxo.pop(0)
self.utxo.append(UTXO(block.vtx[-1].sha256, 0, block.vtx[-1].vout[0].nValue))
@subtest
def test_submit_block(self):
"""Test that submitblock adds the nonce automatically when possible."""
block = self.build_next_block()
# Try using a custom nonce and then don't supply it.
        # This shouldn't work: the node fills in a zero nonce, which won't
        # match the commitment built with nonce=1.
add_witness_commitment(block, nonce=1)
block.vtx[0].wit = CTxWitness() # drop the nonce
block.solve()
self.nodes[0].submitblock(bytes_to_hex_str(block.serialize(True)))
assert(self.nodes[0].getbestblockhash() != block.hash)
# Now redo commitment with the standard nonce, but let bitcoind fill it in.
add_witness_commitment(block, nonce=0)
block.vtx[0].wit = CTxWitness()
block.solve()
self.nodes[0].submitblock(bytes_to_hex_str(block.serialize(True)))
assert_equal(self.nodes[0].getbestblockhash(), block.hash)
# This time, add a tx with non-empty witness, but don't supply
# the commitment.
block_2 = self.build_next_block()
add_witness_commitment(block_2)
block_2.solve()
# Drop commitment and nonce -- submitblock should not fill in.
block_2.vtx[0].vout.pop()
block_2.vtx[0].wit = CTxWitness()
self.nodes[0].submitblock(bytes_to_hex_str(block_2.serialize(True)))
# Tip should not advance!
assert(self.nodes[0].getbestblockhash() != block_2.hash)
@subtest
def test_extra_witness_data(self):
"""Test extra witness data in a transaction."""
block = self.build_next_block()
witness_program = CScript([OP_DROP, OP_TRUE])
witness_hash = sha256(witness_program)
script_pubkey = CScript([OP_0, witness_hash])
# First try extra witness data on a tx that doesn't require a witness
tx = CTransaction()
tx.vin.append(CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b""))
tx.vout.append(CTxOut(self.utxo[0].nValue - 2000, script_pubkey))
tx.vout.append(CTxOut(1000, CScript([OP_TRUE]))) # non-witness output
tx.wit.vtxinwit.append(CTxInWitness())
tx.wit.vtxinwit[0].scriptWitness.stack = [CScript([])]
tx.rehash()
self.update_witness_block_with_transactions(block, [tx])
# Extra witness data should not be allowed.
test_witness_block(self.nodes[0].rpc, self.test_node, block, accepted=False)
# Try extra signature data. Ok if we're not spending a witness output.
block.vtx[1].wit.vtxinwit = []
block.vtx[1].vin[0].scriptSig = CScript([OP_0])
block.vtx[1].rehash()
add_witness_commitment(block)
block.solve()
test_witness_block(self.nodes[0].rpc, self.test_node, block, accepted=True)
# Now try extra witness/signature data on an input that DOES require a
# witness
tx2 = CTransaction()
tx2.vin.append(CTxIn(COutPoint(tx.sha256, 0), b"")) # witness output
tx2.vin.append(CTxIn(COutPoint(tx.sha256, 1), b"")) # non-witness
tx2.vout.append(CTxOut(tx.vout[0].nValue, CScript([OP_TRUE])))
tx2.wit.vtxinwit.extend([CTxInWitness(), CTxInWitness()])
tx2.wit.vtxinwit[0].scriptWitness.stack = [CScript([CScriptNum(1)]), CScript([CScriptNum(1)]), witness_program]
tx2.wit.vtxinwit[1].scriptWitness.stack = [CScript([OP_TRUE])]
block = self.build_next_block()
self.update_witness_block_with_transactions(block, [tx2])
# This has extra witness data, so it should fail.
test_witness_block(self.nodes[0].rpc, self.test_node, block, accepted=False)
# Now get rid of the extra witness, but add extra scriptSig data
tx2.vin[0].scriptSig = CScript([OP_TRUE])
tx2.vin[1].scriptSig = CScript([OP_TRUE])
tx2.wit.vtxinwit[0].scriptWitness.stack.pop(0)
tx2.wit.vtxinwit[1].scriptWitness.stack = []
tx2.rehash()
add_witness_commitment(block)
block.solve()
# This has extra signature data for a witness input, so it should fail.
test_witness_block(self.nodes[0].rpc, self.test_node, block, accepted=False)
# Now get rid of the extra scriptsig on the witness input, and verify
# success (even with extra scriptsig data in the non-witness input)
tx2.vin[0].scriptSig = b""
tx2.rehash()
add_witness_commitment(block)
block.solve()
test_witness_block(self.nodes[0].rpc, self.test_node, block, accepted=True)
# Update utxo for later tests
self.utxo.pop(0)
self.utxo.append(UTXO(tx2.sha256, 0, tx2.vout[0].nValue))
@subtest
def test_max_witness_push_length(self):
"""Test that witness stack can only allow up to 520 byte pushes."""
block = self.build_next_block()
witness_program = CScript([OP_DROP, OP_TRUE])
witness_hash = sha256(witness_program)
script_pubkey = CScript([OP_0, witness_hash])
tx = CTransaction()
tx.vin.append(CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b""))
tx.vout.append(CTxOut(self.utxo[0].nValue - 1000, script_pubkey))
tx.rehash()
tx2 = CTransaction()
tx2.vin.append(CTxIn(COutPoint(tx.sha256, 0), b""))
tx2.vout.append(CTxOut(tx.vout[0].nValue - 1000, CScript([OP_TRUE])))
tx2.wit.vtxinwit.append(CTxInWitness())
# First try a 521-byte stack element
tx2.wit.vtxinwit[0].scriptWitness.stack = [b'a' * (MAX_SCRIPT_ELEMENT_SIZE + 1), witness_program]
tx2.rehash()
self.update_witness_block_with_transactions(block, [tx, tx2])
test_witness_block(self.nodes[0].rpc, self.test_node, block, accepted=False)
# Now reduce the length of the stack element
tx2.wit.vtxinwit[0].scriptWitness.stack[0] = b'a' * (MAX_SCRIPT_ELEMENT_SIZE)
add_witness_commitment(block)
block.solve()
test_witness_block(self.nodes[0].rpc, self.test_node, block, accepted=True)
# Update the utxo for later tests
self.utxo.pop()
self.utxo.append(UTXO(tx2.sha256, 0, tx2.vout[0].nValue))
@subtest
def test_max_witness_program_length(self):
"""Test that witness outputs greater than 10kB can't be spent."""
MAX_PROGRAM_LENGTH = 10000
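        # BIP141 caps a P2WSH witnessScript at 10,000 bytes (MAX_SCRIPT_SIZE).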
# This program is 19 max pushes (9937 bytes), then 64 more opcode-bytes.
long_witness_program = CScript([b'a' * 520] * 19 + [OP_DROP] * 63 + [OP_TRUE])
assert(len(long_witness_program) == MAX_PROGRAM_LENGTH + 1)
long_witness_hash = sha256(long_witness_program)
long_script_pubkey = CScript([OP_0, long_witness_hash])
block = self.build_next_block()
tx = CTransaction()
tx.vin.append(CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b""))
tx.vout.append(CTxOut(self.utxo[0].nValue - 1000, long_script_pubkey))
tx.rehash()
tx2 = CTransaction()
tx2.vin.append(CTxIn(COutPoint(tx.sha256, 0), b""))
tx2.vout.append(CTxOut(tx.vout[0].nValue - 1000, CScript([OP_TRUE])))
tx2.wit.vtxinwit.append(CTxInWitness())
tx2.wit.vtxinwit[0].scriptWitness.stack = [b'a'] * 44 + [long_witness_program]
tx2.rehash()
self.update_witness_block_with_transactions(block, [tx, tx2])
test_witness_block(self.nodes[0].rpc, self.test_node, block, accepted=False)
# Try again with one less byte in the witness program
witness_program = CScript([b'a' * 520] * 19 + [OP_DROP] * 62 + [OP_TRUE])
assert(len(witness_program) == MAX_PROGRAM_LENGTH)
witness_hash = sha256(witness_program)
script_pubkey = CScript([OP_0, witness_hash])
tx.vout[0] = CTxOut(tx.vout[0].nValue, script_pubkey)
tx.rehash()
tx2.vin[0].prevout.hash = tx.sha256
tx2.wit.vtxinwit[0].scriptWitness.stack = [b'a'] * 43 + [witness_program]
tx2.rehash()
block.vtx = [block.vtx[0]]
self.update_witness_block_with_transactions(block, [tx, tx2])
test_witness_block(self.nodes[0].rpc, self.test_node, block, accepted=True)
self.utxo.pop()
self.utxo.append(UTXO(tx2.sha256, 0, tx2.vout[0].nValue))
@subtest
def test_witness_input_length(self):
"""Test that vin length must match vtxinwit length."""
witness_program = CScript([OP_DROP, OP_TRUE])
witness_hash = sha256(witness_program)
script_pubkey = CScript([OP_0, witness_hash])
# Create a transaction that splits our utxo into many outputs
tx = CTransaction()
tx.vin.append(CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b""))
value = self.utxo[0].nValue
for i in range(10):
tx.vout.append(CTxOut(int(value / 10), script_pubkey))
tx.vout[0].nValue -= 1000
assert(tx.vout[0].nValue >= 0)
block = self.build_next_block()
self.update_witness_block_with_transactions(block, [tx])
test_witness_block(self.nodes[0].rpc, self.test_node, block, accepted=True)
# Try various ways to spend tx that should all break.
# This "broken" transaction serializer will not normalize
# the length of vtxinwit.
class BrokenCTransaction(CTransaction):
def serialize_with_witness(self):
flags = 0
if not self.wit.is_null():
flags |= 1
r = b""
r += struct.pack("<i", self.nVersion)
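                # BIP144 extended serialization: an empty vin vector serializes
                # to the 0x00 marker byte, followed by the flag byte.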
if flags:
dummy = []
r += ser_vector(dummy)
r += struct.pack("<B", flags)
r += ser_vector(self.vin)
r += ser_vector(self.vout)
if flags & 1:
r += self.wit.serialize()
r += struct.pack("<I", self.nLockTime)
return r
tx2 = BrokenCTransaction()
for i in range(10):
tx2.vin.append(CTxIn(COutPoint(tx.sha256, i), b""))
tx2.vout.append(CTxOut(value - 3000, CScript([OP_TRUE])))
# First try using a too long vtxinwit
for i in range(11):
tx2.wit.vtxinwit.append(CTxInWitness())
tx2.wit.vtxinwit[i].scriptWitness.stack = [b'a', witness_program]
block = self.build_next_block()
self.update_witness_block_with_transactions(block, [tx2])
test_witness_block(self.nodes[0].rpc, self.test_node, block, accepted=False)
# Now try using a too short vtxinwit
tx2.wit.vtxinwit.pop()
tx2.wit.vtxinwit.pop()
block.vtx = [block.vtx[0]]
self.update_witness_block_with_transactions(block, [tx2])
test_witness_block(self.nodes[0].rpc, self.test_node, block, accepted=False)
# Now make one of the intermediate witnesses be incorrect
tx2.wit.vtxinwit.append(CTxInWitness())
tx2.wit.vtxinwit[-1].scriptWitness.stack = [b'a', witness_program]
tx2.wit.vtxinwit[5].scriptWitness.stack = [witness_program]
block.vtx = [block.vtx[0]]
self.update_witness_block_with_transactions(block, [tx2])
test_witness_block(self.nodes[0].rpc, self.test_node, block, accepted=False)
# Fix the broken witness and the block should be accepted.
tx2.wit.vtxinwit[5].scriptWitness.stack = [b'a', witness_program]
block.vtx = [block.vtx[0]]
self.update_witness_block_with_transactions(block, [tx2])
test_witness_block(self.nodes[0].rpc, self.test_node, block, accepted=True)
self.utxo.pop()
self.utxo.append(UTXO(tx2.sha256, 0, tx2.vout[0].nValue))
@subtest
def test_tx_relay_after_segwit_activation(self):
"""Test transaction relay after segwit activation.
After segwit activates, verify that mempool:
- rejects transactions with unnecessary/extra witnesses
- accepts transactions with valid witnesses
and that witness transactions are relayed to non-upgraded peers."""
# Generate a transaction that doesn't require a witness, but send it
# with a witness. Should be rejected because we can't use a witness
# when spending a non-witness output.
tx = CTransaction()
tx.vin.append(CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b""))
tx.vout.append(CTxOut(self.utxo[0].nValue - 1000, CScript([OP_TRUE, OP_DROP] * 15 + [OP_TRUE])))
tx.wit.vtxinwit.append(CTxInWitness())
tx.wit.vtxinwit[0].scriptWitness.stack = [b'a']
tx.rehash()
tx_hash = tx.sha256
# Verify that unnecessary witnesses are rejected.
self.test_node.announce_tx_and_wait_for_getdata(tx)
assert_equal(len(self.nodes[0].getrawmempool()), 0)
test_transaction_acceptance(self.nodes[0].rpc, self.test_node, tx, with_witness=True, accepted=False)
# Verify that removing the witness succeeds.
self.test_node.announce_tx_and_wait_for_getdata(tx)
test_transaction_acceptance(self.nodes[0].rpc, self.test_node, tx, with_witness=False, accepted=True)
# Now try to add extra witness data to a valid witness tx.
witness_program = CScript([OP_TRUE])
witness_hash = sha256(witness_program)
script_pubkey = CScript([OP_0, witness_hash])
tx2 = CTransaction()
tx2.vin.append(CTxIn(COutPoint(tx_hash, 0), b""))
tx2.vout.append(CTxOut(tx.vout[0].nValue - 1000, script_pubkey))
tx2.rehash()
tx3 = CTransaction()
tx3.vin.append(CTxIn(COutPoint(tx2.sha256, 0), b""))
tx3.wit.vtxinwit.append(CTxInWitness())
# Add too-large for IsStandard witness and check that it does not enter reject filter
p2sh_program = CScript([OP_TRUE])
p2sh_pubkey = hash160(p2sh_program)
witness_program2 = CScript([b'a' * 400000])
tx3.vout.append(CTxOut(tx2.vout[0].nValue - 1000, CScript([OP_HASH160, p2sh_pubkey, OP_EQUAL])))
tx3.wit.vtxinwit[0].scriptWitness.stack = [witness_program2]
tx3.rehash()
# Node will not be blinded to the transaction
self.std_node.announce_tx_and_wait_for_getdata(tx3)
test_transaction_acceptance(self.nodes[1].rpc, self.std_node, tx3, True, False, b'tx-size')
self.std_node.announce_tx_and_wait_for_getdata(tx3)
test_transaction_acceptance(self.nodes[1].rpc, self.std_node, tx3, True, False, b'tx-size')
# Remove witness stuffing, instead add extra witness push on stack
tx3.vout[0] = CTxOut(tx2.vout[0].nValue - 1000, CScript([OP_TRUE, OP_DROP] * 15 + [OP_TRUE]))
tx3.wit.vtxinwit[0].scriptWitness.stack = [CScript([CScriptNum(1)]), witness_program]
tx3.rehash()
test_transaction_acceptance(self.nodes[0].rpc, self.test_node, tx2, with_witness=True, accepted=True)
test_transaction_acceptance(self.nodes[0].rpc, self.test_node, tx3, with_witness=True, accepted=False)
# Get rid of the extra witness, and verify acceptance.
tx3.wit.vtxinwit[0].scriptWitness.stack = [witness_program]
# Also check that old_node gets a tx announcement, even though this is
# a witness transaction.
self.old_node.wait_for_inv([CInv(1, tx2.sha256)]) # wait until tx2 was inv'ed
test_transaction_acceptance(self.nodes[0].rpc, self.test_node, tx3, with_witness=True, accepted=True)
self.old_node.wait_for_inv([CInv(1, tx3.sha256)])
# Test that getrawtransaction returns correct witness information
# hash, size, vsize
raw_tx = self.nodes[0].getrawtransaction(tx3.hash, 1)
assert_equal(int(raw_tx["hash"], 16), tx3.calc_sha256(True))
assert_equal(raw_tx["size"], len(tx3.serialize_with_witness()))
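        # BIP141: weight = 3 * base size + total size, and vsize = ceil(weight / 4)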
weight = len(tx3.serialize_with_witness()) + 3 * len(tx3.serialize_without_witness())
vsize = math.ceil(weight / 4)
assert_equal(raw_tx["vsize"], vsize)
assert_equal(raw_tx["weight"], weight)
assert_equal(len(raw_tx["vin"][0]["txinwitness"]), 1)
assert_equal(raw_tx["vin"][0]["txinwitness"][0], hexlify(witness_program).decode('ascii'))
assert(vsize != raw_tx["size"])
# Cleanup: mine the transactions and update utxo for next test
self.nodes[0].generate(1)
assert_equal(len(self.nodes[0].getrawmempool()), 0)
self.utxo.pop(0)
self.utxo.append(UTXO(tx3.sha256, 0, tx3.vout[0].nValue))
@subtest
def test_segwit_versions(self):
"""Test validity of future segwit version transactions.
Future segwit version transactions are non-standard, but valid in blocks.
Can run this before and after segwit activation."""
        NUM_SEGWIT_VERSIONS = 17  # will test OP_0, OP_1, ..., OP_16
if len(self.utxo) < NUM_SEGWIT_VERSIONS:
tx = CTransaction()
tx.vin.append(CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b""))
split_value = (self.utxo[0].nValue - 4000) // NUM_SEGWIT_VERSIONS
for i in range(NUM_SEGWIT_VERSIONS):
tx.vout.append(CTxOut(split_value, CScript([OP_TRUE])))
tx.rehash()
block = self.build_next_block()
self.update_witness_block_with_transactions(block, [tx])
test_witness_block(self.nodes[0].rpc, self.test_node, block, accepted=True)
self.utxo.pop(0)
for i in range(NUM_SEGWIT_VERSIONS):
self.utxo.append(UTXO(tx.sha256, i, split_value))
sync_blocks(self.nodes)
temp_utxo = []
tx = CTransaction()
witness_program = CScript([OP_TRUE])
witness_hash = sha256(witness_program)
assert_equal(len(self.nodes[1].getrawmempool()), 0)
for version in list(range(OP_1, OP_16 + 1)) + [OP_0]:
# First try to spend to a future version segwit script_pubkey.
script_pubkey = CScript([CScriptOp(version), witness_hash])
tx.vin = [CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b"")]
tx.vout = [CTxOut(self.utxo[0].nValue - 1000, script_pubkey)]
tx.rehash()
test_transaction_acceptance(self.nodes[1].rpc, self.std_node, tx, with_witness=True, accepted=False)
test_transaction_acceptance(self.nodes[0].rpc, self.test_node, tx, with_witness=True, accepted=True)
self.utxo.pop(0)
temp_utxo.append(UTXO(tx.sha256, 0, tx.vout[0].nValue))
self.nodes[0].generate(1) # Mine all the transactions
sync_blocks(self.nodes)
assert(len(self.nodes[0].getrawmempool()) == 0)
# Finally, verify that version 0 -> version 1 transactions
# are non-standard
script_pubkey = CScript([CScriptOp(OP_1), witness_hash])
tx2 = CTransaction()
tx2.vin = [CTxIn(COutPoint(tx.sha256, 0), b"")]
tx2.vout = [CTxOut(tx.vout[0].nValue - 1000, script_pubkey)]
tx2.wit.vtxinwit.append(CTxInWitness())
tx2.wit.vtxinwit[0].scriptWitness.stack = [witness_program]
tx2.rehash()
        # Gets accepted to test_node, because standardness of outputs isn't
        # checked when fRequireStandard is false
test_transaction_acceptance(self.nodes[0].rpc, self.test_node, tx2, with_witness=True, accepted=True)
test_transaction_acceptance(self.nodes[1].rpc, self.std_node, tx2, with_witness=True, accepted=False)
temp_utxo.pop() # last entry in temp_utxo was the output we just spent
temp_utxo.append(UTXO(tx2.sha256, 0, tx2.vout[0].nValue))
# Spend everything in temp_utxo back to an OP_TRUE output.
tx3 = CTransaction()
total_value = 0
for i in temp_utxo:
tx3.vin.append(CTxIn(COutPoint(i.sha256, i.n), b""))
tx3.wit.vtxinwit.append(CTxInWitness())
total_value += i.nValue
tx3.wit.vtxinwit[-1].scriptWitness.stack = [witness_program]
tx3.vout.append(CTxOut(total_value - 1000, CScript([OP_TRUE])))
tx3.rehash()
# Spending a higher version witness output is not allowed by policy,
# even with fRequireStandard=false.
test_transaction_acceptance(self.nodes[0].rpc, self.test_node, tx3, with_witness=True, accepted=False)
self.test_node.sync_with_ping()
with mininode_lock:
assert(b"reserved for soft-fork upgrades" in self.test_node.last_message["reject"].reason)
# Building a block with the transaction must be valid, however.
block = self.build_next_block()
self.update_witness_block_with_transactions(block, [tx2, tx3])
test_witness_block(self.nodes[0].rpc, self.test_node, block, accepted=True)
sync_blocks(self.nodes)
# Add utxo to our list
self.utxo.append(UTXO(tx3.sha256, 0, tx3.vout[0].nValue))
@subtest
def test_premature_coinbase_witness_spend(self):
block = self.build_next_block()
# Change the output of the block to be a witness output.
witness_program = CScript([OP_TRUE])
witness_hash = sha256(witness_program)
script_pubkey = CScript([OP_0, witness_hash])
block.vtx[0].vout[0].scriptPubKey = script_pubkey
# This next line will rehash the coinbase and update the merkle
# root, and solve.
self.update_witness_block_with_transactions(block, [])
test_witness_block(self.nodes[0].rpc, self.test_node, block, accepted=True)
spend_tx = CTransaction()
spend_tx.vin = [CTxIn(COutPoint(block.vtx[0].sha256, 0), b"")]
spend_tx.vout = [CTxOut(block.vtx[0].vout[0].nValue, witness_program)]
spend_tx.wit.vtxinwit.append(CTxInWitness())
spend_tx.wit.vtxinwit[0].scriptWitness.stack = [witness_program]
spend_tx.rehash()
# Now test a premature spend.
self.nodes[0].generate(98)
sync_blocks(self.nodes)
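        # A coinbase output can only be spent 100 blocks after it was mined;
        # the next block would be only 99 blocks later, so this spend is premature.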
block2 = self.build_next_block()
self.update_witness_block_with_transactions(block2, [spend_tx])
test_witness_block(self.nodes[0].rpc, self.test_node, block2, accepted=False)
# Advancing one more block should allow the spend.
self.nodes[0].generate(1)
block2 = self.build_next_block()
self.update_witness_block_with_transactions(block2, [spend_tx])
test_witness_block(self.nodes[0].rpc, self.test_node, block2, accepted=True)
sync_blocks(self.nodes)
@subtest
def test_uncompressed_pubkey(self):
"""Test uncompressed pubkey validity in segwit transactions.
Uncompressed pubkeys are no longer supported in default relay policy,
but (for now) are still valid in blocks."""
# Segwit transactions using uncompressed pubkeys are not accepted
# under default policy, but should still pass consensus.
key = CECKey()
key.set_secretbytes(b"9")
key.set_compressed(False)
pubkey = CPubKey(key.get_pubkey())
assert_equal(len(pubkey), 65) # This should be an uncompressed pubkey
utxo = self.utxo.pop(0)
# Test 1: P2WPKH
# First create a P2WPKH output that uses an uncompressed pubkey
pubkeyhash = hash160(pubkey)
script_pkh = CScript([OP_0, pubkeyhash])
tx = CTransaction()
tx.vin.append(CTxIn(COutPoint(utxo.sha256, utxo.n), b""))
tx.vout.append(CTxOut(utxo.nValue - 1000, script_pkh))
tx.rehash()
# Confirm it in a block.
block = self.build_next_block()
self.update_witness_block_with_transactions(block, [tx])
test_witness_block(self.nodes[0].rpc, self.test_node, block, accepted=True)
# Now try to spend it. Send it to a P2WSH output, which we'll
# use in the next test.
witness_program = CScript([pubkey, CScriptOp(OP_CHECKSIG)])
witness_hash = sha256(witness_program)
script_wsh = CScript([OP_0, witness_hash])
tx2 = CTransaction()
tx2.vin.append(CTxIn(COutPoint(tx.sha256, 0), b""))
tx2.vout.append(CTxOut(tx.vout[0].nValue - 1000, script_wsh))
script = get_p2pkh_script(pubkeyhash)
sig_hash = SegwitVersion1SignatureHash(script, tx2, 0, SIGHASH_ALL, tx.vout[0].nValue)
signature = key.sign(sig_hash) + b'\x01' # 0x1 is SIGHASH_ALL
tx2.wit.vtxinwit.append(CTxInWitness())
tx2.wit.vtxinwit[0].scriptWitness.stack = [signature, pubkey]
tx2.rehash()
# Should fail policy test.
test_transaction_acceptance(self.nodes[0].rpc, self.test_node, tx2, True, False, b'non-mandatory-script-verify-flag (Using non-compressed keys in segwit)')
# But passes consensus.
block = self.build_next_block()
self.update_witness_block_with_transactions(block, [tx2])
test_witness_block(self.nodes[0].rpc, self.test_node, block, accepted=True)
# Test 2: P2WSH
# Try to spend the P2WSH output created in last test.
# Send it to a P2SH(P2WSH) output, which we'll use in the next test.
p2sh_witness_hash = hash160(script_wsh)
script_p2sh = CScript([OP_HASH160, p2sh_witness_hash, OP_EQUAL])
script_sig = CScript([script_wsh])
tx3 = CTransaction()
tx3.vin.append(CTxIn(COutPoint(tx2.sha256, 0), b""))
tx3.vout.append(CTxOut(tx2.vout[0].nValue - 1000, script_p2sh))
tx3.wit.vtxinwit.append(CTxInWitness())
sign_p2pk_witness_input(witness_program, tx3, 0, SIGHASH_ALL, tx2.vout[0].nValue, key)
# Should fail policy test.
test_transaction_acceptance(self.nodes[0].rpc, self.test_node, tx3, True, False, b'non-mandatory-script-verify-flag (Using non-compressed keys in segwit)')
# But passes consensus.
block = self.build_next_block()
self.update_witness_block_with_transactions(block, [tx3])
test_witness_block(self.nodes[0].rpc, self.test_node, block, accepted=True)
# Test 3: P2SH(P2WSH)
# Try to spend the P2SH output created in the last test.
# Send it to a P2PKH output, which we'll use in the next test.
script_pubkey = get_p2pkh_script(pubkeyhash)
tx4 = CTransaction()
tx4.vin.append(CTxIn(COutPoint(tx3.sha256, 0), script_sig))
tx4.vout.append(CTxOut(tx3.vout[0].nValue - 1000, script_pubkey))
tx4.wit.vtxinwit.append(CTxInWitness())
sign_p2pk_witness_input(witness_program, tx4, 0, SIGHASH_ALL, tx3.vout[0].nValue, key)
# Should fail policy test.
test_transaction_acceptance(self.nodes[0].rpc, self.test_node, tx4, True, False, b'non-mandatory-script-verify-flag (Using non-compressed keys in segwit)')
block = self.build_next_block()
self.update_witness_block_with_transactions(block, [tx4])
test_witness_block(self.nodes[0].rpc, self.test_node, block, accepted=True)
# Test 4: Uncompressed pubkeys should still be valid in non-segwit
# transactions.
tx5 = CTransaction()
tx5.vin.append(CTxIn(COutPoint(tx4.sha256, 0), b""))
tx5.vout.append(CTxOut(tx4.vout[0].nValue - 1000, CScript([OP_TRUE])))
(sig_hash, err) = SignatureHash(script_pubkey, tx5, 0, SIGHASH_ALL)
signature = key.sign(sig_hash) + b'\x01' # 0x1 is SIGHASH_ALL
tx5.vin[0].scriptSig = CScript([signature, pubkey])
tx5.rehash()
# Should pass policy and consensus.
test_transaction_acceptance(self.nodes[0].rpc, self.test_node, tx5, True, True)
block = self.build_next_block()
self.update_witness_block_with_transactions(block, [tx5])
test_witness_block(self.nodes[0].rpc, self.test_node, block, accepted=True)
self.utxo.append(UTXO(tx5.sha256, 0, tx5.vout[0].nValue))
@subtest
def test_signature_version_1(self):
key = CECKey()
key.set_secretbytes(b"9")
pubkey = CPubKey(key.get_pubkey())
witness_program = CScript([pubkey, CScriptOp(OP_CHECKSIG)])
witness_hash = sha256(witness_program)
script_pubkey = CScript([OP_0, witness_hash])
# First create a witness output for use in the tests.
tx = CTransaction()
tx.vin.append(CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b""))
tx.vout.append(CTxOut(self.utxo[0].nValue - 1000, script_pubkey))
tx.rehash()
test_transaction_acceptance(self.nodes[0].rpc, self.test_node, tx, with_witness=True, accepted=True)
# Mine this transaction in preparation for following tests.
block = self.build_next_block()
self.update_witness_block_with_transactions(block, [tx])
test_witness_block(self.nodes[0].rpc, self.test_node, block, accepted=True)
sync_blocks(self.nodes)
self.utxo.pop(0)
# Test each hashtype
prev_utxo = UTXO(tx.sha256, 0, tx.vout[0].nValue)
for sigflag in [0, SIGHASH_ANYONECANPAY]:
for hashtype in [SIGHASH_ALL, SIGHASH_NONE, SIGHASH_SINGLE]:
hashtype |= sigflag
block = self.build_next_block()
tx = CTransaction()
tx.vin.append(CTxIn(COutPoint(prev_utxo.sha256, prev_utxo.n), b""))
tx.vout.append(CTxOut(prev_utxo.nValue - 1000, script_pubkey))
tx.wit.vtxinwit.append(CTxInWitness())
# Too-large input value
sign_p2pk_witness_input(witness_program, tx, 0, hashtype, prev_utxo.nValue + 1, key)
self.update_witness_block_with_transactions(block, [tx])
test_witness_block(self.nodes[0].rpc, self.test_node, block, accepted=False)
# Too-small input value
sign_p2pk_witness_input(witness_program, tx, 0, hashtype, prev_utxo.nValue - 1, key)
block.vtx.pop() # remove last tx
self.update_witness_block_with_transactions(block, [tx])
test_witness_block(self.nodes[0].rpc, self.test_node, block, accepted=False)
# Now try correct value
sign_p2pk_witness_input(witness_program, tx, 0, hashtype, prev_utxo.nValue, key)
block.vtx.pop()
self.update_witness_block_with_transactions(block, [tx])
test_witness_block(self.nodes[0].rpc, self.test_node, block, accepted=True)
prev_utxo = UTXO(tx.sha256, 0, tx.vout[0].nValue)
# Test combinations of signature hashes.
# Split the utxo into a lot of outputs.
# Randomly choose up to 10 to spend, sign with different hashtypes, and
# output to a random number of outputs. Repeat NUM_SIGHASH_TESTS times.
# Ensure that we've tested a situation where we use SIGHASH_SINGLE with
# an input index > number of outputs.
NUM_SIGHASH_TESTS = 500
temp_utxos = []
tx = CTransaction()
tx.vin.append(CTxIn(COutPoint(prev_utxo.sha256, prev_utxo.n), b""))
split_value = prev_utxo.nValue // NUM_SIGHASH_TESTS
for i in range(NUM_SIGHASH_TESTS):
tx.vout.append(CTxOut(split_value, script_pubkey))
tx.wit.vtxinwit.append(CTxInWitness())
sign_p2pk_witness_input(witness_program, tx, 0, SIGHASH_ALL, prev_utxo.nValue, key)
for i in range(NUM_SIGHASH_TESTS):
temp_utxos.append(UTXO(tx.sha256, i, split_value))
block = self.build_next_block()
self.update_witness_block_with_transactions(block, [tx])
test_witness_block(self.nodes[0].rpc, self.test_node, block, accepted=True)
block = self.build_next_block()
used_sighash_single_out_of_bounds = False
for i in range(NUM_SIGHASH_TESTS):
# Ping regularly to keep the connection alive
if (not i % 100):
self.test_node.sync_with_ping()
# Choose random number of inputs to use.
num_inputs = random.randint(1, 10)
# Create a slight bias for producing more utxos
num_outputs = random.randint(1, 11)
random.shuffle(temp_utxos)
assert(len(temp_utxos) > num_inputs)
tx = CTransaction()
total_value = 0
for i in range(num_inputs):
tx.vin.append(CTxIn(COutPoint(temp_utxos[i].sha256, temp_utxos[i].n), b""))
tx.wit.vtxinwit.append(CTxInWitness())
total_value += temp_utxos[i].nValue
split_value = total_value // num_outputs
for i in range(num_outputs):
tx.vout.append(CTxOut(split_value, script_pubkey))
for i in range(num_inputs):
# Now try to sign each input, using a random hashtype.
anyonecanpay = 0
if random.randint(0, 1):
anyonecanpay = SIGHASH_ANYONECANPAY
hashtype = random.randint(1, 3) | anyonecanpay
sign_p2pk_witness_input(witness_program, tx, i, hashtype, temp_utxos[i].nValue, key)
if (hashtype == SIGHASH_SINGLE and i >= num_outputs):
used_sighash_single_out_of_bounds = True
tx.rehash()
for i in range(num_outputs):
temp_utxos.append(UTXO(tx.sha256, i, split_value))
temp_utxos = temp_utxos[num_inputs:]
block.vtx.append(tx)
# Test the block periodically, if we're close to maxblocksize
if (get_virtual_size(block) > MAX_BLOCK_BASE_SIZE - 1000):
self.update_witness_block_with_transactions(block, [])
test_witness_block(self.nodes[0].rpc, self.test_node, block, accepted=True)
block = self.build_next_block()
if (not used_sighash_single_out_of_bounds):
self.log.info("WARNING: this test run didn't attempt SIGHASH_SINGLE with out-of-bounds index value")
# Test the transactions we've added to the block
if (len(block.vtx) > 1):
self.update_witness_block_with_transactions(block, [])
test_witness_block(self.nodes[0].rpc, self.test_node, block, accepted=True)
# Now test witness version 0 P2PKH transactions
pubkeyhash = hash160(pubkey)
script_pkh = CScript([OP_0, pubkeyhash])
tx = CTransaction()
tx.vin.append(CTxIn(COutPoint(temp_utxos[0].sha256, temp_utxos[0].n), b""))
tx.vout.append(CTxOut(temp_utxos[0].nValue, script_pkh))
tx.wit.vtxinwit.append(CTxInWitness())
sign_p2pk_witness_input(witness_program, tx, 0, SIGHASH_ALL, temp_utxos[0].nValue, key)
tx2 = CTransaction()
tx2.vin.append(CTxIn(COutPoint(tx.sha256, 0), b""))
tx2.vout.append(CTxOut(tx.vout[0].nValue, CScript([OP_TRUE])))
script = get_p2pkh_script(pubkeyhash)
sig_hash = SegwitVersion1SignatureHash(script, tx2, 0, SIGHASH_ALL, tx.vout[0].nValue)
signature = key.sign(sig_hash) + b'\x01' # 0x1 is SIGHASH_ALL
# Check that we can't have a scriptSig
tx2.vin[0].scriptSig = CScript([signature, pubkey])
block = self.build_next_block()
self.update_witness_block_with_transactions(block, [tx, tx2])
test_witness_block(self.nodes[0].rpc, self.test_node, block, accepted=False)
# Move the signature to the witness.
block.vtx.pop()
tx2.wit.vtxinwit.append(CTxInWitness())
tx2.wit.vtxinwit[0].scriptWitness.stack = [signature, pubkey]
tx2.vin[0].scriptSig = b""
tx2.rehash()
self.update_witness_block_with_transactions(block, [tx2])
test_witness_block(self.nodes[0].rpc, self.test_node, block, accepted=True)
temp_utxos.pop(0)
# Update self.utxos for later tests by creating two outputs
# that consolidate all the coins in temp_utxos.
output_value = sum(i.nValue for i in temp_utxos) // 2
tx = CTransaction()
index = 0
# Just spend to our usual anyone-can-spend output
tx.vout = [CTxOut(output_value, CScript([OP_TRUE]))] * 2
for i in temp_utxos:
# Use SIGHASH_ALL|SIGHASH_ANYONECANPAY so we can build up
# the signatures as we go.
tx.vin.append(CTxIn(COutPoint(i.sha256, i.n), b""))
tx.wit.vtxinwit.append(CTxInWitness())
sign_p2pk_witness_input(witness_program, tx, index, SIGHASH_ALL | SIGHASH_ANYONECANPAY, i.nValue, key)
index += 1
block = self.build_next_block()
self.update_witness_block_with_transactions(block, [tx])
test_witness_block(self.nodes[0].rpc, self.test_node, block, accepted=True)
for i in range(len(tx.vout)):
self.utxo.append(UTXO(tx.sha256, i, tx.vout[i].nValue))
@subtest
    def test_non_standard_witness_blinding(self):
        """Test that unnecessary witnesses in a transaction do not blind the node to that transaction"""
# Create a p2sh output -- this is so we can pass the standardness
# rules (an anyone-can-spend OP_TRUE would be rejected, if not wrapped
# in P2SH).
p2sh_program = CScript([OP_TRUE])
p2sh_pubkey = hash160(p2sh_program)
script_pubkey = CScript([OP_HASH160, p2sh_pubkey, OP_EQUAL])
# Now check that unnecessary witnesses can't be used to blind a node
# to a transaction, eg by violating standardness checks.
tx = CTransaction()
tx.vin.append(CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b""))
tx.vout.append(CTxOut(self.utxo[0].nValue - 1000, script_pubkey))
tx.rehash()
test_transaction_acceptance(self.nodes[0].rpc, self.test_node, tx, False, True)
self.nodes[0].generate(1)
sync_blocks(self.nodes)
# We'll add an unnecessary witness to this transaction that would cause
# it to be non-standard, to test that violating policy with a witness
# doesn't blind a node to a transaction. Transactions
# rejected for having a witness shouldn't be added
# to the rejection cache.
tx2 = CTransaction()
tx2.vin.append(CTxIn(COutPoint(tx.sha256, 0), CScript([p2sh_program])))
tx2.vout.append(CTxOut(tx.vout[0].nValue - 1000, script_pubkey))
tx2.wit.vtxinwit.append(CTxInWitness())
tx2.wit.vtxinwit[0].scriptWitness.stack = [b'a' * 400]
tx2.rehash()
# This will be rejected due to a policy check:
# No witness is allowed, since it is not a witness program but a p2sh program
test_transaction_acceptance(self.nodes[1].rpc, self.std_node, tx2, True, False, b'bad-witness-nonstandard')
# If we send without witness, it should be accepted.
test_transaction_acceptance(self.nodes[1].rpc, self.std_node, tx2, False, True)
# Now create a new anyone-can-spend utxo for the next test.
tx3 = CTransaction()
tx3.vin.append(CTxIn(COutPoint(tx2.sha256, 0), CScript([p2sh_program])))
tx3.vout.append(CTxOut(tx2.vout[0].nValue - 1000, CScript([OP_TRUE, OP_DROP] * 15 + [OP_TRUE])))
tx3.rehash()
test_transaction_acceptance(self.nodes[0].rpc, self.test_node, tx2, False, True)
test_transaction_acceptance(self.nodes[0].rpc, self.test_node, tx3, False, True)
self.nodes[0].generate(1)
sync_blocks(self.nodes)
# Update our utxo list; we spent the first entry.
self.utxo.pop(0)
self.utxo.append(UTXO(tx3.sha256, 0, tx3.vout[0].nValue))
@subtest
def test_non_standard_witness(self):
"""Test detection of non-standard P2WSH witness"""
pad = chr(1).encode('latin-1')
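        # A single 0x01 byte, used as padding for witness stack elements and script pushes.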
# Create scripts for tests
scripts = []
scripts.append(CScript([OP_DROP] * 100))
scripts.append(CScript([OP_DROP] * 99))
scripts.append(CScript([pad * 59] * 59 + [OP_DROP] * 60))
scripts.append(CScript([pad * 59] * 59 + [OP_DROP] * 61))
p2wsh_scripts = []
tx = CTransaction()
tx.vin.append(CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b""))
# For each script, generate a pair of P2WSH and P2SH-P2WSH output.
outputvalue = (self.utxo[0].nValue - 1000) // (len(scripts) * 2)
for i in scripts:
p2wsh = CScript([OP_0, sha256(i)])
p2sh = hash160(p2wsh)
p2wsh_scripts.append(p2wsh)
tx.vout.append(CTxOut(outputvalue, p2wsh))
tx.vout.append(CTxOut(outputvalue, CScript([OP_HASH160, p2sh, OP_EQUAL])))
tx.rehash()
txid = tx.sha256
test_transaction_acceptance(self.nodes[0].rpc, self.test_node, tx, with_witness=False, accepted=True)
self.nodes[0].generate(1)
sync_blocks(self.nodes)
# Creating transactions for tests
p2wsh_txs = []
p2sh_txs = []
for i in range(len(scripts)):
p2wsh_tx = CTransaction()
p2wsh_tx.vin.append(CTxIn(COutPoint(txid, i * 2)))
p2wsh_tx.vout.append(CTxOut(outputvalue - 5000, CScript([OP_0, hash160(hex_str_to_bytes(""))])))
p2wsh_tx.wit.vtxinwit.append(CTxInWitness())
p2wsh_tx.rehash()
p2wsh_txs.append(p2wsh_tx)
p2sh_tx = CTransaction()
p2sh_tx.vin.append(CTxIn(COutPoint(txid, i * 2 + 1), CScript([p2wsh_scripts[i]])))
p2sh_tx.vout.append(CTxOut(outputvalue - 5000, CScript([OP_0, hash160(hex_str_to_bytes(""))])))
p2sh_tx.wit.vtxinwit.append(CTxInWitness())
p2sh_tx.rehash()
p2sh_txs.append(p2sh_tx)
# Testing native P2WSH
# Witness stack size, excluding witnessScript, over 100 is non-standard
p2wsh_txs[0].wit.vtxinwit[0].scriptWitness.stack = [pad] * 101 + [scripts[0]]
test_transaction_acceptance(self.nodes[1].rpc, self.std_node, p2wsh_txs[0], True, False, b'bad-witness-nonstandard')
# Non-standard nodes should accept
test_transaction_acceptance(self.nodes[0].rpc, self.test_node, p2wsh_txs[0], True, True)
# Stack element size over 80 bytes is non-standard
p2wsh_txs[1].wit.vtxinwit[0].scriptWitness.stack = [pad * 81] * 100 + [scripts[1]]
test_transaction_acceptance(self.nodes[1].rpc, self.std_node, p2wsh_txs[1], True, False, b'bad-witness-nonstandard')
# Non-standard nodes should accept
test_transaction_acceptance(self.nodes[0].rpc, self.test_node, p2wsh_txs[1], True, True)
# Standard nodes should accept if element size is not over 80 bytes
p2wsh_txs[1].wit.vtxinwit[0].scriptWitness.stack = [pad * 80] * 100 + [scripts[1]]
test_transaction_acceptance(self.nodes[1].rpc, self.std_node, p2wsh_txs[1], True, True)
# witnessScript size at 3600 bytes is standard
p2wsh_txs[2].wit.vtxinwit[0].scriptWitness.stack = [pad, pad, scripts[2]]
test_transaction_acceptance(self.nodes[0].rpc, self.test_node, p2wsh_txs[2], True, True)
test_transaction_acceptance(self.nodes[1].rpc, self.std_node, p2wsh_txs[2], True, True)
# witnessScript size at 3601 bytes is non-standard
p2wsh_txs[3].wit.vtxinwit[0].scriptWitness.stack = [pad, pad, pad, scripts[3]]
test_transaction_acceptance(self.nodes[1].rpc, self.std_node, p2wsh_txs[3], True, False, b'bad-witness-nonstandard')
# Non-standard nodes should accept
test_transaction_acceptance(self.nodes[0].rpc, self.test_node, p2wsh_txs[3], True, True)
# Repeating the same tests with P2SH-P2WSH
p2sh_txs[0].wit.vtxinwit[0].scriptWitness.stack = [pad] * 101 + [scripts[0]]
test_transaction_acceptance(self.nodes[1].rpc, self.std_node, p2sh_txs[0], True, False, b'bad-witness-nonstandard')
test_transaction_acceptance(self.nodes[0].rpc, self.test_node, p2sh_txs[0], True, True)
p2sh_txs[1].wit.vtxinwit[0].scriptWitness.stack = [pad * 81] * 100 + [scripts[1]]
test_transaction_acceptance(self.nodes[1].rpc, self.std_node, p2sh_txs[1], True, False, b'bad-witness-nonstandard')
test_transaction_acceptance(self.nodes[0].rpc, self.test_node, p2sh_txs[1], True, True)
p2sh_txs[1].wit.vtxinwit[0].scriptWitness.stack = [pad * 80] * 100 + [scripts[1]]
test_transaction_acceptance(self.nodes[1].rpc, self.std_node, p2sh_txs[1], True, True)
p2sh_txs[2].wit.vtxinwit[0].scriptWitness.stack = [pad, pad, scripts[2]]
test_transaction_acceptance(self.nodes[0].rpc, self.test_node, p2sh_txs[2], True, True)
test_transaction_acceptance(self.nodes[1].rpc, self.std_node, p2sh_txs[2], True, True)
p2sh_txs[3].wit.vtxinwit[0].scriptWitness.stack = [pad, pad, pad, scripts[3]]
test_transaction_acceptance(self.nodes[1].rpc, self.std_node, p2sh_txs[3], True, False, b'bad-witness-nonstandard')
test_transaction_acceptance(self.nodes[0].rpc, self.test_node, p2sh_txs[3], True, True)
self.nodes[0].generate(1) # Mine and clean up the mempool of non-standard node
# Valid but non-standard transactions in a block should be accepted by standard node
sync_blocks(self.nodes)
assert_equal(len(self.nodes[0].getrawmempool()), 0)
assert_equal(len(self.nodes[1].getrawmempool()), 0)
self.utxo.pop(0)
@subtest
def test_upgrade_after_activation(self):
"""Test the behavior of starting up a segwit-aware node after the softfork has activated."""
# Restart with the new binary
self.stop_node(2)
self.start_node(2, extra_args=["-vbparams=segwit:0:999999999999"])
connect_nodes(self.nodes[0], 2)
sync_blocks(self.nodes)
# Make sure that this peer thinks segwit has activated.
assert(get_bip9_status(self.nodes[2], 'segwit')['status'] == "active")
# Make sure this peer's blocks match those of node0.
height = self.nodes[2].getblockcount()
while height >= 0:
block_hash = self.nodes[2].getblockhash(height)
assert_equal(block_hash, self.nodes[0].getblockhash(height))
assert_equal(self.nodes[0].getblock(block_hash), self.nodes[2].getblock(block_hash))
height -= 1
@subtest
def test_witness_sigops(self):
"""Test sigop counting is correct inside witnesses."""
# Keep this under MAX_OPS_PER_SCRIPT (201)
witness_program = CScript([OP_TRUE, OP_IF, OP_TRUE, OP_ELSE] + [OP_CHECKMULTISIG] * 5 + [OP_CHECKSIG] * 193 + [OP_ENDIF])
witness_hash = sha256(witness_program)
script_pubkey = CScript([OP_0, witness_hash])
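        # For sigop counting, each OP_CHECKMULTISIG costs 20 sigops and each
        # OP_CHECKSIG costs 1, so this script costs 20 * 5 + 193 = 293 per spend.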
sigops_per_script = 20 * 5 + 193 * 1
# We'll produce 2 extra outputs, one with a program that would take us
# over max sig ops, and one with a program that would exactly reach max
# sig ops
outputs = (MAX_SIGOP_COST // sigops_per_script) + 2
extra_sigops_available = MAX_SIGOP_COST % sigops_per_script
# We chose the number of checkmultisigs/checksigs to make this work:
assert(extra_sigops_available < 100) # steer clear of MAX_OPS_PER_SCRIPT
# This script, when spent with the first
# N(=MAX_SIGOP_COST//sigops_per_script) outputs of our transaction,
# would push us just over the block sigop limit.
witness_program_toomany = CScript([OP_TRUE, OP_IF, OP_TRUE, OP_ELSE] + [OP_CHECKSIG] * (extra_sigops_available + 1) + [OP_ENDIF])
witness_hash_toomany = sha256(witness_program_toomany)
script_pubkey_toomany = CScript([OP_0, witness_hash_toomany])
# If we spend this script instead, we would exactly reach our sigop
# limit (for witness sigops).
witness_program_justright = CScript([OP_TRUE, OP_IF, OP_TRUE, OP_ELSE] + [OP_CHECKSIG] * (extra_sigops_available) + [OP_ENDIF])
witness_hash_justright = sha256(witness_program_justright)
script_pubkey_justright = CScript([OP_0, witness_hash_justright])
# First split our available utxo into a bunch of outputs
split_value = self.utxo[0].nValue // outputs
tx = CTransaction()
tx.vin.append(CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b""))
for i in range(outputs):
tx.vout.append(CTxOut(split_value, script_pubkey))
tx.vout[-2].scriptPubKey = script_pubkey_toomany
tx.vout[-1].scriptPubKey = script_pubkey_justright
tx.rehash()
block_1 = self.build_next_block()
self.update_witness_block_with_transactions(block_1, [tx])
test_witness_block(self.nodes[0].rpc, self.test_node, block_1, accepted=True)
tx2 = CTransaction()
# If we try to spend the first n-1 outputs from tx, that should be
# too many sigops.
total_value = 0
for i in range(outputs - 1):
tx2.vin.append(CTxIn(COutPoint(tx.sha256, i), b""))
tx2.wit.vtxinwit.append(CTxInWitness())
tx2.wit.vtxinwit[-1].scriptWitness.stack = [witness_program]
total_value += tx.vout[i].nValue
tx2.wit.vtxinwit[-1].scriptWitness.stack = [witness_program_toomany]
tx2.vout.append(CTxOut(total_value, CScript([OP_TRUE])))
tx2.rehash()
block_2 = self.build_next_block()
self.update_witness_block_with_transactions(block_2, [tx2])
test_witness_block(self.nodes[0].rpc, self.test_node, block_2, accepted=False)
# Try dropping the last input in tx2, and add an output that has
# too many sigops (contributing to legacy sigop count).
checksig_count = (extra_sigops_available // 4) + 1
script_pubkey_checksigs = CScript([OP_CHECKSIG] * checksig_count)
tx2.vout.append(CTxOut(0, script_pubkey_checksigs))
tx2.vin.pop()
tx2.wit.vtxinwit.pop()
tx2.vout[0].nValue -= tx.vout[-2].nValue
tx2.rehash()
block_3 = self.build_next_block()
self.update_witness_block_with_transactions(block_3, [tx2])
test_witness_block(self.nodes[0].rpc, self.test_node, block_3, accepted=False)
# If we drop the last checksig in this output, the tx should succeed.
block_4 = self.build_next_block()
tx2.vout[-1].scriptPubKey = CScript([OP_CHECKSIG] * (checksig_count - 1))
tx2.rehash()
self.update_witness_block_with_transactions(block_4, [tx2])
test_witness_block(self.nodes[0].rpc, self.test_node, block_4, accepted=True)
# Reset the tip back down for the next test
sync_blocks(self.nodes)
for x in self.nodes:
x.invalidateblock(block_4.hash)
# Try replacing the last input of tx2 to be spending the last
# output of tx
block_5 = self.build_next_block()
tx2.vout.pop()
tx2.vin.append(CTxIn(COutPoint(tx.sha256, outputs - 1), b""))
tx2.wit.vtxinwit.append(CTxInWitness())
tx2.wit.vtxinwit[-1].scriptWitness.stack = [witness_program_justright]
tx2.rehash()
self.update_witness_block_with_transactions(block_5, [tx2])
test_witness_block(self.nodes[0].rpc, self.test_node, block_5, accepted=True)
# TODO: test p2sh sigop counting
if __name__ == '__main__':
SegWitTest().main()
| {
"content_hash": "10b263f2d7f8572d0aa3c8e23c3b4c22",
"timestamp": "",
"source": "github",
"line_count": 2071,
"max_line_length": 220,
"avg_line_length": 46.2892322549493,
"alnum_prop": 0.6314713399050749,
"repo_name": "rawodb/bitcoin",
"id": "52f6482d5682db8e9bc55caa8e84e8b365e03374",
"size": "96079",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test/functional/p2p_segwit.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Assembly",
"bytes": "28453"
},
{
"name": "C",
"bytes": "682783"
},
{
"name": "C++",
"bytes": "5670101"
},
{
"name": "HTML",
"bytes": "21860"
},
{
"name": "Java",
"bytes": "30290"
},
{
"name": "M4",
"bytes": "197075"
},
{
"name": "Makefile",
"bytes": "114403"
},
{
"name": "Objective-C",
"bytes": "148160"
},
{
"name": "Objective-C++",
"bytes": "6763"
},
{
"name": "Python",
"bytes": "1341597"
},
{
"name": "QMake",
"bytes": "756"
},
{
"name": "Shell",
"bytes": "75802"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
default_app_config = 'spirit.user.admin.apps.SpiritUserAdminConfig'
from django.contrib import admin
from ..models import UserProfile, UserSuspensionLog
class UserProfileAdmin(admin.ModelAdmin):
search_fields = ('user__username',)
list_display = ('__str__', 'last_seen', 'is_verified', 'is_administrator', 'is_moderator')
raw_id_fields = ('user',)
admin.site.register(UserProfile, UserProfileAdmin)
class UserSuspensionLogAdmin(admin.ModelAdmin):
search_fields = ('user__username', )
list_display = ('__str__', 'date_created', 'suspension_reason')
raw_id_fields = ('user', 'suspended_by')
admin.site.register(UserSuspensionLog, UserSuspensionLogAdmin)
| {
"content_hash": "9aad7e8861d3a9c341c5850877f016be",
"timestamp": "",
"source": "github",
"line_count": 25,
"max_line_length": 94,
"avg_line_length": 29.08,
"alnum_prop": 0.7221458046767538,
"repo_name": "alesdotio/Spirit",
"id": "b18cea3a975c7a01bf3c839abc0438805debc6a4",
"size": "752",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "spirit/user/admin/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "255435"
},
{
"name": "CoffeeScript",
"bytes": "128350"
},
{
"name": "HTML",
"bytes": "203306"
},
{
"name": "JavaScript",
"bytes": "28458"
},
{
"name": "Makefile",
"bytes": "187"
},
{
"name": "Python",
"bytes": "773246"
}
],
"symlink_target": ""
} |
"""
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from resource_management import *
import os
import nturl2path
config = Script.get_config()
ams_collector_hosts = default("/clusterHostInfo/metrics_collector_hosts", [])
has_metric_collector = not len(ams_collector_hosts) == 0
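# Resolve the collector address only when an AMS metrics collector is deployed in the cluster.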
if has_metric_collector:
metric_collector_host = ams_collector_hosts[0]
metric_collector_port = default("/configurations/ams-site/timeline.metrics.service.webapp.address", "0.0.0.0:6188")
if metric_collector_port and metric_collector_port.find(':') != -1:
metric_collector_port = metric_collector_port.split(':')[1]
pass
sink_home = os.environ["SINK_HOME"]
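# Build a file: URL for the metrics sink jar from its Windows path.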
timeline_plugin_url = "file:"+nturl2path.pathname2url(os.path.join(sink_home, "hadoop-sink", "ambari-metrics-hadoop-sink.jar"))
hadoop_conf_dir = os.environ["HADOOP_CONF_DIR"]
hbase_conf_dir = os.environ["HBASE_CONF_DIR"]
hadoop_user = config["configurations"]["cluster-env"]["hadoop.user.name"]
| {
"content_hash": "050dd62f5f875de25b7d994ef4dff45e",
"timestamp": "",
"source": "github",
"line_count": 38,
"max_line_length": 129,
"avg_line_length": 43.526315789473685,
"alnum_prop": 0.7623941958887546,
"repo_name": "zouzhberk/ambaridemo",
"id": "093ea38947349af803b1e81abf720e105d68ac94",
"size": "1654",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "demo-server/src/main/resources/stacks/HDPWIN/2.1/hooks/before-START/scripts/params.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "5982"
},
{
"name": "Groff",
"bytes": "13935"
},
{
"name": "HTML",
"bytes": "52"
},
{
"name": "Java",
"bytes": "8681846"
},
{
"name": "PLSQL",
"bytes": "2160"
},
{
"name": "PLpgSQL",
"bytes": "105599"
},
{
"name": "PowerShell",
"bytes": "43170"
},
{
"name": "Python",
"bytes": "2751909"
},
{
"name": "Ruby",
"bytes": "9652"
},
{
"name": "SQLPL",
"bytes": "2117"
},
{
"name": "Shell",
"bytes": "247846"
}
],
"symlink_target": ""
} |
from django import shortcuts
from django.views import generic as generic_views
class HTTP401Exception(Exception):
pass
class ForcedAuthenticationMixin(object):
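    """Mixin that raises HTTP401Exception for unauthenticated requests before dispatching the view."""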
def check_user(self):
if not self.request.user.is_authenticated():
raise HTTP401Exception("Unauthorized")
return
def dispatch(self, request, *args, **kwargs):
self.check_user()
return super(ForcedAuthenticationMixin, self).dispatch(request, *args, **kwargs)
| {
"content_hash": "4ab5ba80b3161c64e258318ea6c934ff",
"timestamp": "",
"source": "github",
"line_count": 15,
"max_line_length": 88,
"avg_line_length": 31.8,
"alnum_prop": 0.7064989517819706,
"repo_name": "smadacm/TimeManagement",
"id": "9245f3343bfc1bda1106bd26c7ebd1c89a9b9a5f",
"size": "477",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "Overhead/util.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "1898"
},
{
"name": "HTML",
"bytes": "15983"
},
{
"name": "JavaScript",
"bytes": "3563"
},
{
"name": "Python",
"bytes": "32590"
},
{
"name": "Shell",
"bytes": "308"
}
],
"symlink_target": ""
} |
import _plotly_utils.basevalidators
class CustomdataValidator(_plotly_utils.basevalidators.DataArrayValidator):
def __init__(self, plotly_name="customdata", parent_name="funnel", **kwargs):
super(CustomdataValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "calc"),
**kwargs,
)
| {
"content_hash": "c7fad6001cfd401f60136b56a6c3e348",
"timestamp": "",
"source": "github",
"line_count": 11,
"max_line_length": 81,
"avg_line_length": 37.09090909090909,
"alnum_prop": 0.6323529411764706,
"repo_name": "plotly/plotly.py",
"id": "18d87b1b232c093737283c18a7e31c153ddfc6a3",
"size": "408",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "packages/python/plotly/plotly/validators/funnel/_customdata.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "545"
},
{
"name": "JavaScript",
"bytes": "2074"
},
{
"name": "PostScript",
"bytes": "565328"
},
{
"name": "Python",
"bytes": "31506317"
},
{
"name": "TypeScript",
"bytes": "71337"
}
],
"symlink_target": ""
} |
"""Weak reference support for Python.
This module is an implementation of PEP 205:
http://www.python.org/dev/peps/pep-0205/
"""
# Naming convention: Variables named "wr" are weak reference objects;
# they are called this instead of "ref" to avoid name collisions with
# the module-global ref() function imported from _weakref.
import UserDict
from _weakref import (
getweakrefcount,
getweakrefs,
ref,
proxy,
CallableProxyType,
ProxyType,
ReferenceType)
from exceptions import ReferenceError
ProxyTypes = (ProxyType, CallableProxyType)
__all__ = ["ref", "proxy", "getweakrefcount", "getweakrefs",
"WeakKeyDictionary", "ReferenceError", "ReferenceType", "ProxyType",
"CallableProxyType", "ProxyTypes", "WeakValueDictionary"]
class WeakValueDictionary(UserDict.UserDict):
"""Mapping class that references values weakly.
Entries in the dictionary will be discarded when no strong
reference to the value exists anymore
"""
# We inherit the constructor without worrying about the input
# dictionary; since it uses our .update() method, we get the right
# checks (if the other dictionary is a WeakValueDictionary,
# objects are unwrapped on the way out, and we always wrap on the
# way in).
def __init__(self, *args, **kw):
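        # The removal callback keeps only a weak reference back to the
        # dictionary, so the dictionary itself can still be garbage collected.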
def remove(wr, selfref=ref(self)):
self = selfref()
if self is not None:
del self.data[wr.key]
self._remove = remove
UserDict.UserDict.__init__(self, *args, **kw)
def __getitem__(self, key):
o = self.data[key]()
if o is None:
raise KeyError, key
else:
return o
def __contains__(self, key):
try:
o = self.data[key]()
except KeyError:
return False
return o is not None
def has_key(self, key):
try:
o = self.data[key]()
except KeyError:
return False
return o is not None
def __repr__(self):
return "<WeakValueDictionary at %s>" % id(self)
def __setitem__(self, key, value):
self.data[key] = KeyedRef(value, self._remove, key)
def copy(self):
new = WeakValueDictionary()
for key, wr in self.data.items():
o = wr()
if o is not None:
new[key] = o
return new
__copy__ = copy
def __deepcopy__(self, memo):
from copy import deepcopy
new = self.__class__()
for key, wr in self.data.items():
o = wr()
if o is not None:
new[deepcopy(key, memo)] = o
return new
def get(self, key, default=None):
try:
wr = self.data[key]
except KeyError:
return default
else:
o = wr()
if o is None:
                # This should only happen if the referent has already died but
                # the callback has not yet removed the entry
return default
else:
return o
def items(self):
L = []
for key, wr in self.data.items():
o = wr()
if o is not None:
L.append((key, o))
return L
def iteritems(self):
for wr in self.data.itervalues():
value = wr()
if value is not None:
yield wr.key, value
def iterkeys(self):
return self.data.iterkeys()
def __iter__(self):
return self.data.iterkeys()
def itervaluerefs(self):
"""Return an iterator that yields the weak references to the values.
The references are not guaranteed to be 'live' at the time
they are used, so the result of calling the references needs
to be checked before being used. This can be used to avoid
creating references that will cause the garbage collector to
keep the values around longer than needed.
"""
return self.data.itervalues()
def itervalues(self):
for wr in self.data.itervalues():
obj = wr()
if obj is not None:
yield obj
def popitem(self):
while 1:
key, wr = self.data.popitem()
o = wr()
if o is not None:
return key, o
def pop(self, key, *args):
try:
o = self.data.pop(key)()
except KeyError:
if args:
return args[0]
raise
if o is None:
raise KeyError, key
else:
return o
def setdefault(self, key, default=None):
try:
wr = self.data[key]
except KeyError:
self.data[key] = KeyedRef(default, self._remove, key)
return default
else:
return wr()
def update(self, dict=None, **kwargs):
d = self.data
if dict is not None:
if not hasattr(dict, "items"):
dict = type({})(dict)
for key, o in dict.items():
d[key] = KeyedRef(o, self._remove, key)
if len(kwargs):
self.update(kwargs)
def valuerefs(self):
"""Return a list of weak references to the values.
The references are not guaranteed to be 'live' at the time
they are used, so the result of calling the references needs
to be checked before being used. This can be used to avoid
creating references that will cause the garbage collector to
keep the values around longer than needed.
"""
return self.data.values()
def values(self):
L = []
for wr in self.data.values():
o = wr()
if o is not None:
L.append(o)
return L
class KeyedRef(ref):
"""Specialized reference that includes a key corresponding to the value.
This is used in the WeakValueDictionary to avoid having to create
a function object for each key stored in the mapping. A shared
callback object can use the 'key' attribute of a KeyedRef instead
of getting a reference to the key from an enclosing scope.
"""
__slots__ = "key",
def __new__(type, ob, callback, key):
self = ref.__new__(type, ob, callback)
self.key = key
return self
def __init__(self, ob, callback, key):
super(KeyedRef, self).__init__(ob, callback)
class WeakKeyDictionary(UserDict.UserDict):
""" Mapping class that references keys weakly.
Entries in the dictionary will be discarded when there is no
longer a strong reference to the key. This can be used to
associate additional data with an object owned by other parts of
an application without adding attributes to those objects. This
can be especially useful with objects that override attribute
accesses.
"""
def __init__(self, dict=None):
self.data = {}
def remove(k, selfref=ref(self)):
self = selfref()
if self is not None:
del self.data[k]
self._remove = remove
if dict is not None: self.update(dict)
def __delitem__(self, key):
del self.data[ref(key)]
def __getitem__(self, key):
return self.data[ref(key)]
def __repr__(self):
return "<WeakKeyDictionary at %s>" % id(self)
def __setitem__(self, key, value):
self.data[ref(key, self._remove)] = value
def copy(self):
new = WeakKeyDictionary()
for key, value in self.data.items():
o = key()
if o is not None:
new[o] = value
return new
__copy__ = copy
def __deepcopy__(self, memo):
from copy import deepcopy
new = self.__class__()
for key, value in self.data.items():
o = key()
if o is not None:
new[o] = deepcopy(value, memo)
return new
def get(self, key, default=None):
return self.data.get(ref(key),default)
def has_key(self, key):
try:
wr = ref(key)
except TypeError:
return 0
return wr in self.data
def __contains__(self, key):
try:
wr = ref(key)
except TypeError:
return 0
return wr in self.data
def items(self):
L = []
for key, value in self.data.items():
o = key()
if o is not None:
L.append((o, value))
return L
def iteritems(self):
for wr, value in self.data.iteritems():
key = wr()
if key is not None:
yield key, value
def iterkeyrefs(self):
"""Return an iterator that yields the weak references to the keys.
The references are not guaranteed to be 'live' at the time
they are used, so the result of calling the references needs
to be checked before being used. This can be used to avoid
creating references that will cause the garbage collector to
keep the keys around longer than needed.
"""
return self.data.iterkeys()
def iterkeys(self):
for wr in self.data.iterkeys():
obj = wr()
if obj is not None:
yield obj
def __iter__(self):
return self.iterkeys()
def itervalues(self):
return self.data.itervalues()
def keyrefs(self):
"""Return a list of weak references to the keys.
The references are not guaranteed to be 'live' at the time
they are used, so the result of calling the references needs
to be checked before being used. This can be used to avoid
creating references that will cause the garbage collector to
keep the keys around longer than needed.
"""
return self.data.keys()
def keys(self):
L = []
for wr in self.data.keys():
o = wr()
if o is not None:
L.append(o)
return L
def popitem(self):
while 1:
key, value = self.data.popitem()
o = key()
if o is not None:
return o, value
def pop(self, key, *args):
return self.data.pop(ref(key), *args)
def setdefault(self, key, default=None):
return self.data.setdefault(ref(key, self._remove),default)
def update(self, dict=None, **kwargs):
d = self.data
if dict is not None:
if not hasattr(dict, "items"):
dict = type({})(dict)
for key, value in dict.items():
d[ref(key, self._remove)] = value
if len(kwargs):
self.update(kwargs)
| {
"content_hash": "7346cefc0e3b299e92bb2b1d94aae480",
"timestamp": "",
"source": "github",
"line_count": 377,
"max_line_length": 79,
"avg_line_length": 28.246684350132625,
"alnum_prop": 0.5547938773593765,
"repo_name": "bkillenit/AbletonAPI",
"id": "84d81a27347b55fe922f63f096dd656be1e98cf2",
"size": "10649",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "python-api-materials/code/RpycHost/weakref.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "3969"
},
{
"name": "Clojure",
"bytes": "306617"
},
{
"name": "HTML",
"bytes": "515"
},
{
"name": "JavaScript",
"bytes": "1367208"
},
{
"name": "Python",
"bytes": "401253"
}
],
"symlink_target": ""
} |
"""Sources for numpy arrays and pandas DataFrames."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.learn.python.learn.dataframe import transform
from tensorflow.contrib.learn.python.learn.dataframe.queues import feeding_functions
class BaseInMemorySource(transform.Transform):
"""Abstract parent class for NumpySource and PandasSource."""
def __init__(self,
data,
batch_size=None,
queue_capacity=None,
shuffle=False,
min_after_dequeue=None,
seed=None):
super(BaseInMemorySource, self).__init__()
self._data = data
self._batch_size = (1 if batch_size is None else batch_size)
    self._queue_capacity = (self._batch_size * 10 if queue_capacity is None
                            else queue_capacity)
self._shuffle = shuffle
self._min_after_dequeue = (batch_size if min_after_dequeue is None
else min_after_dequeue)
self._seed = seed
@transform.parameter
def data(self):
return self._data
@transform.parameter
def batch_size(self):
return self._batch_size
@transform.parameter
def queue_capacity(self):
return self._queue_capacity
@transform.parameter
def shuffle(self):
return self._shuffle
@transform.parameter
def min_after_dequeue(self):
return self._min_after_dequeue
@transform.parameter
def seed(self):
return self._seed
@property
def input_valency(self):
return 0
def _apply_transform(self, transform_input):
queue = feeding_functions.enqueue_data(
self.data, self.queue_capacity, self.shuffle, self.min_after_dequeue)
dequeued = queue.dequeue_many(self.batch_size)
# TODO(jamieas): dequeue and dequeue_many will soon return a list regardless
# of the number of enqueued tensors. Remove the following once that change
# is in place.
if not isinstance(dequeued, (tuple, list)):
dequeued = (dequeued,)
# pylint: disable=not-callable
return self.return_type(*dequeued)
class NumpySource(BaseInMemorySource):
"""A zero-input Transform that produces a single column from a numpy array."""
@property
def name(self):
return "NumpySource"
@property
def _output_names(self):
return ("index", "value")
class PandasSource(BaseInMemorySource):
"""A zero-input Transform that produces Series from a DataFrame."""
def __init__(self,
dataframe,
batch_size=None,
queue_capacity=None,
shuffle=False,
min_after_dequeue=None,
seed=None):
if "index" in dataframe.columns:
raise ValueError("Column name `index` is reserved.")
super(PandasSource, self).__init__(dataframe, batch_size, queue_capacity,
shuffle, min_after_dequeue, seed)
@property
def name(self):
return "PandasSource"
@property
def _output_names(self):
return tuple(["index"] + self._data.columns.tolist())
| {
"content_hash": "301c631c66a279fdff165e8320c816bf",
"timestamp": "",
"source": "github",
"line_count": 107,
"max_line_length": 84,
"avg_line_length": 28.925233644859812,
"alnum_prop": 0.6494345718901454,
"repo_name": "EvenStrangest/tensorflow",
"id": "e5880b065c76de12ad8d0a961d44c399b8bf2ca9",
"size": "3785",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tensorflow/contrib/learn/python/learn/dataframe/transforms/in_memory_source.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "156263"
},
{
"name": "C++",
"bytes": "9372687"
},
{
"name": "CMake",
"bytes": "29372"
},
{
"name": "CSS",
"bytes": "1297"
},
{
"name": "HTML",
"bytes": "784316"
},
{
"name": "Java",
"bytes": "39229"
},
{
"name": "JavaScript",
"bytes": "10875"
},
{
"name": "Jupyter Notebook",
"bytes": "1533241"
},
{
"name": "Makefile",
"bytes": "11364"
},
{
"name": "Objective-C",
"bytes": "5332"
},
{
"name": "Objective-C++",
"bytes": "45585"
},
{
"name": "Protocol Buffer",
"bytes": "112557"
},
{
"name": "Python",
"bytes": "6949434"
},
{
"name": "Shell",
"bytes": "196466"
},
{
"name": "TypeScript",
"bytes": "411503"
}
],
"symlink_target": ""
} |
import sys
from appvalidator import validate_app, validate_packaged_app
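# Usage sketch (file names are illustrative):
#   python griz.py my_app.webapp   # .webapp manifests go through validate_app
#   python griz.py my_app.zip      # anything else goes through validate_packaged_app
# Both print a JSON validation report to stdout.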
path = sys.argv[1]
if path.endswith(".webapp"):
print validate_app(path, format="json")
else:
print validate_packaged_app(path, format="json")
| {
"content_hash": "7aa9467d828c13521560290226e940b4",
"timestamp": "",
"source": "github",
"line_count": 8,
"max_line_length": 60,
"avg_line_length": 28,
"alnum_prop": 0.7366071428571429,
"repo_name": "mattbasta/perfalator",
"id": "581c48e5a510a7871bc550797f12acdf93c62cb9",
"size": "224",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "griz.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "JavaScript",
"bytes": "192"
},
{
"name": "Python",
"bytes": "296128"
},
{
"name": "Shell",
"bytes": "803"
}
],
"symlink_target": ""
} |
"""
pyexcel.plugins.sources.output_to_memory
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Representation of output file sources
:copyright: (c) 2015-2017 by Onni Software Ltd.
:license: New BSD License
"""
from pyexcel.internal import RENDERER
from pyexcel.source import AbstractSource, MemorySourceMixin
# pylint: disable=W0223
class WriteSheetToMemory(AbstractSource, MemorySourceMixin):
"""
Single sheet to memory
"""
def __init__(self, file_type=None, file_stream=None,
renderer_library=None, **keywords):
AbstractSource.__init__(self, **keywords)
self._renderer = RENDERER.get_a_plugin(file_type, renderer_library)
if file_stream:
self._content = file_stream
else:
self._content = self._renderer.get_io()
self.attributes = RENDERER.get_all_file_types()
def write_data(self, sheet):
self._renderer.render_sheet_to_stream(
self._content, sheet, **self._keywords)
# pylint: disable=W0223
class WriteBookToMemory(WriteSheetToMemory):
"""
    Multiple sheet data source for writing back to memory
"""
def write_data(self, book):
self._renderer.render_book_to_stream(
self._content, book, **self._keywords)
| {
"content_hash": "1f10b8005a01e8608c0e84b00176bdff",
"timestamp": "",
"source": "github",
"line_count": 42,
"max_line_length": 75,
"avg_line_length": 30.595238095238095,
"alnum_prop": 0.6311284046692607,
"repo_name": "caspartse/QQ-Groups-Spider",
"id": "e3c516ee59602605e32563e859bc40861536d137",
"size": "1285",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "vendor/pyexcel/plugins/sources/output_to_memory.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "157970"
},
{
"name": "Python",
"bytes": "10416"
},
{
"name": "Smarty",
"bytes": "9490"
}
],
"symlink_target": ""
} |
"""
filename: test_members.py
description: Tests for Membership.
created by: Omar De La Hoz
created on: 10/05/17
"""
import pytest
import config
from app import app, db, socketio
from mock import patch, MagicMock
from app.users.models import Users
from app.members.models import Members, Roles
from app.committees.models import Committees
from flask_socketio import SocketIOTestClient
from app.members.members_response import Response
from app.notifications.controllers import new_committee
from flask_sqlalchemy import SQLAlchemy
from sqlalchemy import create_engine
class TestMembers(object):
@classmethod
def setup_class(self):
self.app = app.test_client()
app.config['TESTING'] = True
app.config['SQLALCHEMY_DATABASE_URI'] = config.SQLALCHEMY_TEST_DATABASE_URI
db = SQLAlchemy(app)
db.session.close()
db.drop_all()
db.create_all()
db.event.remove(Committees, "after_insert", new_committee)
self.socketio = socketio.test_client(app);
self.socketio.connect()
@classmethod
def setup_method(self, method):
db.drop_all()
db.create_all()
self.user_data = {
"user_id": "testuser",
"committee_id": "testcommittee",
"role": "NormalMember"
}
# Create admin user for tests.
admin = Users(id = "adminuser")
admin.first_name = "Admin"
admin.last_name = "User"
admin.email = "[email protected]"
admin.is_admin = True
db.session.add(admin)
#db.session.expunge(admin)
db.session.commit()
self.admin_token = admin.generate_auth()
self.admin_token = self.admin_token.decode('ascii')
# Create normal user for tests.
self.user = Users(id = "testuser")
self.user.first_name = "Test1"
self.user.last_name = "User"
self.user.email = "[email protected]"
self.user.is_admin = False
db.session.add(self.user)
db.session.commit()
self.user_token = self.user.generate_auth()
self.user_token = self.user_token.decode('ascii')
# Create normal user2 for tests.
self.user2 = Users(id = "test2user")
self.user2.first_name = "Test2"
self.user2.last_name = "User"
self.user2.email = "[email protected]"
self.user2.is_admin = False
db.session.add(self.user2)
db.session.commit()
self.user2_token = self.user2.generate_auth()
self.user2_token = self.user2_token.decode('ascii')
# Create a test committee.
self.committee = Committees(id = "testcommittee")
self.committee.title = "Test Committee"
self.committee.description = "Test Description"
self.committee.location = "Test Location"
self.committee.meeting_time = "1200"
self.committee.meeting_day = 2
self.committee.head = "adminuser"
        # Create a second test committee.
self.committee2 = Committees(id = "testcommittee2")
self.committee2.title = "Test Committee"
self.committee2.description = "Test Description"
self.committee2.location = "Test Location"
self.committee2.meeting_time = "1200"
self.committee2.meeting_day = 2
self.committee2.head = "adminuser"
# Add user2 to committee.
role = Members(role= Roles.NormalMember)
role.member = self.user2
self.committee.members.append(role)
db.session.add(self.committee)
db.session.add(self.committee2)
db.session.commit()
@classmethod
def teardown_class(self):
db.session.close()
db.drop_all()
db.event.listen(Committees, "after_insert", new_committee)
self.socketio.disconnect()
# Test get members of nonexistent committee.
def test_get_committee_members_nonexistent(self):
self.socketio.emit("get_members", "nonexistent")
received = self.socketio.get_received()
assert received[0]["args"][0] == Response.ComDoesntExist
# Test get members of committee.
def test_get_committee_members(self):
self.socketio.emit("get_members", "testcommittee")
received = self.socketio.get_received()
        committee = received[0]["args"][0]
result = {
'id': 'test2user',
'name': "Test2 User",
'role': 'NormalMember'
}
assert commitee["committee_id"] == "testcommittee"
assert (commitee["members"] == [result])
# Test add to committee when admin.
def test_add_to_committee(self):
self.user_data["token"] = self.admin_token
self.socketio.emit("add_member_committee", self.user_data)
received = self.socketio.get_received()
assert received[0]["args"][0]["members"][1]["id"] == self.user_data["user_id"]
assert received[1]["args"][0] == Response.AddSuccess
# Test add user to more than one committee.
def test_add_to_second_committee(self):
self.user_data["token"] = self.admin_token
self.user_data["committee_id"] = "testcommittee2"
self.user_data["user_id"] = "test2user"
self.socketio.emit("add_member_committee", self.user_data)
received = self.socketio.get_received()
assert received[0]["args"][0]["members"][0]["id"] == self.user_data["user_id"]
assert received[1]["args"][0] == Response.AddSuccess
# Test add to committee when admin and
# no role specified.
def test_add_to_committee_no_role(self):
self.user_data["token"] = self.admin_token
del self.user_data["role"]
self.socketio.emit("add_member_committee", self.user_data)
received = self.socketio.get_received()
assert received[0]["args"][0]["members"][1]["id"] == self.user_data["user_id"]
assert received[0]["args"][0]["members"][1]["role"] == Roles.NormalMember.value
assert received[1]["args"][0] == Response.AddSuccess
    # Test add when the committee doesn't exist.
def test_add_non_existent(self):
self.user_data["token"] = self.admin_token
self.user_data["committee_id"] = "nonexistent"
self.socketio.emit("add_member_committee", self.user_data)
received = self.socketio.get_received()
assert received[0]["args"][0] == Response.UserDoesntExist
# Test adding a member raises an Exception.
@patch('app.committees.models.Committees.members')
def test_add_exception(self, mock_obj):
mock_obj.append.side_effect = Exception("User couldn't be added.")
self.user_data["token"] = self.admin_token
self.socketio.emit("add_member_committee", self.user_data)
received = self.socketio.get_received()
assert received[0]["args"][0] == Response.AddError
    # Test trying to remove a member when not admin.
def test_remove_member_notadmin(self):
self.user_data["token"] = self.user_token
self.socketio.emit("remove_member_committee", self.user_data)
received = self.socketio.get_received()
assert received[0]["args"][0] == Response.PermError
# Test remove member admin
def test_remove_member_admin(self):
self.user_data["token"] = self.admin_token
self.user_data["user_id"] = self.user2.id
self.socketio.emit("remove_member_committee", self.user_data)
received = self.socketio.get_received()
assert received[0]["args"][0]["members"] == []
assert received[1]["args"][0] == Response.RemoveSuccess
# Test remove committee head should fail.
def test_remove_head_admin(self):
self.user_data["token"] = self.admin_token
self.user_data["user_id"] = "adminuser"
self.socketio.emit("remove_member_committee", self.user_data)
received = self.socketio.get_received()
assert received[0]["args"][0] == Response.RemoveHeadError
# Test remove nonexistent member.
def test_remove_member_nonexistent(self):
self.user_data["token"] = self.admin_token
self.user_data["user_id"] = "nonexistent"
self.socketio.emit("remove_member_committee", self.user_data)
received = self.socketio.get_received()
assert received[0]["args"][0] == Response.UserDoesntExist
# Test remove member nonexistent committee.
def test_remove_member_noncomm(self):
self.user_data["token"] = self.admin_token
self.user_data["committee_id"] = "nonexistent"
self.socketio.emit("remove_member_committee", self.user_data)
received = self.socketio.get_received()
assert received[0]["args"][0] == Response.UserDoesntExist
# Test removing a member raises an Exception.
@patch('app.members.controllers.db.session.delete')
def test_remove_exception(self, mock_obj):
mock_obj.side_effect = Exception("User couldn't be removed.")
self.user_data["token"] = self.admin_token
self.user_data["user_id"] = self.user2.id
self.socketio.emit("remove_member_committee", self.user_data)
received = self.socketio.get_received()
assert received[0]["args"][0] == Response.RemoveError
    # Test trying to edit a member's role when not admin.
def test_edit_member_notadmin(self):
self.user_data["token"] = self.user_token
self.user_data["role"] = Roles.ActiveMember.value
self.socketio.emit("edit_role_member_committee", self.user_data)
received = self.socketio.get_received()
assert received[0]["args"][0] == Response.PermError
    # Test edit member role as admin.
def test_edit_member_admin(self):
self.user_data["token"] = self.admin_token
self.user_data["user_id"] = self.user2.id
self.user_data["role"] = Roles.ActiveMember.value
self.socketio.emit("edit_role_member_committee", self.user_data)
received = self.socketio.get_received()
assert received[0]["args"][0] == Response.EditSuccess
    # Test edit role of a nonexistent member.
def test_edit_member_nonexistent(self):
self.user_data["token"] = self.admin_token
self.user_data["user_id"] = "nonexistent"
self.user_data["role"] = Roles.ActiveMember.value
self.socketio.emit("edit_role_member_committee", self.user_data)
received = self.socketio.get_received()
assert received[0]["args"][0] == Response.UserDoesntExist
    # Test edit member role in a nonexistent committee.
def test_edit_member_noncomm(self):
self.user_data["token"] = self.admin_token
self.user_data["committee_id"] = "nonexistent"
self.user_data["role"] = Roles.ActiveMember.value
self.socketio.emit("edit_role_member_committee", self.user_data)
received = self.socketio.get_received()
assert received[0]["args"][0] == Response.UserDoesntExist
    # Test edit member with a nonexistent role.
def test_edit_member_nonrole(self):
self.user_data["token"] = self.admin_token
self.user_data["role"] = "nonexistent"
self.socketio.emit("edit_role_member_committee", self.user_data)
received = self.socketio.get_received()
assert received[0]["args"][0] == Response.RoleDoesntExist
# Test editing a member raises an Exception.
@patch('app.committees.models.Committees.members')
def test_edit_exception(self, mock_obj):
mock_obj.filter_by.side_effect = Exception("User couldn't be edited.")
self.user_data["token"] = self.admin_token
self.user_data["user_id"] = self.user2.id
self.user_data["role"] = Roles.ActiveMember.value
self.socketio.emit("edit_role_member_committee", self.user_data)
received = self.socketio.get_received()
assert received[0]["args"][0] == Response.EditError
| {
"content_hash": "56cf3fa63cf341c925e693d3923bb6bb",
"timestamp": "",
"source": "github",
"line_count": 306,
"max_line_length": 87,
"avg_line_length": 38.51633986928105,
"alnum_prop": 0.6398269132869506,
"repo_name": "ritstudentgovernment/chargeflask",
"id": "e3206c4c615a5d0590851fe40df88403018e67cb",
"size": "11786",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "app/members/test_members.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "1632"
},
{
"name": "Dockerfile",
"bytes": "381"
},
{
"name": "HTML",
"bytes": "21682"
},
{
"name": "Python",
"bytes": "221454"
},
{
"name": "Shell",
"bytes": "132"
}
],
"symlink_target": ""
} |
class Const(object):
'''
Bibliography:
[1] VideoRay Example Code [Online]
Available: https://github.com/videoray/Thruster/blob/master/thruster.py
'''
# VRCSR protocol defines
sync_request = 0x5ff5
sync_response = 0x0ff0
protocol_vrcsr_header_size = 6
protocol_vrcsr_xsum_size = 4
# CSR address for sending an application specific custom command
addr_custom_command = 0xf0
propulsion_command = 0xaa
    # Flag for the standard thruster response, which contains the telemetry payload described below
response_thruster_standard = 0x2
# Standard response is the device type followed by 4 32-bit floats and 1 byte
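    # i.e. 1 (device type) + 4 * 4 (float32 fields) + 1 (trailing byte) = 18 bytes.
    # A payload of that shape could be unpacked with, for example,
    # struct.unpack('<B4fB', payload) -- the little-endian byte order here is
    # an assumption, not taken from the VideoRay documentation.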
response_thruster_standard_length = 1 + 4 * 4 + 1 | {
"content_hash": "68d6d53bec8eef8e4dbbf9eda2051a9a",
"timestamp": "",
"source": "github",
"line_count": 21,
"max_line_length": 83,
"avg_line_length": 33.714285714285715,
"alnum_prop": 0.6765536723163842,
"repo_name": "pemami4911/Sub8",
"id": "f83b0a8fe4b3a4592c08ccbb44db4b645a389b9a",
"size": "708",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "drivers/sub8_videoray_m5_thruster/sub8_thruster_comm/protocol.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C++",
"bytes": "55662"
},
{
"name": "CMake",
"bytes": "7113"
},
{
"name": "GLSL",
"bytes": "7571"
},
{
"name": "Python",
"bytes": "123852"
},
{
"name": "Shell",
"bytes": "5565"
}
],
"symlink_target": ""
} |
import os
import math
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('--run', type=str, required=True,
help='The command to run.')
parser.add_argument('--scanner', type=str, required=False,
help='The path of the virtual_scanner')
parser.add_argument('--simplify_points', type=str, required=False,
default='simplify_points',
help='The path of the simplify_points')
parser.add_argument('--transform_points', type=str, required=False,
default='transform_points',
help='The path of the transform_points')
parser.add_argument('--align_y', type=str, required=False, default='false',
help='Align the points with y axis')
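# Example invocations (helper binaries such as simplify_points are assumed
# to be on PATH when the corresponding step is run):
#   python modelnet.py --run download_m40_points
#   python modelnet.py --run simplify_points
#   python modelnet.py --run generate_points_filelist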
abs_path = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
root_folder = os.path.join(abs_path, 'data/ModelNet40')
args = parser.parse_args()
virtual_scanner = args.scanner
simplify = args.simplify_points
transform = args.transform_points
def download_m40():
# download via wget
if not os.path.exists(root_folder):
os.makedirs(root_folder)
url = 'http://modelnet.cs.princeton.edu/ModelNet40.zip'
cmd = 'wget %s -O %s/ModelNet40.zip' % (url, root_folder)
print(cmd)
os.system(cmd)
# unzip
cmd = 'unzip %s/ModelNet40.zip -d %s' % (root_folder, root_folder)
print(cmd)
os.system(cmd)
def download_m40_points():
# download via wget
if not os.path.exists(root_folder):
os.makedirs(root_folder)
url = 'https://www.dropbox.com/s/m233s9eza3acj2a/ModelNet40.points.zip?dl=0'
zip_file = os.path.join(root_folder, 'ModelNet40.points.zip')
cmd = 'wget %s -O %s' % (url, zip_file)
print(cmd)
os.system(cmd)
# unzip
cmd = 'unzip %s -d %s/ModelNet40.points' % (zip_file, root_folder)
print(cmd)
os.system(cmd)
def clean_off_file(filename):
# read the contents of the file
with open(filename) as fid:
file_str = fid.read()
# fix the file
if file_str[0:3] != 'OFF':
print('Error: not an OFF file: ' + filename)
elif file_str[0:4] != 'OFF\n':
print('Info: fix an OFF file: ' + filename)
new_str = file_str[0:3] + '\n' + file_str[3:]
with open(filename, 'w') as f_rewrite:
f_rewrite.write(new_str)
def get_filelist(root_folder, train=True, suffix='off', ratio=1.0):
filelist, category = [], []
folders = sorted(os.listdir(root_folder))
assert(len(folders) == 40)
for idx, folder in enumerate(folders):
subfolder = 'train' if train else 'test'
current_folder = os.path.join(root_folder, folder, subfolder)
filenames = sorted(os.listdir(current_folder))
filenames = [fname for fname in filenames if fname.endswith(suffix)]
total_num = math.ceil(len(filenames) * ratio)
for i in range(total_num):
filelist.append(os.path.join(folder, subfolder, filenames[i]))
category.append(idx)
return filelist, category
def move_files(src_folder, des_folder, suffix):
folders = os.listdir(src_folder)
for folder in folders:
for subfolder in ['train', 'test']:
curr_src_folder = os.path.join(src_folder, folder, subfolder)
curr_des_folder = os.path.join(des_folder, folder, subfolder)
if not os.path.exists(curr_des_folder):
os.makedirs(curr_des_folder)
filenames = os.listdir(curr_src_folder)
for filename in filenames:
if filename.endswith(suffix):
os.rename(os.path.join(curr_src_folder, filename),
os.path.join(curr_des_folder, filename))
def convert_mesh_to_points():
mesh_folder = os.path.join(root_folder, 'ModelNet40')
# Delete the following 3 files since the virtualscanner can not deal with them
filelist = ['cone/train/cone_0117.off',
'curtain/train/curtain_0066.off',
'car/train/car_0021.off.off']
for filename in filelist:
filename = os.path.join(mesh_folder, filename)
if os.path.exists(filename):
os.remove(filename)
# clean the off files
train_list, _ = get_filelist(mesh_folder, train=True, suffix='off')
test_list, _ = get_filelist(mesh_folder, train=False, suffix='off')
filelist = train_list + test_list
for filename in filelist:
clean_off_file(os.path.join(mesh_folder, filename))
# run virtualscanner
folders = os.listdir(mesh_folder)
for folder in folders:
for subfolder in ['train', 'test']:
curr_folder = os.path.join(mesh_folder, folder, subfolder)
cmd = '%s %s 14' % (virtual_scanner, curr_folder)
print(cmd)
os.system(cmd)
# move points
move_files(mesh_folder, mesh_folder + '.points', 'points')
def simplify_points(resolution=64):
# rename and backup the original folders
points_folder = os.path.join(root_folder, 'ModelNet40.points')
original_folder = points_folder + ".dense"
if os.path.exists(points_folder):
os.rename(points_folder, original_folder)
folders = os.listdir(original_folder)
for folder in folders:
for subfolder in ['train', 'test']:
curr_folder = os.path.join(original_folder, folder, subfolder)
# write filelist to disk
filenames = os.listdir(curr_folder)
filelist_name = os.path.join(curr_folder, 'list.txt')
with open(filelist_name, 'w') as fid:
for filename in filenames:
if filename.endswith('.points'):
fid.write(os.path.join(curr_folder, filename) + '\n')
# run simplify_points
output_path = os.path.join(points_folder, folder, subfolder)
if not os.path.exists(output_path):
os.makedirs(output_path)
cmd = '%s --filenames %s --output_path %s --dim %d' % \
(simplify, filelist_name, output_path, resolution)
print(cmd)
os.system(cmd)
os.remove(filelist_name)
def transform_points():
points_folder = os.path.join(root_folder, 'ModelNet40.points')
output_folder = os.path.join(root_folder, 'ModelNet40.points.y')
folders = os.listdir(points_folder)
for folder in folders:
for subfolder in ['train', 'test']:
curr_folder = os.path.join(points_folder, folder, subfolder)
output_path = os.path.join(output_folder, folder, subfolder)
if not os.path.exists(output_path):
os.makedirs(output_path)
# write filelist to disk
filenames = os.listdir(curr_folder)
filelist_name = os.path.join(curr_folder, 'list.txt')
with open(filelist_name, 'w') as fid:
for filename in filenames:
if filename.endswith('.points'):
fid.write(os.path.join(curr_folder, filename) + '\n')
# write the transformation matrix
mat = '0 0 1 1 0 0 0 1 0'
mat_name = os.path.join(curr_folder, 'mat.txt')
with open(mat_name, 'w') as fid:
fid.write(mat)
# run transform points
cmd = '%s --filenames %s --output_path %s --mat %s' % \
(transform, filelist_name, output_path, mat_name)
print(cmd)
os.system(cmd)
os.remove(filelist_name)
os.remove(mat_name)
def generate_points_filelist():
points_folder = os.path.join(root_folder, 'ModelNet40.points')
for folder in ['train', 'test']:
train = folder == 'train'
filelist, idx = get_filelist(points_folder, train=train, suffix='points')
prefix = 'm40_' + folder
filename = os.path.join(root_folder, '%s_points_list.txt' % prefix)
print('Save to %s' % filename)
with open(filename, 'w') as fid:
for i in range(len(filelist)):
fid.write('%s %d\n' % (filelist[i], idx[i]))
def generate_points_filelist_ratios():
ratios = [0.01, 0.02, 0.05, 0.1, 0.2, 0.5, 1.0]
points_folder = os.path.join(root_folder, 'ModelNet40.points.y')
for folder in ['train', 'test']:
train = folder == 'train'
for ratio in ratios:
if train == False and ratio < 1:
continue
prefix = 'm40_y_%.02f_%s' % (ratio, folder)
filename = os.path.join(root_folder, '%s_points_list.txt' % prefix)
filelist, idx = get_filelist(points_folder, train=train,
suffix='points', ratio=ratio)
print('Save to %s' % filename)
with open(filename, 'w') as fid:
for i in range(len(filelist)):
fid.write('%s %d\n' % (filelist[i], idx[i]))
if __name__ == '__main__':
eval('%s()' % args.run)
| {
"content_hash": "e6ef79a85f09464dc0a8ff045af6d48e",
"timestamp": "",
"source": "github",
"line_count": 233,
"max_line_length": 80,
"avg_line_length": 35.502145922746784,
"alnum_prop": 0.6381769825918762,
"repo_name": "microsoft/O-CNN",
"id": "555e52acc3cfba599cf2908e25928787890e6822",
"size": "8272",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pytorch/projects/tools/modelnet.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "1028"
},
{
"name": "C++",
"bytes": "1168252"
},
{
"name": "CMake",
"bytes": "12150"
},
{
"name": "Cuda",
"bytes": "107918"
},
{
"name": "Dockerfile",
"bytes": "2505"
},
{
"name": "MATLAB",
"bytes": "989"
},
{
"name": "Python",
"bytes": "379722"
}
],
"symlink_target": ""
} |
from django import forms
from django.contrib.admin.widgets import RelatedFieldWidgetWrapper
from django.utils.translation import ugettext_lazy as _
from mptt.forms import TreeNodeChoiceField
from .models import Category
class CategoryAdminForm(forms.ModelForm):
"""
Form for Category's Admin.
"""
parent = TreeNodeChoiceField(
label=_('Parent category'),
empty_label=_('No parent category'),
level_indicator='|--', required=False,
queryset=Category.objects.all())
def __init__(self, *args, **kwargs):
super(CategoryAdminForm, self).__init__(*args, **kwargs)
self.fields['parent'].widget = RelatedFieldWidgetWrapper(
self.fields['parent'].widget,
Category.parent.field.remote_field,
self.admin_site)
def clean_parent(self):
"""
        Check that the category is not set as its own parent.
"""
data = self.cleaned_data['parent']
if data == self.instance:
raise forms.ValidationError(
_('A category cannot be parent of itself.'),
code='self_parenting')
return data
class Meta:
"""
CategoryAdminForm's Meta.
"""
model = Category
fields = forms.ALL_FIELDS
| {
"content_hash": "302054cf8dd9dd228c3152eb335ad959",
"timestamp": "",
"source": "github",
"line_count": 42,
"max_line_length": 66,
"avg_line_length": 30.5,
"alnum_prop": 0.6112412177985949,
"repo_name": "am1tyadava/amityadav_blog",
"id": "4f18c3811cca145be79e0375deb48c0f1a63bd20",
"size": "1281",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "categories/forms.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "176523"
},
{
"name": "HTML",
"bytes": "13730"
},
{
"name": "JavaScript",
"bytes": "668"
},
{
"name": "Python",
"bytes": "34834"
}
],
"symlink_target": ""
} |
'''
synbiochem (c) University of Manchester 2017
All rights reserved.
@author: neilswainston
'''
# pylint: disable=invalid-name
import re
import sys
import pandas as pd
def analyse(df):
'''analyse.'''
result_df = df.groupby(['plasmid',
'strain',
'treatment'])['mg/L'].agg({'mg/L': ['mean',
'std']})
print result_df
def get_data(filename):
'''get data.'''
df = pd.read_csv(filename)
df['Name'] = df['Name'].apply(_fix_name)
names_df = df['Name'].str.split('[ _]', expand=True)
names_df.columns = ['strain', 'plasmid', 'mutant', 'colony', 'media',
'carbon source', 'antibiotic', 'treatment',
'injection']
names_df['strain'] = names_df['strain'] + '_' + names_df['mutant']
names_df.index = df.index
df = df.join(names_df)
df.to_csv('out.csv')
return df
def _fix_name(name):
'''fix name.'''
if name.count('_') == 0 or name.count('_') == 6:
return name
pos = [m.start() for m in re.finditer('_', name)][1]
return name[:pos] + '_' + name[pos:]
def main(args):
'''main method.'''
df = get_data(args[0])
analyse(df)
if __name__ == '__main__':
main(sys.argv[1:])
| {
"content_hash": "ad98cd8db43ed0fbeb8ac1aceb00e51f",
"timestamp": "",
"source": "github",
"line_count": 60,
"max_line_length": 73,
"avg_line_length": 22.216666666666665,
"alnum_prop": 0.4988747186796699,
"repo_name": "neilswainston/development-py",
"id": "e035af7b68a189520e8e362be31f3986eafc4ee6",
"size": "1333",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "synbiochemdev/ms/test/analyse.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "1478945"
},
{
"name": "Python",
"bytes": "213190"
}
],
"symlink_target": ""
} |
username = 'admin'
email = '[email protected]'
#####
import airflow
from airflow import models, settings
from airflow.contrib.auth.backends.password_auth import PasswordUser
# https://stackoverflow.com/questions/3854692/generate-password-in-python
import string
from random import sample, choice
chars = string.letters + string.digits
length = 20
password = ''.join(choice(chars) for _ in range(length))
print "%s : %s" % (username, password)
user = PasswordUser(models.User())
user.username = username
user.email = email
user.password = password
session = settings.Session()
session.add(user)
session.commit()
session.close()
exit()
| {
"content_hash": "227f763ca36fd69dacacb469a8a00f37",
"timestamp": "",
"source": "github",
"line_count": 27,
"max_line_length": 73,
"avg_line_length": 23.666666666666668,
"alnum_prop": 0.755868544600939,
"repo_name": "teamclairvoyant/hadoop-deployment-bash",
"id": "1853e1497434a3a498ff1a5ba2fc0b93c0d116f6",
"size": "710",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "services/airflow/mkuser.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "710"
},
{
"name": "Shell",
"bytes": "627305"
}
],
"symlink_target": ""
} |
"""
Methods for accessing Bag entities, GET the
tiddlers in the bag, list the available bags,
PUT a Bag as a JSON object.
These need some refactoring.
"""
from tiddlyweb.model.bag import Bag
from tiddlyweb.model.collections import Tiddlers
from tiddlyweb.model.policy import create_policy_check
from tiddlyweb.store import NoBagError, StoreMethodNotImplemented
from tiddlyweb.serializer import (Serializer, NoSerializationError,
BagFormatError)
from tiddlyweb.web import util as web
from tiddlyweb.web.sendentity import send_entity
from tiddlyweb.web.sendtiddlers import send_tiddlers
from tiddlyweb.web.listentities import list_entities
from tiddlyweb.web.http import HTTP400, HTTP404, HTTP409, HTTP415
from tiddlyweb.web.validator import validate_bag, InvalidBagError
def delete(environ, start_response):
"""
Remove a bag and its tiddlers from the store.
How the store chooses to handle remove and what
it means is up to the store.
"""
bag_name = web.get_route_value(environ, 'bag_name')
bag_name = web.handle_extension(environ, bag_name)
usersign = environ['tiddlyweb.usersign']
bag = _get_bag(environ, bag_name)
bag.policy.allows(usersign, 'manage')
# reuse the store attribute that was set on the
# bag when we "got" it.
# we don't need to check for existence here because
# the above get already did
try:
store = environ['tiddlyweb.store']
store.delete(bag)
except StoreMethodNotImplemented:
raise HTTP400('Bag DELETE not supported')
start_response("204 No Content", [])
return []
def get(environ, start_response):
"""
Get a representation in some serialization of
a bag (the bag itself not the tiddlers within).
"""
bag_name = web.get_route_value(environ, 'bag_name')
bag_name = web.handle_extension(environ, bag_name)
bag = _get_bag(environ, bag_name)
bag.policy.allows(environ['tiddlyweb.usersign'], 'manage')
return send_entity(environ, start_response, bag)
def get_tiddlers(environ, start_response):
"""
Get a list representation of the tiddlers in a
bag. The information sent is dependent on the
serialization chosen.
"""
store = environ['tiddlyweb.store']
filters = environ['tiddlyweb.filters']
bag_name = web.get_route_value(environ, 'bag_name')
bag = _get_bag(environ, bag_name)
title = 'Tiddlers From Bag %s' % bag.name
title = environ['tiddlyweb.query'].get('title', [title])[0]
usersign = environ['tiddlyweb.usersign']
# will raise exception if there are problems
bag.policy.allows(usersign, 'read')
if filters:
tiddlers = Tiddlers(title=title)
else:
tiddlers = Tiddlers(title=title, store=store)
for tiddler in store.list_bag_tiddlers(bag):
tiddlers.add(tiddler)
tiddlers.link = '%s/tiddlers' % web.bag_url(environ, bag, full=False)
return send_tiddlers(environ, start_response, tiddlers=tiddlers)
def list_bags(environ, start_response):
"""
List all the bags that the current user can read.
"""
store = environ['tiddlyweb.store']
serialize_type, mime_type = web.get_serialize_type(environ)
serializer = Serializer(serialize_type, environ)
return list_entities(environ, start_response, mime_type, store.list_bags,
serializer.list_bags)
def put(environ, start_response):
"""
Put a bag to the server, meaning the description and
policy of the bag, if policy allows.
"""
bag_name = web.get_route_value(environ, 'bag_name')
bag_name = web.handle_extension(environ, bag_name)
bag = Bag(bag_name)
store = environ['tiddlyweb.store']
length = environ['CONTENT_LENGTH']
usersign = environ['tiddlyweb.usersign']
try:
bag = store.get(bag)
bag.policy.allows(usersign, 'manage')
except NoBagError:
create_policy_check(environ, 'bag', usersign)
try:
serialize_type = web.get_serialize_type(environ)[0]
serializer = Serializer(serialize_type, environ)
serializer.object = bag
content = environ['wsgi.input'].read(int(length))
serializer.from_string(content.decode('utf-8'))
bag.policy.owner = usersign['name']
_validate_bag(environ, bag)
store.put(bag)
except BagFormatError, exc:
raise HTTP400('unable to put bag: %s' % exc)
except TypeError:
raise HTTP400('Content-type header required')
except NoSerializationError:
raise HTTP415('Content type not supported: %s' % serialize_type)
start_response("204 No Content",
[('Location', web.bag_url(environ, bag))])
return []
def _validate_bag(environ, bag):
"""
Unless bag is valid raise a 409 with the reason why.
"""
try:
validate_bag(bag, environ)
except InvalidBagError, exc:
raise HTTP409('Bag content is invalid: %s' % exc)
def _get_bag(environ, bag_name):
"""
Get the named bag out of the store.
"""
store = environ['tiddlyweb.store']
bag = Bag(bag_name)
try:
bag = store.get(bag)
except NoBagError, exc:
raise HTTP404('%s not found, %s' % (bag.name, exc))
return bag
| {
"content_hash": "7230b7ad799c43f8b5cc4dbeee9f062f",
"timestamp": "",
"source": "github",
"line_count": 168,
"max_line_length": 77,
"avg_line_length": 31.053571428571427,
"alnum_prop": 0.6731838221199923,
"repo_name": "funkyeah/tiddlyweb",
"id": "1cc6e260e3b2c14dddb9fd49c7a7d0c20c540490",
"size": "5217",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tiddlyweb/web/handler/bag.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [],
"symlink_target": ""
} |
from oslo_config import cfg
from oslo_db import api
CONF = cfg.CONF
_BACKEND_MAPPING = {'sqlalchemy': 'bilean.db.sqlalchemy.api'}
IMPL = api.DBAPI.from_config(CONF, backend_mapping=_BACKEND_MAPPING)
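# Every function below is a thin proxy that forwards to the backend selected
# above, e.g. (illustrative call, names not from this module):
#   db_api.user_get_all(ctx, filters={'status': 'active'})
# simply dispatches to bilean.db.sqlalchemy.api.user_get_all(...).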
def get_engine():
return IMPL.get_engine()
def get_session():
return IMPL.get_session()
def db_sync(engine, version=None):
"""Migrate the database to `version` or the most recent version."""
return IMPL.db_sync(engine, version=version)
def db_version(engine):
"""Display the current database version."""
return IMPL.db_version(engine)
def user_get(context, user_id, show_deleted=False, tenant_safe=True):
return IMPL.user_get(context, user_id,
show_deleted=show_deleted,
tenant_safe=tenant_safe)
def user_update(context, user_id, values):
return IMPL.user_update(context, user_id, values)
def user_create(context, values):
return IMPL.user_create(context, values)
def user_delete(context, user_id):
return IMPL.user_delete(context, user_id)
def user_get_all(context, show_deleted=False, limit=None,
marker=None, sort_keys=None, sort_dir=None,
filters=None):
return IMPL.user_get_all(context, show_deleted=show_deleted,
limit=limit, marker=marker,
sort_keys=sort_keys, sort_dir=sort_dir,
filters=filters)
def rule_get(context, rule_id, show_deleted=False):
    return IMPL.rule_get(context, rule_id, show_deleted=show_deleted)
def rule_get_all(context, show_deleted=False, limit=None,
marker=None, sort_keys=None, sort_dir=None,
filters=None):
return IMPL.rule_get_all(context, show_deleted=show_deleted,
limit=limit, marker=marker,
sort_keys=sort_keys, sort_dir=sort_dir,
filters=filters)
def rule_create(context, values):
return IMPL.rule_create(context, values)
def rule_update(context, rule_id, values):
return IMPL.rule_update(context, rule_id, values)
def rule_delete(context, rule_id):
return IMPL.rule_delete(context, rule_id)
def resource_get(context, resource_id, show_deleted=False, tenant_safe=True):
return IMPL.resource_get(context, resource_id,
show_deleted=show_deleted,
tenant_safe=tenant_safe)
def resource_get_all(context, user_id=None, show_deleted=False,
limit=None, marker=None, sort_keys=None,
sort_dir=None, filters=None, tenant_safe=True):
return IMPL.resource_get_all(context, user_id=user_id,
show_deleted=show_deleted,
limit=limit, marker=marker,
sort_keys=sort_keys, sort_dir=sort_dir,
filters=filters, tenant_safe=tenant_safe)
def resource_create(context, values):
return IMPL.resource_create(context, values)
def resource_update(context, resource_id, values):
return IMPL.resource_update(context, resource_id, values)
def resource_delete(context, resource_id):
IMPL.resource_delete(context, resource_id)
def event_get(context, event_id, tenant_safe=True):
return IMPL.event_get(context, event_id, tenant_safe=tenant_safe)
def event_get_all(context, user_id=None, show_deleted=False,
filters=None, limit=None, marker=None,
sort_keys=None, sort_dir=None, tenant_safe=True,
start_time=None, end_time=None):
return IMPL.event_get_all(context, user_id=user_id,
show_deleted=show_deleted,
filters=filters, limit=limit,
marker=marker, sort_keys=sort_keys,
sort_dir=sort_dir, tenant_safe=tenant_safe,
start_time=start_time, end_time=end_time)
def event_create(context, values):
return IMPL.event_create(context, values)
def event_delete(context, event_id):
return IMPL.event_delete(context, event_id)
def job_create(context, values):
return IMPL.job_create(context, values)
def job_get_all(context, engine_id=None):
return IMPL.job_get_all(context, engine_id=engine_id)
def job_delete(context, job_id):
return IMPL.job_delete(context, job_id)
def policy_get(context, policy_id, show_deleted=False):
    return IMPL.policy_get(context, policy_id, show_deleted=show_deleted)
def policy_get_all(context, limit=None, marker=None, sort_keys=None,
sort_dir=None, filters=None, show_deleted=False):
return IMPL.policy_get_all(context, limit=limit, marker=marker,
sort_keys=sort_keys, sort_dir=sort_dir,
filters=filters, show_deleted=show_deleted)
def policy_create(context, values):
return IMPL.policy_create(context, values)
def policy_update(context, policy_id, values):
return IMPL.policy_update(context, policy_id, values)
def policy_delete(context, policy_id):
return IMPL.policy_delete(context, policy_id)
| {
"content_hash": "a4e3b241f68fbb03f1537f802410a8ab",
"timestamp": "",
"source": "github",
"line_count": 166,
"max_line_length": 77,
"avg_line_length": 31.39156626506024,
"alnum_prop": 0.6252158894645942,
"repo_name": "lvdongbing/bilean",
"id": "49e7a1a52a884085389b90a4c6ed5d1c9bb84750",
"size": "5787",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "bilean/db/api.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "356105"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('cpro', '0016_auto_20171216_1606'),
]
operations = [
migrations.AddField(
model_name='card',
name='leader_skill_apply',
field=models.PositiveIntegerField(null=True, verbose_name=b'Leader Skill: Which idols does it apply to?', choices=[(None, b'Idols of the same type [Cute/Cool/Passion]'), (1, b'Idols of all 3 types, when all types are in the team [Tricolor]'), (2, b'Idols of all 3 types [Shiny]')]),
preserve_default=True,
),
migrations.AddField(
model_name='card',
name='skill_value3',
field=models.FloatField(null=True, verbose_name=b'Other Skill Value'),
preserve_default=True,
),
migrations.AlterField(
model_name='card',
name='i_skill',
field=models.PositiveIntegerField(null=True, verbose_name='Skill', choices=[(0, 'Lesser Perfect Lock'), (1, 'Greater Perfect Lock'), (2, 'Extreme Perfect Lock'), (3, 'Combo Lock'), (4, 'Healer'), (5, 'Life Lock'), (6, 'Combo Bonus'), (7, 'Perfect Score Bonus'), (8, 'Overload'), (9, 'Score Boost'), (10, 'All Round'), (11, 'Concentration'), (12, 'Skill Boost'), (13, 'Cute/Cool/Passion Focus'), (14, 'Encore'), (15, 'Sparkle'), (16, 'Tricolor Synergy')]),
preserve_default=True,
),
migrations.AlterField(
model_name='card',
name='leader_skill_percent',
field=models.FloatField(null=True, verbose_name=b'Leader Skill: Percentage'),
preserve_default=True,
),
migrations.AlterField(
model_name='card',
name='leader_skill_type',
field=models.PositiveIntegerField(null=True, verbose_name=b'Leader Skill: What kind of stat gets raised?', choices=[(0, b'Vocal appeal [Voice]'), (2, b'Visual appeal [Make-up]'), (1, b'Dance appeal [Step]'), (101, b'Vocal/Visual/Dance appeals [Brilliance]'), (105, b'Vocal/Visual/Dance appeals, when only same type in the team [Princess]'), (103, b'Skill probability [Ability]'), (102, b'Life [Energy]'), (104, b'Life, when only same type in the team [Cheer]'), (106, b'Fan gain, end of live [Cinderella Charm]'), (107, b'Rewards, end of live [Fortune Present]')]),
preserve_default=True,
),
]
| {
"content_hash": "7069ce70a4f14fb27167896d59d91daf",
"timestamp": "",
"source": "github",
"line_count": 43,
"max_line_length": 577,
"avg_line_length": 57,
"alnum_prop": 0.6062831497348021,
"repo_name": "SchoolIdolTomodachi/CinderellaProducers",
"id": "62184e3aedc16f6193bbe1ff41bf99a405021b2d",
"size": "2475",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "cpro/migrations/0017_auto_20180830_0333.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "506"
},
{
"name": "HTML",
"bytes": "42216"
},
{
"name": "Python",
"bytes": "173112"
}
],
"symlink_target": ""
} |
"""UserUpdateHandlerFunction
Updates a user's profile attributes.
"""
from __future__ import print_function
import os
import json
import boto3
import botocore
from apigateway_helpers.exception import APIGatewayException
from apigateway_helpers.headers import get_response_headers
cognito_idp_client = boto3.client("cognito-idp")
def lambda_handler(event, context):
print("Event: {}".format(json.dumps(event)))
if "warming" in event and "{}".format(event["warming"]).lower() == "true":
return {
"message": "Warmed!"
}
event["request-body"] = json.loads(event.get("body", "{}"))
new_email_address = event["request-body"].get("email-address", "")
if new_email_address == "":
raise APIGatewayException("Value for \"email-address\" must be specified in request body.", 400)
cognito_auth_provider_string = event["requestContext"]["identity"]["cognitoAuthenticationProvider"]
cognito_idp_name = cognito_auth_provider_string.split(",")[0]
user_pool_id = "/".join(cognito_idp_name.split("/")[1:])
cognito_user_pool_sub_value = cognito_auth_provider_string.split(",")[1].split(":")[2]
response = cognito_idp_client.list_users(
UserPoolId = user_pool_id,
AttributesToGet = [],
Filter = "sub = \"{}\"".format(cognito_user_pool_sub_value),
Limit = 1
)
cognito_user_pool_username = response["Users"][0]["Username"]
cognito_idp_client.admin_update_user_attributes(
UserPoolId = user_pool_id,
Username = cognito_user_pool_username,
UserAttributes = [
{
"Name": "email",
"Value": new_email_address
}
]
)
return {
"registration-id": cognito_user_pool_username,
"message": "E-mail address verification message sent."
}
def proxy_lambda_handler(event, context):
response_headers = get_response_headers(event, context)
try:
return_dict = lambda_handler(event, context)
except APIGatewayException as e:
return {
"statusCode": e.http_status_code,
"headers": response_headers,
"body": json.dumps({
"message": e.http_status_message
})
}
return {
"statusCode": 200,
"headers": response_headers,
"body": json.dumps(return_dict)
} | {
"content_hash": "2e4eaf9d6fd63a2d87a86509eda483ed",
"timestamp": "",
"source": "github",
"line_count": 82,
"max_line_length": 104,
"avg_line_length": 29.71951219512195,
"alnum_prop": 0.6011489536315141,
"repo_name": "moduspwnens/boa-chat",
"id": "06b154d8ed6b2d55cddbcca7cbaaf26a9c4019b6",
"size": "2437",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "boa-nimbus/lambda/UserUpdateHandlerFunction/index.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "7685"
},
{
"name": "Dockerfile",
"bytes": "1002"
},
{
"name": "HTML",
"bytes": "20463"
},
{
"name": "JavaScript",
"bytes": "64145"
},
{
"name": "Python",
"bytes": "160254"
},
{
"name": "Shell",
"bytes": "781"
}
],
"symlink_target": ""
} |
import sys
from test import regrtest
regrtest.main_in_temp_cwd()
| {
"content_hash": "258e8ce8ac4470be1d0c36ecc4e2ff8e",
"timestamp": "",
"source": "github",
"line_count": 3,
"max_line_length": 27,
"avg_line_length": 21.666666666666668,
"alnum_prop": 0.8,
"repo_name": "ms-iot/python",
"id": "09125aa0def08bd39f1461d3f1ea2b76bec8dd8e",
"size": "89",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "cpython/Tools/pyuwp/pyuwpbackgroundservice/StartupTask.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Assembly",
"bytes": "481852"
},
{
"name": "Batchfile",
"bytes": "35616"
},
{
"name": "C",
"bytes": "15555469"
},
{
"name": "C#",
"bytes": "1231"
},
{
"name": "C++",
"bytes": "726292"
},
{
"name": "CSS",
"bytes": "2839"
},
{
"name": "Common Lisp",
"bytes": "24481"
},
{
"name": "DIGITAL Command Language",
"bytes": "26402"
},
{
"name": "HTML",
"bytes": "130698"
},
{
"name": "JavaScript",
"bytes": "10616"
},
{
"name": "M4",
"bytes": "223087"
},
{
"name": "Makefile",
"bytes": "197108"
},
{
"name": "Objective-C",
"bytes": "2098686"
},
{
"name": "PLSQL",
"bytes": "22886"
},
{
"name": "PostScript",
"bytes": "13803"
},
{
"name": "PowerShell",
"bytes": "1372"
},
{
"name": "Python",
"bytes": "24948876"
},
{
"name": "Roff",
"bytes": "254942"
},
{
"name": "Shell",
"bytes": "437386"
},
{
"name": "TeX",
"bytes": "323102"
},
{
"name": "Visual Basic",
"bytes": "481"
}
],
"symlink_target": ""
} |
import sys
from random import randint
class Stand:
def __init__(self,balance,lemons,name,coords):
self.balance = balance
self.lemons = lemons
self.name = name
self.stand_coords = coords
self.ads = set()
def sell(self, cost):
if self.lemons == 0:
raw_input("You need to order more lemons!")
return
sold = 0
for n in range(100):
if randint(0, cost) == 1:
self.balance += cost
self.lemons -= 1
sold += 1
if self.lemons <= 0:
if self.balance < 3:
print "You have no more money - Game Over!"
sys.exit()
else:
print "You ran out of lemons."
break
raw_input("Sold " + str(sold) + " glasses of lemonade")
def buy(self, amount):
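        # Lemons cost 2 each; every full 100 lemons in the order earns a
        # discount of 50 off the total (see the price calculation below).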
oldBalance = self.balance
oldLemons = self.lemons
discount = 0
if amount >= 100:
discount += (amount / 100)
try:
choiceN = amount
self.lemons += choiceN
self.balance -= (2 * choiceN) - (discount * 50)
if self.balance < 0:
self.lemons = oldLemons
self.balance = oldBalance
raw_input("You can't buy that many, you don't have enough money.")
except:
print "You can't do that?"
def has_ads_running(self):
if len(self.ads) > 0:
return True
else:
return False | {
"content_hash": "992fc5bfa42d4237799e360dd274beeb",
"timestamp": "",
"source": "github",
"line_count": 54,
"max_line_length": 82,
"avg_line_length": 30.055555555555557,
"alnum_prop": 0.4596426370918053,
"repo_name": "ProgramQ/lemonator",
"id": "9389f783550e0a9fd06ff9c7bbefd5399badb004",
"size": "1623",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "stand.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "4538"
}
],
"symlink_target": ""
} |
import pytest
@pytest.mark.benchmark(
group="ModelSerializer get_fields",
warmup=True
)
def test_serializer_get_fields(serializer_class, data, benchmark):
serializer = serializer_class(data=data)
@benchmark
def result():
return serializer.get_fields()
@pytest.mark.benchmark(
group="ModelSerializer get_fields",
warmup=True
)
def test_nested_serializer_get_fields(nested_serializer_class, nested_data, benchmark):
serializer = nested_serializer_class(data=nested_data)
@benchmark
def result():
return serializer.get_fields()
@pytest.mark.benchmark(
group="ModelSerializer get_fields twice",
warmup=True
)
def test_serializer_get_fields_twice(serializer_class, data, benchmark):
serializer = serializer_class(data=data)
@benchmark
def result():
serializer.get_fields()
return serializer.get_fields()
@pytest.mark.benchmark(
group="ModelSerializer get_fields twice",
warmup=True
)
def test_nested_serializer_get_fields_twice(nested_serializer_class, nested_data, benchmark):
serializer = nested_serializer_class(data=nested_data)
@benchmark
def result():
serializer.get_fields()
return serializer.get_fields()
| {
"content_hash": "5cafae84bac1471f270d5b56bd7d75c2",
"timestamp": "",
"source": "github",
"line_count": 51,
"max_line_length": 93,
"avg_line_length": 24.470588235294116,
"alnum_prop": 0.7067307692307693,
"repo_name": "thedrow/drf-benchmarks",
"id": "ed2be4c7f5e5fc59a8ab1877fee1e9064a9342e2",
"size": "1294",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "drf_benchmarks/serializers/test_model_serializer_get_fields.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "23633"
}
],
"symlink_target": ""
} |
from setuptools import find_packages
from setuptools import setup
from cvmchain import config
setup(name='cvmchain',
version=config.APP_VERSION,
description='',
author=config.AUTHOR,
author_email=config.AUTHOR_EMAIL,
setup_requires='setuptools',
packages=['cvmchain', 'cvmchain.network', 'cvmchain.chain', 'cvmchain.crypto'],
entry_points={
'console_scripts': [
'cvmchain=cvmchain.cvmchain:main',
'cvm-gengenesis=cvmchain.cvmchain:gengenesis',
],
},
install_requires=open ('requirements.txt', 'r').read ().split ('\n')
)
| {
"content_hash": "64612b84934b81d11403fa31f233deca",
"timestamp": "",
"source": "github",
"line_count": 19,
"max_line_length": 80,
"avg_line_length": 28.473684210526315,
"alnum_prop": 0.7319778188539742,
"repo_name": "dakk/cvmchain",
"id": "0c871bd146c9d53216535bf9ad9878690d078d04",
"size": "718",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "32960"
},
{
"name": "Shell",
"bytes": "430"
}
],
"symlink_target": ""
} |
from ggame import App, RectangleAsset, ImageAsset, Sprite, LineStyle, Color, Frame
from random import randrange
black = Color(0x000000, 1.0)
blue = Color(0x2D9FC2,1.0)
white = Color(0xFFFFFF, 1.0)
red = Color(0xFC5D5D,1.0)
grey = Color(0x858585,1.0)
thinline = LineStyle(1, black)
celld = 35
rectangle3 = RectangleAsset(celld, celld, thinline, red)
rectangle = RectangleAsset(celld, celld, thinline, blue)
rectangle2 = RectangleAsset(celld, celld, thinline, white)
rectangle4 = RectangleAsset(celld, celld, thinline, black)
rectangle5 = RectangleAsset(celld, celld, thinline, grey)
ocean = {}
oceanself = {}
enemyBoats = {}
selfBoats = {}
selfBoatsalive = {}
enemyboatsalive = {}
enemyboatsunk = {}
selfBoatssunk = {}
cpushots = {}
length = 5
height = 10
width = 10
overlapping = True
shipsmadevisible = 0
shooting = False
class cell(Sprite):
def __init__(self, asset, position):
super().__init__(asset, position)
self.visible = True
class enemyships(Sprite):
def __init__(self, asset, position):
super().__init__(asset, position)
self.visible = False
for x in range(0, height):
for y in range(0, width):
#screen one set up
Sprite(rectangle2, (x*celld, y*celld))
enemyBoats[(x,y)] = enemyships(rectangle3, (x*celld, y*celld))
ocean[(x,y)] = cell(rectangle, (x*celld, y*celld))
#screen two set up
yshift = height*celld + 20
Sprite(rectangle2, (x*celld, y*celld + yshift))
selfBoats[(x,y+10)] = cell(rectangle5, (x*celld, y*celld + yshift))
selfBoatssunk[(x,y+10)] = cell(rectangle3, (x*celld, y*celld + yshift))
oceanself[(x,y+10)] = cell(rectangle, (x*celld, y*celld + yshift))
while overlapping == True:
shipsmadevisible = 0
for aaa in range(0, height):
for bbb in range(0, width):
enemyBoats[(aaa,bbb)].visible = False
for a in range(0, 3):
randx = randrange(1, 10)
randy = randrange(1, 10)
if randx > 5:
nsx = -1
else:
nsx = 1
if randy > 5:
nsy = -1
else:
nsy = 1
randxy = randrange(1, 3)
for u in range(0, length-1):
enemyBoats[(randx+nsx,randy+nsy)].visible = True
enemyboatsalive[(randx+nsx,randy+nsy)] = (randx+nsx,randy+nsy)
if randxy == 2:
randx = randx + 1
else:
randy = randy + 1
length = length - 1
for aa in range(0, height):
for bb in range(0, width):
if enemyBoats[(aa,bb)].visible == True:
shipsmadevisible =shipsmadevisible + 1
if shipsmadevisible == 9:
overlapping = False
class Battleship(App):
def __init__(self):
Battleship.listenKeyEvent("keydown", "space", self.spaceclick)
SCREEN_WIDTH = 1000
SCREEN_HEIGHT = 1000
self.going = False
self.squarehit = 0
self.playerturn = True
self.nonalcoholicshotstaken = 0
super().__init__(SCREEN_WIDTH, SCREEN_HEIGHT)
Battleship.listenMouseEvent("click",self.breathlife)
def spaceclick(self,event):
self.going = True
def breathlife(self, event):
self.cx = int(event.x/celld)
self.cy = int(event.y/celld)
if self.playerturn == True:
if self.going == False:
if (self.cx, self.cy-1) in oceanself:
oceanself[(self.cx, self.cy-1)].visible = not oceanself[(self.cx, self.cy-1)].visible
selfBoats[(self.cx,self.cy-1)].visible = not selfBoats[(self.cx,self.cy-1)].visible
selfBoatsalive[(self.cx,self.cy-1)] = (self.cx,self.cy-1)
else:
if (self.cx, self.cy) in ocean:
ocean[(self.cx, self.cy)].visible = False
if (self.cx, self.cy) in enemyboatsalive and (self.cx, self.cy) not in enemyboatsunk:
self.squarehit = self.squarehit + 1
enemyboatsunk[self.cx, self.cy] = "hit"
if self.going == True:
self.playerturn = False
self.nonalcoholicshotstaken = self.nonalcoholicshotstaken + 1
def step(self):
if self.squarehit == 9:
for j in range(0, height):
for k in range(0, width):
ocean[(j,k)].visible = False
if self.going == True:
if self.playerturn == False:
#while shooting == False:
randshotx = randrange(0, 10)
randshoty = randrange(10, 20)
#if (randshotx, randshoty) not in cpushots:
#shooting = True
cpushots[(randshotx, randshoty)] = (randshotx, randshoty)
print("shots fired")
oceanself[(randshotx, randshoty)].visible = False
if (randshotx, randshoty) in selfBoatsalive:
selfBoats[(randshotx, randshoty)].visible = False
selfBoatssunk[(randshotx, randshoty)].visible = True
print("hit")
self.playerturn = True
myapp = Battleship()
myapp.run()
"""
from ggame import App, RectangleAsset, ImageAsset, Sprite, LineStyle, Color, Frame
black = Color(0x000000, 1.0)
green = Color(0x00ff00, 1.0)
orange = Color(0xFF8400,1.0)
thinline = LineStyle(1, black)
a = 0
b = 0
height = 20
width = 20
ocean = {}
thinline = LineStyle(1, black)
rectangle = RectangleAsset(20, 20, thinline, green)
rectangle2 = RectangleAsset(20, 20, thinline, orange)
class cell(Sprite):
def __init__(self, asset, position):
super().__init__(asset, position)
self.visible = False
self.sca = 0
for x in range(0, height):
for y in range(0, width):
Sprite(rectangle2, (x*height, y*width))
ocean[(x,y)] = cell(rectangle, (x*height, y*width))
class ConwayGame(App):
def __init__(self):
ConwayGame.listenKeyEvent("keydown", "space", self.spaceclick)
SCREEN_WIDTH = 640
SCREEN_HEIGHT = 480
self.going = False
super().__init__(SCREEN_WIDTH, SCREEN_HEIGHT)
ConwayGame.listenMouseEvent("click",self.breathlife)
def breathlife(self, event):
self.cx = int(event.x/20)
self.cy = int(event.y/20)
ocean[(self.cx, self.cy)].visible = not ocean[(self.cx, self.cy)].visible
def spaceclick(self,event):
self.going = not self.going
def step(self):
if self.going == True:
for g in range(0, height):
for f in range(0, width):
if ocean[(g,f)].visible == True:
ocean[(g,f)].sca = ocean[(g,f)].sca - 1
for w in range(-1, 2):
for h in range(-1, 2):
if (w+g, h+f) in ocean and ocean[(w+g, h+f)].visible == True:
ocean[(g,f)].sca = ocean[(g,f)].sca + 1
for s in range(0, height):
for d in range(0, width):
if ocean[(s, d)].visible == True and ocean[(s, d)].sca < 2:
ocean[(s, d)].visible = False
elif ocean[(s, d)].visible == True and ocean[(s, d)].sca > 3:
ocean[(s, d)].visible = False
elif ocean[(s, d)].visible == False and ocean[(s, d)].sca == 3:
ocean[(s, d)].visible = True
ocean[(s,d)].sca = 0
myapp = ConwayGame()
myapp.run()
""" | {
"content_hash": "644b980bb0913c86b29dd23c09bee99f",
"timestamp": "",
"source": "github",
"line_count": 216,
"max_line_length": 105,
"avg_line_length": 35.532407407407405,
"alnum_prop": 0.5439739413680782,
"repo_name": "glenpassow/Final-Project",
"id": "24af9f1d7df9e13ae53546a7ca298beba3fd2eca",
"size": "7675",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Final.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "75900"
}
],
"symlink_target": ""
} |
import os
import os.path
import io
from operator import itemgetter
import logging
import SPARQLWrapper
from .constants import DEFAULT_TEXT_LANG
from .utils import KrnlException, is_collection
# Maximum number of nested magic files
MAX_RECURSE = 10
# The list of implemented magics with their help, as a pair [param,help-text]
MAGICS = {
'%lsmagics': ['', 'list all magics'],
'%load': ['<filename>', 'load a file with magic lines and process them'],
'%endpoint': ['<url>', 'set SPARQL endpoint. **REQUIRED**'],
'%auth': ['(basic|digest|none) <username> <passwd>', 'send HTTP authentication (use env:<var> to get values from environment variables)'],
'%qparam': ['<name> [<value>]', 'add (or delete) a persistent custom parameter to all queries'],
'%http_header': ['<name> [<value>]', 'add (or delete) an arbitrary HTTP header to all queries'],
'%prefix': ['<name> [<uri>]', 'set (or delete) a persistent URI prefix for all queries'],
'%header': ['<string> | OFF', 'add a persistent SPARQL header line before all queries, or delete all defined headers'],
'%graph': ['<uri>', 'set default graph for the queries'],
'%format': ['JSON | N3 | XML | default | any | none', 'set requested result format'],
'%display': ['raw | table [withtypes] | diagram [svg|png] [withliterals]',
'set display format'],
'%lang': ['<lang> [...] | default | all',
'language(s) preferred for labels'],
'%show': ['<n> | all',
'maximum number of shown results'],
'%outfile': ['<filename> | off', 'save raw output to a file (use "%d" in name to add cell number, "off" to cancel saving)'],
'%log': ['critical | error | warning | info | debug',
'set logging level'],
'%method': ['get | post', 'set HTTP method'],
}
# The full list of all magics
MAGIC_HELP = ('Available magics:\n' +
' '.join(sorted(MAGICS.keys())) +
'\n\n' +
'\n'.join(('{0} {1} : {2}'.format(k, *v)
for k, v in sorted(MAGICS.items(), key=itemgetter(0)))))
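# Illustrative note (not part of the original module): a file loaded with
# "%load <filename>" is plain text in which every non-blank, non-comment line
# is one of the magics listed above, for example:
#
#   %endpoint http://example.org/sparql
#   %format JSON
#   %show 50
#   %prefix dc http://purl.org/dc/elements/1.1/
#
# The endpoint URL and prefix values here are hypothetical placeholders.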
# -----------------------------------------------------------------------------
def split_lines(buf):
'''
    Split a buffer into lines, skipping empty lines and comment lines, and
stripping whitespace at the beginning or end of lines
'''
return [line for line in map(lambda x: x.strip(), buf.split('\n'))
if line and line[0] != '#']
def process_magic(line, cfg, _recurse=0):
"""
Read and process magics
@param line (str): the full line containing a magic
    @param cfg: the kernel configuration object (modified in place)
    @return (list): a tuple (output-message, css-class), where
the output message can be a single string or a list (containing
a Python format string and its arguments)
"""
if _recurse > MAX_RECURSE:
raise KrnlException('maximum magic file recursion level exceeded')
# The %lsmagic has no parameters
if line.startswith('%lsmagic'):
return MAGIC_HELP, 'magic-help'
# Split line into command & parameters
try:
cmd, param = line.split(None, 1)
except ValueError:
raise KrnlException("invalid magic line: {}", line)
cmd = cmd[1:].lower()
# Process each magic
if cmd == 'load':
try:
with io.open(param, 'rt', encoding='utf-8') as f:
buf = f.read()
except Exception as e:
raise KrnlException("cannot read magics file '{}': {}", param, e)
for line in split_lines(buf):
if line[0] != '%':
raise KrnlException("error in file '{}': non-magic line found: {}",
param, line)
process_magic(line, cfg, _recurse+1)
elif cmd == 'endpoint':
cfg.ept = param
return ['Endpoint set to: {}', param], 'magic'
elif cmd == 'auth':
auth_data = param.split(None, 2)
if auth_data[0].lower() == 'none':
cfg.aut = None
return ['HTTP authentication: None'], 'magic'
if auth_data and len(auth_data) != 3:
raise KrnlException("invalid %auth magic")
try:
auth_data = [os.environ[v[4:]] if v.startswith(('env:', 'ENV:')) else v
for v in auth_data]
except KeyError as e:
raise KrnlException("cannot find environment variable: {}", e)
cfg.aut = auth_data
return ['HTTP authentication: method={}, user={}, passwd set',
auth_data[0], auth_data[1]], 'magic'
elif cmd == 'qparam':
v = param.split(None, 1)
if len(v) == 0:
raise KrnlException("missing %qparam name")
elif len(v) == 1:
cfg.par.pop(v[0], None)
return ['Param deleted: {}', v[0]], 'magic'
else:
cfg.par[v[0]] = v[1]
return ['Param set: {} = {}'] + v, 'magic'
elif cmd == 'http_header':
v = param.split(None, 1)
if len(v) == 0:
raise KrnlException("missing %http_header name")
elif len(v) == 1:
try:
del cfg.hhr[v[0]]
return ['HTTP header deleted: {}', v[0]], 'magic'
except KeyError:
return ['Not-existing HTTP header: {}', v[0]], 'magic'
else:
cfg.hhr[v[0]] = v[1]
return ['HTTP header set: {} = {}'] + v, 'magic'
elif cmd == 'prefix':
v = param.split(None, 1)
if len(v) == 0:
raise KrnlException("missing %prefix value")
elif len(v) == 1:
cfg.pfx.pop(v[0], None)
return ['Prefix deleted: {}', v[0]], 'magic'
else:
cfg.pfx[v[0]] = v[1]
return ['Prefix set: {} = {}'] + v, 'magic'
elif cmd == 'show':
if param == 'all':
cfg.lmt = None
else:
try:
cfg.lmt = int(param)
except ValueError as e:
raise KrnlException("invalid result limit: {}", e)
sz = cfg.lmt if cfg.lmt is not None else 'unlimited'
return ['Result maximum size: {}', sz], 'magic'
elif cmd == 'format':
fmt_list = {'JSON': SPARQLWrapper.JSON,
'N3': SPARQLWrapper.N3,
'XML': SPARQLWrapper.XML,
'RDF': SPARQLWrapper.RDF,
'NONE': None,
'DEFAULT': True,
'ANY': False}
try:
fmt = param.upper()
cfg.fmt = fmt_list[fmt]
except KeyError:
raise KrnlException('unsupported format: {}\nSupported formats are: {!s}', param, list(fmt_list.keys()))
return ['Request format: {}', fmt], 'magic'
elif cmd == 'lang':
cfg.lan = DEFAULT_TEXT_LANG if param == 'default' else [] if param == 'all' else param.split()
return ['Label preferred languages: {}', cfg.lan], 'magic'
    elif cmd == 'graph':
cfg.grh = param if param else None
return ['Default graph: {}', param if param else 'None'], 'magic'
elif cmd == 'display':
v = param.lower().split(None, 2)
if len(v) == 0 or v[0] not in ('table', 'raw', 'graph', 'diagram'):
raise KrnlException('invalid %display command: {}', param)
msg_extra = ''
if v[0] not in ('diagram', 'graph'):
cfg.dis = v[0]
cfg.typ = len(v) > 1 and v[1].startswith('withtype')
if cfg.typ and cfg.dis == 'table':
msg_extra = '\nShow Types: on'
elif len(v) == 1: # graph format, defaults
cfg.dis = ['svg']
else: # graph format, with options
if v[1] not in ('png', 'svg'):
raise KrnlException('invalid graph format: {}', param)
if len(v) > 2:
if not v[2].startswith('withlit'):
raise KrnlException('invalid graph option: {}', param)
msg_extra = '\nShow literals: on'
cfg.dis = v[1:3]
display = cfg.dis[0] if is_collection(cfg.dis) else cfg.dis
return ['Display: {}{}', display, msg_extra], 'magic'
elif cmd == 'outfile':
if param in ('NONE', 'OFF'):
cfg.out = None
return ['no output file'], 'magic'
else:
cfg.out = param
return ['Output file: {}', os.path.abspath(param)], 'magic'
elif cmd == 'log':
if not param:
raise KrnlException('missing log level')
try:
lev = param.upper()
parent_logger = logging.getLogger(__name__.rsplit('.', 1)[0])
parent_logger.setLevel(lev)
return ("Logging set to {}", lev), 'magic'
except ValueError:
raise KrnlException('unknown log level: {}', param)
elif cmd == 'header':
if param.upper() == 'OFF':
num = len(cfg.hdr)
cfg.hdr = []
return ['All headers deleted ({})', num], 'magic'
else:
if param in cfg.hdr:
return ['Header skipped (repeated)'], 'magic'
cfg.hdr.append(param)
return ['Header added: {}', param], 'magic'
elif cmd == 'method':
method = param.upper()
if method not in ('GET', 'POST'):
raise KrnlException('invalid HTTP method: {}', param)
cfg.mth = method
return ['HTTP method: {}', method], 'magic'
else:
raise KrnlException("magic not found: {}", cmd)
| {
"content_hash": "e574bd0a074246c5b64887ebee493b46",
"timestamp": "",
"source": "github",
"line_count": 265,
"max_line_length": 146,
"avg_line_length": 36.06415094339623,
"alnum_prop": 0.5177356911164591,
"repo_name": "paulovn/sparql-kernel",
"id": "8f4a01c2a2f58ad1ef6c98c4f48775b76a2d6c33",
"size": "9558",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "sparqlkernel/magics.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "1582"
},
{
"name": "Python",
"bytes": "66178"
}
],
"symlink_target": ""
} |
from JumpScale import j
from time import sleep
app = j.tools.cuisine._getBaseAppClass()
class CuisineOwncloud(app):
NAME = 'owncloud'
def __init__(self, executor, cuisine):
self._executor = executor
self._cuisine = cuisine
def install(self, start=True):
"""
install owncloud 9.1 on top of nginx/php/tidb
tidb is the mysql alternative which is ultra reliable & distributed
REQUIREMENT: nginx/php/tidb installed before
"""
# TODO: *1
if start:
self.start("?")
def start(self, name="???"):
# TODO:*1
pass
def test(self):
# call the api up/download a file
pass
| {
"content_hash": "da699d013544a52a20ca9fe7b6fdbc7a",
"timestamp": "",
"source": "github",
"line_count": 33,
"max_line_length": 75,
"avg_line_length": 21.303030303030305,
"alnum_prop": 0.5803698435277382,
"repo_name": "Jumpscale/jumpscale_core8",
"id": "364950a5560091e73ebe6fd8a2aa2d8c3e66fe33",
"size": "703",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "lib/JumpScale/tools/cuisine/solutions/CuisineOwncloud.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "1113"
},
{
"name": "Cap'n Proto",
"bytes": "9033"
},
{
"name": "Lua",
"bytes": "12538"
},
{
"name": "Python",
"bytes": "4343122"
},
{
"name": "Shell",
"bytes": "7091"
}
],
"symlink_target": ""
} |
import numpy as np
# import accelerate.cuda as cuda
from data_prep import features, targets, features_test, targets_test
# useGPU = cuda.cuda_compatible()
np.random.seed(21)
def sigmoid(x):
# calculate sigmoid
return 1 / (1+np.exp(-x))
#Hyper parameters
n_hidden = 2 # number of hidden units
epocs = 900
learn_rate = 0.042
n_records, n_features = features.shape
last_loss = None
#Initialise weights
weights_input_hidden = np.random.normal(scale=1 / n_features ** .5,
size=(n_features, n_hidden))
weights_hidden_output = np.random.normal(scale=1 / n_features ** .5,
size=n_hidden)
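# Restating the update rule the loop below implements (informal notation,
# added for clarity; it mirrors the code rather than introducing anything new):
#   output error term:  delta_o = (y - yhat) * yhat * (1 - yhat)
#   hidden error term:  delta_h = (delta_o * W_hidden_output) * h * (1 - h)
#   weight updates (averaged over the n_records samples):
#       W_hidden_output += learn_rate * mean(delta_o * h)
#       W_input_hidden  += learn_rate * mean(delta_h * x)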
for e in range(epocs):
delta_w_input_hidden = np.zeros(weights_input_hidden.shape)
delta_w_hidden_output = np.zeros(weights_hidden_output.shape)
for x, y in zip(features.values, targets):
## Forward pass
        # calculate the output
hidden_input = np.dot(x, weights_input_hidden)
hidden_output = sigmoid(hidden_input)
output = sigmoid(np.dot(hidden_output, weights_hidden_output))
        # backward pass
        # calculate the network's prediction error
error = y - output
# calculate error term for the output unit
output_error_term = error * output * (1-output)
        ## pass through to hidden layer
        # calculate the hidden layer's contribution to the error
hidden_error = np.dot(output_error_term, weights_hidden_output)
# calculate the error for hidden layer
hidden_error_term = hidden_error * hidden_output * (1-hidden_output)
# update the change in weights
delta_w_hidden_output += output_error_term * hidden_output
delta_w_input_hidden += hidden_error_term * x[:, None]
weights_hidden_output += learn_rate * delta_w_hidden_output / n_records
weights_input_hidden += learn_rate * delta_w_input_hidden / n_records
    # output the mean squared error on the full training set
    if e % (epocs / 10) == 0:
        hidden_output = sigmoid(np.dot(features, weights_input_hidden))
        out = sigmoid(np.dot(hidden_output, weights_hidden_output))
        loss = np.mean((out-targets)**2)
if last_loss and last_loss < loss:
print("WARN: Loss Increasing. Loss: ", loss)
else:
print("INFO: Loss: ", loss)
# Calculate accuracy on the test data
hidden = sigmoid(np.dot(features_test, weights_input_hidden))
out = sigmoid(np.dot(hidden, weights_hidden_output))
predictions = out > 0.5
accuracy = np.mean(predictions == targets_test)
print("Prediction accuracy: {:.3f}".format(accuracy))
| {
"content_hash": "43fd12f5d1b9403314504608003dbc9a",
"timestamp": "",
"source": "github",
"line_count": 81,
"max_line_length": 76,
"avg_line_length": 32.407407407407405,
"alnum_prop": 0.6434285714285715,
"repo_name": "morphean/deep-learning",
"id": "9ffec10295b68524d67398e24ef7ea117ddac352",
"size": "2625",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "neural-network/backpropogation_2.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "35715"
},
{
"name": "Python",
"bytes": "16017"
}
],
"symlink_target": ""
} |
import contextlib
import json
import io
import logging
import handler
import models
import transaction
models.init()
queries_log = []
logger = logging.getLogger('peewee')
logger.setLevel(logging.DEBUG)
logger.addHandler(handler.ListHandler(queries_log))
out = io.StringIO()
with contextlib.redirect_stdout(out):
transaction.execute()
combined = dict(
output=out.getvalue(),
queries=[{'sql': q, 'time': .0} for q in queries_log])
print(json.dumps(combined, indent=2))
| {
"content_hash": "44b79762a4ee193604048552d3dbbe84",
"timestamp": "",
"source": "github",
"line_count": 28,
"max_line_length": 58,
"avg_line_length": 17.428571428571427,
"alnum_prop": 0.735655737704918,
"repo_name": "omaraboumrad/djanground",
"id": "4674f591a6badf2a820068b276994530ea573a10",
"size": "488",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "executors/python-peewee/sample/run.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "4800"
},
{
"name": "JavaScript",
"bytes": "4486"
},
{
"name": "Python",
"bytes": "15179"
},
{
"name": "Shell",
"bytes": "219"
}
],
"symlink_target": ""
} |
from datetime import datetime
import time
# htmlentitydefs was renamed as html.entities in python3
try:
import html.entities as htmlentitydefs
except ImportError:
try:
import htmlentitydefs
except ImportError:
raise ImportError("Import error. There is no html.entities or htmlentitydefs module")
import re
import locale
from urllib.parse import quote
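# For reference (example value, not taken from this file): Twitter returns
# created_at timestamps such as "Wed Aug 27 13:08:45 +0000 2008", which is the
# format parsed by parse_datetime() below; parse_search_datetime() handles the
# RFC-2822-style variant "Wed, 27 Aug 2008 13:08:45 +0000".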
def parse_datetime(string):
# Set locale for date parsing
locale.setlocale(locale.LC_TIME, 'C')
# We must parse datetime this way to work in python 2.4
date = datetime(*(time.strptime(string, '%a %b %d %H:%M:%S +0000 %Y')[0:6]))
# Reset locale back to the default setting
locale.setlocale(locale.LC_TIME, '')
return date
def parse_html_value(html):
return html[html.find('>')+1:html.rfind('<')]
def parse_a_href(atag):
start = atag.find('"') + 1
end = atag.find('"', start)
return atag[start:end]
def parse_search_datetime(string):
# Set locale for date parsing
locale.setlocale(locale.LC_TIME, 'C')
# We must parse datetime this way to work in python 2.4
date = datetime(*(time.strptime(string, '%a, %d %b %Y %H:%M:%S +0000')[0:6]))
# Reset locale back to the default setting
locale.setlocale(locale.LC_TIME, '')
return date
def unescape_html(text):
"""Created by Fredrik Lundh (http://effbot.org/zone/re-sub.htm#unescape-html)"""
def fixup(m):
text = m.group(0)
if text[:2] == "&#":
# character reference
try:
                if text[:3] == "&#x":
                    return chr(int(text[3:-1], 16))
                else:
                    return chr(int(text[2:-1]))
except ValueError:
pass
else:
# named entity
try:
                text = chr(htmlentitydefs.name2codepoint[text[1:-1]])
except KeyError:
pass
return text # leave as is
    return re.sub(r"&#?\w+;", fixup, text)
def convert_to_utf8_str(arg):
# written by Michael Norton (http://docondev.blogspot.com/)
if isinstance(arg, str):
arg = arg.encode('utf-8')
    else:
arg = str(arg).encode('utf-8')
return arg
def convert_to_unicodePoints(arg):  # Python 3 or greater
    if not isinstance(arg, str):
        return str(arg, 'utf-8')
    return arg
def import_simplejson():
try:
import json # Python 2.6+
except ImportError:
raise ImportError("Can't load a json library")
return json
def list_to_csv(item_list):
if item_list:
return ','.join([str(i) for i in item_list])
def urlencode_noplus(query):
return '&'.join(['%s=%s' % (quote(str(k), ''), quote(str(v), '')) \
        for k, v in query.items()])
| {
"content_hash": "49104ad549b8e9b7e4d252a01180bb98",
"timestamp": "",
"source": "github",
"line_count": 105,
"max_line_length": 93,
"avg_line_length": 26.523809523809526,
"alnum_prop": 0.5960502692998204,
"repo_name": "felHR85/Tweepy-3",
"id": "e759f6a9177918bb6c1e9e365e63920e7f404489",
"size": "2893",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tweepy/utils.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "132485"
},
{
"name": "Shell",
"bytes": "4511"
}
],
"symlink_target": ""
} |
from keystone.auth import controllers
def append_v3_routers(mapper, routers):
auth_controller = controllers.Auth()
mapper.connect('/auth/tokens',
controller=auth_controller,
action='authenticate_for_token',
conditions=dict(method=['POST']))
mapper.connect('/auth/tokens',
controller=auth_controller,
action='check_token',
conditions=dict(method=['HEAD']))
mapper.connect('/auth/tokens',
controller=auth_controller,
action='revoke_token',
conditions=dict(method=['DELETE']))
mapper.connect('/auth/tokens',
controller=auth_controller,
action='validate_token',
conditions=dict(method=['GET']))
mapper.connect('/auth/tokens/OS-PKI/revoked',
controller=auth_controller,
action='revocation_list',
conditions=dict(method=['GET']))
| {
"content_hash": "b018dba45c64ad249be32b6f8ab2a130",
"timestamp": "",
"source": "github",
"line_count": 26,
"max_line_length": 54,
"avg_line_length": 39.84615384615385,
"alnum_prop": 0.5386100386100386,
"repo_name": "kwss/keystone",
"id": "52466f348ef330f05042244aacd764bf8519bc8d",
"size": "1660",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "keystone/auth/routers.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "16002"
},
{
"name": "JavaScript",
"bytes": "7403"
},
{
"name": "Python",
"bytes": "1382015"
},
{
"name": "Shell",
"bytes": "12039"
}
],
"symlink_target": ""
} |
import six
class SymbolsMeta(type):
def __new__(cls, name, bases, attrs):
syms = {}
reverse = {}
for k, v in attrs.items():
if not k.startswith('_') and isinstance(v, int):
syms[k] = v
reverse[v] = k
attrs['_symbols'] = syms
attrs['_reverse_symbols'] = reverse
return type.__new__(cls, name, bases, attrs)
@six.add_metaclass(SymbolsMeta)
class Symbols(object):
def __getitem__(self, k):
if isinstance(k, str):
return self._symbols[k]
elif isinstance(k, int):
return self._reverse_symbols[k]
raise AttributeError(k)
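# Illustrative sketch (not part of the original module): a subclass declares
# its symbols as integer class attributes, and an instance can then be looked
# up by name or by value. The class name and values below are hypothetical.
#
#   class Tokens(Symbols):
#       NAME = 1
#       NUMBER = 2
#
#   toks = Tokens()
#   assert toks['NAME'] == 1
#   assert toks[2] == 'NUMBER'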
| {
"content_hash": "209463047d6b082271700e7d2100a950",
"timestamp": "",
"source": "github",
"line_count": 24,
"max_line_length": 60,
"avg_line_length": 27.791666666666668,
"alnum_prop": 0.5337331334332833,
"repo_name": "lodevil/cpy",
"id": "0b159faa112cb050fef393a0b563753609b4dd36",
"size": "667",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "cpy/parser/grammar/symbols.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Groff",
"bytes": "6071"
},
{
"name": "Python",
"bytes": "70756"
},
{
"name": "Shell",
"bytes": "183"
}
],
"symlink_target": ""
} |
"""
switchboard.admin
~~~~~~~~~~~~~~~~~~~~~
:copyright: (c) 2015 Kyle Adams.
:license: Apache License 2.0, see LICENSE for more details.
"""
from datetime import datetime
import logging
from operator import attrgetter
from bottle import Bottle, request, mako_view as view
import datastore.core
from webob.exc import HTTPNotFound
from .. import operator, signals
from ..models import Switch
from .utils import (
json_api,
SwitchboardException,
valid_sort_orders
)
from ..settings import settings
log = logging.getLogger(__name__)
app = Bottle()
# Template poke-jiggery; will hopefully give way to config options soon.
import bottle
import os
dir_name = os.path.dirname(os.path.realpath(__file__))
bottle.TEMPLATE_PATH.append(dir_name)
@app.get('/')
@view('index')
def index():
by = request.query.by or '-date_modified'
if by not in valid_sort_orders():
raise HTTPNotFound('Invalid sort order.')
    reverse = by.find('-') == 0
sort_by = by.lstrip('-')
switches = Switch.all()
switches.sort(key=attrgetter(sort_by), reverse=reverse)
messages = []
if isinstance(Switch.ds, datastore.DictDatastore):
m = dict(status='warning',
message='An in-memory datastore is being used; no changes \
will persist after a server restart.')
messages.append(m)
return dict(
switches=[s.to_dict(operator) for s in switches],
all_conditions=list(operator.get_all_conditions()),
sorted_by=by,
messages=messages,
settings=settings,
)
@app.post('/add')
@json_api
def add():
key = request.forms['key']
label = request.forms.get('label', '')
description = request.forms.get('description')
if not key:
raise SwitchboardException("Key cannot be empty")
if len(key) > 32:
raise SwitchboardException("Key must be less than or equal to 32"
+ " characters in length")
if len(label) > 32:
raise SwitchboardException("Name must be less than or equal to 32"
+ " characters in length")
if Switch.get(key):
raise SwitchboardException("Switch with key %s already exists"
% key)
switch = Switch.create(key=key, label=label or None,
description=description)
log.info('Switch %r added (%%s)' % switch.key,
', '.join('%s=%r' % (k, getattr(switch, k)) for k in
sorted(('key', 'label', 'description', ))))
signals.switch_added.send(switch)
return switch.to_dict(operator)
@app.post('/update')
@json_api
def update():
curkey = request.forms['curkey']
key = request.forms['key']
label = request.forms.get('label', '')
description = request.forms.get('description')
switch = Switch.get(curkey)
if len(key) > 32:
raise SwitchboardException("Key must be less than or equal to 32"
+ " characters in length")
if len(label) > 32:
raise SwitchboardException("Name must be less than or equal to 32"
+ " characters in length")
values = dict(
label=label,
key=key,
description=description,
)
changes = {}
for k, v in values.iteritems():
old_value = getattr(switch, k)
if old_value != v:
changes[k] = (v, old_value)
if changes:
if switch.key != key:
switch.delete()
switch.key = key
switch.label = label
switch.description = description
switch.date_modified = datetime.utcnow()
switch.save()
log.info('Switch %r updated %%s' % switch.key,
', '.join('%s=%r->%r' % (k, v[0], v[1]) for k, v in
sorted(changes.iteritems())))
signals.switch_updated.send(switch)
return switch.to_dict(operator)
@app.post('/status')
@json_api # XXX Not needed?
def status():
key = request.forms['key']
status = request.forms['status']
switch = Switch.get(key)
try:
status = int(status)
except ValueError:
raise SwitchboardException("Status must be integer")
old_status_label = switch.get_status_display()
if switch.status != status:
switch.status = status
switch.date_modified = datetime.utcnow()
switch.save()
log.info('Switch %r updated (status=%%s->%%s)' % switch.key,
old_status_label, switch.get_status_display())
signals.switch_status_updated.send(switch)
return switch.to_dict(operator)
@app.post('/delete')
@json_api
def delete():
key = request.forms['key']
switch = Switch.remove(key)
log.info('Switch %r removed' % key)
signals.switch_deleted.send(switch)
return {}
@app.post('/add_condition')
@json_api
def add_condition():
post = request.POST
key = post.get("key")
condition_set_id = post.get("id")
field_name = post.get("field")
exclude = int(post.get("exclude") or 0)
if not all([key, condition_set_id, field_name]):
raise SwitchboardException("Fields cannot be empty")
condition_set = operator.get_condition_set_by_id(condition_set_id)
field = condition_set.fields[field_name]
value = field.validate(post)
switch = operator[key]
switch.add_condition(condition_set_id, field_name, value,
exclude=exclude)
log.info('Condition added to %r (%r, %s=%r, exclude=%r)',
switch.key, condition_set_id, field_name, value,
bool(exclude))
signals.switch_condition_added.send(switch)
return switch.to_dict(operator)
@app.post('/remove_condition')
@json_api
def remove_condition():
post = request.POST
key = post.get("key")
condition_set_id = post.get("id")
field_name = post.get("field")
value = post.get("value")
if not all([key, condition_set_id, field_name, value]):
raise SwitchboardException("Fields cannot be empty")
switch = operator[key]
switch.remove_condition(condition_set_id, field_name, value)
log.info('Condition removed from %r (%r, %s=%r)' % (switch.key,
condition_set_id, field_name, value))
signals.switch_condition_removed.send(switch)
return switch.to_dict(operator)
| {
"content_hash": "80529b634a3eebad9f099a23bd90dcfd",
"timestamp": "",
"source": "github",
"line_count": 234,
"max_line_length": 76,
"avg_line_length": 27.286324786324787,
"alnum_prop": 0.6043852779953015,
"repo_name": "kadams54/switchboard",
"id": "1233fedad8607d69f0a2ee304cc6c0782d3c4c8b",
"size": "6385",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "switchboard/admin/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Makefile",
"bytes": "711"
},
{
"name": "Python",
"bytes": "145929"
},
{
"name": "Smarty",
"bytes": "42670"
}
],
"symlink_target": ""
} |
# Process [link](<to> "stuff")
from ..common.utils import isSpace, normalizeReference
from .state_inline import StateInline
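# For orientation (examples only): this rule recognises the standard
# CommonMark link forms
#   [text](http://example.com "title")   inline link
#   [text][label]                        full reference link
#   [text][]                             collapsed reference link
#   [text]                               shortcut reference link
# The reference forms are resolved against state.env["references"] below.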
def link(state: StateInline, silent: bool):
href = ""
title = ""
label = None
oldPos = state.pos
maximum = state.posMax
start = state.pos
parseReference = True
if state.srcCharCode[state.pos] != 0x5B: # /* [ */
return False
labelStart = state.pos + 1
labelEnd = state.md.helpers.parseLinkLabel(state, state.pos, True)
# parser failed to find ']', so it's not a valid link
if labelEnd < 0:
return False
pos = labelEnd + 1
if pos < maximum and state.srcCharCode[pos] == 0x28: # /* ( */
#
# Inline link
#
# might have found a valid shortcut link, disable reference parsing
parseReference = False
# [link]( <href> "title" )
# ^^ skipping these spaces
pos += 1
while pos < maximum:
code = state.srcCharCode[pos]
if not isSpace(code) and code != 0x0A:
break
pos += 1
if pos >= maximum:
return False
# [link]( <href> "title" )
# ^^^^^^ parsing link destination
start = pos
res = state.md.helpers.parseLinkDestination(state.src, pos, state.posMax)
if res.ok:
href = state.md.normalizeLink(res.str)
if state.md.validateLink(href):
pos = res.pos
else:
href = ""
# [link]( <href> "title" )
# ^^ skipping these spaces
start = pos
while pos < maximum:
code = state.srcCharCode[pos]
if not isSpace(code) and code != 0x0A:
break
pos += 1
# [link]( <href> "title" )
# ^^^^^^^ parsing link title
res = state.md.helpers.parseLinkTitle(state.src, pos, state.posMax)
if pos < maximum and start != pos and res.ok:
title = res.str
pos = res.pos
# [link]( <href> "title" )
# ^^ skipping these spaces
while pos < maximum:
code = state.srcCharCode[pos]
if not isSpace(code) and code != 0x0A:
break
pos += 1
if pos >= maximum or state.srcCharCode[pos] != 0x29: # /* ) */
# parsing a valid shortcut link failed, fallback to reference
parseReference = True
pos += 1
if parseReference:
#
# Link reference
#
if "references" not in state.env:
return False
if pos < maximum and state.srcCharCode[pos] == 0x5B: # /* [ */
start = pos + 1
pos = state.md.helpers.parseLinkLabel(state, pos)
if pos >= 0:
label = state.src[start:pos]
pos += 1
else:
pos = labelEnd + 1
else:
pos = labelEnd + 1
# covers label == '' and label == undefined
# (collapsed reference link and shortcut reference link respectively)
if not label:
label = state.src[labelStart:labelEnd]
label = normalizeReference(label)
ref = (
state.env["references"][label] if label in state.env["references"] else None
)
if not ref:
state.pos = oldPos
return False
href = ref["href"]
title = ref["title"]
#
# We found the end of the link, and know for a fact it's a valid link
# so all that's left to do is to call tokenizer.
#
if not silent:
state.pos = labelStart
state.posMax = labelEnd
token = state.push("link_open", "a", 1)
token.attrs = {"href": href}
if title:
token.attrSet("title", title)
# note, this is not part of markdown-it JS, but is useful for renderers
if label and state.md.options.get("store_labels", False):
token.meta["label"] = label
state.md.inline.tokenize(state)
token = state.push("link_close", "a", -1)
state.pos = pos
state.posMax = maximum
return True
| {
"content_hash": "582b414d602aeab8a8bd94e1d069eab9",
"timestamp": "",
"source": "github",
"line_count": 150,
"max_line_length": 88,
"avg_line_length": 29.086666666666666,
"alnum_prop": 0.5014898005959202,
"repo_name": "executablebooks/markdown-it-py",
"id": "2394d6c307cb39f8c0a62130809c746b3153404c",
"size": "4363",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "markdown_it/rules_inline/link.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "228453"
},
{
"name": "Python",
"bytes": "228837"
},
{
"name": "Shell",
"bytes": "460"
}
],
"symlink_target": ""
} |
"""
InformationMachineAPILib.Models.GetUOMsWrapper
"""
from InformationMachineAPILib.APIHelper import APIHelper
from InformationMachineAPILib.Models.UOMInfo import UOMInfo
from InformationMachineAPILib.Models.MetaBase import MetaBase
class GetUOMsWrapper(object):
"""Implementation of the 'GetUOMsWrapper' model.
TODO: type model description here.
Attributes:
result (list of UOMInfo): TODO: type description here.
meta (MetaBase): TODO: type description here.
"""
def __init__(self,
**kwargs):
"""Constructor for the GetUOMsWrapper class
Args:
**kwargs: Keyword Arguments in order to initialise the
object. Any of the attributes in this object are able to
be set through the **kwargs of the constructor. The values
that can be supplied and their types are as follows::
result -- list of UOMInfo -- Sets the attribute result
meta -- MetaBase -- Sets the attribute meta
"""
# Set all of the parameters to their default values
self.result = None
self.meta = None
# Create a mapping from API property names to Model property names
replace_names = {
"result": "result",
"meta": "meta",
}
# Parse all of the Key-Value arguments
if kwargs is not None:
for key in kwargs:
# Only add arguments that are actually part of this object
if key in replace_names:
setattr(self, replace_names[key], kwargs[key])
# Other objects also need to be initialised properly
if "result" in kwargs:
# Parameter is an array, so we need to iterate through it
self.result = list()
for item in kwargs["result"]:
self.result.append(UOMInfo(**item))
# Other objects also need to be initialised properly
if "meta" in kwargs:
self.meta = MetaBase(**kwargs["meta"])
def resolve_names(self):
"""Creates a dictionary representation of this object.
This method converts an object to a dictionary that represents the
format that the model should be in when passed into an API Request.
Because of this, the generated dictionary may have different
property names to that of the model itself.
Returns:
dict: The dictionary representing the object.
"""
# Create a mapping from Model property names to API property names
replace_names = {
"result": "result",
"meta": "meta",
}
retval = dict()
return APIHelper.resolve_names(self, replace_names, retval) | {
"content_hash": "2ce29866d4ac77f6c4aa543ffed3e3a4",
"timestamp": "",
"source": "github",
"line_count": 84,
"max_line_length": 75,
"avg_line_length": 34.19047619047619,
"alnum_prop": 0.592966573816156,
"repo_name": "information-machine/information-machine-api-python",
"id": "8241346c41effb617c2109656829e3b077ccd1c5",
"size": "2897",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "InformationMachineAPILib/Models/GetUOMsWrapper.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "379009"
}
],
"symlink_target": ""
} |
import tensorflow as tf
def run():
output = None
logit_data = [2.0, 1.0, 0.1]
logits = tf.placeholder(tf.float32)
# Calculate the softmax of the logits
    softmax = tf.nn.softmax(logits)
with tf.Session() as sess:
# Feed in the logit data
output = sess.run(softmax, feed_dict={logits: logit_data})
return output
print(run())
| {
"content_hash": "ecdc858f7e7ca09896f32b10e7b7b09a",
"timestamp": "",
"source": "github",
"line_count": 17,
"max_line_length": 66,
"avg_line_length": 22.764705882352942,
"alnum_prop": 0.6098191214470284,
"repo_name": "mmaraya/nd101",
"id": "7d92e4f58c7bd13a135b2b4f2ef748be00509b09",
"size": "465",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ch03/lesson01/quiz13.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "599049"
},
{
"name": "Jupyter Notebook",
"bytes": "648096"
},
{
"name": "Python",
"bytes": "22542"
}
],
"symlink_target": ""
} |
SECRET_KEY = 'cat'
INSTALLED_APPS = (
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.admin',
'channels',
'channels.delay',
'tests',
)
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
}
}
CHANNEL_LAYERS = {
'default': {
'BACKEND': 'asgiref.inmemory.ChannelLayer',
'ROUTING': [],
},
'fake_channel': {
'BACKEND': 'tests.test_management.FakeChannelLayer',
'ROUTING': [],
}
}
MIDDLEWARE_CLASSES = []
| {
"content_hash": "fe976872d768a2a15872a0fb8bc2b3d0",
"timestamp": "",
"source": "github",
"line_count": 30,
"max_line_length": 60,
"avg_line_length": 18.633333333333333,
"alnum_prop": 0.5635062611806798,
"repo_name": "Coread/channels",
"id": "bebbd2a876579035da0b44c814ce5532bf25652e",
"size": "559",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/settings.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "JavaScript",
"bytes": "29400"
},
{
"name": "Makefile",
"bytes": "424"
},
{
"name": "Python",
"bytes": "232339"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from django.db import migrations, models
import mgw7510.models
class Migration(migrations.Migration):
dependencies = [
]
operations = [
migrations.CreateModel(
name='WebUser',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('username', models.EmailField(max_length=100)),
('password', models.CharField(max_length=50)),
('confirmPassword', models.CharField(max_length=50)),
('newPassword', models.CharField(max_length=50)),
('confirmNewPassword', models.CharField(max_length=50)),
('userWorkDir', models.CharField(max_length=100)),
('pakServerIp', models.GenericIPAddressField(default=b'135.251.49.21')),
('pakServerUsername', models.CharField(default=b'xxu', max_length=100)),
('pakServerPasswd', models.CharField(default=b'initial', max_length=50)),
('pakServerFp', models.CharField(default=b'/viewstores/public/SLP', max_length=300)),
('seedVMIp', models.GenericIPAddressField(default=b'172.39.5.116')),
('seedVMUsername', models.CharField(default=b'root', max_length=100)),
('seedVMPasswd', models.CharField(default=b'newsys', max_length=50)),
('seedVMOpenrcAbsPath', models.CharField(default=b'/root/cloud-env/Rainbow-openrc.sh', max_length=300)),
('seedVMKeypairAbsPath', models.CharField(default=b'/root/cloud-env/BGW-keypair.pem', max_length=300)),
('userInputFile', models.FileField(null=True, upload_to=mgw7510.models.get_upload_path, blank=True)),
('tmpPath', models.CharField(max_length=100, null=True, blank=True)),
('userInputFileName', models.CharField(max_length=100, null=True, blank=True)),
('progressBarData', models.CharField(default=b'0', max_length=5)),
('userInputUploadedFlag', models.CharField(default=b'nok', max_length=5)),
('ceDeployState', models.CharField(default=b'initial', max_length=20)),
('ceSelectRel', models.CharField(max_length=10, null=True, blank=True)),
('ceSelectPak', models.CharField(max_length=10, null=True, blank=True)),
],
),
]
| {
"content_hash": "4174d300e368e53438a43d34bcd442fb",
"timestamp": "",
"source": "github",
"line_count": 42,
"max_line_length": 120,
"avg_line_length": 57.95238095238095,
"alnum_prop": 0.6117502054231717,
"repo_name": "gter1216/mgw7510-web",
"id": "9a7047f3a4c39ee7471ebbb070401854803b6dd3",
"size": "2458",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "mgw7510/migrations/0001_initial.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "6869"
},
{
"name": "HTML",
"bytes": "118658"
},
{
"name": "JavaScript",
"bytes": "34456"
},
{
"name": "Python",
"bytes": "90415"
},
{
"name": "Shell",
"bytes": "11796"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import
import logging
from functools import wraps
from celery import chain, chord, states
from kombu.utils import uuid
from django.contrib.auth.models import AnonymousUser
from roll_engine.utils.log import get_logger
from roll_engine.celery import app
from roll_engine import constants as _
from roll_engine.exceptions import ActionNotAllowed
re_logger = get_logger()
logger = logging.getLogger(__name__)
def log_action(msg=''):
def decorator(func):
@wraps(func)
def func_wrapper(deployment, *args, **kwargs):
operator = kwargs.get('operator', AnonymousUser())
is_retry = kwargs.get('is_retry', False)
message = '{} for retry'.format(msg) if is_retry else msg
deployment.actions.create(
action=func.__name__, message=message,
operator=getattr(operator, 'username', ''))
kwargs['operator'] = operator
func(deployment, *args, **kwargs)
return func_wrapper
return decorator
def _revoke_chain(task_id, terminate=False):
result = app.AsyncResult(task_id)
while result:
status = result.status
children = result.children
logger.warning("Result {} status: {}".format(result.id, status))
if status in states.UNREADY_STATES:
result.revoke(terminate=terminate)
if children:
if len(children) == 1:
result = children[0]
else: # len(result.children) > 1:
raise ActionNotAllowed('chain is not linear')
else:
break
class StartMixin(object):
@log_action(msg='activate deployment')
def start(self, operator=None):
try:
start_method = self.smoke
except AttributeError:
start_method = self.rollout
finally:
return start_method(operator=operator)
class SmokeMixin(object):
def __create_canvas(self, operator=None):
deployment_id = self.id
tasks = self._meta.task_set
fort_batch = self.get_fort_batch()
target_canvases = [tgt.create_smoke_canvas(operator)
for tgt in fort_batch.targets.all()]
smoke_success_status = self._meta.smoke_success_status
canvas = chain(
tasks.start_smoking.si(tasks, deployment_id, operator),
tasks.start_rolling_batch.subtask(args=(tasks, deployment_id,
fort_batch.id, operator),
immutable=True),
chord(target_canvases,
tasks.finish_smoking.si(tasks, deployment_id,
smoke_success_status, operator))
)
return canvas
@log_action(msg='start smoking')
def smoke(self, action=_.SMOKING, operator=None, is_retry=False):
canvas = self.__create_canvas(operator)
canvas.delay()
self.trans(action)
class BakeMixin(object):
def __create_canvas(self, operator=None, is_retry=False):
deployment_id = self.id
tasks = self._meta.task_set
fort_batch = self.get_fort_batch()
target_canvases = [tgt.create_bake_canvas(operator)
for tgt in fort_batch.targets.all()]
ts = [
tasks.start_baking.si(tasks, deployment_id, operator),
chord(target_canvases, tasks.finish_rolling_batch.si(
tasks, deployment_id, fort_batch.id, operator)),
tasks.finish_baking.si(tasks, deployment_id, operator)
]
if is_retry:
restart_rolling_batch = tasks.start_rolling_batch.subtask(
args=(tasks, deployment_id, fort_batch.id, operator),
immutable=True)
ts.insert(1, restart_rolling_batch)
canvas = chain(*ts)
return canvas
@log_action(msg='start baking')
def bake(self, action=_.BAKING, operator=None, is_retry=False):
canvas = self.__create_canvas(operator, is_retry)
canvas.delay()
self.trans(action)
class RolloutMixin(object):
def __create_canvas(self, operator=None, is_retry=False):
deployment_id = self.id
tasks = self._meta.task_set
batches = self.get_rollout_batches()
if is_retry:
# only retry non-successfull deployment batches
batches = batches.exclude(status=_.SUCCESS)
batch_ids = list(batches.values_list('id', flat=True)) # evaluate out
batch_canvases = [batch.create_canvas(operator) for batch in batches]
ts = [tasks.start_rolling_out.si(tasks, deployment_id, operator)]
ts.extend(batch_canvases)
ts.append(tasks.finish_rolling_out.si(tasks, deployment_id, batch_ids,
operator))
ts.append(tasks.finish_deployment.si(tasks, deployment_id, operator))
canvas = chain(*ts)
return canvas
@log_action(msg='start rolling out')
def rollout(self, action=_.ROLLING_OUT, operator=None, is_retry=False):
self.trans(action) # switch status before rolling
canvas = self.__create_canvas(operator, is_retry)
canvas.delay()
class BrakeMixin(object):
@log_action(msg='brake deployment')
def brake(self, operator=None):
status = self.status.lower()
extra = {'deploy': self, 'operator': operator}
re_logger.info('Deployment braked', extra=extra)
self.revoke(update_status=False)
action = '{}_brake'.format(status)
self.trans(action)
@log_action(msg='resume deployment')
def resume(self, operator=None):
extra = {'deploy': self, 'operator': operator}
re_logger.info('Deployment resumed', extra=extra)
action = '{}_resume'.format(self.status.lower())
handler = self.get_resume_handler()
if handler is not None:
handler(action, operator=operator)
else:
self.trans(action)
class RevokeMixin(object):
@log_action(msg='revoke deployment')
def revoke(self, terminate=None, operator=None, update_status=True):
batches = self.get_revoke_batches()
for batch in batches:
batch.revoke(update_status)
extra = {'deploy': self, 'operator': operator}
re_logger.info('Deployment revoked', extra=extra)
if update_status:
self.trans(_.REVOKED)
class RetryMixin(object):
@log_action(msg='retry deployment')
def retry(self, operator=None):
extra = {'deploy': self, 'operator': operator}
re_logger.info('Retry deployment', extra=extra)
handler = self.get_retry_handler()
action = '{}_retry'.format(handler.__name__)
handler(action, operator=operator, is_retry=True)
class BatchMixin(object):
def create_canvas(self, operator=None):
batch_id = self.id
deployment_id = self.deployment_id
targets = self.targets.all()
deployment = self.deployment
tasks = deployment._meta.task_set
first_rollout_batch = deployment.get_rollout_batches().first()
if self == first_rollout_batch:
pause_time = 0
else:
pause_time = self.pause_time
target_canvases = [t.create_rollout_canvas(operator) for t in targets]
chain_args = [
tasks.start_rolling_batch.subtask(
args=(tasks, deployment_id, batch_id, operator),
countdown=pause_time, immutable=True)
]
if target_canvases:
chain_args.append(
chord(target_canvases, tasks.finish_rolling_batch.si(
tasks, deployment_id, batch_id, operator)
)
)
else:
# empty batch is ok
chain_args.append(
tasks.finish_rolling_batch.si(
tasks, deployment_id, batch_id, operator)
)
return chain(*chain_args)
def revoke(self, update_status=True):
for tgt in self.targets.all():
tgt.revoke()
if update_status:
self.trans(_.REVOKED)
class TargetMixin(object):
def __create_canvas(self, tasks, operator=None):
task_id = uuid()
deployment_id = self.batch.deployment_id
target_id = self.id
canvas = chain(tasks[0].subtask(args=(tasks[0].__self__, deployment_id,
target_id, operator),
immutable=True,
task_id=task_id),
*[t.si(t.__self__, deployment_id, target_id, operator)
for t in tasks[1:]]
)
self.task_id = task_id
self.save(update_fields=['task_id', 'updated_at'])
return canvas
def create_smoke_canvas(self, operator=None):
smoke_job = self.batch.deployment._meta.task_set.smoke_job()
return self.__create_canvas(smoke_job, operator)
def create_bake_canvas(self, operator=None):
bake_job = self.batch.deployment._meta.task_set.bake_job()
return self.__create_canvas(bake_job, operator)
def create_rollout_canvas(self, operator=None):
rollout_job = self.batch.deployment._meta.task_set.rollout_job()
return self.__create_canvas(rollout_job, operator)
def revoke(self):
if self.task_id is not None:
_revoke_chain(self.task_id, True)
| {
"content_hash": "5f3578349cb64096c6589609b430d974",
"timestamp": "",
"source": "github",
"line_count": 271,
"max_line_length": 79,
"avg_line_length": 35.22140221402214,
"alnum_prop": 0.5875327396542692,
"repo_name": "ctripcorp/tars",
"id": "d02ae89fa45069718f6767ea5dd7d27cddb5d0df",
"size": "9545",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "roll_engine/mixins.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "372246"
},
{
"name": "Dockerfile",
"bytes": "188"
},
{
"name": "HTML",
"bytes": "175411"
},
{
"name": "JavaScript",
"bytes": "1190261"
},
{
"name": "Makefile",
"bytes": "1731"
},
{
"name": "Python",
"bytes": "305797"
},
{
"name": "Shell",
"bytes": "12737"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('avalonline', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='player',
name='role',
field=models.CharField(null=True, max_length=50, blank=True, choices=[('black', 'black'), ('white', 'white')]),
),
]
| {
"content_hash": "eab9eb304a192414e93b9255e4abb670",
"timestamp": "",
"source": "github",
"line_count": 18,
"max_line_length": 123,
"avg_line_length": 24.5,
"alnum_prop": 0.5873015873015873,
"repo_name": "fako/avalonline",
"id": "fa8b44193a1482657dac7064a9affae1cb16b540",
"size": "465",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "avalonline/migrations/0002_auto_20160709_1342.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "596"
},
{
"name": "Python",
"bytes": "10682"
}
],
"symlink_target": ""
} |
from __future__ import division, unicode_literals
"""
A module to perform diffusion analyses (e.g. calculating diffusivity from
mean square displacements etc.). If you use this module, please consider
citing the following papers::
Ong, S. P., Mo, Y., Richards, W. D., Miara, L., Lee, H. S., & Ceder, G.
(2013). Phase stability, electrochemical stability and ionic conductivity
of the Li10+-1MP2X12 (M = Ge, Si, Sn, Al or P, and X = O, S or Se) family
of superionic conductors. Energy & Environmental Science, 6(1), 148.
doi:10.1039/c2ee23355j
Mo, Y., Ong, S. P., & Ceder, G. (2012). First Principles Study of the
Li10GeP2S12 Lithium Super Ionic Conductor Material. Chemistry of Materials,
24(1), 15-17. doi:10.1021/cm203303y
"""
__author__ = "Will Richards, Shyue Ping Ong"
__version__ = "0.2"
__maintainer__ = "Will Richards"
__email__ = "[email protected]"
__status__ = "Beta"
__date__ = "5/2/13"
import numpy as np
from pymatgen.core import Structure, get_el_sp
import pymatgen.core.physical_constants as phyc
from pymatgen.serializers.json_coders import PMGSONable
from pymatgen.io.vasp.outputs import Vasprun
from pymatgen.util.coord_utils import pbc_diff
class DiffusionAnalyzer(PMGSONable):
"""
Class for performing diffusion analysis.
.. attribute: diffusivity
        Diffusivity in cm^2 / s
.. attribute: conductivity
Conductivity in mS / cm
.. attribute: diffusivity_components
        A vector with diffusivity in the a, b and c directions in cm^2 / s
.. attribute: conductivity_components
A vector with conductivity in the a, b and c directions in mS / cm
.. attribute: diffusivity_sigma
        Std dev in diffusivity in cm^2 / s. Note that this makes sense only
for non-smoothed analyses.
.. attribute: conductivity_sigma
Std dev in conductivity in mS / cm. Note that this makes sense only
for non-smoothed analyses.
.. attribute: diffusivity_components_sigma
A vector with std dev. in diffusivity in the a, b and c directions in
        cm^2 / s. Note that this makes sense only for non-smoothed analyses.
.. attribute: conductivity_components_sigma
A vector with std dev. in conductivity in the a, b and c directions
in mS / cm. Note that this makes sense only for non-smoothed analyses.
.. attribute: max_framework_displacement
The maximum (drift adjusted) distance of any framework atom from its
starting location in A.
.. attribute: max_ion_displacements
nions x 1 array of the maximum displacement of each individual ion.
.. attribute: msd
nsteps x 1 array of the mean square displacement of specie.
.. attribute: msd_components
nsteps x 3 array of the MSD in each lattice direction of specie.
.. attribute: sq_disp_ions
The square displacement of all ion (both specie and other ions) as a
nions x nsteps array.
.. attribute: dt
Time coordinate array.
"""
def __init__(self, structure, displacements, specie, temperature,
time_step, step_skip, smoothed="max", min_obs=30,
avg_nsteps=1000):
"""
This constructor is meant to be used with pre-processed data.
Other convenient constructors are provided as class methods (see
from_vaspruns and from_files).
Given a matrix of displacements (see arguments below for expected
format), the diffusivity is given by:
            D = <mean square displacement> / (2 * d * t)
where d is the dimensionality, t is the time. To obtain a reliable
diffusion estimate, a least squares regression of the MSD against
time to obtain the slope, which is then related to the diffusivity.
For traditional analysis, use smoothed=False and weighted=False.
Args:
structure (Structure): Initial structure.
            displacements (array): Numpy array with shape [site,
time step, axis]
specie (Element/Specie): Specie to calculate diffusivity for as a
String. E.g., "Li".
temperature (float): Temperature of the diffusion run in Kelvin.
time_step (int): Time step between measurements.
step_skip (int): Sampling frequency of the displacements (
time_step is multiplied by this number to get the real time
between measurements)
smoothed (str): Whether to smooth the MSD, and what mode to smooth.
Supported modes are:
i. "max", which tries to use the maximum #
of data points for each time origin, subject to a
minimum # of observations given by min_obs, and then
weights the observations based on the variance
accordingly. This is the default.
ii. "constant", in which each timestep is averaged over
the number of time_steps given by min_steps.
iii. None / False / any other false-like quantity. No
smoothing.
min_obs (int): Used with smoothed="max". Minimum number of
observations to have before including in the MSD vs dt
calculation. E.g. If a structure has 10 diffusing atoms,
and min_obs = 30, the MSD vs dt will be
calculated up to dt = total_run_time / 3, so that each
diffusing atom is measured at least 3 uncorrelated times.
Only applies in smoothed="max".
avg_nsteps (int): Used with smoothed="constant". Determines the
number of time steps to average over to get the msd for each
timestep. Default of 1000 is usually pretty good.
"""
self.structure = structure
self.disp = displacements
self.specie = specie
self.temperature = temperature
self.time_step = time_step
self.step_skip = step_skip
self.min_obs = min_obs
self.smoothed = smoothed
self.avg_nsteps = avg_nsteps
indices = []
framework_indices = []
for i, site in enumerate(structure):
if site.specie.symbol == specie:
indices.append(i)
else:
framework_indices.append(i)
if self.disp.shape[1] < 2:
self.diffusivity = 0.
self.conductivity = 0.
self.diffusivity_components = np.array([0., 0., 0.])
self.conductivity_components = np.array([0., 0., 0.])
self.max_framework_displacement = 0
else:
framework_disp = self.disp[framework_indices]
drift = np.average(framework_disp, axis=0)[None, :, :]
#drift corrected position
dc = self.disp - drift
df = structure.lattice.get_fractional_coords(dc)
nions, nsteps, dim = dc.shape
if not smoothed:
timesteps = np.arange(0, nsteps)
elif smoothed == "constant":
if nsteps <= avg_nsteps:
raise ValueError('Not enough data to calculate diffusivity')
timesteps = np.arange(0, nsteps - avg_nsteps)
else:
#limit the number of sampled timesteps to 200
min_dt = int(1000 / (self.step_skip * self.time_step))
max_dt = min(len(indices) * nsteps // self.min_obs, nsteps)
if min_dt >= max_dt:
raise ValueError('Not enough data to calculate diffusivity')
timesteps = np.arange(min_dt, max_dt,
max(int((max_dt - min_dt) / 200), 1))
dt = timesteps * self.time_step * self.step_skip
#calculate the smoothed msd values
msd = np.zeros_like(dt, dtype=np.double)
sq_disp_ions = np.zeros((len(dc), len(dt)), dtype=np.double)
msd_components = np.zeros(dt.shape + (3,))
lengths = np.array(self.structure.lattice.abc)[None, None, :]
for i, n in enumerate(timesteps):
if not smoothed:
dx = dc[:, i:i + 1, :]
dcomponents = df[:, i:i + 1, :] * lengths
elif smoothed == "constant":
dx = dc[:, i:i + avg_nsteps, :] - dc[:, 0:avg_nsteps, :]
dcomponents = (df[:, i:i + avg_nsteps, :]
- df[:, 0:avg_nsteps, :]) * lengths
else:
dx = dc[:, n:, :] - dc[:, :-n, :]
dcomponents = (df[:, n:, :] - df[:, :-n, :]) * lengths
sq_disp = dx ** 2
sq_disp_ions[:, i] = np.average(np.sum(sq_disp, axis=2), axis=1)
msd[i] = np.average(sq_disp_ions[:, i][indices])
msd_components[i] = np.average(dcomponents[indices] ** 2,
axis=(0, 1))
def weighted_lstsq(a, b):
if smoothed == "max":
# For max smoothing, we need to weight by variance.
w_root = (1 / dt) ** 0.5
return np.linalg.lstsq(a * w_root[:, None], b * w_root)
else:
return np.linalg.lstsq(a, b)
m_components = np.zeros(3)
m_components_res = np.zeros(3)
a = np.ones((len(dt), 2))
a[:, 0] = dt
for i in range(3):
(m, c), res, rank, s = weighted_lstsq(a, msd_components[:, i])
m_components[i] = max(m, 1e-15)
m_components_res[i] = res[0]
(m, c), res, rank, s = weighted_lstsq(a, msd)
#m shouldn't be negative
m = max(m, 1e-15)
#factor of 10 is to convert from A^2/fs to cm^2/s
#factor of 6 is for dimensionality
conv_factor = get_conversion_factor(self.structure, self.specie,
self.temperature)
self.diffusivity = m / 60
# Calculate the error in the diffusivity using the error in the
# slope from the lst sq.
# Variance in slope = n * Sum Squared Residuals / (n * Sxx - Sx
# ** 2) / (n-2).
n = len(dt)
# Pre-compute the denominator since we will use it later.
denom = (n * np.sum(dt ** 2) - np.sum(dt) ** 2) * (n - 2)
self.diffusivity_std_dev = np.sqrt(n * res[0] / denom) / 60
self.conductivity = self.diffusivity * conv_factor
self.conductivity_std_dev = self.diffusivity_std_dev * conv_factor
self.diffusivity_components = m_components / 20
self.diffusivity_components_std_dev = np.sqrt(
n * m_components_res / denom) / 20
self.conductivity_components = self.diffusivity_components * \
conv_factor
self.conductivity_components_std_dev = \
self.diffusivity_components_std_dev * conv_factor
# Drift and displacement information.
self.drift = drift
self.corrected_displacements = dc
self.max_ion_displacements = np.max(np.sum(
dc ** 2, axis=-1) ** 0.5, axis=1)
self.max_framework_displacement = \
np.max(self.max_ion_displacements[framework_indices])
self.msd = msd
self.sq_disp_ions = sq_disp_ions
self.msd_components = msd_components
self.dt = dt
self.indices = indices
self.framework_indices = framework_indices
def get_drift_corrected_structures(self):
"""
Returns an iterator for the drift-corrected structures. Use of
iterator is to reduce memory usage as # of structures in MD can be
huge. You don't often need all the structures all at once.
"""
coords = np.array(self.structure.cart_coords)
species = self.structure.species_and_occu
latt = self.structure.lattice
nsites, nsteps, dim = self.corrected_displacements.shape
for i in range(nsteps):
yield Structure(latt, species, coords
+ self.corrected_displacements[:, i, :],
coords_are_cartesian=True)
def get_summary_dict(self, include_msd_t=False):
"""
Provides a summary of diffusion information.
Args:
            include_msd_t (bool): Whether to include mean square displacement
                and time data with the data.
Returns:
(dict) of diffusion and conductivity data.
"""
d = {
"D": self.diffusivity,
"D_sigma": self.diffusivity_std_dev,
"S": self.conductivity,
"S_sigma": self.conductivity_std_dev,
"D_components": self.diffusivity_components.tolist(),
"S_components": self.conductivity_components.tolist(),
"D_components_sigma": self.diffusivity_components_std_dev.tolist(),
"S_components_sigma": self.conductivity_components_std_dev.tolist(),
"specie": str(self.specie),
"step_skip": self.step_skip,
"time_step": self.time_step,
"temperature": self.temperature,
"max_framework_displacement": self.max_framework_displacement
}
if include_msd_t:
d["msd"] = self.msd.tolist()
d["msd_components"] = self.msd_components.tolist()
d["dt"] = self.dt.tolist()
return d
def get_msd_plot(self, plt=None, mode="specie"):
"""
Get the plot of the smoothed msd vs time graph. Useful for
checking convergence. This can be written to an image file.
        Args:
            plt: A plot object. Defaults to None, which means one will be
                generated.
            mode (str): Determines what is plotted. "ions" plots the square
                displacement of each individual ion, "species" plots the mean
                square displacement averaged per species, and any other value
                plots the overall MSD of the diffusing specie together with
                its a, b and c components.
"""
from pymatgen.util.plotting_utils import get_publication_quality_plot
plt = get_publication_quality_plot(12, 8, plt=plt)
if mode == "species":
for sp in sorted(self.structure.composition.keys()):
indices = [i for i, site in enumerate(self.structure) if
site.specie == sp]
sd = np.average(self.sq_disp_ions[indices, :], axis=0)
plt.plot(self.dt, sd, label=sp.__str__())
plt.legend(loc=2, prop={"size": 20})
elif mode == "ions":
for i, site in enumerate(self.structure):
sd = self.sq_disp_ions[i, :]
plt.plot(self.dt, sd, label="%s - %d" % (
site.specie.__str__(), i))
plt.legend(loc=2, prop={"size": 20})
else: #Handle default / invalid mode case
plt.plot(self.dt, self.msd, 'k')
plt.plot(self.dt, self.msd_components[:, 0], 'r')
plt.plot(self.dt, self.msd_components[:, 1], 'g')
plt.plot(self.dt, self.msd_components[:, 2], 'b')
plt.legend(["Overall", "a", "b", "c"], loc=2, prop={"size": 20})
plt.xlabel("Timestep (fs)")
        plt.ylabel(r"MSD ($\AA^2$)")
plt.tight_layout()
return plt
def plot_msd(self, mode="default"):
"""
Plot the smoothed msd vs time graph. Useful for checking convergence.
Args:
mode (str): Either "default" (the default, shows only the MSD for
the diffusing specie, and its components), "ions" (individual
square displacements of all ions), or "species" (mean square
displacement by specie).
"""
self.get_msd_plot(mode=mode).show()
@classmethod
def from_structures(cls, structures, specie, temperature,
time_step, step_skip, smoothed="max", min_obs=30,
avg_nsteps=1000, initial_disp=None,
initial_structure=None):
"""
Convenient constructor that takes in a list of Structure objects to
perform diffusion analysis.
Args:
structures ([Structure]): list of Structure objects (must be
ordered in sequence of run). E.g., you may have performed
sequential VASP runs to obtain sufficient statistics.
specie (Element/Specie): Specie to calculate diffusivity for as a
String. E.g., "Li".
temperature (float): Temperature of the diffusion run in Kelvin.
time_step (int): Time step between measurements.
step_skip (int): Sampling frequency of the displacements (
time_step is multiplied by this number to get the real time
between measurements)
smoothed (str): Whether to smooth the MSD, and what mode to smooth.
Supported modes are:
i. "max", which tries to use the maximum #
of data points for each time origin, subject to a
minimum # of observations given by min_obs, and then
weights the observations based on the variance
accordingly. This is the default.
                ii. "constant", in which each timestep is averaged over
                    the number of time steps given by avg_nsteps.
iii. None / False / any other false-like quantity. No
smoothing.
min_obs (int): Used with smoothed="max". Minimum number of
observations to have before including in the MSD vs dt
calculation. E.g. If a structure has 10 diffusing atoms,
and min_obs = 30, the MSD vs dt will be
calculated up to dt = total_run_time / 3, so that each
diffusing atom is measured at least 3 uncorrelated times.
Only applies in smoothed="max".
avg_nsteps (int): Used with smoothed="constant". Determines the
number of time steps to average over to get the msd for each
timestep. Default of 1000 is usually pretty good.
initial_disp (np.ndarray): Sometimes, you need to iteratively
compute estimates of the diffusivity. This supplies an
initial displacement that will be added on to the initial
displacements. Note that this makes sense only when
smoothed=False.
initial_structure (Structure): Like initial_disp, this is used
for iterative computations of estimates of the diffusivity. You
typically need to supply both variables. This stipulates the
                initial structure from which the current set of displacements
are computed.
"""
structure = structures[0]
p = [np.array(s.frac_coords)[:, None] for s in structures]
if initial_structure is not None:
p.insert(0, np.array(initial_structure.frac_coords)[:, None])
else:
p.insert(0, p[0])
p = np.concatenate(p, axis=1)
dp = p[:, 1:] - p[:, :-1]
dp = dp - np.round(dp)
f_disp = np.cumsum(dp, axis=1)
if initial_disp is not None:
f_disp += structure.lattice.get_fractional_coords(initial_disp)[:,
None, :]
disp = structure.lattice.get_cartesian_coords(f_disp)
return cls(structure, disp, specie, temperature,
time_step, step_skip=step_skip, smoothed=smoothed,
min_obs=min_obs, avg_nsteps=avg_nsteps)
@classmethod
def from_vaspruns(cls, vaspruns, specie, smoothed="max", min_obs=30,
avg_nsteps=1000, initial_disp=None,
initial_structure=None):
"""
Convenient constructor that takes in a list of Vasprun objects to
perform diffusion analysis.
Args:
vaspruns ([Vasprun]): List of Vaspruns (must be ordered in
sequence of MD simulation). E.g., you may have performed
sequential VASP runs to obtain sufficient statistics.
specie (Element/Specie): Specie to calculate diffusivity for as a
String. E.g., "Li".
smoothed (str): Whether to smooth the MSD, and what mode to smooth.
Supported modes are:
i. "max", which tries to use the maximum #
of data points for each time origin, subject to a
minimum # of observations given by min_obs, and then
weights the observations based on the variance
accordingly. This is the default.
                ii. "constant", in which each timestep is averaged over
                    the number of time steps given by avg_nsteps.
iii. None / False / any other false-like quantity. No
smoothing.
min_obs (int): Used with smoothed="max". Minimum number of
observations to have before including in the MSD vs dt
calculation. E.g. If a structure has 10 diffusing atoms,
and min_obs = 30, the MSD vs dt will be
calculated up to dt = total_run_time / 3, so that each
diffusing atom is measured at least 3 uncorrelated times.
Only applies in smoothed="max".
avg_nsteps (int): Used with smoothed="constant". Determines the
number of time steps to average over to get the msd for each
timestep. Default of 1000 is usually pretty good.
initial_disp (np.ndarray): Sometimes, you need to iteratively
compute estimates of the diffusivity. This supplies an
initial displacement that will be added on to the initial
displacements. Note that this makes sense only when
smoothed=False.
initial_structure (Structure): Like initial_disp, this is used
for iterative computations of estimates of the diffusivity. You
typically need to supply both variables. This stipulates the
                initial structure from which the current set of displacements
are computed.
"""
step_skip = vaspruns[0].ionic_step_skip or 1
final_structure = vaspruns[0].initial_structure
structures = []
for vr in vaspruns:
#check that the runs are continuous
fdist = pbc_diff(vr.initial_structure.frac_coords,
final_structure.frac_coords)
if np.any(fdist > 0.001):
raise ValueError('initial and final structures do not '
'match.')
final_structure = vr.final_structure
assert (vr.ionic_step_skip or 1) == step_skip
structures.extend([s['structure'] for s in vr.ionic_steps])
temperature = vaspruns[0].parameters['TEEND']
time_step = vaspruns[0].parameters['POTIM']
return cls.from_structures(structures=structures, specie=specie,
temperature=temperature, time_step=time_step, step_skip=step_skip,
smoothed=smoothed, min_obs=min_obs, avg_nsteps=avg_nsteps,
initial_disp=initial_disp, initial_structure=initial_structure)
@classmethod
def from_files(cls, filepaths, specie, step_skip=10, smoothed="max",
min_obs=30, avg_nsteps=1000, ncores=None, initial_disp=None,
initial_structure=None):
"""
Convenient constructor that takes in a list of vasprun.xml paths to
perform diffusion analysis.
Args:
filepaths ([str]): List of paths to vasprun.xml files of runs. (
must be ordered in sequence of MD simulation). For example,
you may have done sequential VASP runs and they are in run1,
run2, run3, etc. You should then pass in
["run1/vasprun.xml", "run2/vasprun.xml", ...].
specie (Element/Specie): Specie to calculate diffusivity for as a
String. E.g., "Li".
step_skip (int): Sampling frequency of the displacements (
time_step is multiplied by this number to get the real time
between measurements)
smoothed (str): Whether to smooth the MSD, and what mode to smooth.
Supported modes are:
i. "max", which tries to use the maximum #
of data points for each time origin, subject to a
minimum # of observations given by min_obs, and then
weights the observations based on the variance
accordingly. This is the default.
                ii. "constant", in which each timestep is averaged over
                    the number of time steps given by avg_nsteps.
iii. None / False / any other false-like quantity. No
smoothing.
min_obs (int): Used with smoothed="max". Minimum number of
observations to have before including in the MSD vs dt
calculation. E.g. If a structure has 10 diffusing atoms,
and min_obs = 30, the MSD vs dt will be
calculated up to dt = total_run_time / 3, so that each
diffusing atom is measured at least 3 uncorrelated times.
Only applies in smoothed="max".
avg_nsteps (int): Used with smoothed="constant". Determines the
number of time steps to average over to get the msd for each
timestep. Default of 1000 is usually pretty good.
ncores (int): Numbers of cores to use for multiprocessing. Can
speed up vasprun parsing considerably. Defaults to None,
which means serial. It should be noted that if you want to
use multiprocessing, the number of ionic steps in all vasprun
.xml files should be a multiple of the ionic_step_skip.
Otherwise, inconsistent results may arise. Serial mode has no
such restrictions.
initial_disp (np.ndarray): Sometimes, you need to iteratively
compute estimates of the diffusivity. This supplies an
initial displacement that will be added on to the initial
displacements. Note that this makes sense only when
smoothed=False.
initial_structure (Structure): Like initial_disp, this is used
for iterative computations of estimates of the diffusivity. You
typically need to supply both variables. This stipulates the
                initial structure from which the current set of displacements
are computed.
"""
if ncores is not None and len(filepaths) > 1:
import multiprocessing
p = multiprocessing.Pool(ncores)
vaspruns = p.map(_get_vasprun,
[(fp, step_skip) for fp in filepaths])
p.close()
p.join()
else:
vaspruns = []
offset = 0
for p in filepaths:
v = Vasprun(p, ionic_step_offset=offset,
ionic_step_skip=step_skip)
vaspruns.append(v)
# Recompute offset.
offset = (- (v.nionic_steps - offset)) % step_skip
return cls.from_vaspruns(vaspruns, min_obs=min_obs, smoothed=smoothed,
specie=specie, initial_disp=initial_disp,
initial_structure=initial_structure,
avg_nsteps=avg_nsteps)
def as_dict(self):
return {
"@module": self.__class__.__module__,
"@class": self.__class__.__name__,
"structure": self.structure.as_dict(),
"displacements": self.disp.tolist(),
"specie": self.specie,
"temperature": self.temperature,
"time_step": self.time_step,
"step_skip": self.step_skip,
"min_obs": self.min_obs,
"smoothed": self.smoothed,
"avg_nsteps": self.avg_nsteps
}
@classmethod
def from_dict(cls, d):
structure = Structure.from_dict(d["structure"])
return cls(structure, np.array(d["displacements"]), specie=d["specie"],
temperature=d["temperature"], time_step=d["time_step"],
step_skip=d["step_skip"], min_obs=d["min_obs"],
smoothed=d.get("smoothed", "max"),
avg_nsteps=d.get("avg_nsteps", 1000))
def get_conversion_factor(structure, species, temperature):
"""
Conversion factor to convert between cm^2/s diffusivity measurements and
mS/cm conductivity measurements based on number of atoms of diffusing
species. Note that the charge is based on the oxidation state of the
species (where available), or else the number of valence electrons
(usually a good guess, esp for main group ions).
Args:
structure (Structure): Input structure.
species (Element/Specie): Diffusing species.
temperature (float): Temperature of the diffusion run in Kelvin.
Returns:
Conversion factor.
Conductivity (in mS/cm) = Conversion Factor * Diffusivity (in cm^2/s)
"""
df_sp = get_el_sp(species)
if hasattr(df_sp, "oxi_state"):
z = df_sp.oxi_state
else:
z = df_sp.full_electronic_structure[-1][2]
n = structure.composition[species]
vol = structure.volume * 1e-24 # units cm^3
return 1000 * n / (vol * phyc.N_a) * z ** 2 * phyc.F ** 2\
/ (phyc.R * temperature)
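# Illustrative usage sketch (not part of the original module): converting an
# assumed diffusivity of 1e-6 cm^2/s into a conductivity in mS/cm for a
# hypothetical cubic Li structure. The lattice parameter, temperature and
# diffusivity below are made-up values for demonstration only; Lattice is
# imported locally since only Structure is used elsewhere in this module.
def _example_conversion_factor():
    from pymatgen.core.lattice import Lattice
    li_structure = Structure(Lattice.cubic(3.0), ["Li"], [[0, 0, 0]])
    factor = get_conversion_factor(li_structure, "Li", 600)
    return factor * 1e-6  # conductivity in mS/cm for an assumed D = 1e-6 cm^2/s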
def _get_vasprun(args):
"""
Internal method to support multiprocessing.
"""
return Vasprun(args[0], ionic_step_skip=args[1])
def fit_arrhenius(temps, diffusivities):
"""
Returns Ea and c from the Arrhenius fit:
D = c * exp(-Ea/kT)
Args:
temps ([float]): A sequence of temperatures. units: K
diffusivities ([float]): A sequence of diffusivities (e.g.,
from DiffusionAnalyzer.diffusivity). units: cm^2/s
"""
t_1 = 1 / np.array(temps)
logd = np.log(diffusivities)
#Do a least squares regression of log(D) vs 1/T
a = np.array([t_1, np.ones(len(temps))]).T
w = np.array(np.linalg.lstsq(a, logd)[0])
return -w[0] * phyc.k_b / phyc.e, np.exp(w[1])
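# Illustrative sketch (not part of the original module): fit_arrhenius should
# recover the activation energy (in eV, since k_b is divided by the electron
# charge) and prefactor used to generate synthetic diffusivities. The numbers
# below are hypothetical.
def _example_fit_arrhenius():
    temps = [600, 800, 1000, 1200]
    ea_true, c_true = 0.3, 1e-3
    diffusivities = [c_true * np.exp(-ea_true / (phyc.k_b / phyc.e * t))
                     for t in temps]
    ea_fit, c_fit = fit_arrhenius(temps, diffusivities)
    return ea_fit, c_fit  # expected to be close to 0.3 eV and 1e-3 cm^2/s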
def get_extrapolated_diffusivity(temps, diffusivities, new_temp):
"""
Returns (Arrhenius) extrapolated diffusivity at new_temp
Args:
temps ([float]): A sequence of temperatures. units: K
diffusivities ([float]): A sequence of diffusivities (e.g.,
from DiffusionAnalyzer.diffusivity). units: cm^2/s
new_temp (float): desired temperature. units: K
Returns:
(float) Diffusivity at extrapolated temp in mS/cm.
"""
Ea, c = fit_arrhenius(temps, diffusivities)
return c * np.exp(-Ea / (phyc.k_b / phyc.e * new_temp))
def get_extrapolated_conductivity(temps, diffusivities, new_temp, structure,
species):
"""
Returns extrapolated mS/cm conductivity.
Args:
temps ([float]): A sequence of temperatures. units: K
diffusivities ([float]): A sequence of diffusivities (e.g.,
from DiffusionAnalyzer.diffusivity). units: cm^2/s
new_temp (float): desired temperature. units: K
structure (structure): structure used for the diffusivity calculation
species (string/Specie): conducting species
Returns:
(float) Conductivity at extrapolated temp in mS/cm.
"""
return get_extrapolated_diffusivity(temps, diffusivities, new_temp) \
* get_conversion_factor(structure, species, new_temp)
def get_arrhenius_plot(temps, diffusivities, diffusivity_errors=None,
**kwargs):
"""
Returns an Arrhenius plot.
Args:
temps ([float]): A sequence of temperatures.
diffusivities ([float]): A sequence of diffusivities (e.g.,
from DiffusionAnalyzer.diffusivity).
diffusivity_errors ([float]): A sequence of errors for the
diffusivities. If None, no error bar is plotted.
\*\*kwargs:
Any keyword args supported by matplotlib.pyplot.plot.
Returns:
A matplotlib.pyplot object. Do plt.show() to show the plot.
"""
Ea, c = fit_arrhenius(temps, diffusivities)
from pymatgen.util.plotting_utils import get_publication_quality_plot
plt = get_publication_quality_plot(12, 8)
#log10 of the arrhenius fit
arr = c * np.exp(-Ea / (phyc.k_b / phyc.e *
np.array(temps)))
t_1 = 1000 / np.array(temps)
plt.plot(t_1, diffusivities, 'ko', t_1, arr, 'k--', markersize=10,
**kwargs)
if diffusivity_errors is not None:
n = len(diffusivity_errors)
plt.errorbar(t_1[0:n], diffusivities[0:n], yerr=diffusivity_errors,
fmt='ko', ecolor='k', capthick=2, linewidth=2)
ax = plt.axes()
ax.set_yscale('log')
    plt.text(0.6, 0.85, "E$_a$ = {:.0f} meV".format(Ea * 1000),
             fontsize=30, transform=ax.transAxes)
plt.ylabel("D (cm$^2$/s)")
plt.xlabel("1000/T (K$^{-1}$)")
plt.tight_layout()
return plt
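# Illustrative end-to-end sketch (not part of the original module): combining
# runs performed at several temperatures to extrapolate a room-temperature
# conductivity. The vasprun.xml paths, temperatures and the Li specie are
# hypothetical placeholders.
def _example_multi_temperature_workflow():
    analyzers = {}
    for temp in (600, 800, 1000):
        analyzers[temp] = DiffusionAnalyzer.from_files(
            ["%dK/run1/vasprun.xml" % temp, "%dK/run2/vasprun.xml" % temp],
            specie="Li")
    temps = sorted(analyzers.keys())
    diffusivities = [analyzers[t].diffusivity for t in temps]
    # Extrapolate to 300 K using the Arrhenius helpers defined above.
    return get_extrapolated_conductivity(
        temps, diffusivities, new_temp=300,
        structure=analyzers[600].structure, species="Li")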
| {
"content_hash": "731a4300fd65aa4874e7aa32234acdf5",
"timestamp": "",
"source": "github",
"line_count": 772,
"max_line_length": 80,
"avg_line_length": 44.21761658031088,
"alnum_prop": 0.5725333958284509,
"repo_name": "sonium0/pymatgen",
"id": "9c07a7e9eae7942240a0cce462f1dc423b8081d1",
"size": "34246",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pymatgen/analysis/diffusion_analyzer.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Common Lisp",
"bytes": "3029065"
},
{
"name": "Groff",
"bytes": "868"
},
{
"name": "Perl",
"bytes": "229104"
},
{
"name": "Propeller Spin",
"bytes": "4026362"
},
{
"name": "Python",
"bytes": "3590333"
}
],
"symlink_target": ""
} |
class SerializerInt(object):
    def dumps(self, obj):
        return str(obj)
    def loads(self, s):
return int(s) | {
"content_hash": "1dd087f8c210bf70fdae4af5046e0c45",
"timestamp": "",
"source": "github",
"line_count": 5,
"max_line_length": 28,
"avg_line_length": 24.4,
"alnum_prop": 0.6065573770491803,
"repo_name": "Jumpscale/jumpscale6_core",
"id": "39d69168100c1e5c72cfb1a569e563062c09d5af",
"size": "124",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "lib/JumpScale/baselib/serializers/SerializerInt.py",
"mode": "33261",
"license": "bsd-2-clause",
"language": [
{
"name": "CSS",
"bytes": "3681"
},
{
"name": "HTML",
"bytes": "11738"
},
{
"name": "JavaScript",
"bytes": "70132"
},
{
"name": "Lua",
"bytes": "2162"
},
{
"name": "Python",
"bytes": "5848017"
},
{
"name": "Shell",
"bytes": "7692"
}
],
"symlink_target": ""
} |
from flask import (
Blueprint,
redirect,
render_template,
url_for
)
from flask.ext.login import current_user
from flask.ext.security import login_required
from application.skills.forms import AuditForm
from application.skills.models import Audit
skills = Blueprint('skills', __name__, template_folder='templates')
@skills.route('/skills', methods=['GET', 'POST'])
@login_required
def audit():
form = AuditForm()
user = current_user._get_current_object()
if form.validate_on_submit():
audit = Audit.objects.create(
owner=user,
**form.data)
return redirect(url_for('.audit'))
else:
audit = Audit.objects.filter(owner=user).order_by('-created_date').first()
form = AuditForm(obj=audit)
return render_template('skills/audit.html', form=form)
| {
"content_hash": "e4f9598201ca473b5227c232b5aacb08",
"timestamp": "",
"source": "github",
"line_count": 33,
"max_line_length": 82,
"avg_line_length": 25.272727272727273,
"alnum_prop": 0.6690647482014388,
"repo_name": "crossgovernmentservices/csdigital-prototype",
"id": "51cef6da6d98cde86fce15faddf2e5c3958004f0",
"size": "834",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "application/skills/views.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "473087"
},
{
"name": "HTML",
"bytes": "146550"
},
{
"name": "JavaScript",
"bytes": "114161"
},
{
"name": "Makefile",
"bytes": "323"
},
{
"name": "Python",
"bytes": "97464"
},
{
"name": "Shell",
"bytes": "448"
}
],
"symlink_target": ""
} |
import weakref
# Based on: https://stackoverflow.com/a/2022629
# By Longpoke (https://stackoverflow.com/users/80243)
class Event(list):
"""Event subscription.
A list of callable objects. Calling an instance of this will cause a
call to each item in the list in ascending order by index.
The list can also contain WeakMethods using the append_weak and
insert_weak methods. When a weak method is dead, it will be removed
from the list the next time the event is called.
Example Usage:
    >>> def f(x):
    ...     print('f(%s)' % x)
    >>> def g(x):
    ...     print('g(%s)' % x)
>>> e = Event()
>>> e()
>>> e.append(f)
>>> e(123)
f(123)
>>> e.remove(f)
>>> e()
>>> e += (f, g)
>>> e(10)
f(10)
g(10)
>>> del e[0]
>>> e(2)
g(2)
"""
def __call__(self, *args, **kwargs):
for method in self.copy(): # prevent mutation while invoking, in case callbacks themselves add to this list
if isinstance(method, weakref.WeakMethod):
strong_method = method()
if not strong_method:
# This weak reference is dead, remove it from the list
try: self.remove(method)
except ValueError: pass # allow for the possibility some other callback removed it already while we were iterating
continue
else:
# it's good, proceed with dereferenced strong_method
method = strong_method
method(*args, **kwargs)
def __repr__(self):
return "Event(%s)" % list.__repr__(self)
@staticmethod
def make_weak(method) -> weakref.WeakMethod:
return weakref.WeakMethod(method)
def append_weak(self, method):
self.append(Event.make_weak(method))
def insert_weak(self, pos, method):
self.insert(pos, Event.make_weak(method))
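# Illustrative sketch (not part of the original module): a weakly-subscribed
# bound method stops firing once its owner is garbage collected, and the dead
# WeakMethod is pruned on the next call. The Listener class is hypothetical.
def _example_weak_subscription():
    class Listener:
        def on_event(self, x):
            print('got %s' % x)
    e = Event()
    listener = Listener()
    e.append_weak(listener.on_event)
    e(1)           # prints "got 1"
    del listener   # only reference dropped; the WeakMethod is now dead
    e(2)           # dead entry is removed silently, nothing is printed
    return e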
| {
"content_hash": "143df9d64f8d6971d5a9d273f1a44316",
"timestamp": "",
"source": "github",
"line_count": 61,
"max_line_length": 135,
"avg_line_length": 31.639344262295083,
"alnum_prop": 0.5683937823834196,
"repo_name": "fyookball/electrum",
"id": "24b162d76f5680ce22f92b5fe53fcee6b730117b",
"size": "1930",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "lib/utils/event.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "GLSL",
"bytes": "289"
},
{
"name": "Java",
"bytes": "1574"
},
{
"name": "Makefile",
"bytes": "842"
},
{
"name": "NSIS",
"bytes": "7309"
},
{
"name": "Objective-C",
"bytes": "415997"
},
{
"name": "Python",
"bytes": "2365528"
},
{
"name": "Shell",
"bytes": "26389"
}
],
"symlink_target": ""
} |
"""Init file for HassIO addons git."""
import asyncio
import logging
from pathlib import Path
import shutil
import git
from .util import get_hash_from_repository
from ..const import URL_HASSIO_ADDONS
_LOGGER = logging.getLogger(__name__)
class GitRepo(object):
"""Manage addons git repo."""
def __init__(self, config, loop, path, url):
"""Initialize git base wrapper."""
self.config = config
self.loop = loop
self.repo = None
self.path = path
self.url = url
self._lock = asyncio.Lock(loop=loop)
async def load(self):
"""Init git addon repo."""
if not self.path.is_dir():
return await self.clone()
async with self._lock:
try:
_LOGGER.info("Load addon %s repository", self.path)
self.repo = await self.loop.run_in_executor(
None, git.Repo, str(self.path))
except (git.InvalidGitRepositoryError, git.NoSuchPathError,
git.GitCommandError) as err:
_LOGGER.error("Can't load %s repo: %s.", self.path, err)
return False
return True
async def clone(self):
"""Clone git addon repo."""
async with self._lock:
try:
_LOGGER.info("Clone addon %s repository", self.url)
self.repo = await self.loop.run_in_executor(
None, git.Repo.clone_from, self.url, str(self.path))
except (git.InvalidGitRepositoryError, git.NoSuchPathError,
git.GitCommandError) as err:
_LOGGER.error("Can't clone %s repo: %s.", self.url, err)
return False
return True
async def pull(self):
"""Pull git addon repo."""
if self._lock.locked():
            _LOGGER.warning("A task is already in progress.")
return False
async with self._lock:
try:
_LOGGER.info("Pull addon %s repository", self.url)
await self.loop.run_in_executor(
None, self.repo.remotes.origin.pull)
except (git.InvalidGitRepositoryError, git.NoSuchPathError,
git.exc.GitCommandError) as err:
_LOGGER.error("Can't pull %s repo: %s.", self.url, err)
return False
return True
class GitRepoHassIO(GitRepo):
"""HassIO addons repository."""
def __init__(self, config, loop):
"""Initialize git hassio addon repository."""
super().__init__(
config, loop, config.path_addons_core, URL_HASSIO_ADDONS)
class GitRepoCustom(GitRepo):
"""Custom addons repository."""
def __init__(self, config, loop, url):
"""Initialize git hassio addon repository."""
path = Path(config.path_addons_git, get_hash_from_repository(url))
super().__init__(config, loop, path, url)
def remove(self):
"""Remove a custom addon."""
if self.path.is_dir():
_LOGGER.info("Remove custom addon repository %s", self.url)
def log_err(funct, path, _):
"""Log error."""
_LOGGER.warning("Can't remove %s", path)
shutil.rmtree(str(self.path), onerror=log_err)
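# Illustrative sketch (not part of the original module): cloning a custom
# add-on repository and updating it later. The URL is a hypothetical
# placeholder; `config` and `loop` are assumed to be the HassIO config object
# and asyncio event loop used throughout this module.
async def _example_custom_repo(config, loop):
    repo = GitRepoCustom(config, loop, "https://example.com/custom-addons.git")
    if await repo.load():
        await repo.pull()
    return repo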
| {
"content_hash": "1d95bcddf2b2d5da2e17d2980c7fd1ec",
"timestamp": "",
"source": "github",
"line_count": 107,
"max_line_length": 74,
"avg_line_length": 30.878504672897197,
"alnum_prop": 0.5562953995157385,
"repo_name": "pvizeli/hassio",
"id": "5e9c02618c1b51d115890ba6272c2f11705c06da",
"size": "3304",
"binary": false,
"copies": "1",
"ref": "refs/heads/dev",
"path": "hassio/addons/git.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "HTML",
"bytes": "103291"
},
{
"name": "Python",
"bytes": "113129"
}
],
"symlink_target": ""
} |
from django import template
from django.core.urlresolvers import reverse
from ..thumbs import DEFAULT_SIZE
#
register = template.Library()
#
@register.simple_tag
def preview(media, templatedir="mediafiles", *args, **kwargs):
    try:
        maintype, _subtype = media.mimetype.split('/')
        return template.loader.get_template("%s/%s.html" % (templatedir, maintype)).render(
            template.Context({'media': media}))
    except Exception:
        return ""
#@register.simple_tag(takes_context=True)
#def make_delete_url(context,urlname,**kwargs ):
# p = context.get('mediafile_deleter_hint',{} )
# p.update(kwargs)
# return reverse(urlname, kwargs=p)
#
@register.simple_tag(takes_context=True)
def mediafile_delete_url(context, mediafile, **kwargs):
    urlname = context.get('mediafile_delete_url', 'gallery_admin_media_delete')
    p = context.get('mediafile_url_hint', {})
    p.update(kwargs)
    return reverse(urlname, kwargs=p)
@register.simple_tag(takes_context=True)
def mediafile_image_url(context, mediafile, **kwargs):
    urlname = context.get('mediafile_image_url', 'mediafiles_preview')
    p = context.get('mediafile_url_hint', {})
    p.update(kwargs)
    return reverse(urlname, kwargs=p)
@register.simple_tag(takes_context=True)
def mediafile_thumbnail_url(context, mediafile, **kwargs):
    if not mediafile.is_image():
        return mediafile.get_thumbnail_url()
    urlname = context.get('mediafile_thumbnail_url', 'mediafiles_preview')
    p = context.get('mediafile_url_hint', {})
    p.update(kwargs)
    return reverse(urlname, kwargs=p)
| {
"content_hash": "3f98298018c9449abbb743b4c76aa53f",
"timestamp": "",
"source": "github",
"line_count": 48,
"max_line_length": 85,
"avg_line_length": 33.125,
"alnum_prop": 0.6842767295597484,
"repo_name": "hdknr/django-mediafiles",
"id": "1dec7587a1c34d8b7464b165f0b9d5dfb27d4da9",
"size": "1614",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/mediafiles/templatetags/mediafiles.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "1611"
},
{
"name": "JavaScript",
"bytes": "99832"
},
{
"name": "Python",
"bytes": "46310"
}
],
"symlink_target": ""
} |
import argparse
import sys
from typing import Any
from django.core import validators
from django.core.exceptions import ValidationError
from django.core.management.base import CommandError
from django.db.utils import IntegrityError
from zerver.lib.actions import do_create_user
from zerver.lib.initial_password import initial_password
from zerver.lib.management import ZulipBaseCommand
class Command(ZulipBaseCommand):
help = """Create the specified user with a default initial password.
Set tos_version=None, so that the user needs to do a ToS flow on login.
Omit both <email> and <full name> for interactive user creation.
"""
def add_arguments(self, parser: argparse.ArgumentParser) -> None:
parser.add_argument('--this-user-has-accepted-the-tos',
dest='tos',
action="store_true",
default=False,
help='Acknowledgement that the user has already accepted the ToS.')
parser.add_argument('--password',
dest='password',
type=str,
default='',
                            help='password of new user. For development only. '
                                 'Note that we recommend against setting '
                                 'passwords this way, since they can be snooped by any user account '
                                 'on the server via `ps -ef` or by any superuser with '
                                 'read access to the user\'s bash history.')
parser.add_argument('--password-file',
dest='password_file',
type=str,
default='',
help='The file containing the password of the new user.')
parser.add_argument('email', metavar='<email>', type=str, nargs='?', default=argparse.SUPPRESS,
help='email address of new user')
parser.add_argument('full_name', metavar='<full name>', type=str, nargs='?',
default=argparse.SUPPRESS,
help='full name of new user')
self.add_realm_args(parser, True, "The name of the existing realm to which to add the user.")
def handle(self, *args: Any, **options: Any) -> None:
if not options["tos"]:
raise CommandError("""You must confirm that this user has accepted the
Terms of Service by passing --this-user-has-accepted-the-tos.""")
realm = self.get_realm(options)
assert realm is not None # Should be ensured by parser
try:
email = options['email']
full_name = options['full_name']
try:
validators.validate_email(email)
except ValidationError:
raise CommandError("Invalid email address.")
except KeyError:
if 'email' in options or 'full_name' in options:
raise CommandError("""Either specify an email and full name as two
parameters, or specify no parameters for interactive user creation.""")
else:
while True:
email = input("Email: ")
try:
validators.validate_email(email)
break
except ValidationError:
print("Invalid email address.", file=sys.stderr)
full_name = input("Full name: ")
try:
if options['password_file']:
with open(options['password_file']) as f:
pw = f.read()
elif options['password']:
pw = options['password']
else:
user_initial_password = initial_password(email)
if user_initial_password is None:
raise CommandError("Password is unusable.")
pw = user_initial_password
do_create_user(
email,
pw,
realm,
full_name,
acting_user=None,
)
except IntegrityError:
raise CommandError("User already exists.")
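# Illustrative invocations (not part of the original file). The realm option
# below assumes `add_realm_args` registers the usual `-r`/`--realm` flag, and
# the realm string and email are hypothetical placeholders:
#
#   ./manage.py create_user --this-user-has-accepted-the-tos \
#       -r zulip newuser@example.com "New User"
#   ./manage.py create_user --this-user-has-accepted-the-tos -r zulip
#
# The second form drops into the interactive email/full-name prompts above.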
| {
"content_hash": "844f9bb4dca69241cda9e1f08715616e",
"timestamp": "",
"source": "github",
"line_count": 97,
"max_line_length": 103,
"avg_line_length": 43.77319587628866,
"alnum_prop": 0.5376825247291569,
"repo_name": "brainwane/zulip",
"id": "d1f6a129d70fe01f82ea276c4d272513d231767e",
"size": "4246",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "zerver/management/commands/create_user.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "423578"
},
{
"name": "Emacs Lisp",
"bytes": "158"
},
{
"name": "HTML",
"bytes": "647926"
},
{
"name": "JavaScript",
"bytes": "2886792"
},
{
"name": "Pascal",
"bytes": "1113"
},
{
"name": "Perl",
"bytes": "398747"
},
{
"name": "Puppet",
"bytes": "90558"
},
{
"name": "Python",
"bytes": "6000548"
},
{
"name": "Ruby",
"bytes": "249744"
},
{
"name": "Shell",
"bytes": "110849"
},
{
"name": "TypeScript",
"bytes": "9543"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import
import json
import pickle
class JSON(object):
@staticmethod
def encode(item):
return json.dumps(item)
@staticmethod
def decode(item):
if isinstance(item, bytes):
item = item.decode()
return json.loads(item)
class String(object):
@staticmethod
def encode(item):
return str(item)
@staticmethod
def decode(item):
if isinstance(item, bytes):
item = item.decode()
return item
class Numeric(object):
@staticmethod
def encode(item):
return int(item)
@staticmethod
def decode(item):
return int(item)
class Pickle(object):
@staticmethod
def encode(item):
return pickle.dumps(item)
@staticmethod
def decode(item):
return pickle.loads(item)
| {
"content_hash": "d5cc727488604cfd547bbf7434faae3f",
"timestamp": "",
"source": "github",
"line_count": 48,
"max_line_length": 38,
"avg_line_length": 17.708333333333332,
"alnum_prop": 0.6094117647058823,
"repo_name": "mylokin/redisext",
"id": "61f1dca17e3da2e33d8a568a4f70f66763b5f538",
"size": "850",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "redisext/serializer.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "528"
},
{
"name": "Python",
"bytes": "43247"
},
{
"name": "Ruby",
"bytes": "823"
}
],
"symlink_target": ""
} |
import types
#class Type:
# @property
# def name ( self ):
# return self.m_name
#
# @property
# def modifiers ( self ):
#
# def __init__ ( self, _name, _type ):
# self.m_name = _name
# self.m_type = _type
class Parameter:
@property
def name ( self ):
return self.m_name
@property
def type ( self ):
return self.m_type
def __init__ ( self, _name, _type ):
self.m_name = _name
self.m_type = _type
class Function:
@property
def name ( self ):
return self.m_name
@property
def returnType ( self ):
return self.m_returnType
@property
def parameters ( self ):
return self.m_parameters
def __init__ ( self, _name, _returnType, _parameters ):
self.m_name = _name
self.m_returnType = _returnType
self.m_parameters = _parameters
class Enum:
@property
def name ( self ):
return self.m_name
@property
def valueNames ( self ):
for value in self.m_values:
if type(value) == types.TupleType:
yield value[0]
else:
yield value
@property
def values ( self ):
for value in self.m_values:
if type(value) == types.TupleType:
yield value
else:
yield value, None
def __init__ ( self, _name, _values ):
assert type(_name) in types.StringTypes
assert type(_values) == types.ListType
self.m_name = _name
self.m_values = _values
class ICodeParser:
def parse ( self, _input ): pass
def hasFunction ( self, _name ): pass
def getFunction ( self, _name ): pass
def listFunctions ( self ): pass
def hasEnum ( self, _name ): pass
def getEnum ( self, _name ): pass
def listEnums ( self ): pass
def clear ( self ): pass
def get ( _name ):
if _name in ['c', 'C']:
import sequanto_automation.codeparsers.c
return sequanto_automation.codeparsers.c.CodeParser()
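# Illustrative sketch (not part of the original module): an Enum mixing bare
# value names with (name, value) tuples; valueNames yields just the names,
# while values yields (name, value-or-None) pairs. The Color enum below is
# hypothetical.
def _example_enum_usage():
    colors = Enum('Color', ['RED', ('GREEN', 2), 'BLUE'])
    assert list(colors.valueNames) == ['RED', 'GREEN', 'BLUE']
    assert list(colors.values) == [('RED', None), ('GREEN', 2), ('BLUE', None)]
    return colors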
| {
"content_hash": "e934ba88f0611b5d8721585d178cc750",
"timestamp": "",
"source": "github",
"line_count": 90,
"max_line_length": 61,
"avg_line_length": 24.522222222222222,
"alnum_prop": 0.5111010421386497,
"repo_name": "micronpn/sequanto-automation",
"id": "b05bf912bc2bd35f44e6890e49c377d4f1d9b71c",
"size": "2207",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "generator/lib/sequanto_automation/codeparser.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Arduino",
"bytes": "13188"
},
{
"name": "C",
"bytes": "234162"
},
{
"name": "C++",
"bytes": "101645"
},
{
"name": "CMake",
"bytes": "26223"
},
{
"name": "Emacs Lisp",
"bytes": "344"
},
{
"name": "Java",
"bytes": "7005"
},
{
"name": "Python",
"bytes": "102062"
},
{
"name": "Shell",
"bytes": "332"
}
],
"symlink_target": ""
} |
"""
HTTP server that returns a fixed string.
"""
from BaseHTTPServer import BaseHTTPRequestHandler, HTTPServer
class Handler(BaseHTTPRequestHandler):
def do_GET(s):
s.wfile.write(b"hi")
s.wfile.close()
httpd = HTTPServer((b"0.0.0.0", 8080), Handler)
httpd.serve_forever()
| {
"content_hash": "6bed95b6b797217c46bc7085389f6601",
"timestamp": "",
"source": "github",
"line_count": 15,
"max_line_length": 61,
"avg_line_length": 19.8,
"alnum_prop": 0.6902356902356902,
"repo_name": "1d4Nf6/flocker",
"id": "d831a2ec42c6f3c8294287d68c199d46b509ada1",
"size": "297",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "flocker/acceptance/hellohttp.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "2442948"
},
{
"name": "Ruby",
"bytes": "6401"
},
{
"name": "Shell",
"bytes": "3418"
}
],
"symlink_target": ""
} |
import copy
import re
import signal
import warnings
from datetime import datetime
from functools import reduce
from itertools import filterfalse, tee
from typing import (
TYPE_CHECKING,
Any,
Callable,
Dict,
Generator,
Iterable,
List,
MutableMapping,
Optional,
Tuple,
TypeVar,
)
from urllib import parse
import flask
import jinja2
import jinja2.nativetypes
from airflow.configuration import conf
from airflow.exceptions import AirflowException
from airflow.utils.module_loading import import_string
if TYPE_CHECKING:
from airflow.models import TaskInstance
KEY_REGEX = re.compile(r'^[\w.-]+$')
GROUP_KEY_REGEX = re.compile(r'^[\w-]+$')
CAMELCASE_TO_SNAKE_CASE_REGEX = re.compile(r'(?!^)([A-Z]+)')
T = TypeVar('T')
S = TypeVar('S')
def validate_key(k: str, max_length: int = 250):
"""Validates value used as a key."""
if not isinstance(k, str):
raise TypeError(f"The key has to be a string and is {type(k)}:{k}")
if len(k) > max_length:
raise AirflowException(f"The key has to be less than {max_length} characters")
if not KEY_REGEX.match(k):
raise AirflowException(
f"The key ({k}) has to be made of alphanumeric characters, dashes, "
f"dots and underscores exclusively"
)
def validate_group_key(k: str, max_length: int = 200):
"""Validates value used as a group key."""
if not isinstance(k, str):
raise TypeError(f"The key has to be a string and is {type(k)}:{k}")
if len(k) > max_length:
raise AirflowException(f"The key has to be less than {max_length} characters")
if not GROUP_KEY_REGEX.match(k):
raise AirflowException(
f"The key ({k!r}) has to be made of alphanumeric characters, dashes and underscores exclusively"
)
def alchemy_to_dict(obj: Any) -> Optional[Dict]:
"""Transforms a SQLAlchemy model instance into a dictionary"""
if not obj:
return None
output = {}
for col in obj.__table__.columns:
value = getattr(obj, col.name)
if isinstance(value, datetime):
value = value.isoformat()
output[col.name] = value
return output
def ask_yesno(question: str, default: Optional[bool] = None) -> bool:
"""Helper to get a yes or no answer from the user."""
yes = {'yes', 'y'}
no = {'no', 'n'}
print(question)
while True:
choice = input().lower()
if choice == "" and default is not None:
return default
if choice in yes:
return True
if choice in no:
return False
print("Please respond with y/yes or n/no.")
def prompt_with_timeout(question: str, timeout: int, default: Optional[bool] = None) -> bool:
"""Ask the user a question and timeout if they don't respond"""
def handler(signum, frame):
raise AirflowException(f"Timeout {timeout}s reached")
signal.signal(signal.SIGALRM, handler)
signal.alarm(timeout)
try:
return ask_yesno(question, default)
finally:
signal.alarm(0)
def is_container(obj: Any) -> bool:
"""Test if an object is a container (iterable) but not a string"""
return hasattr(obj, '__iter__') and not isinstance(obj, str)
def as_tuple(obj: Any) -> tuple:
"""
If obj is a container, returns obj as a tuple.
Otherwise, returns a tuple containing obj.
"""
if is_container(obj):
return tuple(obj)
else:
return tuple([obj])
def chunks(items: List[T], chunk_size: int) -> Generator[List[T], None, None]:
"""Yield successive chunks of a given size from a list of items"""
if chunk_size <= 0:
raise ValueError('Chunk size must be a positive integer')
for i in range(0, len(items), chunk_size):
yield items[i : i + chunk_size]
def reduce_in_chunks(fn: Callable[[S, List[T]], S], iterable: List[T], initializer: S, chunk_size: int = 0):
"""
Reduce the given list of items by splitting it into chunks
of the given size and passing each chunk through the reducer
"""
if len(iterable) == 0:
return initializer
if chunk_size == 0:
chunk_size = len(iterable)
return reduce(fn, chunks(iterable, chunk_size), initializer)
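# Illustrative sketch (not part of the original module): summing a list three
# items at a time; each chunk arrives at the reducer as a list.
def _example_reduce_in_chunks() -> int:
    total = reduce_in_chunks(lambda acc, chunk: acc + sum(chunk), [1, 2, 3, 4, 5], 0, 3)
    assert total == 15
    return total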
def as_flattened_list(iterable: Iterable[Iterable[T]]) -> List[T]:
"""
Return an iterable with one level flattened
>>> as_flattened_list((('blue', 'red'), ('green', 'yellow', 'pink')))
['blue', 'red', 'green', 'yellow', 'pink']
"""
return [e for i in iterable for e in i]
def parse_template_string(template_string: str) -> Tuple[Optional[str], Optional[jinja2.Template]]:
"""Parses Jinja template string."""
if "{{" in template_string: # jinja mode
return None, jinja2.Template(template_string)
else:
return template_string, None
def render_log_filename(ti: "TaskInstance", try_number, filename_template) -> str:
"""
Given task instance, try_number, filename_template, return the rendered log
filename
:param ti: task instance
:param try_number: try_number of the task
:param filename_template: filename template, which can be jinja template or
python string template
"""
filename_template, filename_jinja_template = parse_template_string(filename_template)
if filename_jinja_template:
jinja_context = ti.get_template_context()
jinja_context['try_number'] = try_number
return render_template_to_string(filename_jinja_template, jinja_context)
return filename_template.format(
dag_id=ti.dag_id,
task_id=ti.task_id,
execution_date=ti.execution_date.isoformat(),
try_number=try_number,
)
def convert_camel_to_snake(camel_str: str) -> str:
"""Converts CamelCase to snake_case."""
return CAMELCASE_TO_SNAKE_CASE_REGEX.sub(r'_\1', camel_str).lower()
def merge_dicts(dict1: Dict, dict2: Dict) -> Dict:
"""
Merge two dicts recursively, returning new dict (input dict is not mutated).
Lists are not concatenated. Items in dict2 overwrite those also found in dict1.
"""
merged = dict1.copy()
for k, v in dict2.items():
if k in merged and isinstance(v, dict):
merged[k] = merge_dicts(merged.get(k, {}), v)
else:
merged[k] = v
return merged
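# Illustrative sketch (not part of the original module): nested dicts are
# merged recursively, while scalar values from the second dict win.
def _example_merge_dicts() -> Dict:
    merged = merge_dicts({"a": {"x": 1}, "b": 1}, {"a": {"y": 2}, "b": 2})
    assert merged == {"a": {"x": 1, "y": 2}, "b": 2}
    return merged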
def partition(pred: Callable[[T], bool], iterable: Iterable[T]) -> Tuple[Iterable[T], Iterable[T]]:
"""Use a predicate to partition entries into false entries and true entries"""
iter_1, iter_2 = tee(iterable)
return filterfalse(pred, iter_1), filter(pred, iter_2)
def chain(*args, **kwargs):
"""This function is deprecated. Please use `airflow.models.baseoperator.chain`."""
warnings.warn(
"This function is deprecated. Please use `airflow.models.baseoperator.chain`.",
DeprecationWarning,
stacklevel=2,
)
return import_string('airflow.models.baseoperator.chain')(*args, **kwargs)
def cross_downstream(*args, **kwargs):
"""This function is deprecated. Please use `airflow.models.baseoperator.cross_downstream`."""
warnings.warn(
"This function is deprecated. Please use `airflow.models.baseoperator.cross_downstream`.",
DeprecationWarning,
stacklevel=2,
)
return import_string('airflow.models.baseoperator.cross_downstream')(*args, **kwargs)
def build_airflow_url_with_query(query: Dict[str, Any]) -> str:
"""
Build airflow url using base_url and default_view and provided query
For example:
    http://0.0.0.0:8000/base/graph?dag_id=my-task&root=&execution_date=2020-10-27T10%3A59%3A25.615587
"""
view = conf.get('webserver', 'dag_default_view').lower()
url = flask.url_for(f"Airflow.{view}")
return f"{url}?{parse.urlencode(query)}"
# The 'template' argument is typed as Any because the jinja2.Template is too
# dynamic to be effectively type-checked.
def render_template(template: Any, context: MutableMapping[str, Any], *, native: bool) -> Any:
"""Render a Jinja2 template with given Airflow context.
The default implementation of ``jinja2.Template.render()`` converts the
input context into dict eagerly many times, which triggers deprecation
messages in our custom context class. This takes the implementation apart
and retain the context mapping without resolving instead.
:param template: A Jinja2 template to render.
:param context: The Airflow task context to render the template with.
:param native: If set to *True*, render the template into a native type. A
DAG can enable this with ``render_template_as_native_obj=True``.
:returns: The render result.
"""
context = copy.copy(context)
env = template.environment
if template.globals:
context.update((k, v) for k, v in template.globals.items() if k not in context)
try:
nodes = template.root_render_func(env.context_class(env, context, template.name, template.blocks))
except Exception:
env.handle_exception() # Rewrite traceback to point to the template.
if native:
return jinja2.nativetypes.native_concat(nodes)
return "".join(nodes)
def render_template_to_string(template: jinja2.Template, context: MutableMapping[str, Any]) -> str:
"""Shorthand to ``render_template(native=False)`` with better typing support."""
return render_template(template, context, native=False)
def render_template_as_native(template: jinja2.Template, context: MutableMapping[str, Any]) -> Any:
"""Shorthand to ``render_template(native=True)`` with better typing support."""
return render_template(template, context, native=True)
def exactly_one(*args) -> bool:
"""
Returns True if exactly one of *args is "truthy", and False otherwise.
If user supplies an iterable, we raise ValueError and force them to unpack.
"""
if is_container(args[0]):
raise ValueError(
"Not supported for iterable args. Use `*` to unpack your iterable in the function call."
)
return sum(map(bool, args)) == 1
def prune_dict(val: Any, mode='strict'):
"""
Given dict ``val``, returns new dict based on ``val`` with all
empty elements removed.
What constitutes "empty" is controlled by the ``mode`` parameter. If mode is 'strict'
then only ``None`` elements will be removed. If mode is ``truthy``, then element ``x``
will be removed if ``bool(x) is False``.
"""
def is_empty(x):
if mode == 'strict':
return x is None
elif mode == 'truthy':
return bool(x) is False
raise ValueError("allowable values for `mode` include 'truthy' and 'strict'")
if isinstance(val, dict):
new_dict = {}
for k, v in val.items():
if is_empty(v):
continue
elif isinstance(v, (list, dict)):
new_val = prune_dict(v, mode=mode)
if new_val:
new_dict[k] = new_val
else:
new_dict[k] = v
return new_dict
elif isinstance(val, list):
new_list = []
for v in val:
if is_empty(v):
continue
elif isinstance(v, (list, dict)):
new_val = prune_dict(v, mode=mode)
if new_val:
new_list.append(new_val)
else:
new_list.append(v)
return new_list
else:
return val
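# Illustrative sketch (not part of the original module): 'strict' mode only
# drops None values, while 'truthy' also drops falsy values such as 0 and ''.
def _example_prune_dict() -> None:
    val = {"a": None, "b": 0, "c": {"d": None, "e": ""}, "f": 1}
    assert prune_dict(val) == {"b": 0, "c": {"e": ""}, "f": 1}
    assert prune_dict(val, mode="truthy") == {"f": 1}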
| {
"content_hash": "820c5da3db6c434c488d09e5e3478b06",
"timestamp": "",
"source": "github",
"line_count": 340,
"max_line_length": 108,
"avg_line_length": 33.75588235294118,
"alnum_prop": 0.6405855188638145,
"repo_name": "mistercrunch/airflow",
"id": "c75a017e2b165180d547c20295076b4aedd04986",
"size": "12264",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "airflow/utils/helpers.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "36341"
},
{
"name": "HTML",
"bytes": "99243"
},
{
"name": "JavaScript",
"bytes": "891460"
},
{
"name": "Mako",
"bytes": "494"
},
{
"name": "Python",
"bytes": "773270"
},
{
"name": "Shell",
"bytes": "5659"
}
],
"symlink_target": ""
} |
import unittest
import numpy as np
import pycqed as pq
import os
import matplotlib.pyplot as plt
from pycqed.analysis_v2 import measurement_analysis as ma
from pycqed.analysis_v2 import readout_analysis as ra
# Add test: 20180508\182642 - 183214
class Test_SSRO_auto_angle(unittest.TestCase):
@classmethod
def tearDownClass(self):
plt.close('all')
@classmethod
def setUpClass(self):
self.datadir = os.path.join(pq.__path__[0], 'tests', 'test_data')
ma.a_tools.datadir = self.datadir
def test_angles(self):
tp = 2*np.pi
ro_amp_high_factor = 0.1
ts = '20180508_182642'
te = '20180508_183214'
for angle in np.arange(0, 360, 30):
options_dict = {
'verbose': True,
'rotation_angle': angle*np.pi/180,
'auto_rotation_angle': True,
'fixed_p01': 0,
'fixed_p10': 0,
'nr_bins': 100,
}
label = 'SSRO_%d_%.2f' % (angle, ro_amp_high_factor*100)
aut = ma.Singleshot_Readout_Analysis(t_start=ts, t_stop=te,
label=label,
extract_only=True,
do_fitting=True,
options_dict=options_dict)
aut_angle = aut.proc_data_dict['raw_offset'][2]
aut_angle = aut_angle % tp
aut_snr = aut.fit_res['shots_all'].params['SNR'].value
options_dict['auto_rotation_angle'] = False
opt = ma.Singleshot_Readout_Analysis(t_start=ts, t_stop=te,
label=label,
extract_only=True,
do_fitting=True,
options_dict=options_dict)
opt_angle = opt.proc_data_dict['raw_offset'][2]
opt_angle = opt_angle % tp
opt_snr = opt.fit_res['shots_all'].params['SNR'].value
da = min(abs(aut_angle-opt_angle),
abs(aut_angle-opt_angle+2*np.pi),
abs(aut_angle-opt_angle-2*np.pi))
# Check if the angle was found within a few degrees
self.assertLess(da*180/np.pi, 9)
# Check if the SNRs roughly make sense
self.assertLess(aut_snr, 1.1)
self.assertLess(opt_snr, 1.1)
self.assertGreater(aut_snr, 0.55)
self.assertGreater(opt_snr, 0.55)
class Test_SSRO_discrimination_analysis(unittest.TestCase):
@classmethod
def tearDownClass(self):
plt.close('all')
@classmethod
def setUpClass(self):
self.datadir = os.path.join(pq.__path__[0], 'tests', 'test_data')
ma.a_tools.datadir = self.datadir
def assertBetween(self, value, min_v, max_v):
"""Fail if value is not between min and max_v (inclusive)."""
self.assertGreaterEqual(value, min_v)
self.assertLessEqual(value, max_v)
def test_SSRO_analysis_basic_1D(self):
t_start = '20171016_135112'
t_stop = t_start
a = ma.Singleshot_Readout_Analysis(t_start=t_start, t_stop=t_stop,
options_dict={'plot_init': True})
np.testing.assert_almost_equal(a.proc_data_dict['threshold_raw'],
-3.66, decimal=2)
np.testing.assert_almost_equal(a.proc_data_dict['F_assignment_raw'],
0.922, decimal=3)
self.assertBetween(a.proc_data_dict['threshold_fit'], -3.69, -3.62)
np.testing.assert_almost_equal(a.proc_data_dict['F_assignment_fit'],
0.920, decimal=2)
np.testing.assert_almost_equal(a.proc_data_dict['threshold_discr'],
-3.64, decimal=1)
np.testing.assert_almost_equal(a.proc_data_dict['F_discr'],
0.996, decimal=2)
def test_SSRO_analysis_basic_1D_wrong_peak_selected(self):
# This fit failed when I made a typo in the peak selection part
t_start = '20171016_171715'
t_stop = t_start
a = ma.Singleshot_Readout_Analysis(t_start=t_start, t_stop=t_stop,
extract_only=True)
self.assertBetween(a.proc_data_dict['threshold_raw'], -3.3, -3.2)
np.testing.assert_almost_equal(a.proc_data_dict['F_assignment_raw'],
0.944, decimal=2)
self.assertBetween(a.proc_data_dict['threshold_fit'], -3.3, -3.2)
np.testing.assert_almost_equal(a.proc_data_dict['F_assignment_fit'],
0.944, decimal=2)
self.assertBetween(a.proc_data_dict['threshold_discr'], -3.3, -3.2)
np.testing.assert_almost_equal(a.proc_data_dict['F_discr'],
0.99, decimal=2)
def test_SSRO_analysis_basic_1D_misfit(self):
# This dataset failed before I added additional constraints to the
# guess
t_start = '20171016_181021'
t_stop = t_start
a = ma.Singleshot_Readout_Analysis(t_start=t_start, t_stop=t_stop,
extract_only=True)
self.assertBetween(a.proc_data_dict['threshold_raw'], -1, -0.7)
np.testing.assert_almost_equal(a.proc_data_dict['F_assignment_raw'],
0.949, decimal=2)
self.assertBetween(a.proc_data_dict['threshold_fit'], -1, -.7)
np.testing.assert_almost_equal(a.proc_data_dict['F_assignment_fit'],
0.945, decimal=2)
self.assertBetween(a.proc_data_dict['threshold_discr'], -1, -.7)
np.testing.assert_almost_equal(a.proc_data_dict['F_discr'],
1.000, decimal=2)
self.assertLess(a.proc_data_dict['residual_excitation'], 0.09)
np.testing.assert_almost_equal(
a.proc_data_dict['relaxation_events'], 0.1,
decimal=1)
class Test_readout_analysis_functions(unittest.TestCase):
def test_get_arb_comb_xx_label(self):
labels = ra.get_arb_comb_xx_label(2, qubit_idx=0)
np.testing.assert_equal(labels[0], 'x0')
np.testing.assert_equal(labels[1], 'x1')
np.testing.assert_equal(labels[2], 'x2')
labels = ra.get_arb_comb_xx_label(2, qubit_idx=1)
np.testing.assert_equal(labels[0], '0x')
np.testing.assert_equal(labels[1], '1x')
np.testing.assert_equal(labels[2], '2x')
labels = ra.get_arb_comb_xx_label(nr_of_qubits=4, qubit_idx=1)
np.testing.assert_equal(labels[0], 'xx0x')
np.testing.assert_equal(labels[1], 'xx1x')
np.testing.assert_equal(labels[2], 'xx2x')
labels = ra.get_arb_comb_xx_label(nr_of_qubits=4, qubit_idx=3)
np.testing.assert_equal(labels[0], '0xxx')
np.testing.assert_equal(labels[1], '1xxx')
np.testing.assert_equal(labels[2], '2xxx')
def test_get_assignment_fid_from_cumhist(self):
chist_0 = np.array([0, 0, 0, 0, 0, .012,
.068, .22, .43, .59, .78, 1, 1])
chist_1 = np.array([0, 0.01, .05, .16, .21,
.24, .38, .62, .81, 1, 1, 1, 1])
centers = np.linspace(0, 1, len(chist_0))
# Fidelity for identical distributions should be 0.5 (0.5 is random)
fid, th = ra.get_assignement_fid_from_cumhist(
chist_0, chist_0, centers)
self.assertEqual(fid, 0.5)
# Test on the fake distribution
fid, threshold = ra.get_assignement_fid_from_cumhist(
chist_0, chist_1)
np.testing.assert_almost_equal(fid, 0.705)
np.testing.assert_almost_equal(threshold, 9)
# Test on the fake distribution
fid, threshold = ra.get_assignement_fid_from_cumhist(
chist_0, chist_1, centers)
np.testing.assert_almost_equal(fid, 0.705)
np.testing.assert_almost_equal(threshold, 0.75)
class Test_multiplexed_readout_analysis(unittest.TestCase):
def test_multiplexed_readout_analysis(self):
timestamp='20190916_184929'
# t_start = '20180323_150203'
# t_stop = t_start
# a = ma.Multiplexed_Readout_Analysis(t_start=t_start, t_stop=t_stop,
# qubit_names=['QR', 'QL'])
# np.testing.assert_almost_equal(a.proc_data_dict['F_ass_raw QL'],
# 0.72235812133072408)
# np.testing.assert_almost_equal(a.proc_data_dict['F_ass_raw QR'],
# 0.81329500978473579)
# np.testing.assert_almost_equal(a.proc_data_dict['threshold_raw QL'],
# 1.9708007812500004)
# np.testing.assert_almost_equal(a.proc_data_dict['threshold_raw QR'],
# -7.1367667055130006)
# def test_name_assignement(self):
# t_start = '20180323_150203'
# t_stop = t_start
# a = ma.Multiplexed_Readout_Analysis(t_start=t_start, t_stop=t_stop)
# np.testing.assert_equal(a.proc_data_dict['qubit_names'], ['q1', 'q0'])
# a = ma.Multiplexed_Readout_Analysis(t_start=t_start, t_stop=t_stop,
# qubit_names=['QR', 'QL'])
# np.testing.assert_equal(a.proc_data_dict['qubit_names'], ['QR', 'QL'])
| {
"content_hash": "497a7622dd0c9665576b341e4bbc4723",
"timestamp": "",
"source": "github",
"line_count": 218,
"max_line_length": 80,
"avg_line_length": 44.07798165137615,
"alnum_prop": 0.5365802893121032,
"repo_name": "DiCarloLab-Delft/PycQED_py3",
"id": "30c82230ea4a2d233e75e332e9f2cc05a85c1126",
"size": "9609",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "pycqed/tests/analysis_v2/test_readout_analysis.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "8748"
},
{
"name": "C++",
"bytes": "8802"
},
{
"name": "Cython",
"bytes": "8291"
},
{
"name": "OpenQASM",
"bytes": "15894"
},
{
"name": "Python",
"bytes": "7978715"
},
{
"name": "TeX",
"bytes": "8"
}
],
"symlink_target": ""
} |
import logging
__all__ = [
'base',
'data_mining',
'database',
'model_selection',
'models',
'network_analysis',
'predict'
]
logger = logging.getLogger("pyppi")
logger.setLevel(logging.INFO)
logger.propagate = False
handler = logging.StreamHandler()
formatter = logging.Formatter(
fmt='%(asctime)s %(name)-8s %(levelname)-8s %(message)s',
datefmt='%Y-%m-%d %H:%M:%S'
)
handler.setFormatter(formatter)
if not logger.handlers:
logger.addHandler(handler)
# atexit.register(cleanup_module)
def wrap_init():
from .database import init_database, cleanup_module, db_engine
init_database(db_engine)
wrap_init()
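# Usage sketch (not part of the original module): importing `pyppi` runs the
# logging setup and wrap_init() above as import-time side effects, so callers
# only need to fetch the already-configured logger by name. Illustrative only.
def _example_logging_usage():
    import logging
    logging.getLogger("pyppi").info("pyppi imported; database initialised")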
| {
"content_hash": "d0c2d4b2ef451526b9c7f3679e1188db",
"timestamp": "",
"source": "github",
"line_count": 33,
"max_line_length": 66,
"avg_line_length": 20.87878787878788,
"alnum_prop": 0.6386066763425254,
"repo_name": "daniaki/pyPPI",
"id": "04b2d8ef823b3287c861f64099ef3167767191b7",
"size": "706",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pyppi/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "2810"
},
{
"name": "Python",
"bytes": "626034"
},
{
"name": "Shell",
"bytes": "2758"
}
],
"symlink_target": ""
} |
""" ISOBUS description """
# isobus imports:
from isobus.vt.client import VTClient
from isobus.common import IBSException
| {
"content_hash": "a685223485898a467d1106b46515b12b",
"timestamp": "",
"source": "github",
"line_count": 5,
"max_line_length": 38,
"avg_line_length": 24.6,
"alnum_prop": 0.7804878048780488,
"repo_name": "jboomer/python-isobus",
"id": "699bb9331140aeb26f14a0a6670a13bcc212584b",
"size": "123",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "isobus/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "44897"
}
],
"symlink_target": ""
} |
"""Init file for HassIO rest api."""
import logging
from pathlib import Path
from aiohttp import web
from .addons import APIAddons
from .homeassistant import APIHomeAssistant
from .host import APIHost
from .network import APINetwork
from .supervisor import APISupervisor
from .security import APISecurity
_LOGGER = logging.getLogger(__name__)
class RestAPI(object):
"""Handle rest api for hassio."""
def __init__(self, config, loop):
"""Initialize docker base wrapper."""
self.config = config
self.loop = loop
self.webapp = web.Application(loop=self.loop)
# service stuff
self._handler = None
self.server = None
def register_host(self, host_control):
"""Register hostcontrol function."""
api_host = APIHost(self.config, self.loop, host_control)
self.webapp.router.add_get('/host/info', api_host.info)
self.webapp.router.add_post('/host/reboot', api_host.reboot)
self.webapp.router.add_post('/host/shutdown', api_host.shutdown)
self.webapp.router.add_post('/host/update', api_host.update)
def register_network(self, host_control):
"""Register network function."""
api_net = APINetwork(self.config, self.loop, host_control)
self.webapp.router.add_get('/network/info', api_net.info)
self.webapp.router.add_post('/network/options', api_net.options)
def register_supervisor(self, supervisor, addons, host_control,
websession):
"""Register supervisor function."""
api_supervisor = APISupervisor(
self.config, self.loop, supervisor, addons, host_control,
websession)
self.webapp.router.add_get('/supervisor/ping', api_supervisor.ping)
self.webapp.router.add_get('/supervisor/info', api_supervisor.info)
self.webapp.router.add_get(
'/supervisor/addons', api_supervisor.available_addons)
self.webapp.router.add_post(
'/supervisor/update', api_supervisor.update)
self.webapp.router.add_post(
'/supervisor/reload', api_supervisor.reload)
self.webapp.router.add_post(
'/supervisor/options', api_supervisor.options)
self.webapp.router.add_get('/supervisor/logs', api_supervisor.logs)
def register_homeassistant(self, dock_homeassistant):
"""Register homeassistant function."""
api_hass = APIHomeAssistant(self.config, self.loop, dock_homeassistant)
self.webapp.router.add_get('/homeassistant/info', api_hass.info)
self.webapp.router.add_post('/homeassistant/options', api_hass.options)
self.webapp.router.add_post('/homeassistant/update', api_hass.update)
self.webapp.router.add_post('/homeassistant/restart', api_hass.restart)
self.webapp.router.add_get('/homeassistant/logs', api_hass.logs)
def register_addons(self, addons):
"""Register homeassistant function."""
api_addons = APIAddons(self.config, self.loop, addons)
self.webapp.router.add_get('/addons/{addon}/info', api_addons.info)
self.webapp.router.add_post(
'/addons/{addon}/install', api_addons.install)
self.webapp.router.add_post(
'/addons/{addon}/uninstall', api_addons.uninstall)
self.webapp.router.add_post('/addons/{addon}/start', api_addons.start)
self.webapp.router.add_post('/addons/{addon}/stop', api_addons.stop)
self.webapp.router.add_post(
'/addons/{addon}/restart', api_addons.restart)
self.webapp.router.add_post(
'/addons/{addon}/update', api_addons.update)
self.webapp.router.add_post(
'/addons/{addon}/options', api_addons.options)
self.webapp.router.add_get('/addons/{addon}/logs', api_addons.logs)
def register_security(self):
"""Register security function."""
api_security = APISecurity(self.config, self.loop)
self.webapp.router.add_get('/security/info', api_security.info)
self.webapp.router.add_post('/security/options', api_security.options)
self.webapp.router.add_post('/security/totp', api_security.totp)
self.webapp.router.add_post('/security/session', api_security.session)
def register_panel(self):
"""Register panel for homeassistant."""
panel = Path(__file__).parents[1].joinpath('panel/hassio-main.html')
def get_panel(request):
"""Return file response with panel."""
return web.FileResponse(panel)
self.webapp.router.add_get('/panel', get_panel)
async def start(self):
"""Run rest api webserver."""
self._handler = self.webapp.make_handler(loop=self.loop)
try:
self.server = await self.loop.create_server(
self._handler, "0.0.0.0", "80")
except OSError as err:
_LOGGER.fatal(
"Failed to create HTTP server at 0.0.0.0:80 -> %s", err)
async def stop(self):
"""Stop rest api webserver."""
if self.server:
self.server.close()
await self.server.wait_closed()
await self.webapp.shutdown()
if self._handler:
await self._handler.finish_connections(60)
await self.webapp.cleanup()
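# Wiring sketch (not part of the original module): one way a caller could bring
# this REST API up inside an asyncio loop. `config`, `host_control` and `loop`
# stand in for the real HassIO objects/event loop created elsewhere.
async def _example_start_api(config, host_control, loop):
    api = RestAPI(config, loop)
    api.register_host(host_control)
    api.register_network(host_control)
    api.register_security()
    api.register_panel()
    await api.start()
    return api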
| {
"content_hash": "326ff9bbaa2f60ab9dacd1906fd01fce",
"timestamp": "",
"source": "github",
"line_count": 133,
"max_line_length": 79,
"avg_line_length": 39.88721804511278,
"alnum_prop": 0.6390197926484449,
"repo_name": "pvizeli/hassio",
"id": "856f8aabfce50cffafa6d4297b4c518e21329df3",
"size": "5305",
"binary": false,
"copies": "1",
"ref": "refs/heads/dev",
"path": "hassio/api/__init__.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "HTML",
"bytes": "103291"
},
{
"name": "Python",
"bytes": "113129"
}
],
"symlink_target": ""
} |
import logging
import shutil
import json
from django.contrib.auth.decorators import login_required
from django.contrib.auth.models import User
from django.contrib import messages
from django.core.urlresolvers import reverse
from django.http import HttpResponse, HttpResponseRedirect, HttpResponseNotAllowed, Http404
from django.db.models import Q
from django.shortcuts import get_object_or_404, render_to_response, render
from django.template import RequestContext
from django.views.generic import View, ListView, TemplateView
from django.utils.decorators import method_decorator
from django.utils.translation import ugettext_lazy as _
from django.contrib.formtools.wizard.views import SessionWizardView
from allauth.socialaccount.models import SocialToken
from requests_oauthlib import OAuth2Session
from bookmarks.models import Bookmark
from builds import utils as build_utils
from builds.models import Version
from builds.forms import AliasForm, VersionForm
from builds.filters import VersionFilter
from builds.models import VersionAlias
from core.utils import trigger_build
from oauth.models import GithubProject, BitbucketProject
from oauth import utils as oauth_utils
from projects.forms import (ProjectBackendForm, ProjectBasicsForm,
ProjectExtraForm, ProjectAdvancedForm,
UpdateProjectForm, SubprojectForm,
build_versions_form, UserForm, EmailHookForm,
TranslationForm, RedirectForm, WebHookForm)
from projects.models import Project, EmailHook, WebHook
from projects import constants, tasks
try:
from readthedocs.projects.signals import project_import
except ImportError:
from projects.signals import project_import
log = logging.getLogger(__name__)
class LoginRequiredMixin(object):
@method_decorator(login_required)
def dispatch(self, *args, **kwargs):
return super(LoginRequiredMixin, self).dispatch(*args, **kwargs)
class PrivateViewMixin(LoginRequiredMixin):
pass
class ProjectDashboard(PrivateViewMixin, ListView):
"""
A dashboard! If you aint know what that means you aint need to.
Essentially we show you an overview of your content.
"""
model = Project
template_name = 'projects/project_dashboard.html'
def get_queryset(self):
return Project.objects.dashboard(self.request.user)
def get_context_data(self, **kwargs):
context = super(ProjectDashboard, self).get_context_data(**kwargs)
filter = VersionFilter(constants.IMPORTANT_VERSION_FILTERS, queryset=self.get_queryset())
context['filter'] = filter
bookmarks = Bookmark.objects.filter(user=self.request.user)
        if bookmarks.exists():
context['bookmark_list'] = bookmarks[:3]
else:
bookmarks = None
return context
@login_required
def project_manage(request, project_slug):
"""
The management view for a project, where you will have links to edit
the projects' configuration, edit the files associated with that
project, etc.
Now redirects to the normal /projects/<slug> view.
"""
return HttpResponseRedirect(reverse('projects_detail',
args=[project_slug]))
@login_required
def project_comments_moderation(request, project_slug):
project = get_object_or_404(Project.objects.for_admin_user(request.user),
slug=project_slug)
return render(
request,
'projects/project_comments_moderation.html',
{'project': project}
)
@login_required
def project_edit(request, project_slug):
"""
Edit an existing project - depending on what type of project is being
edited (created or imported) a different form will be displayed
"""
project = get_object_or_404(Project.objects.for_admin_user(request.user),
slug=project_slug)
form_class = UpdateProjectForm
form = form_class(instance=project, data=request.POST or None,
user=request.user)
if request.method == 'POST' and form.is_valid():
form.save()
messages.success(request, _('Project settings updated'))
project_dashboard = reverse('projects_detail', args=[project.slug])
return HttpResponseRedirect(project_dashboard)
return render_to_response(
'projects/project_edit.html',
{'form': form, 'project': project},
context_instance=RequestContext(request)
)
@login_required
def project_advanced(request, project_slug):
"""
Edit an existing project - depending on what type of project is being
edited (created or imported) a different form will be displayed
"""
project = get_object_or_404(Project.objects.for_admin_user(request.user),
slug=project_slug)
form_class = ProjectAdvancedForm
form = form_class(instance=project, data=request.POST or None, initial={
'num_minor': 2, 'num_major': 2, 'num_point': 2})
if request.method == 'POST' and form.is_valid():
form.save()
messages.success(request, _('Project settings updated'))
project_dashboard = reverse('projects_detail', args=[project.slug])
return HttpResponseRedirect(project_dashboard)
return render_to_response(
'projects/project_advanced.html',
{'form': form, 'project': project},
context_instance=RequestContext(request)
)
@login_required
def project_versions(request, project_slug):
"""
Shows the available versions and lets the user choose which ones he would
like to have built.
"""
project = get_object_or_404(Project.objects.for_admin_user(request.user),
slug=project_slug)
if not project.is_imported:
raise Http404
form_class = build_versions_form(project)
form = form_class(data=request.POST or None)
if request.method == 'POST' and form.is_valid():
form.save()
messages.success(request, _('Project versions updated'))
project_dashboard = reverse('projects_detail', args=[project.slug])
return HttpResponseRedirect(project_dashboard)
return render_to_response(
'projects/project_versions.html',
{'form': form, 'project': project},
context_instance=RequestContext(request)
)
@login_required
def project_version_detail(request, project_slug, version_slug):
project = get_object_or_404(Project.objects.for_admin_user(request.user), slug=project_slug)
version = get_object_or_404(Version.objects.public(user=request.user, project=project, only_active=False), slug=version_slug)
form = VersionForm(request.POST or None, instance=version)
if request.method == 'POST' and form.is_valid():
form.save()
url = reverse('project_version_list', args=[project.slug])
return HttpResponseRedirect(url)
return render_to_response(
'projects/project_version_detail.html',
{'form': form, 'project': project, 'version': version},
context_instance=RequestContext(request)
)
@login_required
def project_delete(request, project_slug):
"""
    Delete the project on POST; otherwise show a form asking for
    confirmation of the delete.
"""
project = get_object_or_404(Project.objects.for_admin_user(request.user),
slug=project_slug)
if request.method == 'POST':
# Remove the repository checkout
shutil.rmtree(project.doc_path, ignore_errors=True)
# Delete the project and everything related to it
project.delete()
messages.success(request, _('Project deleted'))
project_dashboard = reverse('projects_dashboard')
return HttpResponseRedirect(project_dashboard)
return render_to_response(
'projects/project_delete.html',
{'project': project},
context_instance=RequestContext(request)
)
class ImportWizardView(PrivateViewMixin, SessionWizardView):
'''Project import wizard'''
form_list = [('basics', ProjectBasicsForm),
('extra', ProjectExtraForm)]
condition_dict = {'extra': lambda self: self.is_advanced()}
def get_form_kwargs(self, step):
'''Get args to pass into form instantiation'''
kwargs = {}
kwargs['user'] = self.request.user
if step == 'basics':
kwargs['show_advanced'] = True
if step == 'extra':
extra_form = self.get_form_from_step('basics')
project = extra_form.save(commit=False)
kwargs['instance'] = project
return kwargs
def get_form_from_step(self, step):
form = self.form_list[step](
data=self.get_cleaned_data_for_step(step),
**self.get_form_kwargs(step)
)
form.full_clean()
return form
def get_template_names(self):
'''Return template names based on step name'''
        return 'projects/import_{0}.html'.format(self.steps.current)
def done(self, form_list, **kwargs):
'''Save form data as object instance
Don't save form data directly, instead bypass documentation building and
other side effects for now, by signalling a save without commit. Then,
        finish by adding the members to the project and saving.
'''
# expect the first form
basics_form = form_list[0]
# Save the basics form to create the project instance, then alter
# attributes directly from other forms
project = basics_form.save()
for form in form_list[1:]:
for (field, value) in form.cleaned_data.items():
setattr(project, field, value)
else:
basic_only = True
project.save()
project_import.send(sender=project, request=self.request)
trigger_build(project, basic=basic_only)
return HttpResponseRedirect(reverse('projects_detail',
args=[project.slug]))
def is_advanced(self):
'''Determine if the user selected the `show advanced` field'''
data = self.get_cleaned_data_for_step('basics') or {}
return data.get('advanced', True)
class ImportView(PrivateViewMixin, TemplateView):
'''On GET, show the source select template, on POST, mock out a wizard
If we are accepting POST data, use the fields to seed the initial data in
    :py:class:`ImportWizardView`. The import templates will redirect the form to
`/dashboard/import`
'''
template_name = 'projects/project_import.html'
wizard_class = ImportWizardView
def post(self, request, *args, **kwargs):
initial_data = {}
initial_data['basics'] = {}
for key in ['name', 'repo', 'repo_type']:
initial_data['basics'][key] = request.POST.get(key)
initial_data['extra'] = {}
for key in ['description', 'project_url']:
initial_data['extra'][key] = request.POST.get(key)
request.method = 'GET'
return self.wizard_class.as_view(initial_dict=initial_data)(request)
class ImportDemoView(PrivateViewMixin, View):
'''View to pass request on to import form to import demo project'''
form_class = ProjectBasicsForm
request = None
args = None
kwargs = None
def get(self, request, *args, **kwargs):
'''Process link request as a form post to the project import form'''
self.request = request
self.args = args
self.kwargs = kwargs
data = self.get_form_data()
project = (Project.objects.for_admin_user(request.user)
.filter(repo=data['repo']).first())
if project is not None:
messages.success(
request, _('The demo project is already imported!'))
else:
kwargs = self.get_form_kwargs()
form = self.form_class(data=data, **kwargs)
if form.is_valid():
project = form.save()
project.save()
trigger_build(project, basic=True)
messages.success(
request, _('Your demo project is currently being imported'))
else:
for (_f, msg) in form.errors.items():
log.error(msg)
messages.error(request,
_('There was a problem adding the demo project'))
return HttpResponseRedirect(reverse('projects_dashboard'))
return HttpResponseRedirect(reverse('projects_detail',
args=[project.slug]))
def get_form_data(self):
'''Get form data to post to import form'''
return {
'name': '{0}-demo'.format(self.request.user.username),
'repo_type': 'git',
'repo': 'https://github.com/readthedocs/template.git'
}
def get_form_kwargs(self):
'''Form kwargs passed in during instantiation'''
return {'user': self.request.user}
@login_required
def edit_alias(request, project_slug, id=None):
proj = get_object_or_404(Project.objects.for_admin_user(request.user), slug=project_slug)
if id:
alias = proj.aliases.get(pk=id)
form = AliasForm(instance=alias, data=request.POST or None)
else:
form = AliasForm(request.POST or None)
if request.method == 'POST' and form.is_valid():
alias = form.save()
return HttpResponseRedirect(alias.project.get_absolute_url())
return render_to_response(
'projects/alias_edit.html',
{'form': form},
context_instance=RequestContext(request)
)
class AliasList(PrivateViewMixin, ListView):
model = VersionAlias
template_context_name = 'alias'
    template_name = 'projects/alias_list.html'
def get_queryset(self):
self.project = get_object_or_404(Project.objects.for_admin_user(self.request.user), slug=self.kwargs.get('project_slug'))
return self.project.aliases.all()
@login_required
def project_subprojects(request, project_slug):
project = get_object_or_404(Project.objects.for_admin_user(request.user),
slug=project_slug)
form = SubprojectForm(data=request.POST or None, parent=project)
if request.method == 'POST' and form.is_valid():
form.save()
project_dashboard = reverse(
'projects_subprojects', args=[project.slug])
return HttpResponseRedirect(project_dashboard)
subprojects = project.subprojects.all()
return render_to_response(
'projects/project_subprojects.html',
{'form': form, 'project': project, 'subprojects': subprojects},
context_instance=RequestContext(request)
)
@login_required
def project_subprojects_delete(request, project_slug, child_slug):
parent = get_object_or_404(Project.objects.for_admin_user(request.user), slug=project_slug)
child = get_object_or_404(Project.objects.for_admin_user(request.user), slug=child_slug)
parent.remove_subproject(child)
return HttpResponseRedirect(reverse('projects_subprojects',
args=[parent.slug]))
@login_required
def project_users(request, project_slug):
project = get_object_or_404(Project.objects.for_admin_user(request.user),
slug=project_slug)
form = UserForm(data=request.POST or None, project=project)
if request.method == 'POST' and form.is_valid():
form.save()
project_dashboard = reverse('projects_users', args=[project.slug])
return HttpResponseRedirect(project_dashboard)
users = project.users.all()
return render_to_response(
'projects/project_users.html',
{'form': form, 'project': project, 'users': users},
context_instance=RequestContext(request)
)
@login_required
def project_users_delete(request, project_slug):
if request.method != 'POST':
raise Http404
project = get_object_or_404(Project.objects.for_admin_user(request.user), slug=project_slug)
user = get_object_or_404(User.objects.all(), username=request.POST.get('username'))
if user == request.user:
raise Http404
project.users.remove(user)
project_dashboard = reverse('projects_users', args=[project.slug])
return HttpResponseRedirect(project_dashboard)
@login_required
def project_notifications(request, project_slug):
project = get_object_or_404(Project.objects.for_admin_user(request.user),
slug=project_slug)
email_form = EmailHookForm(data=request.POST or None, project=project)
webhook_form = WebHookForm(data=request.POST or None, project=project)
if request.method == 'POST':
if email_form.is_valid():
email_form.save()
if webhook_form.is_valid():
webhook_form.save()
project_dashboard = reverse('projects_notifications',
args=[project.slug])
return HttpResponseRedirect(project_dashboard)
emails = project.emailhook_notifications.all()
urls = project.webhook_notifications.all()
return render_to_response(
'projects/project_notifications.html',
{
'email_form': email_form,
'webhook_form': webhook_form,
'project': project,
'emails': emails,
'urls': urls,
},
context_instance=RequestContext(request)
)
@login_required
def project_comments_settings(request, project_slug):
project = get_object_or_404(Project.objects.for_admin_user(request.user),
slug=project_slug)
return render_to_response(
'projects/project_comments_settings.html',
{
'project': project,
},
context_instance=RequestContext(request)
)
@login_required
def project_notifications_delete(request, project_slug):
if request.method != 'POST':
raise Http404
project = get_object_or_404(Project.objects.for_admin_user(request.user),
slug=project_slug)
try:
project.emailhook_notifications.get(email=request.POST.get('email')).delete()
except EmailHook.DoesNotExist:
try:
project.webhook_notifications.get(url=request.POST.get('email')).delete()
except WebHook.DoesNotExist:
raise Http404
project_dashboard = reverse('projects_notifications', args=[project.slug])
return HttpResponseRedirect(project_dashboard)
@login_required
def project_translations(request, project_slug):
project = get_object_or_404(Project.objects.for_admin_user(request.user),
slug=project_slug)
form = TranslationForm(data=request.POST or None, parent=project)
if request.method == 'POST' and form.is_valid():
form.save()
project_dashboard = reverse('projects_translations',
args=[project.slug])
return HttpResponseRedirect(project_dashboard)
lang_projects = project.translations.all()
return render_to_response(
'projects/project_translations.html',
{'form': form, 'project': project, 'lang_projects': lang_projects},
context_instance=RequestContext(request)
)
@login_required
def project_translations_delete(request, project_slug, child_slug):
project = get_object_or_404(Project.objects.for_admin_user(request.user), slug=project_slug)
subproj = get_object_or_404(Project.objects.for_admin_user(request.user), slug=child_slug)
project.translations.remove(subproj)
project_dashboard = reverse('projects_translations', args=[project.slug])
return HttpResponseRedirect(project_dashboard)
@login_required
def project_redirects(request, project_slug):
project = get_object_or_404(Project.objects.for_admin_user(request.user),
slug=project_slug)
form = RedirectForm(data=request.POST or None, project=project)
if request.method == 'POST' and form.is_valid():
form.save()
project_dashboard = reverse('projects_redirects', args=[project.slug])
return HttpResponseRedirect(project_dashboard)
redirects = project.redirects.all()
return render_to_response(
'projects/project_redirects.html',
{'form': form, 'project': project, 'redirects': redirects},
context_instance=RequestContext(request)
)
@login_required
def project_redirects_delete(request, project_slug):
if request.method != 'POST':
return HttpResponseNotAllowed('Only POST is allowed')
project = get_object_or_404(Project.objects.for_admin_user(request.user),
slug=project_slug)
redirect = get_object_or_404(project.redirects,
pk=request.POST.get('id_pk'))
if redirect.project == project:
redirect.delete()
else:
raise Http404
return HttpResponseRedirect(reverse('projects_redirects',
args=[project.slug]))
@login_required
def project_import_github(request, sync=False):
'''Show form that prefills import form with data from GitHub'''
github_connected = oauth_utils.import_github(user=request.user, sync=sync)
repos = GithubProject.objects.filter(users__in=[request.user])
# Find existing projects that match a repo url
for repo in repos:
ghetto_repo = repo.git_url.replace('git://', '').replace('.git', '')
projects = (Project
.objects
.public(request.user)
.filter(Q(repo__endswith=ghetto_repo) |
Q(repo__endswith=ghetto_repo + '.git')))
if projects:
repo.matches = [project.slug for project in projects]
else:
repo.matches = []
return render_to_response(
'projects/project_import_github.html',
{
'repos': repos,
'github_connected': github_connected,
'sync': sync,
},
context_instance=RequestContext(request)
)
@login_required
def project_import_bitbucket(request, sync=False):
'''Show form that prefills import form with data from BitBucket'''
bitbucket_connected = oauth_utils.import_bitbucket(user=request.user, sync=sync)
repos = BitbucketProject.objects.filter(users__in=[request.user])
# Find existing projects that match a repo url
for repo in repos:
ghetto_repo = repo.git_url.replace('git://', '').replace('.git', '')
projects = (Project
.objects
.public(request.user)
.filter(Q(repo__endswith=ghetto_repo) |
Q(repo__endswith=ghetto_repo + '.git')))
if projects:
repo.matches = [project.slug for project in projects]
else:
repo.matches = []
return render_to_response(
'projects/project_import_bitbucket.html',
{
'repos': repos,
'bitbucket_connected': bitbucket_connected,
'sync': sync,
},
context_instance=RequestContext(request)
)
@login_required
def project_version_delete_html(request, project_slug, version_slug):
project = get_object_or_404(Project.objects.for_admin_user(request.user), slug=project_slug)
version = get_object_or_404(Version.objects.public(user=request.user, project=project, only_active=False), slug=version_slug)
if not version.active:
version.built = False
version.save()
tasks.clear_artifacts.delay(version.pk)
else:
raise Http404
return HttpResponseRedirect(reverse('project_version_list', kwargs={'project_slug': project_slug}))
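# URL wiring sketch (not part of the original module): the reverse() calls above
# imply URL names such as 'projects_dashboard' and 'projects_users'; a minimal
# urls.py mapping a few of them onto these views could look like the commented
# block below. The URL regexes are assumptions for illustration only.
#
#     from django.conf.urls import url
#     from projects.views import private
#     urlpatterns = [
#         url(r'^dashboard/$',
#             private.ProjectDashboard.as_view(), name='projects_dashboard'),
#         url(r'^dashboard/(?P<project_slug>[-\w]+)/users/$',
#             private.project_users, name='projects_users'),
#         url(r'^dashboard/(?P<project_slug>[-\w]+)/notifications/$',
#             private.project_notifications, name='projects_notifications'),
#     ]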
| {
"content_hash": "96aca13071fa91afde44ce1f29511d53",
"timestamp": "",
"source": "github",
"line_count": 666,
"max_line_length": 129,
"avg_line_length": 35.87687687687688,
"alnum_prop": 0.6421277308110823,
"repo_name": "cgourlay/readthedocs.org",
"id": "4ea742dc78135735ca1a4499bd714b6b671d9a9a",
"size": "23894",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "readthedocs/projects/views/private.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "4515"
},
{
"name": "CSS",
"bytes": "65384"
},
{
"name": "HTML",
"bytes": "212314"
},
{
"name": "JavaScript",
"bytes": "1433027"
},
{
"name": "Makefile",
"bytes": "4594"
},
{
"name": "Python",
"bytes": "1548783"
},
{
"name": "Shell",
"bytes": "492"
}
],
"symlink_target": ""
} |
'''
Created on Feb 20, 2010
@author: ctoscano
'''
from slique.html.element import element
from slique.html.html import new
class table(element):
'''
classdocs
'''
def __init__(self, **attrs):
'''
Constructor
'''
super(table, self).__init__('table', **attrs)
self.set(**attrs)
self.__tbody = new.tbody
self.append(self.__tbody)
self.nextRow()
@property
def tbody(self):
return self.__tbody
def getRow(self, index):
return self.tbody[index]
def add(self, item):
assert(hasattr(item, 'tag') and item.tag == 'tr')
self.tbody.append(item)
def addCell(self, item='', as_html=False, **attrs):
td = new.td(**attrs)
if as_html:
td.appendHTML(item)
else:
td.append(item)
self.tbody[-1].append(td)
return td
def nextRow(self, **attrs):
self.tbody.append(new.tr(**attrs))
class HorizontalTable(table):
def __init__(self, **attrs):
'''
Constructor
'''
super(HorizontalTable, self).__init__(**attrs)
@property
def row(self):
return self.tbody[0]
def add(self, item, valign='top', **attrs):
if (hasattr(item, 'tag') and item.tag == 'tr'):
super(HorizontalTable, self).add(item)
else:
self.row.append(new.td(valign=valign, **attrs).append(item))
def insert(self, item, index):
self.row.insert(index, new.td().append(item))
class VerticalTable(table):
def __init__(self, **attrs):
'''
Constructor
'''
super(VerticalTable, self).__init__(**attrs)
def add(self, item):
self.addCell(item)
self.nextRow()
def insert(self, item, index):
        # VerticalTable has no `row` property (only HorizontalTable does);
        # insert the new row into the table body instead.
        self.tbody.insert(index, new.tr().append(new.td().append(item)))
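# Usage sketch (not part of the original module): building a small two-row table
# with the helpers above. How the element tree renders to markup depends on
# slique.html.element, which is not shown here.
def _example_build_table():
    t = table()
    t.addCell('name')
    t.addCell('value')
    t.nextRow()
    t.addCell('alpha')
    t.addCell('1')
    return t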
| {
"content_hash": "e1c8991dc9a9629739a87bd05ebd21e7",
"timestamp": "",
"source": "github",
"line_count": 84,
"max_line_length": 72,
"avg_line_length": 23.654761904761905,
"alnum_prop": 0.5153497735279315,
"repo_name": "ctoscano/SliqueHTML",
"id": "0496bfdabf02827fd26eea2f454f67cf1fb9304c",
"size": "1987",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/slique/html/table.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "12285"
}
],
"symlink_target": ""
} |
from .api.v2 import ClientV2
| {
"content_hash": "93ef097815cee73ffd84985e09085db2",
"timestamp": "",
"source": "github",
"line_count": 1,
"max_line_length": 28,
"avg_line_length": 29,
"alnum_prop": 0.7931034482758621,
"repo_name": "valerylisay/digitalocean-api",
"id": "a6c4086631b1bb36aad0e1b1ae73848af988cdf9",
"size": "46",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "digitalocean/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "310186"
}
],
"symlink_target": ""
} |
import hyperopt
from hyperopt import fmin, tpe, hp
from hyperopt.mongoexp import MongoTrials
import sys
import task
space = hp.uniform('x', -10, 10)
trials = MongoTrials('mongo://localhost:27017/razlaw/jobs', exp_key=sys.argv[2])
if sys.argv[1] == "search":
best = fmin(task.objective,
space=space,
trials=trials,
algo=tpe.suggest,
max_evals=100)
elif sys.argv[1] == "history":
hyperopt.plotting.main_plot_history(trials)
elif sys.argv[1] == "histogram":
hyperopt.plotting.main_plot_histogram(trials)
elif sys.argv[1] == "vars":
bandit = hyperopt.Bandit(expr=space, do_checks=False)
hyperopt.plotting.main_plot_vars(trials, bandit=bandit, colorize_best=5)
elif sys.argv[1] == "best":
    print(trials.best_trial)
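# Companion sketch (not part of the original script): the `task` module imported
# above only needs to expose an `objective` callable; a minimal stand-in could
# look like the commented task.py below (assumption -- the repo's real task.py is
# not shown here). MongoTrials also needs a MongoDB instance plus at least one
# `hyperopt-mongo-worker` process pointed at the same database to make progress.
#
#     # task.py (assumed)
#     def objective(x):
#         # toy loss with its minimum at x = 3
#         return (x - 3.0) ** 2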
| {
"content_hash": "c20f6ebc1997a01d70092f3411c1ddef",
"timestamp": "",
"source": "github",
"line_count": 26,
"max_line_length": 80,
"avg_line_length": 28.384615384615383,
"alnum_prop": 0.7168021680216802,
"repo_name": "temporaer/hyperopt_minimal",
"id": "84188f706d3b2c82ddef66a3180cefd7bcf48937",
"size": "738",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "search.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C++",
"bytes": "1826"
},
{
"name": "Python",
"bytes": "854"
}
],
"symlink_target": ""
} |
import uuid
from msrest.pipeline import ClientRawResponse
from msrestazure.azure_exceptions import CloudError
from msrest.polling import LROPoller, NoPolling
from msrestazure.polling.arm_polling import ARMPolling
from .. import models
class RouteTablesOperations(object):
"""RouteTablesOperations operations.
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
:ivar api_version: Client API version. Constant value: "2016-09-01".
"""
models = models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self.api_version = "2016-09-01"
self.config = config
def _delete_initial(
self, resource_group_name, route_table_name, custom_headers=None, raw=False, **operation_config):
# Construct URL
url = self.delete.metadata['url']
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'routeTableName': self._serialize.url("route_table_name", route_table_name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.delete(url, query_parameters)
response = self._client.send(request, header_parameters, stream=False, **operation_config)
if response.status_code not in [200, 202, 204]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
def delete(
self, resource_group_name, route_table_name, custom_headers=None, raw=False, polling=True, **operation_config):
"""Deletes the specified route table.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param route_table_name: The name of the route table.
:type route_table_name: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: The poller return type is ClientRawResponse, the
direct response alongside the deserialized response
:param polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:return: An instance of LROPoller that returns None or
ClientRawResponse<None> if raw==True
:rtype: ~msrestazure.azure_operation.AzureOperationPoller[None] or
~msrestazure.azure_operation.AzureOperationPoller[~msrest.pipeline.ClientRawResponse[None]]
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
raw_result = self._delete_initial(
resource_group_name=resource_group_name,
route_table_name=route_table_name,
custom_headers=custom_headers,
raw=True,
**operation_config
)
def get_long_running_output(response):
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
lro_delay = operation_config.get(
'long_running_operation_timeout',
self.config.long_running_operation_timeout)
if polling is True: polling_method = ARMPolling(lro_delay, **operation_config)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/routeTables/{routeTableName}'}
def get(
self, resource_group_name, route_table_name, expand=None, custom_headers=None, raw=False, **operation_config):
"""Gets the specified route table.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param route_table_name: The name of the route table.
:type route_table_name: str
:param expand: Expands referenced resources.
:type expand: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: RouteTable or ClientRawResponse if raw=true
:rtype: ~azure.mgmt.network.v2016_09_01.models.RouteTable or
~msrest.pipeline.ClientRawResponse
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
# Construct URL
url = self.get.metadata['url']
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'routeTableName': self._serialize.url("route_table_name", route_table_name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
if expand is not None:
query_parameters['$expand'] = self._serialize.query("expand", expand, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(request, header_parameters, stream=False, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('RouteTable', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/routeTables/{routeTableName}'}
def _create_or_update_initial(
self, resource_group_name, route_table_name, parameters, custom_headers=None, raw=False, **operation_config):
# Construct URL
url = self.create_or_update.metadata['url']
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'routeTableName': self._serialize.url("route_table_name", route_table_name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct body
body_content = self._serialize.body(parameters, 'RouteTable')
# Construct and send request
request = self._client.put(url, query_parameters)
response = self._client.send(
request, header_parameters, body_content, stream=False, **operation_config)
if response.status_code not in [200, 201]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('RouteTable', response)
if response.status_code == 201:
deserialized = self._deserialize('RouteTable', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
def create_or_update(
self, resource_group_name, route_table_name, parameters, custom_headers=None, raw=False, polling=True, **operation_config):
"""Create or updates a route table in a specified resource group.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param route_table_name: The name of the route table.
:type route_table_name: str
:param parameters: Parameters supplied to the create or update route
table operation.
:type parameters: ~azure.mgmt.network.v2016_09_01.models.RouteTable
:param dict custom_headers: headers that will be added to the request
:param bool raw: The poller return type is ClientRawResponse, the
direct response alongside the deserialized response
:param polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:return: An instance of LROPoller that returns RouteTable or
ClientRawResponse<RouteTable> if raw==True
:rtype:
~msrestazure.azure_operation.AzureOperationPoller[~azure.mgmt.network.v2016_09_01.models.RouteTable]
or
~msrestazure.azure_operation.AzureOperationPoller[~msrest.pipeline.ClientRawResponse[~azure.mgmt.network.v2016_09_01.models.RouteTable]]
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
raw_result = self._create_or_update_initial(
resource_group_name=resource_group_name,
route_table_name=route_table_name,
parameters=parameters,
custom_headers=custom_headers,
raw=True,
**operation_config
)
def get_long_running_output(response):
deserialized = self._deserialize('RouteTable', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
lro_delay = operation_config.get(
'long_running_operation_timeout',
self.config.long_running_operation_timeout)
if polling is True: polling_method = ARMPolling(lro_delay, **operation_config)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/routeTables/{routeTableName}'}
def list(
self, resource_group_name, custom_headers=None, raw=False, **operation_config):
"""Gets all route tables in a resource group.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: An iterator like instance of RouteTable
:rtype:
~azure.mgmt.network.v2016_09_01.models.RouteTablePaged[~azure.mgmt.network.v2016_09_01.models.RouteTable]
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
def internal_paging(next_link=None, raw=False):
if not next_link:
# Construct URL
url = self.list.metadata['url']
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
else:
url = next_link
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(
request, header_parameters, stream=False, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
return response
# Deserialize response
deserialized = models.RouteTablePaged(internal_paging, self._deserialize.dependencies)
if raw:
header_dict = {}
client_raw_response = models.RouteTablePaged(internal_paging, self._deserialize.dependencies, header_dict)
return client_raw_response
return deserialized
list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/routeTables'}
def list_all(
self, custom_headers=None, raw=False, **operation_config):
"""Gets all route tables in a subscription.
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: An iterator like instance of RouteTable
:rtype:
~azure.mgmt.network.v2016_09_01.models.RouteTablePaged[~azure.mgmt.network.v2016_09_01.models.RouteTable]
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
def internal_paging(next_link=None, raw=False):
if not next_link:
# Construct URL
url = self.list_all.metadata['url']
path_format_arguments = {
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
else:
url = next_link
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(
request, header_parameters, stream=False, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
return response
# Deserialize response
deserialized = models.RouteTablePaged(internal_paging, self._deserialize.dependencies)
if raw:
header_dict = {}
client_raw_response = models.RouteTablePaged(internal_paging, self._deserialize.dependencies, header_dict)
return client_raw_response
return deserialized
list_all.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Network/routeTables'}
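# Usage sketch (not part of the generated client): these operations are normally
# reached through the service client rather than instantiated directly. The
# credential/client wiring below is an assumption for illustration, not taken
# from this module.
#
#     from azure.common.credentials import ServicePrincipalCredentials
#     from azure.mgmt.network import NetworkManagementClient
#     credentials = ServicePrincipalCredentials(
#         client_id='...', secret='...', tenant='...')
#     network_client = NetworkManagementClient(credentials, '<subscription-id>')
#     for route_table in network_client.route_tables.list('my-resource-group'):
#         print(route_table.name)
#     poller = network_client.route_tables.delete('my-resource-group', 'my-rt')
#     poller.wait()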
| {
"content_hash": "827c14171e758c5d132bb45e447c3f32",
"timestamp": "",
"source": "github",
"line_count": 411,
"max_line_length": 166,
"avg_line_length": 46.2676399026764,
"alnum_prop": 0.6419856962557846,
"repo_name": "lmazuel/azure-sdk-for-python",
"id": "e82d56d6ad5445fcfff26f77277b1a29ad852b9f",
"size": "19490",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "azure-mgmt-network/azure/mgmt/network/v2016_09_01/operations/route_tables_operations.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "42572767"
}
],
"symlink_target": ""
} |
import copy
from oslotest import mockpatch
from tempest.lib import exceptions as lib_exc
from tempest.lib.services.compute import images_client
from tempest.tests.lib import fake_auth_provider
from tempest.tests.lib.services.compute import base
class TestImagesClient(base.BaseComputeServiceTest):
# Data Dictionaries used for testing #
FAKE_IMAGE_METADATA = {
"list":
{"metadata": {
"auto_disk_config": "True",
"Label": "Changed"
}},
"set_item":
{"meta": {
"auto_disk_config": "True"
}},
"show_item":
{"meta": {
"kernel_id": "nokernel",
}},
"update":
{"metadata": {
"kernel_id": "False",
"Label": "UpdatedImage"
}},
"set":
{"metadata": {
"Label": "Changed",
"auto_disk_config": "True"
}},
"delete_item": {}
}
FAKE_IMAGE_DATA = {
"list":
{"images": [
{"id": "70a599e0-31e7-49b7-b260-868f441e862b",
"links": [
{"href": "http://openstack.example.com/v2/openstack" +
"/images/70a599e0-31e7-49b7-b260-868f441e862b",
"rel": "self"
}
],
"name": "fakeimage7"
}]},
"show": {"image": {
"created": "2011-01-01T01:02:03Z",
"id": "70a599e0-31e7-49b7-b260-868f441e862b",
"links": [
{
"href": "http://openstack.example.com/v2/openstack" +
"/images/70a599e0-31e7-49b7-b260-868f441e862b",
"rel": "self"
},
],
"metadata": {
"architecture": "x86_64",
"auto_disk_config": "True",
"kernel_id": "nokernel",
"ramdisk_id": "nokernel"
},
"minDisk": 0,
"minRam": 0,
"name": "fakeimage7",
"progress": 100,
"status": "ACTIVE",
"updated": "2011-01-01T01:02:03Z"}},
"create": {},
"delete": {}
}
func2mock = {
'get': 'tempest.lib.common.rest_client.RestClient.get',
'post': 'tempest.lib.common.rest_client.RestClient.post',
'put': 'tempest.lib.common.rest_client.RestClient.put',
'delete': 'tempest.lib.common.rest_client.RestClient.delete'}
# Variable definition
FAKE_IMAGE_ID = FAKE_IMAGE_DATA['show']['image']['id']
FAKE_SERVER_ID = "80a599e0-31e7-49b7-b260-868f441e343f"
FAKE_CREATE_INFO = {'location': 'None'}
FAKE_METADATA = FAKE_IMAGE_METADATA['show_item']['meta']
def setUp(self):
super(TestImagesClient, self).setUp()
fake_auth = fake_auth_provider.FakeAuthProvider()
self.client = images_client.ImagesClient(fake_auth,
"compute", "regionOne")
def _test_image_operation(self, operation="delete", bytes_body=False):
response_code = 200
mock_operation = self.func2mock['get']
expected_op = self.FAKE_IMAGE_DATA[operation]
params = {"image_id": self.FAKE_IMAGE_ID}
headers = None
if operation == 'list':
function = self.client.list_images
elif operation == 'show':
function = self.client.show_image
elif operation == 'create':
function = self.client.create_image
mock_operation = self.func2mock['post']
params = {"server_id": self.FAKE_SERVER_ID}
response_code = 202
headers = {
'connection': 'keep-alive',
'content-length': '0',
'content-type': 'application/json',
'status': '202',
'x-compute-request-id': 'req-fake',
'vary': 'accept-encoding',
'x-openstack-nova-api-version': 'v2.1',
'date': '13 Oct 2015 05:55:36 GMT',
'location': 'http://fake.com/images/fake'
}
else:
function = self.client.delete_image
mock_operation = self.func2mock['delete']
response_code = 204
self.check_service_client_function(
function, mock_operation, expected_op,
bytes_body, response_code, headers, **params)
def _test_image_metadata(self, operation="set_item", bytes_body=False):
response_code = 200
expected_op = self.FAKE_IMAGE_METADATA[operation]
if operation == 'list':
function = self.client.list_image_metadata
mock_operation = self.func2mock['get']
params = {"image_id": self.FAKE_IMAGE_ID}
elif operation == 'set':
function = self.client.set_image_metadata
mock_operation = self.func2mock['put']
params = {"image_id": "_dummy_data",
"meta": self.FAKE_METADATA}
elif operation == 'update':
function = self.client.update_image_metadata
mock_operation = self.func2mock['post']
params = {"image_id": self.FAKE_IMAGE_ID,
"meta": self.FAKE_METADATA}
elif operation == 'show_item':
mock_operation = self.func2mock['get']
function = self.client.show_image_metadata_item
params = {"image_id": self.FAKE_IMAGE_ID,
"key": "123"}
elif operation == 'delete_item':
function = self.client.delete_image_metadata_item
mock_operation = self.func2mock['delete']
response_code = 204
params = {"image_id": self.FAKE_IMAGE_ID,
"key": "123"}
else:
function = self.client.set_image_metadata_item
mock_operation = self.func2mock['put']
params = {"image_id": self.FAKE_IMAGE_ID,
"key": "123",
"meta": self.FAKE_METADATA}
self.check_service_client_function(
function, mock_operation, expected_op,
bytes_body, response_code, **params)
def _test_resource_deleted(self, bytes_body=False):
params = {"id": self.FAKE_IMAGE_ID}
expected_op = self.FAKE_IMAGE_DATA['show']['image']
self.useFixture(mockpatch.Patch('tempest.lib.services.compute'
'.images_client.ImagesClient.show_image',
side_effect=lib_exc.NotFound))
self.assertEqual(True, self.client.is_resource_deleted(**params))
tempdata = copy.deepcopy(self.FAKE_IMAGE_DATA['show'])
tempdata['image']['id'] = None
self.useFixture(mockpatch.Patch('tempest.lib.services.compute'
'.images_client.ImagesClient.show_image',
return_value=expected_op))
self.assertEqual(False, self.client.is_resource_deleted(**params))
def test_list_images_with_str_body(self):
self._test_image_operation('list')
def test_list_images_with_bytes_body(self):
self._test_image_operation('list', True)
def test_show_image_with_str_body(self):
self._test_image_operation('show')
def test_show_image_with_bytes_body(self):
self._test_image_operation('show', True)
def test_create_image_with_str_body(self):
self._test_image_operation('create')
def test_create_image_with_bytes_body(self):
self._test_image_operation('create', True)
def test_delete_image_with_str_body(self):
self._test_image_operation('delete')
def test_delete_image_with_bytes_body(self):
self._test_image_operation('delete', True)
def test_list_image_metadata_with_str_body(self):
self._test_image_metadata('list')
def test_list_image_metadata_with_bytes_body(self):
self._test_image_metadata('list', True)
def test_set_image_metadata_with_str_body(self):
self._test_image_metadata('set')
def test_set_image_metadata_with_bytes_body(self):
self._test_image_metadata('set', True)
def test_update_image_metadata_with_str_body(self):
self._test_image_metadata('update')
def test_update_image_metadata_with_bytes_body(self):
self._test_image_metadata('update', True)
def test_set_image_metadata_item_with_str_body(self):
self._test_image_metadata()
def test_set_image_metadata_item_with_bytes_body(self):
self._test_image_metadata(bytes_body=True)
def test_show_image_metadata_item_with_str_body(self):
self._test_image_metadata('show_item')
def test_show_image_metadata_item_with_bytes_body(self):
self._test_image_metadata('show_item', True)
def test_delete_image_metadata_item_with_str_body(self):
self._test_image_metadata('delete_item')
def test_delete_image_metadata_item_with_bytes_body(self):
self._test_image_metadata('delete_item', True)
def test_resource_delete_with_str_body(self):
self._test_resource_deleted()
def test_resource_delete_with_bytes_body(self):
self._test_resource_deleted(True)
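# Runner sketch (not part of the original module): these tests are normally run
# through tox/stestr in the tempest tree, but since they are plain testtools
# test cases the module can also be exercised directly, assuming the tempest
# package and its test dependencies are importable:
if __name__ == '__main__':
    import unittest
    unittest.main()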
| {
"content_hash": "24816f1427dcaf460a3734adc6f500a3",
"timestamp": "",
"source": "github",
"line_count": 251,
"max_line_length": 76,
"avg_line_length": 37.1394422310757,
"alnum_prop": 0.5495601802188371,
"repo_name": "nuagenetworks/tempest",
"id": "28757c39fa2d82f5e63ae7689417d0efd5fc2cae",
"size": "9953",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tempest/tests/lib/services/compute/test_images_client.py",
"mode": "33188",
"license": "apache-2.0",
"language": [],
"symlink_target": ""
} |
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('events', '0030_brandingperiod_override_listings_root'),
]
operations = [
migrations.AlterField(
model_name='event',
name='ticket_type',
field=models.CharField(choices=[('NA', 'n/a'), ('NT', 'Native'), ('EB', 'Eventbrite'), ('AC', 'ACCA'), ('GN', 'Generic'), ('MSL', 'MSL')], default='NA', max_length=3),
),
]
| {
"content_hash": "d3277944b0eb892c85e912b07d90ab85",
"timestamp": "",
"source": "github",
"line_count": 16,
"max_line_length": 179,
"avg_line_length": 30.5,
"alnum_prop": 0.5573770491803278,
"repo_name": "sussexstudent/falmer",
"id": "f7d76238b0effcfd8ed14b357d772ca4cd902f86",
"size": "537",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "falmer/events/migrations/0031_auto_20180928_1223.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "2142"
},
{
"name": "Dockerfile",
"bytes": "1035"
},
{
"name": "HTML",
"bytes": "8269"
},
{
"name": "JavaScript",
"bytes": "817"
},
{
"name": "Python",
"bytes": "513792"
},
{
"name": "Shell",
"bytes": "8120"
}
],
"symlink_target": ""
} |